# imgclsmob-master/tests/convert_gl2tf2_conv2d_b.py
import numpy as np
import tensorflow as tf
import tensorflow.keras.layers as nn
def is_channels_first(data_format):
"""
    Check whether the tested data format is channels-first.
Parameters:
----------
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
Returns:
-------
bool
A flag.
"""
return data_format == "channels_first"
class TF2Model(tf.keras.Model):
def __init__(self,
data_format="channels_last",
**kwargs):
super(TF2Model, self).__init__(**kwargs)
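        # With stride 1 and a 7x7 kernel, Keras "same" padding is equivalent to
        # the explicit padding=(3, 3) used by the Gluon reference below.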
self.conv = nn.Conv2D(
filters=64,
kernel_size=(7, 7),
strides=1,
padding="same",
data_format=data_format,
dilation_rate=1,
use_bias=False,
name="conv")
def call(self, x):
x = self.conv(x)
return x
def gl_calc(gl_w, x):
import mxnet as mx
class GluonModel(mx.gluon.HybridBlock):
def __init__(self,
**kwargs):
super(GluonModel, self).__init__(**kwargs)
with self.name_scope():
self.conv = mx.gluon.nn.Conv2D(
channels=64,
kernel_size=(7, 7),
strides=1,
padding=(3, 3),
use_bias=False,
in_channels=3)
def hybrid_forward(self, F, x):
x = self.conv(x)
return x
gl_model = GluonModel()
# ctx = mx.cpu()
ctx = mx.gpu(0)
gl_params = gl_model._collect_params_with_prefix()
# gl_w = np.transpose(tf2_w, axes=(3, 2, 0, 1))
gl_params['conv.weight']._load_init(mx.nd.array(gl_w, ctx), ctx)
# gl_params['conv.bias']._load_init(mx.nd.array(b, ctx), ctx)
gl_x = mx.nd.array(x, ctx)
gl_y = gl_model(gl_x).asnumpy()
return gl_y
def main():
gpus = tf.config.experimental.list_physical_devices("GPU")
if gpus:
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
success = True
for i in range(10):
gl_w = np.random.randn(64, 3, 7, 7).astype(np.float32)
# tf2_w = np.random.randn(7, 7, 3, 64).astype(np.float32)
b = np.random.randn(64, ).astype(np.float32)
x = np.random.randn(10, 3, 224, 256).astype(np.float32)
assert (b is not None)
data_format = "channels_last"
# data_format = "channels_first"
tf2_use_cuda = True
if not tf2_use_cuda:
with tf.device("/cpu:0"):
tf2_model = TF2Model(data_format=data_format)
else:
tf2_model = TF2Model(data_format=data_format)
input_shape = (1, 224, 256, 3) if data_format == "channels_last" else (1, 3, 224, 256)
tf2_model.build(input_shape=input_shape)
tf2_params = {v.name: v for v in tf2_model.weights}
# print(tf2_params["conv/kernel:0"].shape)
# tf2_w = np.transpose(gl_w, axes=(2, 3, 1, 0))
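        # Gluon conv weights are OIHW; Keras Conv2D kernels are HWIO, hence the
        # (2, 3, 1, 0) transpose.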
tf2_w = np.transpose(gl_w, axes=(2, 3, 1, 0))
tf2_params["conv/kernel:0"].assign(tf2_w)
# tf2_params["conv/bias:0"].assign(b)
tf2_x = x.transpose((0, 2, 3, 1)) if data_format == "channels_last" else x
tf2_x = tf.convert_to_tensor(tf2_x)
tf2_y = tf2_model(tf2_x).numpy()
if data_format == "channels_last":
tf2_y = tf2_y.transpose((0, 3, 1, 2))
gl_y = gl_calc(gl_w, x)
dist = np.sum(np.abs(gl_y - tf2_y))
if dist > 1e-5:
success = False
print("i={}, dist={}".format(i, dist))
# print(gl_y)
# print(tf_y)
if success:
print("All ok.")
if __name__ == "__main__":
main()

# imgclsmob-master/tests/convert_gl2tf_batchnorm.py
import numpy as np
import mxnet as mx
import tensorflow as tf
LENGTH = 64
class GluonModel(mx.gluon.HybridBlock):
def __init__(self,
**kwargs):
super(GluonModel, self).__init__(**kwargs)
with self.name_scope():
self.bn = mx.gluon.nn.BatchNorm(
momentum=0.9,
epsilon=1e-5,
in_channels=LENGTH,
use_global_stats=False)
def hybrid_forward(self, F, x):
x = self.bn(x)
return x
def batchnorm(x,
momentum=0.9,
epsilon=1e-5,
training=False,
name=None):
"""
Batch normalization layer.
Parameters:
----------
x : Tensor
Input tensor.
momentum : float, default 0.9
Momentum for the moving average.
epsilon : float, default 1e-5
Small float added to variance to avoid dividing by zero.
training : bool, or a TensorFlow boolean scalar tensor, default False
Whether to return the output in training mode or in inference mode.
    name : str, default None
Layer name.
Returns:
-------
Tensor
Resulted tensor.
"""
x = tf.layers.batch_normalization(
inputs=x,
axis=1,
momentum=momentum,
epsilon=epsilon,
training=training,
name=name)
return x
def tensorflow_model(x):
x = batchnorm(
x=x,
training=False,
name="bn")
return x
def main():
success = True
for i in range(10):
g = np.random.randn(LENGTH, ).astype(np.float32)
b = np.random.randn(LENGTH, ).astype(np.float32)
m = np.random.randn(LENGTH, ).astype(np.float32)
v = np.random.randn(LENGTH, ).astype(np.float32)
b = b - b.min() + 1.0
v = v - v.min() + 1.0
IMG_SIZE = 224
x = np.random.randn(10, LENGTH, IMG_SIZE, IMG_SIZE).astype(np.float32)
gl_model = GluonModel()
# ctx = mx.cpu()
ctx = mx.gpu(0)
gl_params = gl_model._collect_params_with_prefix()
gl_params['bn.gamma']._load_init(mx.nd.array(g, ctx), ctx)
gl_params['bn.beta']._load_init(mx.nd.array(b, ctx), ctx)
gl_params['bn.running_mean']._load_init(mx.nd.array(m, ctx), ctx)
gl_params['bn.running_var']._load_init(mx.nd.array(v, ctx), ctx)
# gl_model.initialize()
gl_x = mx.nd.array(x, ctx)
gl_y = gl_model(gl_x).asnumpy()
xx = tf.placeholder(
dtype=tf.float32,
shape=(None, LENGTH, IMG_SIZE, IMG_SIZE),
name='xx')
tf_model = tensorflow_model(xx)
tf_params = {v.name: v for v in tf.global_variables()}
with tf.Session() as sess:
sess.run(tf_params['bn/gamma:0'].assign(g))
sess.run(tf_params['bn/beta:0'].assign(b))
sess.run(tf_params['bn/moving_mean:0'].assign(m))
sess.run(tf_params['bn/moving_variance:0'].assign(v))
tf_y = sess.run(tf_model, feed_dict={xx: x})
tf.reset_default_graph()
diff = np.abs(gl_y - tf_y)
dist = np.sum(diff)
if dist > 1e-5:
success = False
print("i={}, dist={}".format(i, dist))
# print(gl_y)
# print(tf_y)
if success:
print("All ok.")
if __name__ == '__main__':
main()

# imgclsmob-master/tests/convert_gl2tf_dense.py
import numpy as np
import mxnet as mx
import tensorflow as tf
# import tensorflow.contrib.slim as slim
class GluonModel(mx.gluon.HybridBlock):
def __init__(self,
**kwargs):
super(GluonModel, self).__init__(**kwargs)
with self.name_scope():
self.dense = mx.gluon.nn.Dense(
units=1000,
use_bias=False,
flatten=True,
in_units=1024)
def hybrid_forward(self, F, x):
x = self.dense(x)
return x
def tensorflow_model(x):
# x = slim.fully_connected(
# inputs=x,
# num_outputs=1000,
# activation_fn=None,
# scope='dense')
x = tf.layers.dense(
inputs=x,
units=1000,
use_bias=False,
name="dense")
return x
def main():
success = True
for i in range(10):
# gl_w = np.random.randn(1000, 1024).astype(np.float32)
tf_w = np.random.randn(1024, 1000).astype(np.float32)
# b = np.random.randn(1000, ).astype(np.float32)
x = np.random.randn(1, 1024).astype(np.float32)
gl_model = GluonModel()
# ctx = mx.cpu()
ctx = mx.gpu(0)
gl_params = gl_model._collect_params_with_prefix()
gl_w = np.transpose(tf_w, axes=(1, 0))
gl_params['dense.weight']._load_init(mx.nd.array(gl_w, ctx), ctx)
# gl_params['dense.bias']._load_init(mx.nd.array(b, ctx), ctx)
gl_x = mx.nd.array(x, ctx)
gl_y = gl_model(gl_x).asnumpy()
xx = tf.placeholder(
dtype=tf.float32,
shape=(None, 1024),
name='xx')
tf_model = tensorflow_model(xx)
tf_params = {v.name: v for v in tf.global_variables()}
with tf.Session() as sess:
# tf_w = np.transpose(gl_w, axes=(1, 0))
sess.run(tf_params['dense/kernel:0'].assign(tf_w))
# sess.run(tf_params['dense/bias:0'].assign(b))
# sess.run(tf_params['dense/weights:0'].assign(tf_w))
# sess.run(tf_params['dense/biases:0'].assign(b))
tf_y = sess.run(tf_model, feed_dict={xx: x})
tf.reset_default_graph()
dist = np.sum(np.abs(gl_y - tf_y))
if dist > 1e-5:
success = False
print("i={}, dist={}".format(i, dist))
# print(gl_y)
# print(tf_y)
y = np.matmul(gl_w.astype(np.float64), x[0].astype(np.float64))
# y = np.dot(w, x[0])
gl_dist = np.sum(np.abs(gl_y - y))
tf_dist = np.sum(np.abs(tf_y - y))
print("i={}, gl_dist={}".format(i, gl_dist))
print("i={}, tf_dist={}".format(i, tf_dist))
if success:
print("All ok.")
if __name__ == '__main__':
main()

# imgclsmob-master/tests/convert_gl2tf_maxpool2d.py
import math
import numpy as np
import mxnet as mx
import tensorflow as tf
class GluonModel(mx.gluon.HybridBlock):
def __init__(self,
**kwargs):
super(GluonModel, self).__init__(**kwargs)
with self.name_scope():
self.pool = mx.gluon.nn.MaxPool2D(
pool_size=2,
strides=2,
padding=0)
def hybrid_forward(self, F, x):
x = self.pool(x)
return x
def maxpool2d(x,
pool_size,
strides,
padding=0,
ceil_mode=False,
name=None):
"""
Max pooling operation for two dimensional (spatial) data.
Parameters:
----------
x : Tensor
Input tensor.
pool_size : int or tuple/list of 2 int
Size of the max pooling windows.
strides : int or tuple/list of 2 int
Strides of the pooling.
padding : int or tuple/list of 2 int, default 0
        Padding value for pooling layer.
ceil_mode : bool, default False
When `True`, will use ceil instead of floor to compute the output shape.
    name : str, default None
Layer name.
Returns:
-------
Tensor
Resulted tensor.
"""
    if isinstance(pool_size, int):
        pool_size = (pool_size, pool_size)
    if isinstance(strides, int):
        strides = (strides, strides)
    if isinstance(padding, int):
        padding = (padding, padding)
    if ceil_mode:
        height = x.shape[2]
        out_height = float(height + 2 * padding[0] - pool_size[0]) / strides[0] + 1.0
        if math.ceil(out_height) > math.floor(out_height):
            padding = (padding[0] + 1, padding[1])
        width = x.shape[3]
        out_width = float(width + 2 * padding[1] - pool_size[1]) / strides[1] + 1.0
        if math.ceil(out_width) > math.floor(out_width):
            padding = (padding[0], padding[1] + 1)
if (padding[0] > 0) or (padding[1] > 0):
x = tf.pad(x, [[0, 0], [0, 0], list(padding), list(padding)], mode="REFLECT")
x = tf.layers.max_pooling2d(
inputs=x,
pool_size=pool_size,
strides=strides,
padding='valid',
data_format='channels_first',
name=name)
# if isinstance(pool_size, int):
# pool_size = (pool_size, pool_size)
# if isinstance(strides, int):
# strides = (strides, strides)
# x = tf.nn.max_pool(
# value=x,
# ksize=(1, 1) + pool_size,
# strides=(1, 1) + strides,
# padding='VALID',
# data_format='NCHW',
# name=name)
return x
def tensorflow_model(x):
x = maxpool2d(
x=x,
pool_size=2,
strides=2,
padding=0,
ceil_mode=False,
name="pool")
return x
def main():
success = True
for i in range(10):
x = np.random.randn(10, 10, 224, 224).astype(np.float32)
gl_model = GluonModel()
# ctx = mx.cpu()
ctx = mx.gpu(0)
gl_x = mx.nd.array(x, ctx)
gl_y = gl_model(gl_x).asnumpy()
xx = tf.placeholder(
dtype=tf.float32,
shape=(None, 10, 224, 224),
name='xx')
tf_model = tensorflow_model(xx)
with tf.Session() as sess:
tf_y = sess.run(tf_model, feed_dict={xx: x})
tf.reset_default_graph()
dist = np.sum(np.abs(gl_y - tf_y))
if dist > 1e-5:
success = False
print("i={}, dist={}".format(i, dist))
# print(gl_y)
# print(tf_y)
if success:
print("All ok.")
if __name__ == '__main__':
main()

# imgclsmob-master/tests/convert_gl2tf2_lstm.py
import numpy as np
import tensorflow as tf
import tensorflow.keras.layers as nn
def _calc_width(net):
import numpy as np
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
return weight_count
class TF2Model(tf.keras.Model):
def __init__(self,
**kwargs):
super(TF2Model, self).__init__(**kwargs)
# self.rnn = nn.LSTM(
# units=100,
# dropout=0.2,
# name="rnn")
# self.rnn = nn.RNN([nn.LSTMCell(
# units=100,
# dropout=0.2,
# unit_forget_bias=False,
# name="rnn{}".format(i)
# ) for i in range(2)])
self.rnn = nn.RNN([tf.compat.v1.nn.rnn_cell.LSTMCell(
num_units=100,
use_peepholes=False,
name="rnn{}".format(i)
) for i in range(2)])
def call(self, x):
x = self.rnn(x)
return x
def gl_calc():
import mxnet as mx
class GluonModel(mx.gluon.HybridBlock):
def __init__(self,
**kwargs):
super(GluonModel, self).__init__(**kwargs)
with self.name_scope():
self.rnn = mx.gluon.rnn.LSTM(
hidden_size=100,
num_layers=2,
dropout=0.2,
input_size=80)
def hybrid_forward(self, F, x):
x = self.rnn(x)
# src_params = self._collect_params_with_prefix()
# src_param_keys = list(src_params.keys())
# src_params[src_param_keys[0]]._data[0].asnumpy()
# dst_params[dst_key]._load_init(mx.nd.array(src_params[src_key].numpy(), ctx), ctx)
return x
gl_model = GluonModel()
# # ctx = mx.cpu()
# ctx = mx.gpu(0)
# gl_params = gl_model._collect_params_with_prefix()
# # gl_w = np.transpose(tf2_w, axes=(3, 2, 0, 1))
# gl_params['conv.weight']._load_init(mx.nd.array(gl_w, ctx), ctx)
# # gl_params['conv.bias']._load_init(mx.nd.array(b, ctx), ctx)
#
# gl_x = mx.nd.array(x, ctx)
# gl_y = gl_model(gl_x).asnumpy()
ctx = mx.gpu(0)
gl_x = mx.nd.zeros((3, 7, 80), ctx)
gl_model.initialize(ctx=ctx)
gl_model(gl_x)
# gl_params = gl_model._collect_params_with_prefix()
_calc_width(gl_model)
return gl_model
def main():
    gpus = tf.config.experimental.list_physical_devices("GPU")
    if gpus:
        for gpu in gpus:
            tf.config.experimental.set_memory_growth(gpu, True)

    # The Gluon<->TF2 LSTM weight mapping is not implemented in this scratch
    # script; only build both models and inspect their parameters.
    tf2_model = TF2Model()
    input_shape = (3, 7, 80)
    tf2_model(tf.random.normal(input_shape))
    dst_param_keys = [v.name for v in tf2_model.weights]
    print("TF2 weights: {}".format(dst_param_keys))

    gl_model = gl_calc()
    print("Gluon weight count: {}".format(_calc_width(gl_model)))
if __name__ == "__main__":
main()

# imgclsmob-master/tests/convert_gl2tf2_batchnorm.py
import numpy as np
import mxnet as mx
import tensorflow as tf
import tensorflow.keras.layers as nn
LENGTH = 64
class GluonModel(mx.gluon.HybridBlock):
def __init__(self,
**kwargs):
super(GluonModel, self).__init__(**kwargs)
with self.name_scope():
self.bn = mx.gluon.nn.BatchNorm(
momentum=0.9,
epsilon=1e-5,
in_channels=LENGTH,
use_global_stats=False)
def hybrid_forward(self, F, x):
x = self.bn(x)
return x
def is_channels_first(data_format):
"""
    Check whether the tested data format is channels-first.
Parameters:
----------
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
Returns:
-------
bool
A flag.
"""
return data_format == "channels_first"
def get_channel_axis(data_format):
"""
Get channel axis.
Parameters:
----------
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
Returns:
-------
int
Channel axis.
"""
return 1 if is_channels_first(data_format) else -1
class BatchNorm(nn.BatchNormalization):
"""
MXNet/Gluon-like batch normalization.
Parameters:
----------
momentum : float, default 0.9
Momentum for the moving average.
epsilon : float, default 1e-5
Small float added to variance to avoid dividing by zero.
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
def __init__(self,
momentum=0.9,
epsilon=1e-5,
data_format="channels_last",
**kwargs):
super(BatchNorm, self).__init__(
axis=get_channel_axis(data_format),
momentum=momentum,
epsilon=epsilon,
**kwargs)
class TF2Model(tf.keras.Model):
def __init__(self,
bn_eps=1e-5,
data_format="channels_last",
**kwargs):
super(TF2Model, self).__init__(**kwargs)
self.bn = BatchNorm(
epsilon=bn_eps,
data_format=data_format,
name="bn")
def call(self, x, training=None):
x = self.bn(x, training=training)
return x
def main():
gpus = tf.config.experimental.list_physical_devices("GPU")
if gpus:
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
success = True
for i in range(10):
g = np.random.randn(LENGTH, ).astype(np.float32)
b = np.random.randn(LENGTH, ).astype(np.float32)
m = np.random.randn(LENGTH, ).astype(np.float32)
v = np.random.randn(LENGTH, ).astype(np.float32)
b = b - b.min() + 1.0
v = v - v.min() + 1.0
IMG_SIZE = 224
x = np.random.randn(10, LENGTH, IMG_SIZE, IMG_SIZE).astype(np.float32)
gl_model = GluonModel()
# ctx = mx.cpu()
ctx = mx.gpu(0)
gl_params = gl_model._collect_params_with_prefix()
gl_params['bn.gamma']._load_init(mx.nd.array(g, ctx), ctx)
gl_params['bn.beta']._load_init(mx.nd.array(b, ctx), ctx)
gl_params['bn.running_mean']._load_init(mx.nd.array(m, ctx), ctx)
gl_params['bn.running_var']._load_init(mx.nd.array(v, ctx), ctx)
# gl_model.initialize()
gl_x = mx.nd.array(x, ctx)
gl_y = gl_model(gl_x).asnumpy()
data_format = "channels_last"
# data_format = "channels_first"
tf2_use_cuda = True
if not tf2_use_cuda:
with tf.device("/cpu:0"):
tf2_model = TF2Model(data_format=data_format)
else:
tf2_model = TF2Model(data_format=data_format)
input_shape = (1, IMG_SIZE, IMG_SIZE, LENGTH) if data_format == "channels_last" else\
(1, LENGTH, IMG_SIZE, IMG_SIZE)
tf2_model.build(input_shape=input_shape)
tf2_params = {v.name: v for v in tf2_model.weights}
tf2_params["bn/gamma:0"].assign(g)
tf2_params["bn/beta:0"].assign(b)
tf2_params["bn/moving_mean:0"].assign(m)
tf2_params["bn/moving_variance:0"].assign(v)
tf2_x = x.transpose((0, 2, 3, 1)) if data_format == "channels_last" else x
tf2_x = tf.convert_to_tensor(tf2_x)
tf2_y = tf2_model(tf2_x).numpy()
if data_format == "channels_last":
tf2_y = tf2_y.transpose((0, 3, 1, 2))
diff = np.abs(gl_y - tf2_y)
dist = diff.mean()
if dist > 1e-5:
success = False
print("i={}, dist={}".format(i, dist))
# print(gl_y)
# print(tf_y)
if success:
print("All ok.")
if __name__ == '__main__':
main()

# imgclsmob-master/tests/convert_gl2pt_batchnorm.py
import numpy as np
import mxnet as mx
import torch
from torch.autograd import Variable
LENGTH = 64
class GluonModel(mx.gluon.HybridBlock):
def __init__(self,
**kwargs):
super(GluonModel, self).__init__(**kwargs)
with self.name_scope():
self.bn = mx.gluon.nn.BatchNorm(
momentum=0.9,
epsilon=1e-5,
in_channels=LENGTH,
use_global_stats=False)
def hybrid_forward(self, F, x):
x = self.bn(x)
return x
class PytorchModel(torch.nn.Module):
def __init__(self):
super(PytorchModel, self).__init__()
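        # Note: PyTorch defines momentum as the weight of the new batch
        # statistic, while Gluon's momentum weights the running average, so
        # momentum=0.9 is not the same setting in both frameworks; this is
        # irrelevant here because the comparison runs in eval mode.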
self.bn = torch.nn.BatchNorm2d(
num_features=LENGTH,
eps=1e-5,
momentum=0.9)
def forward(self, x):
x = self.bn(x)
return x
def main():
success = True
for i in range(10):
g = np.random.randn(LENGTH, ).astype(np.float32)
b = np.random.randn(LENGTH, ).astype(np.float32)
m = np.random.randn(LENGTH, ).astype(np.float32)
v = np.random.randn(LENGTH, ).astype(np.float32)
b = b - b.min() + 1.0
v = v - v.min() + 1.0
IMG_SIZE = 224
x = np.random.randn(1, LENGTH, IMG_SIZE, IMG_SIZE).astype(np.float32)
gl_model = GluonModel()
# ctx = mx.cpu()
ctx = mx.gpu(0)
gl_params = gl_model._collect_params_with_prefix()
gl_params['bn.gamma']._load_init(mx.nd.array(g, ctx), ctx)
gl_params['bn.beta']._load_init(mx.nd.array(b, ctx), ctx)
gl_params['bn.running_mean']._load_init(mx.nd.array(m, ctx), ctx)
gl_params['bn.running_var']._load_init(mx.nd.array(v, ctx), ctx)
# gl_model.initialize()
gl_x = mx.nd.array(x, ctx)
gl_y = gl_model(gl_x).asnumpy()
pt_model = PytorchModel()
pt_model.eval()
pt_params = pt_model.state_dict()
pt_params['bn.weight'] = torch.from_numpy(g)
pt_params['bn.bias'] = torch.from_numpy(b)
pt_params['bn.running_mean'] = torch.from_numpy(m)
pt_params['bn.running_var'] = torch.from_numpy(v)
pt_model.load_state_dict(pt_params)
pt_model = pt_model.cuda()
pt_x = Variable(torch.from_numpy(x)).cuda()
pt_y = pt_model(pt_x).detach().cpu().numpy()
diff = np.abs(gl_y - pt_y)
dist = np.sum(diff)
if dist > 1e-5:
success = False
print("i={}, dist={}".format(i, dist))
# print(gl_y)
# print(pt_y)
if success:
print("All ok.")
if __name__ == '__main__':
main()

# imgclsmob-master/tests/convert_gl2pt_conv2d.py
import numpy as np
import mxnet as mx
import torch
from torch.autograd import Variable
class GluonModel(mx.gluon.HybridBlock):
def __init__(self,
**kwargs):
super(GluonModel, self).__init__(**kwargs)
with self.name_scope():
self.conv = mx.gluon.nn.Conv2D(
channels=64,
kernel_size=7,
strides=2,
padding=3,
use_bias=True,
in_channels=3)
def hybrid_forward(self, F, x):
x = self.conv(x)
return x
class PytorchModel(torch.nn.Module):
def __init__(self):
super(PytorchModel, self).__init__()
self.conv = torch.nn.Conv2d(
in_channels=3,
out_channels=64,
kernel_size=7,
stride=2,
padding=3,
bias=True)
def forward(self, x):
x = self.conv(x)
return x
def main():
success = True
for i in range(10):
# w = np.random.randint(10, size=(64, 3, 7, 7)).astype(np.float32)
# x = np.random.randint(10, size=(1, 3, 224, 224)).astype(np.float32)
w = np.random.randn(64, 3, 7, 7).astype(np.float32)
b = np.random.randn(64, ).astype(np.float32)
x = np.random.randn(10, 3, 224, 224).astype(np.float32)
gl_model = GluonModel()
# ctx = mx.cpu()
ctx = mx.gpu(0)
gl_params = gl_model._collect_params_with_prefix()
gl_params['conv.weight']._load_init(mx.nd.array(w, ctx), ctx)
gl_params['conv.bias']._load_init(mx.nd.array(b, ctx), ctx)
gl_x = mx.nd.array(x, ctx)
gl_y = gl_model(gl_x).asnumpy()
pt_model = PytorchModel()
pt_model.eval()
pt_params = pt_model.state_dict()
pt_params['conv.weight'] = torch.from_numpy(w)
pt_params['conv.bias'] = torch.from_numpy(b)
pt_model.load_state_dict(pt_params)
pt_model = pt_model.cuda()
pt_x = Variable(torch.from_numpy(x)).cuda()
pt_y = pt_model(pt_x).detach().cpu().numpy()
dist = np.sum(np.abs(gl_y - pt_y))
if dist > 1e-5:
success = False
print("i={}, dist={}".format(i, dist))
# print(gl_y)
# print(tf_y)
if success:
print("All ok.")
if __name__ == '__main__':
main()

# imgclsmob-master/tests/convert_gl2tf_conv2d.py
import numpy as np
import mxnet as mx
import tensorflow as tf
class GluonModel(mx.gluon.HybridBlock):
def __init__(self,
**kwargs):
super(GluonModel, self).__init__(**kwargs)
with self.name_scope():
self.conv = mx.gluon.nn.Conv2D(
channels=64,
kernel_size=7,
strides=2,
padding=3,
use_bias=True,
in_channels=3)
def hybrid_forward(self, F, x):
x = self.conv(x)
return x
# def tensorflow_model(x):
#
# padding = 3
# x = tf.pad(x, [[0, 0], [0, 0], [padding, padding], [padding, padding]])
# x = tf.layers.conv2d(
# inputs=x,
# filters=64,
# kernel_size=7,
# strides=2,
# padding='valid',
# data_format='channels_first',
# use_bias=False,
# name='conv')
# return x
def conv2d(x,
in_channels,
out_channels,
kernel_size,
strides=1,
padding=0,
groups=1,
use_bias=True,
name="conv2d"):
"""
Convolution 2D layer wrapper.
Parameters:
----------
x : Tensor
Input tensor.
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of 2 int
Convolution window size.
strides : int or tuple/list of 2 int
Strides of the convolution.
padding : int or tuple/list of 2 int
Padding value for convolution layer.
groups : int, default 1
Number of groups.
    use_bias : bool, default True
Whether the layer uses a bias vector.
name : str, default 'conv2d'
Layer name.
Returns:
-------
Tensor
Resulted tensor.
"""
if isinstance(kernel_size, int):
kernel_size = (kernel_size, kernel_size)
if isinstance(strides, int):
strides = (strides, strides)
if isinstance(padding, int):
padding = (padding, padding)
if groups != 1:
raise NotImplementedError
if (padding[0] > 0) or (padding[1] > 0):
x = tf.pad(x, [[0, 0], [0, 0], list(padding), list(padding)])
x = tf.layers.conv2d(
inputs=x,
filters=out_channels,
kernel_size=kernel_size,
strides=strides,
padding='valid',
data_format='channels_first',
use_bias=use_bias,
name=name)
return x
def tensorflow_model(x):
x = conv2d(
x=x,
in_channels=3,
out_channels=64,
kernel_size=7,
strides=2,
padding=3,
use_bias=True,
name="conv")
return x
def main():
success = True
for i in range(10):
# gl_w = np.random.randn(64, 3, 7, 7).astype(np.float32)
tf_w = np.random.randn(7, 7, 3, 64).astype(np.float32)
b = np.random.randn(64, ).astype(np.float32)
x = np.random.randn(10, 3, 224, 224).astype(np.float32)
gl_model = GluonModel()
# ctx = mx.cpu()
ctx = mx.gpu(0)
gl_params = gl_model._collect_params_with_prefix()
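        # TF kernels are HWIO; the (3, 2, 0, 1) transpose converts them to
        # Gluon's OIHW layout.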
gl_w = np.transpose(tf_w, axes=(3, 2, 0, 1))
gl_params['conv.weight']._load_init(mx.nd.array(gl_w, ctx), ctx)
gl_params['conv.bias']._load_init(mx.nd.array(b, ctx), ctx)
gl_x = mx.nd.array(x, ctx)
gl_y = gl_model(gl_x).asnumpy()
xx = tf.placeholder(
dtype=tf.float32,
shape=(None, 3, 224, 224),
name='xx')
tf_model = tensorflow_model(xx)
tf_params = {v.name: v for v in tf.global_variables()}
with tf.Session() as sess:
# tf_w = np.transpose(gl_w, axes=(2, 3, 1, 0))
sess.run(tf_params['conv/kernel:0'].assign(tf_w))
sess.run(tf_params['conv/bias:0'].assign(b))
tf_y = sess.run(tf_model, feed_dict={xx: x})
tf.reset_default_graph()
dist = np.sum(np.abs(gl_y - tf_y))
if dist > 1e-5:
success = False
print("i={}, dist={}".format(i, dist))
# print(gl_y)
# print(tf_y)
if success:
print("All ok.")
if __name__ == '__main__':
main()

# imgclsmob-master/tests/convert_gl2tf2_conv2d.py
import numpy as np
import tensorflow as tf
import tensorflow.keras.layers as nn
def is_channels_first(data_format):
"""
    Check whether the tested data format is channels-first.
Parameters:
----------
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
Returns:
-------
bool
A flag.
"""
return data_format == "channels_first"
class TF2Model(tf.keras.Model):
def __init__(self,
data_format="channels_last",
**kwargs):
super(TF2Model, self).__init__(**kwargs)
padding = (3, 3)
if isinstance(padding, int):
padding = (padding, padding)
if is_channels_first(data_format):
self.paddings_tf = [[0, 0], [0, 0], list(padding), list(padding)]
else:
self.paddings_tf = [[0, 0], list(padding), list(padding), [0, 0]]
self.conv = nn.Conv2D(
filters=64,
kernel_size=(7, 7),
strides=2,
padding="valid",
data_format=data_format,
dilation_rate=1,
use_bias=False,
name="conv")
def call(self, x):
x = tf.pad(x, paddings=self.paddings_tf)
x = self.conv(x)
return x
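

# TF2Model2 below is an alternative that replaces the explicit tf.pad in
# call() with a ZeroPadding2D layer; main() exercises this second variant.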
class TF2Model2(tf.keras.Model):
def __init__(self,
data_format="channels_last",
**kwargs):
super(TF2Model2, self).__init__(**kwargs)
padding = (3, 3)
if isinstance(padding, int):
padding = (padding, padding)
self.pad = nn.ZeroPadding2D(
padding=padding,
data_format=data_format)
self.conv = nn.Conv2D(
filters=64,
kernel_size=(7, 7),
strides=2,
padding="valid",
data_format=data_format,
dilation_rate=1,
use_bias=False,
name="conv")
def call(self, x):
x = self.pad(x)
x = self.conv(x)
return x
def gl_calc(gl_w, x):
import mxnet as mx
class GluonModel(mx.gluon.HybridBlock):
def __init__(self,
**kwargs):
super(GluonModel, self).__init__(**kwargs)
with self.name_scope():
self.conv = mx.gluon.nn.Conv2D(
channels=64,
kernel_size=(7, 7),
strides=2,
padding=(3, 3),
use_bias=False,
in_channels=3)
def hybrid_forward(self, F, x):
x = self.conv(x)
return x
gl_model = GluonModel()
# ctx = mx.cpu()
ctx = mx.gpu(0)
gl_params = gl_model._collect_params_with_prefix()
# gl_w = np.transpose(tf2_w, axes=(3, 2, 0, 1))
gl_params['conv.weight']._load_init(mx.nd.array(gl_w, ctx), ctx)
# gl_params['conv.bias']._load_init(mx.nd.array(b, ctx), ctx)
gl_x = mx.nd.array(x, ctx)
gl_y = gl_model(gl_x).asnumpy()
return gl_y
def main():
gpus = tf.config.experimental.list_physical_devices("GPU")
if gpus:
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
success = True
for i in range(10):
gl_w = np.random.randn(64, 3, 7, 7).astype(np.float32)
# tf2_w = np.random.randn(7, 7, 3, 64).astype(np.float32)
b = np.random.randn(64, ).astype(np.float32)
x = np.random.randn(10, 3, 224, 256).astype(np.float32)
assert (b is not None)
data_format = "channels_last"
# data_format = "channels_first"
tf2_use_cuda = True
if not tf2_use_cuda:
with tf.device("/cpu:0"):
tf2_model = TF2Model2(data_format=data_format)
else:
tf2_model = TF2Model2(data_format=data_format)
input_shape = (1, 224, 256, 3) if data_format == "channels_last" else (1, 3, 224, 256)
tf2_model.build(input_shape=input_shape)
tf2_params = {v.name: v for v in tf2_model.weights}
# print(tf2_params["conv/kernel:0"].shape)
# tf2_w = np.transpose(gl_w, axes=(2, 3, 1, 0))
tf2_w = np.transpose(gl_w, axes=(2, 3, 1, 0))
tf2_params["conv/kernel:0"].assign(tf2_w)
# tf2_params["conv/bias:0"].assign(b)
tf2_x = x.transpose((0, 2, 3, 1)) if data_format == "channels_last" else x
tf2_x = tf.convert_to_tensor(tf2_x)
tf2_y = tf2_model(tf2_x).numpy()
if data_format == "channels_last":
tf2_y = tf2_y.transpose((0, 3, 1, 2))
gl_y = gl_calc(gl_w, x)
dist = np.sum(np.abs(gl_y - tf2_y))
if dist > 1e-5:
success = False
print("i={}, dist={}".format(i, dist))
# print(gl_y)
# print(tf_y)
if success:
print("All ok.")
if __name__ == "__main__":
main()

# imgclsmob-master/tests/convert_gl2tf2_avgpool2d.py
import math
import numpy as np
import mxnet as mx
import tensorflow as tf
import tensorflow.keras.layers as nn
class GluonModel(mx.gluon.HybridBlock):
def __init__(self,
**kwargs):
super(GluonModel, self).__init__(**kwargs)
with self.name_scope():
self.pool = mx.gluon.nn.AvgPool2D(
pool_size=3,
strides=2,
padding=1,
ceil_mode=True,
count_include_pad=True)
def hybrid_forward(self, F, x):
x = self.pool(x)
return x
def is_channels_first(data_format):
return data_format == "channels_first"
class TF2Model(tf.keras.Model):
def __init__(self,
pool_size=3,
strides=2,
padding=1,
ceil_mode=True,
data_format="channels_last",
**kwargs):
super(TF2Model, self).__init__(**kwargs)
if isinstance(pool_size, int):
pool_size = (pool_size, pool_size)
if isinstance(strides, int):
strides = (strides, strides)
if isinstance(padding, int):
padding = (padding, padding)
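        # MXNet-style padding/ceil_mode is emulated with an explicit tf.pad
        # plus a stride-1 average pool, followed by a pool_size-1 "stride pool"
        # that performs the subsampling.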
self.use_stride_pool = (strides[0] > 1) or (strides[1] > 1)
self.ceil_mode = ceil_mode and self.use_stride_pool
self.use_pad = (padding[0] > 0) or (padding[1] > 0)
if self.ceil_mode:
self.padding = padding
self.pool_size = pool_size
self.strides = strides
self.data_format = data_format
elif self.use_pad:
if is_channels_first(data_format):
self.paddings_tf = [[0, 0], [0, 0], list(padding), list(padding)]
else:
self.paddings_tf = [[0, 0], list(padding), list(padding), [0, 0]]
self.pool = nn.AveragePooling2D(
pool_size=pool_size,
strides=1,
padding="valid",
data_format=data_format,
name="pool")
if self.use_stride_pool:
self.stride_pool = nn.AveragePooling2D(
pool_size=1,
strides=strides,
padding="valid",
data_format=data_format,
name="stride_pool")
def call(self, x):
if self.ceil_mode:
x_shape = x.get_shape().as_list()
if is_channels_first(self.data_format):
height = x_shape[2]
width = x_shape[3]
else:
height = x_shape[1]
width = x_shape[2]
padding = self.padding
out_height = float(height + 2 * padding[0] - self.pool_size[0]) / self.strides[0] + 1.0
out_width = float(width + 2 * padding[1] - self.pool_size[1]) / self.strides[1] + 1.0
if math.ceil(out_height) > math.floor(out_height):
padding = (padding[0] + 1, padding[1])
if math.ceil(out_width) > math.floor(out_width):
padding = (padding[0], padding[1] + 1)
if (padding[0] > 0) or (padding[1] > 0):
if is_channels_first(self.data_format):
paddings_tf = [[0, 0], [0, 0], list(padding), list(padding)]
else:
paddings_tf = [[0, 0], list(padding), list(padding), [0, 0]]
x = tf.pad(x, paddings=paddings_tf)
elif self.use_pad:
x = tf.pad(x, paddings=self.paddings_tf)
x = self.pool(x)
if self.use_stride_pool:
x = self.stride_pool(x)
return x
def main():
success = True
for i in range(10):
x = np.random.randn(12, 10, 224, 224).astype(np.float32)
gl_model = GluonModel()
# ctx = mx.cpu()
ctx = mx.gpu(0)
gl_x = mx.nd.array(x, ctx)
gl_y = gl_model(gl_x).asnumpy()
data_format = "channels_last"
# data_format = "channels_first"
tf2_use_cuda = True
if not tf2_use_cuda:
with tf.device("/cpu:0"):
tf2_model = TF2Model(data_format=data_format)
else:
tf2_model = TF2Model(data_format=data_format)
input_shape = (1, 224, 224, 10) if data_format == "channels_last" else (1, 10, 224, 224)
tf2_model.build(input_shape=input_shape)
# tf2_params = {v.name: v for v in tf2_model.weights}
tf2_x = x.transpose((0, 2, 3, 1)) if data_format == "channels_last" else x
tf2_x = tf.convert_to_tensor(tf2_x)
tf2_y = tf2_model(tf2_x).numpy()
if data_format == "channels_last":
tf2_y = tf2_y.transpose((0, 3, 1, 2))
dist = np.sum(np.abs(gl_y - tf2_y))
if dist > 1e-5:
success = False
print("i={}, dist={}".format(i, dist))
# print(gl_y)
# print(tf_y)
if success:
print("All ok.")
if __name__ == '__main__':
main()

# imgclsmob-master/tests/convert_gl2pt_dense.py
import numpy as np
import mxnet as mx
import torch
from torch.autograd import Variable
class GluonModel(mx.gluon.HybridBlock):
def __init__(self,
**kwargs):
super(GluonModel, self).__init__(**kwargs)
with self.name_scope():
self.dense = mx.gluon.nn.Dense(
units=1000,
use_bias=False,
in_units=1024)
def hybrid_forward(self, F, x):
x = self.dense(x)
return x
class PytorchModel(torch.nn.Module):
def __init__(self):
super(PytorchModel, self).__init__()
self.dense = torch.nn.Linear(
in_features=1024,
out_features=1000,
bias=False)
def forward(self, x):
x = self.dense(x)
return x
def main():
success = True
for i in range(10):
w = np.random.randn(1000, 1024).astype(np.float32)
# b = np.random.randn(1000, ).astype(np.float32)
x = np.random.randn(1, 1024).astype(np.float32)
gl_model = GluonModel()
# ctx = mx.cpu()
ctx = mx.gpu(0)
gl_params = gl_model._collect_params_with_prefix()
gl_params['dense.weight']._load_init(mx.nd.array(w, ctx), ctx)
# gl_params['dense.bias']._load_init(mx.nd.array(b, ctx), ctx)
gl_x = mx.nd.array(x, ctx)
gl_y = gl_model(gl_x).asnumpy()
pt_model = PytorchModel()
pt_model.eval()
pt_params = pt_model.state_dict()
pt_params['dense.weight'] = torch.from_numpy(w)
# pt_params['dense.bias'] = torch.from_numpy(b)
pt_model.load_state_dict(pt_params)
pt_model = pt_model.cuda()
pt_x = Variable(torch.from_numpy(x)).cuda()
pt_y = pt_model(pt_x).detach().cpu().numpy()
dist = np.sum(np.abs(gl_y - pt_y))
if dist > 1e-5:
success = False
print("i={}, dist={}".format(i, dist))
# print(gl_y)
# print(pt_y)
y = np.matmul(w.astype(np.float64), x[0].astype(np.float64))
# y = np.dot(w, x[0])
gl_dist = np.sum(np.abs(gl_y - y))
pt_dist = np.sum(np.abs(pt_y - y))
print("i={}, gl_dist={}".format(i, gl_dist))
print("i={}, pt_dist={}".format(i, pt_dist))
if success:
print("All ok.")
if __name__ == '__main__':
main()

# imgclsmob-master/tests/convert_gl2tf_gconv2d.py
import numpy as np
import mxnet as mx
import tensorflow as tf
GROUPS = 8
class GluonModel(mx.gluon.HybridBlock):
def __init__(self,
**kwargs):
super(GluonModel, self).__init__(**kwargs)
with self.name_scope():
self.g_conv = mx.gluon.nn.Conv2D(
channels=32,
kernel_size=7,
strides=2,
padding=3,
groups=GROUPS,
use_bias=False,
in_channels=128)
def hybrid_forward(self, F, x):
x = self.g_conv(x)
return x
def conv2d(x,
in_channels,
out_channels,
kernel_size,
strides=1,
padding=0,
groups=1,
use_bias=True,
name="conv2d"):
"""
Convolution 2D layer wrapper.
Parameters:
----------
x : Tensor
Input tensor.
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of 2 int
Convolution window size.
strides : int or tuple/list of 2 int
Strides of the convolution.
padding : int or tuple/list of 2 int
Padding value for convolution layer.
groups : int, default 1
Number of groups.
    use_bias : bool, default True
Whether the layer uses a bias vector.
name : str, default 'conv2d'
Layer name.
Returns:
-------
Tensor
Resulted tensor.
"""
if isinstance(kernel_size, int):
kernel_size = (kernel_size, kernel_size)
if isinstance(strides, int):
strides = (strides, strides)
if isinstance(padding, int):
padding = (padding, padding)
if (padding[0] > 0) or (padding[1] > 0):
x = tf.pad(x, [[0, 0], [0, 0], list(padding), list(padding)])
if groups == 1:
x = tf.layers.conv2d(
inputs=x,
filters=out_channels,
kernel_size=kernel_size,
strides=strides,
padding='valid',
data_format='channels_first',
use_bias=use_bias,
kernel_initializer=tf.contrib.layers.variance_scaling_initializer(2.0),
name=name)
elif (groups == out_channels) and (out_channels == in_channels):
kernel = tf.get_variable(
name=name + '/dw_kernel',
shape=kernel_size + (in_channels, 1),
initializer=tf.variance_scaling_initializer(2.0))
x = tf.nn.depthwise_conv2d(
input=x,
filter=kernel,
strides=(1, 1) + strides,
padding='VALID',
rate=(1, 1),
name=name,
data_format='NCHW')
if use_bias:
raise NotImplementedError
else:
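        # General grouped convolution: slice the input along the channel axis,
        # run an independent convolution per group, and concatenate the outputs.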
assert (in_channels % groups == 0)
assert (out_channels % groups == 0)
in_group_channels = in_channels // groups
out_group_channels = out_channels // groups
group_list = []
for gi in range(groups):
xi = x[:, gi * in_group_channels:(gi + 1) * in_group_channels, :, :]
xi = tf.layers.conv2d(
inputs=xi,
filters=out_group_channels,
kernel_size=kernel_size,
strides=strides,
padding='valid',
data_format='channels_first',
use_bias=use_bias,
kernel_initializer=tf.contrib.layers.variance_scaling_initializer(2.0),
name=name + "/convgroup{}".format(gi + 1))
group_list.append(xi)
x = tf.concat(group_list, axis=1, name=name + "/concat")
return x
def tensorflow_model(x):
x = conv2d(
x=x,
in_channels=128,
out_channels=32,
kernel_size=7,
strides=2,
padding=3,
groups=GROUPS,
use_bias=False,
name="g_conv")
return x
def main():
success = True
for i in range(10):
w = np.random.randn(32, 16, 7, 7).astype(np.float32)
x = np.random.randn(10, 128, 224, 224).astype(np.float32)
gl_model = GluonModel()
# ctx = mx.cpu()
ctx = mx.gpu(0)
gl_params = gl_model._collect_params_with_prefix()
gl_params['g_conv.weight']._load_init(mx.nd.array(w, ctx), ctx)
gl_x = mx.nd.array(x, ctx)
gl_y = gl_model(gl_x).asnumpy()
xx = tf.placeholder(
dtype=tf.float32,
shape=(None, 128, 224, 224),
name='xx')
tf_model = tensorflow_model(xx)
tf_params = {v.name: v for v in tf.global_variables()}
with tf.Session() as sess:
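            # Split the Gluon OIHW weight along the output-channel axis into
            # per-group chunks and load each chunk (as HWIO) into its group's
            # kernel.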
w_list = np.split(w, axis=0, indices_or_sections=GROUPS)
for gi in range(GROUPS):
w_gi = w_list[gi]
tf_w = np.transpose(w_gi, axes=(2, 3, 1, 0))
sess.run(tf_params['g_conv/convgroup{}/kernel:0'.format(gi + 1)].assign(tf_w))
tf_y = sess.run(tf_model, feed_dict={xx: x})
tf.reset_default_graph()
dist = np.sum(np.abs(gl_y - tf_y))
if dist > 1e-5:
success = False
print("i={}, dist={}".format(i, dist))
# print(gl_y)
# print(tf_y)
if success:
print("All ok.")
if __name__ == '__main__':
main()

# imgclsmob-master/tests/convert_gl2tf2_dwconv2d.py
import numpy as np
import tensorflow as tf
import tensorflow.keras.layers as nn
channels = 12
def is_channels_first(data_format):
"""
    Check whether the tested data format is channels-first.
Parameters:
----------
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
Returns:
-------
bool
A flag.
"""
return data_format == "channels_first"
class TF2Model(tf.keras.Model):

    def __init__(self,
                 data_format="channels_last",
                 **kwargs):
        super(TF2Model, self).__init__(**kwargs)
        # Keras "same" padding with stride 2 pads asymmetrically (less at the
        # top/left), which does not match Gluon's symmetric padding=(3, 3),
        # so pad explicitly and convolve with "valid".
        self.pad = nn.ZeroPadding2D(
            padding=(3, 3),
            data_format=data_format)
        self.conv = nn.DepthwiseConv2D(
            kernel_size=(7, 7),
            strides=2,
            padding="valid",
            data_format=data_format,
            dilation_rate=1,
            use_bias=False,
            name="conv")

    def call(self, x):
        x = self.pad(x)
        x = self.conv(x)
        return x
def gl_calc(gl_w, x):
import mxnet as mx
class GluonModel(mx.gluon.HybridBlock):
def __init__(self,
**kwargs):
super(GluonModel, self).__init__(**kwargs)
with self.name_scope():
self.conv = mx.gluon.nn.Conv2D(
channels=channels,
kernel_size=(7, 7),
strides=2,
padding=(3, 3),
groups=channels,
use_bias=False,
in_channels=channels)
def hybrid_forward(self, F, x):
x = self.conv(x)
return x
gl_model = GluonModel()
# ctx = mx.cpu()
ctx = mx.gpu(0)
gl_params = gl_model._collect_params_with_prefix()
# gl_w = np.transpose(tf2_w, axes=(3, 2, 0, 1))
gl_params['conv.weight']._load_init(mx.nd.array(gl_w, ctx), ctx)
# gl_params['conv.bias']._load_init(mx.nd.array(b, ctx), ctx)
gl_x = mx.nd.array(x, ctx)
gl_y = gl_model(gl_x).asnumpy()
return gl_y
def main():
gpus = tf.config.experimental.list_physical_devices("GPU")
if gpus:
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
success = True
for i in range(10):
gl_w = np.random.randn(channels, 1, 7, 7).astype(np.float32)
# tf2_w = np.random.randn(7, 7, 1, channels).astype(np.float32)
b = np.random.randn(channels, ).astype(np.float32)
x = np.random.randn(10, channels, 224, 256).astype(np.float32)
assert (b is not None)
data_format = "channels_last"
# data_format = "channels_first"
tf2_use_cuda = True
if not tf2_use_cuda:
with tf.device("/cpu:0"):
tf2_model = TF2Model(data_format=data_format)
else:
tf2_model = TF2Model(data_format=data_format)
input_shape = (1, 224, 256, channels) if data_format == "channels_last" else (1, channels, 224, 256)
tf2_model.build(input_shape=input_shape)
tf2_params = {v.name: v for v in tf2_model.weights}
# print(tf2_params["conv/kernel:0"].shape)
# tf2_w = np.transpose(gl_w, axes=(2, 3, 1, 0))
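        # Gluon depthwise weights are (channels, 1, H, W); Keras
        # DepthwiseConv2D expects (H, W, channels, 1), hence (2, 3, 0, 1).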
tf2_w = np.transpose(gl_w, axes=(2, 3, 0, 1))
tf2_params["conv/depthwise_kernel:0"].assign(tf2_w)
# tf2_params["conv/bias:0"].assign(b)
tf2_x = x.transpose((0, 2, 3, 1)) if data_format == "channels_last" else x
tf2_x = tf.convert_to_tensor(tf2_x)
tf2_y = tf2_model(tf2_x).numpy()
if data_format == "channels_last":
tf2_y = tf2_y.transpose((0, 3, 1, 2))
gl_y = gl_calc(gl_w, x)
dist = np.sum(np.abs(gl_y - tf2_y))
if dist > 1e-5:
success = False
print("i={}, dist={}".format(i, dist))
# print(gl_y)
# print(tf_y)
if success:
print("All ok.")
if __name__ == "__main__":
main()

# imgclsmob-master/tests/convert_gl2tf_avgpool2d.py
# import math
import numpy as np
import mxnet as mx
import tensorflow as tf
class GluonModel(mx.gluon.HybridBlock):
def __init__(self,
**kwargs):
super(GluonModel, self).__init__(**kwargs)
with self.name_scope():
self.pool = mx.gluon.nn.AvgPool2D(
pool_size=2,
strides=2,
padding=0)
def hybrid_forward(self, F, x):
x = self.pool(x)
return x
# def avgpool2d(x,
# pool_size,
# strides,
# padding=0,
# ceil_mode=False,
# name=None):
# """
# Average pooling operation for two dimensional (spatial) data.
#
# Parameters:
# ----------
# x : Tensor
# Input tensor.
# pool_size : int or tuple/list of 2 int
# Size of the max pooling windows.
# strides : int or tuple/list of 2 int
# Strides of the pooling.
# padding : int or tuple/list of 2 int, default 0
# Padding value for convolution layer.
# ceil_mode : bool, default False
# When `True`, will use ceil instead of floor to compute the output shape.
# name : str, default 'conv2d'
# Layer name.
#
# Returns:
# -------
# Tensor
# Resulted tensor.
# """
# if isinstance(padding, int):
# padding = (padding, padding)
#
# if ceil_mode:
# height = x.shape[2]
# out_height = float(height + 2 * padding[0] - pool_size[0]) / strides[0] + 1.0
# if math.ceil(out_height) > math.floor(out_height):
# padding[0] += 1
# width = x.shape[3]
# out_width = float(width + 2 * padding[1] - pool_size[1]) / strides[1] + 1.0
# if math.ceil(out_width) > math.floor(out_width):
# padding[1] += 1
#
# if (padding[0] > 0) or (padding[1] > 0):
# x = tf.pad(x, [[0, 0], [0, 0], list(padding), list(padding)], mode="REFLECT")
#
# x = tf.layers.average_pooling2d(
# inputs=x,
# pool_size=pool_size,
# strides=strides,
# padding='valid',
# data_format='channels_first',
# name=name)
# return x
def tensorflow_model(x):
x = tf.layers.average_pooling2d(
inputs=x,
pool_size=2,
strides=2,
padding='valid',
data_format='channels_first',
name="pool")
# x = avgpool2d(
# x=x,
# pool_size=2,
# strides=2,
# padding=1,
# ceil_mode=False,
# name="pool")
return x
def main():
success = True
for i in range(10):
x = np.random.randn(10, 10, 224, 224).astype(np.float32)
gl_model = GluonModel()
# ctx = mx.cpu()
ctx = mx.gpu(0)
gl_x = mx.nd.array(x, ctx)
gl_y = gl_model(gl_x).asnumpy()
xx = tf.placeholder(
dtype=tf.float32,
shape=(None, 10, 224, 224),
name='xx')
tf_model = tensorflow_model(xx)
with tf.Session() as sess:
tf_y = sess.run(tf_model, feed_dict={xx: x})
tf.reset_default_graph()
dist = np.sum(np.abs(gl_y - tf_y))
if dist > 1e-5:
success = False
print("i={}, dist={}".format(i, dist))
# print(gl_y)
# print(tf_y)
if success:
print("All ok.")
if __name__ == '__main__':
main()

# imgclsmob-master/tests/convert_gl2tf_dwconv2d.py
import numpy as np
import mxnet as mx
import tensorflow as tf
class GluonModel(mx.gluon.HybridBlock):
def __init__(self,
**kwargs):
super(GluonModel, self).__init__(**kwargs)
with self.name_scope():
self.dw_conv = mx.gluon.nn.Conv2D(
channels=32,
kernel_size=7,
strides=2,
padding=3,
groups=32,
use_bias=False,
in_channels=32)
def hybrid_forward(self, F, x):
x = self.dw_conv(x)
return x
def conv2d(x,
in_channels,
out_channels,
kernel_size,
strides=1,
padding=0,
groups=1,
use_bias=True,
name="conv2d"):
"""
Convolution 2D layer wrapper.
Parameters:
----------
x : Tensor
Input tensor.
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of 2 int
Convolution window size.
strides : int or tuple/list of 2 int
Strides of the convolution.
padding : int or tuple/list of 2 int
Padding value for convolution layer.
groups : int, default 1
Number of groups.
    use_bias : bool, default True
Whether the layer uses a bias vector.
name : str, default 'conv2d'
Layer name.
Returns:
-------
Tensor
Resulted tensor.
"""
if isinstance(kernel_size, int):
kernel_size = (kernel_size, kernel_size)
if isinstance(strides, int):
strides = (strides, strides)
if isinstance(padding, int):
padding = (padding, padding)
if (padding[0] > 0) or (padding[1] > 0):
x = tf.pad(x, [[0, 0], [0, 0], list(padding), list(padding)])
if groups == 1:
x = tf.layers.conv2d(
inputs=x,
filters=out_channels,
kernel_size=kernel_size,
strides=strides,
padding='valid',
data_format='channels_first',
use_bias=use_bias,
kernel_initializer=tf.contrib.layers.variance_scaling_initializer(2.0),
name=name)
elif (groups == out_channels) and (out_channels == in_channels):
kernel = tf.get_variable(
name=name + '/dw_kernel',
shape=kernel_size + (in_channels, 1),
initializer=tf.variance_scaling_initializer(2.0))
x = tf.nn.depthwise_conv2d(
input=x,
filter=kernel,
strides=(1, 1) + strides,
padding='VALID',
rate=(1, 1),
name=name,
data_format='NCHW')
if use_bias:
raise NotImplementedError
else:
raise NotImplementedError
return x
def tensorflow_model(x):
x = conv2d(
x=x,
in_channels=32,
out_channels=32,
kernel_size=7,
strides=2,
padding=3,
groups=32,
use_bias=False,
name="dw_conv")
return x
def main():
success = True
for i in range(10):
# gl_w = np.random.randn(32, 1, 7, 7).astype(np.float32)
tf_w = np.random.randn(7, 7, 32, 1).astype(np.float32)
x = np.random.randn(10, 32, 224, 224).astype(np.float32)
gl_model = GluonModel()
# ctx = mx.cpu()
ctx = mx.gpu(0)
gl_params = gl_model._collect_params_with_prefix()
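        # TF depthwise kernels are (H, W, in_channels, multiplier); the
        # (2, 3, 0, 1) transpose converts them to Gluon's (channels, 1, H, W).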
gl_w = np.transpose(tf_w, axes=(2, 3, 0, 1))
gl_params['dw_conv.weight']._load_init(mx.nd.array(gl_w, ctx), ctx)
gl_x = mx.nd.array(x, ctx)
gl_y = gl_model(gl_x).asnumpy()
xx = tf.placeholder(
dtype=tf.float32,
shape=(None, 32, 224, 224),
name='xx')
tf_model = tensorflow_model(xx)
tf_params = {v.name: v for v in tf.global_variables()}
with tf.Session() as sess:
# tf_w = np.transpose(gl_w, axes=(2, 3, 0, 1))
sess.run(tf_params['dw_conv/dw_kernel:0'].assign(tf_w))
tf_y = sess.run(tf_model, feed_dict={xx: x})
tf.reset_default_graph()
dist = np.sum(np.abs(gl_y - tf_y))
if dist > 1e-5:
success = False
print("i={}, dist={}".format(i, dist))
# print(gl_y)
# print(tf_y)
if success:
print("All ok.")
if __name__ == '__main__':
main()

# imgclsmob-master/tests/convert_gl2tf_conv1x1.py
import numpy as np
import mxnet as mx
import tensorflow as tf
class GluonModel(mx.gluon.HybridBlock):
def __init__(self,
**kwargs):
super(GluonModel, self).__init__(**kwargs)
with self.name_scope():
self.conv = mx.gluon.nn.Conv2D(
channels=64,
kernel_size=7,
strides=2,
padding=3,
use_bias=True,
in_channels=3)
def hybrid_forward(self, F, x):
x = self.conv(x)
return x
# def tensorflow_model(x):
#
# padding = 3
# x = tf.pad(x, [[0, 0], [0, 0], [padding, padding], [padding, padding]])
# x = tf.layers.conv2d(
# inputs=x,
# filters=64,
# kernel_size=7,
# strides=2,
# padding='valid',
# data_format='channels_first',
# use_bias=False,
# name='conv')
# return x
def conv2d(x,
in_channels,
out_channels,
kernel_size,
strides=1,
padding=0,
groups=1,
use_bias=True,
name="conv2d"):
"""
Convolution 2D layer wrapper.
Parameters:
----------
x : Tensor
Input tensor.
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of 2 int
Convolution window size.
strides : int or tuple/list of 2 int
Strides of the convolution.
padding : int or tuple/list of 2 int
Padding value for convolution layer.
groups : int, default 1
Number of groups.
    use_bias : bool, default True
Whether the layer uses a bias vector.
name : str, default 'conv2d'
Layer name.
Returns:
-------
Tensor
Resulted tensor.
"""
if isinstance(kernel_size, int):
kernel_size = (kernel_size, kernel_size)
if isinstance(strides, int):
strides = (strides, strides)
if isinstance(padding, int):
padding = (padding, padding)
if groups != 1:
raise NotImplementedError
if (padding[0] > 0) or (padding[1] > 0):
x = tf.pad(x, [[0, 0], [0, 0], list(padding), list(padding)])
x = tf.layers.conv2d(
inputs=x,
filters=out_channels,
kernel_size=kernel_size,
strides=strides,
padding='valid',
data_format='channels_first',
use_bias=use_bias,
name=name)
return x
def tensorflow_model(x):
x = conv2d(
x=x,
in_channels=3,
out_channels=64,
kernel_size=7,
strides=2,
padding=3,
use_bias=True,
name="conv")
return x
def main():
success = True
for i in range(10):
# w = np.random.randint(10, size=(64, 3, 7, 7)).astype(np.float32)
# x = np.random.randint(10, size=(1, 3, 224, 224)).astype(np.float32)
w = np.random.randn(64, 3, 7, 7).astype(np.float32)
b = np.random.randn(64, ).astype(np.float32)
x = np.random.randn(10, 3, 224, 224).astype(np.float32)
gl_model = GluonModel()
# ctx = mx.cpu()
ctx = mx.gpu(0)
gl_params = gl_model._collect_params_with_prefix()
gl_params['conv.weight']._load_init(mx.nd.array(w, ctx), ctx)
gl_params['conv.bias']._load_init(mx.nd.array(b, ctx), ctx)
gl_x = mx.nd.array(x, ctx)
gl_y = gl_model(gl_x).asnumpy()
xx = tf.placeholder(
dtype=tf.float32,
shape=(None, 3, 224, 224),
name='xx')
tf_model = tensorflow_model(xx)
tf_params = {v.name: v for v in tf.global_variables()}
with tf.Session() as sess:
tf_w = np.transpose(w, axes=(2, 3, 1, 0))
sess.run(tf_params['conv/kernel:0'].assign(tf_w))
sess.run(tf_params['conv/bias:0'].assign(b))
tf_y = sess.run(tf_model, feed_dict={xx: x})
tf.reset_default_graph()
dist = np.sum(np.abs(gl_y - tf_y))
if dist > 1e-5:
success = False
print("i={}, dist={}".format(i, dist))
# print(gl_y)
# print(tf_y)
if success:
print("All ok.")
if __name__ == '__main__':
main()

# imgclsmob-master/gluon/lr_scheduler.py
from math import pi, cos
from mxnet import lr_scheduler
class LRScheduler(lr_scheduler.LRScheduler):
"""
Learning Rate Scheduler
For mode='step', we multiply lr with `step_factor` at each epoch in `step`.
For mode='poly'::
lr = targetlr + (baselr - targetlr) * (1 - iter / maxiter) ^ power
For mode='cosine'::
lr = targetlr + (baselr - targetlr) * (1 + cos(pi * iter / maxiter)) / 2
If warmup_epochs > 0, a warmup stage will be inserted before the main lr scheduler.
For warmup_mode='linear'::
lr = warmup_lr + (baselr - warmup_lr) * iter / max_warmup_iter
For warmup_mode='constant'::
lr = warmup_lr
Parameters:
----------
mode : str
Modes for learning rate scheduler. Currently it supports 'step', 'poly' and 'cosine'.
base_lr : float
Base learning rate, i.e. the starting learning rate.
n_iters : int
Number of iterations in each epoch.
n_epochs : int
Number of training epochs.
step : list
A list of epochs to decay the learning rate.
step_factor : float
Learning rate decay factor.
target_lr : float
Target learning rate for poly and cosine, as the ending learning rate.
power : float
Power of poly function.
warmup_epochs : int
Number of epochs for the warmup stage.
warmup_lr : float
The base learning rate for the warmup stage.
warmup_mode : str
Modes for the warmup stage. Currently it supports 'linear' and 'constant'.
"""
def __init__(self,
mode,
base_lr,
n_iters,
n_epochs,
step=(30, 60, 90),
step_factor=0.1,
target_lr=0,
power=0.9,
warmup_epochs=0,
warmup_lr=0,
warmup_mode="linear"):
super(LRScheduler, self).__init__(base_lr=base_lr)
assert(mode in ["step", "poly", "cosine"])
assert(warmup_mode in ["constant", "linear", "poly", "cosine"])
self.mode = mode
self.learning_rate = self.base_lr
self.n_iters = n_iters
self.step = step
self.step_factor = step_factor
self.target_lr = target_lr
self.power = power
self.warmup_epochs = warmup_epochs
self.warmup_lr = warmup_lr
self.warmup_mode = warmup_mode
self.N = n_epochs * n_iters
self.warmup_N = warmup_epochs * n_iters
def __call__(self, num_update):
return self.learning_rate
def update(self, i, epoch):
t = epoch * self.n_iters + i
assert (t >= 0) and (t <= self.N)
t = float(t)
if epoch < self.warmup_epochs:
# Warm-up Stage
if self.warmup_mode == "constant":
self.learning_rate = self.warmup_lr
else:
base_lr_real = self.base_lr - self.warmup_lr
t_rel = t / self.warmup_N
if self.warmup_mode == "linear":
self.learning_rate = self.warmup_lr + base_lr_real * t_rel
elif self.warmup_mode == "poly":
self.learning_rate = self.warmup_lr + base_lr_real * pow(t_rel, self.power)
elif self.warmup_mode == "cosine":
self.learning_rate = self.warmup_lr + base_lr_real * 0.5 * (1.0 + cos(pi + pi * t_rel))
else:
raise NotImplementedError
else:
if self.mode == "step":
count = sum([1 for s in self.step if s <= epoch])
self.learning_rate = self.base_lr * pow(self.step_factor, count)
else:
base_lr_real = self.base_lr - self.target_lr
t_rel = (t - self.warmup_N) / (self.N - self.warmup_N)
if self.mode == "poly":
self.learning_rate = self.target_lr + base_lr_real * pow(1 - t_rel, self.power)
elif self.mode == "cosine":
self.learning_rate = self.target_lr + base_lr_real * (1 + cos(pi * t_rel)) / 2
else:
raise NotImplementedError
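

if __name__ == "__main__":
    # Minimal usage sketch; the epoch/iteration counts below are illustrative
    # assumptions, not values from any training config.
    scheduler = LRScheduler(
        mode="cosine",
        base_lr=0.1,
        n_iters=100,
        n_epochs=120,
        warmup_epochs=5,
        warmup_lr=1e-3)
    for epoch in (0, 4, 5, 60, 119):
        scheduler.update(i=0, epoch=epoch)
        print("epoch={}, lr={:.6f}".format(epoch, scheduler.learning_rate))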

# imgclsmob-master/gluon/losses.py
"""
Loss functions.
"""
__all__ = ['SegSoftmaxCrossEntropyLoss', 'MixSoftmaxCrossEntropyLoss']
from mxnet.gluon.loss import Loss, _reshape_like
class SegSoftmaxCrossEntropyLoss(Loss):
"""
SoftmaxCrossEntropyLoss with ignore labels (for segmentation task).
Parameters:
----------
    sparse_label : bool, default True
        Whether label is an integer array instead of probability distribution.
    batch_axis : int, default 0
        The axis that represents mini-batch.
    ignore_label : int, default -1
        The label to ignore.
    size_average : bool, default True
        Whether to re-scale loss with regard to ignored labels.
"""
def __init__(self,
sparse_label=True,
batch_axis=0,
ignore_label=-1,
size_average=True,
**kwargs):
super(SegSoftmaxCrossEntropyLoss, self).__init__(None, batch_axis, **kwargs)
self._sparse_label = sparse_label
self._ignore_label = ignore_label
self._size_average = size_average
def hybrid_forward(self, F, pred, label):
"""
Compute loss.
"""
softmaxout = F.SoftmaxOutput(
pred,
label.astype(pred.dtype),
ignore_label=self._ignore_label,
multi_output=self._sparse_label,
use_ignore=True,
normalization=("valid" if self._size_average else "null"))
if self._sparse_label:
loss = -F.pick(F.log(softmaxout), label, axis=1, keepdims=True)
else:
label = _reshape_like(F, label, pred)
loss = -F.sum(F.log(softmaxout) * label, axis=-1, keepdims=True)
loss = F.where(label.expand_dims(axis=1) == self._ignore_label, F.zeros_like(loss), loss)
return F.mean(loss, axis=self._batch_axis, exclude=True)
class MixSoftmaxCrossEntropyLoss(SegSoftmaxCrossEntropyLoss):
"""
SegSoftmaxCrossEntropyLoss with auxiliary loss support.
Parameters:
----------
aux : bool, default True
Whether to use auxiliary loss.
aux_weight : float, default 0.2
The weight for aux loss.
ignore_label : int, default -1
The label to ignore.
"""
def __init__(self,
aux=True,
aux_weight=0.2,
ignore_label=-1,
**kwargs):
super(MixSoftmaxCrossEntropyLoss, self).__init__(ignore_label=ignore_label, **kwargs)
self.aux = aux
self.aux_weight = aux_weight
def _aux_forward(self, F, pred1, pred2, label):
"""
Compute loss including auxiliary output.
"""
loss1 = super(MixSoftmaxCrossEntropyLoss, self).hybrid_forward(F, pred1, label)
        loss2 = super(MixSoftmaxCrossEntropyLoss, self).hybrid_forward(F, pred2, label)
return loss1 + self.aux_weight * loss2
def hybrid_forward(self, F, preds, label, **kwargs):
"""
Compute loss.
"""
if self.aux:
return self._aux_forward(F, preds[0], preds[1], label)
else:
return super(MixSoftmaxCrossEntropyLoss, self).hybrid_forward(F, preds, label)
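def _test():
    # A minimal usage sketch with synthetic tensors (the shapes and the
    # all-zero labels are assumptions for illustration; aux=False exercises
    # the single-prediction path, which accepts a plain NDArray).
    import mxnet as mx
    loss = MixSoftmaxCrossEntropyLoss(aux=False, ignore_label=-1)
    pred = mx.nd.random.normal(shape=(2, 21, 8, 8))
    label = mx.nd.zeros(shape=(2, 8, 8))
    print(loss(pred, label))
if __name__ == "__main__":
    _test()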
| 3,478 | 33.79 | 102 | py |
imgclsmob | imgclsmob-master/gluon/weighted_random_sampler.py | """
Dataset weighted random sampler.
"""
__all__ = ['WeightedRandomSampler']
import numpy as np
import mxnet as mx
from mxnet.gluon.data import Sampler
class WeightedRandomSampler(Sampler):
"""
    Samples elements from [0, length) randomly, with probabilities proportional to the given weights (the multinomial draw samples with replacement).
Parameters:
----------
length : int
Length of the sequence.
weights : np.array of float
Normalized weights of samples.
"""
def __init__(self,
length,
weights):
assert (isinstance(length, int) and length > 0)
assert (len(weights) == length)
assert (np.abs(weights.sum() - 1.0) <= 1e-5)
self._length = length
self._weights = weights.copy()
def __iter__(self):
indices = mx.nd.random.multinomial(mx.nd.array(self._weights), shape=self._length).asnumpy()
np.random.shuffle(indices)
return iter(indices)
def __len__(self):
return self._length
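def _test():
    # A minimal usage sketch (the weight values are assumptions for
    # illustration; they only need to sum to one).
    weights = np.array([0.1, 0.2, 0.3, 0.4], dtype=np.float32)
    sampler = WeightedRandomSampler(length=4, weights=weights)
    print(list(iter(sampler)))
if __name__ == "__main__":
    _test()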
| 969 | 23.871795 | 100 | py |
imgclsmob | imgclsmob-master/gluon/dataset_utils.py | """
Dataset routines.
"""
__all__ = ['get_dataset_metainfo', 'get_train_data_source', 'get_val_data_source', 'get_test_data_source',
'get_batch_fn']
from .datasets.imagenet1k_cls_dataset import ImageNet1KMetaInfo
from .datasets.imagenet1k_rec_cls_dataset import ImageNet1KRecMetaInfo
from .datasets.cub200_2011_cls_dataset import CUB200MetaInfo
from .datasets.cifar10_cls_dataset import CIFAR10MetaInfo
from .datasets.cifar100_cls_dataset import CIFAR100MetaInfo
from .datasets.svhn_cls_dataset import SVHNMetaInfo
from .datasets.voc_seg_dataset import VOCMetaInfo
from .datasets.ade20k_seg_dataset import ADE20KMetaInfo
from .datasets.cityscapes_seg_dataset import CityscapesMetaInfo
from .datasets.coco_seg_dataset import CocoSegMetaInfo
from .datasets.coco_det_dataset import CocoDetMetaInfo
from .datasets.widerface_det_dataset import WiderfaceDetMetaInfo
from .datasets.coco_hpe1_dataset import CocoHpe1MetaInfo
from .datasets.coco_hpe2_dataset import CocoHpe2MetaInfo
from .datasets.coco_hpe3_dataset import CocoHpe3MetaInfo
from .datasets.hpatches_mch_dataset import HPatchesMetaInfo
from .datasets.librispeech_asr_dataset import LibriSpeechMetaInfo
from .datasets.mcv_asr_dataset import McvMetaInfo
from .weighted_random_sampler import WeightedRandomSampler
from mxnet.gluon.data import DataLoader
from mxnet.gluon.utils import split_and_load
def get_dataset_metainfo(dataset_name):
"""
Get dataset metainfo by name of dataset.
Parameters:
----------
dataset_name : str
Dataset name.
Returns:
-------
DatasetMetaInfo
Dataset metainfo.
"""
dataset_metainfo_map = {
"ImageNet1K": ImageNet1KMetaInfo,
"ImageNet1K_rec": ImageNet1KRecMetaInfo,
"CUB200_2011": CUB200MetaInfo,
"CIFAR10": CIFAR10MetaInfo,
"CIFAR100": CIFAR100MetaInfo,
"SVHN": SVHNMetaInfo,
"VOC": VOCMetaInfo,
"ADE20K": ADE20KMetaInfo,
"Cityscapes": CityscapesMetaInfo,
"CocoSeg": CocoSegMetaInfo,
"CocoDet": CocoDetMetaInfo,
"WiderFace": WiderfaceDetMetaInfo,
"CocoHpe1": CocoHpe1MetaInfo,
"CocoHpe2": CocoHpe2MetaInfo,
"CocoHpe3": CocoHpe3MetaInfo,
"HPatches": HPatchesMetaInfo,
"LibriSpeech": LibriSpeechMetaInfo,
"MCV": McvMetaInfo,
}
if dataset_name in dataset_metainfo_map.keys():
return dataset_metainfo_map[dataset_name]()
else:
raise Exception("Unrecognized dataset: {}".format(dataset_name))
def get_train_data_source(ds_metainfo,
batch_size,
num_workers):
"""
Get data source for training subset.
Parameters:
----------
ds_metainfo : DatasetMetaInfo
Dataset metainfo.
batch_size : int
Batch size.
num_workers : int
Number of background workers.
Returns:
-------
DataLoader or ImageRecordIter
Data source.
"""
if ds_metainfo.use_imgrec:
return ds_metainfo.train_imgrec_iter(
ds_metainfo=ds_metainfo,
batch_size=batch_size,
num_workers=num_workers)
else:
transform_train = ds_metainfo.train_transform(ds_metainfo=ds_metainfo)
kwargs = ds_metainfo.dataset_class_extra_kwargs if ds_metainfo.dataset_class_extra_kwargs is not None else {}
dataset = ds_metainfo.dataset_class(
root=ds_metainfo.root_dir_path,
mode="train",
transform=(transform_train if ds_metainfo.do_transform else None),
**kwargs)
if not ds_metainfo.do_transform:
if ds_metainfo.do_transform_first:
dataset = dataset.transform_first(fn=transform_train)
else:
dataset = dataset.transform(fn=transform_train)
ds_metainfo.update_from_dataset(dataset)
if not ds_metainfo.train_use_weighted_sampler:
return DataLoader(
dataset=dataset,
batch_size=batch_size,
shuffle=True,
last_batch="discard",
num_workers=num_workers)
else:
sampler = WeightedRandomSampler(
length=len(dataset),
weights=dataset._data.sample_weights)
return DataLoader(
dataset=dataset,
batch_size=batch_size,
# shuffle=True,
sampler=sampler,
last_batch="discard",
batchify_fn=ds_metainfo.batchify_fn,
num_workers=num_workers)
def get_val_data_source(ds_metainfo,
batch_size,
num_workers):
"""
Get data source for validation subset.
Parameters:
----------
ds_metainfo : DatasetMetaInfo
Dataset metainfo.
batch_size : int
Batch size.
num_workers : int
Number of background workers.
Returns:
-------
DataLoader or ImageRecordIter
Data source.
"""
if ds_metainfo.use_imgrec:
return ds_metainfo.val_imgrec_iter(
ds_metainfo=ds_metainfo,
batch_size=batch_size,
num_workers=num_workers)
else:
transform_val = ds_metainfo.val_transform(ds_metainfo=ds_metainfo)
kwargs = ds_metainfo.dataset_class_extra_kwargs if ds_metainfo.dataset_class_extra_kwargs is not None else {}
dataset = ds_metainfo.dataset_class(
root=ds_metainfo.root_dir_path,
mode="val",
transform=(transform_val if ds_metainfo.do_transform else None),
**kwargs)
if not ds_metainfo.do_transform:
if ds_metainfo.do_transform_first:
dataset = dataset.transform_first(fn=transform_val)
else:
dataset = dataset.transform(fn=transform_val)
ds_metainfo.update_from_dataset(dataset)
return DataLoader(
dataset=dataset,
batch_size=batch_size,
shuffle=False,
            last_batch=ds_metainfo.last_batch,
batchify_fn=ds_metainfo.batchify_fn,
num_workers=num_workers)
def get_test_data_source(ds_metainfo,
batch_size,
num_workers):
"""
Get data source for testing subset.
Parameters:
----------
ds_metainfo : DatasetMetaInfo
Dataset metainfo.
batch_size : int
Batch size.
num_workers : int
Number of background workers.
Returns:
-------
DataLoader or ImageRecordIter
Data source.
"""
if ds_metainfo.use_imgrec:
return ds_metainfo.val_imgrec_iter(
ds_metainfo=ds_metainfo,
batch_size=batch_size,
num_workers=num_workers)
else:
transform_test = ds_metainfo.test_transform(ds_metainfo=ds_metainfo)
kwargs = ds_metainfo.dataset_class_extra_kwargs if ds_metainfo.dataset_class_extra_kwargs is not None else {}
dataset = ds_metainfo.dataset_class(
root=ds_metainfo.root_dir_path,
mode="test",
transform=(transform_test if ds_metainfo.do_transform else None),
**kwargs)
if not ds_metainfo.do_transform:
if ds_metainfo.do_transform_first:
dataset = dataset.transform_first(fn=transform_test)
else:
dataset = dataset.transform(fn=transform_test)
ds_metainfo.update_from_dataset(dataset)
return DataLoader(
dataset=dataset,
batch_size=batch_size,
shuffle=False,
last_batch=ds_metainfo.last_batch,
batchify_fn=ds_metainfo.batchify_fn,
num_workers=num_workers)
def get_batch_fn(ds_metainfo):
"""
Get function for splitting data after extraction from data loader.
Parameters:
----------
ds_metainfo : DatasetMetaInfo
Dataset metainfo.
Returns:
-------
func
Desired function.
"""
if ds_metainfo.ml_type == "asr":
def batch_fn(batch, ctx):
# data = split_and_load(batch[0], ctx_list=ctx, batch_axis=0)
# data2 = split_and_load(batch[1], ctx_list=ctx, batch_axis=0)
# label = split_and_load(batch[2], ctx_list=ctx, batch_axis=0)
# return data, data2, label
# data = split_and_load(batch[0], ctx_list=ctx, batch_axis=0)
# label = split_and_load(batch[1], ctx_list=ctx, batch_axis=0)
# return data, label
data = split_and_load(batch[0][0], ctx_list=ctx, batch_axis=0)
data2 = split_and_load(batch[0][1], ctx_list=ctx, batch_axis=0)
label = split_and_load(batch[1], ctx_list=ctx, batch_axis=0)
return data, data2, label
return batch_fn
elif ds_metainfo.use_imgrec:
def batch_fn(batch, ctx):
data = split_and_load(batch.data[0], ctx_list=ctx, batch_axis=0)
label = split_and_load(batch.label[0], ctx_list=ctx, batch_axis=0)
return data, label
return batch_fn
else:
def batch_fn(batch, ctx):
data = split_and_load(batch[0], ctx_list=ctx, batch_axis=0)
label = split_and_load(batch[1], ctx_list=ctx, batch_axis=0)
return data, label
return batch_fn
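def _test_get_batch_fn():
    # Minimal sketch of the classification-style batch splitting contract
    # (the stub metainfo and tensor shapes are assumptions for illustration).
    import mxnet as mx
    class _StubMetaInfo(object):
        ml_type = "cls"
        use_imgrec = False
    batch_fn = get_batch_fn(_StubMetaInfo())
    batch = (mx.nd.zeros((4, 3, 32, 32)), mx.nd.zeros((4,)))
    data, label = batch_fn(batch, ctx=[mx.cpu()])
    print(len(data), data[0].shape, label[0].shape)
if __name__ == "__main__":
    _test_get_dataset_metainfo()
    _test_get_batch_fn()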
| 9,354 | 33.648148 | 117 | py |
imgclsmob | imgclsmob-master/gluon/model_stats.py | """
Routines for model statistics calculation.
"""
import logging
import numpy as np
import mxnet as mx
from mxnet.gluon import nn
from mxnet.gluon.contrib.nn import Identity, PixelShuffle2D
from .gluoncv2.models.common import ReLU6, ChannelShuffle, ChannelShuffle2, PReLU2, HSigmoid, HSwish,\
InterpolationBlock, HeatmapMaxDetBlock
from .gluoncv2.models.fishnet import ChannelSqueeze
from .gluoncv2.models.irevnet import IRevDownscale, IRevSplitBlock, IRevMergeBlock
from .gluoncv2.models.rir_cifar import RiRFinalBlock
from .gluoncv2.models.proxylessnas import ProxylessUnit
from .gluoncv2.models.lwopenpose_cmupan import LwopDecoderFinalBlock
from .gluoncv2.models.centernet import CenterNetHeatmapMaxDet
from .gluoncv2.models.danet import ScaleBlock
from .gluoncv2.models.jasper import MaskConv1d, NemoMelSpecExtractor
__all__ = ['measure_model']
def calc_block_num_params2(net):
"""
    Calculate number of trainable parameters in the block (via collect_params, i.e. including all children).
Parameters:
----------
net : Block
Model/block.
Returns:
-------
int
Number of parameters.
"""
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
return weight_count
def calc_block_num_params(block):
"""
    Calculate number of trainable parameters in the block (own direct parameters only, applied per leaf block).
Parameters:
----------
block : Block
Model/block.
Returns:
-------
int
Number of parameters.
"""
weight_count = 0
for param in block.params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
return weight_count
def measure_model(model,
in_shapes,
ctx=mx.cpu()):
"""
Calculate model statistics.
Parameters:
----------
model : HybridBlock
Tested model.
in_shapes : list of tuple of ints
Shapes of the input tensors.
ctx : Context, default CPU
        The context in which to run the model.
    Returns:
    -------
    tuple of three int
        FLOPs, MACs, and number of trainable parameters.
    """
global num_flops
global num_macs
global num_params
global names
num_flops = 0
num_macs = 0
num_params = 0
names = {}
def call_hook(block, x, y):
if not (isinstance(block, IRevSplitBlock) or isinstance(block, IRevMergeBlock) or
isinstance(block, RiRFinalBlock) or isinstance(block, InterpolationBlock) or
isinstance(block, MaskConv1d) or isinstance(block, NemoMelSpecExtractor)):
assert (len(x) == 1)
assert (len(block._children) == 0)
if isinstance(block, nn.Dense):
batch = x[0].shape[0]
in_units = block._in_units
out_units = block._units
extra_num_macs = in_units * out_units
if block.bias is None:
extra_num_flops = (2 * in_units - 1) * out_units
else:
extra_num_flops = 2 * in_units * out_units
extra_num_flops *= batch
extra_num_macs *= batch
elif isinstance(block, nn.Activation):
if block._act_type == "relu":
extra_num_flops = x[0].size
extra_num_macs = 0
elif block._act_type == "sigmoid":
extra_num_flops = 4 * x[0].size
extra_num_macs = 0
else:
raise TypeError("Unknown activation type: {}".format(block._act_type))
elif isinstance(block, nn.ELU):
extra_num_flops = 3 * x[0].size
extra_num_macs = 0
elif isinstance(block, nn.LeakyReLU):
extra_num_flops = 2 * x[0].size
extra_num_macs = 0
elif isinstance(block, ReLU6):
extra_num_flops = x[0].size
extra_num_macs = 0
elif isinstance(block, PReLU2):
extra_num_flops = 3 * x[0].size
extra_num_macs = 0
elif isinstance(block, nn.Swish):
extra_num_flops = 5 * x[0].size
extra_num_macs = 0
elif isinstance(block, HSigmoid):
extra_num_flops = x[0].size
extra_num_macs = 0
elif isinstance(block, HSwish):
extra_num_flops = 2 * x[0].size
extra_num_macs = 0
elif type(block) in [nn.Conv2DTranspose]:
extra_num_flops = 4 * x[0].size
extra_num_macs = 0
elif isinstance(block, nn.Conv2D):
batch = x[0].shape[0]
x_h = x[0].shape[2]
x_w = x[0].shape[3]
kernel_size = block._kwargs["kernel"]
strides = block._kwargs["stride"]
dilation = block._kwargs["dilate"]
padding = block._kwargs["pad"]
groups = block._kwargs["num_group"]
in_channels = block._in_channels
out_channels = block._channels
y_h = (x_h + 2 * padding[0] - dilation[0] * (kernel_size[0] - 1) - 1) // strides[0] + 1
y_w = (x_w + 2 * padding[1] - dilation[1] * (kernel_size[1] - 1) - 1) // strides[1] + 1
assert (out_channels == y.shape[1])
assert (y_h == y.shape[2])
assert (y_w == y.shape[3])
kernel_total_size = kernel_size[0] * kernel_size[1]
y_size = y_h * y_w
extra_num_macs = kernel_total_size * in_channels * y_size * out_channels // groups
if block.bias is None:
extra_num_flops = (2 * kernel_total_size * y_size - 1) * in_channels * out_channels // groups
else:
extra_num_flops = 2 * kernel_total_size * in_channels * y_size * out_channels // groups
extra_num_flops *= batch
extra_num_macs *= batch
elif isinstance(block, nn.BatchNorm):
extra_num_flops = 4 * x[0].size
extra_num_macs = 0
elif isinstance(block, nn.InstanceNorm):
extra_num_flops = 4 * x[0].size
extra_num_macs = 0
elif type(block) in [nn.MaxPool2D, nn.AvgPool2D, nn.GlobalAvgPool2D, nn.GlobalMaxPool2D]:
batch = x[0].shape[0]
assert (x[0].shape[1] == y.shape[1])
pool_size = block._kwargs["kernel"]
y_h = y.shape[2]
y_w = y.shape[3]
channels = x[0].shape[1]
y_size = y_h * y_w
pool_total_size = pool_size[0] * pool_size[1]
extra_num_flops = channels * y_size * pool_total_size
extra_num_macs = 0
extra_num_flops *= batch
extra_num_macs *= batch
elif isinstance(block, nn.Dropout):
extra_num_flops = 0
extra_num_macs = 0
elif type(block) in [nn.Flatten]:
extra_num_flops = 0
extra_num_macs = 0
elif isinstance(block, nn.HybridSequential):
assert (len(block._children) == 0)
extra_num_flops = 0
extra_num_macs = 0
elif type(block) in [ChannelShuffle, ChannelShuffle2]:
extra_num_flops = x[0].size
extra_num_macs = 0
elif isinstance(block, Identity):
extra_num_flops = 0
extra_num_macs = 0
elif isinstance(block, PixelShuffle2D):
extra_num_flops = x[0].size
extra_num_macs = 0
elif isinstance(block, ChannelSqueeze):
extra_num_flops = x[0].size
extra_num_macs = 0
elif isinstance(block, IRevDownscale):
extra_num_flops = 5 * x[0].size
extra_num_macs = 0
elif isinstance(block, IRevSplitBlock):
extra_num_flops = x[0].size
extra_num_macs = 0
elif isinstance(block, IRevMergeBlock):
extra_num_flops = x[0].size
extra_num_macs = 0
elif isinstance(block, RiRFinalBlock):
extra_num_flops = x[0].size
extra_num_macs = 0
elif isinstance(block, ProxylessUnit):
extra_num_flops = x[0].size
extra_num_macs = 0
elif type(block) in [MaskConv1d, nn.Conv1D]:
if isinstance(y, tuple):
assert isinstance(block, MaskConv1d)
y = y[0]
batch = x[0].shape[0]
x_h = x[0].shape[2]
kernel_size = block._kwargs["kernel"]
strides = block._kwargs["stride"]
dilation = block._kwargs["dilate"]
padding = block._kwargs["pad"]
groups = block._kwargs["num_group"]
in_channels = block._in_channels
out_channels = block._channels
y_h = (x_h + 2 * padding[0] - dilation[0] * (kernel_size[0] - 1) - 1) // strides[0] + 1
assert (out_channels == y.shape[1])
assert (y_h == y.shape[2])
kernel_total_size = kernel_size[0]
y_size = y_h
extra_num_macs = kernel_total_size * in_channels * y_size * out_channels // groups
if block.bias is None:
extra_num_flops = (2 * kernel_total_size * y_size - 1) * in_channels * out_channels // groups
else:
extra_num_flops = 2 * kernel_total_size * in_channels * y_size * out_channels // groups
extra_num_flops *= batch
extra_num_macs *= batch
elif type(block) in [InterpolationBlock, HeatmapMaxDetBlock, CenterNetHeatmapMaxDet, ScaleBlock,
NemoMelSpecExtractor]:
extra_num_flops, extra_num_macs = block.calc_flops(x[0])
elif isinstance(block, LwopDecoderFinalBlock):
if not block.calc_3d_features:
extra_num_flops = 0
extra_num_macs = 0
else:
raise TypeError("LwopDecoderFinalBlock!")
else:
raise TypeError("Unknown layer type: {}".format(type(block)))
global num_flops
global num_macs
global num_params
global names
num_flops += extra_num_flops
num_macs += extra_num_macs
if block.name not in names:
names[block.name] = 1
num_params += calc_block_num_params(block)
def register_forward_hooks(a_block):
if len(a_block._children) > 0:
assert (calc_block_num_params(a_block) == 0)
children_handles = []
for child_block in a_block._children.values():
child_handles = register_forward_hooks(child_block)
children_handles += child_handles
return children_handles
else:
handle = a_block.register_forward_hook(call_hook)
return [handle]
hook_handles = register_forward_hooks(model)
if len(in_shapes) == 1:
x = mx.nd.zeros(in_shapes[0], ctx=ctx)
model(x)
elif len(in_shapes) == 2:
x1 = mx.nd.zeros(in_shapes[0], ctx=ctx)
x2 = mx.nd.zeros(in_shapes[1], ctx=ctx)
model(x1, x2)
else:
raise NotImplementedError()
num_params1 = calc_block_num_params2(model)
if num_params != num_params1:
logging.warning(
"Calculated numbers of parameters are different: standard method: {},\tper-leaf method: {}".format(
num_params1, num_params))
    for h in hook_handles:
        h.detach()
return num_flops, num_macs, num_params1
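def _test():
    # Minimal sketch: measure a toy one-convolution network (the input shape
    # is an assumption; any model built from the layer types handled above
    # can be measured the same way).
    net = nn.HybridSequential(prefix="")
    net.add(nn.Conv2D(channels=8, kernel_size=3, padding=1, use_bias=False, in_channels=3))
    net.initialize()
    flops, macs, params = measure_model(net, in_shapes=[(1, 3, 32, 32)])
    print("FLOPs={}, MACs={}, params={}".format(flops, macs, params))
if __name__ == "__main__":
    _test()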
| 11,415 | 36.552632 | 111 | py |
imgclsmob | imgclsmob-master/gluon/setup.py | from setuptools import setup, find_packages
from os import path
from io import open
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='gluoncv2',
version='0.0.64',
description='Image classification and segmentation models for Gluon',
license='MIT',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/osmr/imgclsmob',
author='Oleg Sémery',
author_email='osemery@gmail.com',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Scientific/Engineering :: Image Recognition',
],
keywords='machine-learning deep-learning neuralnetwork image-classification mxnet gluon imagenet cifar svhn vgg '
'resnet pyramidnet diracnet densenet condensenet wrn drn dpn darknet fishnet espnetv2 hrnet xdensnet '
'squeezenet squeezenext shufflenet menet mobilenet igcv3 mnasnet darts xception inception polynet nasnet '
'pnasnet ror proxylessnas dianet efficientnet mixnet image-segmentation voc ade20k cityscapes coco pspnet '
'deeplabv3 fcn',
packages=find_packages(exclude=['datasets', 'metrics', 'others', '*.others', 'others.*', '*.others.*']),
include_package_data=True,
install_requires=['numpy'],
)
| 1,566 | 42.527778 | 120 | py |
imgclsmob | imgclsmob-master/gluon/utils.py | """
Main routines shared between training and evaluation scripts.
"""
__all__ = ['prepare_mx_context', 'get_initializer', 'prepare_model', 'calc_net_weight_count', 'validate',
'validate_asr', 'report_accuracy', 'get_composite_metric', 'get_metric_name', 'get_loss']
import os
import re
import logging
import numpy as np
import mxnet as mx
from mxnet.gluon.loss import SoftmaxCrossEntropyLoss
from .gluoncv2.model_provider import get_model
from .metrics.cls_metrics import Top1Error, TopKError
from .metrics.seg_metrics import PixelAccuracyMetric, MeanIoUMetric
from .metrics.det_metrics import CocoDetMApMetric, VOC07MApMetric, WiderfaceDetMetric
from .metrics.hpe_metrics import CocoHpeOksApMetric
from .metrics.asr_metrics import WER
from .losses import SegSoftmaxCrossEntropyLoss, MixSoftmaxCrossEntropyLoss
def prepare_mx_context(num_gpus,
batch_size):
"""
Prepare MXNet context and correct batch size.
Parameters:
----------
num_gpus : int
Number of GPU.
batch_size : int
Batch size for each GPU.
Returns:
-------
Context
MXNet context.
int
Batch size for all GPUs.
"""
ctx = [mx.gpu(i) for i in range(num_gpus)] if num_gpus > 0 else [mx.cpu()]
batch_size *= max(1, num_gpus)
return ctx, batch_size
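def _test_prepare_mx_context():
    # Minimal usage sketch (assumes a CPU-only run, i.e. zero GPUs requested).
    ctx, batch_size = prepare_mx_context(num_gpus=0, batch_size=32)
    print(ctx, batch_size)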
def get_initializer(initializer_name):
"""
Get initializer by name.
Parameters:
----------
initializer_name : str
Initializer name.
Returns:
-------
Initializer
Initializer.
"""
if initializer_name == "MSRAPrelu":
return mx.init.MSRAPrelu()
elif initializer_name == "Xavier":
return mx.init.Xavier()
elif initializer_name == "Xavier-gaussian-out-2":
return mx.init.Xavier(
rnd_type="gaussian",
factor_type="out",
magnitude=2)
else:
return None
def prepare_model(model_name,
use_pretrained,
pretrained_model_file_path,
dtype,
net_extra_kwargs=None,
load_ignore_extra=False,
tune_layers=None,
classes=None,
in_channels=None,
do_hybridize=True,
initializer=mx.init.MSRAPrelu(),
ctx=mx.cpu()):
"""
Create and initialize model by name.
Parameters:
----------
model_name : str
Model name.
use_pretrained : bool
Whether to use pretrained weights.
pretrained_model_file_path : str
Path to file with pretrained weights.
dtype : str
Base data type for tensors.
net_extra_kwargs : dict, default None
Extra parameters for model.
load_ignore_extra : bool, default False
Whether to ignore extra layers in pretrained model.
    tune_layers : str, default None
        Regular expression for names of layers to tune (all others will be frozen).
classes : int, default None
Number of classes.
in_channels : int, default None
Number of input channels.
do_hybridize : bool, default True
Whether to hybridize model.
initializer : Initializer
Initializer.
ctx : Context, default CPU
MXNet context.
Returns:
-------
HybridBlock
Model.
"""
kwargs = {"ctx": ctx,
"pretrained": use_pretrained}
if classes is not None:
kwargs["classes"] = classes
if in_channels is not None:
kwargs["in_channels"] = in_channels
if net_extra_kwargs is not None:
kwargs.update(net_extra_kwargs)
net = get_model(model_name, **kwargs)
if pretrained_model_file_path:
assert (os.path.isfile(pretrained_model_file_path))
logging.info("Loading model: {}".format(pretrained_model_file_path))
net.load_parameters(
filename=pretrained_model_file_path,
ctx=ctx,
ignore_extra=load_ignore_extra)
net.cast(dtype)
if do_hybridize:
net.hybridize(
static_alloc=True,
static_shape=True)
if pretrained_model_file_path or use_pretrained:
for param in net.collect_params().values():
if param._data is not None:
continue
param.initialize(initializer, ctx=ctx)
else:
net.initialize(initializer, ctx=ctx)
if (tune_layers is not None) and tune_layers:
tune_layers_pattern = re.compile(tune_layers)
for k, v in net._collect_params_with_prefix().items():
if tune_layers_pattern.match(k):
logging.info("Fine-tune parameter: {}".format(k))
else:
v.grad_req = "null"
for param in net.collect_params().values():
if param._data is not None:
continue
param.initialize(initializer, ctx=ctx)
return net
def calc_net_weight_count(net):
"""
Calculate number of model trainable parameters.
Parameters:
----------
net : HybridBlock
Model.
Returns:
-------
int
Number of parameters.
"""
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
return weight_count
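def _test_calc_net_weight_count():
    # Minimal sketch with a toy dense layer (the layer sizes are assumptions;
    # the expected count is 4 * 10 weights + 10 biases = 50).
    from mxnet.gluon import nn
    net = nn.Dense(units=10, in_units=4)
    print(calc_net_weight_count(net))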
def validate(metric,
net,
val_data,
batch_fn,
data_source_needs_reset,
dtype,
ctx):
"""
Core validation/testing routine.
Parameters:
----------
metric : EvalMetric
Metric object instance.
net : HybridBlock
Model.
val_data : DataLoader or ImageRecordIter
Data loader or ImRec-iterator.
batch_fn : func
Function for splitting data after extraction from data loader.
data_source_needs_reset : bool
        Whether to reset data (if val_data is ImageRecordIter).
dtype : str
Base data type for tensors.
ctx : Context
MXNet context.
Returns:
-------
EvalMetric
Metric object instance.
"""
if data_source_needs_reset:
val_data.reset()
metric.reset()
for batch in val_data:
data_list, labels_list = batch_fn(batch, ctx)
outputs_list = [net(x.astype(dtype, copy=False)) for x in data_list]
metric.update(labels_list, outputs_list)
return metric
def validate_asr(metric,
net,
val_data,
batch_fn,
data_source_needs_reset,
dtype,
ctx):
"""
Core validation/testing routine for ASR.
Parameters:
----------
metric : EvalMetric
Metric object instance.
net : HybridBlock
Model.
val_data : DataLoader or ImageRecordIter
Data loader or ImRec-iterator.
batch_fn : func
Function for splitting data after extraction from data loader.
data_source_needs_reset : bool
        Whether to reset data (if val_data is ImageRecordIter).
dtype : str
Base data type for tensors.
ctx : Context
MXNet context.
Returns:
-------
EvalMetric
Metric object instance.
"""
if data_source_needs_reset:
val_data.reset()
metric.reset()
for batch in val_data:
data_list, data2_list, labels_list = batch_fn(batch, ctx)
outputs_list = [net(x.astype(dtype, copy=False), x2.astype(dtype, copy=False)) for (x, x2) in
zip(data_list, data2_list)]
metric.update(labels_list, outputs_list)
return metric
def report_accuracy(metric,
extended_log=False):
"""
Make report string for composite metric.
Parameters:
----------
metric : EvalMetric
Metric object instance.
extended_log : bool, default False
Whether to log more precise accuracy values.
Returns:
-------
str
Report string.
"""
def create_msg(name, value):
if type(value) in [list, tuple]:
if extended_log:
return "{}={} ({})".format("{}", "/".join(["{:.4f}"] * len(value)), "/".join(["{}"] * len(value))).\
format(name, *(value + value))
else:
return "{}={}".format("{}", "/".join(["{:.4f}"] * len(value))).format(name, *value)
else:
if extended_log:
return "{name}={value:.4f} ({value})".format(name=name, value=value)
else:
return "{name}={value:.4f}".format(name=name, value=value)
metric_info = metric.get()
if isinstance(metric, mx.metric.CompositeEvalMetric):
msg = ", ".join([create_msg(name=m[0], value=m[1]) for m in zip(*metric_info)])
elif isinstance(metric, mx.metric.EvalMetric):
msg = create_msg(name=metric_info[0], value=metric_info[1])
else:
raise Exception("Wrong metric type: {}".format(type(metric)))
return msg
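def _test_report_accuracy():
    # Minimal sketch with a single built-in metric and synthetic predictions
    # (values are for illustration only).
    metric = mx.metric.Accuracy()
    metric.update(labels=[mx.nd.array([0, 1])], preds=[mx.nd.array([[0.9, 0.1], [0.2, 0.8]])])
    print(report_accuracy(metric))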
def get_metric(metric_name, metric_extra_kwargs):
"""
Get metric by name.
Parameters:
----------
metric_name : str
Metric name.
metric_extra_kwargs : dict
Metric extra parameters.
Returns:
-------
EvalMetric
Metric object instance.
"""
if metric_name == "Top1Error":
return Top1Error(**metric_extra_kwargs)
elif metric_name == "TopKError":
return TopKError(**metric_extra_kwargs)
elif metric_name == "PixelAccuracyMetric":
return PixelAccuracyMetric(**metric_extra_kwargs)
elif metric_name == "MeanIoUMetric":
return MeanIoUMetric(**metric_extra_kwargs)
elif metric_name == "CocoDetMApMetric":
return CocoDetMApMetric(**metric_extra_kwargs)
elif metric_name == "VOC07MApMetric":
return VOC07MApMetric(**metric_extra_kwargs)
elif metric_name == "WiderfaceDetMetric":
return WiderfaceDetMetric(**metric_extra_kwargs)
elif metric_name == "CocoHpeOksApMetric":
return CocoHpeOksApMetric(**metric_extra_kwargs)
elif metric_name == "WER":
return WER(**metric_extra_kwargs)
else:
raise Exception("Wrong metric name: {}".format(metric_name))
def get_composite_metric(metric_names, metric_extra_kwargs):
"""
Get composite metric by list of metric names.
Parameters:
----------
metric_names : list of str
Metric name list.
metric_extra_kwargs : list of dict
Metric extra parameters list.
Returns:
-------
CompositeEvalMetric
Metric object instance.
"""
if len(metric_names) == 1:
metric = get_metric(metric_names[0], metric_extra_kwargs[0])
else:
metric = mx.metric.CompositeEvalMetric()
for name, extra_kwargs in zip(metric_names, metric_extra_kwargs):
metric.add(get_metric(name, extra_kwargs))
return metric
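def _test_get_composite_metric():
    # Minimal sketch (a single-entry list degenerates to a plain metric; the
    # empty kwargs dict assumes Top1Error needs no constructor arguments).
    metric = get_composite_metric(metric_names=["Top1Error"], metric_extra_kwargs=[{}])
    print(get_metric_name(metric, 0))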
def get_metric_name(metric, index):
"""
Get metric name by index in the composite metric.
Parameters:
----------
metric : CompositeEvalMetric or EvalMetric
Metric object instance.
index : int
Index.
Returns:
-------
str
Metric name.
"""
if isinstance(metric, mx.metric.CompositeEvalMetric):
return metric.metrics[index].name
elif isinstance(metric, mx.metric.EvalMetric):
assert (index == 0)
return metric.name
else:
raise Exception("Wrong metric type: {}".format(type(metric)))
def get_loss(loss_name, loss_extra_kwargs):
"""
Get loss by name.
Parameters:
----------
loss_name : str
Loss name.
loss_extra_kwargs : dict
Loss extra parameters.
Returns:
-------
Loss
Loss object instance.
"""
if loss_name == "SoftmaxCrossEntropy":
return SoftmaxCrossEntropyLoss(**loss_extra_kwargs)
if loss_name == "SegSoftmaxCrossEntropy":
return SegSoftmaxCrossEntropyLoss(**loss_extra_kwargs)
if loss_name == "MixSoftmaxCrossEntropy":
return MixSoftmaxCrossEntropyLoss(**loss_extra_kwargs)
else:
raise Exception("Wrong loss name: {}".format(loss_name))
| 12,230 | 27.444186 | 116 | py |
imgclsmob | imgclsmob-master/gluon/distillation.py | """
DNN distillation routines.
"""
__all__ = ['MealDiscriminator', 'MealAdvLoss']
from mxnet.gluon import nn, HybridBlock
from .gluoncv2.models.common import conv1x1, conv1x1_block
from mxnet.gluon.loss import SigmoidBinaryCrossEntropyLoss
class MealDiscriminator(HybridBlock):
"""
MEALv2 discriminator.
Parameters:
----------
classes : int, default 1000
Number of classification classes.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
"""
def __init__(self,
classes=1000,
bn_use_global_stats=False,
bn_cudnn_off=False,
**kwargs):
super(MealDiscriminator, self).__init__(**kwargs)
in_channels = classes
channels = [200, 40, 8]
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
for out_channels in channels:
self.features.add(conv1x1_block(
in_channels=in_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off))
in_channels = out_channels
self.output = nn.HybridSequential(prefix="")
self.output.add(conv1x1(
in_channels=in_channels,
out_channels=1,
use_bias=True))
self.output.add(nn.Flatten())
def hybrid_forward(self, F, x):
x = x.expand_dims(-1).expand_dims(-1)
x = self.features(x)
x = self.output(x)
x = x.squeeze(1)
return x
class MealAdvLoss(SigmoidBinaryCrossEntropyLoss):
"""
MEALv2 adversarial loss.
Parameters:
----------
    from_sigmoid : bool, default False
        Whether the input is the output of a sigmoid. Setting this to False
        makes the loss compute sigmoid and BCE together, which is more
        numerically stable through the log-sum-exp trick.
weight : float or None
Global scalar weight for loss.
batch_axis : int, default 0
The axis that represents mini-batch.
"""
def __init__(self,
**kwargs):
super(MealAdvLoss, self).__init__(**kwargs)
def hybrid_forward(self, F, pred, label, sample_weight=None, pos_weight=None):
z_pred = F.zeros_like(pred)
loss_pred = super(MealAdvLoss, self).hybrid_forward(F, pred, z_pred)
z_label = F.ones_like(label)
loss_label = super(MealAdvLoss, self).hybrid_forward(F, label, z_label)
return loss_pred + loss_label
def _test():
import numpy as np
import mxnet as mx
model = MealDiscriminator
net = model()
ctx = mx.cpu()
net.initialize(ctx=ctx)
# net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
# assert (model != MealDiscriminator or weight_count == 208834)
batch = 14
classes = 1000
x = mx.nd.random.normal(shape=(batch, classes), ctx=ctx)
y = net(x)
assert (y.shape == (batch,))
loss = MealAdvLoss()
z = loss(y, 1 - y)
print(z)
pass
if __name__ == "__main__":
_test()
| 3,585 | 28.393443 | 98 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/airnext.py | """
AirNeXt for ImageNet-1K, implemented in Gluon.
Original paper: 'Attention Inspiring Receptive-Fields Network for Learning Invariant Representations,'
https://ieeexplore.ieee.org/document/8510896.
"""
__all__ = ['AirNeXt', 'airnext50_32x4d_r2', 'airnext101_32x4d_r2', 'airnext101_32x4d_r16']
import os
import math
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1_block, conv3x3_block
from .airnet import AirBlock, AirInitBlock
class AirNeXtBottleneck(HybridBlock):
"""
    AirNeXt bottleneck block for residual path in AirNeXt unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
cardinality: int
Number of groups.
bottleneck_width: int
Width of bottleneck block.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
ratio: int
Air compression ratio.
in_size : tuple of 2 int
Spatial size of the input tensor for the bilinear upsampling operation.
"""
def __init__(self,
in_channels,
out_channels,
strides,
cardinality,
bottleneck_width,
bn_use_global_stats,
ratio,
in_size,
**kwargs):
super(AirNeXtBottleneck, self).__init__(**kwargs)
mid_channels = out_channels // 4
D = int(math.floor(mid_channels * (bottleneck_width / 64.0)))
group_width = cardinality * D
self.use_air_block = (strides == 1 and mid_channels < 512)
with self.name_scope():
self.conv1 = conv1x1_block(
in_channels=in_channels,
out_channels=group_width,
bn_use_global_stats=bn_use_global_stats)
self.conv2 = conv3x3_block(
in_channels=group_width,
out_channels=group_width,
strides=strides,
groups=cardinality,
bn_use_global_stats=bn_use_global_stats)
self.conv3 = conv1x1_block(
in_channels=group_width,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats,
activation=None)
if self.use_air_block:
self.air = AirBlock(
in_channels=in_channels,
out_channels=group_width,
groups=(cardinality // ratio),
bn_use_global_stats=bn_use_global_stats,
ratio=ratio,
in_size=in_size)
def hybrid_forward(self, F, x):
if self.use_air_block:
att = self.air(x)
x = self.conv1(x)
x = self.conv2(x)
if self.use_air_block:
x = x * att
x = self.conv3(x)
return x
class AirNeXtUnit(HybridBlock):
"""
    AirNeXt unit with residual connection.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
cardinality: int
Number of groups.
bottleneck_width: int
Width of bottleneck block.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
ratio: int
Air compression ratio.
in_size : tuple of 2 int
Spatial size of the input tensor for the bilinear upsampling operation.
"""
def __init__(self,
in_channels,
out_channels,
strides,
cardinality,
bottleneck_width,
bn_use_global_stats,
ratio,
in_size,
**kwargs):
super(AirNeXtUnit, self).__init__(**kwargs)
self.resize_identity = (in_channels != out_channels) or (strides != 1)
with self.name_scope():
self.body = AirNeXtBottleneck(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
cardinality=cardinality,
bottleneck_width=bottleneck_width,
bn_use_global_stats=bn_use_global_stats,
ratio=ratio,
in_size=in_size)
if self.resize_identity:
self.identity_conv = conv1x1_block(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats,
activation=None)
self.activ = nn.Activation("relu")
def hybrid_forward(self, F, x):
if self.resize_identity:
identity = self.identity_conv(x)
else:
identity = x
x = self.body(x)
x = x + identity
x = self.activ(x)
return x
class AirNeXt(HybridBlock):
"""
    AirNeXt model from 'Attention Inspiring Receptive-Fields Network for Learning Invariant Representations,'
https://ieeexplore.ieee.org/document/8510896.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
cardinality: int
Number of groups.
bottleneck_width: int
Width of bottleneck block.
ratio: int
Air compression ratio.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
cardinality,
bottleneck_width,
ratio,
bn_use_global_stats=False,
in_channels=3,
in_size=(224, 224),
classes=1000,
**kwargs):
super(AirNeXt, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
self.features.add(AirInitBlock(
in_channels=in_channels,
out_channels=init_block_channels,
bn_use_global_stats=bn_use_global_stats))
in_channels = init_block_channels
in_size = tuple([x // 4 for x in in_size])
for i, channels_per_stage in enumerate(channels):
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
with stage.name_scope():
for j, out_channels in enumerate(channels_per_stage):
strides = 2 if (j == 0) and (i != 0) else 1
stage.add(AirNeXtUnit(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
cardinality=cardinality,
bottleneck_width=bottleneck_width,
bn_use_global_stats=bn_use_global_stats,
ratio=ratio,
in_size=in_size))
in_channels = out_channels
in_size = tuple([x // strides for x in in_size])
self.features.add(stage)
self.features.add(nn.AvgPool2D(
pool_size=7,
strides=1))
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
self.output.add(nn.Dense(
units=classes,
in_units=in_channels))
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_airnext(blocks,
cardinality,
bottleneck_width,
base_channels,
ratio,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
    Create AirNeXt model with specific parameters.
Parameters:
----------
blocks : int
Number of blocks.
cardinality: int
Number of groups.
bottleneck_width: int
Width of bottleneck block.
base_channels: int
Base number of channels.
ratio: int
Air compression ratio.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
if blocks == 50:
layers = [3, 4, 6, 3]
elif blocks == 101:
layers = [3, 4, 23, 3]
else:
raise ValueError("Unsupported AirNeXt with number of blocks: {}".format(blocks))
bottleneck_expansion = 4
init_block_channels = base_channels
channels_per_layers = [base_channels * (2 ** i) * bottleneck_expansion for i in range(len(layers))]
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
net = AirNeXt(
channels=channels,
init_block_channels=init_block_channels,
cardinality=cardinality,
bottleneck_width=bottleneck_width,
ratio=ratio,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def airnext50_32x4d_r2(**kwargs):
"""
AirNeXt50-32x4d (r=2) model from 'Attention Inspiring Receptive-Fields Network for Learning Invariant
Representations,' https://ieeexplore.ieee.org/document/8510896.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_airnext(
blocks=50,
cardinality=32,
bottleneck_width=4,
base_channels=64,
ratio=2,
model_name="airnext50_32x4d_r2",
**kwargs)
def airnext101_32x4d_r2(**kwargs):
"""
AirNeXt101-32x4d (r=2) model from 'Attention Inspiring Receptive-Fields Network for Learning Invariant
Representations,' https://ieeexplore.ieee.org/document/8510896.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_airnext(
blocks=101,
cardinality=32,
bottleneck_width=4,
base_channels=64,
ratio=2,
model_name="airnext101_32x4d_r2",
**kwargs)
def airnext101_32x4d_r16(**kwargs):
"""
AirNeXt101-32x4d (r=16) model from 'Attention Inspiring Receptive-Fields Network for Learning Invariant
Representations,' https://ieeexplore.ieee.org/document/8510896.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_airnext(
blocks=101,
cardinality=32,
bottleneck_width=4,
base_channels=64,
ratio=16,
model_name="airnext101_32x4d_r16",
**kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
airnext50_32x4d_r2,
airnext101_32x4d_r2,
airnext101_32x4d_r16,
]
for model in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != airnext50_32x4d_r2 or weight_count == 27604296)
assert (model != airnext101_32x4d_r2 or weight_count == 54099272)
assert (model != airnext101_32x4d_r16 or weight_count == 45456456)
x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
y = net(x)
assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
| 13,827 | 31.845606 | 115 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/pspnet.py | """
PSPNet for image segmentation, implemented in Gluon.
Original paper: 'Pyramid Scene Parsing Network,' https://arxiv.org/abs/1612.01105.
"""
__all__ = ['PSPNet', 'pspnet_resnetd50b_voc', 'pspnet_resnetd101b_voc', 'pspnet_resnetd50b_coco',
'pspnet_resnetd101b_coco', 'pspnet_resnetd50b_ade20k', 'pspnet_resnetd101b_ade20k',
'pspnet_resnetd50b_cityscapes', 'pspnet_resnetd101b_cityscapes', 'PyramidPooling']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from mxnet.gluon.contrib.nn import HybridConcurrent, Identity
from .common import conv1x1, conv1x1_block, conv3x3_block
from .resnetd import resnetd50b, resnetd101b
class PSPFinalBlock(HybridBlock):
"""
PSPNet final block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
bottleneck_factor : int, default 4
Bottleneck factor.
"""
def __init__(self,
in_channels,
out_channels,
bottleneck_factor=4,
**kwargs):
super(PSPFinalBlock, self).__init__(**kwargs)
assert (in_channels % bottleneck_factor == 0)
mid_channels = in_channels // bottleneck_factor
with self.name_scope():
self.conv1 = conv3x3_block(
in_channels=in_channels,
out_channels=mid_channels)
self.dropout = nn.Dropout(rate=0.1)
self.conv2 = conv1x1(
in_channels=mid_channels,
out_channels=out_channels,
use_bias=True)
def hybrid_forward(self, F, x, out_size):
x = self.conv1(x)
x = self.dropout(x)
x = self.conv2(x)
x = F.contrib.BilinearResize2D(x, height=out_size[0], width=out_size[1])
return x
class PyramidPoolingBranch(HybridBlock):
"""
Pyramid Pooling branch.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
pool_out_size : int
Target output size of the image.
upscale_out_size : tuple of 2 int or None
Spatial size of output image for the bilinear upsampling operation.
"""
def __init__(self,
in_channels,
out_channels,
pool_out_size,
upscale_out_size,
**kwargs):
super(PyramidPoolingBranch, self).__init__(**kwargs)
self.pool_out_size = pool_out_size
self.upscale_out_size = upscale_out_size
with self.name_scope():
self.conv = conv1x1_block(
in_channels=in_channels,
out_channels=out_channels)
def hybrid_forward(self, F, x):
in_size = self.upscale_out_size if self.upscale_out_size is not None else x.shape[2:]
x = F.contrib.AdaptiveAvgPooling2D(x, output_size=self.pool_out_size)
x = self.conv(x)
x = F.contrib.BilinearResize2D(x, height=in_size[0], width=in_size[1])
return x
class PyramidPooling(HybridBlock):
"""
Pyramid Pooling module.
Parameters:
----------
in_channels : int
Number of input channels.
upscale_out_size : tuple of 2 int
Spatial size of the input tensor for the bilinear upsampling operation.
"""
def __init__(self,
in_channels,
upscale_out_size,
**kwargs):
super(PyramidPooling, self).__init__(**kwargs)
pool_out_sizes = [1, 2, 3, 6]
assert (len(pool_out_sizes) == 4)
assert (in_channels % 4 == 0)
mid_channels = in_channels // 4
with self.name_scope():
self.branches = HybridConcurrent(axis=1, prefix="")
self.branches.add(Identity())
for pool_out_size in pool_out_sizes:
self.branches.add(PyramidPoolingBranch(
in_channels=in_channels,
out_channels=mid_channels,
pool_out_size=pool_out_size,
upscale_out_size=upscale_out_size))
def hybrid_forward(self, F, x):
x = self.branches(x)
return x
class PSPNet(HybridBlock):
"""
PSPNet model from 'Pyramid Scene Parsing Network,' https://arxiv.org/abs/1612.01105.
Parameters:
----------
backbone : nn.Sequential
Feature extractor.
backbone_out_channels : int, default 2048
Number of output channels form feature extractor.
aux : bool, default False
Whether to output an auxiliary result.
fixed_size : bool, default True
Whether to expect fixed spatial size of input image.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (480, 480)
Spatial size of the expected input image.
classes : int, default 21
Number of segmentation classes.
"""
def __init__(self,
backbone,
backbone_out_channels=2048,
aux=False,
fixed_size=True,
in_channels=3,
in_size=(480, 480),
classes=21,
**kwargs):
super(PSPNet, self).__init__(**kwargs)
assert (in_channels > 0)
assert ((in_size[0] % 8 == 0) and (in_size[1] % 8 == 0))
self.in_size = in_size
self.classes = classes
self.aux = aux
self.fixed_size = fixed_size
with self.name_scope():
self.backbone = backbone
pool_out_size = (self.in_size[0] // 8, self.in_size[1] // 8) if fixed_size else None
self.pool = PyramidPooling(
in_channels=backbone_out_channels,
upscale_out_size=pool_out_size)
pool_out_channels = 2 * backbone_out_channels
self.final_block = PSPFinalBlock(
in_channels=pool_out_channels,
out_channels=classes,
bottleneck_factor=8)
if self.aux:
aux_out_channels = backbone_out_channels // 2
self.aux_block = PSPFinalBlock(
in_channels=aux_out_channels,
out_channels=classes,
bottleneck_factor=4)
def hybrid_forward(self, F, x):
in_size = self.in_size if self.fixed_size else x.shape[2:]
x, y = self.backbone(x)
x = self.pool(x)
x = self.final_block(x, in_size)
if self.aux:
y = self.aux_block(y, in_size)
return x, y
else:
return x
def get_pspnet(backbone,
classes,
aux=False,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create PSPNet model with specific parameters.
Parameters:
----------
backbone : nn.Sequential
Feature extractor.
classes : int
Number of segmentation classes.
aux : bool, default False
Whether to output an auxiliary result.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
net = PSPNet(
backbone=backbone,
classes=classes,
aux=aux,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx,
ignore_extra=True)
return net
def pspnet_resnetd50b_voc(pretrained_backbone=False, classes=21, aux=True, **kwargs):
"""
PSPNet model on the base of ResNet(D)-50b for Pascal VOC from 'Pyramid Scene Parsing Network,'
https://arxiv.org/abs/1612.01105.
Parameters:
----------
pretrained_backbone : bool, default False
Whether to load the pretrained weights for feature extractor.
classes : int, default 21
Number of segmentation classes.
aux : bool, default True
Whether to output an auxiliary result.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
backbone = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features[:-1]
return get_pspnet(backbone=backbone, classes=classes, aux=aux, model_name="pspnet_resnetd50b_voc", **kwargs)
def pspnet_resnetd101b_voc(pretrained_backbone=False, classes=21, aux=True, **kwargs):
"""
PSPNet model on the base of ResNet(D)-101b for Pascal VOC from 'Pyramid Scene Parsing Network,'
https://arxiv.org/abs/1612.01105.
Parameters:
----------
pretrained_backbone : bool, default False
Whether to load the pretrained weights for feature extractor.
classes : int, default 21
Number of segmentation classes.
aux : bool, default True
Whether to output an auxiliary result.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
backbone = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features[:-1]
return get_pspnet(backbone=backbone, classes=classes, aux=aux, model_name="pspnet_resnetd101b_voc", **kwargs)
def pspnet_resnetd50b_coco(pretrained_backbone=False, classes=21, aux=True, **kwargs):
"""
PSPNet model on the base of ResNet(D)-50b for COCO from 'Pyramid Scene Parsing Network,'
https://arxiv.org/abs/1612.01105.
Parameters:
----------
pretrained_backbone : bool, default False
Whether to load the pretrained weights for feature extractor.
classes : int, default 21
Number of segmentation classes.
aux : bool, default True
Whether to output an auxiliary result.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
backbone = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features[:-1]
return get_pspnet(backbone=backbone, classes=classes, aux=aux, model_name="pspnet_resnetd50b_coco", **kwargs)
def pspnet_resnetd101b_coco(pretrained_backbone=False, classes=21, aux=True, **kwargs):
"""
PSPNet model on the base of ResNet(D)-101b for COCO from 'Pyramid Scene Parsing Network,'
https://arxiv.org/abs/1612.01105.
Parameters:
----------
pretrained_backbone : bool, default False
Whether to load the pretrained weights for feature extractor.
classes : int, default 21
Number of segmentation classes.
aux : bool, default True
Whether to output an auxiliary result.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
backbone = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features[:-1]
return get_pspnet(backbone=backbone, classes=classes, aux=aux, model_name="pspnet_resnetd101b_coco", **kwargs)
def pspnet_resnetd50b_ade20k(pretrained_backbone=False, classes=150, aux=True, **kwargs):
"""
PSPNet model on the base of ResNet(D)-50b for ADE20K from 'Pyramid Scene Parsing Network,'
https://arxiv.org/abs/1612.01105.
Parameters:
----------
pretrained_backbone : bool, default False
Whether to load the pretrained weights for feature extractor.
classes : int, default 150
Number of segmentation classes.
aux : bool, default True
Whether to output an auxiliary result.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
backbone = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features[:-1]
return get_pspnet(backbone=backbone, classes=classes, aux=aux, model_name="pspnet_resnetd50b_ade20k", **kwargs)
def pspnet_resnetd101b_ade20k(pretrained_backbone=False, classes=150, aux=True, **kwargs):
"""
PSPNet model on the base of ResNet(D)-101b for ADE20K from 'Pyramid Scene Parsing Network,'
https://arxiv.org/abs/1612.01105.
Parameters:
----------
pretrained_backbone : bool, default False
Whether to load the pretrained weights for feature extractor.
classes : int, default 150
Number of segmentation classes.
aux : bool, default True
Whether to output an auxiliary result.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
backbone = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features[:-1]
return get_pspnet(backbone=backbone, classes=classes, aux=aux, model_name="pspnet_resnetd101b_ade20k", **kwargs)
def pspnet_resnetd50b_cityscapes(pretrained_backbone=False, classes=19, aux=True, **kwargs):
"""
PSPNet model on the base of ResNet(D)-50b for Cityscapes from 'Pyramid Scene Parsing Network,'
https://arxiv.org/abs/1612.01105.
Parameters:
----------
pretrained_backbone : bool, default False
Whether to load the pretrained weights for feature extractor.
classes : int, default 19
Number of segmentation classes.
aux : bool, default True
Whether to output an auxiliary result.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
backbone = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features[:-1]
return get_pspnet(backbone=backbone, classes=classes, aux=aux, model_name="pspnet_resnetd50b_cityscapes", **kwargs)
def pspnet_resnetd101b_cityscapes(pretrained_backbone=False, classes=19, aux=True, **kwargs):
"""
PSPNet model on the base of ResNet(D)-101b for Cityscapes from 'Pyramid Scene Parsing Network,'
https://arxiv.org/abs/1612.01105.
Parameters:
----------
pretrained_backbone : bool, default False
Whether to load the pretrained weights for feature extractor.
classes : int, default 19
Number of segmentation classes.
aux : bool, default True
Whether to output an auxiliary result.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
backbone = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features[:-1]
return get_pspnet(backbone=backbone, classes=classes, aux=aux, model_name="pspnet_resnetd101b_cityscapes", **kwargs)
def _test():
import numpy as np
import mxnet as mx
in_size = (480, 480)
aux = False
pretrained = False
models = [
# (pspnet_resnetd50b_voc, 21),
# (pspnet_resnetd101b_voc, 21),
# (pspnet_resnetd50b_coco, 21),
# (pspnet_resnetd101b_coco, 21),
# (pspnet_resnetd50b_ade20k, 150),
# (pspnet_resnetd101b_ade20k, 150),
(pspnet_resnetd50b_cityscapes, 19),
# (pspnet_resnetd101b_cityscapes, 19),
]
for model, classes in models:
net = model(pretrained=pretrained, in_size=in_size, aux=aux)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
if aux:
assert (model != pspnet_resnetd50b_voc or weight_count == 49081578)
assert (model != pspnet_resnetd101b_voc or weight_count == 68073706)
assert (model != pspnet_resnetd50b_coco or weight_count == 49081578)
assert (model != pspnet_resnetd101b_coco or weight_count == 68073706)
assert (model != pspnet_resnetd50b_ade20k or weight_count == 49180908)
assert (model != pspnet_resnetd101b_ade20k or weight_count == 68173036)
assert (model != pspnet_resnetd50b_cityscapes or weight_count == 49080038)
assert (model != pspnet_resnetd101b_cityscapes or weight_count == 68072166)
else:
assert (model != pspnet_resnetd50b_voc or weight_count == 46716373)
assert (model != pspnet_resnetd101b_voc or weight_count == 65708501)
assert (model != pspnet_resnetd50b_coco or weight_count == 46716373)
assert (model != pspnet_resnetd101b_coco or weight_count == 65708501)
assert (model != pspnet_resnetd50b_ade20k or weight_count == 46782550)
assert (model != pspnet_resnetd101b_ade20k or weight_count == 65774678)
assert (model != pspnet_resnetd50b_cityscapes or weight_count == 46715347)
assert (model != pspnet_resnetd101b_cityscapes or weight_count == 65707475)
x = mx.nd.zeros((1, 3, in_size[0], in_size[1]), ctx=ctx)
ys = net(x)
y = ys[0] if aux else ys
assert ((y.shape[0] == x.shape[0]) and (y.shape[1] == classes) and (y.shape[2] == x.shape[2]) and
(y.shape[3] == x.shape[3]))
if __name__ == "__main__":
_test()
| 19,131 | 37.035785 | 120 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/dla.py | """
DLA for ImageNet-1K, implemented in Gluon.
Original paper: 'Deep Layer Aggregation,' https://arxiv.org/abs/1707.06484.
"""
__all__ = ['DLA', 'dla34', 'dla46c', 'dla46xc', 'dla60', 'dla60x', 'dla60xc', 'dla102', 'dla102x', 'dla102x2', 'dla169']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1, conv1x1_block, conv3x3_block, conv7x7_block
from .resnet import ResBlock, ResBottleneck
from .resnext import ResNeXtBottleneck
class DLABottleneck(ResBottleneck):
"""
DLA bottleneck block for residual path in residual block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
bottleneck_factor : int, default 2
Bottleneck factor.
"""
def __init__(self,
in_channels,
out_channels,
strides,
bn_use_global_stats,
bottleneck_factor=2,
**kwargs):
super(DLABottleneck, self).__init__(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats,
bottleneck_factor=bottleneck_factor,
**kwargs)
class DLABottleneckX(ResNeXtBottleneck):
"""
DLA ResNeXt-like bottleneck block for residual path in residual block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
cardinality: int, default 32
Number of groups.
bottleneck_width: int, default 8
Width of bottleneck block.
"""
def __init__(self,
in_channels,
out_channels,
strides,
bn_use_global_stats,
cardinality=32,
bottleneck_width=8,
**kwargs):
super(DLABottleneckX, self).__init__(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats,
cardinality=cardinality,
bottleneck_width=bottleneck_width,
**kwargs)
class DLAResBlock(HybridBlock):
"""
DLA residual block with residual connection.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    body_class : HybridBlock, default ResBlock
        Residual block body class.
    return_down : bool, default False
        Whether to return the downsample result.
"""
def __init__(self,
in_channels,
out_channels,
strides,
bn_use_global_stats,
body_class=ResBlock,
return_down=False,
**kwargs):
super(DLAResBlock, self).__init__(**kwargs)
self.return_down = return_down
self.downsample = (strides > 1)
self.project = (in_channels != out_channels)
with self.name_scope():
self.body = body_class(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats)
self.activ = nn.Activation("relu")
if self.downsample:
self.downsample_pool = nn.MaxPool2D(
pool_size=strides,
strides=strides)
if self.project:
self.project_conv = conv1x1_block(
in_channels=in_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats,
activation=None)
def hybrid_forward(self, F, x):
down = self.downsample_pool(x) if self.downsample else x
identity = self.project_conv(down) if self.project else down
if identity is None:
identity = x
x = self.body(x)
x = x + identity
x = self.activ(x)
if self.return_down:
return x, down
else:
return x
class DLARoot(HybridBlock):
"""
DLA root block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    residual : bool
        Whether to use a residual connection.
"""
def __init__(self,
in_channels,
out_channels,
bn_use_global_stats,
residual,
**kwargs):
super(DLARoot, self).__init__(**kwargs)
self.residual = residual
with self.name_scope():
self.conv = conv1x1_block(
in_channels=in_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats,
activation=None)
self.activ = nn.Activation("relu")
def hybrid_forward(self, F, x2, x1, extra):
last_branch = x2
x = F.concat(x2, x1, *extra, dim=1)
x = self.conv(x)
if self.residual:
x = x + last_branch
x = self.activ(x)
return x
class DLATree(HybridBlock):
"""
    DLA tree unit. It acts like an iterative stage.
Parameters:
----------
levels : int
Number of levels in the stage.
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
    res_body_class : HybridBlock
Residual block body class.
strides : int or tuple/list of 2 int
Strides of the convolution in a residual block.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    root_residual : bool
        Whether to use a residual connection in the root.
root_dim : int
Number of input channels in the root block.
    first_tree : bool, default False
        Whether this tree stage is the first stage in the net.
    input_level : bool, default True
        Whether this tree unit is the first unit in the stage.
    return_down : bool, default False
        Whether to return the downsample result.
"""
def __init__(self,
levels,
in_channels,
out_channels,
res_body_class,
strides,
bn_use_global_stats,
root_residual,
root_dim=0,
first_tree=False,
input_level=True,
return_down=False,
**kwargs):
super(DLATree, self).__init__(**kwargs)
self.return_down = return_down
self.add_down = (input_level and not first_tree)
self.root_level = (levels == 1)
if root_dim == 0:
root_dim = 2 * out_channels
if self.add_down:
root_dim += in_channels
with self.name_scope():
if self.root_level:
self.tree1 = DLAResBlock(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats,
body_class=res_body_class,
return_down=True)
self.tree2 = DLAResBlock(
in_channels=out_channels,
out_channels=out_channels,
strides=1,
bn_use_global_stats=bn_use_global_stats,
body_class=res_body_class,
return_down=False)
else:
self.tree1 = DLATree(
levels=levels - 1,
in_channels=in_channels,
out_channels=out_channels,
res_body_class=res_body_class,
strides=strides,
bn_use_global_stats=bn_use_global_stats,
root_residual=root_residual,
root_dim=0,
input_level=False,
return_down=True)
self.tree2 = DLATree(
levels=levels - 1,
in_channels=out_channels,
out_channels=out_channels,
res_body_class=res_body_class,
strides=1,
bn_use_global_stats=bn_use_global_stats,
root_residual=root_residual,
root_dim=root_dim + out_channels,
input_level=False,
return_down=False)
if self.root_level:
self.root = DLARoot(
in_channels=root_dim,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats,
residual=root_residual)
def hybrid_forward(self, F, x, extra=None):
extra = [] if extra is None else extra
x1, down = self.tree1(x)
if self.add_down:
extra.append(down)
if self.root_level:
x2 = self.tree2(x1)
x = self.root(x2, x1, extra)
else:
extra.append(x1)
x = self.tree2(x1, extra)
if self.return_down:
return x, down
else:
return x
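    # Rough sketch of the recursion: at the innermost level (`root_level`) tree1 and
    # tree2 are plain residual blocks and the root block fuses their outputs together
    # with any feature maps carried down in `extra`; at outer levels tree1/tree2 are
    # sub-trees, and x1 (plus the downsampled input when `add_down`) is appended to
    # `extra`, so the actual aggregation happens in the innermost root.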
class DLAInitBlock(HybridBlock):
"""
DLA specific initial block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels,
out_channels,
bn_use_global_stats=False,
**kwargs):
super(DLAInitBlock, self).__init__(**kwargs)
mid_channels = out_channels // 2
with self.name_scope():
self.conv1 = conv7x7_block(
in_channels=in_channels,
out_channels=mid_channels,
bn_use_global_stats=bn_use_global_stats)
self.conv2 = conv3x3_block(
in_channels=mid_channels,
out_channels=mid_channels,
bn_use_global_stats=bn_use_global_stats)
self.conv3 = conv3x3_block(
in_channels=mid_channels,
out_channels=out_channels,
strides=2,
bn_use_global_stats=bn_use_global_stats)
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
return x
class DLA(HybridBlock):
"""
DLA model from 'Deep Layer Aggregation,' https://arxiv.org/abs/1707.06484.
Parameters:
----------
    levels : list of int
        Number of levels in each stage.
channels : list of int
Number of output channels for each stage.
init_block_channels : int
Number of output channels for the initial unit.
    res_body_class : HybridBlock
Residual block body class.
    residual_root : bool
        Whether to use residual connections in the root blocks.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
levels,
channels,
init_block_channels,
res_body_class,
residual_root,
bn_use_global_stats=False,
in_channels=3,
in_size=(224, 224),
classes=1000,
**kwargs):
super(DLA, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
self.features.add(DLAInitBlock(
in_channels=in_channels,
out_channels=init_block_channels,
bn_use_global_stats=bn_use_global_stats))
in_channels = init_block_channels
for i in range(len(levels)):
levels_i = levels[i]
out_channels = channels[i]
first_tree = (i == 0)
self.features.add(DLATree(
levels=levels_i,
in_channels=in_channels,
out_channels=out_channels,
res_body_class=res_body_class,
strides=2,
bn_use_global_stats=bn_use_global_stats,
root_residual=residual_root,
first_tree=first_tree))
in_channels = out_channels
self.features.add(nn.AvgPool2D(
pool_size=7,
strides=1))
self.output = nn.HybridSequential(prefix="")
self.output.add(conv1x1(
in_channels=in_channels,
out_channels=classes,
use_bias=True))
self.output.add(nn.Flatten())
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_dla(levels,
channels,
res_body_class,
residual_root=False,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create DLA model with specific parameters.
Parameters:
----------
    levels : list of int
        Number of levels in each stage.
channels : list of int
Number of output channels for each stage.
    res_body_class : HybridBlock
Residual block body class.
    residual_root : bool, default False
        Whether to use residual connections in the root blocks.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
init_block_channels = 32
net = DLA(
levels=levels,
channels=channels,
init_block_channels=init_block_channels,
res_body_class=res_body_class,
residual_root=residual_root,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def dla34(**kwargs):
"""
DLA-34 model from 'Deep Layer Aggregation,' https://arxiv.org/abs/1707.06484.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_dla(levels=[1, 2, 2, 1], channels=[64, 128, 256, 512], res_body_class=ResBlock, model_name="dla34",
**kwargs)
def dla46c(**kwargs):
"""
DLA-46-C model from 'Deep Layer Aggregation,' https://arxiv.org/abs/1707.06484.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_dla(levels=[1, 2, 2, 1], channels=[64, 64, 128, 256], res_body_class=DLABottleneck, model_name="dla46c",
**kwargs)
def dla46xc(**kwargs):
"""
DLA-X-46-C model from 'Deep Layer Aggregation,' https://arxiv.org/abs/1707.06484.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_dla(levels=[1, 2, 2, 1], channels=[64, 64, 128, 256], res_body_class=DLABottleneckX,
model_name="dla46xc", **kwargs)
def dla60(**kwargs):
"""
DLA-60 model from 'Deep Layer Aggregation,' https://arxiv.org/abs/1707.06484.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_dla(levels=[1, 2, 3, 1], channels=[128, 256, 512, 1024], res_body_class=DLABottleneck,
model_name="dla60", **kwargs)
def dla60x(**kwargs):
"""
DLA-X-60 model from 'Deep Layer Aggregation,' https://arxiv.org/abs/1707.06484.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_dla(levels=[1, 2, 3, 1], channels=[128, 256, 512, 1024], res_body_class=DLABottleneckX,
model_name="dla60x", **kwargs)
def dla60xc(**kwargs):
"""
DLA-X-60-C model from 'Deep Layer Aggregation,' https://arxiv.org/abs/1707.06484.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_dla(levels=[1, 2, 3, 1], channels=[64, 64, 128, 256], res_body_class=DLABottleneckX,
model_name="dla60xc", **kwargs)
def dla102(**kwargs):
"""
DLA-102 model from 'Deep Layer Aggregation,' https://arxiv.org/abs/1707.06484.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_dla(levels=[1, 3, 4, 1], channels=[128, 256, 512, 1024], res_body_class=DLABottleneck,
residual_root=True, model_name="dla102", **kwargs)
def dla102x(**kwargs):
"""
DLA-X-102 model from 'Deep Layer Aggregation,' https://arxiv.org/abs/1707.06484.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_dla(levels=[1, 3, 4, 1], channels=[128, 256, 512, 1024], res_body_class=DLABottleneckX,
residual_root=True, model_name="dla102x", **kwargs)
def dla102x2(**kwargs):
"""
DLA-X2-102 model from 'Deep Layer Aggregation,' https://arxiv.org/abs/1707.06484.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
class DLABottleneckX64(DLABottleneckX):
def __init__(self, in_channels, out_channels, strides, bn_use_global_stats):
super(DLABottleneckX64, self).__init__(in_channels, out_channels, strides, bn_use_global_stats,
cardinality=64)
return get_dla(levels=[1, 3, 4, 1], channels=[128, 256, 512, 1024], res_body_class=DLABottleneckX64,
residual_root=True, model_name="dla102x2", **kwargs)
def dla169(**kwargs):
"""
DLA-169 model from 'Deep Layer Aggregation,' https://arxiv.org/abs/1707.06484.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_dla(levels=[2, 3, 5, 1], channels=[128, 256, 512, 1024], res_body_class=DLABottleneck,
residual_root=True, model_name="dla169", **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
dla34,
dla46c,
dla46xc,
dla60,
dla60x,
dla60xc,
dla102,
dla102x,
dla102x2,
dla169,
]
for model in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != dla34 or weight_count == 15742104)
assert (model != dla46c or weight_count == 1301400)
assert (model != dla46xc or weight_count == 1068440)
assert (model != dla60 or weight_count == 22036632)
assert (model != dla60x or weight_count == 17352344)
assert (model != dla60xc or weight_count == 1319832)
assert (model != dla102 or weight_count == 33268888)
assert (model != dla102x or weight_count == 26309272)
assert (model != dla102x2 or weight_count == 41282200)
assert (model != dla169 or weight_count == 53389720)
x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
y = net(x)
assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
| 23,814 | 32.401122 | 120 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/proxylessnas.py | """
ProxylessNAS for ImageNet-1K, implemented in Gluon.
Original paper: 'ProxylessNAS: Direct Neural Architecture Search on Target Task and Hardware,'
https://arxiv.org/abs/1812.00332.
"""
__all__ = ['ProxylessNAS', 'proxylessnas_cpu', 'proxylessnas_gpu', 'proxylessnas_mobile', 'proxylessnas_mobile14',
'ProxylessUnit', 'get_proxylessnas']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import ConvBlock, conv1x1_block, conv3x3_block
class ProxylessBlock(HybridBlock):
"""
ProxylessNAS block for residual path in ProxylessNAS unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int
Convolution window size.
strides : int
Strides of the convolution.
bn_epsilon : float
Small float added to variance in Batch norm.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
expansion : int
Expansion ratio.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
strides,
bn_epsilon,
bn_use_global_stats,
expansion,
**kwargs):
super(ProxylessBlock, self).__init__(**kwargs)
self.use_bc = (expansion > 1)
mid_channels = in_channels * expansion
with self.name_scope():
if self.use_bc:
self.bc_conv = conv1x1_block(
in_channels=in_channels,
out_channels=mid_channels,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
activation="relu6")
padding = (kernel_size - 1) // 2
self.dw_conv = ConvBlock(
in_channels=mid_channels,
out_channels=mid_channels,
kernel_size=kernel_size,
strides=strides,
padding=padding,
groups=mid_channels,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
activation="relu6")
self.pw_conv = conv1x1_block(
in_channels=mid_channels,
out_channels=out_channels,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
activation=None)
def hybrid_forward(self, F, x):
if self.use_bc:
x = self.bc_conv(x)
x = self.dw_conv(x)
x = self.pw_conv(x)
return x
class ProxylessUnit(HybridBlock):
"""
ProxylessNAS unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int
Convolution window size for body block.
strides : int
Strides of the convolution.
bn_epsilon : float
Small float added to variance in Batch norm.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
expansion : int
Expansion ratio for body block.
    residual : bool
        Whether to use the residual branch.
    shortcut : bool
        Whether to use the identity branch.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
strides,
bn_epsilon,
bn_use_global_stats,
expansion,
residual,
shortcut,
**kwargs):
super(ProxylessUnit, self).__init__(**kwargs)
assert (residual or shortcut)
self.residual = residual
self.shortcut = shortcut
with self.name_scope():
if self.residual:
self.body = ProxylessBlock(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
strides=strides,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
expansion=expansion)
def hybrid_forward(self, F, x):
if not self.residual:
return x
if not self.shortcut:
return self.body(x)
identity = x
x = self.body(x)
x = identity + x
return x
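    # The (residual, shortcut) pair encodes the architecture-search outcome per unit:
    # (1, 1) is an inverted-residual block with an identity skip, (1, 0) is the block
    # alone (used where resolution or width changes), and (0, 1) reduces the unit to
    # a pass-through, i.e. an operation pruned away by the search.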
class ProxylessNAS(HybridBlock):
"""
ProxylessNAS model from 'ProxylessNAS: Direct Neural Architecture Search on Target Task and Hardware,'
https://arxiv.org/abs/1812.00332.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
final_block_channels : int
Number of output channels for the final unit.
    residuals : list of list of int
        Whether to use the residual branch in the corresponding units.
    shortcuts : list of list of int
        Whether to use the identity branch in the corresponding units.
    kernel_sizes : list of list of int
        Convolution window size for each unit.
    expansions : list of list of int
        Expansion ratio for each unit.
bn_epsilon : float, default 1e-3
Small float added to variance in Batch norm.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
final_block_channels,
residuals,
shortcuts,
kernel_sizes,
expansions,
bn_epsilon=1e-3,
bn_use_global_stats=False,
in_channels=3,
in_size=(224, 224),
classes=1000,
**kwargs):
super(ProxylessNAS, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
self.features.add(conv3x3_block(
in_channels=in_channels,
out_channels=init_block_channels,
strides=2,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
activation="relu6"))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
residuals_per_stage = residuals[i]
shortcuts_per_stage = shortcuts[i]
kernel_sizes_per_stage = kernel_sizes[i]
expansions_per_stage = expansions[i]
with stage.name_scope():
for j, out_channels in enumerate(channels_per_stage):
residual = (residuals_per_stage[j] == 1)
shortcut = (shortcuts_per_stage[j] == 1)
kernel_size = kernel_sizes_per_stage[j]
expansion = expansions_per_stage[j]
strides = 2 if (j == 0) and (i != 0) else 1
stage.add(ProxylessUnit(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
strides=strides,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
expansion=expansion,
residual=residual,
shortcut=shortcut))
in_channels = out_channels
self.features.add(stage)
self.features.add(conv1x1_block(
in_channels=in_channels,
out_channels=final_block_channels,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
activation="relu6"))
in_channels = final_block_channels
self.features.add(nn.AvgPool2D(
pool_size=7,
strides=1))
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
self.output.add(nn.Dense(
units=classes,
in_units=in_channels))
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_proxylessnas(version,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create ProxylessNAS model with specific parameters.
Parameters:
----------
version : str
Version of ProxylessNAS ('cpu', 'gpu', 'mobile' or 'mobile14').
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
if version == "cpu":
residuals = [[1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 0, 0, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1]]
channels = [[24], [32, 32, 32, 32], [48, 48, 48, 48], [88, 88, 88, 88, 104, 104, 104, 104],
[216, 216, 216, 216, 360]]
kernel_sizes = [[3], [3, 3, 3, 3], [3, 3, 3, 5], [3, 3, 3, 3, 5, 3, 3, 3], [5, 5, 5, 3, 5]]
expansions = [[1], [6, 3, 3, 3], [6, 3, 3, 3], [6, 3, 3, 3, 6, 3, 3, 3], [6, 3, 3, 3, 6]]
init_block_channels = 40
final_block_channels = 1432
elif version == "gpu":
residuals = [[1], [1, 0, 0, 0], [1, 0, 0, 1], [1, 0, 0, 1, 1, 0, 1, 1], [1, 1, 1, 1, 1]]
channels = [[24], [32, 32, 32, 32], [56, 56, 56, 56], [112, 112, 112, 112, 128, 128, 128, 128],
[256, 256, 256, 256, 432]]
kernel_sizes = [[3], [5, 3, 3, 3], [7, 3, 3, 3], [7, 5, 5, 5, 5, 3, 3, 5], [7, 7, 7, 5, 7]]
expansions = [[1], [3, 3, 3, 3], [3, 3, 3, 3], [6, 3, 3, 3, 6, 3, 3, 3], [6, 6, 6, 6, 6]]
init_block_channels = 40
final_block_channels = 1728
elif version == "mobile":
residuals = [[1], [1, 1, 0, 0], [1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1]]
channels = [[16], [32, 32, 32, 32], [40, 40, 40, 40], [80, 80, 80, 80, 96, 96, 96, 96],
[192, 192, 192, 192, 320]]
kernel_sizes = [[3], [5, 3, 3, 3], [7, 3, 5, 5], [7, 5, 5, 5, 5, 5, 5, 5], [7, 7, 7, 7, 7]]
expansions = [[1], [3, 3, 3, 3], [3, 3, 3, 3], [6, 3, 3, 3, 6, 3, 3, 3], [6, 6, 3, 3, 6]]
init_block_channels = 32
final_block_channels = 1280
elif version == "mobile14":
residuals = [[1], [1, 1, 0, 0], [1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1]]
channels = [[24], [40, 40, 40, 40], [56, 56, 56, 56], [112, 112, 112, 112, 136, 136, 136, 136],
[256, 256, 256, 256, 448]]
kernel_sizes = [[3], [5, 3, 3, 3], [7, 3, 5, 5], [7, 5, 5, 5, 5, 5, 5, 5], [7, 7, 7, 7, 7]]
expansions = [[1], [3, 3, 3, 3], [3, 3, 3, 3], [6, 3, 3, 3, 6, 3, 3, 3], [6, 6, 3, 3, 6]]
init_block_channels = 48
final_block_channels = 1792
else:
raise ValueError("Unsupported ProxylessNAS version: {}".format(version))
shortcuts = [[0], [0, 1, 1, 1], [0, 1, 1, 1], [0, 1, 1, 1, 0, 1, 1, 1], [0, 1, 1, 1, 0]]
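    # `shortcuts` is shared across all versions: units that change resolution or
    # width (the first unit of each stage and the mid-stage widening unit) carry no
    # identity branch, while all remaining units keep it.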
net = ProxylessNAS(
channels=channels,
init_block_channels=init_block_channels,
final_block_channels=final_block_channels,
residuals=residuals,
shortcuts=shortcuts,
kernel_sizes=kernel_sizes,
expansions=expansions,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def proxylessnas_cpu(**kwargs):
"""
ProxylessNAS (CPU) model from 'ProxylessNAS: Direct Neural Architecture Search on Target Task and Hardware,'
https://arxiv.org/abs/1812.00332.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_proxylessnas(version="cpu", model_name="proxylessnas_cpu", **kwargs)
def proxylessnas_gpu(**kwargs):
"""
ProxylessNAS (GPU) model from 'ProxylessNAS: Direct Neural Architecture Search on Target Task and Hardware,'
https://arxiv.org/abs/1812.00332.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_proxylessnas(version="gpu", model_name="proxylessnas_gpu", **kwargs)
def proxylessnas_mobile(**kwargs):
"""
ProxylessNAS (Mobile) model from 'ProxylessNAS: Direct Neural Architecture Search on Target Task and Hardware,'
https://arxiv.org/abs/1812.00332.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_proxylessnas(version="mobile", model_name="proxylessnas_mobile", **kwargs)
def proxylessnas_mobile14(**kwargs):
"""
ProxylessNAS (Mobile-14) model from 'ProxylessNAS: Direct Neural Architecture Search on Target Task and Hardware,'
https://arxiv.org/abs/1812.00332.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_proxylessnas(version="mobile14", model_name="proxylessnas_mobile14", **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
proxylessnas_cpu,
proxylessnas_gpu,
proxylessnas_mobile,
proxylessnas_mobile14,
]
for model in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != proxylessnas_cpu or weight_count == 4361648)
assert (model != proxylessnas_gpu or weight_count == 7119848)
assert (model != proxylessnas_mobile or weight_count == 4080512)
assert (model != proxylessnas_mobile14 or weight_count == 6857568)
x = mx.nd.zeros((14, 3, 224, 224), ctx=ctx)
y = net(x)
assert (y.shape == (14, 1000))
if __name__ == "__main__":
_test()
| 16,517 | 35.788419 | 118 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/isqrtcovresnet.py | """
iSQRT-COV-ResNet for ImageNet-1K, implemented in Gluon.
Original paper: 'Towards Faster Training of Global Covariance Pooling Networks by Iterative Matrix Square Root
Normalization,' https://arxiv.org/abs/1712.01034.
"""
__all__ = ['iSQRTCOVResNet', 'isqrtcovresnet18', 'isqrtcovresnet34', 'isqrtcovresnet50', 'isqrtcovresnet50b',
'isqrtcovresnet101', 'isqrtcovresnet101b']
import os
import mxnet as mx
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1_block
from .resnet import ResUnit, ResInitBlock
class CovPool(mx.autograd.Function):
"""
Covariance pooling function.
"""
def forward(self, x):
batch, channels, height, width = x.shape
n = height * width
xn = x.reshape(batch, channels, n)
identity_bar = ((1.0 / n) * mx.nd.eye(n, ctx=xn.context, dtype=xn.dtype)).expand_dims(axis=0).repeat(
repeats=batch, axis=0)
ones_bar = mx.nd.full(shape=(batch, n, n), val=(-1.0 / n / n), ctx=xn.context, dtype=xn.dtype)
i_bar = identity_bar + ones_bar
sigma = mx.nd.batch_dot(mx.nd.batch_dot(xn, i_bar), xn.transpose(axes=(0, 2, 1)))
self.save_for_backward(x, i_bar)
return sigma
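    # forward() computes the (biased) sample covariance of the n = H * W spatial
    # descriptors: sigma = X * I_bar * X^T with I_bar = (1/n) * (I - (1/n) * 1 * 1^T),
    # i.e. centering followed by averaging, producing a (batch, C, C) tensor.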
def backward(self, grad_sigma):
x, i_bar = self.saved_tensors
batch, channels, height, width = x.shape
n = height * width
xn = x.reshape(batch, channels, n)
grad_x = grad_sigma + grad_sigma.transpose(axes=(0, 2, 1))
grad_x = mx.nd.batch_dot(mx.nd.batch_dot(grad_x, xn), i_bar)
grad_x = grad_x.reshape(batch, channels, height, width)
return grad_x
class NewtonSchulzSqrt(mx.autograd.Function):
"""
Newton-Schulz iterative matrix square root function.
Parameters:
----------
n : int
Number of iterations (n > 1).
"""
def __init__(self, n):
super(NewtonSchulzSqrt, self).__init__()
assert (n > 1)
self.n = n
def forward(self, x):
n = self.n
batch, cols, rows = x.shape
assert (cols == rows)
m = cols
identity = mx.nd.eye(m, ctx=x.context, dtype=x.dtype).expand_dims(axis=0).repeat(repeats=batch, axis=0)
x_trace = (x * identity).sum(axis=(1, 2), keepdims=True)
a = x / x_trace
i3 = 3.0 * identity
yi = mx.nd.zeros(shape=(batch, n - 1, m, m), ctx=x.context, dtype=x.dtype)
zi = mx.nd.zeros(shape=(batch, n - 1, m, m), ctx=x.context, dtype=x.dtype)
b2 = 0.5 * (i3 - a)
yi[:, 0, :, :] = mx.nd.batch_dot(a, b2)
zi[:, 0, :, :] = b2
for i in range(1, n - 1):
b2 = 0.5 * (i3 - mx.nd.batch_dot(zi[:, i - 1, :, :], yi[:, i - 1, :, :]))
yi[:, i, :, :] = mx.nd.batch_dot(yi[:, i - 1, :, :], b2)
zi[:, i, :, :] = mx.nd.batch_dot(b2, zi[:, i - 1, :, :])
b2 = 0.5 * (i3 - mx.nd.batch_dot(zi[:, n - 2, :, :], yi[:, n - 2, :, :]))
yn = mx.nd.batch_dot(yi[:, n - 2, :, :], b2)
x_trace_sqrt = x_trace.sqrt()
c = yn * x_trace_sqrt
self.save_for_backward(x, x_trace, a, yi, zi, yn, x_trace_sqrt)
return c
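    # forward() runs the coupled Newton-Schulz iteration
    #   Y_{k+1} = Y_k * (3I - Z_k * Y_k) / 2,   Z_{k+1} = (3I - Z_k * Y_k) * Z_k / 2,
    # starting from Y_0 = A, Z_0 = I with A = x / trace(x) (the pre-normalization
    # keeps the iteration convergent), so Y_k -> A^(1/2); the output is rescaled by
    # sqrt(trace(x)) to recover x^(1/2).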
def backward(self, grad_c):
x, x_trace, a, yi, zi, yn, x_trace_sqrt = self.saved_tensors
n = self.n
batch, m, _ = x.shape
identity0 = mx.nd.eye(m, ctx=x.context, dtype=x.dtype)
identity = identity0.expand_dims(axis=0).repeat(repeats=batch, axis=0)
i3 = 3.0 * identity
grad_yn = grad_c * x_trace_sqrt
b = i3 - mx.nd.batch_dot(yi[:, n - 2, :, :], zi[:, n - 2, :, :])
grad_yi = 0.5 * (mx.nd.batch_dot(grad_yn, b) - mx.nd.batch_dot(mx.nd.batch_dot(
zi[:, n - 2, :, :], yi[:, n - 2, :, :]), grad_yn))
grad_zi = -0.5 * mx.nd.batch_dot(mx.nd.batch_dot(yi[:, n - 2, :, :], grad_yn), yi[:, n - 2, :, :])
for i in range(n - 3, -1, -1):
b = i3 - mx.nd.batch_dot(yi[:, i, :, :], zi[:, i, :, :])
ziyi = mx.nd.batch_dot(zi[:, i, :, :], yi[:, i, :, :])
grad_yi_m1 = 0.5 * (mx.nd.batch_dot(grad_yi, b) - mx.nd.batch_dot(mx.nd.batch_dot(
zi[:, i, :, :], grad_zi), zi[:, i, :, :]) - mx.nd.batch_dot(ziyi, grad_yi))
grad_zi_m1 = 0.5 * (mx.nd.batch_dot(b, grad_zi) - mx.nd.batch_dot(mx.nd.batch_dot(
yi[:, i, :, :], grad_yi), yi[:, i, :, :]) - mx.nd.batch_dot(grad_zi, ziyi))
grad_yi = grad_yi_m1
grad_zi = grad_zi_m1
grad_a = 0.5 * (mx.nd.batch_dot(grad_yi, i3 - a) - grad_zi - mx.nd.batch_dot(a, grad_yi))
x_trace_sqr = x_trace * x_trace
grad_atx_trace = (mx.nd.batch_dot(grad_a.transpose(axes=(0, 2, 1)), x) * identity).sum(
axis=(1, 2), keepdims=True)
grad_cty_trace = (mx.nd.batch_dot(grad_c.transpose(axes=(0, 2, 1)), yn) * identity).sum(
axis=(1, 2), keepdims=True)
grad_x_extra = (0.5 * grad_cty_trace / x_trace_sqrt - grad_atx_trace / x_trace_sqr).tile(
reps=(1, m, m)) * identity
grad_x = grad_a / x_trace + grad_x_extra
return grad_x
class Triuvec(mx.autograd.Function):
"""
Extract upper triangular part of matrix into vector form.
"""
def forward(self, x):
batch, cols, rows = x.shape
assert (cols == rows)
n = cols
import numpy as np
triuvec_inds = np.triu(np.ones(n)).reshape(-1).nonzero()[0]
x_vec = x.reshape(batch, -1)
y = x_vec[:, triuvec_inds]
self.save_for_backward(x, triuvec_inds)
return y
def backward(self, grad_y):
x, triuvec_inds = self.saved_tensors
batch, n, _ = x.shape
grad_x = mx.nd.zeros_like(x).reshape(batch, -1)
grad_x[:, triuvec_inds] = grad_y
grad_x = grad_x.reshape(batch, n, n)
return grad_x
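    # For a (batch, c, c) input the extracted vector has length c * (c + 1) // 2;
    # with the 256-channel final block used below this gives 256 * 257 // 2 = 32896
    # features per image for the classifier.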
class iSQRTCOVPool(HybridBlock):
"""
iSQRT-COV pooling layer.
Parameters:
----------
num_iter : int, default 5
Number of iterations (num_iter > 1).
"""
def __init__(self,
num_iter=5,
**kwargs):
super(iSQRTCOVPool, self).__init__(**kwargs)
with self.name_scope():
self.cov_pool = CovPool()
self.sqrt = NewtonSchulzSqrt(num_iter)
self.triuvec = Triuvec()
def hybrid_forward(self, F, x):
x = self.cov_pool(x)
x = self.sqrt(x)
x = self.triuvec(x)
return x
class iSQRTCOVResNet(HybridBlock):
"""
iSQRT-COV-ResNet model from 'Towards Faster Training of Global Covariance Pooling Networks by Iterative Matrix
Square Root Normalization,' https://arxiv.org/abs/1712.01034.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
final_block_channels : int
Number of output channels for the final unit.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
conv1_stride : bool
Whether to use stride in the first or the second convolution layer in units.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
final_block_channels,
bottleneck,
conv1_stride,
bn_use_global_stats=False,
in_channels=3,
in_size=(224, 224),
classes=1000):
super(iSQRTCOVResNet, self).__init__()
self.in_size = in_size
self.classes = classes
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
self.features.add(ResInitBlock(
in_channels=in_channels,
out_channels=init_block_channels,
bn_use_global_stats=bn_use_global_stats))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
with stage.name_scope():
for j, out_channels in enumerate(channels_per_stage):
strides = 2 if (j == 0) and (i not in [0, len(channels) - 1]) else 1
stage.add(ResUnit(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats,
bottleneck=bottleneck,
conv1_stride=conv1_stride))
in_channels = out_channels
self.features.add(stage)
self.features.add(conv1x1_block(
in_channels=in_channels,
out_channels=final_block_channels,
bn_use_global_stats=bn_use_global_stats))
in_channels = final_block_channels
self.features.add(iSQRTCOVPool())
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
in_units = in_channels * (in_channels + 1) // 2
self.output.add(nn.Dense(
units=classes,
in_units=in_units))
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_isqrtcovresnet(blocks,
conv1_stride=True,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create iSQRT-COV-ResNet model with specific parameters.
Parameters:
----------
blocks : int
Number of blocks.
conv1_stride : bool, default True
Whether to use stride in the first or the second convolution layer in units.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
if blocks == 18:
layers = [2, 2, 2, 2]
elif blocks == 34:
layers = [3, 4, 6, 3]
elif blocks == 50:
layers = [3, 4, 6, 3]
elif blocks == 101:
layers = [3, 4, 23, 3]
elif blocks == 152:
layers = [3, 8, 36, 3]
elif blocks == 200:
layers = [3, 24, 36, 3]
else:
raise ValueError("Unsupported iSQRT-COV-ResNet with number of blocks: {}".format(blocks))
init_block_channels = 64
final_block_channels = 256
if blocks < 50:
channels_per_layers = [64, 128, 256, 512]
bottleneck = False
else:
channels_per_layers = [256, 512, 1024, 2048]
bottleneck = True
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
net = iSQRTCOVResNet(
channels=channels,
init_block_channels=init_block_channels,
final_block_channels=final_block_channels,
bottleneck=bottleneck,
conv1_stride=conv1_stride,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def isqrtcovresnet18(**kwargs):
"""
iSQRT-COV-ResNet-18 model from 'Towards Faster Training of Global Covariance Pooling Networks by Iterative Matrix
Square Root Normalization,' https://arxiv.org/abs/1712.01034.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_isqrtcovresnet(blocks=18, model_name="isqrtcovresnet18", **kwargs)
def isqrtcovresnet34(**kwargs):
"""
iSQRT-COV-ResNet-34 model from 'Towards Faster Training of Global Covariance Pooling Networks by Iterative Matrix
Square Root Normalization,' https://arxiv.org/abs/1712.01034.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_isqrtcovresnet(blocks=34, model_name="isqrtcovresnet34", **kwargs)
def isqrtcovresnet50(**kwargs):
"""
iSQRT-COV-ResNet-50 model from 'Towards Faster Training of Global Covariance Pooling Networks by Iterative Matrix
Square Root Normalization,' https://arxiv.org/abs/1712.01034.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_isqrtcovresnet(blocks=50, model_name="isqrtcovresnet50", **kwargs)
def isqrtcovresnet50b(**kwargs):
"""
iSQRT-COV-ResNet-50 model with stride at the second convolution in bottleneck block from 'Towards Faster Training
of Global Covariance Pooling Networks by Iterative Matrix Square Root Normalization,'
https://arxiv.org/abs/1712.01034.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_isqrtcovresnet(blocks=50, conv1_stride=False, model_name="isqrtcovresnet50b", **kwargs)
def isqrtcovresnet101(**kwargs):
"""
iSQRT-COV-ResNet-101 model from 'Towards Faster Training of Global Covariance Pooling Networks by Iterative Matrix
Square Root Normalization,' https://arxiv.org/abs/1712.01034.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_isqrtcovresnet(blocks=101, model_name="isqrtcovresnet101", **kwargs)
def isqrtcovresnet101b(**kwargs):
"""
iSQRT-COV-ResNet-101 model with stride at the second convolution in bottleneck block from 'Towards Faster Training
of Global Covariance Pooling Networks by Iterative Matrix Square Root Normalization,'
https://arxiv.org/abs/1712.01034.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_isqrtcovresnet(blocks=101, conv1_stride=False, model_name="isqrtcovresnet101b", **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
isqrtcovresnet18,
isqrtcovresnet34,
isqrtcovresnet50,
isqrtcovresnet50b,
isqrtcovresnet101,
isqrtcovresnet101b,
]
for model in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != isqrtcovresnet18 or weight_count == 44205096)
assert (model != isqrtcovresnet34 or weight_count == 54313256)
assert (model != isqrtcovresnet50 or weight_count == 56929832)
assert (model != isqrtcovresnet50b or weight_count == 56929832)
assert (model != isqrtcovresnet101 or weight_count == 75921960)
assert (model != isqrtcovresnet101b or weight_count == 75921960)
x = mx.nd.random.randn(14, 3, 224, 224, ctx=ctx)
# y = net(x)
x.attach_grad()
with mx.autograd.record():
y = net(x)
y.backward()
# print(x.grad)
assert (y.shape == (14, 1000))
if __name__ == "__main__":
_test()
| 17,607 | 35.683333 | 118 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/shufflenetv2.py | """
ShuffleNet V2 for ImageNet-1K, implemented in Gluon.
Original paper: 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,'
https://arxiv.org/abs/1807.11164.
"""
__all__ = ['ShuffleNetV2', 'shufflenetv2_wd2', 'shufflenetv2_w1', 'shufflenetv2_w3d2', 'shufflenetv2_w2']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1, depthwise_conv3x3, conv1x1_block, conv3x3_block, ChannelShuffle, SEBlock
class ShuffleUnit(HybridBlock):
"""
ShuffleNetV2 unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
    downsample : bool
        Whether to do downsampling.
    use_se : bool
        Whether to use an SE block.
    use_residual : bool
        Whether to use a residual connection.
"""
def __init__(self,
in_channels,
out_channels,
downsample,
use_se,
use_residual,
**kwargs):
super(ShuffleUnit, self).__init__(**kwargs)
self.downsample = downsample
self.use_se = use_se
self.use_residual = use_residual
mid_channels = out_channels // 2
with self.name_scope():
self.compress_conv1 = conv1x1(
in_channels=(in_channels if self.downsample else mid_channels),
out_channels=mid_channels)
self.compress_bn1 = nn.BatchNorm(in_channels=mid_channels)
self.dw_conv2 = depthwise_conv3x3(
channels=mid_channels,
strides=(2 if self.downsample else 1))
self.dw_bn2 = nn.BatchNorm(in_channels=mid_channels)
self.expand_conv3 = conv1x1(
in_channels=mid_channels,
out_channels=mid_channels)
self.expand_bn3 = nn.BatchNorm(in_channels=mid_channels)
if self.use_se:
self.se = SEBlock(channels=mid_channels)
if downsample:
self.dw_conv4 = depthwise_conv3x3(
channels=in_channels,
strides=2)
self.dw_bn4 = nn.BatchNorm(in_channels=in_channels)
self.expand_conv5 = conv1x1(
in_channels=in_channels,
out_channels=mid_channels)
self.expand_bn5 = nn.BatchNorm(in_channels=mid_channels)
self.activ = nn.Activation("relu")
self.c_shuffle = ChannelShuffle(
channels=out_channels,
groups=2)
def hybrid_forward(self, F, x):
if self.downsample:
y1 = self.dw_conv4(x)
y1 = self.dw_bn4(y1)
y1 = self.expand_conv5(y1)
y1 = self.expand_bn5(y1)
y1 = self.activ(y1)
x2 = x
else:
y1, x2 = F.split(x, axis=1, num_outputs=2)
y2 = self.compress_conv1(x2)
y2 = self.compress_bn1(y2)
y2 = self.activ(y2)
y2 = self.dw_conv2(y2)
y2 = self.dw_bn2(y2)
y2 = self.expand_conv3(y2)
y2 = self.expand_bn3(y2)
y2 = self.activ(y2)
if self.use_se:
y2 = self.se(y2)
if self.use_residual and not self.downsample:
y2 = y2 + x2
x = F.concat(y1, y2, dim=1)
x = self.c_shuffle(x)
return x
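    # Regular unit: the input is split channel-wise in half, y1 passes through
    # untouched while x2 goes through the 1x1 -> depthwise 3x3 -> 1x1 stack.
    # Downsampling unit: both branches process the full input, each emitting
    # out_channels // 2 maps at half resolution. The final channel shuffle mixes
    # information between the two branches.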
class ShuffleInitBlock(HybridBlock):
"""
ShuffleNetV2 specific initial block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
"""
def __init__(self,
in_channels,
out_channels,
**kwargs):
super(ShuffleInitBlock, self).__init__(**kwargs)
with self.name_scope():
self.conv = conv3x3_block(
in_channels=in_channels,
out_channels=out_channels,
strides=2)
self.pool = nn.MaxPool2D(
pool_size=3,
strides=2,
padding=0,
ceil_mode=True)
def hybrid_forward(self, F, x):
x = self.conv(x)
x = self.pool(x)
return x
class ShuffleNetV2(HybridBlock):
"""
ShuffleNetV2 model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,'
https://arxiv.org/abs/1807.11164.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
final_block_channels : int
Number of output channels for the final block of the feature extractor.
use_se : bool, default False
Whether to use SE block.
use_residual : bool, default False
Whether to use residual connections.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
final_block_channels,
use_se=False,
use_residual=False,
in_channels=3,
in_size=(224, 224),
classes=1000,
**kwargs):
super(ShuffleNetV2, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
self.features.add(ShuffleInitBlock(
in_channels=in_channels,
out_channels=init_block_channels))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
with stage.name_scope():
for j, out_channels in enumerate(channels_per_stage):
downsample = (j == 0)
stage.add(ShuffleUnit(
in_channels=in_channels,
out_channels=out_channels,
downsample=downsample,
use_se=use_se,
use_residual=use_residual))
in_channels = out_channels
self.features.add(stage)
self.features.add(conv1x1_block(
in_channels=in_channels,
out_channels=final_block_channels))
in_channels = final_block_channels
self.features.add(nn.AvgPool2D(
pool_size=7,
strides=1))
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
self.output.add(nn.Dense(
units=classes,
in_units=in_channels))
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_shufflenetv2(width_scale,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create ShuffleNetV2 model with specific parameters.
Parameters:
----------
width_scale : float
Scale factor for width of layers.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
init_block_channels = 24
final_block_channels = 1024
layers = [4, 8, 4]
channels_per_layers = [116, 232, 464]
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
if width_scale != 1.0:
channels = [[int(cij * width_scale) for cij in ci] for ci in channels]
if width_scale > 1.5:
final_block_channels = int(final_block_channels * width_scale)
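    # The registered width scales are exact fractions with denominator 29 so that
    # every scaled channel count stays integral: 12/29 -> [48, 96, 192],
    # 44/29 -> [176, 352, 704], 61/29 -> [244, 488, 976].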
net = ShuffleNetV2(
channels=channels,
init_block_channels=init_block_channels,
final_block_channels=final_block_channels,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def shufflenetv2_wd2(**kwargs):
"""
ShuffleNetV2 0.5x model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,'
https://arxiv.org/abs/1807.11164.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_shufflenetv2(width_scale=(12.0 / 29.0), model_name="shufflenetv2_wd2", **kwargs)
def shufflenetv2_w1(**kwargs):
"""
ShuffleNetV2 1x model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,'
https://arxiv.org/abs/1807.11164.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_shufflenetv2(width_scale=1.0, model_name="shufflenetv2_w1", **kwargs)
def shufflenetv2_w3d2(**kwargs):
"""
ShuffleNetV2 1.5x model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,'
https://arxiv.org/abs/1807.11164.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_shufflenetv2(width_scale=(44.0 / 29.0), model_name="shufflenetv2_w3d2", **kwargs)
def shufflenetv2_w2(**kwargs):
"""
ShuffleNetV2 2x model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,'
https://arxiv.org/abs/1807.11164.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_shufflenetv2(width_scale=(61.0 / 29.0), model_name="shufflenetv2_w2", **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
shufflenetv2_wd2,
shufflenetv2_w1,
shufflenetv2_w3d2,
shufflenetv2_w2,
]
for model in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != shufflenetv2_wd2 or weight_count == 1366792)
assert (model != shufflenetv2_w1 or weight_count == 2278604)
assert (model != shufflenetv2_w3d2 or weight_count == 4406098)
assert (model != shufflenetv2_w2 or weight_count == 7601686)
x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
y = net(x)
assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
| 12,524 | 32.4 | 115 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/fishnet.py | """
FishNet for ImageNet-1K, implemented in Gluon.
Original paper: 'FishNet: A Versatile Backbone for Image, Region, and Pixel Level Prediction,'
http://papers.nips.cc/paper/7356-fishnet-a-versatile-backbone-for-image-region-and-pixel-level-prediction.pdf.
"""
__all__ = ['FishNet', 'fishnet99', 'fishnet150', 'ChannelSqueeze']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from mxnet.gluon.contrib.nn import Identity
from .common import pre_conv1x1_block, pre_conv3x3_block, conv1x1, SesquialteralHourglass, InterpolationBlock
from .preresnet import PreResActivation
from .senet import SEInitBlock
def channel_squeeze(x,
channels_per_group):
"""
Channel squeeze operation.
Parameters:
----------
x : NDArray
Input tensor.
channels_per_group : int
Number of channels per group.
Returns:
-------
NDArray
Resulted tensor.
"""
return x.reshape((0, -4, channels_per_group, -1, -2)).sum(axis=2)
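# A minimal sketch (not part of the original file) of the reshape trick above.
# In MXNet reshape specs, 0 copies a dimension, -4 splits the next dimension
# into two (here channels_per_group and an inferred group count), and -2 copies
# the remaining dimensions, so summing over axis=2 collapses the groups and an
# input of shape (N, C, H, W) becomes (N, C // groups, H, W).
def _channel_squeeze_demo():
    import mxnet as mx
    x = mx.nd.arange(2 * 6 * 4 * 4).reshape((2, 6, 4, 4))
    y = channel_squeeze(x, channels_per_group=3)  # i.e. two groups of three channels
    assert (y.shape == (2, 3, 4, 4))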
class ChannelSqueeze(HybridBlock):
"""
    Channel squeeze layer. This is a wrapper over the same operation that stores the number of groups.
Parameters:
----------
channels : int
Number of channels.
groups : int
Number of groups.
"""
def __init__(self,
channels,
groups,
**kwargs):
super(ChannelSqueeze, self).__init__(**kwargs)
assert (channels % groups == 0)
self.channels_per_group = channels // groups
def hybrid_forward(self, F, x):
return channel_squeeze(x, self.channels_per_group)
class PreSEAttBlock(HybridBlock):
"""
FishNet specific Squeeze-and-Excitation attention block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
reduction : int, default 16
Squeeze reduction value.
"""
def __init__(self,
in_channels,
out_channels,
bn_use_global_stats,
reduction=16,
**kwargs):
super(PreSEAttBlock, self).__init__(**kwargs)
        mid_channels = out_channels // reduction
with self.name_scope():
self.bn = nn.BatchNorm(
in_channels=in_channels,
use_global_stats=bn_use_global_stats)
self.relu = nn.Activation("relu")
self.conv1 = conv1x1(
in_channels=in_channels,
                out_channels=mid_channels,
use_bias=True)
self.conv2 = conv1x1(
                in_channels=mid_channels,
out_channels=out_channels,
use_bias=True)
self.sigmoid = nn.Activation("sigmoid")
def hybrid_forward(self, F, x):
x = self.bn(x)
x = self.relu(x)
x = F.contrib.AdaptiveAvgPooling2D(x, output_size=1)
x = self.conv1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.sigmoid(x)
return x
class FishBottleneck(HybridBlock):
"""
FishNet bottleneck block for residual unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
dilation : int or tuple/list of 2 int
Dilation value for convolution layer.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels,
out_channels,
strides,
dilation,
bn_use_global_stats,
**kwargs):
super(FishBottleneck, self).__init__(**kwargs)
mid_channels = out_channels // 4
with self.name_scope():
self.conv1 = pre_conv1x1_block(
in_channels=in_channels,
out_channels=mid_channels,
bn_use_global_stats=bn_use_global_stats)
self.conv2 = pre_conv3x3_block(
in_channels=mid_channels,
out_channels=mid_channels,
strides=strides,
padding=dilation,
dilation=dilation,
bn_use_global_stats=bn_use_global_stats)
self.conv3 = pre_conv1x1_block(
in_channels=mid_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats)
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
return x
class FishBlock(HybridBlock):
"""
FishNet block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int, default 1
Strides of the convolution.
dilation : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
squeeze : bool, default False
Whether to use a channel squeeze operation.
"""
def __init__(self,
in_channels,
out_channels,
strides=1,
dilation=1,
bn_use_global_stats=False,
squeeze=False,
**kwargs):
super(FishBlock, self).__init__(**kwargs)
self.squeeze = squeeze
self.resize_identity = (in_channels != out_channels) or (strides != 1)
with self.name_scope():
self.body = FishBottleneck(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
dilation=dilation,
bn_use_global_stats=bn_use_global_stats)
if self.squeeze:
assert (in_channels // 2 == out_channels)
self.c_squeeze = ChannelSqueeze(
channels=in_channels,
groups=2)
elif self.resize_identity:
self.identity_conv = pre_conv1x1_block(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats)
def hybrid_forward(self, F, x):
if self.squeeze:
identity = self.c_squeeze(x)
elif self.resize_identity:
identity = self.identity_conv(x)
else:
identity = x
x = self.body(x)
x = x + identity
return x
class DownUnit(HybridBlock):
"""
FishNet down unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels_list : list of int
Number of output channels for each block.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels,
out_channels_list,
bn_use_global_stats,
**kwargs):
super(DownUnit, self).__init__(**kwargs)
with self.name_scope():
self.blocks = nn.HybridSequential(prefix="")
for i, out_channels in enumerate(out_channels_list):
self.blocks.add(FishBlock(
in_channels=in_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats))
in_channels = out_channels
self.pool = nn.MaxPool2D(
pool_size=2,
strides=2)
def hybrid_forward(self, F, x):
x = self.blocks(x)
x = self.pool(x)
return x
class UpUnit(HybridBlock):
"""
FishNet up unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels_list : list of int
Number of output channels for each block.
dilation : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels,
out_channels_list,
dilation=1,
bn_use_global_stats=False,
**kwargs):
super(UpUnit, self).__init__(**kwargs)
with self.name_scope():
self.blocks = nn.HybridSequential(prefix="")
for i, out_channels in enumerate(out_channels_list):
squeeze = (dilation > 1) and (i == 0)
self.blocks.add(FishBlock(
in_channels=in_channels,
out_channels=out_channels,
dilation=dilation,
bn_use_global_stats=bn_use_global_stats,
squeeze=squeeze))
in_channels = out_channels
self.upsample = InterpolationBlock(scale_factor=2, bilinear=False)
def hybrid_forward(self, F, x):
x = self.blocks(x)
x = self.upsample(x)
return x
class SkipUnit(HybridBlock):
"""
FishNet skip connection unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels_list : list of int
Number of output channels for each block.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels,
out_channels_list,
bn_use_global_stats,
**kwargs):
super(SkipUnit, self).__init__(**kwargs)
with self.name_scope():
self.blocks = nn.HybridSequential(prefix="")
for i, out_channels in enumerate(out_channels_list):
self.blocks.add(FishBlock(
in_channels=in_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats))
in_channels = out_channels
def hybrid_forward(self, F, x):
x = self.blocks(x)
return x
class SkipAttUnit(HybridBlock):
"""
FishNet skip connection unit with attention block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels_list : list of int
Number of output channels for each block.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels,
out_channels_list,
bn_use_global_stats,
**kwargs):
super(SkipAttUnit, self).__init__(**kwargs)
mid_channels1 = in_channels // 2
mid_channels2 = 2 * in_channels
with self.name_scope():
self.conv1 = pre_conv1x1_block(
in_channels=in_channels,
out_channels=mid_channels1,
bn_use_global_stats=bn_use_global_stats)
self.conv2 = pre_conv1x1_block(
in_channels=mid_channels1,
out_channels=mid_channels2,
use_bias=True,
bn_use_global_stats=bn_use_global_stats)
in_channels = mid_channels2
self.se = PreSEAttBlock(
in_channels=mid_channels2,
out_channels=out_channels_list[-1],
bn_use_global_stats=bn_use_global_stats)
self.blocks = nn.HybridSequential(prefix="")
for i, out_channels in enumerate(out_channels_list):
self.blocks.add(FishBlock(
in_channels=in_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats))
in_channels = out_channels
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.conv2(x)
w = self.se(x)
x = self.blocks(x)
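        # Attention-gated skip: w has shape (N, C, 1, 1) and broadcasts over the
        # spatial dimensions, so the line below computes x * w + w.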
x = F.broadcast_add(F.broadcast_mul(x, w), w)
return x
class FishFinalBlock(HybridBlock):
"""
FishNet final block.
Parameters:
----------
in_channels : int
Number of input channels.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels,
bn_use_global_stats,
**kwargs):
super(FishFinalBlock, self).__init__(**kwargs)
mid_channels = in_channels // 2
with self.name_scope():
self.conv1 = pre_conv1x1_block(
in_channels=in_channels,
out_channels=mid_channels,
bn_use_global_stats=bn_use_global_stats)
self.preactiv = PreResActivation(
in_channels=mid_channels,
bn_use_global_stats=bn_use_global_stats)
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.preactiv(x)
return x
class FishNet(HybridBlock):
"""
FishNet model from 'FishNet: A Versatile Backbone for Image, Region, and Pixel Level Prediction,'
http://papers.nips.cc/paper/7356-fishnet-a-versatile-backbone-for-image-region-and-pixel-level-prediction.pdf.
Parameters:
----------
direct_channels : list of list of list of int
Number of output channels for each unit along the straight path.
skip_channels : list of list of list of int
Number of output channels for each skip connection unit.
init_block_channels : int
Number of output channels for the initial unit.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
direct_channels,
skip_channels,
init_block_channels,
bn_use_global_stats=False,
in_channels=3,
in_size=(224, 224),
classes=1000,
**kwargs):
super(FishNet, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
depth = len(direct_channels[0])
down1_channels = direct_channels[0]
up_channels = direct_channels[1]
down2_channels = direct_channels[2]
skip1_channels = skip_channels[0]
skip2_channels = skip_channels[1]
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
self.features.add(SEInitBlock(
in_channels=in_channels,
out_channels=init_block_channels,
bn_use_global_stats=bn_use_global_stats))
in_channels = init_block_channels
down1_seq = nn.HybridSequential(prefix="")
skip1_seq = nn.HybridSequential(prefix="")
for i in range(depth + 1):
skip1_channels_list = skip1_channels[i]
if i < depth:
skip1_seq.add(SkipUnit(
in_channels=in_channels,
out_channels_list=skip1_channels_list,
bn_use_global_stats=bn_use_global_stats))
down1_channels_list = down1_channels[i]
down1_seq.add(DownUnit(
in_channels=in_channels,
out_channels_list=down1_channels_list,
bn_use_global_stats=bn_use_global_stats))
in_channels = down1_channels_list[-1]
else:
skip1_seq.add(SkipAttUnit(
in_channels=in_channels,
out_channels_list=skip1_channels_list,
bn_use_global_stats=bn_use_global_stats))
in_channels = skip1_channels_list[-1]
up_seq = nn.HybridSequential(prefix="")
skip2_seq = nn.HybridSequential(prefix="")
for i in range(depth + 1):
skip2_channels_list = skip2_channels[i]
if i > 0:
in_channels += skip1_channels[depth - i][-1]
if i < depth:
skip2_seq.add(SkipUnit(
in_channels=in_channels,
out_channels_list=skip2_channels_list,
bn_use_global_stats=bn_use_global_stats))
up_channels_list = up_channels[i]
dilation = 2 ** i
up_seq.add(UpUnit(
in_channels=in_channels,
out_channels_list=up_channels_list,
dilation=dilation,
bn_use_global_stats=bn_use_global_stats))
in_channels = up_channels_list[-1]
else:
skip2_seq.add(Identity())
down2_seq = nn.HybridSequential(prefix="")
for i in range(depth):
down2_channels_list = down2_channels[i]
down2_seq.add(DownUnit(
in_channels=in_channels,
out_channels_list=down2_channels_list,
bn_use_global_stats=bn_use_global_stats))
in_channels = down2_channels_list[-1] + skip2_channels[depth - 1 - i][-1]
self.features.add(SesquialteralHourglass(
down1_seq=down1_seq,
skip1_seq=skip1_seq,
up_seq=up_seq,
skip2_seq=skip2_seq,
down2_seq=down2_seq))
self.features.add(FishFinalBlock(
in_channels=in_channels,
bn_use_global_stats=bn_use_global_stats))
in_channels = in_channels // 2
self.features.add(nn.AvgPool2D(
pool_size=7,
strides=1))
self.output = nn.HybridSequential(prefix="")
self.output.add(conv1x1(
in_channels=in_channels,
out_channels=classes,
use_bias=True))
self.output.add(nn.Flatten())
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_fishnet(blocks,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create FishNet model with specific parameters.
Parameters:
----------
blocks : int
Number of blocks.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
if blocks == 99:
direct_layers = [[2, 2, 6], [1, 1, 1], [1, 2, 2]]
skip_layers = [[1, 1, 1, 2], [4, 1, 1, 0]]
elif blocks == 150:
direct_layers = [[2, 4, 8], [2, 2, 2], [2, 2, 4]]
skip_layers = [[2, 2, 2, 4], [4, 2, 2, 0]]
else:
raise ValueError("Unsupported FishNet with number of blocks: {}".format(blocks))
direct_channels_per_layers = [[128, 256, 512], [512, 384, 256], [320, 832, 1600]]
skip_channels_per_layers = [[64, 128, 256, 512], [512, 768, 512, 0]]
    direct_channels = [[[b] * c for (b, c) in zip(ci, li)]
                       for (ci, li) in zip(direct_channels_per_layers, direct_layers)]
    skip_channels = [[[b] * c for (b, c) in zip(ci, li)]
                     for (ci, li) in zip(skip_channels_per_layers, skip_layers)]
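    # A worked expansion (not in the original file) for blocks == 99: each count
    # in direct_layers/skip_layers repeats the matching channel width, e.g.
    # direct_channels[0] == [[128, 128], [256, 256], [512] * 6] and
    # skip_channels[1] == [[512] * 4, [768], [512], []].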
init_block_channels = 64
net = FishNet(
direct_channels=direct_channels,
skip_channels=skip_channels,
init_block_channels=init_block_channels,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def fishnet99(**kwargs):
"""
FishNet-99 model from 'FishNet: A Versatile Backbone for Image, Region, and Pixel Level Prediction,'
http://papers.nips.cc/paper/7356-fishnet-a-versatile-backbone-for-image-region-and-pixel-level-prediction.pdf.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_fishnet(blocks=99, model_name="fishnet99", **kwargs)
def fishnet150(**kwargs):
"""
FishNet-150 model from 'FishNet: A Versatile Backbone for Image, Region, and Pixel Level Prediction,'
http://papers.nips.cc/paper/7356-fishnet-a-versatile-backbone-for-image-region-and-pixel-level-prediction.pdf.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_fishnet(blocks=150, model_name="fishnet150", **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
fishnet99,
fishnet150,
]
for model in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != fishnet99 or weight_count == 16628904)
assert (model != fishnet150 or weight_count == 24959400)
x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
y = net(x)
assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
| 23,458 | 33.097384 | 115 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/hrnet.py | """
HRNet for ImageNet-1K, implemented in Gluon.
Original paper: 'Deep High-Resolution Representation Learning for Visual Recognition,'
https://arxiv.org/abs/1908.07919.
"""
__all__ = ['hrnet_w18_small_v1', 'hrnet_w18_small_v2', 'hrnetv2_w18', 'hrnetv2_w30', 'hrnetv2_w32', 'hrnetv2_w40',
'hrnetv2_w44', 'hrnetv2_w48', 'hrnetv2_w64']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from mxnet.gluon.contrib.nn import Identity
from .common import conv1x1_block, conv3x3_block, DualPathSequential
from .resnet import ResUnit
class UpSamplingBlock(HybridBlock):
"""
    HRNet specific upsampling block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
scale_factor : int
Multiplier for spatial size.
"""
def __init__(self,
in_channels,
out_channels,
bn_use_global_stats,
scale_factor,
**kwargs):
super(UpSamplingBlock, self).__init__(**kwargs)
self.scale_factor = scale_factor
with self.name_scope():
self.conv = conv1x1_block(
in_channels=in_channels,
out_channels=out_channels,
strides=1,
activation=None,
bn_use_global_stats=bn_use_global_stats)
def hybrid_forward(self, F, x):
x = self.conv(x)
return F.UpSampling(x, scale=self.scale_factor, sample_type="nearest")
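# A minimal sketch (not part of the original file) of the shape behaviour of
# this block, assuming scale_factor=2 and a default-initialized CPU context:
def _upsampling_block_demo():
    import mxnet as mx
    block = UpSamplingBlock(in_channels=36, out_channels=18,
                            bn_use_global_stats=False, scale_factor=2)
    block.initialize()
    x = mx.nd.zeros((1, 36, 14, 14))
    y = block(x)
    # The 1x1 conv halves the channels; nearest upsampling doubles H and W.
    assert (y.shape == (1, 18, 28, 28))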
class HRBlock(HybridBlock):
"""
    HRNet block.
Parameters:
----------
in_channels_list : list of int
Number of input channels.
out_channels_list : list of int
Number of output channels.
num_branches : int
Number of branches.
num_subblocks : list of int
        Number of subblocks per branch.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels_list,
out_channels_list,
num_branches,
num_subblocks,
bn_use_global_stats,
**kwargs):
super(HRBlock, self).__init__(**kwargs)
self.in_channels_list = in_channels_list
self.num_branches = num_branches
with self.name_scope():
self.branches = nn.HybridSequential(prefix="")
for i in range(num_branches):
layers = nn.HybridSequential(prefix="branch{}_".format(i + 1))
in_channels_i = self.in_channels_list[i]
out_channels_i = out_channels_list[i]
for j in range(num_subblocks[i]):
layers.add(ResUnit(
in_channels=in_channels_i,
out_channels=out_channels_i,
strides=1,
bottleneck=False,
bn_use_global_stats=bn_use_global_stats))
in_channels_i = out_channels_i
self.in_channels_list[i] = out_channels_i
self.branches.add(layers)
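            # Fusion scheme: output branch i aggregates every input branch j,
            # using nearest-neighbour upsampling when j > i, identity when
            # j == i, and a chain of strided 3x3 convolutions when j < i.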
if num_branches > 1:
self.fuse_layers = nn.HybridSequential(prefix="")
for i in range(num_branches):
fuse_layer = nn.HybridSequential(prefix="fuselayer{}_".format(i + 1))
with fuse_layer.name_scope():
for j in range(num_branches):
if j > i:
fuse_layer.add(UpSamplingBlock(
in_channels=in_channels_list[j],
out_channels=in_channels_list[i],
bn_use_global_stats=bn_use_global_stats,
scale_factor=2 ** (j - i)))
elif j == i:
fuse_layer.add(Identity())
else:
conv3x3_seq = nn.HybridSequential(prefix="conv3x3seq{}_".format(j + 1))
with conv3x3_seq.name_scope():
for k in range(i - j):
if k == i - j - 1:
conv3x3_seq.add(conv3x3_block(
in_channels=in_channels_list[j],
out_channels=in_channels_list[i],
strides=2,
activation=None,
bn_use_global_stats=bn_use_global_stats))
else:
conv3x3_seq.add(conv3x3_block(
in_channels=in_channels_list[j],
out_channels=in_channels_list[j],
strides=2,
bn_use_global_stats=bn_use_global_stats))
fuse_layer.add(conv3x3_seq)
self.fuse_layers.add(fuse_layer)
self.activ = nn.Activation("relu")
def hybrid_forward(self, F, x0, x):
for i in range(self.num_branches):
x[i] = self.branches[i](x[i])
if self.num_branches == 1:
return x
x_fuse = []
for i in range(len(self.fuse_layers)):
y = x[0] if i == 0 else self.fuse_layers[i][0](x[0])
for j in range(1, self.num_branches):
if i == j:
y = y + x[j]
else:
y = y + self.fuse_layers[i][j](x[j])
x_fuse.append(self.activ(y))
return x0, x_fuse
class HRStage(HybridBlock):
"""
HRNet stage block.
Parameters:
----------
in_channels_list : list of int
Number of output channels from the previous layer.
out_channels_list : list of int
Number of output channels in the current layer.
num_modules : int
Number of modules.
num_branches : int
Number of branches.
num_subblocks : list of int
Number of subblocks.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels_list,
out_channels_list,
num_modules,
num_branches,
num_subblocks,
bn_use_global_stats,
**kwargs):
super(HRStage, self).__init__(**kwargs)
self.branches = num_branches
self.in_channels_list = out_channels_list
in_branches = len(in_channels_list)
out_branches = len(out_channels_list)
with self.name_scope():
self.transition = nn.HybridSequential(prefix="")
for i in range(out_branches):
if i < in_branches:
if out_channels_list[i] != in_channels_list[i]:
self.transition.add(conv3x3_block(
in_channels=in_channels_list[i],
out_channels=out_channels_list[i],
strides=1,
bn_use_global_stats=bn_use_global_stats))
else:
self.transition.add(Identity())
else:
conv3x3_seq = nn.HybridSequential(prefix="conv3x3_seq{}_".format(i + 1))
for j in range(i + 1 - in_branches):
in_channels_i = in_channels_list[-1]
out_channels_i = out_channels_list[i] if j == i - in_branches else in_channels_i
conv3x3_seq.add(conv3x3_block(
in_channels=in_channels_i,
out_channels=out_channels_i,
strides=2,
bn_use_global_stats=bn_use_global_stats))
self.transition.add(conv3x3_seq)
self.layers = DualPathSequential(prefix="")
for i in range(num_modules):
self.layers.add(HRBlock(
in_channels_list=self.in_channels_list,
out_channels_list=out_channels_list,
num_branches=num_branches,
num_subblocks=num_subblocks,
bn_use_global_stats=bn_use_global_stats))
self.in_channels_list = self.layers[-1].in_channels_list
def hybrid_forward(self, F, x0, x):
x_list = []
for j in range(self.branches):
if not isinstance(self.transition[j], Identity):
x_list.append(self.transition[j](x[-1] if type(x) in (list, tuple) else x))
else:
x_list_j = x[j] if type(x) in (list, tuple) else x
x_list.append(x_list_j)
_, y_list = self.layers(x0, x_list)
return x0, y_list
class HRInitBlock(HybridBlock):
"""
HRNet specific initial block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
mid_channels : int
Number of middle channels.
num_subblocks : int
Number of subblocks.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels,
out_channels,
mid_channels,
num_subblocks,
bn_use_global_stats,
**kwargs):
super(HRInitBlock, self).__init__(**kwargs)
with self.name_scope():
self.conv1 = conv3x3_block(
in_channels=in_channels,
out_channels=mid_channels,
strides=2,
bn_use_global_stats=bn_use_global_stats)
self.conv2 = conv3x3_block(
in_channels=mid_channels,
out_channels=mid_channels,
strides=2,
bn_use_global_stats=bn_use_global_stats)
in_channels = mid_channels
self.subblocks = nn.HybridSequential(prefix="")
for i in range(num_subblocks):
self.subblocks.add(ResUnit(
in_channels=in_channels,
out_channels=out_channels,
strides=1,
bottleneck=True,
bn_use_global_stats=bn_use_global_stats))
in_channels = out_channels
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.subblocks(x)
return x
class HRFinalBlock(HybridBlock):
"""
HRNet specific final block.
Parameters:
----------
in_channels_list : list of int
Number of input channels per stage.
out_channels_list : list of int
Number of output channels per stage.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels_list,
out_channels_list,
bn_use_global_stats,
**kwargs):
super(HRFinalBlock, self).__init__(**kwargs)
with self.name_scope():
self.inc_blocks = nn.HybridSequential(prefix="")
for i, in_channels_i in enumerate(in_channels_list):
self.inc_blocks.add(ResUnit(
in_channels=in_channels_i,
out_channels=out_channels_list[i],
strides=1,
bottleneck=True,
bn_use_global_stats=bn_use_global_stats))
self.down_blocks = nn.HybridSequential(prefix="")
for i in range(len(in_channels_list) - 1):
self.down_blocks.add(conv3x3_block(
in_channels=out_channels_list[i],
out_channels=out_channels_list[i + 1],
strides=2,
use_bias=True,
bn_use_global_stats=bn_use_global_stats))
self.final_layer = conv1x1_block(
in_channels=1024,
out_channels=2048,
strides=1,
use_bias=True,
bn_use_global_stats=bn_use_global_stats)
def hybrid_forward(self, F, x0, x):
y = self.inc_blocks[0](x[0])
for i in range(len(self.down_blocks)):
y = self.inc_blocks[i + 1](x[i + 1]) + self.down_blocks[i](y)
y = self.final_layer(y)
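        # The output is duplicated to satisfy the dual-path (x0, x) interface
        # expected by DualPathSequential.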
return y, y
class HRNet(HybridBlock):
"""
HRNet model from 'Deep High-Resolution Representation Learning for Visual Recognition,'
https://arxiv.org/abs/1908.07919.
Parameters:
----------
channels : list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
init_num_subblocks : int
Number of subblocks in the initial unit.
num_modules : int
Number of modules per stage.
num_subblocks : list of int
Number of subblocks per stage.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
init_num_subblocks,
num_modules,
num_subblocks,
bn_use_global_stats=False,
in_channels=3,
in_size=(224, 224),
classes=1000,
**kwargs):
super(HRNet, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
self.branches = [2, 3, 4]
with self.name_scope():
self.features = DualPathSequential(
first_ordinals=1,
last_ordinals=1,
dual_path_scheme_ordinal=(lambda block, x1, x2: (x1, block(x2))),
prefix="")
self.features.add(HRInitBlock(
in_channels=in_channels,
out_channels=init_block_channels,
mid_channels=64,
num_subblocks=init_num_subblocks,
bn_use_global_stats=bn_use_global_stats))
in_channels_list = [init_block_channels]
for i in range(len(self.branches)):
self.features.add(HRStage(
in_channels_list=in_channels_list,
out_channels_list=channels[i],
num_modules=num_modules[i],
num_branches=self.branches[i],
num_subblocks=num_subblocks[i],
bn_use_global_stats=bn_use_global_stats))
in_channels_list = self.features[-1].in_channels_list
self.features.add(HRFinalBlock(
in_channels_list=in_channels_list,
out_channels_list=[128, 256, 512, 1024],
bn_use_global_stats=bn_use_global_stats))
self.features.add(nn.AvgPool2D(
pool_size=7,
strides=1))
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
self.output.add(nn.Dense(
units=classes,
in_units=2048))
def hybrid_forward(self, F, x):
_, x = self.features(x, x)
x = self.output(x)
return x
def get_hrnet(version,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create HRNet model with specific parameters.
Parameters:
----------
version : str
        Version of HRNet ('w18s1', 'w18s2', 'w18', 'w30', 'w32', 'w40', 'w44', 'w48' or 'w64').
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
if version == "w18s1":
init_block_channels = 128
init_num_subblocks = 1
channels = [[16, 32], [16, 32, 64], [16, 32, 64, 128]]
num_modules = [1, 1, 1]
elif version == "w18s2":
init_block_channels = 256
init_num_subblocks = 2
channels = [[18, 36], [18, 36, 72], [18, 36, 72, 144]]
num_modules = [1, 3, 2]
elif version == "w18":
init_block_channels = 256
init_num_subblocks = 4
channels = [[18, 36], [18, 36, 72], [18, 36, 72, 144]]
num_modules = [1, 4, 3]
elif version == "w30":
init_block_channels = 256
init_num_subblocks = 4
channels = [[30, 60], [30, 60, 120], [30, 60, 120, 240]]
num_modules = [1, 4, 3]
elif version == "w32":
init_block_channels = 256
init_num_subblocks = 4
channels = [[32, 64], [32, 64, 128], [32, 64, 128, 256]]
num_modules = [1, 4, 3]
elif version == "w40":
init_block_channels = 256
init_num_subblocks = 4
channels = [[40, 80], [40, 80, 160], [40, 80, 160, 320]]
num_modules = [1, 4, 3]
elif version == "w44":
init_block_channels = 256
init_num_subblocks = 4
channels = [[44, 88], [44, 88, 176], [44, 88, 176, 352]]
num_modules = [1, 4, 3]
elif version == "w48":
init_block_channels = 256
init_num_subblocks = 4
channels = [[48, 96], [48, 96, 192], [48, 96, 192, 384]]
num_modules = [1, 4, 3]
elif version == "w64":
init_block_channels = 256
init_num_subblocks = 4
channels = [[64, 128], [64, 128, 256], [64, 128, 256, 512]]
num_modules = [1, 4, 3]
else:
raise ValueError("Unsupported HRNet version {}".format(version))
num_subblocks = [[max(2, init_num_subblocks)] * len(ci) for ci in channels]
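    # A worked expansion (not in the original file): for version "w18" this
    # yields num_subblocks = [[4, 4], [4, 4, 4], [4, 4, 4, 4]], while the small
    # "w18s1" config is clamped up to [[2, 2], [2, 2, 2], [2, 2, 2, 2]].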
net = HRNet(
channels=channels,
init_block_channels=init_block_channels,
init_num_subblocks=init_num_subblocks,
num_modules=num_modules,
num_subblocks=num_subblocks,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def hrnet_w18_small_v1(**kwargs):
"""
HRNet-W18 Small V1 model from 'Deep High-Resolution Representation Learning for Visual Recognition,'
https://arxiv.org/abs/1908.07919.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_hrnet(version="w18s1", model_name="hrnet_w18_small_v1", **kwargs)
def hrnet_w18_small_v2(**kwargs):
"""
HRNet-W18 Small V2 model from 'Deep High-Resolution Representation Learning for Visual Recognition,'
https://arxiv.org/abs/1908.07919.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_hrnet(version="w18s2", model_name="hrnet_w18_small_v2", **kwargs)
def hrnetv2_w18(**kwargs):
"""
HRNetV2-W18 model from 'Deep High-Resolution Representation Learning for Visual Recognition,'
https://arxiv.org/abs/1908.07919.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_hrnet(version="w18", model_name="hrnetv2_w18", **kwargs)
def hrnetv2_w30(**kwargs):
"""
HRNetV2-W30 model from 'Deep High-Resolution Representation Learning for Visual Recognition,'
https://arxiv.org/abs/1908.07919.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_hrnet(version="w30", model_name="hrnetv2_w30", **kwargs)
def hrnetv2_w32(**kwargs):
"""
HRNetV2-W32 model from 'Deep High-Resolution Representation Learning for Visual Recognition,'
https://arxiv.org/abs/1908.07919.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_hrnet(version="w32", model_name="hrnetv2_w32", **kwargs)
def hrnetv2_w40(**kwargs):
"""
HRNetV2-W40 model from 'Deep High-Resolution Representation Learning for Visual Recognition,'
https://arxiv.org/abs/1908.07919.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_hrnet(version="w40", model_name="hrnetv2_w40", **kwargs)
def hrnetv2_w44(**kwargs):
"""
HRNetV2-W44 model from 'Deep High-Resolution Representation Learning for Visual Recognition,'
https://arxiv.org/abs/1908.07919.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_hrnet(version="w44", model_name="hrnetv2_w44", **kwargs)
def hrnetv2_w48(**kwargs):
"""
HRNetV2-W48 model from 'Deep High-Resolution Representation Learning for Visual Recognition,'
https://arxiv.org/abs/1908.07919.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_hrnet(version="w48", model_name="hrnetv2_w48", **kwargs)
def hrnetv2_w64(**kwargs):
"""
HRNetV2-W64 model from 'Deep High-Resolution Representation Learning for Visual Recognition,'
https://arxiv.org/abs/1908.07919.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_hrnet(version="w64", model_name="hrnetv2_w64", **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
hrnet_w18_small_v1,
hrnet_w18_small_v2,
hrnetv2_w18,
hrnetv2_w30,
hrnetv2_w32,
hrnetv2_w40,
hrnetv2_w44,
hrnetv2_w48,
hrnetv2_w64,
]
for model in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != hrnet_w18_small_v1 or weight_count == 13187464)
assert (model != hrnet_w18_small_v2 or weight_count == 15597464)
assert (model != hrnetv2_w18 or weight_count == 21299004)
assert (model != hrnetv2_w30 or weight_count == 37712220)
assert (model != hrnetv2_w32 or weight_count == 41232680)
assert (model != hrnetv2_w40 or weight_count == 57557160)
assert (model != hrnetv2_w44 or weight_count == 67064984)
assert (model != hrnetv2_w48 or weight_count == 77469864)
assert (model != hrnetv2_w64 or weight_count == 128059944)
x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
y = net(x)
assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
| 26,230 | 35.381415 | 115 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/fcn8sd.py | """
FCN-8s(d) for image segmentation, implemented in Gluon.
Original paper: 'Fully Convolutional Networks for Semantic Segmentation,' https://arxiv.org/abs/1411.4038.
"""
__all__ = ['FCN8sd', 'fcn8sd_resnetd50b_voc', 'fcn8sd_resnetd101b_voc', 'fcn8sd_resnetd50b_coco',
'fcn8sd_resnetd101b_coco', 'fcn8sd_resnetd50b_ade20k', 'fcn8sd_resnetd101b_ade20k',
'fcn8sd_resnetd50b_cityscapes', 'fcn8sd_resnetd101b_cityscapes']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1, conv3x3_block
from .resnetd import resnetd50b, resnetd101b
class FCNFinalBlock(HybridBlock):
"""
FCN-8s(d) final block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
bottleneck_factor : int, default 4
Bottleneck factor.
"""
def __init__(self,
in_channels,
out_channels,
bottleneck_factor=4,
**kwargs):
super(FCNFinalBlock, self).__init__(**kwargs)
assert (in_channels % bottleneck_factor == 0)
mid_channels = in_channels // bottleneck_factor
with self.name_scope():
self.conv1 = conv3x3_block(
in_channels=in_channels,
out_channels=mid_channels)
self.dropout = nn.Dropout(rate=0.1)
self.conv2 = conv1x1(
in_channels=mid_channels,
out_channels=out_channels,
use_bias=True)
def hybrid_forward(self, F, x, out_size):
x = self.conv1(x)
x = self.dropout(x)
x = self.conv2(x)
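        # Bilinearly upsample the class logits back to the requested output
        # size (typically the spatial size of the network input).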
x = F.contrib.BilinearResize2D(x, height=out_size[0], width=out_size[1])
return x
class FCN8sd(HybridBlock):
"""
FCN-8s(d) model from 'Fully Convolutional Networks for Semantic Segmentation,' https://arxiv.org/abs/1411.4038.
    It is an experimental model that mixes FCN-8s and PSPNet.
Parameters:
----------
backbone : nn.Sequential
Feature extractor.
backbone_out_channels : int, default 2048
        Number of output channels from the feature extractor.
aux : bool, default False
Whether to output an auxiliary result.
fixed_size : bool, default True
Whether to expect fixed spatial size of input image.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (480, 480)
Spatial size of the expected input image.
classes : int, default 21
Number of segmentation classes.
"""
def __init__(self,
backbone,
backbone_out_channels=2048,
aux=False,
fixed_size=True,
in_channels=3,
in_size=(480, 480),
classes=21,
**kwargs):
super(FCN8sd, self).__init__(**kwargs)
assert (in_channels > 0)
self.in_size = in_size
self.classes = classes
self.aux = aux
self.fixed_size = fixed_size
with self.name_scope():
self.backbone = backbone
pool_out_channels = backbone_out_channels
self.final_block = FCNFinalBlock(
in_channels=pool_out_channels,
out_channels=classes)
if self.aux:
aux_out_channels = backbone_out_channels // 2
self.aux_block = FCNFinalBlock(
in_channels=aux_out_channels,
out_channels=classes)
def hybrid_forward(self, F, x):
in_size = self.in_size if self.fixed_size else x.shape[2:]
x, y = self.backbone(x)
x = self.final_block(x, in_size)
if self.aux:
y = self.aux_block(y, in_size)
return x, y
else:
return x
def get_fcn8sd(backbone,
classes,
aux=False,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create FCN-8s(d) model with specific parameters.
Parameters:
----------
backbone : nn.Sequential
Feature extractor.
classes : int
Number of segmentation classes.
aux : bool, default False
Whether to output an auxiliary result.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
net = FCN8sd(
backbone=backbone,
classes=classes,
aux=aux,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx,
ignore_extra=True)
return net
def fcn8sd_resnetd50b_voc(pretrained_backbone=False, classes=21, aux=True, **kwargs):
"""
FCN-8s(d) model on the base of ResNet(D)-50b for Pascal VOC from 'Fully Convolutional Networks for Semantic
Segmentation,' https://arxiv.org/abs/1411.4038.
Parameters:
----------
pretrained_backbone : bool, default False
Whether to load the pretrained weights for feature extractor.
classes : int, default 21
Number of segmentation classes.
aux : bool, default True
Whether to output an auxiliary result.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
backbone = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features[:-1]
return get_fcn8sd(backbone=backbone, classes=classes, aux=aux, model_name="fcn8sd_resnetd50b_voc", **kwargs)
def fcn8sd_resnetd101b_voc(pretrained_backbone=False, classes=21, aux=True, **kwargs):
"""
FCN-8s(d) model on the base of ResNet(D)-101b for Pascal VOC from 'Fully Convolutional Networks for Semantic
Segmentation,' https://arxiv.org/abs/1411.4038.
Parameters:
----------
pretrained_backbone : bool, default False
Whether to load the pretrained weights for feature extractor.
classes : int, default 21
Number of segmentation classes.
aux : bool, default True
Whether to output an auxiliary result.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
backbone = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features[:-1]
return get_fcn8sd(backbone=backbone, classes=classes, aux=aux, model_name="fcn8sd_resnetd101b_voc", **kwargs)
def fcn8sd_resnetd50b_coco(pretrained_backbone=False, classes=21, aux=True, **kwargs):
"""
FCN-8s(d) model on the base of ResNet(D)-50b for COCO from 'Fully Convolutional Networks for Semantic
Segmentation,' https://arxiv.org/abs/1411.4038.
Parameters:
----------
pretrained_backbone : bool, default False
Whether to load the pretrained weights for feature extractor.
classes : int, default 21
Number of segmentation classes.
aux : bool, default True
Whether to output an auxiliary result.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
backbone = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features[:-1]
return get_fcn8sd(backbone=backbone, classes=classes, aux=aux, model_name="fcn8sd_resnetd50b_coco", **kwargs)
def fcn8sd_resnetd101b_coco(pretrained_backbone=False, classes=21, aux=True, **kwargs):
"""
FCN-8s(d) model on the base of ResNet(D)-101b for COCO from 'Fully Convolutional Networks for Semantic
Segmentation,' https://arxiv.org/abs/1411.4038.
Parameters:
----------
pretrained_backbone : bool, default False
Whether to load the pretrained weights for feature extractor.
classes : int, default 21
Number of segmentation classes.
aux : bool, default True
Whether to output an auxiliary result.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
backbone = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features[:-1]
return get_fcn8sd(backbone=backbone, classes=classes, aux=aux, model_name="fcn8sd_resnetd101b_coco", **kwargs)
def fcn8sd_resnetd50b_ade20k(pretrained_backbone=False, classes=150, aux=True, **kwargs):
"""
FCN-8s(d) model on the base of ResNet(D)-50b for ADE20K from 'Fully Convolutional Networks for Semantic
Segmentation,' https://arxiv.org/abs/1411.4038.
Parameters:
----------
pretrained_backbone : bool, default False
Whether to load the pretrained weights for feature extractor.
classes : int, default 150
Number of segmentation classes.
aux : bool, default True
Whether to output an auxiliary result.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
backbone = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features[:-1]
return get_fcn8sd(backbone=backbone, classes=classes, aux=aux, model_name="fcn8sd_resnetd50b_ade20k", **kwargs)
def fcn8sd_resnetd101b_ade20k(pretrained_backbone=False, classes=150, aux=True, **kwargs):
"""
FCN-8s(d) model on the base of ResNet(D)-101b for ADE20K from 'Fully Convolutional Networks for Semantic
Segmentation,' https://arxiv.org/abs/1411.4038.
Parameters:
----------
pretrained_backbone : bool, default False
Whether to load the pretrained weights for feature extractor.
classes : int, default 150
Number of segmentation classes.
aux : bool, default True
Whether to output an auxiliary result.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
backbone = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features[:-1]
return get_fcn8sd(backbone=backbone, classes=classes, aux=aux, model_name="fcn8sd_resnetd101b_ade20k", **kwargs)
def fcn8sd_resnetd50b_cityscapes(pretrained_backbone=False, classes=19, aux=True, **kwargs):
"""
FCN-8s(d) model on the base of ResNet(D)-50b for Cityscapes from 'Fully Convolutional Networks for Semantic
Segmentation,' https://arxiv.org/abs/1411.4038.
Parameters:
----------
pretrained_backbone : bool, default False
Whether to load the pretrained weights for feature extractor.
classes : int, default 19
Number of segmentation classes.
aux : bool, default True
Whether to output an auxiliary result.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
backbone = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features[:-1]
return get_fcn8sd(backbone=backbone, classes=classes, aux=aux, model_name="fcn8sd_resnetd50b_cityscapes", **kwargs)
def fcn8sd_resnetd101b_cityscapes(pretrained_backbone=False, classes=19, aux=True, **kwargs):
"""
FCN-8s(d) model on the base of ResNet(D)-101b for Cityscapes from 'Fully Convolutional Networks for Semantic
Segmentation,' https://arxiv.org/abs/1411.4038.
Parameters:
----------
pretrained_backbone : bool, default False
Whether to load the pretrained weights for feature extractor.
classes : int, default 19
Number of segmentation classes.
aux : bool, default True
Whether to output an auxiliary result.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
backbone = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features[:-1]
return get_fcn8sd(backbone=backbone, classes=classes, aux=aux, model_name="fcn8sd_resnetd101b_cityscapes", **kwargs)
def _test():
import numpy as np
import mxnet as mx
in_size = (480, 480)
aux = False
pretrained = False
models = [
(fcn8sd_resnetd50b_voc, 21),
(fcn8sd_resnetd101b_voc, 21),
(fcn8sd_resnetd50b_coco, 21),
(fcn8sd_resnetd101b_coco, 21),
(fcn8sd_resnetd50b_ade20k, 150),
(fcn8sd_resnetd101b_ade20k, 150),
(fcn8sd_resnetd50b_cityscapes, 19),
(fcn8sd_resnetd101b_cityscapes, 19),
]
for model, classes in models:
net = model(pretrained=pretrained, in_size=in_size, aux=aux)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
if aux:
assert (model != fcn8sd_resnetd50b_voc or weight_count == 35445994)
assert (model != fcn8sd_resnetd101b_voc or weight_count == 54438122)
assert (model != fcn8sd_resnetd50b_coco or weight_count == 35445994)
assert (model != fcn8sd_resnetd101b_coco or weight_count == 54438122)
assert (model != fcn8sd_resnetd50b_ade20k or weight_count == 35545324)
assert (model != fcn8sd_resnetd101b_ade20k or weight_count == 54537452)
assert (model != fcn8sd_resnetd50b_cityscapes or weight_count == 35444454)
assert (model != fcn8sd_resnetd101b_cityscapes or weight_count == 54436582)
else:
assert (model != fcn8sd_resnetd50b_voc or weight_count == 33080789)
assert (model != fcn8sd_resnetd101b_voc or weight_count == 52072917)
assert (model != fcn8sd_resnetd50b_coco or weight_count == 33080789)
assert (model != fcn8sd_resnetd101b_coco or weight_count == 52072917)
assert (model != fcn8sd_resnetd50b_ade20k or weight_count == 33146966)
assert (model != fcn8sd_resnetd101b_ade20k or weight_count == 52139094)
assert (model != fcn8sd_resnetd50b_cityscapes or weight_count == 33079763)
assert (model != fcn8sd_resnetd101b_cityscapes or weight_count == 52071891)
x = mx.nd.zeros((1, 3, in_size[0], in_size[1]), ctx=ctx)
ys = net(x)
y = ys[0] if aux else ys
assert ((y.shape[0] == x.shape[0]) and (y.shape[1] == classes) and (y.shape[2] == x.shape[2]) and
(y.shape[3] == x.shape[3]))
if __name__ == "__main__":
_test()
| 16,570 | 38.267773 | 120 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/selecsls.py | """
SelecSLS for ImageNet-1K, implemented in Gluon.
Original paper: 'XNect: Real-time Multi-person 3D Human Pose Estimation with a Single RGB Camera,'
https://arxiv.org/abs/1907.00837.
"""
__all__ = ['SelecSLS', 'selecsls42', 'selecsls42b', 'selecsls60', 'selecsls60b', 'selecsls84']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1_block, conv3x3_block, DualPathSequential
class SelecSLSBlock(HybridBlock):
"""
SelecSLS block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels,
out_channels,
bn_use_global_stats,
**kwargs):
super(SelecSLSBlock, self).__init__(**kwargs)
mid_channels = 2 * out_channels
with self.name_scope():
self.conv1 = conv1x1_block(
in_channels=in_channels,
out_channels=mid_channels,
bn_use_global_stats=bn_use_global_stats)
self.conv2 = conv3x3_block(
in_channels=mid_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats)
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.conv2(x)
return x
class SelecSLSUnit(HybridBlock):
"""
SelecSLS unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
skip_channels : int
Number of skipped channels.
mid_channels : int
Number of middle channels.
strides : int or tuple/list of 2 int
Strides of the branch convolution layers.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels,
out_channels,
skip_channels,
mid_channels,
strides,
bn_use_global_stats,
**kwargs):
super(SelecSLSUnit, self).__init__(**kwargs)
self.resize = (strides == 2)
mid2_channels = mid_channels // 2
last_channels = 2 * mid_channels + (skip_channels if strides == 1 else 0)
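        # The last 1x1 conv consumes the concatenation of branch1 (mid_channels)
        # with branch2 and branch3 (mid_channels // 2 each), i.e. 2 * mid_channels,
        # plus the carried skip tensor when strides == 1.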
with self.name_scope():
self.branch1 = conv3x3_block(
in_channels=in_channels,
out_channels=mid_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats)
self.branch2 = SelecSLSBlock(
in_channels=mid_channels,
out_channels=mid2_channels,
bn_use_global_stats=bn_use_global_stats)
self.branch3 = SelecSLSBlock(
in_channels=mid2_channels,
out_channels=mid2_channels,
bn_use_global_stats=bn_use_global_stats)
self.last_conv = conv1x1_block(
in_channels=last_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats)
def hybrid_forward(self, F, x, x0=None):
x1 = self.branch1(x)
x2 = self.branch2(x1)
x3 = self.branch3(x2)
if self.resize:
y = F.concat(x1, x2, x3, dim=1)
y = self.last_conv(y)
return y, y
else:
y = F.concat(x1, x2, x3, x0, dim=1)
y = self.last_conv(y)
return y, x0
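# Note (not in the original file): SelecSLS units carry a second tensor x0
# through DualPathSequential. A strided unit resets the skip path to its own
# output (returning y twice), while a stride-1 unit concatenates the carried
# x0 into its features and passes it through unchanged.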
class SelecSLS(HybridBlock):
"""
SelecSLS model from 'XNect: Real-time Multi-person 3D Human Pose Estimation with a Single RGB Camera,'
https://arxiv.org/abs/1907.00837.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
skip_channels : list of list of int
Number of skipped channels for each unit.
mid_channels : list of list of int
Number of middle channels for each unit.
kernels3 : list of list of int/bool
Using 3x3 (instead of 1x1) kernel for each head unit.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
skip_channels,
mid_channels,
kernels3,
bn_use_global_stats=False,
in_channels=3,
in_size=(224, 224),
classes=1000,
**kwargs):
super(SelecSLS, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
init_block_channels = 32
with self.name_scope():
self.features = DualPathSequential(
return_two=False,
first_ordinals=1,
last_ordinals=(1 + len(kernels3)),
prefix="")
self.features.add(conv3x3_block(
in_channels=in_channels,
out_channels=init_block_channels,
strides=2,
bn_use_global_stats=bn_use_global_stats))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
k = i - len(skip_channels)
stage = DualPathSequential(prefix="stage{}_".format(i + 1)) if k < 0 else\
nn.HybridSequential(prefix="stage{}_".format(i + 1))
with stage.name_scope():
for j, out_channels in enumerate(channels_per_stage):
strides = 2 if j == 0 else 1
if k < 0:
unit = SelecSLSUnit(
in_channels=in_channels,
out_channels=out_channels,
skip_channels=skip_channels[i][j],
mid_channels=mid_channels[i][j],
strides=strides,
bn_use_global_stats=bn_use_global_stats)
else:
conv_block_class = conv3x3_block if kernels3[k][j] == 1 else conv1x1_block
unit = conv_block_class(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats)
stage.add(unit)
in_channels = out_channels
self.features.add(stage)
self.features.add(nn.AvgPool2D(
pool_size=4,
strides=1))
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
self.output.add(nn.Dense(
units=classes,
in_units=in_channels))
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_selecsls(version,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create SelecSLS model with specific parameters.
Parameters:
----------
version : str
        Version of SelecSLS ('42', '42b', '60', '60b' or '84').
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
if version in ("42", "42b"):
channels = [[64, 128], [144, 288], [304, 480]]
skip_channels = [[0, 64], [0, 144], [0, 304]]
mid_channels = [[64, 64], [144, 144], [304, 304]]
kernels3 = [[1, 1], [1, 0]]
if version == "42":
head_channels = [[960, 1024], [1024, 1280]]
else:
head_channels = [[960, 1024], [1280, 1024]]
elif version in ("60", "60b"):
channels = [[64, 128], [128, 128, 288], [288, 288, 288, 416]]
skip_channels = [[0, 64], [0, 128, 128], [0, 288, 288, 288]]
mid_channels = [[64, 64], [128, 128, 128], [288, 288, 288, 288]]
kernels3 = [[1, 1], [1, 0]]
if version == "60":
head_channels = [[756, 1024], [1024, 1280]]
else:
head_channels = [[756, 1024], [1280, 1024]]
elif version == "84":
channels = [[64, 144], [144, 144, 144, 144, 304], [304, 304, 304, 304, 304, 512]]
skip_channels = [[0, 64], [0, 144, 144, 144, 144], [0, 304, 304, 304, 304, 304]]
mid_channels = [[64, 64], [144, 144, 144, 144, 144], [304, 304, 304, 304, 304, 304]]
kernels3 = [[1, 1], [1, 1]]
head_channels = [[960, 1024], [1024, 1280]]
else:
raise ValueError("Unsupported SelecSLS version {}".format(version))
channels += head_channels
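    # The head stages appended above are built from plain conv blocks rather
    # than SelecSLS units; kernels3 selects a 3x3 (1) or 1x1 (0) kernel for
    # each head unit.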
net = SelecSLS(
channels=channels,
skip_channels=skip_channels,
mid_channels=mid_channels,
kernels3=kernels3,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
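# Example usage (a minimal sketch, assuming MXNet is installed; it mirrors the
# _test() routine at the end of this file):
#
#   import mxnet as mx
#   net = selecsls60(pretrained=False)
#   net.initialize(ctx=mx.cpu())
#   y = net(mx.nd.zeros((1, 3, 224, 224)))  # -> shape (1, 1000)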
def selecsls42(**kwargs):
"""
SelecSLS-42 model from 'XNect: Real-time Multi-person 3D Human Pose Estimation with a Single RGB Camera,'
https://arxiv.org/abs/1907.00837.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_selecsls(version="42", model_name="selecsls42", **kwargs)
def selecsls42b(**kwargs):
"""
SelecSLS-42b model from 'XNect: Real-time Multi-person 3D Human Pose Estimation with a Single RGB Camera,'
https://arxiv.org/abs/1907.00837.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_selecsls(version="42b", model_name="selecsls42b", **kwargs)
def selecsls60(**kwargs):
"""
SelecSLS-60 model from 'XNect: Real-time Multi-person 3D Human Pose Estimation with a Single RGB Camera,'
https://arxiv.org/abs/1907.00837.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_selecsls(version="60", model_name="selecsls60", **kwargs)
def selecsls60b(**kwargs):
"""
SelecSLS-60b model from 'XNect: Real-time Multi-person 3D Human Pose Estimation with a Single RGB Camera,'
https://arxiv.org/abs/1907.00837.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_selecsls(version="60b", model_name="selecsls60b", **kwargs)
def selecsls84(**kwargs):
"""
SelecSLS-84 model from 'XNect: Real-time Multi-person 3D Human Pose Estimation with a Single RGB Camera,'
https://arxiv.org/abs/1907.00837.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_selecsls(version="84", model_name="selecsls84", **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
selecsls42,
selecsls42b,
selecsls60,
selecsls60b,
selecsls84,
]
for model in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != selecsls42 or weight_count == 30354952)
assert (model != selecsls42b or weight_count == 32458248)
assert (model != selecsls60 or weight_count == 30670768)
assert (model != selecsls60b or weight_count == 32774064)
assert (model != selecsls84 or weight_count == 50954600)
x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
y = net(x)
assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
| 14,256 | 33.943627 | 115 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/inceptionv4.py | """
InceptionV4 for ImageNet-1K, implemented in Gluon.
Original paper: 'Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning,'
https://arxiv.org/abs/1602.07261.
"""
__all__ = ['InceptionV4', 'inceptionv4']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from mxnet.gluon.contrib.nn import HybridConcurrent
from .common import ConvBlock, conv3x3_block
from .inceptionv3 import MaxPoolBranch, AvgPoolBranch, Conv1x1Branch, ConvSeqBranch
class Conv3x3Branch(HybridBlock):
"""
InceptionV4 specific convolutional 3x3 branch block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
bn_epsilon : float
Small float added to variance in Batch norm.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels,
out_channels,
bn_epsilon,
bn_use_global_stats,
**kwargs):
super(Conv3x3Branch, self).__init__(**kwargs)
with self.name_scope():
self.conv = conv3x3_block(
in_channels=in_channels,
out_channels=out_channels,
strides=2,
padding=0,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats)
def hybrid_forward(self, F, x):
x = self.conv(x)
return x
class ConvSeq3x3Branch(HybridBlock):
"""
InceptionV4 specific convolutional sequence branch block with splitting by 3x3.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
mid_channels_list : list of tuple of int
List of numbers of output channels for middle layers.
kernel_size_list : list of tuple of int or tuple of tuple/list of 2 int
List of convolution window sizes.
strides_list : list of tuple of int or tuple of tuple/list of 2 int
List of strides of the convolution.
padding_list : list of tuple of int or tuple of tuple/list of 2 int
List of padding values for convolution layers.
bn_epsilon : float
Small float added to variance in Batch norm.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels,
out_channels,
mid_channels_list,
kernel_size_list,
strides_list,
padding_list,
bn_epsilon,
bn_use_global_stats,
**kwargs):
super(ConvSeq3x3Branch, self).__init__(**kwargs)
with self.name_scope():
self.conv_list = nn.HybridSequential(prefix="")
for i, (mid_channels, kernel_size, strides, padding) in enumerate(zip(
mid_channels_list, kernel_size_list, strides_list, padding_list)):
self.conv_list.add(ConvBlock(
in_channels=in_channels,
out_channels=mid_channels,
kernel_size=kernel_size,
strides=strides,
padding=padding,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats))
in_channels = mid_channels
self.conv1x3 = ConvBlock(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=(1, 3),
strides=1,
padding=(0, 1),
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats)
self.conv3x1 = ConvBlock(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=(3, 1),
strides=1,
padding=(1, 0),
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats)
def hybrid_forward(self, F, x):
x = self.conv_list(x)
        # The two asymmetric convolutions run in parallel on the same input,
        # so the branch emits 2 * out_channels feature maps after the concat.
        y1 = self.conv1x3(x)
        y2 = self.conv3x1(x)
        x = F.concat(y1, y2, dim=1)
return x
class InceptionAUnit(HybridBlock):
"""
InceptionV4 type Inception-A unit.
Parameters:
----------
bn_epsilon : float
Small float added to variance in Batch norm.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
bn_epsilon,
bn_use_global_stats,
**kwargs):
super(InceptionAUnit, self).__init__(**kwargs)
in_channels = 384
with self.name_scope():
self.branches = HybridConcurrent(axis=1, prefix="")
self.branches.add(Conv1x1Branch(
in_channels=in_channels,
out_channels=96,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats))
self.branches.add(ConvSeqBranch(
in_channels=in_channels,
out_channels_list=(64, 96),
kernel_size_list=(1, 3),
strides_list=(1, 1),
padding_list=(0, 1),
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats))
self.branches.add(ConvSeqBranch(
in_channels=in_channels,
out_channels_list=(64, 96, 96),
kernel_size_list=(1, 3, 3),
strides_list=(1, 1, 1),
padding_list=(0, 1, 1),
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats))
self.branches.add(AvgPoolBranch(
in_channels=in_channels,
out_channels=96,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
count_include_pad=False))
def hybrid_forward(self, F, x):
x = self.branches(x)
return x
class ReductionAUnit(HybridBlock):
"""
InceptionV4 type Reduction-A unit.
Parameters:
----------
bn_epsilon : float
Small float added to variance in Batch norm.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
bn_epsilon,
bn_use_global_stats,
**kwargs):
super(ReductionAUnit, self).__init__(**kwargs)
in_channels = 384
with self.name_scope():
self.branches = HybridConcurrent(axis=1, prefix="")
self.branches.add(ConvSeqBranch(
in_channels=in_channels,
out_channels_list=(384,),
kernel_size_list=(3,),
strides_list=(2,),
padding_list=(0,),
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats))
self.branches.add(ConvSeqBranch(
in_channels=in_channels,
out_channels_list=(192, 224, 256),
kernel_size_list=(1, 3, 3),
strides_list=(1, 1, 2),
padding_list=(0, 1, 0),
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats))
self.branches.add(MaxPoolBranch())
def hybrid_forward(self, F, x):
x = self.branches(x)
return x
class InceptionBUnit(HybridBlock):
"""
InceptionV4 type Inception-B unit.
Parameters:
----------
bn_epsilon : float
Small float added to variance in Batch norm.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
bn_epsilon,
bn_use_global_stats,
**kwargs):
super(InceptionBUnit, self).__init__(**kwargs)
in_channels = 1024
with self.name_scope():
self.branches = HybridConcurrent(axis=1, prefix="")
self.branches.add(Conv1x1Branch(
in_channels=in_channels,
out_channels=384,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats))
self.branches.add(ConvSeqBranch(
in_channels=in_channels,
out_channels_list=(192, 224, 256),
kernel_size_list=(1, (1, 7), (7, 1)),
strides_list=(1, 1, 1),
padding_list=(0, (0, 3), (3, 0)),
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats))
self.branches.add(ConvSeqBranch(
in_channels=in_channels,
out_channels_list=(192, 192, 224, 224, 256),
kernel_size_list=(1, (7, 1), (1, 7), (7, 1), (1, 7)),
strides_list=(1, 1, 1, 1, 1),
padding_list=(0, (3, 0), (0, 3), (3, 0), (0, 3)),
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats))
self.branches.add(AvgPoolBranch(
in_channels=in_channels,
out_channels=128,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
count_include_pad=False))
def hybrid_forward(self, F, x):
x = self.branches(x)
return x
class ReductionBUnit(HybridBlock):
"""
InceptionV4 type Reduction-B unit.
Parameters:
----------
bn_epsilon : float
Small float added to variance in Batch norm.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
bn_epsilon,
bn_use_global_stats,
**kwargs):
super(ReductionBUnit, self).__init__(**kwargs)
in_channels = 1024
with self.name_scope():
self.branches = HybridConcurrent(axis=1, prefix="")
self.branches.add(ConvSeqBranch(
in_channels=in_channels,
out_channels_list=(192, 192),
kernel_size_list=(1, 3),
strides_list=(1, 2),
padding_list=(0, 0),
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats))
self.branches.add(ConvSeqBranch(
in_channels=in_channels,
out_channels_list=(256, 256, 320, 320),
kernel_size_list=(1, (1, 7), (7, 1), 3),
strides_list=(1, 1, 1, 2),
padding_list=(0, (0, 3), (3, 0), 0),
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats))
self.branches.add(MaxPoolBranch())
def hybrid_forward(self, F, x):
x = self.branches(x)
return x
class InceptionCUnit(HybridBlock):
"""
InceptionV4 type Inception-C unit.
Parameters:
----------
bn_epsilon : float
Small float added to variance in Batch norm.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
bn_epsilon,
bn_use_global_stats,
**kwargs):
super(InceptionCUnit, self).__init__(**kwargs)
in_channels = 1536
with self.name_scope():
self.branches = HybridConcurrent(axis=1, prefix="")
self.branches.add(Conv1x1Branch(
in_channels=in_channels,
out_channels=256,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats))
self.branches.add(ConvSeq3x3Branch(
in_channels=in_channels,
out_channels=256,
mid_channels_list=(384,),
kernel_size_list=(1,),
strides_list=(1,),
padding_list=(0,),
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats))
self.branches.add(ConvSeq3x3Branch(
in_channels=in_channels,
out_channels=256,
mid_channels_list=(384, 448, 512),
kernel_size_list=(1, (3, 1), (1, 3)),
strides_list=(1, 1, 1),
padding_list=(0, (1, 0), (0, 1)),
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats))
self.branches.add(AvgPoolBranch(
in_channels=in_channels,
out_channels=256,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
count_include_pad=False))
def hybrid_forward(self, F, x):
x = self.branches(x)
return x
class InceptBlock3a(HybridBlock):
"""
InceptionV4 type Mixed-3a block.
Parameters:
----------
bn_epsilon : float
Small float added to variance in Batch norm.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
bn_epsilon,
bn_use_global_stats,
**kwargs):
super(InceptBlock3a, self).__init__(**kwargs)
with self.name_scope():
self.branches = HybridConcurrent(axis=1, prefix="")
self.branches.add(MaxPoolBranch())
self.branches.add(Conv3x3Branch(
in_channels=64,
out_channels=96,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats))
def hybrid_forward(self, F, x):
x = self.branches(x)
return x
class InceptBlock4a(HybridBlock):
"""
InceptionV4 type Mixed-4a block.
Parameters:
----------
bn_epsilon : float
Small float added to variance in Batch norm.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
bn_epsilon,
bn_use_global_stats,
**kwargs):
super(InceptBlock4a, self).__init__(**kwargs)
with self.name_scope():
self.branches = HybridConcurrent(axis=1, prefix="")
self.branches.add(ConvSeqBranch(
in_channels=160,
out_channels_list=(64, 96),
kernel_size_list=(1, 3),
strides_list=(1, 1),
padding_list=(0, 0),
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats))
self.branches.add(ConvSeqBranch(
in_channels=160,
out_channels_list=(64, 64, 64, 96),
kernel_size_list=(1, (1, 7), (7, 1), 3),
strides_list=(1, 1, 1, 1),
padding_list=(0, (0, 3), (3, 0), 0),
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats))
def hybrid_forward(self, F, x):
x = self.branches(x)
return x
class InceptBlock5a(HybridBlock):
"""
InceptionV4 type Mixed-5a block.
Parameters:
----------
bn_epsilon : float
Small float added to variance in Batch norm.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
bn_epsilon,
bn_use_global_stats,
**kwargs):
super(InceptBlock5a, self).__init__(**kwargs)
with self.name_scope():
self.branches = HybridConcurrent(axis=1, prefix="")
self.branches.add(Conv3x3Branch(
in_channels=192,
out_channels=192,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats))
self.branches.add(MaxPoolBranch())
def hybrid_forward(self, F, x):
x = self.branches(x)
return x
class InceptInitBlock(HybridBlock):
"""
InceptionV4 specific initial block.
Parameters:
----------
in_channels : int
Number of input channels.
bn_epsilon : float
Small float added to variance in Batch norm.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels,
bn_epsilon,
bn_use_global_stats,
**kwargs):
super(InceptInitBlock, self).__init__(**kwargs)
with self.name_scope():
self.conv1 = conv3x3_block(
in_channels=in_channels,
out_channels=32,
strides=2,
padding=0,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats)
self.conv2 = conv3x3_block(
in_channels=32,
out_channels=32,
strides=1,
padding=0,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats)
self.conv3 = conv3x3_block(
in_channels=32,
out_channels=64,
strides=1,
padding=1,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats)
self.block1 = InceptBlock3a(bn_epsilon=bn_epsilon, bn_use_global_stats=bn_use_global_stats)
self.block2 = InceptBlock4a(bn_epsilon=bn_epsilon, bn_use_global_stats=bn_use_global_stats)
self.block3 = InceptBlock5a(bn_epsilon=bn_epsilon, bn_use_global_stats=bn_use_global_stats)
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
x = self.block1(x)
x = self.block2(x)
x = self.block3(x)
return x
class InceptionV4(HybridBlock):
"""
InceptionV4 model from 'Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning,'
https://arxiv.org/abs/1602.07261.
Parameters:
----------
dropout_rate : float, default 0.0
Fraction of the input units to drop. Must be a number between 0 and 1.
bn_epsilon : float, default 1e-5
Small float added to variance in Batch norm.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (299, 299)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
dropout_rate=0.0,
bn_epsilon=1e-5,
bn_use_global_stats=False,
in_channels=3,
in_size=(299, 299),
classes=1000,
**kwargs):
super(InceptionV4, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
layers = [4, 8, 4]
normal_units = [InceptionAUnit, InceptionBUnit, InceptionCUnit]
reduction_units = [ReductionAUnit, ReductionBUnit]
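        # Stage schedule: stage 1 stacks 4 Inception-A units; stages 2 and 3
        # each start with a reduction unit, yielding Reduction-A + 7
        # Inception-B and Reduction-B + 3 Inception-C, as in the paper.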
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
self.features.add(InceptInitBlock(
in_channels=in_channels,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats))
for i, layers_per_stage in enumerate(layers):
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
with stage.name_scope():
for j in range(layers_per_stage):
if (j == 0) and (i != 0):
unit = reduction_units[i - 1]
else:
unit = normal_units[i]
stage.add(unit(bn_epsilon=bn_epsilon, bn_use_global_stats=bn_use_global_stats))
self.features.add(stage)
self.features.add(nn.AvgPool2D(
pool_size=8,
strides=1))
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
if dropout_rate > 0.0:
self.output.add(nn.Dropout(rate=dropout_rate))
self.output.add(nn.Dense(
units=classes,
in_units=1536))
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_inceptionv4(model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create InceptionV4 model with specific parameters.
Parameters:
----------
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
net = InceptionV4(**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def inceptionv4(**kwargs):
"""
InceptionV4 model from 'Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning,'
https://arxiv.org/abs/1602.07261.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_inceptionv4(model_name="inceptionv4", bn_epsilon=1e-3, **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
inceptionv4,
]
for model in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != inceptionv4 or weight_count == 42679816)
x = mx.nd.random.normal(shape=(1, 3, 299, 299), ctx=ctx)
y = net(x)
assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
| 23,613 | 33.573939 | 115 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/regnet.py | """
RegNet for ImageNet-1K, implemented in Gluon.
Original paper: 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.
"""
__all__ = ['RegNet', 'regnetx002', 'regnetx004', 'regnetx006', 'regnetx008', 'regnetx016', 'regnetx032', 'regnetx040',
'regnetx064', 'regnetx080', 'regnetx120', 'regnetx160', 'regnetx320', 'regnety002', 'regnety004',
'regnety006', 'regnety008', 'regnety016', 'regnety032', 'regnety040', 'regnety064', 'regnety080',
'regnety120', 'regnety160', 'regnety320', 'regnetz002', 'regnetw002']
import os
import numpy as np
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1_block, conv3x3_block, SEBlock
class RegNetBottleneck(HybridBlock):
"""
RegNet bottleneck block for residual path in RegNet unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
groups : int
Number of groups.
use_se : bool
Whether to use SE-module.
bottleneck_factor : int, default 1
Bottleneck factor.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
"""
def __init__(self,
in_channels,
out_channels,
strides,
groups,
use_se,
bottleneck_factor=1,
bn_use_global_stats=False,
bn_cudnn_off=False,
**kwargs):
super(RegNetBottleneck, self).__init__(**kwargs)
self.use_se = use_se
mid_channels = out_channels // bottleneck_factor
mid_groups = mid_channels // groups
with self.name_scope():
self.conv1 = conv1x1_block(
in_channels=in_channels,
out_channels=mid_channels,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off)
self.conv2 = conv3x3_block(
in_channels=mid_channels,
out_channels=mid_channels,
strides=strides,
groups=mid_groups,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off)
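            # For RegNetY the SE squeeze width is in_channels // 4, i.e. the
            # reduction is taken from the unit's input width rather than from
            # mid_channels.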
if self.use_se:
self.se = SEBlock(
channels=mid_channels,
mid_channels=(in_channels // 4))
self.conv3 = conv1x1_block(
in_channels=mid_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
activation=None)
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.conv2(x)
if self.use_se:
x = self.se(x)
x = self.conv3(x)
return x
class RegNetUnit(HybridBlock):
"""
RegNet unit with residual connection.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
groups : int
Number of groups.
use_se : bool
Whether to use SE-module.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
"""
def __init__(self,
in_channels,
out_channels,
strides,
groups,
use_se,
bn_use_global_stats=False,
bn_cudnn_off=False,
**kwargs):
super(RegNetUnit, self).__init__(**kwargs)
self.resize_identity = (in_channels != out_channels) or (strides != 1)
with self.name_scope():
self.body = RegNetBottleneck(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
groups=groups,
use_se=use_se,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off)
if self.resize_identity:
self.identity_conv = conv1x1_block(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
activation=None)
self.activ = nn.Activation("relu")
def hybrid_forward(self, F, x):
if self.resize_identity:
identity = self.identity_conv(x)
else:
identity = x
x = self.body(x)
x = x + identity
x = self.activ(x)
return x
class RegNet(HybridBlock):
"""
RegNet model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
groups : list of int
Number of groups for each stage.
use_se : bool
Whether to use SE-module.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
groups,
use_se,
bn_use_global_stats=False,
bn_cudnn_off=False,
in_channels=3,
in_size=(224, 224),
classes=1000,
**kwargs):
super(RegNet, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
self.features.add(conv3x3_block(
in_channels=in_channels,
out_channels=init_block_channels,
strides=2,
padding=1,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off))
in_channels = init_block_channels
for i, (channels_per_stage, groups_per_stage) in enumerate(zip(channels, groups)):
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
with stage.name_scope():
for j, out_channels in enumerate(channels_per_stage):
strides = 2 if (j == 0) else 1
stage.add(RegNetUnit(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
groups=groups_per_stage,
use_se=use_se,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off))
in_channels = out_channels
self.features.add(stage)
self.features.add(nn.GlobalAvgPool2D())
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
self.output.add(nn.Dense(
units=classes,
in_units=in_channels))
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_regnet(channels_init,
channels_slope,
channels_mult,
depth,
groups,
use_se=False,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create RegNet model with specific parameters.
Parameters:
----------
channels_init : float
Initial value for channels/widths.
channels_slope : float
Slope value for channels/widths.
    channels_mult : float
        Width multiplier value.
    depth : int
        Depth value.
    groups : int
        Number of groups.
use_se : bool, default False
Whether to use SE-module.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
divisor = 8
assert (channels_slope >= 0) and (channels_init > 0) and (channels_mult > 1) and (channels_init % divisor == 0)
# Generate continuous per-block channels/widths:
channels_cont = np.arange(depth) * channels_slope + channels_init
# Generate quantized per-block channels/widths:
channels_exps = np.round(np.log(channels_cont / channels_init) / np.log(channels_mult))
channels = channels_init * np.power(channels_mult, channels_exps)
    channels = (np.round(channels / divisor) * divisor).astype(int)
# Generate per stage channels/widths and layers/depths:
channels_per_stage, layers = np.unique(channels, return_counts=True)
# Adjusts the compatibility of channels/widths and groups:
groups_per_stage = [min(groups, c) for c in channels_per_stage]
channels_per_stage = [int(round(c / g) * g) for c, g in zip(channels_per_stage, groups_per_stage)]
channels = [[ci] * li for (ci, li) in zip(channels_per_stage, layers)]
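    # Worked example (RegNetX-200MF: channels_init=24, channels_slope=36.44,
    # channels_mult=2.49, depth=13, groups=8): the quantization above yields
    # per-stage widths [24, 56, 152, 368] with per-stage depths [1, 1, 4, 7].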
init_block_channels = 32
net = RegNet(
channels=channels,
init_block_channels=init_block_channels,
groups=groups_per_stage,
use_se=use_se,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
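# Example usage (a minimal sketch; see also _test() below):
#
#   import mxnet as mx
#   net = regnetx002(pretrained=False)
#   net.initialize(ctx=mx.cpu())
#   y = net(mx.nd.zeros((1, 3, 224, 224)))  # -> shape (1, 1000)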
def regnetx002(**kwargs):
"""
RegNetX-200MF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_regnet(channels_init=24, channels_slope=36.44, channels_mult=2.49, depth=13, groups=8,
model_name="regnetx002", **kwargs)
def regnetx004(**kwargs):
"""
RegNetX-400MF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_regnet(channels_init=24, channels_slope=24.48, channels_mult=2.54, depth=22, groups=16,
model_name="regnetx004", **kwargs)
def regnetx006(**kwargs):
"""
RegNetX-600MF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_regnet(channels_init=48, channels_slope=36.97, channels_mult=2.24, depth=16, groups=24,
model_name="regnetx006", **kwargs)
def regnetx008(**kwargs):
"""
RegNetX-800MF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_regnet(channels_init=56, channels_slope=35.73, channels_mult=2.28, depth=16, groups=16,
model_name="regnetx008", **kwargs)
def regnetx016(**kwargs):
"""
RegNetX-1.6GF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_regnet(channels_init=80, channels_slope=34.01, channels_mult=2.25, depth=18, groups=24,
model_name="regnetx016", **kwargs)
def regnetx032(**kwargs):
"""
RegNetX-3.2GF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_regnet(channels_init=88, channels_slope=26.31, channels_mult=2.25, depth=25, groups=48,
model_name="regnetx032", **kwargs)
def regnetx040(**kwargs):
"""
RegNetX-4.0GF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_regnet(channels_init=96, channels_slope=38.65, channels_mult=2.43, depth=23, groups=40,
model_name="regnetx040", **kwargs)
def regnetx064(**kwargs):
"""
RegNetX-6.4GF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_regnet(channels_init=184, channels_slope=60.83, channels_mult=2.07, depth=17, groups=56,
model_name="regnetx064", **kwargs)
def regnetx080(**kwargs):
"""
RegNetX-8.0GF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_regnet(channels_init=80, channels_slope=49.56, channels_mult=2.88, depth=23, groups=120,
model_name="regnetx080", **kwargs)
def regnetx120(**kwargs):
"""
RegNetX-12GF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_regnet(channels_init=168, channels_slope=73.36, channels_mult=2.37, depth=19, groups=112,
model_name="regnetx120", **kwargs)
def regnetx160(**kwargs):
"""
RegNetX-16GF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_regnet(channels_init=216, channels_slope=55.59, channels_mult=2.1, depth=22, groups=128,
model_name="regnetx160", **kwargs)
def regnetx320(**kwargs):
"""
RegNetX-32GF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_regnet(channels_init=320, channels_slope=69.86, channels_mult=2.0, depth=23, groups=168,
model_name="regnetx320", **kwargs)
def regnety002(**kwargs):
"""
RegNetY-200MF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_regnet(channels_init=24, channels_slope=36.44, channels_mult=2.49, depth=13, groups=8, use_se=True,
model_name="regnety002", **kwargs)
def regnety004(**kwargs):
"""
RegNetY-400MF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_regnet(channels_init=48, channels_slope=27.89, channels_mult=2.09, depth=16, groups=8, use_se=True,
model_name="regnety004", **kwargs)
def regnety006(**kwargs):
"""
RegNetY-600MF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_regnet(channels_init=48, channels_slope=32.54, channels_mult=2.32, depth=15, groups=16, use_se=True,
model_name="regnety006", **kwargs)
def regnety008(**kwargs):
"""
RegNetY-800MF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_regnet(channels_init=56, channels_slope=38.84, channels_mult=2.4, depth=14, groups=16, use_se=True,
model_name="regnety008", **kwargs)
def regnety016(**kwargs):
"""
RegNetY-1.6GF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_regnet(channels_init=48, channels_slope=20.71, channels_mult=2.65, depth=27, groups=24, use_se=True,
model_name="regnety016", **kwargs)
def regnety032(**kwargs):
"""
RegNetY-3.2GF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_regnet(channels_init=80, channels_slope=42.63, channels_mult=2.66, depth=21, groups=24, use_se=True,
model_name="regnety032", **kwargs)
def regnety040(**kwargs):
"""
RegNetY-4.0GF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_regnet(channels_init=96, channels_slope=31.41, channels_mult=2.24, depth=22, groups=64, use_se=True,
model_name="regnety040", **kwargs)
def regnety064(**kwargs):
"""
RegNetY-6.4GF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_regnet(channels_init=112, channels_slope=33.22, channels_mult=2.27, depth=25, groups=72, use_se=True,
model_name="regnety064", **kwargs)
def regnety080(**kwargs):
"""
RegNetY-8.0GF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_regnet(channels_init=192, channels_slope=76.82, channels_mult=2.19, depth=17, groups=56, use_se=True,
model_name="regnety080", **kwargs)
def regnety120(**kwargs):
"""
RegNetY-12GF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_regnet(channels_init=168, channels_slope=73.36, channels_mult=2.37, depth=19, groups=112, use_se=True,
model_name="regnety120", **kwargs)
def regnety160(**kwargs):
"""
RegNetY-16GF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_regnet(channels_init=200, channels_slope=106.23, channels_mult=2.48, depth=18, groups=112, use_se=True,
model_name="regnety160", **kwargs)
def regnety320(**kwargs):
"""
RegNetY-32GF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_regnet(channels_init=232, channels_slope=115.89, channels_mult=2.53, depth=20, groups=232, use_se=True,
model_name="regnety320", **kwargs)
def regnetz002(**kwargs):
"""
RegNetZ-200MF experimental model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_regnet(channels_init=24, channels_slope=36.44, channels_mult=2.49, depth=13, groups=1,
model_name="regnetz002", **kwargs)
def regnetw002(**kwargs):
"""
RegNetW-200MF experimental model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_regnet(channels_init=24, channels_slope=36.44, channels_mult=2.49, depth=13, groups=1024,
model_name="regnetw002", **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
regnetx002,
regnetx004,
regnetx006,
regnetx008,
regnetx016,
regnetx032,
regnetx040,
regnetx064,
regnetx080,
regnetx120,
regnetx160,
regnetx320,
regnety002,
regnety004,
regnety006,
regnety008,
regnety016,
regnety032,
regnety040,
regnety064,
regnety080,
regnety120,
regnety160,
regnety320,
regnetz002,
regnetw002,
]
for model in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != regnetx002 or weight_count == 2684792)
assert (model != regnetx004 or weight_count == 5157512)
assert (model != regnetx006 or weight_count == 6196040)
assert (model != regnetx008 or weight_count == 7259656)
assert (model != regnetx016 or weight_count == 9190136)
assert (model != regnetx032 or weight_count == 15296552)
assert (model != regnetx040 or weight_count == 22118248)
assert (model != regnetx064 or weight_count == 26209256)
assert (model != regnetx080 or weight_count == 39572648)
assert (model != regnetx120 or weight_count == 46106056)
assert (model != regnetx160 or weight_count == 54278536)
assert (model != regnetx320 or weight_count == 107811560)
assert (model != regnety002 or weight_count == 3162996)
assert (model != regnety004 or weight_count == 4344144)
assert (model != regnety006 or weight_count == 6055160)
assert (model != regnety008 or weight_count == 6263168)
assert (model != regnety016 or weight_count == 11202430)
assert (model != regnety032 or weight_count == 19436338)
assert (model != regnety040 or weight_count == 20646656)
assert (model != regnety064 or weight_count == 30583252)
assert (model != regnety080 or weight_count == 39180068)
assert (model != regnety120 or weight_count == 51822544)
assert (model != regnety160 or weight_count == 83590140)
assert (model != regnety320 or weight_count == 145046770)
assert (model != regnetz002 or weight_count == 2479160)
assert (model != regnetw002 or weight_count == 11846648)
batch = 14
size = 224
x = mx.nd.zeros((batch, 3, size, size), ctx=ctx)
y = net(x)
assert (y.shape == (batch, 1000))
if __name__ == "__main__":
_test()
| 30,188 | 34.896552 | 118 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/icnet.py | """
ICNet for image segmentation, implemented in Gluon.
Original paper: 'ICNet for Real-Time Semantic Segmentation on High-Resolution Images,'
https://arxiv.org/abs/1704.08545.
"""
__all__ = ['ICNet', 'icnet_resnetd50b_cityscapes']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1, conv1x1_block, conv3x3_block, InterpolationBlock, MultiOutputSequential
from .pspnet import PyramidPooling
from .resnetd import resnetd50b
class ICInitBlock(HybridBlock):
"""
ICNet specific initial block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
"""
def __init__(self,
in_channels,
out_channels,
bn_use_global_stats=False,
bn_cudnn_off=False,
**kwargs):
super(ICInitBlock, self).__init__(**kwargs)
mid_channels = out_channels // 2
with self.name_scope():
self.conv1 = conv3x3_block(
in_channels=in_channels,
out_channels=mid_channels,
strides=2,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off)
self.conv2 = conv3x3_block(
in_channels=mid_channels,
out_channels=mid_channels,
strides=2,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off)
self.conv3 = conv3x3_block(
in_channels=mid_channels,
out_channels=out_channels,
strides=2,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off)
    def hybrid_forward(self, F, x):
        # Three stride-2 convolutions downsample the input by a factor of 8.
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        return x
class PSPBlock(HybridBlock):
"""
ICNet specific PSPNet reduced head block.
Parameters:
----------
in_channels : int
Number of input channels.
upscale_out_size : tuple of 2 int
Spatial size of the input tensor for the bilinear upsampling operation.
bottleneck_factor : int
Bottleneck factor.
"""
def __init__(self,
in_channels,
upscale_out_size,
bottleneck_factor,
**kwargs):
super(PSPBlock, self).__init__(**kwargs)
assert (in_channels % bottleneck_factor == 0)
mid_channels = in_channels // bottleneck_factor
with self.name_scope():
self.pool = PyramidPooling(
in_channels=in_channels,
upscale_out_size=upscale_out_size)
            # PyramidPooling concatenates its input with four pooled branches
            # (each reduced to in_channels // 4), doubling the channel count.
            self.conv = conv3x3_block(
                in_channels=(2 * in_channels),
                out_channels=mid_channels)
self.dropout = nn.Dropout(rate=0.1)
def hybrid_forward(self, F, x):
x = self.pool(x)
x = self.conv(x)
x = self.dropout(x)
return x
class CFFBlock(HybridBlock):
"""
Cascade Feature Fusion block.
Parameters:
----------
in_channels_low : int
Number of input channels (low input).
    in_channels_high : int
        Number of input channels (high input).
out_channels : int
Number of output channels.
out_size : tuple of two ints
Spatial size of the expected output image.
classes : int
Number of classification classes.
"""
def __init__(self,
in_channels_low,
in_channels_high,
out_channels,
out_size,
classes,
**kwargs):
super(CFFBlock, self).__init__(**kwargs)
with self.name_scope():
self.up = InterpolationBlock(
scale_factor=2,
out_size=out_size)
self.conv_low = conv3x3_block(
in_channels=in_channels_low,
out_channels=out_channels,
padding=2,
dilation=2,
activation=None)
            self.conv_high = conv1x1_block(
in_channels=in_channels_high,
out_channels=out_channels,
activation=None)
self.activ = nn.Activation("relu")
self.conv_cls = conv1x1(
in_channels=out_channels,
out_channels=classes)
    def hybrid_forward(self, F, xl, xh):
        xl = self.up(xl)
        xl = self.conv_low(xl)
        xh = self.conv_high(xh)
        x = xl + xh
        x = self.activ(x)
        # The auxiliary classifier taps the upsampled low-resolution branch.
        x_cls = self.conv_cls(xl)
        return x, x_cls
class ICHeadBlock(HybridBlock):
"""
ICNet head block.
Parameters:
----------
in_size : tuple of two ints
Spatial size of the expected output image.
classes : int
Number of classification classes.
"""
def __init__(self,
in_size,
classes,
**kwargs):
super(ICHeadBlock, self).__init__(**kwargs)
with self.name_scope():
self.cff_12 = CFFBlock(
in_channels_low=128,
in_channels_high=64,
out_channels=128,
classes=classes,
out_size=(in_size[0] // 8, in_size[1] // 8) if in_size is not None else None)
self.cff_24 = CFFBlock(
in_channels_low=256,
in_channels_high=256,
out_channels=128,
classes=classes,
out_size=(in_size[0] // 16, in_size[1] // 16) if in_size is not None else None)
self.up_x2 = InterpolationBlock(
scale_factor=2,
out_size=(in_size[0] // 4, in_size[1] // 4) if in_size is not None else None)
self.up_x8 = InterpolationBlock(
scale_factor=4,
out_size=in_size)
self.conv_cls = conv1x1(
in_channels=128,
out_channels=classes)
def hybrid_forward(self, F, x1, x2, x4):
outputs = []
x_cff_24, x_24_cls = self.cff_24(x4, x2)
outputs.append(x_24_cls)
x_cff_12, x_12_cls = self.cff_12(x_cff_24, x1)
outputs.append(x_12_cls)
up_x2 = self.up_x2(x_cff_12)
up_x2 = self.conv_cls(up_x2)
outputs.append(up_x2)
up_x8 = self.up_x8(up_x2)
outputs.append(up_x8)
# 1 -> 1/4 -> 1/8 -> 1/16
outputs.reverse()
return tuple(outputs)
class ICNet(HybridBlock):
"""
ICNet model from 'ICNet for Real-Time Semantic Segmentation on High-Resolution Images,'
https://arxiv.org/abs/1704.08545.
Parameters:
----------
backbones : tuple of nn.Sequential
Feature extractors.
backbones_out_channels : tuple of int
        Number of output channels from each feature extractor.
channels : tuple of int
Number of output channels for each branch.
aux : bool, default False
Whether to output an auxiliary result.
fixed_size : bool, default True
Whether to expect fixed spatial size of input image.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (480, 480)
Spatial size of the expected input image.
classes : int, default 21
Number of segmentation classes.
"""
def __init__(self,
backbones,
backbones_out_channels,
channels,
aux=False,
fixed_size=True,
in_channels=3,
in_size=(480, 480),
classes=21,
**kwargs):
super(ICNet, self).__init__(**kwargs)
assert (in_channels > 0)
assert ((in_size[0] % 8 == 0) and (in_size[1] % 8 == 0))
self.in_size = in_size
self.classes = classes
self.aux = aux
self.fixed_size = fixed_size
psp_pool_out_size = (self.in_size[0] // 32, self.in_size[1] // 32) if fixed_size else None
psp_head_out_channels = 512
with self.name_scope():
self.branch1 = ICInitBlock(
in_channels=in_channels,
out_channels=channels[0])
self.branch2 = MultiOutputSequential(prefix="")
self.branch2.add(InterpolationBlock(
scale_factor=2,
out_size=(in_size[0] // 2, in_size[1] // 2) if fixed_size else None,
up=False))
backbones[0].do_output = True
self.branch2.add(backbones[0])
self.branch2.add(InterpolationBlock(
scale_factor=2,
out_size=(in_size[0] // 32, in_size[1] // 32) if fixed_size else None,
up=False))
self.branch2.add(backbones[1])
self.branch2.add(PSPBlock(
in_channels=backbones_out_channels[1],
upscale_out_size=psp_pool_out_size,
bottleneck_factor=4))
self.branch2.add(conv1x1_block(
in_channels=psp_head_out_channels,
out_channels=channels[2]))
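            # branch2 is a MultiOutputSequential: backbones[0] has do_output
            # set, so its intermediate features also feed the middle-resolution
            # path through conv_y2 below.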
self.conv_y2 = conv1x1_block(
in_channels=backbones_out_channels[0],
out_channels=channels[1])
self.final_block = ICHeadBlock(
in_size=in_size if fixed_size else None,
classes=classes)
    def hybrid_forward(self, F, x):
        y1 = self.branch1(x)  # high-resolution branch (spatial detail)
        y3, y2 = self.branch2(x)  # shared backbone: low-res (y3) and mid-res (y2) features
        y2 = self.conv_y2(y2)
        x = self.final_block(y1, y2, y3)
if self.aux:
return x
else:
return x[0]
def get_icnet(backbones,
backbones_out_channels,
classes,
aux=False,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create ICNet model with specific parameters.
Parameters:
----------
backbones : tuple of nn.Sequential
Feature extractors.
backbones_out_channels : tuple of int
        Number of output channels from each feature extractor.
classes : int
Number of segmentation classes.
aux : bool, default False
Whether to output an auxiliary result.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
channels = (64, 256, 256)
backbones[0].multi_output = False
backbones[1].multi_output = False
net = ICNet(
backbones=backbones,
backbones_out_channels=backbones_out_channels,
channels=channels,
classes=classes,
aux=aux,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx,
ignore_extra=True)
return net
def icnet_resnetd50b_cityscapes(pretrained_backbone=False, classes=19, aux=True, **kwargs):
"""
ICNet model on the base of ResNet(D)-50b for Cityscapes from 'ICNet for Real-Time Semantic Segmentation on
High-Resolution Images,' https://arxiv.org/abs/1704.08545.
Parameters:
----------
pretrained_backbone : bool, default False
Whether to load the pretrained weights for feature extractor.
classes : int, default 19
Number of segmentation classes.
aux : bool, default True
Whether to output an auxiliary result.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
backbone = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=None).features[:-1]
backbones = (backbone[:3], backbone[3:])
backbones_out_channels = (512, 2048)
return get_icnet(backbones=backbones, backbones_out_channels=backbones_out_channels, classes=classes, aux=aux,
model_name="icnet_resnetd50b_cityscapes", **kwargs)
def _test():
import numpy as np
import mxnet as mx
in_size = (1024, 2048)
aux = False
pretrained = False
fixed_size = False
models = [
(icnet_resnetd50b_cityscapes, 19),
]
for model, classes in models:
net = model(pretrained=pretrained, in_size=in_size, fixed_size=fixed_size, aux=aux)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != icnet_resnetd50b_cityscapes or weight_count == 47489184)
x = mx.nd.zeros((1, 3, in_size[0], in_size[1]), ctx=ctx)
ys = net(x)
y = ys[0] if aux else ys
assert ((y.shape[0] == x.shape[0]) and (y.shape[1] == classes) and (y.shape[2] == x.shape[2]) and
(y.shape[3] == x.shape[3]))
if __name__ == "__main__":
_test()
| 14,177 | 31.668203 | 115 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/mobilenetb.py | """
MobileNet(B) with simplified depthwise separable convolution block for ImageNet-1K, implemented in Gluon.
Original paper: 'MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications,'
https://arxiv.org/abs/1704.04861.
"""
__all__ = ['mobilenetb_w1', 'mobilenetb_w3d4', 'mobilenetb_wd2', 'mobilenetb_wd4']
from .mobilenet import get_mobilenet
def mobilenetb_w1(**kwargs):
"""
1.0 MobileNet(B)-224 model with simplified depthwise separable convolution block from 'MobileNets: Efficient
Convolutional Neural Networks for Mobile Vision Applications,' https://arxiv.org/abs/1704.04861.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_mobilenet(width_scale=1.0, dws_simplified=True, model_name="mobilenetb_w1", **kwargs)
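# (Assumption, not re-derived from mobilenet.py here: `dws_simplified=True` is
# expected to collapse the per-convolution batch-norm/activation pairs of the
# standard depthwise-separable block into a single one, which is what the
# docstrings in this file mean by the "simplified" block.)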
def mobilenetb_w3d4(**kwargs):
"""
0.75 MobileNet(B)-224 model with simplified depthwise separable convolution block from 'MobileNets: Efficient
Convolutional Neural Networks for Mobile Vision Applications,' https://arxiv.org/abs/1704.04861.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_mobilenet(width_scale=0.75, dws_simplified=True, model_name="mobilenetb_w3d4", **kwargs)
def mobilenetb_wd2(**kwargs):
"""
0.5 MobileNet(B)-224 model with simplified depthwise separable convolution block from 'MobileNets: Efficient
Convolutional Neural Networks for Mobile Vision Applications,' https://arxiv.org/abs/1704.04861.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_mobilenet(width_scale=0.5, dws_simplified=True, model_name="mobilenetb_wd2", **kwargs)
def mobilenetb_wd4(**kwargs):
"""
0.25 MobileNet(B)-224 model with simplified depthwise separable convolution block from 'MobileNets: Efficient
Convolutional Neural Networks for Mobile Vision Applications,' https://arxiv.org/abs/1704.04861.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_mobilenet(width_scale=0.25, dws_simplified=True, model_name="mobilenetb_wd4", **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
mobilenetb_w1,
mobilenetb_w3d4,
mobilenetb_wd2,
mobilenetb_wd4,
]
for model in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != mobilenetb_w1 or weight_count == 4222056)
assert (model != mobilenetb_w3d4 or weight_count == 2578120)
assert (model != mobilenetb_wd2 or weight_count == 1326632)
assert (model != mobilenetb_wd4 or weight_count == 467592)
x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
y = net(x)
assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
| 4,189 | 33.916667 | 113 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/shakedropresnet_cifar.py | """
ShakeDrop-ResNet for CIFAR/SVHN, implemented in Gluon.
Original paper: 'ShakeDrop Regularization for Deep Residual Learning,' https://arxiv.org/abs/1802.02375.
"""
__all__ = ['CIFARShakeDropResNet', 'shakedropresnet20_cifar10', 'shakedropresnet20_cifar100', 'shakedropresnet20_svhn']
import os
import numpy as np
import mxnet as mx
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1_block, conv3x3_block
from .resnet import ResBlock, ResBottleneck
class ShakeDrop(mx.autograd.Function):
"""
ShakeDrop function.
Parameters:
----------
p : float
ShakeDrop specific probability (of life) for Bernoulli random variable.
"""
def __init__(self, p):
super(ShakeDrop, self).__init__()
self.p = p
def forward(self, x):
if mx.autograd.is_training():
b = np.random.binomial(n=1, p=self.p)
alpha = mx.nd.random.uniform_like(x.slice(begin=(None, 0, 0, 0), end=(None, 1, 1, 1)), low=-1.0, high=1.0)
y = mx.nd.broadcast_mul(b + alpha - b * alpha, x)
self.save_for_backward(b)
else:
y = self.p * x
return y
def backward(self, dy):
b, = self.saved_tensors
beta = mx.nd.random.uniform_like(dy.slice(begin=(None, 0, 0, 0), end=(None, 1, 1, 1)), low=0.0, high=1.0)
return mx.nd.broadcast_mul(b + beta - b * beta, dy)
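# A numeric sketch of the ShakeDrop rule above: in training,
# y = (b + alpha - b * alpha) * x with b ~ Bernoulli(p) and alpha ~ U(-1, 1), so
# b = 1 passes x through unchanged (1 + alpha - alpha = 1) while b = 0 scales it
# by alpha; at inference y = p * x, i.e. the expectation, since
# E[b + alpha * (1 - b)] = p + E[alpha] * (1 - p) = p, given E[alpha] = 0.
# The backward pass replaces alpha with an independent beta ~ U(0, 1).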
class ShakeDropResUnit(HybridBlock):
"""
ShakeDrop-ResNet unit with residual connection.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
life_prob : float
Residual branch life probability.
"""
def __init__(self,
in_channels,
out_channels,
strides,
bn_use_global_stats,
bottleneck,
life_prob,
**kwargs):
super(ShakeDropResUnit, self).__init__(**kwargs)
self.life_prob = life_prob
self.resize_identity = (in_channels != out_channels) or (strides != 1)
body_class = ResBottleneck if bottleneck else ResBlock
with self.name_scope():
self.body = body_class(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats)
if self.resize_identity:
self.identity_conv = conv1x1_block(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats,
activation=None)
self.activ = nn.Activation("relu")
# self.shake_drop = ShakeDrop(self.life_prob)
def hybrid_forward(self, F, x):
if self.resize_identity:
identity = self.identity_conv(x)
else:
identity = x
x = self.body(x)
x = ShakeDrop(self.life_prob)(x) + identity
# x = self.shake_drop(x) + identity
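        # (A fresh ShakeDrop instance is created on every call, presumably
        # because mx.autograd.Function keeps per-call state via
        # save_for_backward and is not safely reusable, hence the commented-out
        # cached variant above.)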
x = self.activ(x)
return x
class CIFARShakeDropResNet(HybridBlock):
"""
ShakeDrop-ResNet model for CIFAR from 'ShakeDrop Regularization for Deep Residual Learning,'
https://arxiv.org/abs/1802.02375.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
life_probs : list of float
Residual branch life probability for each unit.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (32, 32)
Spatial size of the expected input image.
classes : int, default 10
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
bottleneck,
life_probs,
bn_use_global_stats=False,
in_channels=3,
in_size=(32, 32),
classes=10,
**kwargs):
super(CIFARShakeDropResNet, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
self.features.add(conv3x3_block(
in_channels=in_channels,
out_channels=init_block_channels,
bn_use_global_stats=bn_use_global_stats))
in_channels = init_block_channels
k = 0
for i, channels_per_stage in enumerate(channels):
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
with stage.name_scope():
for j, out_channels in enumerate(channels_per_stage):
strides = 2 if (j == 0) and (i != 0) else 1
stage.add(ShakeDropResUnit(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats,
bottleneck=bottleneck,
life_prob=life_probs[k]))
in_channels = out_channels
k += 1
self.features.add(stage)
self.features.add(nn.AvgPool2D(
pool_size=8,
strides=1))
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
self.output.add(nn.Dense(
units=classes,
in_units=in_channels))
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_shakedropresnet_cifar(classes,
blocks,
bottleneck,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create ShakeDrop-ResNet model for CIFAR with specific parameters.
Parameters:
----------
classes : int
Number of classification classes.
blocks : int
Number of blocks.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
assert (classes in [10, 100])
if bottleneck:
assert ((blocks - 2) % 9 == 0)
layers = [(blocks - 2) // 9] * 3
else:
assert ((blocks - 2) % 6 == 0)
layers = [(blocks - 2) // 6] * 3
init_block_channels = 16
channels_per_layers = [16, 32, 64]
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
if bottleneck:
channels = [[cij * 4 for cij in ci] for ci in channels]
total_layers = sum(layers)
final_death_prob = 0.5
life_probs = [1.0 - float(i + 1) / float(total_layers) * final_death_prob for i in range(total_layers)]
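    # Linear decay of the death rate: e.g. for blocks=20 (9 plain units in
    # total) life_probs runs from 1 - 1/9 * 0.5 ~= 0.944 for the first unit
    # down to exactly 1 - 0.5 = 0.5 for the last one.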
net = CIFARShakeDropResNet(
channels=channels,
init_block_channels=init_block_channels,
bottleneck=bottleneck,
life_probs=life_probs,
classes=classes,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def shakedropresnet20_cifar10(classes=10, **kwargs):
"""
ShakeDrop-ResNet-20 model for CIFAR-10 from 'ShakeDrop Regularization for Deep Residual Learning,'
https://arxiv.org/abs/1802.02375.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_shakedropresnet_cifar(classes=classes, blocks=20, bottleneck=False,
model_name="shakedropresnet20_cifar10", **kwargs)
def shakedropresnet20_cifar100(classes=100, **kwargs):
"""
ShakeDrop-ResNet-20 model for CIFAR-100 from 'ShakeDrop Regularization for Deep Residual Learning,'
https://arxiv.org/abs/1802.02375.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_shakedropresnet_cifar(classes=classes, blocks=20, bottleneck=False,
model_name="shakedropresnet20_cifar100", **kwargs)
def shakedropresnet20_svhn(classes=10, **kwargs):
"""
ShakeDrop-ResNet-20 model for SVHN from 'ShakeDrop Regularization for Deep Residual Learning,'
https://arxiv.org/abs/1802.02375.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_shakedropresnet_cifar(classes=classes, blocks=20, bottleneck=False,
model_name="shakedropresnet20_svhn", **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
(shakedropresnet20_cifar10, 10),
(shakedropresnet20_cifar100, 100),
(shakedropresnet20_svhn, 10),
]
for model, classes in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != shakedropresnet20_cifar10 or weight_count == 272474)
assert (model != shakedropresnet20_cifar100 or weight_count == 278324)
assert (model != shakedropresnet20_svhn or weight_count == 272474)
x = mx.nd.zeros((14, 3, 32, 32), ctx=ctx)
# y = net(x)
with mx.autograd.record():
y = net(x)
y.backward()
assert (y.shape == (14, classes))
if __name__ == "__main__":
_test()
| 12,306 | 33.570225 | 119 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/inceptionresnetv1.py | """
InceptionResNetV1 for ImageNet-1K, implemented in Gluon.
Original paper: 'Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning,'
https://arxiv.org/abs/1602.07261.
"""
__all__ = ['InceptionResNetV1', 'inceptionresnetv1', 'InceptionAUnit', 'InceptionBUnit', 'InceptionCUnit',
'ReductionAUnit', 'ReductionBUnit']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from mxnet.gluon.contrib.nn import HybridConcurrent
from .common import conv1x1, conv1x1_block, conv3x3_block, BatchNormExtra
from .inceptionv3 import MaxPoolBranch, Conv1x1Branch, ConvSeqBranch
class InceptionAUnit(HybridBlock):
"""
InceptionResNetV1 type Inception-A unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels_list : list of int
List for numbers of output channels.
bn_epsilon : float
Small float added to variance in Batch norm.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels,
out_channels_list,
bn_epsilon,
bn_use_global_stats,
**kwargs):
super(InceptionAUnit, self).__init__(**kwargs)
self.scale = 0.17
with self.name_scope():
self.branches = HybridConcurrent(axis=1, prefix="")
self.branches.add(Conv1x1Branch(
in_channels=in_channels,
out_channels=out_channels_list[0],
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats))
self.branches.add(ConvSeqBranch(
in_channels=in_channels,
out_channels_list=out_channels_list[1:3],
kernel_size_list=(1, 3),
strides_list=(1, 1),
padding_list=(0, 1),
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats))
self.branches.add(ConvSeqBranch(
in_channels=in_channels,
out_channels_list=out_channels_list[3:6],
kernel_size_list=(1, 3, 3),
strides_list=(1, 1, 1),
padding_list=(0, 1, 1),
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats))
conv_in_channels = out_channels_list[0] + out_channels_list[2] + out_channels_list[5]
self.conv = conv1x1(
in_channels=conv_in_channels,
out_channels=in_channels,
use_bias=True)
self.activ = nn.Activation("relu")
def hybrid_forward(self, F, x):
identity = x
x = self.branches(x)
x = self.conv(x)
x = self.scale * x + identity
x = self.activ(x)
return x
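# All three residual Inception units in this file follow the same pattern:
# x_out = act(identity + scale * conv1x1(concat(branches))), where the 1x1
# convolution projects the concatenated branch channels back to `in_channels`
# so the scaled residual can be added elementwise (the final C unit disables
# both the scaling and the activation).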
class InceptionBUnit(HybridBlock):
"""
InceptionResNetV1 type Inception-B unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels_list : list of int
List for numbers of output channels.
bn_epsilon : float
Small float added to variance in Batch norm.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels,
out_channels_list,
bn_epsilon,
bn_use_global_stats,
**kwargs):
super(InceptionBUnit, self).__init__(**kwargs)
self.scale = 0.10
with self.name_scope():
self.branches = HybridConcurrent(axis=1, prefix="")
self.branches.add(Conv1x1Branch(
in_channels=in_channels,
out_channels=out_channels_list[0],
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats))
self.branches.add(ConvSeqBranch(
in_channels=in_channels,
out_channels_list=out_channels_list[1:4],
kernel_size_list=(1, (1, 7), (7, 1)),
strides_list=(1, 1, 1),
padding_list=(0, (0, 3), (3, 0)),
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats))
conv_in_channels = out_channels_list[0] + out_channels_list[3]
self.conv = conv1x1(
in_channels=conv_in_channels,
out_channels=in_channels,
use_bias=True)
self.activ = nn.Activation("relu")
def hybrid_forward(self, F, x):
identity = x
x = self.branches(x)
x = self.conv(x)
x = self.scale * x + identity
x = self.activ(x)
return x
class InceptionCUnit(HybridBlock):
"""
InceptionResNetV1 type Inception-C unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels_list : list of int
List for numbers of output channels.
bn_epsilon : float
Small float added to variance in Batch norm.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
scale : float, default 0.2
Scale value for residual branch.
activate : bool, default True
Whether activate the convolution block.
"""
def __init__(self,
in_channels,
out_channels_list,
bn_epsilon,
bn_use_global_stats=False,
scale=0.2,
activate=True,
**kwargs):
super(InceptionCUnit, self).__init__(**kwargs)
self.activate = activate
self.scale = scale
with self.name_scope():
self.branches = HybridConcurrent(axis=1, prefix="")
self.branches.add(Conv1x1Branch(
in_channels=in_channels,
out_channels=out_channels_list[0],
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats))
self.branches.add(ConvSeqBranch(
in_channels=in_channels,
out_channels_list=out_channels_list[1:4],
kernel_size_list=(1, (1, 3), (3, 1)),
strides_list=(1, 1, 1),
padding_list=(0, (0, 1), (1, 0)),
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats))
conv_in_channels = out_channels_list[0] + out_channels_list[3]
self.conv = conv1x1(
in_channels=conv_in_channels,
out_channels=in_channels,
use_bias=True)
if self.activate:
self.activ = nn.Activation("relu")
def hybrid_forward(self, F, x):
identity = x
x = self.branches(x)
x = self.conv(x)
x = self.scale * x + identity
if self.activate:
x = self.activ(x)
return x
class ReductionAUnit(HybridBlock):
"""
InceptionResNetV1 type Reduction-A unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels_list : list of int
List for numbers of output channels.
bn_epsilon : float
Small float added to variance in Batch norm.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels,
out_channels_list,
bn_epsilon,
bn_use_global_stats,
**kwargs):
super(ReductionAUnit, self).__init__(**kwargs)
with self.name_scope():
self.branches = HybridConcurrent(axis=1, prefix="")
self.branches.add(ConvSeqBranch(
in_channels=in_channels,
out_channels_list=out_channels_list[0:1],
kernel_size_list=(3,),
strides_list=(2,),
padding_list=(0,),
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats))
self.branches.add(ConvSeqBranch(
in_channels=in_channels,
out_channels_list=out_channels_list[1:4],
kernel_size_list=(1, 3, 3),
strides_list=(1, 1, 2),
padding_list=(0, 1, 0),
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats))
self.branches.add(MaxPoolBranch())
def hybrid_forward(self, F, x):
x = self.branches(x)
return x
class ReductionBUnit(HybridBlock):
"""
InceptionResNetV1 type Reduction-B unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels_list : list of int
List for numbers of output channels.
bn_epsilon : float
Small float added to variance in Batch norm.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels,
out_channels_list,
bn_epsilon,
bn_use_global_stats,
**kwargs):
super(ReductionBUnit, self).__init__(**kwargs)
with self.name_scope():
self.branches = HybridConcurrent(axis=1, prefix="")
self.branches.add(ConvSeqBranch(
in_channels=in_channels,
out_channels_list=out_channels_list[0:2],
kernel_size_list=(1, 3),
strides_list=(1, 2),
padding_list=(0, 0),
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats))
self.branches.add(ConvSeqBranch(
in_channels=in_channels,
out_channels_list=out_channels_list[2:4],
kernel_size_list=(1, 3),
strides_list=(1, 2),
padding_list=(0, 0),
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats))
self.branches.add(ConvSeqBranch(
in_channels=in_channels,
out_channels_list=out_channels_list[4:7],
kernel_size_list=(1, 3, 3),
strides_list=(1, 1, 2),
padding_list=(0, 1, 0),
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats))
self.branches.add(MaxPoolBranch())
def hybrid_forward(self, F, x):
x = self.branches(x)
return x
class InceptInitBlock(HybridBlock):
"""
InceptionResNetV1 specific initial block.
Parameters:
----------
in_channels : int
Number of input channels.
bn_epsilon : float
Small float added to variance in Batch norm.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels,
bn_epsilon,
bn_use_global_stats,
**kwargs):
super(InceptInitBlock, self).__init__(**kwargs)
with self.name_scope():
self.conv1 = conv3x3_block(
in_channels=in_channels,
out_channels=32,
strides=2,
padding=0,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats)
self.conv2 = conv3x3_block(
in_channels=32,
out_channels=32,
strides=1,
padding=0,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats)
self.conv3 = conv3x3_block(
in_channels=32,
out_channels=64,
strides=1,
padding=1,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats)
self.pool = nn.MaxPool2D(
pool_size=3,
strides=2,
padding=0)
self.conv4 = conv1x1_block(
in_channels=64,
out_channels=80,
strides=1,
padding=0,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats)
self.conv5 = conv3x3_block(
in_channels=80,
out_channels=192,
strides=1,
padding=0,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats)
self.conv6 = conv3x3_block(
in_channels=192,
out_channels=256,
strides=2,
padding=0,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats)
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
x = self.pool(x)
x = self.conv4(x)
x = self.conv5(x)
x = self.conv6(x)
return x
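# Shape sketch for the stem above with a 3 x 299 x 299 input: conv1 (s2, p0)
# -> 32 x 149 x 149, conv2 (p0) -> 32 x 147 x 147, conv3 (p1) -> 64 x 147 x 147,
# pool (s2, p0) -> 64 x 73 x 73, conv4 -> 80 x 73 x 73, conv5 (p0)
# -> 192 x 71 x 71, conv6 (s2, p0) -> 256 x 35 x 35.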
class InceptHead(HybridBlock):
"""
InceptionResNetV1 specific classification block.
Parameters:
----------
in_channels : int
Number of input channels.
bn_epsilon : float
Small float added to variance in Batch norm.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
dropout_rate : float
Fraction of the input units to drop. Must be a number between 0 and 1.
classes : int
Number of classification classes.
"""
def __init__(self,
in_channels,
bn_epsilon,
bn_use_global_stats,
dropout_rate,
classes,
**kwargs):
super(InceptHead, self).__init__(**kwargs)
self.use_dropout = (dropout_rate != 0.0)
with self.name_scope():
self.flatten = nn.Flatten()
if self.use_dropout:
self.dropout = nn.Dropout(rate=dropout_rate)
self.fc1 = nn.Dense(
units=512,
use_bias=False,
in_units=in_channels)
self.bn = BatchNormExtra(
in_channels=512,
epsilon=bn_epsilon,
use_global_stats=bn_use_global_stats)
self.fc2 = nn.Dense(
units=classes,
in_units=512)
def hybrid_forward(self, F, x):
x = self.flatten(x)
if self.use_dropout:
x = self.dropout(x)
x = self.fc1(x)
x = self.bn(x)
x = self.fc2(x)
return x
class InceptionResNetV1(HybridBlock):
"""
InceptionResNetV1 model from 'Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning,'
https://arxiv.org/abs/1602.07261.
Parameters:
----------
dropout_rate : float, default 0.0
Fraction of the input units to drop. Must be a number between 0 and 1.
bn_epsilon : float, default 1e-5
Small float added to variance in Batch norm.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (299, 299)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
dropout_rate=0.0,
bn_epsilon=1e-5,
bn_use_global_stats=False,
in_channels=3,
in_size=(299, 299),
classes=1000,
**kwargs):
super(InceptionResNetV1, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
layers = [5, 11, 7]
in_channels_list = [256, 896, 1792]
normal_out_channels_list = [[32, 32, 32, 32, 32, 32], [128, 128, 128, 128], [192, 192, 192, 192]]
reduction_out_channels_list = [[384, 192, 192, 256], [256, 384, 256, 256, 256, 256, 256]]
normal_units = [InceptionAUnit, InceptionBUnit, InceptionCUnit]
reduction_units = [ReductionAUnit, ReductionBUnit]
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
self.features.add(InceptInitBlock(
in_channels=in_channels,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats))
in_channels = in_channels_list[0]
for i, layers_per_stage in enumerate(layers):
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
with stage.name_scope():
for j in range(layers_per_stage):
if (j == 0) and (i != 0):
unit = reduction_units[i - 1]
out_channels_list_per_stage = reduction_out_channels_list[i - 1]
else:
unit = normal_units[i]
out_channels_list_per_stage = normal_out_channels_list[i]
if (i == len(layers) - 1) and (j == layers_per_stage - 1):
unit_kwargs = {"scale": 1.0, "activate": False}
else:
unit_kwargs = {}
stage.add(unit(
in_channels=in_channels,
out_channels_list=out_channels_list_per_stage,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
**unit_kwargs))
if (j == 0) and (i != 0):
in_channels = in_channels_list[i]
self.features.add(stage)
self.features.add(nn.AvgPool2D(
pool_size=8,
strides=1))
self.output = InceptHead(
in_channels=in_channels,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
dropout_rate=dropout_rate,
classes=classes)
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_inceptionresnetv1(model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create InceptionResNetV1 model with specific parameters.
Parameters:
----------
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
net = InceptionResNetV1(**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def inceptionresnetv1(**kwargs):
"""
InceptionResNetV1 model from 'Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning,'
https://arxiv.org/abs/1602.07261.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_inceptionresnetv1(model_name="inceptionresnetv1", bn_epsilon=1e-3, **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
inceptionresnetv1,
]
for model in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != inceptionresnetv1 or weight_count == 23995624)
x = mx.nd.zeros((1, 3, 299, 299), ctx=ctx)
y = net(x)
assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
| 21,298 | 34.204959 | 117 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/scnet.py | """
SCNet for ImageNet-1K, implemented in Gluon.
Original paper: 'Improving Convolutional Networks with Self-Calibrated Convolutions,'
http://mftp.mmcheng.net/Papers/20cvprSCNet.pdf.
"""
__all__ = ['SCNet', 'scnet50', 'scnet101', 'scneta50', 'scneta101']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1_block, conv3x3_block, InterpolationBlock
from .resnet import ResInitBlock
from .senet import SEInitBlock
from .resnesta import ResNeStADownBlock
class ScDownBlock(HybridBlock):
"""
SCNet specific convolutional downscale block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
    pool_size : int or tuple/list of 2 int, default 2
Size of the average pooling windows.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
"""
def __init__(self,
in_channels,
out_channels,
pool_size=2,
bn_use_global_stats=False,
bn_cudnn_off=False,
**kwargs):
super(ScDownBlock, self).__init__(**kwargs)
with self.name_scope():
self.pool = nn.AvgPool2D(
pool_size=pool_size,
strides=pool_size)
self.conv = conv3x3_block(
in_channels=in_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
activation=None)
def hybrid_forward(self, F, x):
x = self.pool(x)
x = self.conv(x)
return x
class ScConv(HybridBlock):
"""
Self-calibrated convolutional block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
scale_factor : int
Scale factor.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
in_size : tuple of 2 int, default None
Spatial size of output image for the upsampling operation.
"""
def __init__(self,
in_channels,
out_channels,
strides,
scale_factor,
bn_use_global_stats=False,
bn_cudnn_off=False,
in_size=None,
**kwargs):
super(ScConv, self).__init__(**kwargs)
self.in_size = in_size
with self.name_scope():
self.down = ScDownBlock(
in_channels=in_channels,
out_channels=out_channels,
pool_size=scale_factor,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off)
self.up = InterpolationBlock(scale_factor=scale_factor, bilinear=False)
self.sigmoid = nn.Activation("sigmoid")
self.conv1 = conv3x3_block(
in_channels=in_channels,
out_channels=in_channels,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
activation=None)
self.conv2 = conv3x3_block(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off)
def hybrid_forward(self, F, x):
in_size = self.in_size if self.in_size is not None else x.shape[2:]
w = self.sigmoid(x + self.up(self.down(x), in_size))
x = self.conv1(x) * w
x = self.conv2(x)
return x
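# Self-calibration above in formula form:
#   w = sigmoid(x + Up(Conv_bn(AvgPool_r(x)))),   y = Conv2(Conv1(x) * w),
# where r is `scale_factor`: the attention map w is computed at 1/r resolution
# and upsampled (non-bilinear here, per `bilinear=False`) back to the size of x.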
class ScBottleneck(HybridBlock):
"""
SCNet specific bottleneck block for residual path in SCNet unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
bottleneck_factor : int, default 4
Bottleneck factor.
scale_factor : int, default 4
Scale factor.
avg_downsample : bool, default False
Whether to use average downsampling.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
in_size : tuple of 2 int, default None
Spatial size of output image for the upsampling operation.
"""
def __init__(self,
in_channels,
out_channels,
strides,
bottleneck_factor=4,
scale_factor=4,
avg_downsample=False,
bn_use_global_stats=False,
bn_cudnn_off=False,
in_size=None,
**kwargs):
super(ScBottleneck, self).__init__(**kwargs)
self.avg_resize = (strides > 1) and avg_downsample
mid_channels = out_channels // bottleneck_factor // 2
with self.name_scope():
self.conv1a = conv1x1_block(
in_channels=in_channels,
out_channels=mid_channels,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off)
self.conv2a = conv3x3_block(
in_channels=mid_channels,
out_channels=mid_channels,
strides=(1 if self.avg_resize else strides),
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off)
self.conv1b = conv1x1_block(
in_channels=in_channels,
out_channels=mid_channels)
self.conv2b = ScConv(
in_channels=mid_channels,
out_channels=mid_channels,
strides=(1 if self.avg_resize else strides),
scale_factor=scale_factor,
in_size=in_size)
if self.avg_resize:
self.pool = nn.AvgPool2D(
pool_size=3,
strides=strides,
padding=1)
self.conv3 = conv1x1_block(
in_channels=(2 * mid_channels),
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
activation=None)
def hybrid_forward(self, F, x):
y = self.conv1a(x)
y = self.conv2a(y)
z = self.conv1b(x)
z = self.conv2b(z)
if self.avg_resize:
y = self.pool(y)
z = self.pool(z)
x = F.concat(y, z, dim=1)
x = self.conv3(x)
return x
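# Channel math for the default factors above: with out_channels = 256,
# mid_channels = 256 // 4 // 2 = 32, so each of the two parallel branches runs
# at width 32; their concatenation (64 channels) is expanded back to 256 by
# conv3.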
class ScUnit(HybridBlock):
"""
SCNet unit with residual connection.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
avg_downsample : bool, default False
Whether to use average downsampling.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
in_size : tuple of 2 int, default None
Spatial size of output image for the upsampling operation.
"""
def __init__(self,
in_channels,
out_channels,
strides,
avg_downsample=False,
bn_use_global_stats=False,
bn_cudnn_off=False,
in_size=None,
**kwargs):
super(ScUnit, self).__init__(**kwargs)
self.resize_identity = (in_channels != out_channels) or (strides != 1)
with self.name_scope():
self.body = ScBottleneck(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
avg_downsample=avg_downsample,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
in_size=in_size)
if self.resize_identity:
if avg_downsample:
self.identity_block = ResNeStADownBlock(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off)
else:
self.identity_block = conv1x1_block(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
activation=None)
self.activ = nn.Activation("relu")
def hybrid_forward(self, F, x):
if self.resize_identity:
identity = self.identity_block(x)
else:
identity = x
x = self.body(x)
x = x + identity
x = self.activ(x)
return x
class SCNet(HybridBlock):
"""
SCNet model from 'Improving Convolutional Networks with Self-Calibrated Convolutions,'
http://mftp.mmcheng.net/Papers/20cvprSCNet.pdf.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
se_init_block : bool, default False
SENet-like initial block.
avg_downsample : bool, default False
Whether to use average downsampling.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
fixed_size : bool, default True
Whether to expect fixed spatial size of input image.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
se_init_block=False,
avg_downsample=False,
bn_use_global_stats=False,
bn_cudnn_off=False,
fixed_size=True,
in_channels=3,
in_size=(224, 224),
classes=1000,
**kwargs):
super(SCNet, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
self.fixed_size = fixed_size
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
init_block_class = SEInitBlock if se_init_block else ResInitBlock
self.features.add(init_block_class(
in_channels=in_channels,
out_channels=init_block_channels,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off))
in_channels = init_block_channels
in_size = (in_size[0] // 4, in_size[1] // 4)
for i, channels_per_stage in enumerate(channels):
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
with stage.name_scope():
for j, out_channels in enumerate(channels_per_stage):
strides = 2 if (j == 0) and (i != 0) else 1
stage.add(ScUnit(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
avg_downsample=avg_downsample,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
in_size=in_size))
in_channels = out_channels
if strides > 1:
in_size = (in_size[0] // 2, in_size[1] // 2) if fixed_size else None
self.features.add(stage)
self.features.add(nn.GlobalAvgPool2D())
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
self.output.add(nn.Dense(
units=classes,
in_units=in_channels))
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
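# `SCNet` threads the running spatial size (`in_size`) into every ScUnit so
# that, with fixed_size=True, the upsampling inside the self-calibration branch
# can use a static output size instead of reading x.shape at run time, which is
# presumably what allows net.hybridize() in _test below to work.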
def get_scnet(blocks,
width_scale=1.0,
se_init_block=False,
avg_downsample=False,
init_block_channels_scale=1,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create SCNet model with specific parameters.
Parameters:
----------
blocks : int
Number of blocks.
width_scale : float, default 1.0
Scale factor for width of layers.
se_init_block : bool, default False
SENet-like initial block.
avg_downsample : bool, default False
Whether to use average downsampling.
init_block_channels_scale : int, default 1
Scale factor for number of output channels in the initial unit.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
if blocks == 14:
layers = [1, 1, 1, 1]
elif blocks == 26:
layers = [2, 2, 2, 2]
elif blocks == 38:
layers = [3, 3, 3, 3]
elif blocks == 50:
layers = [3, 4, 6, 3]
elif blocks == 101:
layers = [3, 4, 23, 3]
elif blocks == 152:
layers = [3, 8, 36, 3]
elif blocks == 200:
layers = [3, 24, 36, 3]
else:
raise ValueError("Unsupported SCNet with number of blocks: {}".format(blocks))
assert (sum(layers) * 3 + 2 == blocks)
init_block_channels = 64
channels_per_layers = [64, 128, 256, 512]
init_block_channels *= init_block_channels_scale
bottleneck_factor = 4
channels_per_layers = [ci * bottleneck_factor for ci in channels_per_layers]
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
if width_scale != 1.0:
channels = [[int(cij * width_scale) if (i != len(channels) - 1) or (j != len(ci) - 1) else cij
for j, cij in enumerate(ci)] for i, ci in enumerate(channels)]
init_block_channels = int(init_block_channels * width_scale)
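    # Note: the guard above keeps the very last unit at its original width when
    # width_scale != 1.0, so the classifier input size stays identical across
    # width-scaled variants.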
net = SCNet(
channels=channels,
init_block_channels=init_block_channels,
se_init_block=se_init_block,
avg_downsample=avg_downsample,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def scnet50(**kwargs):
"""
SCNet-50 model from 'Improving Convolutional Networks with Self-Calibrated Convolutions,'
http://mftp.mmcheng.net/Papers/20cvprSCNet.pdf.
Parameters:
----------
pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_scnet(blocks=50, model_name="scnet50", **kwargs)
def scnet101(**kwargs):
"""
SCNet-101 model from 'Improving Convolutional Networks with Self-Calibrated Convolutions,'
http://mftp.mmcheng.net/Papers/20cvprSCNet.pdf.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_scnet(blocks=101, model_name="scnet101", **kwargs)
def scneta50(**kwargs):
"""
    SCNet(A)-50 model (with average downsampling) from 'Improving Convolutional Networks with Self-Calibrated
    Convolutions,' http://mftp.mmcheng.net/Papers/20cvprSCNet.pdf.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_scnet(blocks=50, se_init_block=True, avg_downsample=True, model_name="scneta50", **kwargs)
def scneta101(**kwargs):
"""
    SCNet(A)-101 model (with average downsampling) from 'Improving Convolutional Networks with Self-Calibrated
    Convolutions,' http://mftp.mmcheng.net/Papers/20cvprSCNet.pdf.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_scnet(blocks=101, se_init_block=True, avg_downsample=True, init_block_channels_scale=2,
model_name="scneta101", **kwargs)
def _test():
import numpy as np
import mxnet as mx
fixed_size = True
pretrained = False
models = [
scnet50,
scnet101,
scneta50,
scneta101,
]
for model in models:
net = model(pretrained=pretrained, fixed_size=fixed_size)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != scnet50 or weight_count == 25564584)
assert (model != scnet101 or weight_count == 44565416)
assert (model != scneta50 or weight_count == 25583816)
assert (model != scneta101 or weight_count == 44689192)
batch = 1
x = mx.nd.random.normal(shape=(batch, 3, 224, 224), ctx=ctx)
y = net(x)
assert (y.shape == (batch, 1000))
if __name__ == "__main__":
_test()
| 19,878 | 33.814361 | 115 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/igcv3.py | """
IGCV3 for ImageNet-1K, implemented in Gluon.
Original paper: 'IGCV3: Interleaved Low-Rank Group Convolutions for Efficient Deep Neural Networks,'
https://arxiv.org/abs/1806.00178.
"""
__all__ = ['IGCV3', 'igcv3_w1', 'igcv3_w3d4', 'igcv3_wd2', 'igcv3_wd4']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1_block, conv3x3_block, dwconv3x3_block, ChannelShuffle, ReLU6
class InvResUnit(HybridBlock):
"""
So-called 'Inverted Residual Unit' layer.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the second convolution layer.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
expansion : bool
Whether do expansion of channels.
"""
def __init__(self,
in_channels,
out_channels,
strides,
bn_use_global_stats,
expansion,
**kwargs):
super(InvResUnit, self).__init__(**kwargs)
self.residual = (in_channels == out_channels) and (strides == 1)
mid_channels = in_channels * 6 if expansion else in_channels
groups = 2
with self.name_scope():
self.conv1 = conv1x1_block(
in_channels=in_channels,
out_channels=mid_channels,
groups=groups,
bn_use_global_stats=bn_use_global_stats,
activation=None)
self.c_shuffle = ChannelShuffle(
channels=mid_channels,
groups=groups)
self.conv2 = dwconv3x3_block(
in_channels=mid_channels,
out_channels=mid_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats,
activation=ReLU6())
self.conv3 = conv1x1_block(
in_channels=mid_channels,
out_channels=out_channels,
groups=groups,
bn_use_global_stats=bn_use_global_stats,
activation=None)
def hybrid_forward(self, F, x):
if self.residual:
identity = x
x = self.conv1(x)
x = self.c_shuffle(x)
x = self.conv2(x)
x = self.conv3(x)
if self.residual:
x = x + identity
return x
class IGCV3(HybridBlock):
"""
IGCV3 model from 'IGCV3: Interleaved Low-Rank Group Convolutions for Efficient Deep Neural Networks,'
https://arxiv.org/abs/1806.00178.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
final_block_channels : int
Number of output channels for the final block of the feature extractor.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
final_block_channels,
bn_use_global_stats=False,
in_channels=3,
in_size=(224, 224),
classes=1000,
**kwargs):
super(IGCV3, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
self.features.add(conv3x3_block(
in_channels=in_channels,
out_channels=init_block_channels,
strides=2,
bn_use_global_stats=bn_use_global_stats,
activation=ReLU6()))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
with stage.name_scope():
for j, out_channels in enumerate(channels_per_stage):
strides = 2 if (j == 0) and (i != 0) else 1
expansion = (i != 0) or (j != 0)
stage.add(InvResUnit(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats,
expansion=expansion))
in_channels = out_channels
self.features.add(stage)
self.features.add(conv1x1_block(
in_channels=in_channels,
out_channels=final_block_channels,
bn_use_global_stats=bn_use_global_stats,
activation=ReLU6()))
in_channels = final_block_channels
self.features.add(nn.AvgPool2D(
pool_size=7,
strides=1))
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
self.output.add(nn.Dense(
units=classes,
in_units=in_channels))
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_igcv3(width_scale,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create IGCV3-D model with specific parameters.
Parameters:
----------
width_scale : float
Scale factor for width of layers.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
init_block_channels = 32
final_block_channels = 1280
layers = [1, 4, 6, 8, 6, 6, 1]
downsample = [0, 1, 1, 1, 0, 1, 0]
channels_per_layers = [16, 24, 32, 64, 96, 160, 320]
from functools import reduce
channels = reduce(lambda x, y: x + [[y[0]] * y[1]] if y[2] != 0 else x[:-1] + [x[-1] + [y[0]] * y[1]],
zip(channels_per_layers, layers, downsample), [[]])
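    # The reduce above folds layers into stages: a new stage starts whenever the
    # downsample flag is 1, otherwise the group is merged into the previous
    # stage, yielding [[16], [24]*4, [32]*6, [64]*8 + [96]*6, [160]*6 + [320]].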
if width_scale != 1.0:
def make_even(x):
return x if (x % 2 == 0) else x + 1
channels = [[make_even(int(cij * width_scale)) for cij in ci] for ci in channels]
init_block_channels = make_even(int(init_block_channels * width_scale))
if width_scale > 1.0:
final_block_channels = make_even(int(final_block_channels * width_scale))
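    # (Channels are rounded up to even values because every InvResUnit uses two
    # convolution groups; the final 1280-wide block is only ever widened, never
    # narrowed, mirroring the MobileNetV2 convention.)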
net = IGCV3(
channels=channels,
init_block_channels=init_block_channels,
final_block_channels=final_block_channels,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def igcv3_w1(**kwargs):
"""
IGCV3-D 1.0x model from 'IGCV3: Interleaved Low-Rank Group Convolutions for Efficient Deep Neural Networks,'
https://arxiv.org/abs/1806.00178.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_igcv3(width_scale=1.0, model_name="igcv3_w1", **kwargs)
def igcv3_w3d4(**kwargs):
"""
IGCV3-D 0.75x model from 'IGCV3: Interleaved Low-Rank Group Convolutions for Efficient Deep Neural Networks,'
https://arxiv.org/abs/1806.00178.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_igcv3(width_scale=0.75, model_name="igcv3_w3d4", **kwargs)
def igcv3_wd2(**kwargs):
"""
IGCV3-D 0.5x model from 'IGCV3: Interleaved Low-Rank Group Convolutions for Efficient Deep Neural Networks,'
https://arxiv.org/abs/1806.00178.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_igcv3(width_scale=0.5, model_name="igcv3_wd2", **kwargs)
def igcv3_wd4(**kwargs):
"""
IGCV3-D 0.25x model from 'IGCV3: Interleaved Low-Rank Group Convolutions for Efficient Deep Neural Networks,'
https://arxiv.org/abs/1806.00178.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_igcv3(width_scale=0.25, model_name="igcv3_wd4", **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
igcv3_w1,
igcv3_w3d4,
igcv3_wd2,
igcv3_wd4,
]
for model in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != igcv3_w1 or weight_count == 3491688)
assert (model != igcv3_w3d4 or weight_count == 2638084)
assert (model != igcv3_wd2 or weight_count == 1985528)
assert (model != igcv3_wd4 or weight_count == 1534020)
x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
y = net(x)
assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
| 11,243 | 33.280488 | 115 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/seresnet_cifar.py | """
SE-ResNet for CIFAR/SVHN, implemented in Gluon.
Original paper: 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
"""
__all__ = ['CIFARSEResNet', 'seresnet20_cifar10', 'seresnet20_cifar100', 'seresnet20_svhn',
'seresnet56_cifar10', 'seresnet56_cifar100', 'seresnet56_svhn',
'seresnet110_cifar10', 'seresnet110_cifar100', 'seresnet110_svhn',
'seresnet164bn_cifar10', 'seresnet164bn_cifar100', 'seresnet164bn_svhn',
'seresnet272bn_cifar10', 'seresnet272bn_cifar100', 'seresnet272bn_svhn',
'seresnet542bn_cifar10', 'seresnet542bn_cifar100', 'seresnet542bn_svhn',
'seresnet1001_cifar10', 'seresnet1001_cifar100', 'seresnet1001_svhn',
'seresnet1202_cifar10', 'seresnet1202_cifar100', 'seresnet1202_svhn']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv3x3_block
from .seresnet import SEResUnit
class CIFARSEResNet(HybridBlock):
"""
SE-ResNet model for CIFAR from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (32, 32)
Spatial size of the expected input image.
classes : int, default 10
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
bottleneck,
bn_use_global_stats=False,
in_channels=3,
in_size=(32, 32),
classes=10,
**kwargs):
super(CIFARSEResNet, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
self.features.add(conv3x3_block(
in_channels=in_channels,
out_channels=init_block_channels,
bn_use_global_stats=bn_use_global_stats))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
with stage.name_scope():
for j, out_channels in enumerate(channels_per_stage):
strides = 2 if (j == 0) and (i != 0) else 1
stage.add(SEResUnit(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats,
bottleneck=bottleneck,
conv1_stride=False))
in_channels = out_channels
self.features.add(stage)
self.features.add(nn.AvgPool2D(
pool_size=8,
strides=1))
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
self.output.add(nn.Dense(
units=classes,
in_units=in_channels))
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_seresnet_cifar(classes,
blocks,
bottleneck,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create SE-ResNet model for CIFAR with specific parameters.
Parameters:
----------
classes : int
Number of classification classes.
blocks : int
Number of blocks.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
assert (classes in [10, 100])
if bottleneck:
assert ((blocks - 2) % 9 == 0)
layers = [(blocks - 2) // 9] * 3
else:
assert ((blocks - 2) % 6 == 0)
layers = [(blocks - 2) // 6] * 3
channels_per_layers = [16, 32, 64]
init_block_channels = 16
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
if bottleneck:
channels = [[cij * 4 for cij in ci] for ci in channels]
net = CIFARSEResNet(
channels=channels,
init_block_channels=init_block_channels,
bottleneck=bottleneck,
classes=classes,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
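# A minimal sketch (not part of the original file; the helper name is
# illustrative) of how `get_seresnet_cifar` derives its configuration: for
# simple blocks the depth satisfies blocks = 6 * n + 2 and for bottleneck
# blocks it satisfies blocks = 9 * n + 2, where n is the unit count per stage.
def _example_seresnet_cifar_config(blocks, bottleneck):
    """Recompute (layers, channels) the same way get_seresnet_cifar does."""
    n = (blocks - 2) // (9 if bottleneck else 6)
    layers = [n] * 3
    channels = [[ci] * li for (ci, li) in zip([16, 32, 64], layers)]
    if bottleneck:
        channels = [[cij * 4 for cij in ci] for ci in channels]
    return layers, channels

# e.g. _example_seresnet_cifar_config(20, False) -> ([3] * 3, [[16] * 3, [32] * 3, [64] * 3])
# and _example_seresnet_cifar_config(164, True) -> ([18] * 3, [[64] * 18, [128] * 18, [256] * 18])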
def seresnet20_cifar10(classes=10, **kwargs):
"""
SE-ResNet-20 model for CIFAR-10 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet_cifar(classes=classes, blocks=20, bottleneck=False, model_name="seresnet20_cifar10", **kwargs)
def seresnet20_cifar100(classes=100, **kwargs):
"""
SE-ResNet-20 model for CIFAR-100 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet_cifar(classes=classes, blocks=20, bottleneck=False, model_name="seresnet20_cifar100", **kwargs)
def seresnet20_svhn(classes=10, **kwargs):
"""
SE-ResNet-20 model for SVHN from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet_cifar(classes=classes, blocks=20, bottleneck=False, model_name="seresnet20_svhn", **kwargs)
def seresnet56_cifar10(classes=10, **kwargs):
"""
SE-ResNet-56 model for CIFAR-10 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet_cifar(classes=classes, blocks=56, bottleneck=False, model_name="seresnet56_cifar10", **kwargs)
def seresnet56_cifar100(classes=100, **kwargs):
"""
SE-ResNet-56 model for CIFAR-100 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet_cifar(classes=classes, blocks=56, bottleneck=False, model_name="seresnet56_cifar100", **kwargs)
def seresnet56_svhn(classes=10, **kwargs):
"""
SE-ResNet-56 model for SVHN from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet_cifar(classes=classes, blocks=56, bottleneck=False, model_name="seresnet56_svhn", **kwargs)
def seresnet110_cifar10(classes=10, **kwargs):
"""
SE-ResNet-110 model for CIFAR-10 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet_cifar(classes=classes, blocks=110, bottleneck=False, model_name="seresnet110_cifar10", **kwargs)
def seresnet110_cifar100(classes=100, **kwargs):
"""
SE-ResNet-110 model for CIFAR-100 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet_cifar(classes=classes, blocks=110, bottleneck=False, model_name="seresnet110_cifar100",
**kwargs)
def seresnet110_svhn(classes=10, **kwargs):
"""
SE-ResNet-110 model for SVHN from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet_cifar(classes=classes, blocks=110, bottleneck=False, model_name="seresnet110_svhn", **kwargs)
def seresnet164bn_cifar10(classes=10, **kwargs):
"""
SE-ResNet-164(BN) model for CIFAR-10 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet_cifar(classes=classes, blocks=164, bottleneck=True, model_name="seresnet164bn_cifar10",
**kwargs)
def seresnet164bn_cifar100(classes=100, **kwargs):
"""
SE-ResNet-164(BN) model for CIFAR-100 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet_cifar(classes=classes, blocks=164, bottleneck=True, model_name="seresnet164bn_cifar100",
**kwargs)
def seresnet164bn_svhn(classes=10, **kwargs):
"""
SE-ResNet-164(BN) model for SVHN from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet_cifar(classes=classes, blocks=164, bottleneck=True, model_name="seresnet164bn_svhn", **kwargs)
def seresnet272bn_cifar10(classes=10, **kwargs):
"""
SE-ResNet-272(BN) model for CIFAR-10 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet_cifar(classes=classes, blocks=272, bottleneck=True, model_name="seresnet272bn_cifar10",
**kwargs)
def seresnet272bn_cifar100(classes=100, **kwargs):
"""
SE-ResNet-272(BN) model for CIFAR-100 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet_cifar(classes=classes, blocks=272, bottleneck=True, model_name="seresnet272bn_cifar100",
**kwargs)
def seresnet272bn_svhn(classes=10, **kwargs):
"""
SE-ResNet-272(BN) model for SVHN from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet_cifar(classes=classes, blocks=272, bottleneck=True, model_name="seresnet272bn_svhn", **kwargs)
def seresnet542bn_cifar10(classes=10, **kwargs):
"""
SE-ResNet-542(BN) model for CIFAR-10 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet_cifar(classes=classes, blocks=542, bottleneck=True, model_name="seresnet542bn_cifar10",
**kwargs)
def seresnet542bn_cifar100(classes=100, **kwargs):
"""
SE-ResNet-542(BN) model for CIFAR-100 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet_cifar(classes=classes, blocks=542, bottleneck=True, model_name="seresnet542bn_cifar100",
**kwargs)
def seresnet542bn_svhn(classes=10, **kwargs):
"""
SE-ResNet-542(BN) model for SVHN from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet_cifar(classes=classes, blocks=542, bottleneck=True, model_name="seresnet542bn_svhn", **kwargs)
def seresnet1001_cifar10(classes=10, **kwargs):
"""
SE-ResNet-1001 model for CIFAR-10 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet_cifar(classes=classes, blocks=1001, bottleneck=True, model_name="seresnet1001_cifar10",
**kwargs)
def seresnet1001_cifar100(classes=100, **kwargs):
"""
SE-ResNet-1001 model for CIFAR-100 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet_cifar(classes=classes, blocks=1001, bottleneck=True, model_name="seresnet1001_cifar100",
**kwargs)
def seresnet1001_svhn(classes=10, **kwargs):
"""
SE-ResNet-1001 model for SVHN from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet_cifar(classes=classes, blocks=1001, bottleneck=True, model_name="seresnet1001_svhn", **kwargs)
def seresnet1202_cifar10(classes=10, **kwargs):
"""
SE-ResNet-1202 model for CIFAR-10 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet_cifar(classes=classes, blocks=1202, bottleneck=False, model_name="seresnet1202_cifar10",
**kwargs)
def seresnet1202_cifar100(classes=100, **kwargs):
"""
SE-ResNet-1202 model for CIFAR-100 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet_cifar(classes=classes, blocks=1202, bottleneck=False, model_name="seresnet1202_cifar100",
**kwargs)
def seresnet1202_svhn(classes=10, **kwargs):
"""
SE-ResNet-1202 model for SVHN from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet_cifar(classes=classes, blocks=1202, bottleneck=False, model_name="seresnet1202_svhn", **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
(seresnet20_cifar10, 10),
(seresnet20_cifar100, 100),
(seresnet20_svhn, 10),
(seresnet56_cifar10, 10),
(seresnet56_cifar100, 100),
(seresnet56_svhn, 10),
(seresnet110_cifar10, 10),
(seresnet110_cifar100, 100),
(seresnet110_svhn, 10),
(seresnet164bn_cifar10, 10),
(seresnet164bn_cifar100, 100),
(seresnet164bn_svhn, 10),
(seresnet272bn_cifar10, 10),
(seresnet272bn_cifar100, 100),
(seresnet272bn_svhn, 10),
(seresnet542bn_cifar10, 10),
(seresnet542bn_cifar100, 100),
(seresnet542bn_svhn, 10),
(seresnet1001_cifar10, 10),
(seresnet1001_cifar100, 100),
(seresnet1001_svhn, 10),
(seresnet1202_cifar10, 10),
(seresnet1202_cifar100, 100),
(seresnet1202_svhn, 10),
]
for model, classes in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != seresnet20_cifar10 or weight_count == 274847)
assert (model != seresnet20_cifar100 or weight_count == 280697)
assert (model != seresnet20_svhn or weight_count == 274847)
assert (model != seresnet56_cifar10 or weight_count == 862889)
assert (model != seresnet56_cifar100 or weight_count == 868739)
assert (model != seresnet56_svhn or weight_count == 862889)
assert (model != seresnet110_cifar10 or weight_count == 1744952)
assert (model != seresnet110_cifar100 or weight_count == 1750802)
assert (model != seresnet110_svhn or weight_count == 1744952)
assert (model != seresnet164bn_cifar10 or weight_count == 1906258)
assert (model != seresnet164bn_cifar100 or weight_count == 1929388)
assert (model != seresnet164bn_svhn or weight_count == 1906258)
assert (model != seresnet272bn_cifar10 or weight_count == 3153826)
assert (model != seresnet272bn_cifar100 or weight_count == 3176956)
assert (model != seresnet272bn_svhn or weight_count == 3153826)
assert (model != seresnet542bn_cifar10 or weight_count == 6272746)
assert (model != seresnet542bn_cifar100 or weight_count == 6295876)
assert (model != seresnet542bn_svhn or weight_count == 6272746)
assert (model != seresnet1001_cifar10 or weight_count == 11574910)
assert (model != seresnet1001_cifar100 or weight_count == 11598040)
assert (model != seresnet1001_svhn or weight_count == 11574910)
assert (model != seresnet1202_cifar10 or weight_count == 19582226)
assert (model != seresnet1202_cifar100 or weight_count == 19588076)
assert (model != seresnet1202_svhn or weight_count == 19582226)
x = mx.nd.zeros((1, 3, 32, 32), ctx=ctx)
y = net(x)
assert (y.shape == (1, classes))
if __name__ == "__main__":
_test()
| 25,848 | 36.846266 | 120 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/resnetd.py | """
ResNet(D) with dilation for ImageNet-1K, implemented in Gluon.
Original paper: 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
"""
__all__ = ['ResNetD', 'resnetd50b', 'resnetd101b', 'resnetd152b']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import MultiOutputSequential
from .resnet import ResUnit, ResInitBlock
from .senet import SEInitBlock
class ResNetD(HybridBlock):
"""
    ResNet(D) model with dilation from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
conv1_stride : bool
Whether to use stride in the first or the second convolution layer in units.
bn_use_global_stats : bool, default False
        Whether global moving statistics are used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
ordinary_init : bool, default False
Whether to use original initial block or SENet one.
bends : tuple of int, default None
        Numbers (1-based) of the stages after which intermediate feature maps are additionally returned.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
bottleneck,
conv1_stride,
bn_use_global_stats=False,
bn_cudnn_off=False,
ordinary_init=False,
bends=None,
in_channels=3,
in_size=(224, 224),
classes=1000,
**kwargs):
super(ResNetD, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
self.multi_output = (bends is not None)
with self.name_scope():
self.features = MultiOutputSequential(prefix="")
if ordinary_init:
self.features.add(ResInitBlock(
in_channels=in_channels,
out_channels=init_block_channels,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off))
else:
init_block_channels = 2 * init_block_channels
self.features.add(SEInitBlock(
in_channels=in_channels,
out_channels=init_block_channels,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
with stage.name_scope():
for j, out_channels in enumerate(channels_per_stage):
strides = 2 if ((j == 0) and (i != 0) and (i < 2)) else 1
dilation = (2 ** max(0, i - 1 - int(j == 0)))
stage.add(ResUnit(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
padding=dilation,
dilation=dilation,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
bottleneck=bottleneck,
conv1_stride=conv1_stride))
in_channels = out_channels
if self.multi_output and ((i + 1) in bends):
stage.do_output = True
self.features.add(stage)
self.features.add(nn.GlobalAvgPool2D())
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
self.output.add(nn.Dense(
units=classes,
in_units=in_channels))
def hybrid_forward(self, F, x):
outs = self.features(x)
x = outs[0]
x = self.output(x)
if self.multi_output:
return [x] + outs[1:]
else:
return x
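# An illustrative restatement (helper name is an assumption) of the stride and
# dilation schedule computed inside ResNetD.__init__: stride 2 is applied only
# at the first unit of stage 2, later stages keep resolution while growing the
# dilation, so together with the /4 initial block the output stride is 8.
def _example_resnetd_schedule(layers):
    """Yield (stage, unit, strides, dilation) exactly as ResNetD computes them."""
    for i, li in enumerate(layers):
        for j in range(li):
            strides = 2 if ((j == 0) and (i != 0) and (i < 2)) else 1
            dilation = 2 ** max(0, i - 1 - int(j == 0))
            yield i, j, strides, dilation

# e.g. for layers=[3, 4, 6, 3] the first units of stages 1..4 get
# (strides, dilation) = (1, 1), (2, 1), (1, 1), (1, 2), while the remaining
# units of stages 3 and 4 use dilation 2 and 4, respectively.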
def get_resnetd(blocks,
conv1_stride=True,
width_scale=1.0,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
    Create a ResNet(D) model with dilation and the specified parameters.
Parameters:
----------
blocks : int
Number of blocks.
conv1_stride : bool, default True
Whether to use stride in the first or the second convolution layer in units.
width_scale : float, default 1.0
Scale factor for width of layers.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
if blocks == 10:
layers = [1, 1, 1, 1]
elif blocks == 12:
layers = [2, 1, 1, 1]
elif blocks == 14:
layers = [2, 2, 1, 1]
elif blocks == 16:
layers = [2, 2, 2, 1]
elif blocks == 18:
layers = [2, 2, 2, 2]
elif blocks == 34:
layers = [3, 4, 6, 3]
elif blocks == 50:
layers = [3, 4, 6, 3]
elif blocks == 101:
layers = [3, 4, 23, 3]
elif blocks == 152:
layers = [3, 8, 36, 3]
elif blocks == 200:
layers = [3, 24, 36, 3]
else:
raise ValueError("Unsupported ResNet(D) with number of blocks: {}".format(blocks))
init_block_channels = 64
if blocks < 50:
channels_per_layers = [64, 128, 256, 512]
bottleneck = False
else:
channels_per_layers = [256, 512, 1024, 2048]
bottleneck = True
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
if width_scale != 1.0:
channels = [[int(cij * width_scale) if (i != len(channels) - 1) or (j != len(ci) - 1) else cij
for j, cij in enumerate(ci)] for i, ci in enumerate(channels)]
init_block_channels = int(init_block_channels * width_scale)
net = ResNetD(
channels=channels,
init_block_channels=init_block_channels,
bottleneck=bottleneck,
conv1_stride=conv1_stride,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
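# Usage sketch (illustrative; the exact output layout is an assumption based on
# the stage-numbering logic above and on MultiOutputSequential): requesting
# intermediate feature maps from stages 2 and 3 next to the classifier logits.
#
#   import mxnet as mx
#   net = resnetd50b(bends=(2, 3))
#   net.initialize()
#   outs = net(mx.nd.zeros((1, 3, 224, 224)))
#   # outs[0] is the logits; outs[1:] are the feature maps of the bent stages.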
def resnetd50b(**kwargs):
"""
    ResNet(D)-50 model with dilation and stride at the second convolution in the bottleneck block, from 'Deep Residual
Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resnetd(blocks=50, conv1_stride=False, model_name="resnetd50b", **kwargs)
def resnetd101b(**kwargs):
"""
    ResNet(D)-101 model with dilation and stride at the second convolution in the bottleneck block, from 'Deep Residual
Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resnetd(blocks=101, conv1_stride=False, model_name="resnetd101b", **kwargs)
def resnetd152b(**kwargs):
"""
    ResNet(D)-152 model with dilation and stride at the second convolution in the bottleneck block, from 'Deep Residual
Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resnetd(blocks=152, conv1_stride=False, model_name="resnetd152b", **kwargs)
def _test():
import numpy as np
import mxnet as mx
ordinary_init = False
bends = None
pretrained = False
models = [
resnetd50b,
resnetd101b,
resnetd152b,
]
for model in models:
net = model(
pretrained=pretrained,
ordinary_init=ordinary_init,
bends=bends)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
if ordinary_init:
assert (model != resnetd50b or weight_count == 25557032)
assert (model != resnetd101b or weight_count == 44549160)
assert (model != resnetd152b or weight_count == 60192808)
else:
assert (model != resnetd50b or weight_count == 25680808)
assert (model != resnetd101b or weight_count == 44672936)
assert (model != resnetd152b or weight_count == 60316584)
x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
y = net(x)
if bends is not None:
y = y[0]
assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
| 10,821 | 34.250814 | 120 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/quartznet.py | """
QuartzNet for ASR, implemented in Gluon.
Original paper: 'QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel Separable Convolutions,'
https://arxiv.org/abs/1910.10261.
"""
__all__ = ['quartznet5x5_en_ls', 'quartznet15x5_en', 'quartznet15x5_en_nr', 'quartznet15x5_fr', 'quartznet15x5_de',
'quartznet15x5_it', 'quartznet15x5_es', 'quartznet15x5_ca', 'quartznet15x5_pl', 'quartznet15x5_ru',
'quartznet15x5_ru34']
from .jasper import get_jasper
def quartznet5x5_en_ls(classes=29, **kwargs):
"""
QuartzNet 5x5 model for English language (trained on LibriSpeech dataset) from 'QuartzNet: Deep Automatic Speech
Recognition with 1D Time-Channel Separable Convolutions,' https://arxiv.org/abs/1910.10261.
Parameters:
----------
classes : int, default 29
Number of classification classes (number of graphemes).
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
vocabulary = [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's',
't', 'u', 'v', 'w', 'x', 'y', 'z', "'"]
return get_jasper(classes=classes, version=("quartznet", "5x5"), use_dw=True, vocabulary=vocabulary,
model_name="quartznet5x5_en_ls", **kwargs)
def quartznet15x5_en(classes=29, **kwargs):
"""
QuartzNet 15x5 model for English language from 'QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel
Separable Convolutions,' https://arxiv.org/abs/1910.10261.
Parameters:
----------
classes : int, default 29
Number of classification classes (number of graphemes).
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
vocabulary = [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's',
't', 'u', 'v', 'w', 'x', 'y', 'z', "'"]
return get_jasper(classes=classes, version=("quartznet", "15x5"), use_dw=True, vocabulary=vocabulary,
model_name="quartznet15x5_en", **kwargs)
def quartznet15x5_en_nr(classes=29, **kwargs):
"""
QuartzNet 15x5 model for English language (with presence of noise) from 'QuartzNet: Deep Automatic Speech
Recognition with 1D Time-Channel Separable Convolutions,' https://arxiv.org/abs/1910.10261.
Parameters:
----------
classes : int, default 29
Number of classification classes (number of graphemes).
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
vocabulary = [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's',
't', 'u', 'v', 'w', 'x', 'y', 'z', "'"]
return get_jasper(classes=classes, version=("quartznet", "15x5"), use_dw=True, vocabulary=vocabulary,
model_name="quartznet15x5_en_nr", **kwargs)
def quartznet15x5_fr(classes=43, **kwargs):
"""
QuartzNet 15x5 model for French language from 'QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel
Separable Convolutions,' https://arxiv.org/abs/1910.10261.
Parameters:
----------
classes : int, default 43
Number of classification classes (number of graphemes).
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
vocabulary = [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's',
't', 'u', 'v', 'w', 'x', 'y', 'z', "'", 'ç', 'é', 'â', 'ê', 'î', 'ô', 'û', 'à', 'è', 'ù', 'ë', 'ï',
'ü', 'ÿ']
return get_jasper(classes=classes, version=("quartznet", "15x5"), use_dw=True, vocabulary=vocabulary,
model_name="quartznet15x5_fr", **kwargs)
def quartznet15x5_de(classes=32, **kwargs):
"""
QuartzNet 15x5 model for German language from 'QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel
Separable Convolutions,' https://arxiv.org/abs/1910.10261.
Parameters:
----------
classes : int, default 32
Number of classification classes (number of graphemes).
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
vocabulary = [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's',
't', 'u', 'v', 'w', 'x', 'y', 'z', 'ä', 'ö', 'ü', 'ß']
return get_jasper(classes=classes, version=("quartznet", "15x5"), use_dw=True, vocabulary=vocabulary,
model_name="quartznet15x5_de", **kwargs)
def quartznet15x5_it(classes=39, **kwargs):
"""
QuartzNet 15x5 model for Italian language from 'QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel
Separable Convolutions,' https://arxiv.org/abs/1910.10261.
Parameters:
----------
classes : int, default 39
Number of classification classes (number of graphemes).
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
vocabulary = [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's',
't', 'u', 'v', 'w', 'x', 'y', 'z', "'", 'à', 'é', 'è', 'í', 'ì', 'î', 'ó', 'ò', 'ú', 'ù']
return get_jasper(classes=classes, version=("quartznet", "15x5"), use_dw=True, vocabulary=vocabulary,
model_name="quartznet15x5_it", **kwargs)
def quartznet15x5_es(classes=36, **kwargs):
"""
QuartzNet 15x5 model for Spanish language from 'QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel
Separable Convolutions,' https://arxiv.org/abs/1910.10261.
Parameters:
----------
classes : int, default 36
Number of classification classes (number of graphemes).
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
vocabulary = [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's',
't', 'u', 'v', 'w', 'x', 'y', 'z', "'", 'á', 'é', 'í', 'ó', 'ú', 'ñ', 'ü']
return get_jasper(classes=classes, version=("quartznet", "15x5"), use_dw=True, vocabulary=vocabulary,
model_name="quartznet15x5_es", **kwargs)
def quartznet15x5_ca(classes=39, **kwargs):
"""
    QuartzNet 15x5 model for Catalan language from 'QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel
Separable Convolutions,' https://arxiv.org/abs/1910.10261.
Parameters:
----------
classes : int, default 39
Number of classification classes (number of graphemes).
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
vocabulary = [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's',
't', 'u', 'v', 'w', 'x', 'y', 'z', "'", 'à', 'é', 'è', 'í', 'ï', 'ó', 'ò', 'ú', 'ü', 'ŀ']
return get_jasper(classes=classes, version=("quartznet", "15x5"), use_dw=True, vocabulary=vocabulary,
model_name="quartznet15x5_ca", **kwargs)
def quartznet15x5_pl(classes=34, **kwargs):
"""
    QuartzNet 15x5 model for Polish language from 'QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel
Separable Convolutions,' https://arxiv.org/abs/1910.10261.
Parameters:
----------
classes : int, default 34
Number of classification classes (number of graphemes).
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
vocabulary = [' ', 'a', 'ą', 'b', 'c', 'ć', 'd', 'e', 'ę', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'ł', 'm', 'n', 'ń',
'o', 'ó', 'p', 'r', 's', 'ś', 't', 'u', 'w', 'y', 'z', 'ź', 'ż']
return get_jasper(classes=classes, version=("quartznet", "15x5"), use_dw=True, vocabulary=vocabulary,
model_name="quartznet15x5_pl", **kwargs)
def quartznet15x5_ru(classes=35, **kwargs):
"""
QuartzNet 15x5 model for Russian language from 'QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel
Separable Convolutions,' https://arxiv.org/abs/1910.10261.
Parameters:
----------
classes : int, default 35
Number of classification classes (number of graphemes).
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
vocabulary = [' ', 'а', 'б', 'в', 'г', 'д', 'е', 'ё', 'ж', 'з', 'и', 'й', 'к', 'л', 'м', 'н', 'о', 'п', 'р', 'с',
'т', 'у', 'ф', 'х', 'ц', 'ч', 'ш', 'щ', 'ъ', 'ы', 'ь', 'э', 'ю', 'я']
return get_jasper(classes=classes, version=("quartznet", "15x5"), use_dw=True, vocabulary=vocabulary,
model_name="quartznet15x5_ru", **kwargs)
def quartznet15x5_ru34(classes=34, **kwargs):
"""
    QuartzNet 15x5 model for Russian language (33 graphemes, without 'ё') from 'QuartzNet: Deep Automatic Speech Recognition with 1D
Time-Channel Separable Convolutions,' https://arxiv.org/abs/1910.10261.
Parameters:
----------
classes : int, default 34
Number of classification classes (number of graphemes).
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
vocabulary = [' ', 'а', 'б', 'в', 'г', 'д', 'е', 'ж', 'з', 'и', 'й', 'к', 'л', 'м', 'н', 'о', 'п', 'р', 'с', 'т',
'у', 'ф', 'х', 'ц', 'ч', 'ш', 'щ', 'ъ', 'ы', 'ь', 'э', 'ю', 'я']
return get_jasper(classes=classes, version=("quartznet", "15x5"), use_dw=True, vocabulary=vocabulary,
model_name="quartznet15x5_ru34", **kwargs)
def _calc_width(net):
import numpy as np
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
return weight_count
def _test():
import numpy as np
import mxnet as mx
pretrained = False
from_audio = True
audio_features = 64
models = [
quartznet5x5_en_ls,
quartznet15x5_en,
quartznet15x5_en_nr,
quartznet15x5_fr,
quartznet15x5_de,
quartznet15x5_it,
quartznet15x5_es,
quartznet15x5_ca,
quartznet15x5_pl,
quartznet15x5_ru,
quartznet15x5_ru34,
]
for model in models:
net = model(
in_channels=audio_features,
from_audio=from_audio,
pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
weight_count = _calc_width(net)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != quartznet5x5_en_ls or weight_count == 6713181)
assert (model != quartznet15x5_en or weight_count == 18924381)
assert (model != quartznet15x5_en_nr or weight_count == 18924381)
assert (model != quartznet15x5_fr or weight_count == 18938731)
assert (model != quartznet15x5_de or weight_count == 18927456)
assert (model != quartznet15x5_it or weight_count == 18934631)
assert (model != quartznet15x5_es or weight_count == 18931556)
assert (model != quartznet15x5_ca or weight_count == 18934631)
assert (model != quartznet15x5_pl or weight_count == 18929506)
assert (model != quartznet15x5_ru or weight_count == 18930531)
assert (model != quartznet15x5_ru34 or weight_count == 18929506)
batch = 3
aud_scale = 640 if from_audio else 1
seq_len = np.random.randint(150, 250, batch) * aud_scale
seq_len_max = seq_len.max() + 2
x_shape = (batch, seq_len_max) if from_audio else (batch, audio_features, seq_len_max)
x = mx.nd.random.normal(shape=x_shape, ctx=ctx)
        x_len = mx.nd.array(seq_len, ctx=ctx, dtype=np.int64)  # np.long was removed in NumPy >= 1.24
y, y_len = net(x, x_len)
assert (y.shape[:2] == (batch, net.classes))
if from_audio:
assert (y.shape[2] in range(seq_len_max // aud_scale * 2, seq_len_max // aud_scale * 2 + 9))
else:
assert (y.shape[2] in [seq_len_max // 2, seq_len_max // 2 + 1])
if __name__ == "__main__":
_test()
| 14,466 | 42.185075 | 119 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/preresnet.py | """
PreResNet for ImageNet-1K, implemented in Gluon.
Original papers: 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.
"""
__all__ = ['PreResNet', 'preresnet10', 'preresnet12', 'preresnet14', 'preresnetbc14b', 'preresnet16', 'preresnet18_wd4',
'preresnet18_wd2', 'preresnet18_w3d4', 'preresnet18', 'preresnet26', 'preresnetbc26b', 'preresnet34',
'preresnetbc38b', 'preresnet50', 'preresnet50b', 'preresnet101', 'preresnet101b', 'preresnet152',
'preresnet152b', 'preresnet200', 'preresnet200b', 'preresnet269b', 'PreResBlock', 'PreResBottleneck',
'PreResUnit', 'PreResInitBlock', 'PreResActivation']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import pre_conv1x1_block, pre_conv3x3_block, conv1x1
class PreResBlock(HybridBlock):
"""
Simple PreResNet block for residual path in PreResNet unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
use_bias : bool, default False
Whether the layer uses a bias vector.
use_bn : bool, default True
Whether to use BatchNorm layer.
bn_use_global_stats : bool, default False
        Whether global moving statistics are used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels,
out_channels,
strides,
use_bias=False,
use_bn=True,
bn_use_global_stats=False,
**kwargs):
super(PreResBlock, self).__init__(**kwargs)
with self.name_scope():
self.conv1 = pre_conv3x3_block(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
use_bias=use_bias,
use_bn=use_bn,
bn_use_global_stats=bn_use_global_stats,
return_preact=True)
self.conv2 = pre_conv3x3_block(
in_channels=out_channels,
out_channels=out_channels,
use_bias=use_bias,
use_bn=use_bn,
bn_use_global_stats=bn_use_global_stats)
def hybrid_forward(self, F, x):
x, x_pre_activ = self.conv1(x)
x = self.conv2(x)
return x, x_pre_activ
class PreResBottleneck(HybridBlock):
"""
PreResNet bottleneck block for residual path in PreResNet unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
bn_use_global_stats : bool
        Whether global moving statistics are used instead of local batch-norm for BatchNorm layers.
conv1_stride : bool
Whether to use stride in the first or the second convolution layer of the block.
"""
def __init__(self,
in_channels,
out_channels,
strides,
bn_use_global_stats,
conv1_stride,
**kwargs):
super(PreResBottleneck, self).__init__(**kwargs)
mid_channels = out_channels // 4
with self.name_scope():
self.conv1 = pre_conv1x1_block(
in_channels=in_channels,
out_channels=mid_channels,
strides=(strides if conv1_stride else 1),
bn_use_global_stats=bn_use_global_stats,
return_preact=True)
self.conv2 = pre_conv3x3_block(
in_channels=mid_channels,
out_channels=mid_channels,
strides=(1 if conv1_stride else strides),
bn_use_global_stats=bn_use_global_stats)
self.conv3 = pre_conv1x1_block(
in_channels=mid_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats)
def hybrid_forward(self, F, x):
x, x_pre_activ = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
return x, x_pre_activ
class PreResUnit(HybridBlock):
"""
PreResNet unit with residual connection.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
use_bias : bool, default False
Whether the layer uses a bias vector.
use_bn : bool, default True
Whether to use BatchNorm layer.
bn_use_global_stats : bool, default False
        Whether global moving statistics are used instead of local batch-norm for BatchNorm layers.
bottleneck : bool, default True
Whether to use a bottleneck or simple block in units.
conv1_stride : bool, default False
Whether to use stride in the first or the second convolution layer of the block.
"""
def __init__(self,
in_channels,
out_channels,
strides,
use_bias=False,
use_bn=True,
bn_use_global_stats=False,
bottleneck=True,
conv1_stride=False,
**kwargs):
super(PreResUnit, self).__init__(**kwargs)
self.resize_identity = (in_channels != out_channels) or (strides != 1)
with self.name_scope():
if bottleneck:
self.body = PreResBottleneck(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats,
conv1_stride=conv1_stride)
else:
self.body = PreResBlock(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
use_bias=use_bias,
use_bn=use_bn,
bn_use_global_stats=bn_use_global_stats)
if self.resize_identity:
self.identity_conv = conv1x1(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
use_bias=use_bias)
def hybrid_forward(self, F, x):
identity = x
x, x_pre_activ = self.body(x)
if self.resize_identity:
identity = self.identity_conv(x_pre_activ)
x = x + identity
return x
class PreResInitBlock(HybridBlock):
"""
PreResNet specific initial block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
bn_use_global_stats : bool
        Whether global moving statistics are used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels,
out_channels,
bn_use_global_stats,
**kwargs):
super(PreResInitBlock, self).__init__(**kwargs)
with self.name_scope():
self.conv = nn.Conv2D(
channels=out_channels,
kernel_size=7,
strides=2,
padding=3,
use_bias=False,
in_channels=in_channels)
self.bn = nn.BatchNorm(
in_channels=out_channels,
use_global_stats=bn_use_global_stats)
self.activ = nn.Activation("relu")
self.pool = nn.MaxPool2D(
pool_size=3,
strides=2,
padding=1)
def hybrid_forward(self, F, x):
x = self.conv(x)
x = self.bn(x)
x = self.activ(x)
x = self.pool(x)
return x
class PreResActivation(HybridBlock):
"""
PreResNet pure pre-activation block without convolution layer. It's used by itself as the final block.
Parameters:
----------
in_channels : int
Number of input channels.
bn_use_global_stats : bool, default False
        Whether global moving statistics are used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels,
bn_use_global_stats=False,
**kwargs):
super(PreResActivation, self).__init__(**kwargs)
with self.name_scope():
self.bn = nn.BatchNorm(
in_channels=in_channels,
use_global_stats=bn_use_global_stats)
self.activ = nn.Activation("relu")
def hybrid_forward(self, F, x):
x = self.bn(x)
x = self.activ(x)
return x
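# A minimal sketch (class name is illustrative; this mirrors what the imported
# `pre_conv3x3_block` is assumed to do) of the pre-activation ordering used
# throughout this file -- BatchNorm -> ReLU -> Conv -- with the BN+ReLU output
# optionally returned so that PreResUnit can feed it to its 1x1 shortcut.
class _ExamplePreConv3x3(HybridBlock):
    """BN -> ReLU -> 3x3 convolution, optionally returning the pre-activation."""
    def __init__(self, in_channels, out_channels, return_preact=False, **kwargs):
        super(_ExamplePreConv3x3, self).__init__(**kwargs)
        self.return_preact = return_preact
        with self.name_scope():
            self.bn = nn.BatchNorm(in_channels=in_channels)
            self.activ = nn.Activation("relu")
            self.conv = nn.Conv2D(
                channels=out_channels,
                kernel_size=3,
                padding=1,
                use_bias=False,
                in_channels=in_channels)

    def hybrid_forward(self, F, x):
        x = self.bn(x)
        x = self.activ(x)
        x_pre_activ = x
        x = self.conv(x)
        if self.return_preact:
            return x, x_pre_activ
        return x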
class PreResNet(HybridBlock):
"""
PreResNet model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
conv1_stride : bool
Whether to use stride in the first or the second convolution layer in units.
bn_use_global_stats : bool, default False
        Whether global moving statistics are used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
bottleneck,
conv1_stride,
bn_use_global_stats=False,
in_channels=3,
in_size=(224, 224),
classes=1000,
**kwargs):
super(PreResNet, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
self.features.add(PreResInitBlock(
in_channels=in_channels,
out_channels=init_block_channels,
bn_use_global_stats=bn_use_global_stats))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
with stage.name_scope():
for j, out_channels in enumerate(channels_per_stage):
strides = 2 if (j == 0) and (i != 0) else 1
stage.add(PreResUnit(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats,
bottleneck=bottleneck,
conv1_stride=conv1_stride))
in_channels = out_channels
self.features.add(stage)
self.features.add(PreResActivation(
in_channels=in_channels,
bn_use_global_stats=bn_use_global_stats))
self.features.add(nn.AvgPool2D(
pool_size=7,
strides=1))
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
self.output.add(nn.Dense(
units=classes,
in_units=in_channels))
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_preresnet(blocks,
bottleneck=None,
conv1_stride=True,
width_scale=1.0,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create PreResNet model with specific parameters.
Parameters:
----------
blocks : int
Number of blocks.
bottleneck : bool, default None
Whether to use a bottleneck or simple block in units.
conv1_stride : bool, default True
Whether to use stride in the first or the second convolution layer in units.
width_scale : float, default 1.0
Scale factor for width of layers.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
if bottleneck is None:
bottleneck = (blocks >= 50)
if blocks == 10:
layers = [1, 1, 1, 1]
elif blocks == 12:
layers = [2, 1, 1, 1]
    elif (blocks == 14) and not bottleneck:
layers = [2, 2, 1, 1]
elif (blocks == 14) and bottleneck:
layers = [1, 1, 1, 1]
elif blocks == 16:
layers = [2, 2, 2, 1]
elif blocks == 18:
layers = [2, 2, 2, 2]
elif (blocks == 26) and not bottleneck:
layers = [3, 3, 3, 3]
elif (blocks == 26) and bottleneck:
layers = [2, 2, 2, 2]
elif blocks == 34:
layers = [3, 4, 6, 3]
elif (blocks == 38) and bottleneck:
layers = [3, 3, 3, 3]
elif blocks == 50:
layers = [3, 4, 6, 3]
elif blocks == 101:
layers = [3, 4, 23, 3]
elif blocks == 152:
layers = [3, 8, 36, 3]
elif blocks == 200:
layers = [3, 24, 36, 3]
elif blocks == 269:
layers = [3, 30, 48, 8]
else:
raise ValueError("Unsupported PreResNet with number of blocks: {}".format(blocks))
if bottleneck:
assert (sum(layers) * 3 + 2 == blocks)
else:
assert (sum(layers) * 2 + 2 == blocks)
init_block_channels = 64
channels_per_layers = [64, 128, 256, 512]
if bottleneck:
bottleneck_factor = 4
channels_per_layers = [ci * bottleneck_factor for ci in channels_per_layers]
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
if width_scale != 1.0:
channels = [[int(cij * width_scale) if (i != len(channels) - 1) or (j != len(ci) - 1) else cij
for j, cij in enumerate(ci)] for i, ci in enumerate(channels)]
init_block_channels = int(init_block_channels * width_scale)
net = PreResNet(
channels=channels,
init_block_channels=init_block_channels,
bottleneck=bottleneck,
conv1_stride=conv1_stride,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
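# A small illustrative check (helper name is an assumption) mirroring the
# assertions above: each unit contributes 3 convolutions in the bottleneck
# case and 2 otherwise, plus the initial convolution and the final classifier,
# so blocks = 3 * sum(layers) + 2 or blocks = 2 * sum(layers) + 2.
def _example_preresnet_depth(layers, bottleneck):
    """Recompute the nominal depth from the per-stage unit counts."""
    convs_per_unit = 3 if bottleneck else 2
    return convs_per_unit * sum(layers) + 2

# e.g. _example_preresnet_depth([3, 4, 6, 3], True) == 50
# and _example_preresnet_depth([2, 2, 2, 2], False) == 18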
def preresnet10(**kwargs):
"""
PreResNet-10 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.
It's an experimental model.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_preresnet(blocks=10, model_name="preresnet10", **kwargs)
def preresnet12(**kwargs):
"""
PreResNet-12 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.
It's an experimental model.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_preresnet(blocks=12, model_name="preresnet12", **kwargs)
def preresnet14(**kwargs):
"""
PreResNet-14 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.
It's an experimental model.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_preresnet(blocks=14, model_name="preresnet14", **kwargs)
def preresnetbc14b(**kwargs):
"""
PreResNet-BC-14b model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.
It's an experimental model (bottleneck compressed).
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_preresnet(blocks=14, bottleneck=True, conv1_stride=False, model_name="preresnetbc14b", **kwargs)
def preresnet16(**kwargs):
"""
PreResNet-16 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.
It's an experimental model.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_preresnet(blocks=16, model_name="preresnet16", **kwargs)
def preresnet18_wd4(**kwargs):
"""
PreResNet-18 model with 0.25 width scale from 'Identity Mappings in Deep Residual Networks,'
https://arxiv.org/abs/1603.05027. It's an experimental model.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_preresnet(blocks=18, width_scale=0.25, model_name="preresnet18_wd4", **kwargs)
def preresnet18_wd2(**kwargs):
"""
PreResNet-18 model with 0.5 width scale from 'Identity Mappings in Deep Residual Networks,'
https://arxiv.org/abs/1603.05027. It's an experimental model.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_preresnet(blocks=18, width_scale=0.5, model_name="preresnet18_wd2", **kwargs)
def preresnet18_w3d4(**kwargs):
"""
PreResNet-18 model with 0.75 width scale from 'Identity Mappings in Deep Residual Networks,'
https://arxiv.org/abs/1603.05027. It's an experimental model.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_preresnet(blocks=18, width_scale=0.75, model_name="preresnet18_w3d4", **kwargs)
def preresnet18(**kwargs):
"""
PreResNet-18 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_preresnet(blocks=18, model_name="preresnet18", **kwargs)
def preresnet26(**kwargs):
"""
PreResNet-26 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.
It's an experimental model.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_preresnet(blocks=26, bottleneck=False, model_name="preresnet26", **kwargs)
def preresnetbc26b(**kwargs):
"""
PreResNet-BC-26b model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.
It's an experimental model (bottleneck compressed).
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_preresnet(blocks=26, bottleneck=True, conv1_stride=False, model_name="preresnetbc26b", **kwargs)
def preresnet34(**kwargs):
"""
PreResNet-34 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_preresnet(blocks=34, model_name="preresnet34", **kwargs)
def preresnetbc38b(**kwargs):
"""
PreResNet-BC-38b model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.
It's an experimental model (bottleneck compressed).
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_preresnet(blocks=38, bottleneck=True, conv1_stride=False, model_name="preresnetbc38b", **kwargs)
def preresnet50(**kwargs):
"""
PreResNet-50 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_preresnet(blocks=50, model_name="preresnet50", **kwargs)
def preresnet50b(**kwargs):
"""
PreResNet-50 model with stride at the second convolution in bottleneck block from 'Identity Mappings in Deep
Residual Networks,' https://arxiv.org/abs/1603.05027.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_preresnet(blocks=50, conv1_stride=False, model_name="preresnet50b", **kwargs)
def preresnet101(**kwargs):
"""
PreResNet-101 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_preresnet(blocks=101, model_name="preresnet101", **kwargs)
def preresnet101b(**kwargs):
"""
PreResNet-101 model with stride at the second convolution in bottleneck block from 'Identity Mappings in Deep
Residual Networks,' https://arxiv.org/abs/1603.05027.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_preresnet(blocks=101, conv1_stride=False, model_name="preresnet101b", **kwargs)
def preresnet152(**kwargs):
"""
PreResNet-152 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_preresnet(blocks=152, model_name="preresnet152", **kwargs)
def preresnet152b(**kwargs):
"""
PreResNet-152 model with stride at the second convolution in bottleneck block from 'Identity Mappings in Deep
Residual Networks,' https://arxiv.org/abs/1603.05027.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_preresnet(blocks=152, conv1_stride=False, model_name="preresnet152b", **kwargs)
def preresnet200(**kwargs):
"""
PreResNet-200 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_preresnet(blocks=200, model_name="preresnet200", **kwargs)
def preresnet200b(**kwargs):
"""
PreResNet-200 model with stride at the second convolution in bottleneck block from 'Identity Mappings in Deep
Residual Networks,' https://arxiv.org/abs/1603.05027.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_preresnet(blocks=200, conv1_stride=False, model_name="preresnet200b", **kwargs)
def preresnet269b(**kwargs):
"""
PreResNet-269 model with stride at the second convolution in bottleneck block from 'Identity Mappings in Deep
Residual Networks,' https://arxiv.org/abs/1603.05027.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_preresnet(blocks=269, conv1_stride=False, model_name="preresnet269b", **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
preresnet10,
preresnet12,
preresnet14,
preresnetbc14b,
preresnet16,
preresnet18_wd4,
preresnet18_wd2,
preresnet18_w3d4,
preresnet18,
preresnet26,
preresnetbc26b,
preresnet34,
preresnetbc38b,
preresnet50,
preresnet50b,
preresnet101,
preresnet101b,
preresnet152,
preresnet152b,
preresnet200,
preresnet200b,
preresnet269b,
]
for model in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != preresnet10 or weight_count == 5417128)
assert (model != preresnet12 or weight_count == 5491112)
assert (model != preresnet14 or weight_count == 5786536)
assert (model != preresnetbc14b or weight_count == 10057384)
assert (model != preresnet16 or weight_count == 6967208)
assert (model != preresnet18_wd4 or weight_count == 3935960)
assert (model != preresnet18_wd2 or weight_count == 5802440)
assert (model != preresnet18_w3d4 or weight_count == 8473784)
assert (model != preresnet18 or weight_count == 11687848)
assert (model != preresnet26 or weight_count == 17958568)
assert (model != preresnetbc26b or weight_count == 15987624)
assert (model != preresnet34 or weight_count == 21796008)
assert (model != preresnetbc38b or weight_count == 21917864)
assert (model != preresnet50 or weight_count == 25549480)
assert (model != preresnet50b or weight_count == 25549480)
assert (model != preresnet101 or weight_count == 44541608)
assert (model != preresnet101b or weight_count == 44541608)
assert (model != preresnet152 or weight_count == 60185256)
assert (model != preresnet152b or weight_count == 60185256)
assert (model != preresnet200 or weight_count == 64666280)
assert (model != preresnet200b or weight_count == 64666280)
assert (model != preresnet269b or weight_count == 102065832)
x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
y = net(x)
assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
| 31,235 | 34.175676 | 120 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/lednet.py | """
LEDNet for image segmentation, implemented in Gluon.
Original paper: 'LEDNet: A Lightweight Encoder-Decoder Network for Real-Time Semantic Segmentation,'
https://arxiv.org/abs/1905.02423.
"""
__all__ = ['LEDNet', 'lednet_cityscapes']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv3x3, conv1x1_block, conv3x3_block, conv5x5_block, conv7x7_block, ConvBlock, NormActivation,\
ChannelShuffle, InterpolationBlock, Hourglass, BreakBlock
class AsymConvBlock(HybridBlock):
"""
Asymmetric separable convolution block.
Parameters:
----------
channels : int
Number of input/output channels.
kernel_size : int
Convolution window size.
padding : int
Padding value for convolution layer.
dilation : int, default 1
Dilation value for convolution layer.
groups : int, default 1
Number of groups.
use_bias : bool, default False
Whether the layer uses a bias vector.
lw_use_bn : bool, default True
Whether to use BatchNorm layer (leftwise convolution block).
rw_use_bn : bool, default True
Whether to use BatchNorm layer (rightwise convolution block).
bn_epsilon : float, default 1e-5
Small float added to variance in Batch norm.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
lw_activation : function or str or None, default nn.Activation('relu')
Activation function after the leftwise convolution block.
rw_activation : function or str or None, default nn.Activation('relu')
Activation function after the rightwise convolution block.
"""
def __init__(self,
channels,
kernel_size,
padding,
dilation=1,
groups=1,
use_bias=False,
lw_use_bn=True,
rw_use_bn=True,
bn_epsilon=1e-5,
bn_use_global_stats=False,
bn_cudnn_off=False,
lw_activation=(lambda: nn.Activation("relu")),
rw_activation=(lambda: nn.Activation("relu")),
**kwargs):
super(AsymConvBlock, self).__init__(**kwargs)
with self.name_scope():
self.lw_conv = ConvBlock(
in_channels=channels,
out_channels=channels,
kernel_size=(kernel_size, 1),
strides=1,
padding=(padding, 0),
dilation=(dilation, 1),
groups=groups,
use_bias=use_bias,
use_bn=lw_use_bn,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
activation=lw_activation)
self.rw_conv = ConvBlock(
in_channels=channels,
out_channels=channels,
kernel_size=(1, kernel_size),
strides=1,
padding=(0, padding),
dilation=(1, dilation),
groups=groups,
use_bias=use_bias,
use_bn=rw_use_bn,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
activation=rw_activation)
def hybrid_forward(self, F, x):
x = self.lw_conv(x)
x = self.rw_conv(x)
return x
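# Illustrative sketch (not part of the original file): factorizing a k x k
# convolution into (k, 1) and (1, k) passes cuts per-channel weights from
# k * k to 2 * k while keeping the same receptive field. A minimal shape
# check, assuming MXNet is installed:
def _example_asym_conv_block():
    import mxnet as mx
    block = AsymConvBlock(channels=16, kernel_size=3, padding=1)
    block.initialize()
    y = block(mx.nd.zeros((1, 16, 32, 32)))
    assert y.shape == (1, 16, 32, 32)  # spatial size and channels preserved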
def asym_conv3x3_block(padding=1,
**kwargs):
"""
3x3 asymmetric separable convolution block.
Parameters:
----------
channels : int
Number of input/output channels.
padding : int, default 1
Padding value for convolution layer.
dilation : int, default 1
Dilation value for convolution layer.
groups : int, default 1
Number of groups.
use_bias : bool, default False
Whether the layer uses a bias vector.
lw_use_bn : bool, default True
Whether to use BatchNorm layer (leftwise convolution block).
rw_use_bn : bool, default True
Whether to use BatchNorm layer (rightwise convolution block).
bn_epsilon : float, default 1e-5
Small float added to variance in Batch norm.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
lw_activation : function or str or None, default nn.Activation('relu')
Activation function after the leftwise convolution block.
rw_activation : function or str or None, default nn.Activation('relu')
Activation function after the rightwise convolution block.
"""
return AsymConvBlock(
kernel_size=3,
padding=padding,
**kwargs)
class LEDDownBlock(HybridBlock):
"""
LEDNet specific downscale block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
    correct_size_mismatch : bool
Whether to correct downscaled sizes of images.
bn_epsilon : float
Small float added to variance in Batch norm.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
"""
def __init__(self,
in_channels,
out_channels,
correct_size_mismatch,
bn_epsilon,
bn_use_global_stats=False,
bn_cudnn_off=False,
**kwargs):
super(LEDDownBlock, self).__init__(**kwargs)
self.correct_size_mismatch = correct_size_mismatch
with self.name_scope():
self.pool = nn.MaxPool2D(
pool_size=2,
strides=2)
self.conv = conv3x3(
in_channels=in_channels,
out_channels=(out_channels - in_channels),
strides=2,
use_bias=True)
self.norm_activ = NormActivation(
in_channels=out_channels,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off)
def hybrid_forward(self, F, x):
y1 = self.pool(x)
y2 = self.conv(x)
if self.correct_size_mismatch:
            diff_h = y2.shape[2] - y1.shape[2]
            diff_w = y2.shape[3] - y1.shape[3]
            # pad_width pairs follow NCHW order: (N, N, C, C, H, H, W, W)
            y1 = F.pad(
                y1,
                mode="constant",
                pad_width=(0, 0, 0, 0, diff_h // 2, diff_h - diff_h // 2, diff_w // 2, diff_w - diff_w // 2),
                constant_value=0)
x = F.concat(y2, y1, dim=1)
x = self.norm_activ(x)
return x
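# Minimal usage sketch (illustrative, assumes MXNet): the downscale block
# concatenates (out_channels - in_channels) strided-conv maps with the
# in_channels max-pooled maps, so the output has exactly out_channels maps
# at half the spatial resolution.
def _example_led_down_block():
    import mxnet as mx
    block = LEDDownBlock(in_channels=3, out_channels=32,
                         correct_size_mismatch=False, bn_epsilon=1e-3)
    block.initialize()
    y = block(mx.nd.zeros((1, 3, 64, 64)))
    assert y.shape == (1, 32, 32, 32)  # 29 conv maps + 3 pooled maps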
class LEDBranch(HybridBlock):
"""
LEDNet encoder branch.
Parameters:
----------
channels : int
Number of input/output channels.
dilation : int
Dilation value for convolution layer.
dropout_rate : float
        Parameter of Dropout layer. Fraction of the input units to drop.
bn_epsilon : float
Small float added to variance in Batch norm.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
"""
def __init__(self,
channels,
dilation,
dropout_rate,
bn_epsilon,
bn_use_global_stats=False,
bn_cudnn_off=False,
**kwargs):
super(LEDBranch, self).__init__(**kwargs)
self.use_dropout = (dropout_rate != 0.0)
with self.name_scope():
self.conv1 = asym_conv3x3_block(
channels=channels,
use_bias=True,
lw_use_bn=False,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off)
self.conv2 = asym_conv3x3_block(
channels=channels,
padding=dilation,
dilation=dilation,
use_bias=True,
lw_use_bn=False,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
rw_activation=None)
if self.use_dropout:
self.dropout = nn.Dropout(rate=dropout_rate)
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.conv2(x)
if self.use_dropout:
x = self.dropout(x)
return x
class LEDUnit(HybridBlock):
"""
LEDNet encoder unit (Split-Shuffle-non-bottleneck).
Parameters:
----------
channels : int
Number of input/output channels.
dilation : int
Dilation value for convolution layer.
dropout_rate : float
        Parameter of Dropout layer. Fraction of the input units to drop.
bn_epsilon : float
Small float added to variance in Batch norm.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
"""
def __init__(self,
channels,
dilation,
dropout_rate,
bn_epsilon,
bn_use_global_stats=False,
bn_cudnn_off=False,
**kwargs):
super(LEDUnit, self).__init__(**kwargs)
mid_channels = channels // 2
with self.name_scope():
self.left_branch = LEDBranch(
channels=mid_channels,
dilation=dilation,
dropout_rate=dropout_rate,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off)
self.right_branch = LEDBranch(
channels=mid_channels,
dilation=dilation,
dropout_rate=dropout_rate,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off)
self.activ = nn.Activation("relu")
self.shuffle = ChannelShuffle(
channels=channels,
groups=2)
def hybrid_forward(self, F, x):
identity = x
x1, x2 = F.split(x, axis=1, num_outputs=2)
x1 = self.left_branch(x1)
x2 = self.right_branch(x2)
x = F.concat(x1, x2, dim=1)
x = x + identity
x = self.activ(x)
x = self.shuffle(x)
return x
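# Minimal usage sketch (illustrative, assumes MXNet): the unit splits the
# channels in half, runs each half through an asymmetric branch, concatenates,
# adds the residual, and shuffles channels, so input and output shapes match.
def _example_led_unit():
    import mxnet as mx
    unit = LEDUnit(channels=32, dilation=2, dropout_rate=0.0, bn_epsilon=1e-3)
    unit.initialize()
    y = unit(mx.nd.zeros((1, 32, 16, 16)))
    assert y.shape == (1, 32, 16, 16)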
class PoolingBranch(HybridBlock):
"""
Pooling branch.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
use_bias : bool
Whether the layer uses a bias vector.
bn_epsilon : float
Small float added to variance in Batch norm.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
bn_cudnn_off : bool
Whether to disable CUDNN batch normalization operator.
in_size : tuple of 2 int or None
Spatial size of input image.
down_size : int
Spatial size of downscaled image.
"""
def __init__(self,
in_channels,
out_channels,
use_bias,
bn_epsilon,
bn_use_global_stats,
bn_cudnn_off,
in_size,
down_size,
**kwargs):
super(PoolingBranch, self).__init__(**kwargs)
self.in_size = in_size
self.down_size = down_size
with self.name_scope():
self.conv = conv1x1_block(
in_channels=in_channels,
out_channels=out_channels,
use_bias=use_bias,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off)
self.up = InterpolationBlock(
scale_factor=None,
out_size=in_size)
def hybrid_forward(self, F, x):
in_size = self.in_size if self.in_size is not None else x.shape[2:]
x = F.contrib.AdaptiveAvgPooling2D(x, output_size=self.down_size)
x = self.conv(x)
x = self.up(x, in_size)
return x
class APN(HybridBlock):
"""
Attention pyramid network block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
bn_epsilon : float
Small float added to variance in Batch norm.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
bn_cudnn_off : bool
Whether to disable CUDNN batch normalization operator.
in_size : tuple of 2 int or None
Spatial size of input image.
"""
def __init__(self,
in_channels,
out_channels,
bn_epsilon,
bn_use_global_stats,
bn_cudnn_off,
in_size,
**kwargs):
super(APN, self).__init__(**kwargs)
self.in_size = in_size
att_out_channels = 1
with self.name_scope():
self.pool_branch = PoolingBranch(
in_channels=in_channels,
out_channels=out_channels,
use_bias=True,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
in_size=in_size,
down_size=1)
self.body = conv1x1_block(
in_channels=in_channels,
out_channels=out_channels,
use_bias=True,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off)
down_seq = nn.HybridSequential(prefix="")
down_seq.add(conv7x7_block(
in_channels=in_channels,
out_channels=att_out_channels,
strides=2,
use_bias=True,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off))
down_seq.add(conv5x5_block(
in_channels=att_out_channels,
out_channels=att_out_channels,
strides=2,
use_bias=True,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off))
down3_subseq = nn.HybridSequential(prefix="")
down3_subseq.add(conv3x3_block(
in_channels=att_out_channels,
out_channels=att_out_channels,
strides=2,
use_bias=True,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off))
down3_subseq.add(conv3x3_block(
in_channels=att_out_channels,
out_channels=att_out_channels,
use_bias=True,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off))
down_seq.add(down3_subseq)
up_seq = nn.HybridSequential(prefix="")
up = InterpolationBlock(scale_factor=2)
up_seq.add(up)
up_seq.add(up)
up_seq.add(up)
skip_seq = nn.HybridSequential(prefix="")
skip_seq.add(BreakBlock())
skip_seq.add(conv7x7_block(
in_channels=att_out_channels,
out_channels=att_out_channels,
use_bias=True,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off))
skip_seq.add(conv5x5_block(
in_channels=att_out_channels,
out_channels=att_out_channels,
use_bias=True,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off))
self.hg = Hourglass(
down_seq=down_seq,
up_seq=up_seq,
skip_seq=skip_seq)
def hybrid_forward(self, F, x):
y = self.pool_branch(x)
w = self.hg(x)
x = self.body(x)
x = x * w
x = x + y
return x
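# In effect the block computes `body(x) * hourglass(x) + pool_branch(x)`: the
# hourglass produces a single-channel multi-scale attention map that gates the
# 1x1-projected features, while the global-pooling branch adds a coarse
# image-level prior (a reading of the code above, not a quote from the paper).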
class LEDNet(HybridBlock):
"""
LEDNet model from 'LEDNet: A Lightweight Encoder-Decoder Network for Real-Time Semantic Segmentation,'
https://arxiv.org/abs/1905.02423.
Parameters:
----------
channels : list of int
Number of output channels for each unit.
dilations : list of int
Dilations for units.
dropout_rates : list of list of int
Dropout rates for each unit in encoder.
    correct_size_mismatch : bool
Whether to correct downscaled sizes of images in encoder.
bn_epsilon : float, default 1e-5
Small float added to variance in Batch norm.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
aux : bool, default False
Whether to output an auxiliary result.
fixed_size : bool, default False
Whether to expect fixed spatial size of input image.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (1024, 2048)
Spatial size of the expected input image.
classes : int, default 19
Number of segmentation classes.
"""
def __init__(self,
channels,
dilations,
dropout_rates,
correct_size_mismatch=False,
bn_epsilon=1e-5,
bn_use_global_stats=False,
bn_cudnn_off=False,
aux=False,
fixed_size=False,
in_channels=3,
in_size=(1024, 2048),
classes=19,
**kwargs):
super(LEDNet, self).__init__(**kwargs)
assert (aux is not None)
assert (fixed_size is not None)
assert ((in_size[0] % 8 == 0) and (in_size[1] % 8 == 0))
self.in_size = in_size
self.classes = classes
self.fixed_size = fixed_size
with self.name_scope():
self.encoder = nn.HybridSequential(prefix="")
for i, dilations_per_stage in enumerate(dilations):
out_channels = channels[i]
dropout_rate = dropout_rates[i]
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
for j, dilation in enumerate(dilations_per_stage):
if j == 0:
stage.add(LEDDownBlock(
in_channels=in_channels,
out_channels=out_channels,
correct_size_mismatch=correct_size_mismatch,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off))
in_channels = out_channels
else:
stage.add(LEDUnit(
channels=in_channels,
dilation=dilation,
dropout_rate=dropout_rate,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off))
self.encoder.add(stage)
self.apn = APN(
in_channels=in_channels,
out_channels=classes,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
in_size=(in_size[0] // 8, in_size[1] // 8) if fixed_size else None)
self.up = InterpolationBlock(scale_factor=8)
def hybrid_forward(self, F, x):
x = self.encoder(x)
x = self.apn(x)
x = self.up(x)
return x
def get_lednet(model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create LEDNet model with specific parameters.
Parameters:
----------
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
channels = [32, 64, 128]
dilations = [[0, 1, 1, 1], [0, 1, 1], [0, 1, 2, 5, 9, 2, 5, 9, 17]]
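    # Note: the leading 0 in each per-stage list is a placeholder; position
    # j == 0 builds the LEDDownBlock, the remaining entries set LEDUnit dilations.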
dropout_rates = [0.03, 0.03, 0.3]
bn_epsilon = 1e-3
net = LEDNet(
channels=channels,
dilations=dilations,
dropout_rates=dropout_rates,
bn_epsilon=bn_epsilon,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx,
ignore_extra=True)
return net
def lednet_cityscapes(classes=19, **kwargs):
"""
LEDNet model for Cityscapes from 'LEDNet: A Lightweight Encoder-Decoder Network for Real-Time Semantic
Segmentation,' https://arxiv.org/abs/1905.02423.
Parameters:
----------
classes : int, default 19
Number of segmentation classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_lednet(classes=classes, model_name="lednet_cityscapes", **kwargs)
def _calc_width(net):
import numpy as np
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
return weight_count
def _test():
import mxnet as mx
pretrained = False
fixed_size = True
correct_size_mismatch = False
in_size = (1024, 2048)
classes = 19
models = [
lednet_cityscapes,
]
for model in models:
net = model(pretrained=pretrained, in_size=in_size, fixed_size=fixed_size,
correct_size_mismatch=correct_size_mismatch)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
weight_count = _calc_width(net)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != lednet_cityscapes or weight_count == 922821)
batch = 4
x = mx.nd.random.normal(shape=(batch, 3, in_size[0], in_size[1]), ctx=ctx)
y = net(x)
assert (y.shape == (batch, classes, in_size[0], in_size[1]))
if __name__ == "__main__":
_test()
| 24,449 | 33.48519 | 116 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/superpointnet.py | """
SuperPointNet for HPatches (image matching), implemented in Gluon.
Original paper: 'SuperPoint: Self-Supervised Interest Point Detection and Description,'
https://arxiv.org/abs/1712.07629.
"""
__all__ = ['SuperPointNet', 'superpointnet']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1, conv3x3_block
def interpolate_bilinear(p,
img,
img_size,
transpose=True):
"""
Bilinear interpolation.
Parameters:
----------
p : NDArray
Float coordinates.
img : NDArray
        Original image.
    img_size : tuple of two ints
Image size.
transpose : bool, default True
        Whether to transpose the output with respect to the input.
Returns:
-------
zz : NDArray
Interpolated values.
"""
p0 = p.floor().astype(int)
p1 = p0 + 1
x = p.slice_axis(axis=1, begin=0, end=1).squeeze(axis=1)
y = p.slice_axis(axis=1, begin=1, end=2).squeeze(axis=1)
x0 = p0.slice_axis(axis=1, begin=0, end=1).squeeze(axis=1)
x1 = p1.slice_axis(axis=1, begin=0, end=1).squeeze(axis=1)
y0 = p0.slice_axis(axis=1, begin=1, end=2).squeeze(axis=1)
y1 = p1.slice_axis(axis=1, begin=1, end=2).squeeze(axis=1)
x0 = x0.clip(0, img_size[0] - 1)
x1 = x1.clip(0, img_size[0] - 1)
y0 = y0.clip(0, img_size[1] - 1)
y1 = y1.clip(0, img_size[1] - 1)
z00 = img[:, x0, y0].T
z01 = img[:, x0, y1].T
z10 = img[:, x1, y0].T
z11 = img[:, x1, y1].T
x0 = x0.astype(p.dtype)
x1 = x1.astype(p.dtype)
y0 = y0.astype(p.dtype)
y1 = y1.astype(p.dtype)
w00 = ((x - x0) * (y - y0)).expand_dims(axis=1)
w01 = ((x - x0) * (y1 - y)).expand_dims(axis=1)
w10 = ((x1 - x) * (y - y0)).expand_dims(axis=1)
w11 = ((x1 - x) * (y1 - y)).expand_dims(axis=1)
zz = (z00 * w11) + (z10 * w10) + (z01 * w01) + (z11 * w00)
if not transpose:
zz = zz.T
return zz
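# Worked example (illustrative, assumes MXNet): querying the centre of a
# 2x2 image should return the mean of the four pixels, (0 + 1 + 2 + 3) / 4.
def _example_interpolate_bilinear():
    import mxnet as mx
    img = mx.nd.array([0.0, 1.0, 2.0, 3.0]).reshape((1, 2, 2))
    p = mx.nd.array([[0.5, 0.5]])
    z = interpolate_bilinear(p=p, img=img, img_size=(2, 2))
    assert abs(float(z.asnumpy()) - 1.5) < 1e-6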
class SPHead(HybridBlock):
"""
SuperPointNet head block.
Parameters:
----------
in_channels : int
Number of input channels.
mid_channels : int
Number of middle channels.
out_channels : int
Number of output channels.
"""
def __init__(self,
in_channels,
mid_channels,
out_channels,
**kwargs):
super(SPHead, self).__init__(**kwargs)
with self.name_scope():
self.conv1 = conv3x3_block(
in_channels=in_channels,
out_channels=mid_channels,
use_bias=True,
use_bn=False)
self.conv2 = conv1x1(
in_channels=mid_channels,
out_channels=out_channels,
use_bias=True)
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.conv2(x)
return x
class SPDetector(HybridBlock):
"""
SuperPointNet detector.
Parameters:
----------
in_channels : int
Number of input channels.
mid_channels : int
Number of middle channels.
conf_thresh : float, default 0.015
Confidence threshold.
nms_dist : int, default 4
NMS distance.
use_batch_box_nms : bool, default True
        Whether to use `F.contrib.box_nms` for batched non-maximum suppression.
    hybridizable : bool, default True
        Whether to allow hybridizing this block.
batch_size : int, default 1
Batch size.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
reduction : int, default 8
Feature reduction factor.
"""
def __init__(self,
in_channels,
mid_channels,
conf_thresh=0.015,
nms_dist=4,
use_batch_box_nms=True,
hybridizable=True,
batch_size=1,
in_size=(224, 224),
reduction=8,
**kwargs):
super(SPDetector, self).__init__(**kwargs)
assert ((batch_size is not None) or not hybridizable)
assert ((in_size is not None) or not hybridizable)
assert (use_batch_box_nms or not hybridizable)
self.conf_thresh = conf_thresh
self.nms_dist = nms_dist
self.use_batch_box_nms = use_batch_box_nms
self.hybridizable = hybridizable
self.batch_size = batch_size
self.in_size = in_size
self.reduction = reduction
num_classes = reduction * reduction + 1
with self.name_scope():
self.detector = SPHead(
in_channels=in_channels,
mid_channels=mid_channels,
out_channels=num_classes)
def hybrid_forward(self, F, x):
semi = self.detector(x)
dense = semi.softmax(axis=1)
nodust = dense.slice_axis(axis=1, begin=0, end=-1)
heatmap = nodust.transpose(axes=(0, 2, 3, 1))
heatmap = heatmap.reshape(shape=(0, 0, 0, self.reduction, self.reduction))
heatmap = heatmap.transpose(axes=(0, 1, 3, 2, 4))
in_size = self.in_size if self.in_size is not None else (x.shape[2] * self.reduction,
x.shape[3] * self.reduction)
batch_size = self.batch_size if self.batch_size is not None else x.shape[0]
if self.use_batch_box_nms:
heatmap = heatmap.reshape(shape=(0, -1))
in_nms = F.stack(
heatmap,
F.arange(in_size[0], repeat=in_size[1]).tile((batch_size, 1)),
F.arange(in_size[1]).tile((batch_size, in_size[0])),
F.zeros_like(heatmap) + self.nms_dist,
F.zeros_like(heatmap) + self.nms_dist,
axis=2)
out_nms = F.contrib.box_nms(
data=in_nms,
overlap_thresh=1e-3,
valid_thresh=self.conf_thresh,
coord_start=1,
score_index=0,
id_index=-1,
force_suppress=False,
in_format="center",
out_format="center")
confs = out_nms.slice_axis(axis=2, begin=0, end=1).reshape(shape=(0, -1))
pts = out_nms.slice_axis(axis=2, begin=1, end=3)
if self.hybridizable:
return pts, confs
confs_list = []
pts_list = []
counts = (confs > 0).sum(axis=1)
for i in range(batch_size):
count_i = int(counts[i].asscalar())
confs_i = confs[i].slice_axis(axis=0, begin=0, end=count_i)
pts_i = pts[i].slice_axis(axis=0, begin=0, end=count_i)
confs_list.append(confs_i)
pts_list.append(pts_i)
return pts_list, confs_list
else:
img_height = in_size[0]
img_width = in_size[1]
heatmap = heatmap.reshape(shape=(0, -3, -3)).expand_dims(axis=1)
heatmap = F.where(heatmap >= self.conf_thresh, heatmap, F.zeros_like(heatmap))
heatmap_mask = (heatmap >= 0)
pad = self.nms_dist
pad_width = (0, 0, 0, 0, pad, pad, pad, pad)
heatmap_mask2 = heatmap_mask.pad(mode="constant", pad_width=pad_width, constant_value=0)
confs_list = []
pts_list = []
for i in range(batch_size):
heatmap_i = heatmap[i].squeeze(axis=0)
heatmap_i_csr = heatmap_i.tostype("csr")
row_sizes = heatmap_i_csr.indptr[1:] - heatmap_i_csr.indptr[:-1]
row_inds = heatmap_i_csr.data.zeros_like()
row_size_count = 0
for j, row_size in enumerate(row_sizes):
row_size_j = row_size.asscalar()
row_inds[row_size_count:(row_size_count + row_size_j)] = j
row_size_count += row_size_j
src_inds = heatmap_i_csr.data.argsort(is_ascend=False)
dst_pts_count = 0
heatmap_mask2_i = heatmap_mask2[i, 0]
dst_confs = heatmap_i_csr.data.zeros_like()
dst_pts = F.stack(dst_confs, dst_confs, axis=1)
for src_ind in src_inds:
src_ind_j = int(src_ind.asscalar())
col_j = int(heatmap_i_csr.indices[src_ind_j].asscalar())
row_j = int(row_inds[src_ind_j].asscalar())
pt = (row_j + pad, col_j + pad)
assert (pad <= pt[0] < heatmap_mask2_i.shape[0] - pad)
assert (pad <= pt[1] < heatmap_mask2_i.shape[1] - pad)
assert (0 <= pt[0] - pad < img_height)
assert (0 <= pt[1] - pad < img_width)
if heatmap_mask2_i[pt[0], pt[1]] == 1:
heatmap_mask2_i[(pt[0] - pad):(pt[0] + pad + 1), (pt[1] - pad):(pt[1] + pad + 1)] = 0
if (0 <= pt[0] - pad < img_height) and (0 <= pt[1] - pad < img_width):
dst_confs[dst_pts_count] = heatmap_i_csr.data[src_ind_j].asscalar()
dst_pts[dst_pts_count, 0] = row_j
dst_pts[dst_pts_count, 1] = col_j
dst_pts_count += 1
dst_confs = dst_confs[:dst_pts_count]
dst_pts = dst_pts[:dst_pts_count]
confs_list.append(dst_confs)
pts_list.append(dst_pts)
return pts_list, confs_list
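# Decoding note (a reading of the code above): for reduction=8 the head emits
# 8 * 8 + 1 = 65 channels per cell; the extra channel is a "dustbin" for cells
# with no keypoint and is dropped after the softmax. The remaining 64 scores
# per cell are unfolded into an 8x8 pixel block, e.g. a (1, 65, 28, 28) output
# becomes a (1, 224, 224) heatmap before thresholding and NMS.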
class SPDescriptor(HybridBlock):
"""
SuperPointNet descriptor generator.
Parameters:
----------
in_channels : int
Number of input channels.
mid_channels : int
Number of middle channels.
descriptor_length : int, default 256
Descriptor length.
transpose_descriptors : bool, default True
Whether transpose descriptors with respect to points.
use_map_resize : bool, default True
        Whether to resize the descriptor map to input resolution (otherwise sample it at keypoints).
    hybridizable : bool, default True
        Whether to allow hybridizing this block.
batch_size : int, default 1
Batch size.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
reduction : int, default 8
Feature reduction factor.
"""
def __init__(self,
in_channels,
mid_channels,
descriptor_length=256,
transpose_descriptors=True,
use_map_resize=True,
hybridizable=True,
batch_size=1,
in_size=(224, 224),
reduction=8,
**kwargs):
super(SPDescriptor, self).__init__(**kwargs)
assert ((batch_size is not None) or not hybridizable)
assert ((in_size is not None) or not hybridizable)
assert (use_map_resize or not hybridizable)
self.desc_length = descriptor_length
self.transpose_descriptors = transpose_descriptors
self.use_map_resize = use_map_resize
self.hybridizable = hybridizable
self.batch_size = batch_size
self.in_size = in_size
self.reduction = reduction
with self.name_scope():
self.head = SPHead(
in_channels=in_channels,
mid_channels=mid_channels,
out_channels=descriptor_length)
def hybrid_forward(self, F, x, pts):
coarse_desc_map = self.head(x)
coarse_desc_map = F.L2Normalization(coarse_desc_map, mode="channel")
in_size = self.in_size if self.in_size is not None else (x.shape[2] * self.reduction,
x.shape[3] * self.reduction)
if self.use_map_resize:
desc_map = F.contrib.BilinearResize2D(coarse_desc_map, height=in_size[0], width=in_size[1])
desc_map = F.L2Normalization(desc_map, mode="channel")
if not self.transpose_descriptors:
desc_map = desc_map.transpose(axes=(0, 1, 3, 2))
desc_map = desc_map.transpose(axes=(0, 2, 3, 1))
if self.hybridizable:
return desc_map
batch_size = self.batch_size if self.batch_size is not None else x.shape[0]
desc_map = desc_map.reshape(shape=(0, -3, 0))
desc_list = []
for i in range(batch_size):
desc_map_i = desc_map[i]
pts_i_tr = pts[i].transpose()
pts_ravel_i = F.ravel_multi_index(pts_i_tr, shape=in_size)
desc_map_sorted_i = F.take(desc_map_i, pts_ravel_i)
desc_list.append(desc_map_sorted_i)
return desc_list
else:
pts0 = (1.0 / self.reduction) * pts
batch_size = self.batch_size if self.batch_size is not None else x.shape[0]
desc_list = []
for i in range(batch_size):
src_desc_map_i = coarse_desc_map[i]
pts0_i = pts0[i]
dst_desc_map_i = interpolate_bilinear(
p=pts0_i,
img=src_desc_map_i,
img_size=(in_size[0] // self.reduction, in_size[1] // self.reduction))
desc_list.append(dst_desc_map_i)
return desc_list
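# Descriptor sampling note (a reading of the code above): both branches yield
# one L2-normalized `descriptor_length`-dimensional vector per keypoint, either
# by upsampling the coarse map to full resolution and gathering rows at the
# keypoint indices, or by bilinearly interpolating the coarse map at the
# keypoint coordinates scaled by 1 / reduction.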
class SuperPointNet(HybridBlock):
"""
SuperPointNet model from 'SuperPoint: Self-Supervised Interest Point Detection and Description,'
https://arxiv.org/abs/1712.07629.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
final_block_channels : int
Number of output channels for the final units.
transpose_descriptors : bool, default True
Whether transpose descriptors with respect to points.
hybridizable : bool, default True
        Whether to allow hybridizing this block.
batch_size : int, default 1
Batch size.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
in_channels : int, default 1
Number of input channels.
"""
def __init__(self,
channels,
final_block_channels,
transpose_descriptors=True,
hybridizable=True,
batch_size=1,
in_size=(224, 224),
in_channels=1,
**kwargs):
super(SuperPointNet, self).__init__(**kwargs)
assert ((batch_size is not None) or not hybridizable)
assert ((in_size is not None) or not hybridizable)
self.batch_size = batch_size
self.in_size = in_size
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
for i, channels_per_stage in enumerate(channels):
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
for j, out_channels in enumerate(channels_per_stage):
if (j == 0) and (i != 0):
stage.add(nn.MaxPool2D(
pool_size=2,
strides=2))
stage.add(conv3x3_block(
in_channels=in_channels,
out_channels=out_channels,
use_bias=True,
use_bn=False))
in_channels = out_channels
self.features.add(stage)
self.detector = SPDetector(
in_channels=in_channels,
mid_channels=final_block_channels,
hybridizable=hybridizable,
batch_size=batch_size,
in_size=in_size)
self.descriptor = SPDescriptor(
in_channels=in_channels,
mid_channels=final_block_channels,
transpose_descriptors=transpose_descriptors,
hybridizable=hybridizable,
batch_size=batch_size,
in_size=in_size)
def hybrid_forward(self, F, x):
x = self.features(x)
pts, confs = self.detector(x)
desc_map = self.descriptor(x, pts)
return pts, confs, desc_map
def get_superpointnet(model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create SuperPointNet model with specific parameters.
Parameters:
----------
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
channels_per_layers = [64, 64, 128, 128]
layers = [2, 2, 2, 2]
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
final_block_channels = 256
net = SuperPointNet(
channels=channels,
final_block_channels=final_block_channels,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def superpointnet(**kwargs):
"""
SuperPointNet model from 'SuperPoint: Self-Supervised Interest Point Detection and Description,'
https://arxiv.org/abs/1712.07629.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_superpointnet(model_name="superpointnet", **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
hybridizable = True
batch_size = 1
# in_size = (224, 224)
in_size = (200, 400)
# in_size = (1000, 2000)
models = [
superpointnet,
]
for model in models:
net = model(pretrained=pretrained, hybridizable=hybridizable, batch_size=batch_size, in_size=in_size)
ctx = mx.gpu(0)
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != superpointnet or weight_count == 1300865)
x = mx.nd.random.normal(shape=(batch_size, 1, in_size[0], in_size[1]), ctx=ctx)
y = net(x)
assert (len(y) == 3)
if __name__ == "__main__":
_test()
| 19,321 | 34.323583 | 115 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/ibndensenet.py | """
IBN-DenseNet for ImageNet-1K, implemented in Gluon.
Original paper: 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'
https://arxiv.org/abs/1807.09441.
"""
__all__ = ['IBNDenseNet', 'ibn_densenet121', 'ibn_densenet161', 'ibn_densenet169', 'ibn_densenet201']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import pre_conv3x3_block, IBN
from .preresnet import PreResInitBlock, PreResActivation
from .densenet import TransitionBlock
class IBNPreConvBlock(HybridBlock):
"""
IBN-Net specific convolution block with BN/IBN normalization and ReLU pre-activation.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of 2 int
Convolution window size.
strides : int or tuple/list of 2 int
Strides of the convolution.
padding : int or tuple/list of 2 int
Padding value for convolution layer.
use_ibn : bool, default False
Whether use Instance-Batch Normalization.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
return_preact : bool, default False
Whether return pre-activation. It's used by PreResNet.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
strides,
padding,
use_ibn=False,
bn_use_global_stats=False,
return_preact=False,
**kwargs):
super(IBNPreConvBlock, self).__init__(**kwargs)
self.use_ibn = use_ibn
self.return_preact = return_preact
with self.name_scope():
if self.use_ibn:
self.ibn = IBN(
channels=in_channels,
bn_use_global_stats=bn_use_global_stats,
first_fraction=0.6,
inst_first=False)
else:
self.bn = nn.BatchNorm(
in_channels=in_channels,
use_global_stats=bn_use_global_stats)
self.activ = nn.Activation("relu")
self.conv = nn.Conv2D(
channels=out_channels,
kernel_size=kernel_size,
strides=strides,
padding=padding,
use_bias=False,
in_channels=in_channels)
def hybrid_forward(self, F, x):
if self.use_ibn:
x = self.ibn(x)
else:
x = self.bn(x)
x = self.activ(x)
if self.return_preact:
x_pre_activ = x
x = self.conv(x)
if self.return_preact:
return x, x_pre_activ
else:
return x
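# IBN note (a reading of `IBN` in .common with the arguments used above): with
# first_fraction=0.6 and inst_first=False, roughly the first 60% of the input
# channels go through BatchNorm and the remaining 40% through InstanceNorm,
# combining content-sensitive and appearance-invariant normalization before
# the shared ReLU and convolution.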
def ibn_pre_conv1x1_block(in_channels,
out_channels,
strides=1,
use_ibn=False,
bn_use_global_stats=False,
return_preact=False):
"""
1x1 version of the IBN-Net specific pre-activated convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int, default 1
Strides of the convolution.
use_ibn : bool, default False
Whether use Instance-Batch Normalization.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
return_preact : bool, default False
Whether return pre-activation.
"""
return IBNPreConvBlock(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=1,
strides=strides,
padding=0,
use_ibn=use_ibn,
bn_use_global_stats=bn_use_global_stats,
return_preact=return_preact)
class IBNDenseUnit(HybridBlock):
"""
IBN-DenseNet unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
dropout_rate : float
        Parameter of Dropout layer. Fraction of the input units to drop.
conv1_ibn : bool
Whether to use IBN normalization in the first convolution layer of the block.
"""
def __init__(self,
in_channels,
out_channels,
bn_use_global_stats,
dropout_rate,
conv1_ibn,
**kwargs):
super(IBNDenseUnit, self).__init__(**kwargs)
self.use_dropout = (dropout_rate != 0.0)
bn_size = 4
inc_channels = out_channels - in_channels
mid_channels = inc_channels * bn_size
with self.name_scope():
self.conv1 = ibn_pre_conv1x1_block(
in_channels=in_channels,
out_channels=mid_channels,
use_ibn=conv1_ibn,
bn_use_global_stats=bn_use_global_stats)
self.conv2 = pre_conv3x3_block(
in_channels=mid_channels,
out_channels=inc_channels,
bn_use_global_stats=bn_use_global_stats)
if self.use_dropout:
self.dropout = nn.Dropout(rate=dropout_rate)
def hybrid_forward(self, F, x):
identity = x
x = self.conv1(x)
x = self.conv2(x)
if self.use_dropout:
x = self.dropout(x)
x = F.concat(identity, x, dim=1)
return x
class IBNDenseNet(HybridBlock):
"""
IBN-DenseNet model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'
https://arxiv.org/abs/1807.09441.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
dropout_rate : float, default 0.0
        Parameter of Dropout layer. Fraction of the input units to drop.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
dropout_rate=0.0,
bn_use_global_stats=False,
in_channels=3,
in_size=(224, 224),
classes=1000,
**kwargs):
super(IBNDenseNet, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
self.features.add(PreResInitBlock(
in_channels=in_channels,
out_channels=init_block_channels,
bn_use_global_stats=bn_use_global_stats))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
with stage.name_scope():
if i != 0:
stage.add(TransitionBlock(
in_channels=in_channels,
out_channels=(in_channels // 2),
bn_use_global_stats=bn_use_global_stats))
in_channels = in_channels // 2
for j, out_channels in enumerate(channels_per_stage):
conv1_ibn = (i < 3) and (j % 3 == 0)
stage.add(IBNDenseUnit(
in_channels=in_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats,
dropout_rate=dropout_rate,
conv1_ibn=conv1_ibn))
in_channels = out_channels
self.features.add(stage)
self.features.add(PreResActivation(
in_channels=in_channels,
bn_use_global_stats=bn_use_global_stats))
self.features.add(nn.AvgPool2D(
pool_size=7,
strides=1))
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
self.output.add(nn.Dense(
units=classes,
in_units=in_channels))
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_ibndensenet(num_layers,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create IBN-DenseNet model with specific parameters.
Parameters:
----------
num_layers : int
Number of layers.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
if num_layers == 121:
init_block_channels = 64
growth_rate = 32
layers = [6, 12, 24, 16]
elif num_layers == 161:
init_block_channels = 96
growth_rate = 48
layers = [6, 12, 36, 24]
elif num_layers == 169:
init_block_channels = 64
growth_rate = 32
layers = [6, 12, 32, 32]
elif num_layers == 201:
init_block_channels = 64
growth_rate = 32
layers = [6, 12, 48, 32]
else:
raise ValueError("Unsupported IBN-DenseNet version with number of layers {}".format(num_layers))
from functools import reduce
channels = reduce(
lambda xi, yi: xi + [reduce(
lambda xj, yj: xj + [xj[-1] + yj],
[growth_rate] * yi,
[xi[-1][-1] // 2])[1:]],
layers,
[[init_block_channels * 2]])[1:]
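    # Worked example of the expression above: for num_layers=121
    # (init_block_channels=64, growth_rate=32, layers=[6, 12, 24, 16]) the
    # first stage starts from 128 // 2 = 64 and appends growth_rate per unit,
    # giving channels[0] == [96, 128, 160, 192, 224, 256].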
net = IBNDenseNet(
channels=channels,
init_block_channels=init_block_channels,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def ibn_densenet121(**kwargs):
"""
IBN-DenseNet-121 model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'
https://arxiv.org/abs/1807.09441.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_ibndensenet(num_layers=121, model_name="ibn_densenet121", **kwargs)
def ibn_densenet161(**kwargs):
"""
IBN-DenseNet-161 model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'
https://arxiv.org/abs/1807.09441.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_ibndensenet(num_layers=161, model_name="ibn_densenet161", **kwargs)
def ibn_densenet169(**kwargs):
"""
IBN-DenseNet-169 model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'
https://arxiv.org/abs/1807.09441.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_ibndensenet(num_layers=169, model_name="ibn_densenet169", **kwargs)
def ibn_densenet201(**kwargs):
"""
IBN-DenseNet-201 model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'
https://arxiv.org/abs/1807.09441.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_ibndensenet(num_layers=201, model_name="ibn_densenet201", **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
ibn_densenet121,
ibn_densenet161,
ibn_densenet169,
ibn_densenet201,
]
for model in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != ibn_densenet121 or weight_count == 7978856)
assert (model != ibn_densenet161 or weight_count == 28681000)
assert (model != ibn_densenet169 or weight_count == 14149480)
assert (model != ibn_densenet201 or weight_count == 20013928)
x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
y = net(x)
assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
| 14,757 | 32.848624 | 115 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/hardnet.py | """
HarDNet for ImageNet-1K, implemented in Gluon.
Original paper: 'HarDNet: A Low Memory Traffic Network,' https://arxiv.org/abs/1909.00948.
"""
__all__ = ['HarDNet', 'hardnet39ds', 'hardnet68ds', 'hardnet68', 'hardnet85']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1_block, conv3x3_block, dwconv3x3_block, dwconv_block
class InvDwsConvBlock(HybridBlock):
"""
    Inverse depthwise separable convolution block with BatchNorms and activations at each convolution layer.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of 2 int
Convolution window size.
strides : int or tuple/list of 2 int
Strides of the convolution.
padding : int or tuple/list of 2 int
Padding value for convolution layer.
dilation : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
use_bias : bool, default False
Whether the layer uses a bias vector.
use_bn : bool, default True
Whether to use BatchNorm layer.
bn_epsilon : float, default 1e-5
Small float added to variance in Batch norm.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
pw_activation : function or str or None, default nn.Activation('relu')
Activation function after the pointwise convolution block.
dw_activation : function or str or None, default nn.Activation('relu')
Activation function after the depthwise convolution block.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
strides,
padding,
dilation=1,
use_bias=False,
use_bn=True,
bn_epsilon=1e-5,
bn_use_global_stats=False,
pw_activation=(lambda: nn.Activation("relu")),
dw_activation=(lambda: nn.Activation("relu")),
**kwargs):
super(InvDwsConvBlock, self).__init__(**kwargs)
with self.name_scope():
self.pw_conv = conv1x1_block(
in_channels=in_channels,
out_channels=out_channels,
use_bias=use_bias,
use_bn=use_bn,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
activation=pw_activation)
self.dw_conv = dwconv_block(
in_channels=out_channels,
out_channels=out_channels,
kernel_size=kernel_size,
strides=strides,
padding=padding,
dilation=dilation,
use_bias=use_bias,
use_bn=use_bn,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
activation=dw_activation)
def hybrid_forward(self, F, x):
x = self.pw_conv(x)
x = self.dw_conv(x)
return x
def invdwsconv3x3_block(in_channels,
out_channels,
strides=1,
padding=1,
dilation=1,
use_bias=False,
bn_epsilon=1e-5,
bn_use_global_stats=False,
pw_activation=(lambda: nn.Activation("relu")),
dw_activation=(lambda: nn.Activation("relu")),
**kwargs):
"""
3x3 inverse depthwise separable version of the standard convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int, default 1
Strides of the convolution.
padding : int or tuple/list of 2 int, default 1
Padding value for convolution layer.
dilation : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
use_bias : bool, default False
Whether the layer uses a bias vector.
bn_epsilon : float, default 1e-5
Small float added to variance in Batch norm.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
pw_activation : function or str or None, default nn.Activation('relu')
Activation function after the pointwise convolution block.
dw_activation : function or str or None, default nn.Activation('relu')
Activation function after the depthwise convolution block.
"""
return InvDwsConvBlock(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
strides=strides,
padding=padding,
dilation=dilation,
use_bias=use_bias,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
pw_activation=pw_activation,
dw_activation=dw_activation,
**kwargs)
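# Naming note: "inverse" here means the pointwise 1x1 convolution runs before
# the depthwise convolution, i.e. the reverse of the usual depthwise-separable
# ordering (depthwise first, pointwise second).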
class HarDUnit(HybridBlock):
"""
HarDNet unit.
Parameters:
----------
in_channels_list : list of int
Number of input channels for each block.
out_channels_list : list of int
Number of output channels for each block.
links_list : list of list of int
List of indices for each layer.
use_deptwise : bool
        Whether to use depthwise separable convolutions and depthwise downsampling.
use_dropout : bool
        Whether to use a dropout module.
downsampling : bool
Whether to downsample input.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
activation : str
Name of activation function.
"""
def __init__(self,
in_channels_list,
out_channels_list,
links_list,
use_deptwise,
use_dropout,
downsampling,
bn_use_global_stats,
activation,
**kwargs):
super(HarDUnit, self).__init__(**kwargs)
self.links_list = links_list
self.use_dropout = use_dropout
self.downsampling = downsampling
with self.name_scope():
self.blocks = nn.HybridSequential(prefix="")
for i in range(len(links_list)):
in_channels = in_channels_list[i]
out_channels = out_channels_list[i]
if use_deptwise:
unit = invdwsconv3x3_block(
in_channels=in_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats,
pw_activation=activation,
dw_activation=None)
else:
unit = conv3x3_block(
in_channels=in_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats)
self.blocks.add(unit)
if self.use_dropout:
self.dropout = nn.Dropout(rate=0.1)
self.conv = conv1x1_block(
in_channels=in_channels_list[-1],
out_channels=out_channels_list[-1],
bn_use_global_stats=bn_use_global_stats,
activation=activation)
if self.downsampling:
if use_deptwise:
self.downsample = dwconv3x3_block(
in_channels=out_channels_list[-1],
out_channels=out_channels_list[-1],
strides=2,
bn_use_global_stats=bn_use_global_stats,
activation=None)
else:
self.downsample = nn.MaxPool2D(
pool_size=2,
strides=2)
def hybrid_forward(self, F, x):
layer_outs = [x]
for links_i, layer_i in zip(self.links_list, self.blocks._children.values()):
layer_in = []
for idx_ij in links_i:
layer_in.append(layer_outs[idx_ij])
if len(layer_in) > 1:
x = F.concat(*layer_in, dim=1)
else:
x = layer_in[0]
out = layer_i(x)
layer_outs.append(out)
outs = []
for i, layer_out_i in enumerate(layer_outs):
if (i == len(layer_outs) - 1) or (i % 2 == 1):
outs.append(layer_out_i)
x = F.concat(*outs, dim=1)
if self.use_dropout:
x = self.dropout(x)
x = self.conv(x)
if self.downsampling:
x = self.downsample(x)
return x
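# Illustrative sketch (not part of the original code): the harmonic ("HarD")
# wiring links layer l to layers l - 2**k for every power of two 2**k that
# divides l, which is what `links_list` encodes above.
def _example_hardnet_links(layer_idx):
    links = []
    for k in range(10):
        dv = 2 ** k
        if layer_idx % dv == 0:
            links.append(layer_idx - dv)
    return links
# e.g. _example_hardnet_links(4) -> [3, 2, 0], _example_hardnet_links(5) -> [4]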
class HarDInitBlock(HybridBlock):
"""
HarDNet specific initial block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
use_deptwise : bool
Whether to use depthwise downsampling.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
activation : str
Name of activation function.
"""
def __init__(self,
in_channels,
out_channels,
use_deptwise,
bn_use_global_stats,
activation,
**kwargs):
super(HarDInitBlock, self).__init__(**kwargs)
mid_channels = out_channels // 2
with self.name_scope():
self.conv1 = conv3x3_block(
in_channels=in_channels,
out_channels=mid_channels,
strides=2,
bn_use_global_stats=bn_use_global_stats,
activation=activation)
conv2_block_class = conv1x1_block if use_deptwise else conv3x3_block
self.conv2 = conv2_block_class(
in_channels=mid_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats,
activation=activation)
if use_deptwise:
self.downsample = dwconv3x3_block(
in_channels=out_channels,
out_channels=out_channels,
strides=2,
bn_use_global_stats=bn_use_global_stats,
activation=None)
else:
self.downsample = nn.MaxPool2D(
pool_size=3,
strides=2,
padding=1)
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.downsample(x)
return x
class HarDNet(HybridBlock):
"""
HarDNet model from 'HarDNet: A Low Memory Traffic Network,' https://arxiv.org/abs/1909.00948.
Parameters:
----------
init_block_channels : int
Number of output channels for the initial unit.
unit_in_channels : list of list of list of int
Number of input channels for each layer in each stage.
    unit_out_channels : list of list of list of int
Number of output channels for each layer in each stage.
unit_links : list of list of list of int
List of indices for each layer in each stage.
use_deptwise : bool
Whether to use depthwise downsampling.
use_last_dropout : bool
Whether to use dropouts in the last unit.
output_dropout_rate : float
        Parameter of Dropout layer before classifier. Fraction of the input units to drop.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
init_block_channels,
unit_in_channels,
unit_out_channels,
unit_links,
use_deptwise,
use_last_dropout,
output_dropout_rate,
bn_use_global_stats=False,
in_channels=3,
in_size=(224, 224),
classes=1000,
**kwargs):
super(HarDNet, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
activation = "relu6"
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
self.features.add(HarDInitBlock(
in_channels=in_channels,
out_channels=init_block_channels,
use_deptwise=use_deptwise,
bn_use_global_stats=bn_use_global_stats,
activation=activation))
for i, (in_channels_list_i, out_channels_list_i) in enumerate(zip(unit_in_channels, unit_out_channels)):
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
with stage.name_scope():
for j, (in_channels_list_ij, out_channels_list_ij) in enumerate(zip(in_channels_list_i,
out_channels_list_i)):
use_dropout = ((j == len(in_channels_list_i) - 1) and (i == len(unit_in_channels) - 1) and
use_last_dropout)
downsampling = ((j == len(in_channels_list_i) - 1) and (i != len(unit_in_channels) - 1))
stage.add(HarDUnit(
in_channels_list=in_channels_list_ij,
out_channels_list=out_channels_list_ij,
links_list=unit_links[i][j],
use_deptwise=use_deptwise,
use_dropout=use_dropout,
downsampling=downsampling,
bn_use_global_stats=bn_use_global_stats,
activation=activation))
self.features.add(stage)
in_channels = unit_out_channels[-1][-1][-1]
self.features.add(nn.AvgPool2D(
pool_size=7,
strides=1))
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
self.output.add(nn.Dropout(rate=output_dropout_rate))
self.output.add(nn.Dense(
units=classes,
in_units=in_channels))
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_hardnet(blocks,
use_deptwise=True,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create HarDNet model with specific parameters.
Parameters:
----------
blocks : int
Number of blocks.
    use_deptwise : bool, default True
Whether to use depthwise separable version of the model.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
if blocks == 39:
init_block_channels = 48
growth_factor = 1.6
dropout_rate = 0.05 if use_deptwise else 0.1
layers = [4, 16, 8, 4]
channels_per_layers = [96, 320, 640, 1024]
growth_rates = [16, 20, 64, 160]
downsamples = [1, 1, 1, 0]
use_dropout = False
elif blocks == 68:
init_block_channels = 64
growth_factor = 1.7
dropout_rate = 0.05 if use_deptwise else 0.1
layers = [8, 16, 16, 16, 4]
channels_per_layers = [128, 256, 320, 640, 1024]
growth_rates = [14, 16, 20, 40, 160]
downsamples = [1, 0, 1, 1, 0]
use_dropout = False
elif blocks == 85:
init_block_channels = 96
growth_factor = 1.7
dropout_rate = 0.05 if use_deptwise else 0.2
layers = [8, 16, 16, 16, 16, 4]
channels_per_layers = [192, 256, 320, 480, 720, 1280]
growth_rates = [24, 24, 28, 36, 48, 256]
downsamples = [1, 0, 1, 0, 1, 0]
use_dropout = True
else:
raise ValueError("Unsupported HarDNet version with number of layers {}".format(blocks))
assert (downsamples[-1] == 0)
def calc_stage_params():
def calc_unit_params():
def calc_blocks_params(layer_idx,
base_channels,
growth_rate):
if layer_idx == 0:
return base_channels, 0, []
out_channels_ij = growth_rate
links_ij = []
for k in range(10):
dv = 2 ** k
if layer_idx % dv == 0:
t = layer_idx - dv
links_ij.append(t)
if k > 0:
out_channels_ij *= growth_factor
out_channels_ij = int(int(out_channels_ij + 1) / 2) * 2
in_channels_ij = 0
for t in links_ij:
out_channels_ik, _, _ = calc_blocks_params(
layer_idx=t,
base_channels=base_channels,
growth_rate=growth_rate)
in_channels_ij += out_channels_ik
return out_channels_ij, in_channels_ij, links_ij
unit_out_channels = []
unit_in_channels = []
unit_links = []
for num_layers, growth_rate, base_channels, channels_per_layers_i in zip(
layers, growth_rates, [init_block_channels] + channels_per_layers[:-1], channels_per_layers):
stage_out_channels_i = 0
unit_out_channels_i = []
unit_in_channels_i = []
unit_links_i = []
for j in range(num_layers):
out_channels_ij, in_channels_ij, links_ij = calc_blocks_params(
layer_idx=(j + 1),
base_channels=base_channels,
growth_rate=growth_rate)
unit_out_channels_i.append(out_channels_ij)
unit_in_channels_i.append(in_channels_ij)
unit_links_i.append(links_ij)
if (j % 2 == 0) or (j == num_layers - 1):
stage_out_channels_i += out_channels_ij
unit_in_channels_i.append(stage_out_channels_i)
unit_out_channels_i.append(channels_per_layers_i)
unit_out_channels.append(unit_out_channels_i)
unit_in_channels.append(unit_in_channels_i)
unit_links.append(unit_links_i)
return unit_out_channels, unit_in_channels, unit_links
unit_out_channels, unit_in_channels, unit_links = calc_unit_params()
stage_out_channels = []
stage_in_channels = []
stage_links = []
stage_out_channels_k = None
for i in range(len(layers)):
if stage_out_channels_k is None:
stage_out_channels_k = []
stage_in_channels_k = []
stage_links_k = []
stage_out_channels_k.append(unit_out_channels[i])
stage_in_channels_k.append(unit_in_channels[i])
stage_links_k.append(unit_links[i])
if (downsamples[i] == 1) or (i == len(layers) - 1):
stage_out_channels.append(stage_out_channels_k)
stage_in_channels.append(stage_in_channels_k)
stage_links.append(stage_links_k)
stage_out_channels_k = None
return stage_out_channels, stage_in_channels, stage_links
stage_out_channels, stage_in_channels, stage_links = calc_stage_params()
net = HarDNet(
init_block_channels=init_block_channels,
unit_in_channels=stage_in_channels,
unit_out_channels=stage_out_channels,
unit_links=stage_links,
use_deptwise=use_deptwise,
use_last_dropout=use_dropout,
output_dropout_rate=dropout_rate,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def hardnet39ds(**kwargs):
"""
HarDNet-39DS (Depthwise Separable) model from 'HarDNet: A Low Memory Traffic Network,'
https://arxiv.org/abs/1909.00948.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_hardnet(blocks=39, use_deptwise=True, model_name="hardnet39ds", **kwargs)
def hardnet68ds(**kwargs):
"""
HarDNet-68DS (Depthwise Separable) model from 'HarDNet: A Low Memory Traffic Network,'
https://arxiv.org/abs/1909.00948.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_hardnet(blocks=68, use_deptwise=True, model_name="hardnet68ds", **kwargs)
def hardnet68(**kwargs):
"""
HarDNet-68 model from 'HarDNet: A Low Memory Traffic Network,' https://arxiv.org/abs/1909.00948.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_hardnet(blocks=68, use_deptwise=False, model_name="hardnet68", **kwargs)
def hardnet85(**kwargs):
"""
HarDNet-85 model from 'HarDNet: A Low Memory Traffic Network,' https://arxiv.org/abs/1909.00948.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_hardnet(blocks=85, use_deptwise=False, model_name="hardnet85", **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
hardnet39ds,
hardnet68ds,
hardnet68,
hardnet85,
]
for model in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != hardnet39ds or weight_count == 3488228)
assert (model != hardnet68ds or weight_count == 4180602)
assert (model != hardnet68 or weight_count == 17565348)
assert (model != hardnet85 or weight_count == 36670212)
x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
y = net(x)
assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
| 24,619 | 36.134238 | 116 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/sinet.py | """
SINet for image segmentation, implemented in Gluon.
Original paper: 'SINet: Extreme Lightweight Portrait Segmentation Networks with Spatial Squeeze Modules and
Information Blocking Decoder,' https://arxiv.org/abs/1911.09099.
"""
__all__ = ['SINet', 'sinet_cityscapes']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from mxnet.gluon.contrib.nn import HybridConcurrent
from .common import PReLU2, conv1x1, get_activation_layer, conv1x1_block, conv3x3_block, round_channels, dwconv_block,\
InterpolationBlock, ChannelShuffle
class SEBlock(HybridBlock):
"""
SINet version of Squeeze-and-Excitation block from 'Squeeze-and-Excitation Networks,'
https://arxiv.org/abs/1709.01507.
Parameters:
----------
channels : int
Number of channels.
reduction : int, default 16
Squeeze reduction value.
round_mid : bool, default False
Whether to round middle channel number (make divisible by 8).
    mid_activation : function, or str, or HybridBlock, default 'relu'
        Activation function after the first dense layer.
    out_activation : function, or str, or HybridBlock, default 'sigmoid'
        Activation function after the last dense layer.
"""
def __init__(self,
channels,
reduction=16,
round_mid=False,
mid_activation=(lambda: nn.Activation("relu")),
out_activation=(lambda: nn.Activation("sigmoid")),
**kwargs):
super(SEBlock, self).__init__(**kwargs)
self.use_conv2 = (reduction > 1)
mid_channels = channels // reduction if not round_mid else round_channels(float(channels) / reduction)
with self.name_scope():
self.fc1 = nn.Dense(
in_units=channels,
units=mid_channels)
if self.use_conv2:
self.activ = get_activation_layer(mid_activation)
self.fc2 = nn.Dense(
in_units=mid_channels,
units=channels)
self.sigmoid = get_activation_layer(out_activation)
def hybrid_forward(self, F, x):
w = F.contrib.AdaptiveAvgPooling2D(x, output_size=1)
w = F.Flatten(w)
w = self.fc1(w)
if self.use_conv2:
w = self.activ(w)
w = self.fc2(w)
w = self.sigmoid(w)
w = w.expand_dims(2).expand_dims(3).broadcast_like(x)
x = x * w
return x
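# Illustrative sketch (not part of the original code): the SE block squeezes
# each channel to a scalar via global average pooling, runs a two-layer
# bottleneck (channels -> channels / reduction -> channels) and rescales the
# input channel-wise with the resulting sigmoid gates, so the output shape
# equals the input shape.
def _example_se_block():
    import mxnet as mx
    se = SEBlock(channels=32, reduction=4)
    se.initialize()
    x = mx.nd.ones((1, 32, 28, 28))
    assert se(x).shape == x.shape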
class DwsConvBlock(HybridBlock):
"""
    SINet version of depthwise separable convolution block with BatchNorms and activations at each convolution layer.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of 2 int
Convolution window size.
strides : int or tuple/list of 2 int
Strides of the convolution.
padding : int or tuple/list of 2 int
Padding value for convolution layer.
dilation : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
use_bias : bool, default False
Whether the layer uses a bias vector.
dw_use_bn : bool, default True
Whether to use BatchNorm layer (depthwise convolution block).
pw_use_bn : bool, default True
Whether to use BatchNorm layer (pointwise convolution block).
bn_epsilon : float, default 1e-5
Small float added to variance in Batch norm.
    dw_activation : function or str or None, default nn.Activation('relu')
        Activation function after the depthwise convolution block.
    pw_activation : function or str or None, default nn.Activation('relu')
        Activation function after the pointwise convolution block.
se_reduction : int, default 0
Squeeze reduction value (0 means no-se).
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
strides,
padding,
dilation=1,
use_bias=False,
dw_use_bn=True,
pw_use_bn=True,
bn_epsilon=1e-5,
dw_activation=(lambda: nn.Activation("relu")),
pw_activation=(lambda: nn.Activation("relu")),
se_reduction=0,
**kwargs):
super(DwsConvBlock, self).__init__(**kwargs)
self.use_se = (se_reduction > 0)
with self.name_scope():
self.dw_conv = dwconv_block(
in_channels=in_channels,
out_channels=in_channels,
kernel_size=kernel_size,
strides=strides,
padding=padding,
dilation=dilation,
use_bias=use_bias,
use_bn=dw_use_bn,
bn_epsilon=bn_epsilon,
activation=dw_activation)
if self.use_se:
self.se = SEBlock(
channels=in_channels,
reduction=se_reduction,
round_mid=False,
mid_activation=(lambda: PReLU2(in_channels // se_reduction)),
out_activation=(lambda: PReLU2(in_channels)))
self.pw_conv = conv1x1_block(
in_channels=in_channels,
out_channels=out_channels,
use_bias=use_bias,
use_bn=pw_use_bn,
bn_epsilon=bn_epsilon,
activation=pw_activation)
def hybrid_forward(self, F, x):
x = self.dw_conv(x)
if self.use_se:
x = self.se(x)
x = self.pw_conv(x)
return x
def dwsconv3x3_block(in_channels,
out_channels,
strides=1,
padding=1,
dilation=1,
use_bias=False,
dw_use_bn=True,
pw_use_bn=True,
bn_epsilon=1e-5,
dw_activation=(lambda: nn.Activation("relu")),
pw_activation=(lambda: nn.Activation("relu")),
se_reduction=0,
**kwargs):
"""
3x3 depthwise separable version of the standard convolution block (SINet version).
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int, default 1
Strides of the convolution.
padding : int or tuple/list of 2 int, default 1
Padding value for convolution layer.
dilation : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
use_bias : bool, default False
Whether the layer uses a bias vector.
dw_use_bn : bool, default True
Whether to use BatchNorm layer (depthwise convolution block).
pw_use_bn : bool, default True
Whether to use BatchNorm layer (pointwise convolution block).
bn_epsilon : float, default 1e-5
Small float added to variance in Batch norm.
    dw_activation : function or str or None, default nn.Activation('relu')
        Activation function after the depthwise convolution block.
    pw_activation : function or str or None, default nn.Activation('relu')
        Activation function after the pointwise convolution block.
se_reduction : int, default 0
Squeeze reduction value (0 means no-se).
"""
return DwsConvBlock(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
strides=strides,
padding=padding,
dilation=dilation,
use_bias=use_bias,
dw_use_bn=dw_use_bn,
pw_use_bn=pw_use_bn,
bn_epsilon=bn_epsilon,
dw_activation=dw_activation,
pw_activation=pw_activation,
se_reduction=se_reduction,
**kwargs)
def dwconv3x3_block(in_channels,
out_channels,
strides=1,
padding=1,
dilation=1,
use_bias=False,
bn_epsilon=1e-5,
activation=(lambda: nn.Activation("relu")),
**kwargs):
"""
3x3 depthwise version of the standard convolution block (SINet version).
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int, default 1
Strides of the convolution.
padding : int or tuple/list of 2 int, default 1
Padding value for convolution layer.
dilation : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
use_bias : bool, default False
Whether the layer uses a bias vector.
bn_epsilon : float, default 1e-5
Small float added to variance in Batch norm.
    activation : function or str or None, default nn.Activation('relu')
Activation function or name of activation function.
"""
return dwconv_block(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
strides=strides,
padding=padding,
dilation=dilation,
use_bias=use_bias,
bn_epsilon=bn_epsilon,
activation=activation,
**kwargs)
class FDWConvBlock(HybridBlock):
"""
    Factorized depthwise separable convolution block with BatchNorms and activations at each convolution layer.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int
Convolution window size.
strides : int or tuple/list of 2 int
Strides of the convolution.
padding : int
Padding value for convolution layer.
dilation : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
use_bias : bool, default False
Whether the layer uses a bias vector.
use_bn : bool, default True
Whether to use BatchNorm layer.
bn_epsilon : float, default 1e-5
Small float added to variance in Batch norm.
    activation : function or str or None, default nn.Activation('relu')
        Activation function after each convolution block.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
strides,
padding,
dilation=1,
use_bias=False,
use_bn=True,
bn_epsilon=1e-5,
activation=(lambda: nn.Activation("relu")),
**kwargs):
super(FDWConvBlock, self).__init__(**kwargs)
assert use_bn
self.activate = (activation is not None)
with self.name_scope():
self.v_conv = dwconv_block(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=(kernel_size, 1),
strides=strides,
padding=(padding, 0),
dilation=dilation,
use_bias=use_bias,
use_bn=use_bn,
bn_epsilon=bn_epsilon,
activation=None)
self.h_conv = dwconv_block(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=(1, kernel_size),
strides=strides,
padding=(0, padding),
dilation=dilation,
use_bias=use_bias,
use_bn=use_bn,
bn_epsilon=bn_epsilon,
activation=None)
if self.activate:
self.act = get_activation_layer(activation)
def hybrid_forward(self, F, x):
x = self.v_conv(x) + self.h_conv(x)
if self.activate:
x = self.act(x)
return x
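# Illustrative note (not part of the original code): factorizing the k x k
# depthwise kernel into a (k x 1) plus a (1 x k) depthwise convolution whose
# outputs are summed cuts per-channel weights from k * k to 2 * k
# (e.g. 25 -> 10 for k = 5), at the cost of a less expressive, cross-shaped
# effective kernel.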
def fdwconv3x3_block(in_channels,
out_channels,
strides=1,
padding=1,
dilation=1,
use_bias=False,
use_bn=True,
bn_epsilon=1e-5,
activation=(lambda: nn.Activation("relu")),
**kwargs):
"""
3x3 factorized depthwise version of the standard convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int, default 1
Strides of the convolution.
padding : int, default 1
Padding value for convolution layer.
dilation : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
use_bias : bool, default False
Whether the layer uses a bias vector.
use_bn : bool, default True
Whether to use BatchNorm layer.
bn_epsilon : float, default 1e-5
Small float added to variance in Batch norm.
    activation : function or str or None, default nn.Activation('relu')
Activation function or name of activation function.
"""
return FDWConvBlock(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
strides=strides,
padding=padding,
dilation=dilation,
use_bias=use_bias,
use_bn=use_bn,
bn_epsilon=bn_epsilon,
activation=activation,
**kwargs)
def fdwconv5x5_block(in_channels,
out_channels,
strides=1,
padding=2,
dilation=1,
use_bias=False,
use_bn=True,
bn_epsilon=1e-5,
activation=(lambda: nn.Activation("relu")),
**kwargs):
"""
5x5 factorized depthwise version of the standard convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int, default 1
Strides of the convolution.
    padding : int, default 2
Padding value for convolution layer.
dilation : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
use_bias : bool, default False
Whether the layer uses a bias vector.
use_bn : bool, default True
Whether to use BatchNorm layer.
bn_epsilon : float, default 1e-5
Small float added to variance in Batch norm.
    activation : function or str or None, default nn.Activation('relu')
Activation function or name of activation function.
"""
return FDWConvBlock(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=5,
strides=strides,
padding=padding,
dilation=dilation,
use_bias=use_bias,
use_bn=use_bn,
bn_epsilon=bn_epsilon,
activation=activation,
**kwargs)
class SBBlock(HybridBlock):
"""
SB-block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int
Convolution window size for a factorized depthwise separable convolution block.
scale_factor : int
Scale factor.
size : tuple of 2 int
Spatial size of the output tensor for the bilinear upsampling operation.
bn_epsilon : float
Small float added to variance in Batch norm.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
scale_factor,
size,
bn_epsilon,
**kwargs):
super(SBBlock, self).__init__(**kwargs)
self.use_scale = (scale_factor > 1)
with self.name_scope():
if self.use_scale:
self.down_scale = nn.AvgPool2D(
pool_size=scale_factor,
strides=scale_factor)
self.up_scale = InterpolationBlock(
scale_factor=scale_factor,
out_size=size)
use_fdw = (scale_factor > 0)
if use_fdw:
fdwconv3x3_class = fdwconv3x3_block if kernel_size == 3 else fdwconv5x5_block
self.conv1 = fdwconv3x3_class(
in_channels=in_channels,
out_channels=in_channels,
bn_epsilon=bn_epsilon,
activation=(lambda: PReLU2(in_channels)))
else:
self.conv1 = dwconv3x3_block(
in_channels=in_channels,
out_channels=in_channels,
bn_epsilon=bn_epsilon,
activation=(lambda: PReLU2(in_channels)))
self.conv2 = conv1x1(
in_channels=in_channels,
out_channels=out_channels)
self.bn = nn.BatchNorm(
in_channels=out_channels,
epsilon=bn_epsilon)
def hybrid_forward(self, F, x):
if self.use_scale:
x = self.down_scale(x)
x = self.conv1(x)
x = self.conv2(x)
if self.use_scale:
x = self.up_scale(x)
x = self.bn(x)
return x
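# Illustrative note (not part of the original code): SBBlock realizes the
# paper's spatial-squeeze idea -- when scale_factor > 1 the input is
# average-pooled by scale_factor, convolved at the reduced resolution,
# bilinearly upsampled back to `size`, and batch-normalized, making large
# receptive fields cheap.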
class PreActivation(HybridBlock):
"""
PreResNet like pure pre-activation block without convolution layer.
Parameters:
----------
in_channels : int
Number of input channels.
bn_epsilon : float, default 1e-5
Small float added to variance in Batch norm.
"""
def __init__(self,
in_channels,
bn_epsilon=1e-5,
**kwargs):
super(PreActivation, self).__init__(**kwargs)
with self.name_scope():
self.bn = nn.BatchNorm(
in_channels=in_channels,
epsilon=bn_epsilon)
self.activ = PReLU2(in_channels)
def hybrid_forward(self, F, x):
x = self.bn(x)
x = self.activ(x)
return x
class ESPBlock(HybridBlock):
"""
    ESP block, which is based on the following principle: Reduce -> Split -> Transform -> Merge.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_sizes : list of int
Convolution window size for branches.
scale_factors : list of int
Scale factor for branches.
use_residual : bool
Whether to use residual connection.
in_size : tuple of 2 int
Spatial size of the output tensor for the bilinear upsampling operation.
bn_epsilon : float
Small float added to variance in Batch norm.
"""
def __init__(self,
in_channels,
out_channels,
kernel_sizes,
scale_factors,
use_residual,
in_size,
bn_epsilon,
**kwargs):
super(ESPBlock, self).__init__(**kwargs)
self.use_residual = use_residual
groups = len(kernel_sizes)
mid_channels = int(out_channels / groups)
res_channels = out_channels - groups * mid_channels
with self.name_scope():
self.conv = conv1x1(
in_channels=in_channels,
out_channels=mid_channels,
groups=groups)
self.c_shuffle = ChannelShuffle(
channels=mid_channels,
groups=groups)
self.branches = HybridConcurrent(axis=1, prefix="")
with self.branches.name_scope():
for i in range(groups):
out_channels_i = (mid_channels + res_channels) if i == 0 else mid_channels
self.branches.add(SBBlock(
in_channels=mid_channels,
out_channels=out_channels_i,
kernel_size=kernel_sizes[i],
scale_factor=scale_factors[i],
size=in_size,
bn_epsilon=bn_epsilon))
self.preactiv = PreActivation(
in_channels=out_channels,
bn_epsilon=bn_epsilon)
def hybrid_forward(self, F, x):
if self.use_residual:
identity = x
x = self.conv(x)
x = self.c_shuffle(x)
x = self.branches(x)
if self.use_residual:
x = identity + x
x = self.preactiv(x)
return x
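# Illustrative note (not part of the original code): with
# groups = len(kernel_sizes) branches, the grouped 1x1 convolution reduces the
# input to mid_channels = out_channels // groups; every branch consumes that
# same reduced tensor, branch 0 additionally absorbs the rounding remainder
# (res_channels), so the concatenated branches are exactly out_channels wide
# before the pre-activation.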
class SBStage(HybridBlock):
"""
SB stage.
Parameters:
----------
in_channels : int
Number of input channels.
down_channels : int
Number of output channels for a downscale block.
channels_list : list of int
Number of output channels for all residual block.
kernel_sizes_list : list of int
Convolution window size for branches.
scale_factors_list : list of int
Scale factor for branches.
use_residual_list : list of int
List of flags for using residual in each ESP-block.
se_reduction : int
Squeeze reduction value (0 means no-se).
in_size : tuple of 2 int
Spatial size of the output tensor for the bilinear upsampling operation.
bn_epsilon : float
Small float added to variance in Batch norm.
"""
def __init__(self,
in_channels,
down_channels,
channels_list,
kernel_sizes_list,
scale_factors_list,
use_residual_list,
se_reduction,
in_size,
bn_epsilon,
**kwargs):
super(SBStage, self).__init__(**kwargs)
with self.name_scope():
self.down_conv = dwsconv3x3_block(
in_channels=in_channels,
out_channels=down_channels,
strides=2,
dw_use_bn=False,
bn_epsilon=bn_epsilon,
dw_activation=None,
pw_activation=(lambda: PReLU2(down_channels)),
se_reduction=se_reduction)
in_channels = down_channels
self.main_branch = nn.HybridSequential(prefix="")
with self.main_branch.name_scope():
for i, out_channels in enumerate(channels_list):
use_residual = (use_residual_list[i] == 1)
kernel_sizes = kernel_sizes_list[i]
scale_factors = scale_factors_list[i]
self.main_branch.add(ESPBlock(
in_channels=in_channels,
out_channels=out_channels,
kernel_sizes=kernel_sizes,
scale_factors=scale_factors,
use_residual=use_residual,
in_size=((in_size[0] // 2, in_size[1] // 2) if in_size else None),
bn_epsilon=bn_epsilon))
in_channels = out_channels
self.preactiv = PreActivation(
in_channels=(down_channels + in_channels),
bn_epsilon=bn_epsilon)
def hybrid_forward(self, F, x):
x = self.down_conv(x)
y = self.main_branch(x)
x = F.concat(x, y, dim=1)
x = self.preactiv(x)
return x, y
class SBEncoderInitBlock(HybridBlock):
"""
SB encoder specific initial block.
Parameters:
----------
in_channels : int
Number of input channels.
mid_channels : int
Number of middle channels.
out_channels : int
Number of output channels.
    bn_epsilon : float
Small float added to variance in Batch norm.
"""
def __init__(self,
in_channels,
mid_channels,
out_channels,
bn_epsilon,
**kwargs):
super(SBEncoderInitBlock, self).__init__(**kwargs)
with self.name_scope():
self.conv1 = conv3x3_block(
in_channels=in_channels,
out_channels=mid_channels,
strides=2,
bn_epsilon=bn_epsilon,
activation=(lambda: PReLU2(mid_channels)))
self.conv2 = dwsconv3x3_block(
in_channels=mid_channels,
out_channels=out_channels,
strides=2,
dw_use_bn=False,
bn_epsilon=bn_epsilon,
dw_activation=None,
pw_activation=(lambda: PReLU2(out_channels)),
se_reduction=1)
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.conv2(x)
return x
class SBEncoder(HybridBlock):
"""
SB encoder for SINet.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
        Number of output channels.
    init_block_channels : list of 2 int
Number of output channels for convolutions in the initial block.
down_channels_list : list of int
Number of downsample channels for each residual block.
channels_list : list of list of int
Number of output channels for all residual block.
kernel_sizes_list : list of list of int
Convolution window size for each residual block.
scale_factors_list : list of list of int
Scale factor for each residual block.
use_residual_list : list of list of int
List of flags for using residual in each residual block.
in_size : tuple of 2 int
Spatial size of the output tensor for the bilinear upsampling operation.
bn_epsilon : float
Small float added to variance in Batch norm.
"""
def __init__(self,
in_channels,
out_channels,
init_block_channels,
down_channels_list,
channels_list,
kernel_sizes_list,
scale_factors_list,
use_residual_list,
in_size,
bn_epsilon,
**kwargs):
super(SBEncoder, self).__init__(**kwargs)
with self.name_scope():
self.init_block = SBEncoderInitBlock(
in_channels=in_channels,
mid_channels=init_block_channels[0],
out_channels=init_block_channels[1],
bn_epsilon=bn_epsilon)
in_channels = init_block_channels[1]
self.stage1 = SBStage(
in_channels=in_channels,
down_channels=down_channels_list[0],
channels_list=channels_list[0],
kernel_sizes_list=kernel_sizes_list[0],
scale_factors_list=scale_factors_list[0],
use_residual_list=use_residual_list[0],
se_reduction=1,
in_size=((in_size[0] // 4, in_size[1] // 4) if in_size else None),
bn_epsilon=bn_epsilon)
in_channels = down_channels_list[0] + channels_list[0][-1]
self.stage2 = SBStage(
in_channels=in_channels,
down_channels=down_channels_list[1],
channels_list=channels_list[1],
kernel_sizes_list=kernel_sizes_list[1],
scale_factors_list=scale_factors_list[1],
use_residual_list=use_residual_list[1],
se_reduction=2,
in_size=((in_size[0] // 8, in_size[1] // 8) if in_size else None),
bn_epsilon=bn_epsilon)
in_channels = down_channels_list[1] + channels_list[1][-1]
self.output = conv1x1(
in_channels=in_channels,
out_channels=out_channels)
def hybrid_forward(self, F, x):
y1 = self.init_block(x)
x, y2 = self.stage1(y1)
x, _ = self.stage2(x)
x = self.output(x)
return x, y2, y1
class SBDecodeBlock(HybridBlock):
"""
SB decoder block for SINet.
Parameters:
----------
channels : int
Number of output classes.
out_size : tuple of 2 int
Spatial size of the output tensor for the bilinear upsampling operation.
bn_epsilon : float
Small float added to variance in Batch norm.
"""
def __init__(self,
channels,
out_size,
bn_epsilon,
**kwargs):
super(SBDecodeBlock, self).__init__(**kwargs)
with self.name_scope():
self.up = InterpolationBlock(
scale_factor=2,
out_size=out_size)
self.bn = nn.BatchNorm(
in_channels=channels,
epsilon=bn_epsilon)
def hybrid_forward(self, F, x, y):
x = self.up(x)
x = self.bn(x)
w_conf = x.softmax()
w_max = w_conf.max(axis=1).expand_dims(1).broadcast_like(x)
x = y * (1 - w_max) + x
return x
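# Illustrative note (not part of the original code): this implements the
# paper's "information blocking" -- w_max is the per-pixel maximum class
# confidence of the upsampled coarse prediction x, so the lateral feature y
# is admitted with weight (1 - w_max): regions the coarse map is already
# confident about are blocked, while uncertain regions receive more local
# detail.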
class SBDecoder(HybridBlock):
"""
SB decoder for SINet.
Parameters:
----------
    dim2 : int
        Number of channels in the mid-level encoder feature map (y2).
classes : int
Number of segmentation classes.
out_size : tuple of 2 int
Spatial size of the output tensor for the bilinear upsampling operation.
bn_epsilon : float
Small float added to variance in Batch norm.
"""
def __init__(self,
dim2,
classes,
out_size,
bn_epsilon,
**kwargs):
super(SBDecoder, self).__init__(**kwargs)
with self.name_scope():
self.decode1 = SBDecodeBlock(
channels=classes,
out_size=((out_size[0] // 8, out_size[1] // 8) if out_size else None),
bn_epsilon=bn_epsilon)
self.decode2 = SBDecodeBlock(
channels=classes,
out_size=((out_size[0] // 4, out_size[1] // 4) if out_size else None),
bn_epsilon=bn_epsilon)
self.conv3c = conv1x1_block(
in_channels=dim2,
out_channels=classes,
bn_epsilon=bn_epsilon,
activation=(lambda: PReLU2(classes)))
self.output = nn.Conv2DTranspose(
channels=classes,
kernel_size=2,
strides=2,
padding=0,
output_padding=0,
in_channels=classes,
use_bias=False)
self.up = InterpolationBlock(
scale_factor=2,
out_size=out_size)
def hybrid_forward(self, F, y3, y2, y1):
y2 = self.conv3c(y2)
x = self.decode1(y3, y2)
x = self.decode2(x, y1)
x = self.output(x)
x = self.up(x)
return x
class SINet(HybridBlock):
"""
SINet model from 'SINet: Extreme Lightweight Portrait Segmentation Networks with Spatial Squeeze Modules and
Information Blocking Decoder,' https://arxiv.org/abs/1911.09099.
Parameters:
----------
down_channels_list : list of int
Number of downsample channels for each residual block.
channels_list : list of list of int
Number of output channels for all residual block.
kernel_sizes_list : list of list of int
Convolution window size for each residual block.
scale_factors_list : list of list of int
Scale factor for each residual block.
use_residual_list : list of list of int
List of flags for using residual in each residual block.
    dim2 : int
        Number of channels in the mid-level encoder feature map (y2).
bn_epsilon : float
Small float added to variance in Batch norm.
aux : bool, default False
Whether to output an auxiliary result.
fixed_size : bool, default True
Whether to expect fixed spatial size of input image.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (1024, 2048)
Spatial size of the expected input image.
classes : int, default 21
Number of segmentation classes.
"""
def __init__(self,
down_channels_list,
channels_list,
kernel_sizes_list,
scale_factors_list,
use_residual_list,
dim2,
bn_epsilon,
aux=False,
fixed_size=True,
in_channels=3,
in_size=(1024, 2048),
classes=21,
**kwargs):
super(SINet, self).__init__(**kwargs)
assert (fixed_size is not None)
assert (in_channels > 0)
assert ((in_size[0] % 64 == 0) and (in_size[1] % 64 == 0))
self.in_size = in_size
self.classes = classes
self.aux = aux
with self.name_scope():
init_block_channels = [16, classes]
out_channels = classes
self.encoder = SBEncoder(
in_channels=in_channels,
out_channels=out_channels,
init_block_channels=init_block_channels,
down_channels_list=down_channels_list,
channels_list=channels_list,
kernel_sizes_list=kernel_sizes_list,
scale_factors_list=scale_factors_list,
use_residual_list=use_residual_list,
in_size=(in_size if fixed_size else None),
bn_epsilon=bn_epsilon)
self.decoder = SBDecoder(
dim2=dim2,
classes=classes,
out_size=(in_size if fixed_size else None),
bn_epsilon=bn_epsilon)
def hybrid_forward(self, F, x):
y3, y2, y1 = self.encoder(x)
x = self.decoder(y3, y2, y1)
if self.aux:
return x, y3
else:
return x
def get_sinet(model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create SINet model with specific parameters.
Parameters:
----------
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
kernel_sizes_list = [
[[3, 5], [3, 3], [3, 3]],
[[3, 5], [3, 3], [5, 5], [3, 5], [3, 5], [3, 5], [3, 3], [5, 5], [3, 5], [3, 5]]]
scale_factors_list = [
[[1, 1], [0, 1], [0, 1]],
[[1, 1], [0, 1], [1, 4], [2, 8], [1, 1], [1, 1], [0, 1], [1, 8], [2, 4], [0, 2]]]
chnn = 4
dims = [24] + [24 * (i + 2) + 4 * (chnn - 1) for i in range(3)]
dim1 = dims[0]
dim2 = dims[1]
dim3 = dims[2]
dim4 = dims[3]
p = len(kernel_sizes_list[0])
q = len(kernel_sizes_list[1])
channels_list = [[dim2] * p, ([dim3] * (q // 2)) + ([dim4] * (q - q // 2))]
use_residual_list = [[0] + ([1] * (p - 1)), [0] + ([1] * (q // 2 - 1)) + [0] + ([1] * (q - q // 2 - 1))]
down_channels_list = [dim1, dim2]
net = SINet(
down_channels_list=down_channels_list,
channels_list=channels_list,
kernel_sizes_list=kernel_sizes_list,
scale_factors_list=scale_factors_list,
use_residual_list=use_residual_list,
dim2=dims[1],
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def sinet_cityscapes(classes=19, **kwargs):
"""
SINet model for Cityscapes from 'SINet: Extreme Lightweight Portrait Segmentation Networks with Spatial Squeeze
Modules and Information Blocking Decoder,' https://arxiv.org/abs/1911.09099.
Parameters:
----------
classes : int, default 19
Number of segmentation classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_sinet(classes=classes, bn_epsilon=1e-3, model_name="sinet_cityscapes", **kwargs)
def _test():
import numpy as np
import mxnet as mx
in_size = (1024, 2048)
aux = False
fixed_size = True
pretrained = False
models = [
sinet_cityscapes,
]
for model in models:
net = model(pretrained=pretrained, in_size=in_size, aux=aux, fixed_size=fixed_size)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != sinet_cityscapes or weight_count == 119418)
batch = 14
x = mx.nd.zeros((batch, 3, in_size[0], in_size[1]), ctx=ctx)
ys = net(x)
y = ys[0] if aux else ys
assert (y.shape == (batch, 19, in_size[0], in_size[1]))
if __name__ == "__main__":
_test()
| 37,954 | 32.888393 | 119 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/shufflenetv2b.py | """
ShuffleNet V2 for ImageNet-1K, implemented in Gluon. The alternative version.
Original paper: 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,'
https://arxiv.org/abs/1807.11164.
"""
__all__ = ['ShuffleNetV2b', 'shufflenetv2b_wd2', 'shufflenetv2b_w1', 'shufflenetv2b_w3d2', 'shufflenetv2b_w2']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1_block, conv3x3_block, dwconv3x3_block, ChannelShuffle, ChannelShuffle2, SEBlock
class ShuffleUnit(HybridBlock):
"""
ShuffleNetV2(b) unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
downsample : bool
        Whether to do downsampling.
use_se : bool
Whether to use SE block.
use_residual : bool
Whether to use residual connection.
shuffle_group_first : bool
Whether to use channel shuffle in group first mode.
"""
def __init__(self,
in_channels,
out_channels,
downsample,
use_se,
use_residual,
shuffle_group_first,
**kwargs):
super(ShuffleUnit, self).__init__(**kwargs)
self.downsample = downsample
self.use_se = use_se
self.use_residual = use_residual
mid_channels = out_channels // 2
in_channels2 = in_channels // 2
assert (in_channels % 2 == 0)
y2_in_channels = (in_channels if downsample else in_channels2)
y2_out_channels = out_channels - y2_in_channels
with self.name_scope():
self.conv1 = conv1x1_block(
in_channels=y2_in_channels,
out_channels=mid_channels)
self.dconv = dwconv3x3_block(
in_channels=mid_channels,
out_channels=mid_channels,
strides=(2 if self.downsample else 1),
activation=None)
self.conv2 = conv1x1_block(
in_channels=mid_channels,
out_channels=y2_out_channels)
if self.use_se:
self.se = SEBlock(channels=y2_out_channels)
if downsample:
self.shortcut_dconv = dwconv3x3_block(
in_channels=in_channels,
out_channels=in_channels,
strides=2,
activation=None)
self.shortcut_conv = conv1x1_block(
in_channels=in_channels,
out_channels=in_channels)
if shuffle_group_first:
self.c_shuffle = ChannelShuffle(
channels=out_channels,
groups=2)
else:
self.c_shuffle = ChannelShuffle2(
channels=out_channels,
groups=2)
def hybrid_forward(self, F, x):
if self.downsample:
y1 = self.shortcut_dconv(x)
y1 = self.shortcut_conv(y1)
x2 = x
else:
y1, x2 = F.split(x, axis=1, num_outputs=2)
y2 = self.conv1(x2)
y2 = self.dconv(y2)
y2 = self.conv2(y2)
if self.use_se:
y2 = self.se(y2)
if self.use_residual and not self.downsample:
y2 = y2 + x2
x = F.concat(y1, y2, dim=1)
x = self.c_shuffle(x)
return x
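# Illustrative note (not part of the original code): in the stride-1 case the
# input is split channel-wise in half -- one half (y1) passes through
# untouched while the other goes through the 1x1 -> depthwise 3x3 -> 1x1
# bottleneck (y2). Concatenation plus the channel shuffle lets information
# mix between the two halves in the next unit; in the downsampling case both
# paths process the full input and their concatenation gives the new, larger
# width.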
class ShuffleInitBlock(HybridBlock):
"""
ShuffleNetV2(b) specific initial block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
"""
def __init__(self,
in_channels,
out_channels,
**kwargs):
super(ShuffleInitBlock, self).__init__(**kwargs)
with self.name_scope():
self.conv = conv3x3_block(
in_channels=in_channels,
out_channels=out_channels,
strides=2)
self.pool = nn.MaxPool2D(
pool_size=3,
strides=2,
padding=1,
ceil_mode=False)
def hybrid_forward(self, F, x):
x = self.conv(x)
x = self.pool(x)
return x
class ShuffleNetV2b(HybridBlock):
"""
ShuffleNetV2(b) model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,'
https://arxiv.org/abs/1807.11164.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
final_block_channels : int
Number of output channels for the final block of the feature extractor.
use_se : bool, default False
Whether to use SE block.
use_residual : bool, default False
Whether to use residual connections.
shuffle_group_first : bool, default True
Whether to use channel shuffle in group first mode.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
final_block_channels,
use_se=False,
use_residual=False,
shuffle_group_first=True,
in_channels=3,
in_size=(224, 224),
classes=1000,
**kwargs):
super(ShuffleNetV2b, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
self.features.add(ShuffleInitBlock(
in_channels=in_channels,
out_channels=init_block_channels))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
with stage.name_scope():
for j, out_channels in enumerate(channels_per_stage):
downsample = (j == 0)
stage.add(ShuffleUnit(
in_channels=in_channels,
out_channels=out_channels,
downsample=downsample,
use_se=use_se,
use_residual=use_residual,
shuffle_group_first=shuffle_group_first))
in_channels = out_channels
self.features.add(stage)
self.features.add(conv1x1_block(
in_channels=in_channels,
out_channels=final_block_channels))
in_channels = final_block_channels
self.features.add(nn.AvgPool2D(
pool_size=7,
strides=1))
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
self.output.add(nn.Dense(
units=classes,
in_units=in_channels))
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_shufflenetv2b(width_scale,
shuffle_group_first=True,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create ShuffleNetV2(b) model with specific parameters.
Parameters:
----------
width_scale : float
Scale factor for width of layers.
shuffle_group_first : bool, default True
Whether to use channel shuffle in group first mode.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
init_block_channels = 24
final_block_channels = 1024
layers = [4, 8, 4]
channels_per_layers = [116, 232, 464]
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
if width_scale != 1.0:
channels = [[int(cij * width_scale) for cij in ci] for ci in channels]
if width_scale > 1.5:
final_block_channels = int(final_block_channels * width_scale)
net = ShuffleNetV2b(
channels=channels,
init_block_channels=init_block_channels,
final_block_channels=final_block_channels,
shuffle_group_first=shuffle_group_first,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
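# Illustrative note (not part of the original code): the width multipliers of
# the variants below are expressed as fractions of 29 so that the scaled
# stage widths stay integral, e.g. width_scale = 12/29 maps [116, 232, 464]
# to exactly [48, 96, 192].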
def shufflenetv2b_wd2(**kwargs):
"""
ShuffleNetV2(b) 0.5x model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,'
https://arxiv.org/abs/1807.11164.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_shufflenetv2b(
width_scale=(12.0 / 29.0),
shuffle_group_first=True,
model_name="shufflenetv2b_wd2",
**kwargs)
def shufflenetv2b_w1(**kwargs):
"""
ShuffleNetV2(b) 1x model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,'
https://arxiv.org/abs/1807.11164.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_shufflenetv2b(
width_scale=1.0,
shuffle_group_first=True,
model_name="shufflenetv2b_w1",
**kwargs)
def shufflenetv2b_w3d2(**kwargs):
"""
ShuffleNetV2(b) 1.5x model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,'
https://arxiv.org/abs/1807.11164.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_shufflenetv2b(
width_scale=(44.0 / 29.0),
shuffle_group_first=True,
model_name="shufflenetv2b_w3d2",
**kwargs)
def shufflenetv2b_w2(**kwargs):
"""
ShuffleNetV2(b) 2x model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,'
https://arxiv.org/abs/1807.11164.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_shufflenetv2b(
width_scale=(61.0 / 29.0),
shuffle_group_first=True,
model_name="shufflenetv2b_w2",
**kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
shufflenetv2b_wd2,
shufflenetv2b_w1,
shufflenetv2b_w3d2,
shufflenetv2b_w2,
]
for model in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != shufflenetv2b_wd2 or weight_count == 1366792)
assert (model != shufflenetv2b_w1 or weight_count == 2279760)
assert (model != shufflenetv2b_w3d2 or weight_count == 4410194)
assert (model != shufflenetv2b_w2 or weight_count == 7611290)
x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
y = net(x)
assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
| 13,269 | 32.00995 | 115 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/sparsenet.py | """
SparseNet for ImageNet-1K, implemented in Gluon.
Original paper: 'Sparsely Aggregated Convolutional Networks,' https://arxiv.org/abs/1801.05895.
"""
__all__ = ['SparseNet', 'sparsenet121', 'sparsenet161', 'sparsenet169', 'sparsenet201', 'sparsenet264']
import os
import math
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import pre_conv1x1_block, pre_conv3x3_block
from .preresnet import PreResInitBlock, PreResActivation
from .densenet import TransitionBlock
def sparsenet_exponential_fetch(lst):
"""
SparseNet's specific exponential fetch.
Parameters:
----------
lst : list
List of something.
Returns:
-------
list
Filtered list.
"""
return [lst[len(lst) - 2**i] for i in range(1 + math.floor(math.log(len(lst), 2)))]
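# Illustrative sketch (not part of the original code): the fetch keeps only
# the entries at offsets 1, 2, 4, ... from the end of the list, i.e.
# O(log n) of the n accumulated feature maps.
def _example_sparsenet_exponential_fetch():
    outs = list(range(8))
    # offsets 1, 2, 4, 8 from the end -> indices 7, 6, 4, 0
    assert sparsenet_exponential_fetch(outs) == [7, 6, 4, 0]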
class SparseBlock(HybridBlock):
"""
SparseNet block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
dropout_rate : float
        Parameter of Dropout layer. Fraction of the input units to drop.
"""
def __init__(self,
in_channels,
out_channels,
bn_use_global_stats,
dropout_rate,
**kwargs):
super(SparseBlock, self).__init__(**kwargs)
self.use_dropout = (dropout_rate != 0.0)
bn_size = 4
mid_channels = out_channels * bn_size
with self.name_scope():
self.conv1 = pre_conv1x1_block(
in_channels=in_channels,
out_channels=mid_channels,
bn_use_global_stats=bn_use_global_stats)
self.conv2 = pre_conv3x3_block(
in_channels=mid_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats)
if self.use_dropout:
self.dropout = nn.Dropout(rate=dropout_rate)
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.conv2(x)
if self.use_dropout:
x = self.dropout(x)
return x
class SparseStage(HybridBlock):
"""
SparseNet stage.
Parameters:
----------
in_channels : int
Number of input channels.
channels_per_stage : list of int
Number of output channels for each unit in stage.
growth_rate : int
Growth rate for blocks.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
dropout_rate : float
        Parameter of Dropout layer. Fraction of the input units to drop.
do_transition : bool
Whether use transition block.
"""
def __init__(self,
in_channels,
channels_per_stage,
growth_rate,
bn_use_global_stats,
dropout_rate,
do_transition,
**kwargs):
super(SparseStage, self).__init__(**kwargs)
self.do_transition = do_transition
with self.name_scope():
if self.do_transition:
self.trans = TransitionBlock(
in_channels=in_channels,
out_channels=(in_channels // 2),
bn_use_global_stats=bn_use_global_stats)
in_channels = in_channels // 2
self.blocks = nn.HybridSequential(prefix="")
for i, out_channels in enumerate(channels_per_stage):
self.blocks.add(SparseBlock(
in_channels=in_channels,
out_channels=growth_rate,
dropout_rate=dropout_rate,
bn_use_global_stats=bn_use_global_stats))
in_channels = out_channels
def hybrid_forward(self, F, x):
if self.do_transition:
x = self.trans(x)
outs = [x]
for block in self.blocks._children.values():
y = block(x)
outs.append(y)
flt_outs = sparsenet_exponential_fetch(outs)
x = F.concat(*flt_outs, dim=1)
return x
class SparseNet(HybridBlock):
"""
SparseNet model from 'Sparsely Aggregated Convolutional Networks,' https://arxiv.org/abs/1801.05895.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
growth_rate : int
Growth rate for blocks.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
dropout_rate : float, default 0.0
        Parameter of Dropout layer. Fraction of the input units to drop.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
growth_rate,
bn_use_global_stats=False,
dropout_rate=0.0,
in_channels=3,
in_size=(224, 224),
classes=1000,
**kwargs):
super(SparseNet, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
self.features.add(PreResInitBlock(
in_channels=in_channels,
out_channels=init_block_channels,
bn_use_global_stats=bn_use_global_stats))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = SparseStage(
in_channels=in_channels,
channels_per_stage=channels_per_stage,
growth_rate=growth_rate,
bn_use_global_stats=bn_use_global_stats,
dropout_rate=dropout_rate,
do_transition=(i != 0))
in_channels = channels_per_stage[-1]
self.features.add(stage)
self.features.add(PreResActivation(
in_channels=in_channels,
bn_use_global_stats=bn_use_global_stats))
self.features.add(nn.AvgPool2D(
pool_size=7,
strides=1))
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
self.output.add(nn.Dense(
units=classes,
in_units=in_channels))
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_sparsenet(num_layers,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create SparseNet model with specific parameters.
Parameters:
----------
num_layers : int
Number of layers.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
if num_layers == 121:
init_block_channels = 64
growth_rate = 32
layers = [6, 12, 24, 16]
elif num_layers == 161:
init_block_channels = 96
growth_rate = 48
layers = [6, 12, 36, 24]
elif num_layers == 169:
init_block_channels = 64
growth_rate = 32
layers = [6, 12, 32, 32]
elif num_layers == 201:
init_block_channels = 64
growth_rate = 32
layers = [6, 12, 48, 32]
elif num_layers == 264:
init_block_channels = 64
growth_rate = 32
layers = [6, 12, 64, 48]
else:
raise ValueError("Unsupported SparseNet version with number of layers {}".format(num_layers))
from functools import reduce
channels = reduce(
lambda xi, yi: xi + [reduce(
lambda xj, yj: xj + [sum(sparsenet_exponential_fetch([xj[0]] + [yj[0]] * (yj[1] + 1)))],
zip([growth_rate] * yi, range(yi)),
[xi[-1][-1] // 2])[1:]],
layers,
[[init_block_channels * 2]])[1:]
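    # Reading aid (added): the outer `reduce` walks over stages, the inner
    # one over the units of a stage.  Each stage seeds the inner reduce with
    # half of the previous stage's final width (the transition block halves
    # the channels; the [init_block_channels * 2] seed makes the first,
    # transition-free stage start from init_block_channels), and every step
    # appends the width of the exponential fetch over
    # [input_width, growth_rate, growth_rate, ...], so channels[i][j] is the
    # concatenated width after unit j of stage i.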
net = SparseNet(
channels=channels,
init_block_channels=init_block_channels,
growth_rate=growth_rate,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def sparsenet121(**kwargs):
"""
SparseNet-121 model from 'Sparsely Aggregated Convolutional Networks,' https://arxiv.org/abs/1801.05895.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_sparsenet(num_layers=121, model_name="sparsenet121", **kwargs)
def sparsenet161(**kwargs):
"""
SparseNet-161 model from 'Sparsely Aggregated Convolutional Networks,' https://arxiv.org/abs/1801.05895.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_sparsenet(num_layers=161, model_name="sparsenet161", **kwargs)
def sparsenet169(**kwargs):
"""
SparseNet-169 model from 'Sparsely Aggregated Convolutional Networks,' https://arxiv.org/abs/1801.05895.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_sparsenet(num_layers=169, model_name="sparsenet169", **kwargs)
def sparsenet201(**kwargs):
"""
SparseNet-201 model from 'Sparsely Aggregated Convolutional Networks,' https://arxiv.org/abs/1801.05895.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_sparsenet(num_layers=201, model_name="sparsenet201", **kwargs)
def sparsenet264(**kwargs):
"""
SparseNet-264 model from 'Sparsely Aggregated Convolutional Networks,' https://arxiv.org/abs/1801.05895.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_sparsenet(num_layers=264, model_name="sparsenet264", **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
sparsenet121,
sparsenet161,
sparsenet169,
sparsenet201,
sparsenet264,
]
for model in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != sparsenet121 or weight_count == 3250824)
assert (model != sparsenet161 or weight_count == 9853288)
assert (model != sparsenet169 or weight_count == 4709864)
assert (model != sparsenet201 or weight_count == 5703144)
assert (model != sparsenet264 or weight_count == 7717224)
x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
y = net(x)
assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
| 13,347 | 31.635697 | 115 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/menet.py | """
MENet for ImageNet-1K, implemented in Gluon.
Original paper: 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile Applications,'
https://arxiv.org/abs/1803.09127.
"""
__all__ = ['MENet', 'menet108_8x1_g3', 'menet128_8x1_g4', 'menet160_8x1_g8', 'menet228_12x1_g3', 'menet256_12x1_g4',
'menet348_12x1_g3', 'menet352_12x1_g8', 'menet456_24x1_g3']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1, conv3x3, depthwise_conv3x3, ChannelShuffle
class MEUnit(HybridBlock):
"""
MENet unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
side_channels : int
Number of side channels.
groups : int
Number of groups in convolution layers.
downsample : bool
        Whether to do downsampling.
ignore_group : bool
        Whether to ignore the group value in the first convolution layer.
"""
def __init__(self,
in_channels,
out_channels,
side_channels,
groups,
downsample,
ignore_group,
**kwargs):
super(MEUnit, self).__init__(**kwargs)
self.downsample = downsample
mid_channels = out_channels // 4
if downsample:
out_channels -= in_channels
with self.name_scope():
# residual branch
self.compress_conv1 = conv1x1(
in_channels=in_channels,
out_channels=mid_channels,
groups=(1 if ignore_group else groups))
self.compress_bn1 = nn.BatchNorm(in_channels=mid_channels)
self.c_shuffle = ChannelShuffle(
channels=mid_channels,
groups=groups)
self.dw_conv2 = depthwise_conv3x3(
channels=mid_channels,
strides=(2 if self.downsample else 1))
self.dw_bn2 = nn.BatchNorm(in_channels=mid_channels)
self.expand_conv3 = conv1x1(
in_channels=mid_channels,
out_channels=out_channels,
groups=groups)
self.expand_bn3 = nn.BatchNorm(in_channels=out_channels)
if downsample:
self.avgpool = nn.AvgPool2D(pool_size=3, strides=2, padding=1)
self.activ = nn.Activation("relu")
# fusion branch
self.s_merge_conv = conv1x1(
in_channels=mid_channels,
out_channels=side_channels)
self.s_merge_bn = nn.BatchNorm(in_channels=side_channels)
self.s_conv = conv3x3(
in_channels=side_channels,
out_channels=side_channels,
strides=(2 if self.downsample else 1))
self.s_conv_bn = nn.BatchNorm(in_channels=side_channels)
self.s_evolve_conv = conv1x1(
in_channels=side_channels,
out_channels=mid_channels)
self.s_evolve_bn = nn.BatchNorm(in_channels=mid_channels)
def hybrid_forward(self, F, x):
identity = x
# pointwise group convolution 1
x = self.compress_conv1(x)
x = self.compress_bn1(x)
x = self.activ(x)
x = self.c_shuffle(x)
# merging
y = self.s_merge_conv(x)
y = self.s_merge_bn(y)
y = self.activ(y)
# depthwise convolution (bottleneck)
x = self.dw_conv2(x)
x = self.dw_bn2(x)
# evolution
y = self.s_conv(y)
y = self.s_conv_bn(y)
y = self.activ(y)
y = self.s_evolve_conv(y)
y = self.s_evolve_bn(y)
y = F.sigmoid(y)
x = x * y
# pointwise group convolution 2
x = self.expand_conv3(x)
x = self.expand_bn3(x)
# identity branch
if self.downsample:
identity = self.avgpool(identity)
x = F.concat(x, identity, dim=1)
else:
x = x + identity
x = self.activ(x)
return x
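# Added summary of the data flow above: a ShuffleNet-style residual branch
# (1x1 group conv -> channel shuffle -> 3x3 depthwise -> 1x1 group conv)
# runs next to a narrow fusion branch that merges the shuffled features
# into `side_channels`, evolves them with a strided 3x3 conv, and gates
# the depthwise output through a sigmoid before the final concat
# (downsampling units) or elementwise add (stride-1 units).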
class MEInitBlock(HybridBlock):
"""
MENet specific initial block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
"""
def __init__(self,
in_channels,
out_channels,
**kwargs):
super(MEInitBlock, self).__init__(**kwargs)
with self.name_scope():
self.conv = nn.Conv2D(
channels=out_channels,
kernel_size=3,
strides=2,
padding=1,
use_bias=False,
in_channels=in_channels)
self.bn = nn.BatchNorm(in_channels=out_channels)
self.activ = nn.Activation("relu")
self.pool = nn.MaxPool2D(
pool_size=3,
strides=2,
padding=1)
def hybrid_forward(self, F, x):
x = self.conv(x)
x = self.bn(x)
x = self.activ(x)
x = self.pool(x)
return x
class MENet(HybridBlock):
"""
MENet model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile Applications,'
https://arxiv.org/abs/1803.09127.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
side_channels : int
Number of side channels in a ME-unit.
groups : int
Number of groups in convolution layers.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
side_channels,
groups,
in_channels=3,
in_size=(224, 224),
classes=1000,
**kwargs):
super(MENet, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
self.features.add(MEInitBlock(
in_channels=in_channels,
out_channels=init_block_channels))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
with stage.name_scope():
for j, out_channels in enumerate(channels_per_stage):
downsample = (j == 0)
ignore_group = (i == 0) and (j == 0)
stage.add(MEUnit(
in_channels=in_channels,
out_channels=out_channels,
side_channels=side_channels,
groups=groups,
downsample=downsample,
ignore_group=ignore_group))
in_channels = out_channels
self.features.add(stage)
self.features.add(nn.AvgPool2D(
pool_size=7,
strides=1))
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
self.output.add(nn.Dense(
units=classes,
in_units=in_channels))
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_menet(first_stage_channels,
side_channels,
groups,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create MENet model with specific parameters.
Parameters:
----------
first_stage_channels : int
Number of output channels at the first stage.
side_channels : int
Number of side channels in a ME-unit.
groups : int
Number of groups in convolution layers.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
layers = [4, 8, 4]
if first_stage_channels == 108:
init_block_channels = 12
channels_per_layers = [108, 216, 432]
elif first_stage_channels == 128:
init_block_channels = 12
channels_per_layers = [128, 256, 512]
elif first_stage_channels == 160:
init_block_channels = 16
channels_per_layers = [160, 320, 640]
elif first_stage_channels == 228:
init_block_channels = 24
channels_per_layers = [228, 456, 912]
elif first_stage_channels == 256:
init_block_channels = 24
channels_per_layers = [256, 512, 1024]
elif first_stage_channels == 348:
init_block_channels = 24
channels_per_layers = [348, 696, 1392]
elif first_stage_channels == 352:
init_block_channels = 24
channels_per_layers = [352, 704, 1408]
elif first_stage_channels == 456:
init_block_channels = 48
channels_per_layers = [456, 912, 1824]
else:
raise ValueError("The {} of `first_stage_channels` is not supported".format(first_stage_channels))
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
net = MENet(
channels=channels,
init_block_channels=init_block_channels,
side_channels=side_channels,
groups=groups,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def menet108_8x1_g3(**kwargs):
"""
108-MENet-8x1 (g=3) model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile
Applications,' https://arxiv.org/abs/1803.09127.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_menet(first_stage_channels=108, side_channels=8, groups=3, model_name="menet108_8x1_g3", **kwargs)
def menet128_8x1_g4(**kwargs):
"""
128-MENet-8x1 (g=4) model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile
Applications,' https://arxiv.org/abs/1803.09127.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_menet(first_stage_channels=128, side_channels=8, groups=4, model_name="menet128_8x1_g4", **kwargs)
def menet160_8x1_g8(**kwargs):
"""
160-MENet-8x1 (g=8) model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile
Applications,' https://arxiv.org/abs/1803.09127.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_menet(first_stage_channels=160, side_channels=8, groups=8, model_name="menet160_8x1_g8", **kwargs)
def menet228_12x1_g3(**kwargs):
"""
228-MENet-12x1 (g=3) model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile
Applications,' https://arxiv.org/abs/1803.09127.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_menet(first_stage_channels=228, side_channels=12, groups=3, model_name="menet228_12x1_g3", **kwargs)
def menet256_12x1_g4(**kwargs):
"""
256-MENet-12x1 (g=4) model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile
Applications,' https://arxiv.org/abs/1803.09127.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_menet(first_stage_channels=256, side_channels=12, groups=4, model_name="menet256_12x1_g4", **kwargs)
def menet348_12x1_g3(**kwargs):
"""
348-MENet-12x1 (g=3) model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile
Applications,' https://arxiv.org/abs/1803.09127.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_menet(first_stage_channels=348, side_channels=12, groups=3, model_name="menet348_12x1_g3", **kwargs)
def menet352_12x1_g8(**kwargs):
"""
352-MENet-12x1 (g=8) model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile
Applications,' https://arxiv.org/abs/1803.09127.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_menet(first_stage_channels=352, side_channels=12, groups=8, model_name="menet352_12x1_g8", **kwargs)
def menet456_24x1_g3(**kwargs):
"""
456-MENet-24x1 (g=3) model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile
Applications,' https://arxiv.org/abs/1803.09127.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_menet(first_stage_channels=456, side_channels=24, groups=3, model_name="menet456_24x1_g3", **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
menet108_8x1_g3,
menet128_8x1_g4,
# menet160_8x1_g8,
menet228_12x1_g3,
menet256_12x1_g4,
menet348_12x1_g3,
menet352_12x1_g8,
menet456_24x1_g3,
]
for model in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != menet108_8x1_g3 or weight_count == 654516)
assert (model != menet128_8x1_g4 or weight_count == 750796)
assert (model != menet160_8x1_g8 or weight_count == 850120)
assert (model != menet228_12x1_g3 or weight_count == 1806568)
assert (model != menet256_12x1_g4 or weight_count == 1888240)
assert (model != menet348_12x1_g3 or weight_count == 3368128)
assert (model != menet352_12x1_g8 or weight_count == 2272872)
assert (model != menet456_24x1_g3 or weight_count == 5304784)
x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
y = net(x)
assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
| 17,113 | 33.365462 | 116 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/voca.py | """
VOCA for speech-driven facial animation, implemented in Gluon.
Original paper: 'Capture, Learning, and Synthesis of 3D Speaking Styles,' https://arxiv.org/abs/1905.03079.
"""
__all__ = ['VOCA', 'voca8flame']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import ConvBlock
class VocaEncoder(HybridBlock):
"""
VOCA encoder.
Parameters:
----------
audio_features : int
Number of audio features (characters/sounds).
audio_window_size : int
        Size of audio window (for time-related audio features).
base_persons : int
Number of base persons (subjects).
encoder_features : int
Number of encoder features.
"""
def __init__(self,
audio_features,
audio_window_size,
base_persons,
encoder_features,
**kwargs):
super(VocaEncoder, self).__init__(**kwargs)
self.audio_window_size = audio_window_size
channels = (32, 32, 64, 64)
fc1_channels = 128
with self.name_scope():
self.bn = nn.BatchNorm(in_channels=1)
in_channels = audio_features + base_persons
self.branch = nn.HybridSequential(prefix="")
with self.branch.name_scope():
for out_channels in channels:
self.branch.add(ConvBlock(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=(3, 1),
strides=(2, 1),
padding=(1, 0),
use_bias=True,
use_bn=False))
in_channels = out_channels
in_channels += base_persons
self.fc1 = nn.Dense(
units=fc1_channels,
in_units=in_channels)
self.fc2 = nn.Dense(
units=encoder_features,
in_units=fc1_channels)
def hybrid_forward(self, F, x, pid):
x = self.bn(x)
x = x.swapaxes(1, 3)
y = pid.expand_dims(-1).expand_dims(-1)
y = y.tile(reps=(1, 1, self.audio_window_size, 1))
x = F.concat(x, y, dim=1)
x = self.branch(x)
x = x.flatten()
x = F.concat(x, pid, dim=1)
x = self.fc1(x)
x = x.tanh()
x = self.fc2(x)
return x
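# Added shape walk-through (assuming the default sizes of the VOCA class
# below): an input of (batch, 1, 16, 29) is batch-normalized, transposed
# so the 29 audio features become channels, concatenated with the tiled
# one-hot subject id, shrunk in time by the four strided (3, 1)
# convolutions (16 -> 8 -> 4 -> 2 -> 1), then flattened and concatenated
# with the id once more before the two dense layers.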
class VOCA(HybridBlock):
"""
VOCA model from 'Capture, Learning, and Synthesis of 3D Speaking Styles,' https://arxiv.org/abs/1905.03079.
Parameters:
----------
audio_features : int, default 29
Number of audio features (characters/sounds).
audio_window_size : int, default 16
        Size of audio window (for time-related audio features).
base_persons : int, default 8
Number of base persons (subjects).
encoder_features : int, default 50
Number of encoder features.
vertices : int, default 5023
Number of 3D geometry vertices.
"""
def __init__(self,
audio_features=29,
audio_window_size=16,
base_persons=8,
encoder_features=50,
vertices=5023,
**kwargs):
super(VOCA, self).__init__(**kwargs)
self.base_persons = base_persons
with self.name_scope():
self.encoder = VocaEncoder(
audio_features=audio_features,
audio_window_size=audio_window_size,
base_persons=base_persons,
encoder_features=encoder_features)
self.decoder = nn.Dense(
units=(3 * vertices),
in_units=encoder_features)
def hybrid_forward(self, F, x, pid):
pid = pid.one_hot(depth=self.base_persons)
x = self.encoder(x, pid)
x = self.decoder(x)
x = x.reshape((0, 1, -1, 3))
return x
def get_voca(base_persons,
vertices,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create VOCA model with specific parameters.
Parameters:
----------
base_persons : int
Number of base persons (subjects).
vertices : int
Number of 3D geometry vertices.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
net = VOCA(
base_persons=base_persons,
vertices=vertices,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def voca8flame(**kwargs):
"""
VOCA-8-FLAME model for 8 base persons and FLAME topology from 'Capture, Learning, and Synthesis of 3D Speaking
Styles,' https://arxiv.org/abs/1905.03079.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_voca(base_persons=8, vertices=5023, model_name="voca8flame", **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
voca8flame,
]
for model in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != voca8flame or weight_count == 809563)
batch = 14
audio_features = 29
audio_window_size = 16
vertices = 5023
x = mx.nd.random.normal(shape=(batch, 1, audio_window_size, audio_features), ctx=ctx)
pid = mx.nd.array(np.full(shape=(batch,), fill_value=3), ctx=ctx)
y = net(x, pid)
assert (y.shape == (batch, 1, vertices, 3))
if __name__ == "__main__":
_test()
| 6,959 | 29.79646 | 115 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/shakeshakeresnet_cifar.py | """
Shake-Shake-ResNet for CIFAR/SVHN, implemented in Gluon.
Original paper: 'Shake-Shake regularization,' https://arxiv.org/abs/1705.07485.
"""
__all__ = ['CIFARShakeShakeResNet', 'shakeshakeresnet20_2x16d_cifar10', 'shakeshakeresnet20_2x16d_cifar100',
'shakeshakeresnet20_2x16d_svhn', 'shakeshakeresnet26_2x32d_cifar10', 'shakeshakeresnet26_2x32d_cifar100',
'shakeshakeresnet26_2x32d_svhn']
import os
import mxnet as mx
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1, conv3x3_block
from .resnet import ResBlock, ResBottleneck
class ShakeShake(mx.autograd.Function):
"""
Shake-Shake function.
"""
def forward(self, x1, x2):
if mx.autograd.is_training():
alpha = mx.nd.random.uniform_like(x1.slice(begin=(None, 0, 0, 0), end=(None, 1, 1, 1)))
y = mx.nd.broadcast_mul(alpha, x1) + mx.nd.broadcast_mul(1 - alpha, x2)
else:
y = 0.5 * (x1 + x2)
return y
def backward(self, dy):
beta = mx.nd.random.uniform_like(dy.slice(begin=(None, 0, 0, 0), end=(None, 1, 1, 1)))
return mx.nd.broadcast_mul(beta, dy), mx.nd.broadcast_mul(1 - beta, dy)
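# Added note: during training the forward pass mixes the two residual
# branches with a per-sample random alpha in [0, 1) and inference falls
# back to the constant 0.5, while the backward pass draws an independent
# beta for the gradients.  A minimal numeric sketch for one sample:
# alpha = 0.3 gives y = 0.3 * x1 + 0.7 * x2, yet the same sample's
# gradient may be split as 0.8 * dy and 0.2 * dy.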
class ShakeShakeShortcut(HybridBlock):
"""
Shake-Shake-ResNet shortcut.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
bn_use_global_stats : bool
        Whether global moving statistics are used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels,
out_channels,
strides,
bn_use_global_stats,
**kwargs):
super(ShakeShakeShortcut, self).__init__(**kwargs)
assert (out_channels % 2 == 0)
mid_channels = out_channels // 2
with self.name_scope():
self.pool = nn.AvgPool2D(
pool_size=1,
strides=strides)
self.conv1 = conv1x1(
in_channels=in_channels,
out_channels=mid_channels)
self.conv2 = conv1x1(
in_channels=in_channels,
out_channels=mid_channels)
self.bn = nn.BatchNorm(
in_channels=out_channels,
use_global_stats=bn_use_global_stats)
def hybrid_forward(self, F, x):
x1 = self.pool(x)
x1 = self.conv1(x1)
x2 = F.slice(x, begin=(None, None, None, None), end=(None, None, -1, -1))
x2 = F.pad(x2, mode="constant", pad_width=(0, 0, 0, 0, 1, 0, 1, 0), constant_value=0)
x2 = self.pool(x2)
x2 = self.conv2(x2)
x = F.concat(x1, x2, dim=1)
x = self.bn(x)
return x
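# Added note: the strided shortcut keeps information from both pixel
# grids -- the second path is shifted by one pixel via the slice + pad
# trick before the stride-2 average pooling -- so concatenating the two
# 1x1 projections covers complementary spatial offsets instead of
# dropping three quarters of the activations.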
class ShakeShakeResUnit(HybridBlock):
"""
Shake-Shake-ResNet unit with residual connection.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
bn_use_global_stats : bool
        Whether global moving statistics are used instead of local batch-norm for BatchNorm layers.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
"""
def __init__(self,
in_channels,
out_channels,
strides,
bn_use_global_stats,
bottleneck,
**kwargs):
super(ShakeShakeResUnit, self).__init__(**kwargs)
self.resize_identity = (in_channels != out_channels) or (strides != 1)
branch_class = ResBottleneck if bottleneck else ResBlock
with self.name_scope():
self.branch1 = branch_class(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats)
self.branch2 = branch_class(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats)
if self.resize_identity:
self.identity_branch = ShakeShakeShortcut(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats)
self.activ = nn.Activation("relu")
def hybrid_forward(self, F, x):
if self.resize_identity:
identity = self.identity_branch(x)
else:
identity = x
x1 = self.branch1(x)
x2 = self.branch2(x)
x = ShakeShake()(x1, x2) + identity
x = self.activ(x)
return x
class CIFARShakeShakeResNet(HybridBlock):
"""
Shake-Shake-ResNet model for CIFAR from 'Shake-Shake regularization,' https://arxiv.org/abs/1705.07485.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
bn_use_global_stats : bool, default False
        Whether global moving statistics are used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (32, 32)
Spatial size of the expected input image.
classes : int, default 10
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
bottleneck,
bn_use_global_stats=False,
in_channels=3,
in_size=(32, 32),
classes=10,
**kwargs):
super(CIFARShakeShakeResNet, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
self.features.add(conv3x3_block(
in_channels=in_channels,
out_channels=init_block_channels,
bn_use_global_stats=bn_use_global_stats))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
with stage.name_scope():
for j, out_channels in enumerate(channels_per_stage):
strides = 2 if (j == 0) and (i != 0) else 1
stage.add(ShakeShakeResUnit(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats,
bottleneck=bottleneck))
in_channels = out_channels
self.features.add(stage)
self.features.add(nn.AvgPool2D(
pool_size=8,
strides=1))
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
self.output.add(nn.Dense(
units=classes,
in_units=in_channels))
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_shakeshakeresnet_cifar(classes,
blocks,
bottleneck,
first_stage_channels=16,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create Shake-Shake-ResNet model for CIFAR with specific parameters.
Parameters:
----------
classes : int
Number of classification classes.
blocks : int
Number of blocks.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
first_stage_channels : int, default 16
Number of output channels for the first stage.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
assert (classes in [10, 100])
if bottleneck:
assert ((blocks - 2) % 9 == 0)
layers = [(blocks - 2) // 9] * 3
else:
assert ((blocks - 2) % 6 == 0)
layers = [(blocks - 2) // 6] * 3
init_block_channels = 16
from functools import reduce
channels_per_layers = reduce(lambda x, y: x + [x[-1] * 2], range(2), [first_stage_channels])
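    # Reading aid (added): the `reduce` doubles the width twice, e.g.
    # first_stage_channels=16 -> [16, 32, 64] and 32 -> [32, 64, 128].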
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
if bottleneck:
channels = [[cij * 4 for cij in ci] for ci in channels]
net = CIFARShakeShakeResNet(
channels=channels,
init_block_channels=init_block_channels,
bottleneck=bottleneck,
classes=classes,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def shakeshakeresnet20_2x16d_cifar10(classes=10, **kwargs):
"""
Shake-Shake-ResNet-20-2x16d model for CIFAR-10 from 'Shake-Shake regularization,' https://arxiv.org/abs/1705.07485.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_shakeshakeresnet_cifar(classes=classes, blocks=20, bottleneck=False, first_stage_channels=16,
model_name="shakeshakeresnet20_2x16d_cifar10", **kwargs)
def shakeshakeresnet20_2x16d_cifar100(classes=100, **kwargs):
"""
Shake-Shake-ResNet-20-2x16d model for CIFAR-100 from 'Shake-Shake regularization,' https://arxiv.org/abs/1705.07485.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_shakeshakeresnet_cifar(classes=classes, blocks=20, bottleneck=False, first_stage_channels=16,
model_name="shakeshakeresnet20_2x16d_cifar100", **kwargs)
def shakeshakeresnet20_2x16d_svhn(classes=10, **kwargs):
"""
Shake-Shake-ResNet-20-2x16d model for SVHN from 'Shake-Shake regularization,' https://arxiv.org/abs/1705.07485.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_shakeshakeresnet_cifar(classes=classes, blocks=20, bottleneck=False, first_stage_channels=16,
model_name="shakeshakeresnet20_2x16d_svhn", **kwargs)
def shakeshakeresnet26_2x32d_cifar10(classes=10, **kwargs):
"""
Shake-Shake-ResNet-26-2x32d model for CIFAR-10 from 'Shake-Shake regularization,' https://arxiv.org/abs/1705.07485.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_shakeshakeresnet_cifar(classes=classes, blocks=26, bottleneck=False, first_stage_channels=32,
model_name="shakeshakeresnet26_2x32d_cifar10", **kwargs)
def shakeshakeresnet26_2x32d_cifar100(classes=100, **kwargs):
"""
Shake-Shake-ResNet-26-2x32d model for CIFAR-100 from 'Shake-Shake regularization,' https://arxiv.org/abs/1705.07485.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_shakeshakeresnet_cifar(classes=classes, blocks=26, bottleneck=False, first_stage_channels=32,
model_name="shakeshakeresnet26_2x32d_cifar100", **kwargs)
def shakeshakeresnet26_2x32d_svhn(classes=10, **kwargs):
"""
Shake-Shake-ResNet-26-2x32d model for SVHN from 'Shake-Shake regularization,' https://arxiv.org/abs/1705.07485.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_shakeshakeresnet_cifar(classes=classes, blocks=26, bottleneck=False, first_stage_channels=32,
model_name="shakeshakeresnet26_2x32d_svhn", **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
(shakeshakeresnet20_2x16d_cifar10, 10),
(shakeshakeresnet20_2x16d_cifar100, 100),
(shakeshakeresnet20_2x16d_svhn, 10),
(shakeshakeresnet26_2x32d_cifar10, 10),
(shakeshakeresnet26_2x32d_cifar100, 100),
(shakeshakeresnet26_2x32d_svhn, 10),
]
for model, classes in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != shakeshakeresnet20_2x16d_cifar10 or weight_count == 541082)
assert (model != shakeshakeresnet20_2x16d_cifar100 or weight_count == 546932)
assert (model != shakeshakeresnet20_2x16d_svhn or weight_count == 541082)
assert (model != shakeshakeresnet26_2x32d_cifar10 or weight_count == 2923162)
assert (model != shakeshakeresnet26_2x32d_cifar100 or weight_count == 2934772)
assert (model != shakeshakeresnet26_2x32d_svhn or weight_count == 2923162)
x = mx.nd.zeros((14, 3, 32, 32), ctx=ctx)
y = net(x)
assert (y.shape == (14, classes))
if __name__ == "__main__":
_test()
| 16,328 | 35.612108 | 120 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/wrn_cifar.py | """
WRN for CIFAR/SVHN, implemented in Gluon.
Original paper: 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.
"""
__all__ = ['CIFARWRN', 'wrn16_10_cifar10', 'wrn16_10_cifar100', 'wrn16_10_svhn', 'wrn28_10_cifar10',
'wrn28_10_cifar100', 'wrn28_10_svhn', 'wrn40_8_cifar10', 'wrn40_8_cifar100', 'wrn40_8_svhn']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv3x3
from .preresnet import PreResUnit, PreResActivation
class CIFARWRN(HybridBlock):
"""
WRN model for CIFAR from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
bn_use_global_stats : bool, default False
        Whether global moving statistics are used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (32, 32)
Spatial size of the expected input image.
classes : int, default 10
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
bn_use_global_stats=False,
in_channels=3,
in_size=(32, 32),
classes=10,
**kwargs):
super(CIFARWRN, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
self.features.add(conv3x3(
in_channels=in_channels,
out_channels=init_block_channels))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
with stage.name_scope():
for j, out_channels in enumerate(channels_per_stage):
strides = 2 if (j == 0) and (i != 0) else 1
stage.add(PreResUnit(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats,
bottleneck=False,
conv1_stride=False))
in_channels = out_channels
self.features.add(stage)
self.features.add(PreResActivation(
in_channels=in_channels,
bn_use_global_stats=bn_use_global_stats))
self.features.add(nn.AvgPool2D(
pool_size=8,
strides=1))
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
self.output.add(nn.Dense(
units=classes,
in_units=in_channels))
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_wrn_cifar(classes,
blocks,
width_factor,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create WRN model for CIFAR with specific parameters.
Parameters:
----------
classes : int
Number of classification classes.
blocks : int
Number of blocks.
width_factor : int
Wide scale factor for width of layers.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
assert ((blocks - 4) % 6 == 0)
layers = [(blocks - 4) // 6] * 3
channels_per_layers = [16, 32, 64]
init_block_channels = 16
channels = [[ci * width_factor] * li for (ci, li) in zip(channels_per_layers, layers)]
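    # Example (added): for WRN-28-10, blocks=28 gives layers = [4, 4, 4] and
    # width_factor=10 widens the stages to [[160] * 4, [320] * 4, [640] * 4].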
net = CIFARWRN(
channels=channels,
init_block_channels=init_block_channels,
classes=classes,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def wrn16_10_cifar10(classes=10, **kwargs):
"""
WRN-16-10 model for CIFAR-10 from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_wrn_cifar(classes=classes, blocks=16, width_factor=10, model_name="wrn16_10_cifar10", **kwargs)
def wrn16_10_cifar100(classes=100, **kwargs):
"""
WRN-16-10 model for CIFAR-100 from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_wrn_cifar(classes=classes, blocks=16, width_factor=10, model_name="wrn16_10_cifar100", **kwargs)
def wrn16_10_svhn(classes=10, **kwargs):
"""
WRN-16-10 model for SVHN from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_wrn_cifar(classes=classes, blocks=16, width_factor=10, model_name="wrn16_10_svhn", **kwargs)
def wrn28_10_cifar10(classes=10, **kwargs):
"""
WRN-28-10 model for CIFAR-10 from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_wrn_cifar(classes=classes, blocks=28, width_factor=10, model_name="wrn28_10_cifar10", **kwargs)
def wrn28_10_cifar100(classes=100, **kwargs):
"""
WRN-28-10 model for CIFAR-100 from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_wrn_cifar(classes=classes, blocks=28, width_factor=10, model_name="wrn28_10_cifar100", **kwargs)
def wrn28_10_svhn(classes=10, **kwargs):
"""
WRN-28-10 model for SVHN from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_wrn_cifar(classes=classes, blocks=28, width_factor=10, model_name="wrn28_10_svhn", **kwargs)
def wrn40_8_cifar10(classes=10, **kwargs):
"""
WRN-40-8 model for CIFAR-10 from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_wrn_cifar(classes=classes, blocks=40, width_factor=8, model_name="wrn40_8_cifar10", **kwargs)
def wrn40_8_cifar100(classes=100, **kwargs):
"""
WRN-40-8 model for CIFAR-100 from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_wrn_cifar(classes=classes, blocks=40, width_factor=8, model_name="wrn40_8_cifar100", **kwargs)
def wrn40_8_svhn(classes=10, **kwargs):
"""
WRN-40-8 model for SVHN from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_wrn_cifar(classes=classes, blocks=40, width_factor=8, model_name="wrn40_8_svhn", **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
(wrn16_10_cifar10, 10),
(wrn16_10_cifar100, 100),
(wrn16_10_svhn, 10),
(wrn28_10_cifar10, 10),
(wrn28_10_cifar100, 100),
(wrn28_10_svhn, 10),
(wrn40_8_cifar10, 10),
(wrn40_8_cifar100, 100),
(wrn40_8_svhn, 10),
]
for model, classes in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != wrn16_10_cifar10 or weight_count == 17116634)
assert (model != wrn16_10_cifar100 or weight_count == 17174324)
assert (model != wrn16_10_svhn or weight_count == 17116634)
assert (model != wrn28_10_cifar10 or weight_count == 36479194)
assert (model != wrn28_10_cifar100 or weight_count == 36536884)
assert (model != wrn28_10_svhn or weight_count == 36479194)
assert (model != wrn40_8_cifar10 or weight_count == 35748314)
assert (model != wrn40_8_cifar100 or weight_count == 35794484)
assert (model != wrn40_8_svhn or weight_count == 35748314)
x = mx.nd.zeros((1, 3, 32, 32), ctx=ctx)
y = net(x)
assert (y.shape == (1, classes))
if __name__ == "__main__":
_test()
| 12,476 | 34.245763 | 115 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/inceptionresnetv2.py | """
InceptionResNetV2 for ImageNet-1K, implemented in Gluon.
Original paper: 'Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning,'
https://arxiv.org/abs/1602.07261.
"""
__all__ = ['InceptionResNetV2', 'inceptionresnetv2']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from mxnet.gluon.contrib.nn import HybridConcurrent
from .common import conv1x1_block, conv3x3_block
from .inceptionv3 import AvgPoolBranch, Conv1x1Branch, ConvSeqBranch
from .inceptionresnetv1 import InceptionAUnit, InceptionBUnit, InceptionCUnit, ReductionAUnit, ReductionBUnit
class InceptBlock5b(HybridBlock):
"""
InceptionResNetV2 type Mixed-5b block.
Parameters:
----------
bn_epsilon : float
Small float added to variance in Batch norm.
bn_use_global_stats : bool
        Whether global moving statistics are used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
bn_epsilon,
bn_use_global_stats,
**kwargs):
super(InceptBlock5b, self).__init__(**kwargs)
in_channels = 192
with self.name_scope():
self.branches = HybridConcurrent(axis=1, prefix="")
self.branches.add(Conv1x1Branch(
in_channels=in_channels,
out_channels=96,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats))
self.branches.add(ConvSeqBranch(
in_channels=in_channels,
out_channels_list=(48, 64),
kernel_size_list=(1, 5),
strides_list=(1, 1),
padding_list=(0, 2),
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats))
self.branches.add(ConvSeqBranch(
in_channels=in_channels,
out_channels_list=(64, 96, 96),
kernel_size_list=(1, 3, 3),
strides_list=(1, 1, 1),
padding_list=(0, 1, 1),
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats))
self.branches.add(AvgPoolBranch(
in_channels=in_channels,
out_channels=64,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats))
def hybrid_forward(self, F, x):
x = self.branches(x)
return x
class InceptInitBlock(HybridBlock):
"""
InceptionResNetV2 specific initial block.
Parameters:
----------
in_channels : int
Number of input channels.
bn_epsilon : float
Small float added to variance in Batch norm.
bn_use_global_stats : bool
        Whether global moving statistics are used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels,
bn_epsilon,
bn_use_global_stats,
**kwargs):
super(InceptInitBlock, self).__init__(**kwargs)
with self.name_scope():
self.conv1 = conv3x3_block(
in_channels=in_channels,
out_channels=32,
strides=2,
padding=0,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats)
self.conv2 = conv3x3_block(
in_channels=32,
out_channels=32,
strides=1,
padding=0,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats)
self.conv3 = conv3x3_block(
in_channels=32,
out_channels=64,
strides=1,
padding=1,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats)
self.pool1 = nn.MaxPool2D(
pool_size=3,
strides=2,
padding=0)
self.conv4 = conv1x1_block(
in_channels=64,
out_channels=80,
strides=1,
padding=0,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats)
self.conv5 = conv3x3_block(
in_channels=80,
out_channels=192,
strides=1,
padding=0,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats)
self.pool2 = nn.MaxPool2D(
pool_size=3,
strides=2,
padding=0)
self.block = InceptBlock5b(bn_epsilon=bn_epsilon, bn_use_global_stats=bn_use_global_stats)
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
x = self.pool1(x)
x = self.conv4(x)
x = self.conv5(x)
x = self.pool2(x)
x = self.block(x)
return x
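# Added shape note (for the default 299x299 input): the stem shrinks the
# spatial size as 299 -> 149 (conv1) -> 147 (conv2) -> 147 (conv3)
# -> 73 (pool1) -> 73 (conv4) -> 71 (conv5) -> 35 (pool2), and the
# Mixed-5b block widens the features from 192 to 96 + 64 + 96 + 64 = 320
# channels.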
class InceptionResNetV2(HybridBlock):
"""
InceptionResNetV2 model from 'Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning,'
https://arxiv.org/abs/1602.07261.
Parameters:
----------
dropout_rate : float, default 0.0
Fraction of the input units to drop. Must be a number between 0 and 1.
bn_epsilon : float, default 1e-5
Small float added to variance in Batch norm.
bn_use_global_stats : bool, default False
        Whether global moving statistics are used instead of local batch-norm for BatchNorm layers.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (299, 299)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
dropout_rate=0.0,
bn_epsilon=1e-5,
bn_use_global_stats=False,
in_channels=3,
in_size=(299, 299),
classes=1000,
**kwargs):
super(InceptionResNetV2, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
layers = [10, 21, 11]
in_channels_list = [320, 1088, 2080]
normal_out_channels_list = [[32, 32, 32, 32, 48, 64], [192, 128, 160, 192], [192, 192, 224, 256]]
reduction_out_channels_list = [[384, 256, 256, 384], [256, 384, 256, 288, 256, 288, 320]]
normal_units = [InceptionAUnit, InceptionBUnit, InceptionCUnit]
reduction_units = [ReductionAUnit, ReductionBUnit]
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
self.features.add(InceptInitBlock(
in_channels=in_channels,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats))
in_channels = in_channels_list[0]
for i, layers_per_stage in enumerate(layers):
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
with stage.name_scope():
for j in range(layers_per_stage):
if (j == 0) and (i != 0):
unit = reduction_units[i - 1]
out_channels_list_per_stage = reduction_out_channels_list[i - 1]
else:
unit = normal_units[i]
out_channels_list_per_stage = normal_out_channels_list[i]
if (i == len(layers) - 1) and (j == layers_per_stage - 1):
unit_kwargs = {"scale": 1.0, "activate": False}
else:
unit_kwargs = {}
stage.add(unit(
in_channels=in_channels,
out_channels_list=out_channels_list_per_stage,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
**unit_kwargs))
if (j == 0) and (i != 0):
in_channels = in_channels_list[i]
self.features.add(stage)
self.features.add(conv1x1_block(
in_channels=2080,
out_channels=1536,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats))
self.features.add(nn.AvgPool2D(
pool_size=8,
strides=1))
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
if dropout_rate > 0.0:
self.output.add(nn.Dropout(rate=dropout_rate))
self.output.add(nn.Dense(
units=classes,
in_units=1536))
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_inceptionresnetv2(model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create InceptionResNetV2 model with specific parameters.
Parameters:
----------
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
net = InceptionResNetV2(**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def inceptionresnetv2(**kwargs):
"""
InceptionResNetV2 model from 'Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning,'
https://arxiv.org/abs/1602.07261.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_inceptionresnetv2(model_name="inceptionresnetv2", bn_epsilon=1e-3, **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
inceptionresnetv2,
]
for model in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != inceptionresnetv2 or weight_count == 55843464)
x = mx.nd.zeros((1, 3, 299, 299), ctx=ctx)
y = net(x)
assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
| 11,625 | 34.772308 | 117 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/ghostnet.py | """
GhostNet for ImageNet-1K, implemented in Gluon.
Original paper: 'GhostNet: More Features from Cheap Operations,' https://arxiv.org/abs/1911.11907.
"""
__all__ = ['GhostNet', 'ghostnet']
import os
import math
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import round_channels, conv1x1, conv1x1_block, conv3x3_block, dwconv3x3_block, dwconv5x5_block,\
dwsconv3x3_block, SEBlock
class GhostHSigmoid(HybridBlock):
"""
    Approximated sigmoid function, specific to GhostNet.
"""
def __init__(self, **kwargs):
super(GhostHSigmoid, self).__init__(**kwargs)
def hybrid_forward(self, F, x):
return F.clip(x, 0.0, 1.0)
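# Added note: clip(x, 0, 1) is a cheap piecewise-linear stand-in for the
# sigmoid gate of the SE block; unlike the common hard-sigmoid variant
# relu6(x + 3) / 6, it clamps the raw input to [0, 1] directly.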
class GhostConvBlock(HybridBlock):
"""
GhostNet specific convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
bn_use_global_stats : bool, default False
        Whether global moving statistics are used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    activation : function or str or None, default nn.Activation('relu')
Activation function or name of activation function.
"""
def __init__(self,
in_channels,
out_channels,
bn_use_global_stats=False,
activation=(lambda: nn.Activation("relu")),
**kwargs):
super(GhostConvBlock, self).__init__(**kwargs)
main_out_channels = math.ceil(0.5 * out_channels)
cheap_out_channels = out_channels - main_out_channels
with self.name_scope():
self.main_conv = conv1x1_block(
in_channels=in_channels,
out_channels=main_out_channels,
bn_use_global_stats=bn_use_global_stats,
activation=activation)
self.cheap_conv = dwconv3x3_block(
in_channels=main_out_channels,
out_channels=cheap_out_channels,
bn_use_global_stats=bn_use_global_stats,
activation=activation)
def hybrid_forward(self, F, x):
x = self.main_conv(x)
y = self.cheap_conv(x)
return F.concat(x, y, dim=1)
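# Channel bookkeeping for GhostConvBlock, hand-checked for illustration: with
# out_channels = 57, main_out_channels = ceil(0.5 * 57) = 29 channels come
# from the ordinary 1x1 convolution, the remaining 28 "ghost" channels are
# generated from them by the cheap depthwise 3x3 convolution, and the concat
# restores all 57 output channels.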
class GhostExpBlock(HybridBlock):
"""
GhostNet expansion block for residual path in GhostNet unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
use_kernel3 : bool
Whether to use 3x3 (instead of 5x5) kernel.
exp_factor : float
Expansion factor.
use_se : bool
Whether to use SE-module.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
"""
def __init__(self,
in_channels,
out_channels,
strides,
use_kernel3,
exp_factor,
use_se,
bn_use_global_stats=False,
**kwargs):
super(GhostExpBlock, self).__init__(**kwargs)
self.use_dw_conv = (strides != 1)
self.use_se = use_se
mid_channels = int(math.ceil(exp_factor * in_channels))
with self.name_scope():
self.exp_conv = GhostConvBlock(
in_channels=in_channels,
out_channels=mid_channels,
bn_use_global_stats=bn_use_global_stats)
if self.use_dw_conv:
dw_conv_class = dwconv3x3_block if use_kernel3 else dwconv5x5_block
self.dw_conv = dw_conv_class(
in_channels=mid_channels,
out_channels=mid_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats,
activation=None)
if self.use_se:
self.se = SEBlock(
channels=mid_channels,
reduction=4,
out_activation=GhostHSigmoid())
self.pw_conv = GhostConvBlock(
in_channels=mid_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats,
activation=None)
def hybrid_forward(self, F, x):
x = self.exp_conv(x)
if self.use_dw_conv:
x = self.dw_conv(x)
if self.use_se:
x = self.se(x)
x = self.pw_conv(x)
return x
class GhostUnit(HybridBlock):
"""
GhostNet unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the second convolution layer.
use_kernel3 : bool
Whether to use 3x3 (instead of 5x5) kernel.
exp_factor : float
Expansion factor.
use_se : bool
Whether to use SE-module.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
"""
def __init__(self,
in_channels,
out_channels,
strides,
use_kernel3,
exp_factor,
use_se,
bn_use_global_stats=False,
**kwargs):
super(GhostUnit, self).__init__(**kwargs)
self.resize_identity = (in_channels != out_channels) or (strides != 1)
with self.name_scope():
self.body = GhostExpBlock(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
use_kernel3=use_kernel3,
exp_factor=exp_factor,
use_se=use_se,
bn_use_global_stats=bn_use_global_stats)
if self.resize_identity:
self.identity_conv = dwsconv3x3_block(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats,
pw_activation=None)
def hybrid_forward(self, F, x):
if self.resize_identity:
identity = self.identity_conv(x)
else:
identity = x
x = self.body(x)
x = x + identity
return x
class GhostClassifier(HybridBlock):
"""
GhostNet classifier.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
mid_channels : int
Number of middle channels.
"""
def __init__(self,
in_channels,
out_channels,
mid_channels,
**kwargs):
super(GhostClassifier, self).__init__(**kwargs)
with self.name_scope():
self.conv1 = conv1x1_block(
in_channels=in_channels,
out_channels=mid_channels)
self.conv2 = conv1x1(
in_channels=mid_channels,
out_channels=out_channels,
use_bias=True)
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.conv2(x)
return x
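# Note: by the time features reach GhostClassifier the spatial size is 1x1
# (after the 7x7 average pooling in GhostNet below), so the two 1x1
# convolutions act as a pair of fully-connected layers -- 960 -> 1280 ->
# classes in the default configuration built by get_ghostnet.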
class GhostNet(HybridBlock):
"""
GhostNet model from 'GhostNet: More Features from Cheap Operations,' https://arxiv.org/abs/1911.11907.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
final_block_channels : int
Number of output channels for the final block of the feature extractor.
classifier_mid_channels : int
Number of middle channels for classifier.
kernels3 : list of list of int/bool
Using 3x3 (instead of 5x5) kernel for each unit.
exp_factors : list of list of float
Expansion factor for each unit.
use_se : list of list of int/bool
Using SE-block flag for each unit.
first_stride : bool
Whether to use stride for the first stage.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
final_block_channels,
classifier_mid_channels,
kernels3,
exp_factors,
use_se,
first_stride,
bn_use_global_stats=False,
in_channels=3,
in_size=(224, 224),
classes=1000,
**kwargs):
super(GhostNet, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
self.features.add(conv3x3_block(
in_channels=in_channels,
out_channels=init_block_channels,
strides=2,
bn_use_global_stats=bn_use_global_stats))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
with stage.name_scope():
for j, out_channels in enumerate(channels_per_stage):
strides = 2 if (j == 0) and ((i != 0) or first_stride) else 1
use_kernel3 = kernels3[i][j] == 1
exp_factor = exp_factors[i][j]
use_se_flag = use_se[i][j] == 1
stage.add(GhostUnit(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
use_kernel3=use_kernel3,
exp_factor=exp_factor,
use_se=use_se_flag,
bn_use_global_stats=bn_use_global_stats))
in_channels = out_channels
self.features.add(stage)
self.features.add(conv1x1_block(
in_channels=in_channels,
out_channels=final_block_channels,
bn_use_global_stats=bn_use_global_stats))
in_channels = final_block_channels
self.features.add(nn.AvgPool2D(
pool_size=7,
strides=1))
self.output = nn.HybridSequential(prefix="")
self.output.add(GhostClassifier(
in_channels=in_channels,
out_channels=classes,
mid_channels=classifier_mid_channels))
self.output.add(nn.Flatten())
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_ghostnet(width_scale=1.0,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create GhostNet model with specific parameters.
Parameters:
----------
width_scale : float, default 1.0
Scale factor for width of layers.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
init_block_channels = 16
channels = [[16], [24, 24], [40, 40], [80, 80, 80, 80, 112, 112], [160, 160, 160, 160, 160]]
kernels3 = [[1], [1, 1], [0, 0], [1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0]]
exp_factors = [[1], [3, 3], [3, 3], [6, 2.5, 2.3, 2.3, 6, 6], [6, 6, 6, 6, 6]]
use_se = [[0], [0, 0], [1, 1], [0, 0, 0, 0, 1, 1], [1, 0, 1, 0, 1]]
final_block_channels = 960
classifier_mid_channels = 1280
first_stride = False
if width_scale != 1.0:
channels = [[round_channels(cij * width_scale, divisor=4) for cij in ci] for ci in channels]
init_block_channels = round_channels(init_block_channels * width_scale, divisor=4)
if width_scale > 1.0:
final_block_channels = round_channels(final_block_channels * width_scale, divisor=4)
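    # Worked example of the scaling above (hand-computed, assuming
    # round_channels keeps values that are already multiples of the divisor):
    # width_scale=0.5 halves every entry, e.g. [[16], [24, 24], ...] becomes
    # [[8], [12, 12], ...] and init_block_channels goes 16 -> 8, while
    # final_block_channels stays at 960 since it is only rescaled for
    # width_scale > 1.0.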
net = GhostNet(
channels=channels,
init_block_channels=init_block_channels,
final_block_channels=final_block_channels,
classifier_mid_channels=classifier_mid_channels,
kernels3=kernels3,
exp_factors=exp_factors,
use_se=use_se,
first_stride=first_stride,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def ghostnet(**kwargs):
"""
GhostNet model from 'GhostNet: More Features from Cheap Operations,' https://arxiv.org/abs/1911.11907.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_ghostnet(model_name="ghostnet", **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
ghostnet,
]
for model in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != ghostnet or weight_count == 5180840)
x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
y = net(x)
assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
| 15,156 | 33.060674 | 115 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/efficientnet.py | """
EfficientNet for ImageNet-1K, implemented in Gluon.
Original papers:
- 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946,
- 'Adversarial Examples Improve Image Recognition,' https://arxiv.org/abs/1911.09665.
"""
__all__ = ['EfficientNet', 'calc_tf_padding', 'EffiInvResUnit', 'EffiInitBlock', 'efficientnet_b0', 'efficientnet_b1',
'efficientnet_b2', 'efficientnet_b3', 'efficientnet_b4', 'efficientnet_b5', 'efficientnet_b6',
'efficientnet_b7', 'efficientnet_b8', 'efficientnet_b0b', 'efficientnet_b1b', 'efficientnet_b2b',
'efficientnet_b3b', 'efficientnet_b4b', 'efficientnet_b5b', 'efficientnet_b6b', 'efficientnet_b7b',
'efficientnet_b0c', 'efficientnet_b1c', 'efficientnet_b2c', 'efficientnet_b3c', 'efficientnet_b4c',
'efficientnet_b5c', 'efficientnet_b6c', 'efficientnet_b7c', 'efficientnet_b8c']
import os
import math
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import round_channels, conv1x1_block, conv3x3_block, dwconv3x3_block, dwconv5x5_block, SEBlock
def calc_tf_padding(in_size,
kernel_size,
strides=1,
dilation=1):
"""
Calculate TF-same like padding size.
Parameters:
----------
in_size : tuple of 2 int
Spatial size of input image.
kernel_size : int
Convolution window size.
strides : int, default 1
Strides of the convolution.
dilation : int, default 1
Dilation value for convolution layer.
Returns:
-------
tuple of 8 int
The size of the padding.
"""
height, width = in_size
oh = math.ceil(height / strides)
ow = math.ceil(width / strides)
pad_h = max((oh - 1) * strides + (kernel_size - 1) * dilation + 1 - height, 0)
pad_w = max((ow - 1) * strides + (kernel_size - 1) * dilation + 1 - width, 0)
return 0, 0, 0, 0, pad_h // 2, pad_h - pad_h // 2, pad_w // 2, pad_w - pad_w // 2
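# A hand-worked example of the padding arithmetic above (not part of the
# original module): calc_tf_padding(in_size=(224, 224), kernel_size=3,
# strides=2) gives oh = ow = ceil(224 / 2) = 112 and
# pad_h = pad_w = max((112 - 1) * 2 + (3 - 1) + 1 - 224, 0) = 1, so it
# returns (0, 0, 0, 0, 0, 1, 0, 1): one extra row/column on the bottom/right,
# matching TensorFlow's asymmetric 'SAME' padding in NCHW pad_width order.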
class EffiDwsConvUnit(HybridBlock):
"""
EfficientNet specific depthwise separable convolution block/unit with BatchNorms and activations at each convolution
layer.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the second convolution layer.
bn_epsilon : float
Small float added to variance in Batch norm.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
activation : str
Name of activation function.
tf_mode : bool
Whether to use TF-like mode.
in_size : tuple of 2 int, default None
Spatial size of input image.
"""
def __init__(self,
in_channels,
out_channels,
strides,
bn_epsilon,
bn_use_global_stats,
activation,
tf_mode,
in_size=None,
**kwargs):
super(EffiDwsConvUnit, self).__init__(**kwargs)
self.tf_mode = tf_mode
self.in_size = in_size
self.residual = (in_channels == out_channels) and (strides == 1)
with self.name_scope():
self.dw_conv = dwconv3x3_block(
in_channels=in_channels,
out_channels=in_channels,
padding=(0 if tf_mode else 1),
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
activation=activation)
self.se = SEBlock(
channels=in_channels,
reduction=4,
mid_activation=activation)
self.pw_conv = conv1x1_block(
in_channels=in_channels,
out_channels=out_channels,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
activation=None)
def hybrid_forward(self, F, x):
if self.residual:
identity = x
if self.tf_mode:
in_size = self.in_size if self.in_size is not None else x.shape[2:]
# assert (in_size == x.shape[2:])
x = F.pad(x, mode="constant", pad_width=calc_tf_padding(in_size=in_size, kernel_size=3), constant_value=0)
x = self.dw_conv(x)
x = self.se(x)
x = self.pw_conv(x)
if self.residual:
x = x + identity
return x
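# Note on the tf_mode branch above: in that mode the depthwise convolution is
# created with padding=0, so the explicit F.pad call supplies the (possibly
# asymmetric) padding itself, reproducing TensorFlow's 'SAME' behaviour
# instead of MXNet's symmetric built-in padding.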
class EffiInvResUnit(HybridBlock):
"""
EfficientNet inverted residual unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of 2 int
Convolution window size.
strides : int or tuple/list of 2 int
Strides of the second convolution layer.
exp_factor : int
Factor for expansion of channels.
se_factor : int
SE reduction factor for each unit.
bn_epsilon : float
Small float added to variance in Batch norm.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
activation : str
Name of activation function.
tf_mode : bool
Whether to use TF-like mode.
in_size : tuple of 2 int, default None
Spatial size of input image.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
strides,
exp_factor,
se_factor,
bn_epsilon,
bn_use_global_stats,
activation,
tf_mode,
in_size=None,
**kwargs):
super(EffiInvResUnit, self).__init__(**kwargs)
self.kernel_size = kernel_size
self.strides = strides
self.tf_mode = tf_mode
self.in_size = in_size
self.residual = (in_channels == out_channels) and (strides == 1)
self.use_se = se_factor > 0
mid_channels = in_channels * exp_factor
dwconv_block_fn = dwconv3x3_block if kernel_size == 3 else (dwconv5x5_block if kernel_size == 5 else None)
with self.name_scope():
self.conv1 = conv1x1_block(
in_channels=in_channels,
out_channels=mid_channels,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
activation=activation)
self.conv2 = dwconv_block_fn(
in_channels=mid_channels,
out_channels=mid_channels,
strides=strides,
padding=(0 if tf_mode else (kernel_size // 2)),
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
activation=activation)
if self.use_se:
self.se = SEBlock(
channels=mid_channels,
reduction=(exp_factor * se_factor),
mid_activation=activation)
self.conv3 = conv1x1_block(
in_channels=mid_channels,
out_channels=out_channels,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
activation=None)
def hybrid_forward(self, F, x):
if self.residual:
identity = x
x = self.conv1(x)
if self.tf_mode:
in_size = self.in_size if self.in_size is not None else x.shape[2:]
# assert (in_size == x.shape[2:])
x = F.pad(x,
mode="constant",
pad_width=calc_tf_padding(in_size=in_size, kernel_size=self.kernel_size, strides=self.strides),
constant_value=0)
x = self.conv2(x)
if self.use_se:
x = self.se(x)
x = self.conv3(x)
if self.residual:
x = x + identity
return x
class EffiInitBlock(HybridBlock):
"""
EfficientNet specific initial block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
bn_epsilon : float
Small float added to variance in Batch norm.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
activation : str
Name of activation function.
tf_mode : bool
Whether to use TF-like mode.
in_size : tuple of 2 int, default None
Spatial size of input image.
"""
def __init__(self,
in_channels,
out_channels,
bn_epsilon,
bn_use_global_stats,
activation,
tf_mode,
in_size=None,
**kwargs):
super(EffiInitBlock, self).__init__(**kwargs)
self.tf_mode = tf_mode
self.in_size = in_size
with self.name_scope():
self.conv = conv3x3_block(
in_channels=in_channels,
out_channels=out_channels,
strides=2,
padding=(0 if tf_mode else 1),
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
activation=activation)
def hybrid_forward(self, F, x):
if self.tf_mode:
in_size = self.in_size if self.in_size is not None else x.shape[2:]
# assert (in_size == x.shape[2:])
x = F.pad(x,
mode="constant",
pad_width=calc_tf_padding(in_size=in_size, kernel_size=3, strides=2),
constant_value=0)
x = self.conv(x)
return x
class EfficientNet(HybridBlock):
"""
EfficientNet(-B0) model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,'
https://arxiv.org/abs/1905.11946.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for initial unit.
final_block_channels : int
Number of output channels for the final block of the feature extractor.
kernel_sizes : list of list of int
Kernel size for each unit.
strides_per_stage : list of int
Stride value for the first unit of each stage.
expansion_factors : list of list of int
Expansion factor for each unit.
dropout_rate : float, default 0.2
Fraction of the input units to drop. Must be a number between 0 and 1.
tf_mode : bool, default False
Whether to use TF-like mode.
fixed_size : bool, default True
Whether to expect fixed spatial size of input image.
bn_epsilon : float, default 1e-5
Small float added to variance in Batch norm.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
final_block_channels,
kernel_sizes,
strides_per_stage,
expansion_factors,
dropout_rate=0.2,
tf_mode=False,
fixed_size=True,
bn_epsilon=1e-5,
bn_use_global_stats=False,
in_channels=3,
in_size=(224, 224),
classes=1000,
**kwargs):
super(EfficientNet, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
self.fixed_size = fixed_size
activation = "swish"
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
self.features.add(EffiInitBlock(
in_channels=in_channels,
out_channels=init_block_channels,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
activation=activation,
tf_mode=tf_mode,
in_size=in_size if fixed_size else None))
in_channels = init_block_channels
in_size = (math.ceil(in_size[0] / 2), math.ceil(in_size[1] / 2))
for i, channels_per_stage in enumerate(channels):
kernel_sizes_per_stage = kernel_sizes[i]
expansion_factors_per_stage = expansion_factors[i]
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
with stage.name_scope():
for j, out_channels in enumerate(channels_per_stage):
kernel_size = kernel_sizes_per_stage[j]
expansion_factor = expansion_factors_per_stage[j]
strides = strides_per_stage[i] if (j == 0) else 1
if i == 0:
stage.add(EffiDwsConvUnit(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
activation=activation,
tf_mode=tf_mode,
in_size=in_size if fixed_size else None))
else:
stage.add(EffiInvResUnit(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
strides=strides,
exp_factor=expansion_factor,
se_factor=4,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
activation=activation,
tf_mode=tf_mode,
in_size=in_size if fixed_size else None))
in_channels = out_channels
if strides > 1:
in_size = (math.ceil(in_size[0] / 2), math.ceil(in_size[1] / 2))
self.features.add(stage)
self.features.add(conv1x1_block(
in_channels=in_channels,
out_channels=final_block_channels,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
activation=activation))
in_channels = final_block_channels
self.features.add(nn.GlobalAvgPool2D())
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
if dropout_rate > 0.0:
self.output.add(nn.Dropout(rate=dropout_rate))
self.output.add(nn.Dense(
units=classes,
in_units=in_channels))
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_efficientnet(version,
in_size,
tf_mode=False,
bn_epsilon=1e-5,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create EfficientNet model with specific parameters.
Parameters:
----------
version : str
Version of EfficientNet ('b0'...'b8').
in_size : tuple of two ints
Spatial size of the expected input image.
tf_mode : bool, default False
Whether to use TF-like mode.
bn_epsilon : float, default 1e-5
Small float added to variance in Batch norm.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
if version == "b0":
assert (in_size == (224, 224))
depth_factor = 1.0
width_factor = 1.0
dropout_rate = 0.2
elif version == "b1":
assert (in_size == (240, 240))
depth_factor = 1.1
width_factor = 1.0
dropout_rate = 0.2
elif version == "b2":
assert (in_size == (260, 260))
depth_factor = 1.2
width_factor = 1.1
dropout_rate = 0.3
elif version == "b3":
assert (in_size == (300, 300))
depth_factor = 1.4
width_factor = 1.2
dropout_rate = 0.3
elif version == "b4":
assert (in_size == (380, 380))
depth_factor = 1.8
width_factor = 1.4
dropout_rate = 0.4
elif version == "b5":
assert (in_size == (456, 456))
depth_factor = 2.2
width_factor = 1.6
dropout_rate = 0.4
elif version == "b6":
assert (in_size == (528, 528))
depth_factor = 2.6
width_factor = 1.8
dropout_rate = 0.5
elif version == "b7":
assert (in_size == (600, 600))
depth_factor = 3.1
width_factor = 2.0
dropout_rate = 0.5
elif version == "b8":
assert (in_size == (672, 672))
depth_factor = 3.6
width_factor = 2.2
dropout_rate = 0.5
else:
raise ValueError("Unsupported EfficientNet version {}".format(version))
init_block_channels = 32
layers = [1, 2, 2, 3, 3, 4, 1]
downsample = [1, 1, 1, 1, 0, 1, 0]
channels_per_layers = [16, 24, 40, 80, 112, 192, 320]
expansion_factors_per_layers = [1, 6, 6, 6, 6, 6, 6]
kernel_sizes_per_layers = [3, 3, 5, 3, 5, 5, 3]
strides_per_stage = [1, 2, 2, 2, 1, 2, 1]
final_block_channels = 1280
layers = [int(math.ceil(li * depth_factor)) for li in layers]
channels_per_layers = [round_channels(ci * width_factor) for ci in channels_per_layers]
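    # Hand-checked example of the depth scaling above: for version 'b1'
    # (depth_factor=1.1, width_factor=1.0) the layer counts [1, 2, 2, 3, 3, 4, 1]
    # become [2, 3, 3, 4, 4, 5, 2] after the ceil, while the channel widths
    # are unchanged.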
from functools import reduce
channels = reduce(lambda x, y: x + [[y[0]] * y[1]] if y[2] != 0 else x[:-1] + [x[-1] + [y[0]] * y[1]],
zip(channels_per_layers, layers, downsample), [])
kernel_sizes = reduce(lambda x, y: x + [[y[0]] * y[1]] if y[2] != 0 else x[:-1] + [x[-1] + [y[0]] * y[1]],
zip(kernel_sizes_per_layers, layers, downsample), [])
expansion_factors = reduce(lambda x, y: x + [[y[0]] * y[1]] if y[2] != 0 else x[:-1] + [x[-1] + [y[0]] * y[1]],
zip(expansion_factors_per_layers, layers, downsample), [])
strides_per_stage = reduce(lambda x, y: x + [[y[0]] * y[1]] if y[2] != 0 else x[:-1] + [x[-1] + [y[0]] * y[1]],
zip(strides_per_stage, layers, downsample), [])
strides_per_stage = [si[0] for si in strides_per_stage]
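    # The reduce calls above fold consecutive layer groups into one stage
    # wherever downsample == 0. Hand-expanded for 'b0' (layers
    # [1, 2, 2, 3, 3, 4, 1]) this gives channels = [[16], [24, 24], [40, 40],
    # [80, 80, 80, 112, 112, 112], [192, 192, 192, 192, 320]].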
init_block_channels = round_channels(init_block_channels * width_factor)
if width_factor > 1.0:
assert (int(final_block_channels * width_factor) == round_channels(final_block_channels * width_factor))
final_block_channels = round_channels(final_block_channels * width_factor)
net = EfficientNet(
channels=channels,
init_block_channels=init_block_channels,
final_block_channels=final_block_channels,
kernel_sizes=kernel_sizes,
strides_per_stage=strides_per_stage,
expansion_factors=expansion_factors,
dropout_rate=dropout_rate,
tf_mode=tf_mode,
bn_epsilon=bn_epsilon,
in_size=in_size,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def efficientnet_b0(in_size=(224, 224), **kwargs):
"""
EfficientNet-B0 model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,'
https://arxiv.org/abs/1905.11946.
Parameters:
----------
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_efficientnet(version="b0", in_size=in_size, model_name="efficientnet_b0", **kwargs)
def efficientnet_b1(in_size=(240, 240), **kwargs):
"""
EfficientNet-B1 model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,'
https://arxiv.org/abs/1905.11946.
Parameters:
----------
in_size : tuple of two ints, default (240, 240)
Spatial size of the expected input image.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_efficientnet(version="b1", in_size=in_size, model_name="efficientnet_b1", **kwargs)
def efficientnet_b2(in_size=(260, 260), **kwargs):
"""
EfficientNet-B2 model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,'
https://arxiv.org/abs/1905.11946.
Parameters:
----------
in_size : tuple of two ints, default (260, 260)
Spatial size of the expected input image.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_efficientnet(version="b2", in_size=in_size, model_name="efficientnet_b2", **kwargs)
def efficientnet_b3(in_size=(300, 300), **kwargs):
"""
EfficientNet-B3 model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,'
https://arxiv.org/abs/1905.11946.
Parameters:
----------
in_size : tuple of two ints, default (300, 300)
Spatial size of the expected input image.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_efficientnet(version="b3", in_size=in_size, model_name="efficientnet_b3", **kwargs)
def efficientnet_b4(in_size=(380, 380), **kwargs):
"""
EfficientNet-B4 model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,'
https://arxiv.org/abs/1905.11946.
Parameters:
----------
in_size : tuple of two ints, default (380, 380)
Spatial size of the expected input image.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_efficientnet(version="b4", in_size=in_size, model_name="efficientnet_b4", **kwargs)
def efficientnet_b5(in_size=(456, 456), **kwargs):
"""
EfficientNet-B5 model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,'
https://arxiv.org/abs/1905.11946.
Parameters:
----------
in_size : tuple of two ints, default (456, 456)
Spatial size of the expected input image.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_efficientnet(version="b5", in_size=in_size, model_name="efficientnet_b5", **kwargs)
def efficientnet_b6(in_size=(528, 528), **kwargs):
"""
EfficientNet-B6 model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,'
https://arxiv.org/abs/1905.11946.
Parameters:
----------
in_size : tuple of two ints, default (528, 528)
Spatial size of the expected input image.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_efficientnet(version="b6", in_size=in_size, model_name="efficientnet_b6", **kwargs)
def efficientnet_b7(in_size=(600, 600), **kwargs):
"""
EfficientNet-B7 model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,'
https://arxiv.org/abs/1905.11946.
Parameters:
----------
in_size : tuple of two ints, default (600, 600)
Spatial size of the expected input image.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_efficientnet(version="b7", in_size=in_size, model_name="efficientnet_b7", **kwargs)
def efficientnet_b8(in_size=(672, 672), **kwargs):
"""
EfficientNet-B8 model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,'
https://arxiv.org/abs/1905.11946.
Parameters:
----------
in_size : tuple of two ints, default (672, 672)
Spatial size of the expected input image.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_efficientnet(version="b8", in_size=in_size, model_name="efficientnet_b8", **kwargs)
def efficientnet_b0b(in_size=(224, 224), **kwargs):
"""
EfficientNet-B0-b (like TF-implementation) model from 'EfficientNet: Rethinking Model Scaling for Convolutional
Neural Networks,' https://arxiv.org/abs/1905.11946.
Parameters:
----------
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_efficientnet(version="b0", in_size=in_size, tf_mode=True, bn_epsilon=1e-3, model_name="efficientnet_b0b",
**kwargs)
def efficientnet_b1b(in_size=(240, 240), **kwargs):
"""
EfficientNet-B1-b (like TF-implementation) model from 'EfficientNet: Rethinking Model Scaling for Convolutional
Neural Networks,' https://arxiv.org/abs/1905.11946.
Parameters:
----------
in_size : tuple of two ints, default (240, 240)
Spatial size of the expected input image.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_efficientnet(version="b1", in_size=in_size, tf_mode=True, bn_epsilon=1e-3, model_name="efficientnet_b1b",
**kwargs)
def efficientnet_b2b(in_size=(260, 260), **kwargs):
"""
EfficientNet-B2-b (like TF-implementation) model from 'EfficientNet: Rethinking Model Scaling for Convolutional
Neural Networks,' https://arxiv.org/abs/1905.11946.
Parameters:
----------
in_size : tuple of two ints, default (260, 260)
Spatial size of the expected input image.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_efficientnet(version="b2", in_size=in_size, tf_mode=True, bn_epsilon=1e-3, model_name="efficientnet_b2b",
**kwargs)
def efficientnet_b3b(in_size=(300, 300), **kwargs):
"""
EfficientNet-B3-b (like TF-implementation) model from 'EfficientNet: Rethinking Model Scaling for Convolutional
Neural Networks,' https://arxiv.org/abs/1905.11946.
Parameters:
----------
in_size : tuple of two ints, default (300, 300)
Spatial size of the expected input image.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_efficientnet(version="b3", in_size=in_size, tf_mode=True, bn_epsilon=1e-3, model_name="efficientnet_b3b",
**kwargs)
def efficientnet_b4b(in_size=(380, 380), **kwargs):
"""
EfficientNet-B4-b (like TF-implementation) model from 'EfficientNet: Rethinking Model Scaling for Convolutional
Neural Networks,' https://arxiv.org/abs/1905.11946.
Parameters:
----------
in_size : tuple of two ints, default (380, 380)
Spatial size of the expected input image.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_efficientnet(version="b4", in_size=in_size, tf_mode=True, bn_epsilon=1e-3, model_name="efficientnet_b4b",
**kwargs)
def efficientnet_b5b(in_size=(456, 456), **kwargs):
"""
EfficientNet-B5-b (like TF-implementation) model from 'EfficientNet: Rethinking Model Scaling for Convolutional
Neural Networks,' https://arxiv.org/abs/1905.11946.
Parameters:
----------
in_size : tuple of two ints, default (456, 456)
Spatial size of the expected input image.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_efficientnet(version="b5", in_size=in_size, tf_mode=True, bn_epsilon=1e-3, model_name="efficientnet_b5b",
**kwargs)
def efficientnet_b6b(in_size=(528, 528), **kwargs):
"""
EfficientNet-B6-b (like TF-implementation) model from 'EfficientNet: Rethinking Model Scaling for Convolutional
Neural Networks,' https://arxiv.org/abs/1905.11946.
Parameters:
----------
in_size : tuple of two ints, default (528, 528)
Spatial size of the expected input image.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_efficientnet(version="b6", in_size=in_size, tf_mode=True, bn_epsilon=1e-3, model_name="efficientnet_b6b",
**kwargs)
def efficientnet_b7b(in_size=(600, 600), **kwargs):
"""
EfficientNet-B7-b (like TF-implementation) model from 'EfficientNet: Rethinking Model Scaling for Convolutional
Neural Networks,' https://arxiv.org/abs/1905.11946.
Parameters:
----------
in_size : tuple of two ints, default (600, 600)
Spatial size of the expected input image.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_efficientnet(version="b7", in_size=in_size, tf_mode=True, bn_epsilon=1e-3, model_name="efficientnet_b7b",
**kwargs)
def efficientnet_b0c(in_size=(224, 224), **kwargs):
"""
EfficientNet-B0-c (like TF-implementation, trained with AdvProp) model from 'EfficientNet: Rethinking Model Scaling
for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946.
Parameters:
----------
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_efficientnet(version="b0", in_size=in_size, tf_mode=True, bn_epsilon=1e-3, model_name="efficientnet_b0c",
**kwargs)
def efficientnet_b1c(in_size=(240, 240), **kwargs):
"""
EfficientNet-B1-c (like TF-implementation, trained with AdvProp) model from 'EfficientNet: Rethinking Model Scaling
for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946.
Parameters:
----------
in_size : tuple of two ints, default (240, 240)
Spatial size of the expected input image.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_efficientnet(version="b1", in_size=in_size, tf_mode=True, bn_epsilon=1e-3, model_name="efficientnet_b1c",
**kwargs)
def efficientnet_b2c(in_size=(260, 260), **kwargs):
"""
EfficientNet-B2-c (like TF-implementation, trained with AdvProp) model from 'EfficientNet: Rethinking Model Scaling
for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946.
Parameters:
----------
in_size : tuple of two ints, default (260, 260)
Spatial size of the expected input image.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_efficientnet(version="b2", in_size=in_size, tf_mode=True, bn_epsilon=1e-3, model_name="efficientnet_b2c",
**kwargs)
def efficientnet_b3c(in_size=(300, 300), **kwargs):
"""
EfficientNet-B3-c (like TF-implementation, trained with AdvProp) model from 'EfficientNet: Rethinking Model Scaling
for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946.
Parameters:
----------
in_size : tuple of two ints, default (300, 300)
Spatial size of the expected input image.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_efficientnet(version="b3", in_size=in_size, tf_mode=True, bn_epsilon=1e-3, model_name="efficientnet_b3c",
**kwargs)
def efficientnet_b4c(in_size=(380, 380), **kwargs):
"""
EfficientNet-B4-c (like TF-implementation, trained with AdvProp) model from 'EfficientNet: Rethinking Model Scaling
for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946.
Parameters:
----------
in_size : tuple of two ints, default (380, 380)
Spatial size of the expected input image.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_efficientnet(version="b4", in_size=in_size, tf_mode=True, bn_epsilon=1e-3, model_name="efficientnet_b4c",
**kwargs)
def efficientnet_b5c(in_size=(456, 456), **kwargs):
"""
EfficientNet-B5-c (like TF-implementation, trained with AdvProp) model from 'EfficientNet: Rethinking Model Scaling
for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946.
Parameters:
----------
in_size : tuple of two ints, default (456, 456)
Spatial size of the expected input image.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_efficientnet(version="b5", in_size=in_size, tf_mode=True, bn_epsilon=1e-3, model_name="efficientnet_b5c",
**kwargs)
def efficientnet_b6c(in_size=(528, 528), **kwargs):
"""
EfficientNet-B6-c (like TF-implementation, trained with AdvProp) model from 'EfficientNet: Rethinking Model Scaling
for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946.
Parameters:
----------
in_size : tuple of two ints, default (528, 528)
Spatial size of the expected input image.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_efficientnet(version="b6", in_size=in_size, tf_mode=True, bn_epsilon=1e-3, model_name="efficientnet_b6c",
**kwargs)
def efficientnet_b7c(in_size=(600, 600), **kwargs):
"""
EfficientNet-B7-c (like TF-implementation, trained with AdvProp) model from 'EfficientNet: Rethinking Model Scaling
for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946.
Parameters:
----------
in_size : tuple of two ints, default (600, 600)
Spatial size of the expected input image.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_efficientnet(version="b7", in_size=in_size, tf_mode=True, bn_epsilon=1e-3, model_name="efficientnet_b7c",
**kwargs)
def efficientnet_b8c(in_size=(672, 672), **kwargs):
"""
EfficientNet-B8-c (like TF-implementation, trained with AdvProp) model from 'EfficientNet: Rethinking Model Scaling
for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946.
Parameters:
----------
in_size : tuple of two ints, default (672, 672)
Spatial size of the expected input image.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_efficientnet(version="b8", in_size=in_size, tf_mode=True, bn_epsilon=1e-3, model_name="efficientnet_b8c",
**kwargs)
def _calc_width(net):
import numpy as np
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
return weight_count
def _test():
import mxnet as mx
pretrained = False
fixed_size = True
models = [
efficientnet_b0,
efficientnet_b1,
efficientnet_b2,
efficientnet_b3,
efficientnet_b4,
efficientnet_b5,
efficientnet_b6,
efficientnet_b7,
efficientnet_b8,
efficientnet_b0b,
efficientnet_b1b,
efficientnet_b2b,
efficientnet_b3b,
efficientnet_b4b,
efficientnet_b5b,
efficientnet_b6b,
efficientnet_b7b,
efficientnet_b0c,
efficientnet_b1c,
efficientnet_b2c,
efficientnet_b3c,
efficientnet_b4c,
efficientnet_b5c,
efficientnet_b6c,
efficientnet_b7c,
efficientnet_b8c,
]
for model in models:
net = model(pretrained=pretrained, fixed_size=fixed_size)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
net.hybridize()
weight_count = _calc_width(net)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != efficientnet_b0 or weight_count == 5288548)
assert (model != efficientnet_b1 or weight_count == 7794184)
assert (model != efficientnet_b2 or weight_count == 9109994)
assert (model != efficientnet_b3 or weight_count == 12233232)
assert (model != efficientnet_b4 or weight_count == 19341616)
assert (model != efficientnet_b5 or weight_count == 30389784)
assert (model != efficientnet_b6 or weight_count == 43040704)
assert (model != efficientnet_b7 or weight_count == 66347960)
assert (model != efficientnet_b8 or weight_count == 87413142)
assert (model != efficientnet_b0b or weight_count == 5288548)
assert (model != efficientnet_b1b or weight_count == 7794184)
assert (model != efficientnet_b2b or weight_count == 9109994)
assert (model != efficientnet_b3b or weight_count == 12233232)
assert (model != efficientnet_b4b or weight_count == 19341616)
assert (model != efficientnet_b5b or weight_count == 30389784)
assert (model != efficientnet_b6b or weight_count == 43040704)
assert (model != efficientnet_b7b or weight_count == 66347960)
batch = 4
classes = 1000
x = mx.nd.random.normal(shape=(batch, 3, net.in_size[0], net.in_size[1]), ctx=ctx)
y = net(x)
assert (y.shape == (batch, classes))
if __name__ == "__main__":
_test()
| 44,055 | 37.713533 | 120 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/channelnet.py | """
ChannelNet for ImageNet-1K, implemented in Gluon.
Original paper: 'ChannelNets: Compact and Efficient Convolutional Neural Networks via Channel-Wise Convolutions,'
https://arxiv.org/abs/1809.01330.
"""
__all__ = ['ChannelNet', 'channelnet']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import ReLU6
def dwconv3x3(in_channels,
out_channels,
strides,
use_bias=False):
"""
3x3 depthwise version of the standard convolution layer.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
use_bias : bool, default False
Whether the layer uses a bias vector.
"""
return nn.Conv2D(
channels=out_channels,
kernel_size=3,
strides=strides,
padding=1,
groups=out_channels,
use_bias=use_bias,
in_channels=in_channels)
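# Note: with groups=out_channels the layer above is a true depthwise
# convolution only when in_channels == out_channels, which is how the call
# site in this file (ChannetDwsConvBlock) uses it.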
class ChannetConv(HybridBlock):
"""
ChannelNet specific convolution block with Batch normalization and ReLU6 activation.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of 2 int
Convolution window size.
strides : int or tuple/list of 2 int
Strides of the convolution.
padding : int or tuple/list of 2 int
Padding value for convolution layer.
dilation : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
groups : int, default 1
Number of groups.
use_bias : bool, default False
Whether the layer uses a bias vector.
dropout_rate : float, default 0.0
Dropout rate.
activate : bool, default True
Whether activate the convolution block.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
strides,
padding,
dilation=1,
groups=1,
use_bias=False,
dropout_rate=0.0,
activate=True,
**kwargs):
super(ChannetConv, self).__init__(**kwargs)
self.use_dropout = (dropout_rate > 0.0)
self.activate = activate
with self.name_scope():
self.conv = nn.Conv2D(
channels=out_channels,
kernel_size=kernel_size,
strides=strides,
padding=padding,
dilation=dilation,
groups=groups,
use_bias=use_bias,
in_channels=in_channels)
if self.use_dropout:
self.dropout = nn.Dropout(rate=dropout_rate)
self.bn = nn.BatchNorm(
in_channels=out_channels)
if self.activate:
self.activ = ReLU6()
def hybrid_forward(self, F, x):
x = self.conv(x)
if self.use_dropout:
x = self.dropout(x)
x = self.bn(x)
if self.activate:
x = self.activ(x)
return x
def channet_conv1x1(in_channels,
out_channels,
strides=1,
groups=1,
use_bias=False,
dropout_rate=0.0,
activate=True):
"""
1x1 version of ChannelNet specific convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int, default 1
Strides of the convolution.
groups : int, default 1
Number of groups.
use_bias : bool, default False
Whether the layer uses a bias vector.
dropout_rate : float, default 0.0
Dropout rate.
activate : bool, default True
Whether activate the convolution block.
"""
return ChannetConv(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=1,
strides=strides,
padding=0,
groups=groups,
use_bias=use_bias,
dropout_rate=dropout_rate,
activate=activate)
def channet_conv3x3(in_channels,
out_channels,
strides,
padding=1,
dilation=1,
groups=1,
use_bias=False,
dropout_rate=0.0,
activate=True):
"""
3x3 version of ChannelNet specific convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
padding : int or tuple/list of 2 int, default 1
Padding value for convolution layer.
dilation : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
groups : int, default 1
Number of groups.
use_bias : bool, default False
Whether the layer uses a bias vector.
dropout_rate : float, default 0.0
Dropout rate.
activate : bool, default True
Whether activate the convolution block.
"""
return ChannetConv(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
strides=strides,
padding=padding,
dilation=dilation,
groups=groups,
use_bias=use_bias,
dropout_rate=dropout_rate,
activate=activate)
class ChannetDwsConvBlock(HybridBlock):
"""
ChannelNet specific depthwise separable convolution block with BatchNorm and activation only after the last
convolution layer.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
groups : int, default 1
Number of groups.
dropout_rate : float, default 0.0
Dropout rate.
"""
def __init__(self,
in_channels,
out_channels,
strides,
groups=1,
dropout_rate=0.0,
**kwargs):
super(ChannetDwsConvBlock, self).__init__(**kwargs)
with self.name_scope():
self.dw_conv = dwconv3x3(
in_channels=in_channels,
out_channels=in_channels,
strides=strides)
self.pw_conv = channet_conv1x1(
in_channels=in_channels,
out_channels=out_channels,
groups=groups,
dropout_rate=dropout_rate)
def hybrid_forward(self, F, x):
x = self.dw_conv(x)
x = self.pw_conv(x)
return x
class SimpleGroupBlock(HybridBlock):
"""
ChannelNet specific block with a sequence of depthwise separable group convolution layers.
Parameters:
----------
channels : int
Number of input/output channels.
multi_blocks : int
Number of DWS layers in the sequence.
groups : int
Number of groups.
dropout_rate : float
Dropout rate.
"""
def __init__(self,
channels,
multi_blocks,
groups,
dropout_rate,
**kwargs):
super(SimpleGroupBlock, self).__init__(**kwargs)
with self.name_scope():
self.blocks = nn.HybridSequential(prefix="")
for i in range(multi_blocks):
self.blocks.add(ChannetDwsConvBlock(
in_channels=channels,
out_channels=channels,
strides=1,
groups=groups,
dropout_rate=dropout_rate))
def hybrid_forward(self, F, x):
x = self.blocks(x)
return x
class ChannelwiseConv2d(HybridBlock):
"""
ChannelNet specific block with channel-wise convolution.
Parameters:
----------
groups : int
Number of groups.
dropout_rate : float
Dropout rate.
"""
def __init__(self,
groups,
dropout_rate,
**kwargs):
super(ChannelwiseConv2d, self).__init__(**kwargs)
self.use_dropout = (dropout_rate > 0.0)
with self.name_scope():
self.conv = nn.Conv3D(
channels=groups,
kernel_size=(4 * groups, 1, 1),
strides=(groups, 1, 1),
padding=(2 * groups - 1, 0, 0),
use_bias=False,
in_channels=1)
if self.use_dropout:
self.dropout = nn.Dropout(rate=dropout_rate)
def hybrid_forward(self, F, x):
x = x.expand_dims(axis=1)
x = self.conv(x)
if self.use_dropout:
x = self.dropout(x)
x = x.reshape((0, -3, -2))
return x
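# Shape bookkeeping for ChannelwiseConv2d, for reference: expand_dims inserts
# a singleton axis so the 3D convolution slides a (4 * groups)-wide window
# along the channel dimension, and reshape((0, -3, -2)) merges the resulting
# group and channel axes back (in MXNet reshape, 0 keeps a dimension, -3
# fuses the next two, -2 copies the remainder).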
class ConvGroupBlock(HybridBlock):
"""
ChannelNet specific block combining a channel-wise convolution with depthwise separable group convolutions.
Parameters:
----------
channels : int
Number of input/output channels.
multi_blocks : int
Number of DWS layers in the sequence.
groups : int
Number of groups.
dropout_rate : float
Dropout rate.
"""
def __init__(self,
channels,
multi_blocks,
groups,
dropout_rate,
**kwargs):
super(ConvGroupBlock, self).__init__(**kwargs)
with self.name_scope():
self.conv = ChannelwiseConv2d(
groups=groups,
dropout_rate=dropout_rate)
self.block = SimpleGroupBlock(
channels=channels,
multi_blocks=multi_blocks,
groups=groups,
dropout_rate=dropout_rate)
def hybrid_forward(self, F, x):
x = self.conv(x)
x = self.block(x)
return x
class ChannetUnit(nn.HybridBlock):
"""
ChannelNet unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels_list : tuple/list of 2 int
Number of output channels for each sub-block.
strides : int or tuple/list of 2 int
Strides of the convolution.
multi_blocks : int
Number of DWS layers in the sequence.
groups : int
Number of groups.
dropout_rate : float
Dropout rate.
block_names : tuple/list of 2 str
Sub-block names.
merge_type : str
Type of sub-block output merging.
"""
def __init__(self,
in_channels,
out_channels_list,
strides,
multi_blocks,
groups,
dropout_rate,
block_names,
merge_type,
**kwargs):
super(ChannetUnit, self).__init__(**kwargs)
assert (len(block_names) == 2)
assert (merge_type in ["seq", "add", "cat"])
self.merge_type = merge_type
with self.name_scope():
self.blocks = nn.HybridSequential(prefix="")
for i, (out_channels, block_name) in enumerate(zip(out_channels_list, block_names)):
strides_i = (strides if i == 0 else 1)
if block_name == "channet_conv3x3":
self.blocks.add(channet_conv3x3(
in_channels=in_channels,
out_channels=out_channels,
strides=strides_i,
dropout_rate=dropout_rate,
activate=False))
elif block_name == "channet_dws_conv_block":
self.blocks.add(ChannetDwsConvBlock(
in_channels=in_channels,
out_channels=out_channels,
strides=strides_i,
dropout_rate=dropout_rate))
elif block_name == "simple_group_block":
self.blocks.add(SimpleGroupBlock(
channels=in_channels,
multi_blocks=multi_blocks,
groups=groups,
dropout_rate=dropout_rate))
elif block_name == "conv_group_block":
self.blocks.add(ConvGroupBlock(
channels=in_channels,
multi_blocks=multi_blocks,
groups=groups,
dropout_rate=dropout_rate))
else:
raise NotImplementedError()
in_channels = out_channels
def hybrid_forward(self, F, x):
x_outs = []
for block in self.blocks._children.values():
x = block(x)
x_outs.append(x)
if self.merge_type == "add":
for i in range(len(x_outs) - 1):
x = x + x_outs[i]
elif self.merge_type == "cat":
x = F.concat(*x_outs, dim=1)
return x
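# Merge semantics above, spelled out for reference: 'seq' returns only the
# last sub-block's output, 'add' sums the earlier sub-block outputs into it,
# and 'cat' concatenates all sub-block outputs along the channel axis (which
# is why ChannelNet widens in_channels to sum(out_channels) for 'cat' units).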
class ChannelNet(HybridBlock):
"""
ChannelNet model from 'ChannelNets: Compact and Efficient Convolutional Neural Networks via Channel-Wise
Convolutions,' https://arxiv.org/abs/1809.01330.
Parameters:
----------
channels : list of list of list of int
Number of output channels for each unit.
block_names : list of list of list of str
Names of blocks for each unit.
merge_types : list of list of str
Merge types for each unit.
dropout_rate : float, default 0.0001
Dropout rate.
multi_blocks : int, default 2
Block count architectural parameter.
groups : int, default 2
Group count architectural parameter.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
block_names,
merge_types,
dropout_rate=0.0001,
multi_blocks=2,
groups=2,
in_channels=3,
in_size=(224, 224),
classes=1000,
**kwargs):
super(ChannelNet, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
for i, channels_per_stage in enumerate(channels):
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
with stage.name_scope():
for j, out_channels in enumerate(channels_per_stage):
strides = 2 if (j == 0) else 1
stage.add(ChannetUnit(
in_channels=in_channels,
out_channels_list=out_channels,
strides=strides,
multi_blocks=multi_blocks,
groups=groups,
dropout_rate=dropout_rate,
block_names=block_names[i][j],
merge_type=merge_types[i][j]))
if merge_types[i][j] == "cat":
in_channels = sum(out_channels)
else:
in_channels = out_channels[-1]
self.features.add(stage)
self.features.add(nn.AvgPool2D(
pool_size=7,
strides=1))
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
self.output.add(nn.Dense(
units=classes,
in_units=in_channels))
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_channelnet(model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create ChannelNet model with specific parameters.
Parameters:
----------
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
channels = [[[32, 64]], [[128, 128]], [[256, 256]], [[512, 512], [512, 512]], [[1024, 1024]]]
block_names = [[["channet_conv3x3", "channet_dws_conv_block"]],
[["channet_dws_conv_block", "channet_dws_conv_block"]],
[["channet_dws_conv_block", "channet_dws_conv_block"]],
[["channet_dws_conv_block", "simple_group_block"], ["conv_group_block", "conv_group_block"]],
[["channet_dws_conv_block", "channet_dws_conv_block"]]]
merge_types = [["cat"], ["cat"], ["cat"], ["add", "add"], ["seq"]]
net = ChannelNet(
channels=channels,
block_names=block_names,
merge_types=merge_types,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def channelnet(**kwargs):
"""
ChannelNet model from 'ChannelNets: Compact and Efficient Convolutional Neural Networks via Channel-Wise
Convolutions,' https://arxiv.org/abs/1809.01330.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_channelnet(model_name="channelnet", **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
channelnet,
]
for model in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != channelnet or weight_count == 3875112)
x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
y = net(x)
assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
| 19,449 | 30.677524 | 117 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/pnasnet.py | """
PNASNet for ImageNet-1K, implemented in Gluon.
Original paper: 'Progressive Neural Architecture Search,' https://arxiv.org/abs/1712.00559.
"""
__all__ = ['PNASNet', 'pnasnet5large']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1
from .nasnet import nasnet_dual_path_sequential, nasnet_batch_norm, NasConv, NasDwsConv, NasPathBlock, NASNetInitBlock,\
process_with_padding
class PnasMaxPoolBlock(HybridBlock):
"""
PNASNet specific Max pooling layer with extra padding.
Parameters:
----------
strides : int or tuple/list of 2 int, default 2
        Strides of the pooling window.
extra_padding : bool, default False
Whether to use extra padding.
"""
def __init__(self,
strides=2,
extra_padding=False,
**kwargs):
super(PnasMaxPoolBlock, self).__init__(**kwargs)
self.extra_padding = extra_padding
with self.name_scope():
self.pool = nn.MaxPool2D(
pool_size=3,
strides=strides,
padding=1)
def hybrid_forward(self, F, x):
if self.extra_padding:
x = process_with_padding(x, F, self.pool)
else:
x = self.pool(x)
return x
def pnas_conv1x1(in_channels,
out_channels,
strides=1):
"""
1x1 version of the PNASNet specific convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int, default 1
Strides of the convolution.
"""
return NasConv(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=1,
strides=strides,
padding=0,
groups=1)
class DwsBranch(HybridBlock):
"""
PNASNet specific block with depthwise separable convolution layers.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of 2 int
Convolution window size.
strides : int or tuple/list of 2 int
Strides of the convolution.
extra_padding : bool, default False
Whether to use extra padding.
    stem : bool, default False
        Whether the branch is used as a stem block (middle channels are derived from the output channels
        rather than the input channels).
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
strides,
extra_padding=False,
stem=False,
**kwargs):
super(DwsBranch, self).__init__(**kwargs)
assert (not stem) or (not extra_padding)
mid_channels = out_channels if stem else in_channels
padding = kernel_size // 2
with self.name_scope():
self.conv1 = NasDwsConv(
in_channels=in_channels,
out_channels=mid_channels,
kernel_size=kernel_size,
strides=strides,
padding=padding,
extra_padding=extra_padding)
self.conv2 = NasDwsConv(
in_channels=mid_channels,
out_channels=out_channels,
kernel_size=kernel_size,
strides=1,
padding=padding)
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.conv2(x)
return x
def dws_branch_k3(in_channels,
out_channels,
strides=2,
extra_padding=False,
stem=False):
"""
3x3 version of the PNASNet specific depthwise separable convolution branch.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int, default 2
Strides of the convolution.
extra_padding : bool, default False
Whether to use extra padding.
    stem : bool, default False
        Whether the branch is used as a stem block (middle channels are derived from the output channels
        rather than the input channels).
"""
return DwsBranch(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
strides=strides,
extra_padding=extra_padding,
stem=stem)
def dws_branch_k5(in_channels,
out_channels,
strides=2,
extra_padding=False,
stem=False):
"""
5x5 version of the PNASNet specific depthwise separable convolution branch.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int, default 2
Strides of the convolution.
extra_padding : bool, default False
Whether to use extra padding.
    stem : bool, default False
        Whether the branch is used as a stem block (middle channels are derived from the output channels
        rather than the input channels).
"""
return DwsBranch(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=5,
strides=strides,
extra_padding=extra_padding,
stem=stem)
def dws_branch_k7(in_channels,
out_channels,
strides=2,
extra_padding=False):
"""
7x7 version of the PNASNet specific depthwise separable convolution branch.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int, default 2
Strides of the convolution.
extra_padding : bool, default False
Whether to use extra padding.
"""
return DwsBranch(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=7,
strides=strides,
extra_padding=extra_padding,
stem=False)
class PnasMaxPathBlock(HybridBlock):
"""
PNASNet specific `max path` auxiliary block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
"""
def __init__(self,
in_channels,
out_channels,
**kwargs):
super(PnasMaxPathBlock, self).__init__(**kwargs)
with self.name_scope():
self.maxpool = PnasMaxPoolBlock()
self.conv = conv1x1(
in_channels=in_channels,
out_channels=out_channels)
self.bn = nasnet_batch_norm(channels=out_channels)
def hybrid_forward(self, F, x):
x = self.maxpool(x)
x = self.conv(x)
x = self.bn(x)
return x
class PnasBaseUnit(HybridBlock):
"""
PNASNet base unit.
"""
def __init__(self,
**kwargs):
super(PnasBaseUnit, self).__init__(**kwargs)
def cell_forward(self, F, x, x_prev):
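        # Five left/right branch pairs are computed and concatenated along the
        # channel axis, so the output width is 5 * mid_channels; this is why the
        # units derive mid_channels as out_channels // 5.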
assert (hasattr(self, 'comb0_left'))
x_left = x_prev
x_right = x
x0 = self.comb0_left(x_left) + self.comb0_right(x_left)
x1 = self.comb1_left(x_right) + self.comb1_right(x_right)
x2 = self.comb2_left(x_right) + self.comb2_right(x_right)
x3 = self.comb3_left(x2) + self.comb3_right(x_right)
x4 = self.comb4_left(x_left) + (self.comb4_right(x_right) if self.comb4_right else x_right)
x_out = F.concat(x0, x1, x2, x3, x4, dim=1)
return x_out
class Stem1Unit(PnasBaseUnit):
"""
PNASNet Stem1 unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
"""
def __init__(self,
in_channels,
out_channels,
**kwargs):
super(Stem1Unit, self).__init__(**kwargs)
mid_channels = out_channels // 5
with self.name_scope():
self.conv_1x1 = pnas_conv1x1(
in_channels=in_channels,
out_channels=mid_channels)
self.comb0_left = dws_branch_k5(
in_channels=in_channels,
out_channels=mid_channels,
stem=True)
self.comb0_right = PnasMaxPathBlock(
in_channels=in_channels,
out_channels=mid_channels)
self.comb1_left = dws_branch_k7(
in_channels=mid_channels,
out_channels=mid_channels)
self.comb1_right = PnasMaxPoolBlock()
self.comb2_left = dws_branch_k5(
in_channels=mid_channels,
out_channels=mid_channels)
self.comb2_right = dws_branch_k3(
in_channels=mid_channels,
out_channels=mid_channels)
self.comb3_left = dws_branch_k3(
in_channels=mid_channels,
out_channels=mid_channels,
strides=1)
self.comb3_right = PnasMaxPoolBlock()
self.comb4_left = dws_branch_k3(
in_channels=in_channels,
out_channels=mid_channels,
stem=True)
self.comb4_right = pnas_conv1x1(
in_channels=mid_channels,
out_channels=mid_channels,
strides=2)
def hybrid_forward(self, F, x):
x_prev = x
x = self.conv_1x1(x)
x_out = self.cell_forward(F, x, x_prev)
return x_out
class PnasUnit(PnasBaseUnit):
"""
PNASNet ordinary unit.
Parameters:
----------
in_channels : int
Number of input channels.
prev_in_channels : int
Number of input channels in previous input.
out_channels : int
Number of output channels.
reduction : bool, default False
Whether to use reduction.
extra_padding : bool, default False
Whether to use extra padding.
match_prev_layer_dimensions : bool, default False
Whether to match previous layer dimensions.
"""
def __init__(self,
in_channels,
prev_in_channels,
out_channels,
reduction=False,
extra_padding=False,
match_prev_layer_dimensions=False,
**kwargs):
super(PnasUnit, self).__init__(**kwargs)
mid_channels = out_channels // 5
stride = 2 if reduction else 1
with self.name_scope():
if match_prev_layer_dimensions:
self.conv_prev_1x1 = NasPathBlock(
in_channels=prev_in_channels,
out_channels=mid_channels)
else:
self.conv_prev_1x1 = pnas_conv1x1(
in_channels=prev_in_channels,
out_channels=mid_channels)
self.conv_1x1 = pnas_conv1x1(
in_channels=in_channels,
out_channels=mid_channels)
self.comb0_left = dws_branch_k5(
in_channels=mid_channels,
out_channels=mid_channels,
strides=stride,
extra_padding=extra_padding)
self.comb0_right = PnasMaxPoolBlock(
strides=stride,
extra_padding=extra_padding)
self.comb1_left = dws_branch_k7(
in_channels=mid_channels,
out_channels=mid_channels,
strides=stride,
extra_padding=extra_padding)
self.comb1_right = PnasMaxPoolBlock(
strides=stride,
extra_padding=extra_padding)
self.comb2_left = dws_branch_k5(
in_channels=mid_channels,
out_channels=mid_channels,
strides=stride,
extra_padding=extra_padding)
self.comb2_right = dws_branch_k3(
in_channels=mid_channels,
out_channels=mid_channels,
strides=stride,
extra_padding=extra_padding)
self.comb3_left = dws_branch_k3(
in_channels=mid_channels,
out_channels=mid_channels,
strides=1)
self.comb3_right = PnasMaxPoolBlock(
strides=stride,
extra_padding=extra_padding)
self.comb4_left = dws_branch_k3(
in_channels=mid_channels,
out_channels=mid_channels,
strides=stride,
extra_padding=extra_padding)
if reduction:
self.comb4_right = pnas_conv1x1(
in_channels=mid_channels,
out_channels=mid_channels,
strides=stride)
else:
self.comb4_right = None
def hybrid_forward(self, F, x, x_prev):
x_prev = self.conv_prev_1x1(x_prev)
x = self.conv_1x1(x)
x_out = self.cell_forward(F, x, x_prev)
return x_out
class PNASNet(HybridBlock):
"""
PNASNet model from 'Progressive Neural Architecture Search,' https://arxiv.org/abs/1712.00559.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
stem1_blocks_channels : list of 2 int
Number of output channels for the Stem1 unit.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (331, 331)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
stem1_blocks_channels,
in_channels=3,
in_size=(331, 331),
classes=1000,
**kwargs):
super(PNASNet, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
with self.name_scope():
self.features = nasnet_dual_path_sequential(
return_two=False,
first_ordinals=2,
last_ordinals=2)
self.features.add(NASNetInitBlock(
in_channels=in_channels,
out_channels=init_block_channels))
in_channels = init_block_channels
self.features.add(Stem1Unit(
in_channels=in_channels,
out_channels=stem1_blocks_channels))
prev_in_channels = in_channels
in_channels = stem1_blocks_channels
for i, channels_per_stage in enumerate(channels):
stage = nasnet_dual_path_sequential(prefix="stage{}_".format(i + 1))
for j, out_channels in enumerate(channels_per_stage):
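                    # The first unit of every stage halves the resolution; extra
                    # TF-style padding is applied only for reduction units outside
                    # the first and third stages, and the previous-path projection
                    # block is used right after each reduction as well as in the
                    # first ordinary unit.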
reduction = (j == 0)
extra_padding = (j == 0) and (i not in [0, 2])
match_prev_layer_dimensions = (j == 1) or ((j == 0) and (i == 0))
stage.add(PnasUnit(
in_channels=in_channels,
prev_in_channels=prev_in_channels,
out_channels=out_channels,
reduction=reduction,
extra_padding=extra_padding,
match_prev_layer_dimensions=match_prev_layer_dimensions))
prev_in_channels = in_channels
in_channels = out_channels
self.features.add(stage)
self.features.add(nn.Activation("relu"))
self.features.add(nn.AvgPool2D(
pool_size=11,
strides=1))
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
self.output.add(nn.Dropout(rate=0.5))
self.output.add(nn.Dense(
units=classes,
in_units=in_channels))
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_pnasnet(model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create PNASNet model with specific parameters.
Parameters:
----------
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
repeat = 4
init_block_channels = 96
stem_blocks_channels = [270, 540]
norm_channels = [1080, 2160, 4320]
channels = [[ci] * repeat for ci in norm_channels]
stem1_blocks_channels = stem_blocks_channels[0]
channels[0] = [stem_blocks_channels[1]] + channels[0]
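    # Resulting channel schedule (the second stem cell, 540 channels, is
    # prepended to the first stage):
    # [[540, 1080, 1080, 1080, 1080], [2160, 2160, 2160, 2160],
    #  [4320, 4320, 4320, 4320]]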
net = PNASNet(
channels=channels,
init_block_channels=init_block_channels,
stem1_blocks_channels=stem1_blocks_channels,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def pnasnet5large(**kwargs):
"""
PNASNet-5-Large model from 'Progressive Neural Architecture Search,' https://arxiv.org/abs/1712.00559.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_pnasnet(model_name="pnasnet5large", **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
pnasnet5large,
]
for model in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != pnasnet5large or weight_count == 86057668)
x = mx.nd.zeros((1, 3, 331, 331), ctx=ctx)
y = net(x)
assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
| 18,996 | 30.091653 | 120 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/efficientnetedge.py | """
EfficientNet-Edge for ImageNet-1K, implemented in Gluon.
Original paper: 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,'
https://arxiv.org/abs/1905.11946.
"""
__all__ = ['EfficientNetEdge', 'efficientnet_edge_small_b', 'efficientnet_edge_medium_b', 'efficientnet_edge_large_b']
import os
import math
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import round_channels, conv1x1_block, conv3x3_block, SEBlock
from .efficientnet import EffiInvResUnit, EffiInitBlock
class EffiEdgeResUnit(HybridBlock):
"""
EfficientNet-Edge edge residual unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the second convolution layer.
exp_factor : int
Factor for expansion of channels.
se_factor : int
SE reduction factor for each unit.
mid_from_in : bool
Whether to use input channel count for middle channel count calculation.
use_skip : bool
Whether to use skip connection.
bn_epsilon : float
Small float added to variance in Batch norm.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
activation : str
Name of activation function.
"""
def __init__(self,
in_channels,
out_channels,
strides,
exp_factor,
se_factor,
mid_from_in,
use_skip,
bn_epsilon,
bn_use_global_stats,
activation,
**kwargs):
super(EffiEdgeResUnit, self).__init__(**kwargs)
self.residual = (in_channels == out_channels) and (strides == 1) and use_skip
self.use_se = se_factor > 0
mid_channels = in_channels * exp_factor if mid_from_in else out_channels * exp_factor
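        # Edge residual block: a full 3x3 convolution expands the channels and a
        # 1x1 convolution projects them back, replacing the 1x1-expansion +
        # depthwise-3x3 pair of the standard inverted residual unit.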
with self.name_scope():
self.conv1 = conv3x3_block(
in_channels=in_channels,
out_channels=mid_channels,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
activation=activation)
if self.use_se:
self.se = SEBlock(
channels=mid_channels,
reduction=(exp_factor * se_factor),
mid_activation=activation)
self.conv2 = conv1x1_block(
in_channels=mid_channels,
out_channels=out_channels,
strides=strides,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
activation=None)
def hybrid_forward(self, F, x):
if self.residual:
identity = x
x = self.conv1(x)
if self.use_se:
x = self.se(x)
x = self.conv2(x)
if self.residual:
x = x + identity
return x
class EfficientNetEdge(HybridBlock):
"""
EfficientNet-Edge model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,'
https://arxiv.org/abs/1905.11946.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for initial unit.
final_block_channels : int
Number of output channels for the final block of the feature extractor.
kernel_sizes : list of list of int
Number of kernel sizes for each unit.
    strides_per_stage : list of int
Stride value for the first unit of each stage.
expansion_factors : list of list of int
Number of expansion factors for each unit.
dropout_rate : float, default 0.2
Fraction of the input units to drop. Must be a number between 0 and 1.
tf_mode : bool, default False
Whether to use TF-like mode.
bn_epsilon : float, default 1e-5
Small float added to variance in Batch norm.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
final_block_channels,
kernel_sizes,
strides_per_stage,
expansion_factors,
dropout_rate=0.2,
tf_mode=False,
bn_epsilon=1e-5,
bn_use_global_stats=False,
in_channels=3,
in_size=(224, 224),
classes=1000,
**kwargs):
super(EfficientNetEdge, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
activation = "relu"
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
self.features.add(EffiInitBlock(
in_channels=in_channels,
out_channels=init_block_channels,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
activation=activation,
tf_mode=tf_mode))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
kernel_sizes_per_stage = kernel_sizes[i]
expansion_factors_per_stage = expansion_factors[i]
mid_from_in = (i != 0)
use_skip = (i != 0)
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
with stage.name_scope():
for j, out_channels in enumerate(channels_per_stage):
kernel_size = kernel_sizes_per_stage[j]
expansion_factor = expansion_factors_per_stage[j]
strides = strides_per_stage[i] if (j == 0) else 1
if i < 3:
stage.add(EffiEdgeResUnit(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
exp_factor=expansion_factor,
se_factor=0,
mid_from_in=mid_from_in,
use_skip=use_skip,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
activation=activation))
else:
stage.add(EffiInvResUnit(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
strides=strides,
exp_factor=expansion_factor,
se_factor=0,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
activation=activation,
tf_mode=tf_mode))
in_channels = out_channels
self.features.add(stage)
self.features.add(conv1x1_block(
in_channels=in_channels,
out_channels=final_block_channels,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
activation=activation))
in_channels = final_block_channels
self.features.add(nn.GlobalAvgPool2D())
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
if dropout_rate > 0.0:
self.output.add(nn.Dropout(rate=dropout_rate))
self.output.add(nn.Dense(
units=classes,
in_units=in_channels))
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_efficientnet_edge(version,
in_size,
tf_mode=False,
bn_epsilon=1e-5,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create EfficientNet-Edge model with specific parameters.
Parameters:
----------
version : str
        Version of EfficientNet-Edge ('small', 'medium' or 'large').
in_size : tuple of two ints
Spatial size of the expected input image.
tf_mode : bool, default False
Whether to use TF-like mode.
bn_epsilon : float, default 1e-5
Small float added to variance in Batch norm.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
dropout_rate = 0.0
if version == "small":
assert (in_size == (224, 224))
depth_factor = 1.0
width_factor = 1.0
# dropout_rate = 0.2
elif version == "medium":
assert (in_size == (240, 240))
depth_factor = 1.1
width_factor = 1.0
# dropout_rate = 0.2
elif version == "large":
assert (in_size == (300, 300))
depth_factor = 1.4
width_factor = 1.2
# dropout_rate = 0.3
else:
raise ValueError("Unsupported EfficientNet-Edge version {}".format(version))
init_block_channels = 32
layers = [1, 2, 4, 5, 4, 2]
downsample = [1, 1, 1, 1, 0, 1]
channels_per_layers = [24, 32, 48, 96, 144, 192]
expansion_factors_per_layers = [4, 8, 8, 8, 8, 8]
kernel_sizes_per_layers = [3, 3, 3, 5, 5, 5]
strides_per_stage = [1, 2, 2, 2, 1, 2]
final_block_channels = 1280
layers = [int(math.ceil(li * depth_factor)) for li in layers]
channels_per_layers = [round_channels(ci * width_factor) for ci in channels_per_layers]
from functools import reduce
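    # reduce folds the per-layer settings into stages: a 0 in `downsample` merges
    # that group of layers into the previous stage. E.g. with depth/width factors
    # of 1.0 (the "small" version), channels becomes
    # [[24], [32, 32], [48, 48, 48, 48],
    #  [96, 96, 96, 96, 96, 144, 144, 144, 144], [192, 192]].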
channels = reduce(lambda x, y: x + [[y[0]] * y[1]] if y[2] != 0 else x[:-1] + [x[-1] + [y[0]] * y[1]],
zip(channels_per_layers, layers, downsample), [])
kernel_sizes = reduce(lambda x, y: x + [[y[0]] * y[1]] if y[2] != 0 else x[:-1] + [x[-1] + [y[0]] * y[1]],
zip(kernel_sizes_per_layers, layers, downsample), [])
expansion_factors = reduce(lambda x, y: x + [[y[0]] * y[1]] if y[2] != 0 else x[:-1] + [x[-1] + [y[0]] * y[1]],
zip(expansion_factors_per_layers, layers, downsample), [])
strides_per_stage = reduce(lambda x, y: x + [[y[0]] * y[1]] if y[2] != 0 else x[:-1] + [x[-1] + [y[0]] * y[1]],
zip(strides_per_stage, layers, downsample), [])
strides_per_stage = [si[0] for si in strides_per_stage]
init_block_channels = round_channels(init_block_channels * width_factor)
if width_factor > 1.0:
assert (int(final_block_channels * width_factor) == round_channels(final_block_channels * width_factor))
final_block_channels = round_channels(final_block_channels * width_factor)
net = EfficientNetEdge(
channels=channels,
init_block_channels=init_block_channels,
final_block_channels=final_block_channels,
kernel_sizes=kernel_sizes,
strides_per_stage=strides_per_stage,
expansion_factors=expansion_factors,
dropout_rate=dropout_rate,
tf_mode=tf_mode,
bn_epsilon=bn_epsilon,
in_size=in_size,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def efficientnet_edge_small_b(in_size=(224, 224), **kwargs):
"""
EfficientNet-Edge-Small-b model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,'
https://arxiv.org/abs/1905.11946.
Parameters:
----------
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_efficientnet_edge(version="small", in_size=in_size, tf_mode=True, bn_epsilon=1e-3,
model_name="efficientnet_edge_small_b", **kwargs)
def efficientnet_edge_medium_b(in_size=(240, 240), **kwargs):
"""
EfficientNet-Edge-Medium-b model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,'
https://arxiv.org/abs/1905.11946.
Parameters:
----------
in_size : tuple of two ints, default (240, 240)
Spatial size of the expected input image.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_efficientnet_edge(version="medium", in_size=in_size, tf_mode=True, bn_epsilon=1e-3,
model_name="efficientnet_edge_medium_b", **kwargs)
def efficientnet_edge_large_b(in_size=(300, 300), **kwargs):
"""
EfficientNet-Edge-Large-b model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,'
https://arxiv.org/abs/1905.11946.
Parameters:
----------
in_size : tuple of two ints, default (300, 300)
Spatial size of the expected input image.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_efficientnet_edge(version="large", in_size=in_size, tf_mode=True, bn_epsilon=1e-3,
model_name="efficientnet_edge_large_b", **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
efficientnet_edge_small_b,
efficientnet_edge_medium_b,
efficientnet_edge_large_b,
]
for model in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != efficientnet_edge_small_b or weight_count == 5438392)
assert (model != efficientnet_edge_medium_b or weight_count == 6899496)
assert (model != efficientnet_edge_large_b or weight_count == 10589712)
x = mx.nd.zeros((1, 3, net.in_size[0], net.in_size[1]), ctx=ctx)
y = net(x)
assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
| 16,355 | 37.850356 | 118 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/ibnresnext.py | """
IBN-ResNeXt for ImageNet-1K, implemented in Gluon.
Original paper: 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'
https://arxiv.org/abs/1807.09441.
"""
__all__ = ['IBNResNeXt', 'ibn_resnext50_32x4d', 'ibn_resnext101_32x4d', 'ibn_resnext101_64x4d']
import os
import math
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1_block, conv3x3_block
from .resnet import ResInitBlock
from .ibnresnet import ibn_conv1x1_block
class IBNResNeXtBottleneck(HybridBlock):
"""
IBN-ResNeXt bottleneck block for residual path in IBN-ResNeXt unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
cardinality: int
Number of groups.
bottleneck_width: int
Width of bottleneck block.
conv1_ibn : bool
Whether to use IBN normalization in the first convolution layer of the block.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels,
out_channels,
strides,
cardinality,
bottleneck_width,
conv1_ibn,
bn_use_global_stats,
**kwargs):
super(IBNResNeXtBottleneck, self).__init__(**kwargs)
mid_channels = out_channels // 4
D = int(math.floor(mid_channels * (bottleneck_width / 64.0)))
group_width = cardinality * D
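        # E.g. with cardinality=32, bottleneck_width=4 and out_channels=256:
        # mid_channels=64, D=4, group_width=128.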
with self.name_scope():
self.conv1 = ibn_conv1x1_block(
in_channels=in_channels,
out_channels=group_width,
use_ibn=conv1_ibn,
bn_use_global_stats=bn_use_global_stats)
self.conv2 = conv3x3_block(
in_channels=group_width,
out_channels=group_width,
strides=strides,
groups=cardinality,
bn_use_global_stats=bn_use_global_stats)
self.conv3 = conv1x1_block(
in_channels=group_width,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats,
activation=None)
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
return x
class IBNResNeXtUnit(HybridBlock):
"""
IBN-ResNeXt unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
cardinality: int
Number of groups.
bottleneck_width: int
Width of bottleneck block.
conv1_ibn : bool
Whether to use IBN normalization in the first convolution layer of the block.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels,
out_channels,
strides,
cardinality,
bottleneck_width,
conv1_ibn,
bn_use_global_stats,
**kwargs):
super(IBNResNeXtUnit, self).__init__(**kwargs)
self.resize_identity = (in_channels != out_channels) or (strides != 1)
with self.name_scope():
self.body = IBNResNeXtBottleneck(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
cardinality=cardinality,
bottleneck_width=bottleneck_width,
conv1_ibn=conv1_ibn,
bn_use_global_stats=bn_use_global_stats)
if self.resize_identity:
self.identity_conv = conv1x1_block(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats,
activation=None)
self.activ = nn.Activation("relu")
def hybrid_forward(self, F, x):
if self.resize_identity:
identity = self.identity_conv(x)
else:
identity = x
x = self.body(x)
x = x + identity
x = self.activ(x)
return x
class IBNResNeXt(HybridBlock):
"""
IBN-ResNeXt model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'
https://arxiv.org/abs/1807.09441.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
cardinality: int
Number of groups.
bottleneck_width: int
Width of bottleneck block.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
cardinality,
bottleneck_width,
bn_use_global_stats=False,
in_channels=3,
in_size=(224, 224),
classes=1000,
**kwargs):
super(IBNResNeXt, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
self.features.add(ResInitBlock(
in_channels=in_channels,
out_channels=init_block_channels,
bn_use_global_stats=bn_use_global_stats))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
with stage.name_scope():
for j, out_channels in enumerate(channels_per_stage):
strides = 2 if (j == 0) and (i != 0) else 1
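                    # IBN is applied in the first 1x1 convolution of every unit
                    # except those in the final 2048-channel stage.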
conv1_ibn = (out_channels < 2048)
stage.add(IBNResNeXtUnit(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
cardinality=cardinality,
bottleneck_width=bottleneck_width,
conv1_ibn=conv1_ibn,
bn_use_global_stats=bn_use_global_stats))
in_channels = out_channels
self.features.add(stage)
self.features.add(nn.AvgPool2D(
pool_size=7,
strides=1))
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
self.output.add(nn.Dense(
units=classes,
in_units=in_channels))
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_ibnresnext(blocks,
cardinality,
bottleneck_width,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create IBN-ResNeXt model with specific parameters.
Parameters:
----------
blocks : int
Number of blocks.
cardinality: int
Number of groups.
bottleneck_width: int
Width of bottleneck block.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
if blocks == 50:
layers = [3, 4, 6, 3]
elif blocks == 101:
layers = [3, 4, 23, 3]
else:
raise ValueError("Unsupported IBN-ResNeXt with number of blocks: {}".format(blocks))
init_block_channels = 64
channels_per_layers = [256, 512, 1024, 2048]
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
net = IBNResNeXt(
channels=channels,
init_block_channels=init_block_channels,
cardinality=cardinality,
bottleneck_width=bottleneck_width,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def ibn_resnext50_32x4d(**kwargs):
"""
IBN-ResNeXt-50 (32x4d) model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'
https://arxiv.org/abs/1807.09441.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_ibnresnext(blocks=50, cardinality=32, bottleneck_width=4, model_name="ibn_resnext50_32x4d", **kwargs)
def ibn_resnext101_32x4d(**kwargs):
"""
IBN-ResNeXt-101 (32x4d) model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'
https://arxiv.org/abs/1807.09441.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_ibnresnext(blocks=101, cardinality=32, bottleneck_width=4, model_name="ibn_resnext101_32x4d", **kwargs)
def ibn_resnext101_64x4d(**kwargs):
"""
IBN-ResNeXt-101 (64x4d) model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'
https://arxiv.org/abs/1807.09441.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_ibnresnext(blocks=101, cardinality=64, bottleneck_width=4, model_name="ibn_resnext101_64x4d", **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
ibn_resnext50_32x4d,
ibn_resnext101_32x4d,
ibn_resnext101_64x4d,
]
for model in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != ibn_resnext50_32x4d or weight_count == 25028904)
assert (model != ibn_resnext101_32x4d or weight_count == 44177704)
assert (model != ibn_resnext101_64x4d or weight_count == 83455272)
x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
y = net(x)
assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
| 12,457 | 32.853261 | 118 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/squeezenext.py | """
SqueezeNext for ImageNet-1K, implemented in Gluon.
Original paper: 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615.
"""
__all__ = ['SqueezeNext', 'sqnxt23_w1', 'sqnxt23_w3d2', 'sqnxt23_w2', 'sqnxt23v5_w1', 'sqnxt23v5_w3d2', 'sqnxt23v5_w2']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import ConvBlock, conv1x1_block, conv7x7_block
class SqnxtUnit(HybridBlock):
"""
SqueezeNext unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
"""
def __init__(self,
in_channels,
out_channels,
strides,
**kwargs):
super(SqnxtUnit, self).__init__(**kwargs)
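        # Bottleneck width scheme: stride-2 units keep the full input width
        # (reduction_den=1), channel-shrinking units squeeze by 4, and all other
        # units squeeze by 2; the identity branch is projected whenever the
        # spatial size or channel count changes.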
if strides == 2:
reduction_den = 1
self.resize_identity = True
elif in_channels > out_channels:
reduction_den = 4
self.resize_identity = True
else:
reduction_den = 2
self.resize_identity = False
with self.name_scope():
self.conv1 = conv1x1_block(
in_channels=in_channels,
out_channels=(in_channels // reduction_den),
strides=strides,
use_bias=True)
self.conv2 = conv1x1_block(
in_channels=(in_channels // reduction_den),
out_channels=(in_channels // (2 * reduction_den)),
use_bias=True)
self.conv3 = ConvBlock(
in_channels=(in_channels // (2 * reduction_den)),
out_channels=(in_channels // reduction_den),
kernel_size=(1, 3),
strides=1,
padding=(0, 1),
use_bias=True)
self.conv4 = ConvBlock(
in_channels=(in_channels // reduction_den),
out_channels=(in_channels // reduction_den),
kernel_size=(3, 1),
strides=1,
padding=(1, 0),
use_bias=True)
self.conv5 = conv1x1_block(
in_channels=(in_channels // reduction_den),
out_channels=out_channels,
use_bias=True)
if self.resize_identity:
self.identity_conv = conv1x1_block(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
use_bias=True)
self.activ = nn.Activation("relu")
def hybrid_forward(self, F, x):
if self.resize_identity:
identity = self.identity_conv(x)
else:
identity = x
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
x = self.conv4(x)
x = self.conv5(x)
x = x + identity
x = self.activ(x)
return x
class SqnxtInitBlock(HybridBlock):
"""
SqueezeNext specific initial block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
"""
def __init__(self,
in_channels,
out_channels,
**kwargs):
super(SqnxtInitBlock, self).__init__(**kwargs)
with self.name_scope():
self.conv = conv7x7_block(
in_channels=in_channels,
out_channels=out_channels,
strides=2,
padding=1,
use_bias=True)
self.pool = nn.MaxPool2D(
pool_size=3,
strides=2,
ceil_mode=True)
def hybrid_forward(self, F, x):
x = self.conv(x)
x = self.pool(x)
return x
class SqueezeNext(HybridBlock):
"""
SqueezeNext model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
final_block_channels : int
Number of output channels for the final block of the feature extractor.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
final_block_channels,
in_channels=3,
in_size=(224, 224),
classes=1000,
**kwargs):
super(SqueezeNext, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
self.features.add(SqnxtInitBlock(
in_channels=in_channels,
out_channels=init_block_channels))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
with stage.name_scope():
for j, out_channels in enumerate(channels_per_stage):
strides = 2 if (j == 0) and (i != 0) else 1
stage.add(SqnxtUnit(
in_channels=in_channels,
out_channels=out_channels,
strides=strides))
in_channels = out_channels
self.features.add(stage)
self.features.add(conv1x1_block(
in_channels=in_channels,
out_channels=final_block_channels,
use_bias=True))
in_channels = final_block_channels
self.features.add(nn.AvgPool2D(
pool_size=7,
strides=1))
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
self.output.add(nn.Dense(
units=classes,
in_units=in_channels))
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_squeezenext(version,
width_scale,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create SqueezeNext model with specific parameters.
Parameters:
----------
version : str
        Version of SqueezeNext ('23' or '23v5').
width_scale : float
Scale factor for width of layers.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
init_block_channels = 64
final_block_channels = 128
channels_per_layers = [32, 64, 128, 256]
if version == '23':
layers = [6, 6, 8, 1]
elif version == '23v5':
layers = [2, 4, 14, 1]
else:
raise ValueError("Unsupported SqueezeNet version {}".format(version))
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
if width_scale != 1:
channels = [[int(cij * width_scale) for cij in ci] for ci in channels]
init_block_channels = int(init_block_channels * width_scale)
final_block_channels = int(final_block_channels * width_scale)
net = SqueezeNext(
channels=channels,
init_block_channels=init_block_channels,
final_block_channels=final_block_channels,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def sqnxt23_w1(**kwargs):
"""
1.0-SqNxt-23 model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_squeezenext(version="23", width_scale=1.0, model_name="sqnxt23_w1", **kwargs)
def sqnxt23_w3d2(**kwargs):
"""
1.5-SqNxt-23 model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_squeezenext(version="23", width_scale=1.5, model_name="sqnxt23_w3d2", **kwargs)
def sqnxt23_w2(**kwargs):
"""
2.0-SqNxt-23 model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_squeezenext(version="23", width_scale=2.0, model_name="sqnxt23_w2", **kwargs)
def sqnxt23v5_w1(**kwargs):
"""
1.0-SqNxt-23v5 model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_squeezenext(version="23v5", width_scale=1.0, model_name="sqnxt23v5_w1", **kwargs)
def sqnxt23v5_w3d2(**kwargs):
"""
1.5-SqNxt-23v5 model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_squeezenext(version="23v5", width_scale=1.5, model_name="sqnxt23v5_w3d2", **kwargs)
def sqnxt23v5_w2(**kwargs):
"""
2.0-SqNxt-23v5 model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_squeezenext(version="23v5", width_scale=2.0, model_name="sqnxt23v5_w2", **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
sqnxt23_w1,
sqnxt23_w3d2,
sqnxt23_w2,
sqnxt23v5_w1,
sqnxt23v5_w3d2,
sqnxt23v5_w2,
]
for model in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != sqnxt23_w1 or weight_count == 724056)
assert (model != sqnxt23_w3d2 or weight_count == 1511824)
assert (model != sqnxt23_w2 or weight_count == 2583752)
assert (model != sqnxt23v5_w1 or weight_count == 921816)
assert (model != sqnxt23v5_w3d2 or weight_count == 1953616)
assert (model != sqnxt23v5_w2 or weight_count == 3366344)
x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
y = net(x)
assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
| 13,315 | 32.124378 | 119 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/xdensenet.py | """
X-DenseNet for ImageNet-1K, implemented in Gluon.
Original paper: 'Deep Expander Networks: Efficient Deep Networks from Graph Theory,'
https://arxiv.org/abs/1711.08757.
"""
__all__ = ['XDenseNet', 'xdensenet121_2', 'xdensenet161_2', 'xdensenet169_2', 'xdensenet201_2', 'pre_xconv3x3_block',
'XDenseUnit']
import os
import mxnet as mx
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .preresnet import PreResInitBlock, PreResActivation
from .densenet import TransitionBlock
@mx.init.register
class XMaskInit(mx.init.Initializer):
"""
Returns an initializer performing "X-Net" initialization for masks.
Parameters:
----------
expand_ratio : int
Ratio of expansion.
"""
def __init__(self,
expand_ratio,
**kwargs):
super(XMaskInit, self).__init__(**kwargs)
assert (expand_ratio > 0)
self.expand_ratio = expand_ratio
def _init_weight(self, _, arr):
shape = arr.shape
expand_size = max(shape[1] // self.expand_ratio, 1)
shape1_arange = mx.nd.arange(shape[1], ctx=arr.context)
arr[:] = 0
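        # For every output channel, enable a random subset of expand_size input
        # channels in the binary mask; the rest stay zero (expander-graph
        # connectivity).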
for i in range(shape[0]):
jj = mx.nd.random.shuffle(shape1_arange)[:expand_size]
arr[i, jj, :, :] = 1
class XConv2D(nn.Conv2D):
"""
X-Convolution layer.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of 2 int
Convolution window size.
groups : int, default 1
Number of groups.
expand_ratio : int, default 2
Ratio of expansion.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
groups=1,
expand_ratio=2,
**kwargs):
super(XConv2D, self).__init__(
in_channels=in_channels,
channels=out_channels,
kernel_size=kernel_size,
groups=groups,
**kwargs)
if isinstance(kernel_size, int):
kernel_size = (kernel_size, kernel_size)
grouped_in_channels = in_channels // groups
self.mask = self.params.get(
name="mask",
grad_req="null",
shape=(out_channels, grouped_in_channels, kernel_size[0], kernel_size[1]),
init=XMaskInit(expand_ratio=expand_ratio),
differentiable=False)
def hybrid_forward(self, F, x, weight, bias=None, mask=None):
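        # Zero out the masked weights before running the ordinary convolution;
        # the mask is a non-differentiable constant, so only the surviving
        # connections are trained.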
masked_weight = weight * mask
return super(XConv2D, self).hybrid_forward(F, x, weight=masked_weight, bias=bias)
class PreXConvBlock(HybridBlock):
"""
X-Convolution block with Batch normalization and ReLU pre-activation.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of 2 int
Convolution window size.
strides : int or tuple/list of 2 int
Strides of the convolution.
padding : int or tuple/list of 2 int
Padding value for convolution layer.
dilation : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
groups : int, default 1
Number of groups.
use_bias : bool, default False
Whether the layer uses a bias vector.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
return_preact : bool, default False
Whether return pre-activation. It's used by PreResNet.
activate : bool, default True
Whether activate the convolution block.
expand_ratio : int, default 2
Ratio of expansion.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
strides,
padding,
dilation=1,
groups=1,
use_bias=False,
bn_use_global_stats=False,
return_preact=False,
activate=True,
expand_ratio=2,
**kwargs):
super(PreXConvBlock, self).__init__(**kwargs)
self.return_preact = return_preact
self.activate = activate
with self.name_scope():
self.bn = nn.BatchNorm(
in_channels=in_channels,
use_global_stats=bn_use_global_stats)
if self.activate:
self.activ = nn.Activation("relu")
self.conv = XConv2D(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
strides=strides,
padding=padding,
dilation=dilation,
groups=groups,
use_bias=use_bias,
expand_ratio=expand_ratio)
def hybrid_forward(self, F, x):
x = self.bn(x)
if self.activate:
x = self.activ(x)
if self.return_preact:
x_pre_activ = x
x = self.conv(x)
if self.return_preact:
return x, x_pre_activ
else:
return x
def pre_xconv1x1_block(in_channels,
out_channels,
strides=1,
use_bias=False,
bn_use_global_stats=False,
return_preact=False,
activate=True,
expand_ratio=2):
"""
1x1 version of the pre-activated x-convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int, default 1
Strides of the convolution.
use_bias : bool, default False
Whether the layer uses a bias vector.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
return_preact : bool, default False
Whether return pre-activation.
activate : bool, default True
Whether activate the convolution block.
expand_ratio : int, default 2
Ratio of expansion.
"""
return PreXConvBlock(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=1,
strides=strides,
padding=0,
use_bias=use_bias,
bn_use_global_stats=bn_use_global_stats,
return_preact=return_preact,
activate=activate,
expand_ratio=expand_ratio)
def pre_xconv3x3_block(in_channels,
out_channels,
strides=1,
padding=1,
dilation=1,
groups=1,
bn_use_global_stats=False,
return_preact=False,
activate=True,
expand_ratio=2):
"""
3x3 version of the pre-activated x-convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int, default 1
Strides of the convolution.
padding : int or tuple/list of 2 int, default 1
Padding value for convolution layer.
dilation : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
groups : int, default 1
Number of groups.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
return_preact : bool, default False
Whether return pre-activation.
activate : bool, default True
Whether activate the convolution block.
expand_ratio : int, default 2
Ratio of expansion.
"""
return PreXConvBlock(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
strides=strides,
padding=padding,
dilation=dilation,
groups=groups,
bn_use_global_stats=bn_use_global_stats,
return_preact=return_preact,
activate=activate,
expand_ratio=expand_ratio)
class XDenseUnit(HybridBlock):
"""
X-DenseNet unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
dropout_rate : float
        Parameter of Dropout layer. Fraction of the input units to drop.
expand_ratio : int
Ratio of expansion.
"""
def __init__(self,
in_channels,
out_channels,
bn_use_global_stats,
dropout_rate,
expand_ratio,
**kwargs):
super(XDenseUnit, self).__init__(**kwargs)
self.use_dropout = (dropout_rate != 0.0)
bn_size = 4
inc_channels = out_channels - in_channels
mid_channels = inc_channels * bn_size
with self.name_scope():
self.conv1 = pre_xconv1x1_block(
in_channels=in_channels,
out_channels=mid_channels,
bn_use_global_stats=bn_use_global_stats,
expand_ratio=expand_ratio)
self.conv2 = pre_xconv3x3_block(
in_channels=mid_channels,
out_channels=inc_channels,
bn_use_global_stats=bn_use_global_stats,
expand_ratio=expand_ratio)
if self.use_dropout:
self.dropout = nn.Dropout(rate=dropout_rate)
def hybrid_forward(self, F, x):
identity = x
x = self.conv1(x)
x = self.conv2(x)
if self.use_dropout:
x = self.dropout(x)
x = F.concat(identity, x, dim=1)
return x
class XDenseNet(HybridBlock):
"""
X-DenseNet model from 'Deep Expander Networks: Efficient Deep Networks from Graph Theory,'
https://arxiv.org/abs/1711.08757.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
bn_use_global_stats : bool, default False
        Whether global moving statistics are used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
dropout_rate : float, default 0.0
        Parameter of Dropout layer. Fraction of the input units to drop.
expand_ratio : int, default 2
Ratio of expansion.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
bn_use_global_stats=False,
dropout_rate=0.0,
expand_ratio=2,
in_channels=3,
in_size=(224, 224),
classes=1000,
**kwargs):
super(XDenseNet, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
self.features.add(PreResInitBlock(
in_channels=in_channels,
out_channels=init_block_channels,
bn_use_global_stats=bn_use_global_stats))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
with stage.name_scope():
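                    # Between stages a transition block halves the channel count
                    # (the standard DenseNet compression with theta = 0.5).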
if i != 0:
stage.add(TransitionBlock(
in_channels=in_channels,
out_channels=(in_channels // 2),
bn_use_global_stats=bn_use_global_stats))
in_channels = in_channels // 2
for j, out_channels in enumerate(channels_per_stage):
stage.add(XDenseUnit(
in_channels=in_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats,
dropout_rate=dropout_rate,
expand_ratio=expand_ratio))
in_channels = out_channels
self.features.add(stage)
self.features.add(PreResActivation(
in_channels=in_channels,
bn_use_global_stats=bn_use_global_stats))
self.features.add(nn.AvgPool2D(
pool_size=7,
strides=1))
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
self.output.add(nn.Dense(
units=classes,
in_units=in_channels))
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_xdensenet(blocks,
expand_ratio=2,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create X-DenseNet model with specific parameters.
Parameters:
----------
blocks : int
Number of blocks.
expand_ratio : int, default 2
Ratio of expansion.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
if blocks == 121:
init_block_channels = 64
growth_rate = 32
layers = [6, 12, 24, 16]
elif blocks == 161:
init_block_channels = 96
growth_rate = 48
layers = [6, 12, 36, 24]
elif blocks == 169:
init_block_channels = 64
growth_rate = 32
layers = [6, 12, 32, 32]
elif blocks == 201:
init_block_channels = 64
growth_rate = 32
layers = [6, 12, 48, 32]
else:
raise ValueError("Unsupported X-DenseNet version with number of layers {}".format(blocks))
from functools import reduce
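    # Build the per-unit output widths: each stage starts from half of the previous
    # stage's final width (the transition halving) and grows by growth_rate per unit.
    # E.g. for blocks=121 the first stage comes out as [96, 128, 160, 192, 224, 256].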
channels = reduce(
lambda xi, yi: xi + [reduce(
lambda xj, yj: xj + [xj[-1] + yj],
[growth_rate] * yi,
[xi[-1][-1] // 2])[1:]],
layers,
[[init_block_channels * 2]])[1:]
net = XDenseNet(
channels=channels,
init_block_channels=init_block_channels,
expand_ratio=expand_ratio,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def xdensenet121_2(**kwargs):
"""
X-DenseNet-121-2 model from 'Deep Expander Networks: Efficient Deep Networks from Graph Theory,'
https://arxiv.org/abs/1711.08757.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_xdensenet(blocks=121, model_name="xdensenet121_2", **kwargs)
def xdensenet161_2(**kwargs):
"""
X-DenseNet-161-2 model from 'Deep Expander Networks: Efficient Deep Networks from Graph Theory,'
https://arxiv.org/abs/1711.08757.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_xdensenet(blocks=161, model_name="xdensenet161_2", **kwargs)
def xdensenet169_2(**kwargs):
"""
X-DenseNet-169-2 model from 'Deep Expander Networks: Efficient Deep Networks from Graph Theory,'
https://arxiv.org/abs/1711.08757.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_xdensenet(blocks=169, model_name="xdensenet169_2", **kwargs)
def xdensenet201_2(**kwargs):
"""
X-DenseNet-201-2 model from 'Deep Expander Networks: Efficient Deep Networks from Graph Theory,'
https://arxiv.org/abs/1711.08757.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_xdensenet(blocks=201, model_name="xdensenet201_2", **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
xdensenet121_2,
xdensenet161_2,
xdensenet169_2,
xdensenet201_2,
]
for model in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != xdensenet121_2 or weight_count == 7978856)
assert (model != xdensenet161_2 or weight_count == 28681000)
assert (model != xdensenet169_2 or weight_count == 14149480)
assert (model != xdensenet201_2 or weight_count == 20013928)
x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
y = net(x)
assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
| 19,138 | 31.94148 | 117 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/diaresnet_cifar.py | """
DIA-ResNet for CIFAR/SVHN, implemented in Gluon.
Original paper: 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671.
"""
__all__ = ['CIFARDIAResNet', 'diaresnet20_cifar10', 'diaresnet20_cifar100', 'diaresnet20_svhn', 'diaresnet56_cifar10',
'diaresnet56_cifar100', 'diaresnet56_svhn', 'diaresnet110_cifar10', 'diaresnet110_cifar100',
'diaresnet110_svhn', 'diaresnet164bn_cifar10', 'diaresnet164bn_cifar100', 'diaresnet164bn_svhn',
'diaresnet1001_cifar10', 'diaresnet1001_cifar100', 'diaresnet1001_svhn', 'diaresnet1202_cifar10',
'diaresnet1202_cifar100', 'diaresnet1202_svhn']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv3x3_block, DualPathSequential
from .diaresnet import DIAAttention, DIAResUnit
class CIFARDIAResNet(HybridBlock):
"""
DIA-ResNet model for CIFAR from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
bn_use_global_stats : bool, default False
        Whether global moving statistics are used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (32, 32)
Spatial size of the expected input image.
classes : int, default 10
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
bottleneck,
bn_use_global_stats=False,
in_channels=3,
in_size=(32, 32),
classes=10):
super(CIFARDIAResNet, self).__init__()
self.in_size = in_size
self.classes = classes
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
self.features.add(conv3x3_block(
in_channels=in_channels,
out_channels=init_block_channels,
bn_use_global_stats=bn_use_global_stats))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = DualPathSequential(
return_two=False,
prefix="stage{}_".format(i + 1))
attention = DIAAttention(
in_x_features=channels_per_stage[0],
in_h_features=channels_per_stage[0])
for j, out_channels in enumerate(channels_per_stage):
strides = 2 if (j == 0) and (i != 0) else 1
with stage.name_scope():
stage.add(DIAResUnit(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats,
bottleneck=bottleneck,
conv1_stride=False,
attention=attention))
in_channels = out_channels
self.features.add(stage)
self.features.add(nn.AvgPool2D(
pool_size=8,
strides=1))
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
self.output.add(nn.Dense(
units=classes,
in_units=in_channels))
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_diaresnet_cifar(classes,
blocks,
bottleneck,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create DIA-ResNet model for CIFAR with specific parameters.
Parameters:
----------
classes : int
Number of classification classes.
blocks : int
Number of blocks.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
assert (classes in [10, 100])
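    # Depth formula for these 3-stage CIFAR networks: blocks = 9 * n + 2 with
    # bottleneck units (3 convs each) and blocks = 6 * n + 2 with basic units
    # (2 convs each), where n is the number of units per stage.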
if bottleneck:
assert ((blocks - 2) % 9 == 0)
layers = [(blocks - 2) // 9] * 3
else:
assert ((blocks - 2) % 6 == 0)
layers = [(blocks - 2) // 6] * 3
channels_per_layers = [16, 32, 64]
init_block_channels = 16
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
if bottleneck:
channels = [[cij * 4 for cij in ci] for ci in channels]
net = CIFARDIAResNet(
channels=channels,
init_block_channels=init_block_channels,
bottleneck=bottleneck,
classes=classes,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def diaresnet20_cifar10(classes=10, **kwargs):
"""
DIA-ResNet-20 model for CIFAR-10 from 'DIANet: Dense-and-Implicit Attention Network,'
https://arxiv.org/abs/1905.10671.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_diaresnet_cifar(classes=classes, blocks=20, bottleneck=False, model_name="diaresnet20_cifar10",
**kwargs)
def diaresnet20_cifar100(classes=100, **kwargs):
"""
DIA-ResNet-20 model for CIFAR-100 from 'DIANet: Dense-and-Implicit Attention Network,'
https://arxiv.org/abs/1905.10671.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_diaresnet_cifar(classes=classes, blocks=20, bottleneck=False, model_name="diaresnet20_cifar100",
**kwargs)
def diaresnet20_svhn(classes=10, **kwargs):
"""
DIA-ResNet-20 model for SVHN from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_diaresnet_cifar(classes=classes, blocks=20, bottleneck=False, model_name="diaresnet20_svhn",
**kwargs)
def diaresnet56_cifar10(classes=10, **kwargs):
"""
DIA-ResNet-56 model for CIFAR-10 from 'DIANet: Dense-and-Implicit Attention Network,'
https://arxiv.org/abs/1905.10671.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_diaresnet_cifar(classes=classes, blocks=56, bottleneck=False, model_name="diaresnet56_cifar10",
**kwargs)
def diaresnet56_cifar100(classes=100, **kwargs):
"""
DIA-ResNet-56 model for CIFAR-100 from 'DIANet: Dense-and-Implicit Attention Network,'
https://arxiv.org/abs/1905.10671.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_diaresnet_cifar(classes=classes, blocks=56, bottleneck=False, model_name="diaresnet56_cifar100",
**kwargs)
def diaresnet56_svhn(classes=10, **kwargs):
"""
DIA-ResNet-56 model for SVHN from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_diaresnet_cifar(classes=classes, blocks=56, bottleneck=False, model_name="diaresnet56_svhn",
**kwargs)
def diaresnet110_cifar10(classes=10, **kwargs):
"""
DIA-ResNet-110 model for CIFAR-10 from 'DIANet: Dense-and-Implicit Attention Network,'
https://arxiv.org/abs/1905.10671.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_diaresnet_cifar(classes=classes, blocks=110, bottleneck=False, model_name="diaresnet110_cifar10",
**kwargs)
def diaresnet110_cifar100(classes=100, **kwargs):
"""
DIA-ResNet-110 model for CIFAR-100 from 'DIANet: Dense-and-Implicit Attention Network,'
https://arxiv.org/abs/1905.10671.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_diaresnet_cifar(classes=classes, blocks=110, bottleneck=False, model_name="diaresnet110_cifar100",
**kwargs)
def diaresnet110_svhn(classes=10, **kwargs):
"""
DIA-ResNet-110 model for SVHN from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_diaresnet_cifar(classes=classes, blocks=110, bottleneck=False, model_name="diaresnet110_svhn",
**kwargs)
def diaresnet164bn_cifar10(classes=10, **kwargs):
"""
DIA-ResNet-164(BN) model for CIFAR-10 from 'DIANet: Dense-and-Implicit Attention Network,'
https://arxiv.org/abs/1905.10671.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_diaresnet_cifar(classes=classes, blocks=164, bottleneck=True, model_name="diaresnet164bn_cifar10",
**kwargs)
def diaresnet164bn_cifar100(classes=100, **kwargs):
"""
DIA-ResNet-164(BN) model for CIFAR-100 from 'DIANet: Dense-and-Implicit Attention Network,'
https://arxiv.org/abs/1905.10671.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_diaresnet_cifar(classes=classes, blocks=164, bottleneck=True, model_name="diaresnet164bn_cifar100",
**kwargs)
def diaresnet164bn_svhn(classes=10, **kwargs):
"""
DIA-ResNet-164(BN) model for SVHN from 'DIANet: Dense-and-Implicit Attention Network,'
https://arxiv.org/abs/1905.10671.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_diaresnet_cifar(classes=classes, blocks=164, bottleneck=True, model_name="diaresnet164bn_svhn",
**kwargs)
def diaresnet1001_cifar10(classes=10, **kwargs):
"""
DIA-ResNet-1001 model for CIFAR-10 from 'DIANet: Dense-and-Implicit Attention Network,'
https://arxiv.org/abs/1905.10671.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_diaresnet_cifar(classes=classes, blocks=1001, bottleneck=True, model_name="diaresnet1001_cifar10",
**kwargs)
def diaresnet1001_cifar100(classes=100, **kwargs):
"""
DIA-ResNet-1001 model for CIFAR-100 from 'DIANet: Dense-and-Implicit Attention Network,'
https://arxiv.org/abs/1905.10671.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_diaresnet_cifar(classes=classes, blocks=1001, bottleneck=True, model_name="diaresnet1001_cifar100",
**kwargs)
def diaresnet1001_svhn(classes=10, **kwargs):
"""
DIA-ResNet-1001 model for SVHN from 'DIANet: Dense-and-Implicit Attention Network,'
https://arxiv.org/abs/1905.10671.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_diaresnet_cifar(classes=classes, blocks=1001, bottleneck=True, model_name="diaresnet1001_svhn",
**kwargs)
def diaresnet1202_cifar10(classes=10, **kwargs):
"""
DIA-ResNet-1202 model for CIFAR-10 from 'DIANet: Dense-and-Implicit Attention Network,'
https://arxiv.org/abs/1905.10671.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_diaresnet_cifar(classes=classes, blocks=1202, bottleneck=False, model_name="diaresnet1202_cifar10",
**kwargs)
def diaresnet1202_cifar100(classes=100, **kwargs):
"""
DIA-ResNet-1202 model for CIFAR-100 from 'DIANet: Dense-and-Implicit Attention Network,'
https://arxiv.org/abs/1905.10671.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_diaresnet_cifar(classes=classes, blocks=1202, bottleneck=False, model_name="diaresnet1202_cifar100",
**kwargs)
def diaresnet1202_svhn(classes=10, **kwargs):
"""
DIA-ResNet-1202 model for SVHN from 'DIANet: Dense-and-Implicit Attention Network,'
https://arxiv.org/abs/1905.10671.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_diaresnet_cifar(classes=classes, blocks=1202, bottleneck=False, model_name="diaresnet1202_svhn",
**kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
(diaresnet20_cifar10, 10),
(diaresnet20_cifar100, 100),
(diaresnet20_svhn, 10),
(diaresnet56_cifar10, 10),
(diaresnet56_cifar100, 100),
(diaresnet56_svhn, 10),
(diaresnet110_cifar10, 10),
(diaresnet110_cifar100, 100),
(diaresnet110_svhn, 10),
(diaresnet164bn_cifar10, 10),
(diaresnet164bn_cifar100, 100),
(diaresnet164bn_svhn, 10),
(diaresnet1001_cifar10, 10),
(diaresnet1001_cifar100, 100),
(diaresnet1001_svhn, 10),
(diaresnet1202_cifar10, 10),
(diaresnet1202_cifar100, 100),
(diaresnet1202_svhn, 10),
]
for model, classes in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != diaresnet20_cifar10 or weight_count == 286866)
assert (model != diaresnet20_cifar100 or weight_count == 292716)
assert (model != diaresnet20_svhn or weight_count == 286866)
assert (model != diaresnet56_cifar10 or weight_count == 870162)
assert (model != diaresnet56_cifar100 or weight_count == 876012)
assert (model != diaresnet56_svhn or weight_count == 870162)
assert (model != diaresnet110_cifar10 or weight_count == 1745106)
assert (model != diaresnet110_cifar100 or weight_count == 1750956)
assert (model != diaresnet110_svhn or weight_count == 1745106)
assert (model != diaresnet164bn_cifar10 or weight_count == 1923002)
assert (model != diaresnet164bn_cifar100 or weight_count == 1946132)
assert (model != diaresnet164bn_svhn or weight_count == 1923002)
assert (model != diaresnet1001_cifar10 or weight_count == 10547450)
assert (model != diaresnet1001_cifar100 or weight_count == 10570580)
assert (model != diaresnet1001_svhn or weight_count == 10547450)
assert (model != diaresnet1202_cifar10 or weight_count == 19438418)
assert (model != diaresnet1202_cifar100 or weight_count == 19444268)
assert (model != diaresnet1202_svhn or weight_count == 19438418)
x = mx.nd.zeros((1, 3, 32, 32), ctx=ctx)
y = net(x)
assert (y.shape == (1, classes))
if __name__ == "__main__":
_test()
| 21,836 | 36.137755 | 120 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/resdropresnet_cifar.py | """
ResDrop-ResNet for CIFAR/SVHN, implemented in Gluon.
Original paper: 'Deep Networks with Stochastic Depth,' https://arxiv.org/abs/1603.09382.
"""
__all__ = ['CIFARResDropResNet', 'resdropresnet20_cifar10', 'resdropresnet20_cifar100', 'resdropresnet20_svhn']
import os
import numpy as np
import mxnet as mx
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1_block, conv3x3_block
from .resnet import ResBlock, ResBottleneck
class ResDropResUnit(HybridBlock):
"""
ResDrop-ResNet unit with residual connection.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
bn_use_global_stats : bool
        Whether global moving statistics are used instead of local batch-norm for BatchNorm layers.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
life_prob : float
Residual branch life probability.
"""
def __init__(self,
in_channels,
out_channels,
strides,
bn_use_global_stats,
bottleneck,
life_prob,
**kwargs):
super(ResDropResUnit, self).__init__(**kwargs)
self.life_prob = life_prob
self.resize_identity = (in_channels != out_channels) or (strides != 1)
body_class = ResBottleneck if bottleneck else ResBlock
with self.name_scope():
self.body = body_class(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats)
if self.resize_identity:
self.identity_conv = conv1x1_block(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats,
activation=None)
self.activ = nn.Activation("relu")
def hybrid_forward(self, F, x):
if self.resize_identity:
identity = self.identity_conv(x)
else:
identity = x
x = self.body(x)
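        # Stochastic depth: in training the residual branch survives with
        # probability life_prob and is scaled by 1 / life_prob when it does
        # (inverted scaling), so no extra rescaling is needed at inference.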
if mx.autograd.is_training():
b = np.random.binomial(n=1, p=self.life_prob)
x = float(b) / self.life_prob * x
x = x + identity
x = self.activ(x)
return x
class CIFARResDropResNet(HybridBlock):
"""
ResDrop-ResNet model for CIFAR from 'Deep Networks with Stochastic Depth,' https://arxiv.org/abs/1603.09382.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
life_probs : list of float
Residual branch life probability for each unit.
bn_use_global_stats : bool, default False
        Whether global moving statistics are used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (32, 32)
Spatial size of the expected input image.
classes : int, default 10
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
bottleneck,
life_probs,
bn_use_global_stats=False,
in_channels=3,
in_size=(32, 32),
classes=10,
**kwargs):
super(CIFARResDropResNet, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
self.features.add(conv3x3_block(
in_channels=in_channels,
out_channels=init_block_channels,
bn_use_global_stats=bn_use_global_stats))
in_channels = init_block_channels
k = 0
for i, channels_per_stage in enumerate(channels):
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
with stage.name_scope():
for j, out_channels in enumerate(channels_per_stage):
strides = 2 if (j == 0) and (i != 0) else 1
stage.add(ResDropResUnit(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats,
bottleneck=bottleneck,
life_prob=life_probs[k]))
in_channels = out_channels
k += 1
self.features.add(stage)
self.features.add(nn.AvgPool2D(
pool_size=8,
strides=1))
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
self.output.add(nn.Dense(
units=classes,
in_units=in_channels))
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_resdropresnet_cifar(classes,
blocks,
bottleneck,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create ResDrop-ResNet model for CIFAR with specific parameters.
Parameters:
----------
classes : int
Number of classification classes.
blocks : int
Number of blocks.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
assert (classes in [10, 100])
if bottleneck:
assert ((blocks - 2) % 9 == 0)
layers = [(blocks - 2) // 9] * 3
else:
assert ((blocks - 2) % 6 == 0)
layers = [(blocks - 2) // 6] * 3
init_block_channels = 16
channels_per_layers = [16, 32, 64]
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
if bottleneck:
channels = [[cij * 4 for cij in ci] for ci in channels]
total_layers = sum(layers)
final_death_prob = 0.5
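    # Linear decay rule for stochastic depth: the survival probability falls
    # linearly with depth down to 1 - final_death_prob = 0.5 for the last unit,
    # e.g. for blocks=20 (9 units) it goes from ~0.944 down to 0.5.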
life_probs = [1.0 - float(i + 1) / float(total_layers) * final_death_prob for i in range(total_layers)]
net = CIFARResDropResNet(
channels=channels,
init_block_channels=init_block_channels,
bottleneck=bottleneck,
life_probs=life_probs,
classes=classes,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def resdropresnet20_cifar10(classes=10, **kwargs):
"""
ResDrop-ResNet-20 model for CIFAR-10 from 'Deep Networks with Stochastic Depth,' https://arxiv.org/abs/1603.09382.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resdropresnet_cifar(classes=classes, blocks=20, bottleneck=False, model_name="resdropresnet20_cifar10",
**kwargs)
def resdropresnet20_cifar100(classes=100, **kwargs):
"""
ResDrop-ResNet-20 model for CIFAR-100 from 'Deep Networks with Stochastic Depth,' https://arxiv.org/abs/1603.09382.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resdropresnet_cifar(classes=classes, blocks=20, bottleneck=False, model_name="resdropresnet20_cifar100",
**kwargs)
def resdropresnet20_svhn(classes=10, **kwargs):
"""
ResDrop-ResNet-20 model for SVHN from 'Deep Networks with Stochastic Depth,' https://arxiv.org/abs/1603.09382.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resdropresnet_cifar(classes=classes, blocks=20, bottleneck=False, model_name="resdropresnet20_svhn",
**kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
(resdropresnet20_cifar10, 10),
(resdropresnet20_cifar100, 100),
(resdropresnet20_svhn, 10),
]
for model, classes in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != resdropresnet20_cifar10 or weight_count == 272474)
assert (model != resdropresnet20_cifar100 or weight_count == 278324)
assert (model != resdropresnet20_svhn or weight_count == 272474)
x = mx.nd.zeros((14, 3, 32, 32), ctx=ctx)
y = net(x)
# with mx.autograd.record():
# y = net(x)
# y.backward()
assert (y.shape == (14, classes))
if __name__ == "__main__":
_test()
| 11,219 | 33.62963 | 119 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/bisenet.py | """
BiSeNet for CelebAMask-HQ, implemented in Gluon.
Original paper: 'BiSeNet: Bilateral Segmentation Network for Real-time Semantic Segmentation,'
https://arxiv.org/abs/1808.00897.
"""
__all__ = ['BiSeNet', 'bisenet_resnet18_celebamaskhq']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1, conv1x1_block, conv3x3_block, InterpolationBlock, MultiOutputSequential
from .resnet import resnet18
class PyramidPoolingZeroBranch(HybridBlock):
"""
Pyramid pooling zero branch.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
in_size : tuple of 2 int
Spatial size of output image for the upsampling operation.
"""
def __init__(self,
in_channels,
out_channels,
in_size,
**kwargs):
super(PyramidPoolingZeroBranch, self).__init__(**kwargs)
self.in_size = in_size
with self.name_scope():
self.conv = conv1x1_block(
in_channels=in_channels,
out_channels=out_channels)
self.up = InterpolationBlock(scale_factor=None)
def hybrid_forward(self, F, x):
in_size = self.in_size if self.in_size is not None else x.shape[2:]
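        # Global-context branch: pool the whole feature map to 1x1, project it
        # with a 1x1 convolution and upsample back to the input spatial size.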
x = F.contrib.AdaptiveAvgPooling2D(x, output_size=1)
x = self.conv(x)
x = self.up(x, in_size)
return x
class AttentionRefinementBlock(HybridBlock):
"""
Attention refinement block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
"""
def __init__(self,
in_channels,
out_channels,
**kwargs):
super(AttentionRefinementBlock, self).__init__(**kwargs)
with self.name_scope():
self.conv1 = conv3x3_block(
in_channels=in_channels,
out_channels=out_channels)
self.conv2 = conv1x1_block(
in_channels=out_channels,
out_channels=out_channels,
activation=(lambda: nn.Activation("sigmoid")))
def hybrid_forward(self, F, x):
x = self.conv1(x)
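        # Channel attention: a globally pooled descriptor, squashed by a sigmoid
        # 1x1 convolution, re-weights the feature channels.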
w = F.contrib.AdaptiveAvgPooling2D(x, output_size=1)
w = self.conv2(w)
x = x * w
return x
class PyramidPoolingMainBranch(HybridBlock):
"""
Pyramid pooling main branch.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
scale_factor : float
Multiplier for spatial size.
"""
def __init__(self,
in_channels,
out_channels,
scale_factor,
**kwargs):
super(PyramidPoolingMainBranch, self).__init__(**kwargs)
with self.name_scope():
self.att = AttentionRefinementBlock(
in_channels=in_channels,
out_channels=out_channels)
self.up = InterpolationBlock(
scale_factor=scale_factor,
bilinear=False)
self.conv = conv3x3_block(
in_channels=out_channels,
out_channels=out_channels)
def hybrid_forward(self, F, x, y):
x = self.att(x)
x = x + y
x = self.up(x)
x = self.conv(x)
return x
class FeatureFusion(HybridBlock):
"""
Feature fusion block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
reduction : int, default 4
Squeeze reduction value.
"""
def __init__(self,
in_channels,
out_channels,
reduction=4,
**kwargs):
super(FeatureFusion, self).__init__(**kwargs)
mid_channels = out_channels // reduction
with self.name_scope():
self.conv_merge = conv1x1_block(
in_channels=in_channels,
out_channels=out_channels)
self.conv1 = conv1x1(
in_channels=out_channels,
out_channels=mid_channels)
self.activ = nn.Activation("relu")
self.conv2 = conv1x1(
in_channels=mid_channels,
out_channels=out_channels)
self.sigmoid = nn.Activation("sigmoid")
def hybrid_forward(self, F, x, y):
x = F.concat(x, y, dim=1)
x = self.conv_merge(x)
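        # SE-style gating: a two-layer bottleneck on the globally pooled features
        # produces per-channel weights; the gated tensor is added back to the
        # merged features as a residual.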
w = F.contrib.AdaptiveAvgPooling2D(x, output_size=1)
w = self.conv1(w)
w = self.activ(w)
w = self.conv2(w)
w = self.sigmoid(w)
x_att = x * w
x = x + x_att
return x
class PyramidPooling(HybridBlock):
"""
Pyramid Pooling module.
Parameters:
----------
x16_in_channels : int
Number of input channels for x16.
x32_in_channels : int
Number of input channels for x32.
y_out_channels : int
Number of output channels for y-outputs.
y32_out_size : tuple of 2 int
Spatial size of the y32 tensor.
"""
def __init__(self,
x16_in_channels,
x32_in_channels,
y_out_channels,
y32_out_size,
**kwargs):
super(PyramidPooling, self).__init__(**kwargs)
z_out_channels = 2 * y_out_channels
with self.name_scope():
self.pool32 = PyramidPoolingZeroBranch(
in_channels=x32_in_channels,
out_channels=y_out_channels,
in_size=y32_out_size)
self.pool16 = PyramidPoolingMainBranch(
in_channels=x32_in_channels,
out_channels=y_out_channels,
scale_factor=2)
self.pool8 = PyramidPoolingMainBranch(
in_channels=x16_in_channels,
out_channels=y_out_channels,
scale_factor=2)
self.fusion = FeatureFusion(
in_channels=z_out_channels,
out_channels=z_out_channels)
def hybrid_forward(self, F, x8, x16, x32):
y32 = self.pool32(x32)
y16 = self.pool16(x32, y32)
y8 = self.pool8(x16, y16)
z8 = self.fusion(x8, y8)
return z8, y8, y16
class BiSeHead(HybridBlock):
"""
BiSeNet head (final) block.
Parameters:
----------
in_channels : int
Number of input channels.
mid_channels : int
Number of middle channels.
out_channels : int
Number of output channels.
"""
def __init__(self,
in_channels,
mid_channels,
out_channels,
**kwargs):
super(BiSeHead, self).__init__(**kwargs)
with self.name_scope():
self.conv1 = conv3x3_block(
in_channels=in_channels,
out_channels=mid_channels)
self.conv2 = conv1x1(
in_channels=mid_channels,
out_channels=out_channels)
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.conv2(x)
return x
class BiSeNet(HybridBlock):
"""
BiSeNet model from 'BiSeNet: Bilateral Segmentation Network for Real-time Semantic Segmentation,'
https://arxiv.org/abs/1808.00897.
Parameters:
----------
backbone : func -> nn.Sequential
Feature extractor.
aux : bool, default True
        Whether to output auxiliary results.
fixed_size : bool, default True
Whether to expect fixed spatial size of input image.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (640, 480)
Spatial size of the expected input image.
    classes : int, default 19
Number of classification classes.
"""
def __init__(self,
backbone,
aux=True,
fixed_size=True,
in_channels=3,
in_size=(640, 480),
classes=19,
**kwargs):
super(BiSeNet, self).__init__(**kwargs)
assert (in_channels == 3)
self.in_size = in_size
self.classes = classes
self.aux = aux
self.fixed_size = fixed_size
with self.name_scope():
self.backbone, backbone_out_channels = backbone()
y_out_channels = backbone_out_channels[0]
z_out_channels = 2 * y_out_channels
y32_out_size = (self.in_size[0] // 32, self.in_size[1] // 32) if fixed_size else None
self.pool = PyramidPooling(
x16_in_channels=backbone_out_channels[1],
x32_in_channels=backbone_out_channels[2],
y_out_channels=y_out_channels,
y32_out_size=y32_out_size)
self.head_z8 = BiSeHead(
in_channels=z_out_channels,
mid_channels=z_out_channels,
out_channels=classes)
self.up8 = InterpolationBlock(scale_factor=(8 if fixed_size else None))
if self.aux:
mid_channels = y_out_channels // 2
self.head_y8 = BiSeHead(
in_channels=y_out_channels,
mid_channels=mid_channels,
out_channels=classes)
self.head_y16 = BiSeHead(
in_channels=y_out_channels,
mid_channels=mid_channels,
out_channels=classes)
self.up16 = InterpolationBlock(scale_factor=(16 if fixed_size else None))
def hybrid_forward(self, F, x):
assert (x.shape[2] % 32 == 0) and (x.shape[3] % 32 == 0)
x8, x16, x32 = self.backbone(x)
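        # x8/x16/x32 are backbone features at strides 8, 16 and 32; the context
        # path fuses them into z8, which the main head upsamples 8x to full
        # resolution (auxiliary heads tap y8 and y16 during training).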
z8, y8, y16 = self.pool(x8, x16, x32)
z8 = self.head_z8(z8)
z8 = self.up8(z8)
if self.aux:
y8 = self.head_y8(y8)
y16 = self.head_y16(y16)
y8 = self.up8(y8)
y16 = self.up16(y16)
return z8, y8, y16
else:
return z8
def get_bisenet(model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create BiSeNet model with specific parameters.
Parameters:
----------
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
net = BiSeNet(
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def bisenet_resnet18_celebamaskhq(pretrained_backbone=False, classes=19, **kwargs):
"""
BiSeNet model on the base of ResNet-18 for face segmentation on CelebAMask-HQ from 'BiSeNet: Bilateral Segmentation
Network for Real-time Semantic Segmentation,' https://arxiv.org/abs/1808.00897.
Parameters:
----------
pretrained_backbone : bool, default False
Whether to load the pretrained weights for feature extractor.
classes : int, default 19
Number of classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
def backbone(**bb_kwargs):
features_raw = resnet18(pretrained=pretrained_backbone, **bb_kwargs).features[:-1]
features = MultiOutputSequential(return_last=False)
features.add(features_raw[0])
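        # Flag the last three residual stages with do_output=True so that
        # MultiOutputSequential returns their feature maps (strides 8, 16, 32).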
for i, stage in enumerate(features_raw[1:]):
if i != 0:
stage.do_output = True
features.add(stage)
out_channels = [128, 256, 512]
return features, out_channels
return get_bisenet(backbone=backbone, classes=classes, model_name="bisenet_resnet18_celebamaskhq", **kwargs)
def _test():
import numpy as np
import mxnet as mx
in_size = (640, 480)
aux = True
pretrained = False
models = [
bisenet_resnet18_celebamaskhq,
]
for model in models:
net = model(pretrained=pretrained, in_size=in_size, aux=aux)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
if aux:
assert (model != bisenet_resnet18_celebamaskhq or weight_count == 13300416)
else:
assert (model != bisenet_resnet18_celebamaskhq or weight_count == 13150272)
batch = 1
x = mx.nd.random.normal(shape=(batch, 3, in_size[0], in_size[1]), ctx=ctx)
ys = net(x)
y = ys[0] if aux else ys
assert (y.shape == (batch, 19, in_size[0], in_size[1]))
if __name__ == "__main__":
_test()
| 13,872 | 30.386878 | 119 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/resnet.py | """
ResNet for ImageNet-1K, implemented in Gluon.
Original paper: 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
"""
__all__ = ['ResNet', 'resnet10', 'resnet12', 'resnet14', 'resnetbc14b', 'resnet16', 'resnet18_wd4', 'resnet18_wd2',
'resnet18_w3d4', 'resnet18', 'resnet26', 'resnetbc26b', 'resnet34', 'resnetbc38b', 'resnet50', 'resnet50b',
'resnet101', 'resnet101b', 'resnet152', 'resnet152b', 'resnet200', 'resnet200b', 'ResBlock', 'ResBottleneck',
'ResUnit', 'ResInitBlock', 'get_resnet']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1_block, conv3x3_block, conv7x7_block
class ResBlock(HybridBlock):
"""
Simple ResNet block for residual path in ResNet unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
use_bias : bool, default False
Whether the layer uses a bias vector.
use_bn : bool, default True
Whether to use BatchNorm layer.
bn_use_global_stats : bool, default False
        Whether global moving statistics are used instead of local batch-norm for BatchNorm layers.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
"""
def __init__(self,
in_channels,
out_channels,
strides,
use_bias=False,
use_bn=True,
bn_use_global_stats=False,
bn_cudnn_off=False,
**kwargs):
super(ResBlock, self).__init__(**kwargs)
with self.name_scope():
self.conv1 = conv3x3_block(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
use_bias=use_bias,
use_bn=use_bn,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off)
self.conv2 = conv3x3_block(
in_channels=out_channels,
out_channels=out_channels,
use_bias=use_bias,
use_bn=use_bn,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
activation=None)
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.conv2(x)
return x
class ResBottleneck(HybridBlock):
"""
ResNet bottleneck block for residual path in ResNet unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
padding : int or tuple/list of 2 int, default 1
Padding value for the second convolution layer.
dilation : int or tuple/list of 2 int, default 1
Dilation value for the second convolution layer.
bn_use_global_stats : bool, default False
        Whether global moving statistics are used instead of local batch-norm for BatchNorm layers.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
conv1_stride : bool, default False
Whether to use stride in the first or the second convolution layer of the block.
bottleneck_factor : int, default 4
Bottleneck factor.
"""
def __init__(self,
in_channels,
out_channels,
strides,
padding=1,
dilation=1,
bn_use_global_stats=False,
bn_cudnn_off=False,
conv1_stride=False,
bottleneck_factor=4,
**kwargs):
super(ResBottleneck, self).__init__(**kwargs)
mid_channels = out_channels // bottleneck_factor
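        # conv1_stride=True puts the stride in the first 1x1 convolution (original
        # ResNet); False moves it to the 3x3 convolution, as in the 'b' variants
        # of this file (e.g. resnetbc14b, resnet50b).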
with self.name_scope():
self.conv1 = conv1x1_block(
in_channels=in_channels,
out_channels=mid_channels,
strides=(strides if conv1_stride else 1),
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off)
self.conv2 = conv3x3_block(
in_channels=mid_channels,
out_channels=mid_channels,
strides=(1 if conv1_stride else strides),
padding=padding,
dilation=dilation,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off)
self.conv3 = conv1x1_block(
in_channels=mid_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
activation=None)
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
return x
class ResUnit(HybridBlock):
"""
ResNet unit with residual connection.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
padding : int or tuple/list of 2 int, default 1
Padding value for the second convolution layer in bottleneck.
dilation : int or tuple/list of 2 int, default 1
Dilation value for the second convolution layer in bottleneck.
use_bias : bool, default False
Whether the layer uses a bias vector.
use_bn : bool, default True
Whether to use BatchNorm layer.
bn_use_global_stats : bool, default False
        Whether global moving statistics are used instead of local batch-norm for BatchNorm layers.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
bottleneck : bool, default True
Whether to use a bottleneck or simple block in units.
conv1_stride : bool, default False
Whether to use stride in the first or the second convolution layer of the block.
"""
def __init__(self,
in_channels,
out_channels,
strides,
padding=1,
dilation=1,
use_bias=False,
use_bn=True,
bn_use_global_stats=False,
bn_cudnn_off=False,
bottleneck=True,
conv1_stride=False,
**kwargs):
super(ResUnit, self).__init__(**kwargs)
self.resize_identity = (in_channels != out_channels) or (strides != 1)
with self.name_scope():
if bottleneck:
self.body = ResBottleneck(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
padding=padding,
dilation=dilation,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
conv1_stride=conv1_stride)
else:
self.body = ResBlock(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
use_bias=use_bias,
use_bn=use_bn,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off)
if self.resize_identity:
self.identity_conv = conv1x1_block(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
use_bias=use_bias,
use_bn=use_bn,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
activation=None)
self.activ = nn.Activation("relu")
def hybrid_forward(self, F, x):
if self.resize_identity:
identity = self.identity_conv(x)
else:
identity = x
x = self.body(x)
x = x + identity
x = self.activ(x)
return x
class ResInitBlock(HybridBlock):
"""
ResNet specific initial block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
bn_use_global_stats : bool, default False
        Whether global moving statistics are used instead of local batch-norm for BatchNorm layers.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
"""
def __init__(self,
in_channels,
out_channels,
bn_use_global_stats=False,
bn_cudnn_off=False,
**kwargs):
super(ResInitBlock, self).__init__(**kwargs)
with self.name_scope():
self.conv = conv7x7_block(
in_channels=in_channels,
out_channels=out_channels,
strides=2,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off)
self.pool = nn.MaxPool2D(
pool_size=3,
strides=2,
padding=1)
def hybrid_forward(self, F, x):
x = self.conv(x)
x = self.pool(x)
return x
class ResNet(HybridBlock):
"""
ResNet model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
conv1_stride : bool
Whether to use stride in the first or the second convolution layer in units.
bn_use_global_stats : bool, default False
        Whether global moving statistics are used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
bottleneck,
conv1_stride,
bn_use_global_stats=False,
bn_cudnn_off=False,
in_channels=3,
in_size=(224, 224),
classes=1000,
**kwargs):
super(ResNet, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
self.features.add(ResInitBlock(
in_channels=in_channels,
out_channels=init_block_channels,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
with stage.name_scope():
for j, out_channels in enumerate(channels_per_stage):
strides = 2 if (j == 0) and (i != 0) else 1
stage.add(ResUnit(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
bottleneck=bottleneck,
conv1_stride=conv1_stride))
in_channels = out_channels
self.features.add(stage)
self.features.add(nn.AvgPool2D(
pool_size=7,
strides=1))
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
self.output.add(nn.Dense(
units=classes,
in_units=in_channels))
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_resnet(blocks,
bottleneck=None,
conv1_stride=True,
width_scale=1.0,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create ResNet model with specific parameters.
Parameters:
----------
blocks : int
Number of blocks.
bottleneck : bool, default None
Whether to use a bottleneck or simple block in units.
conv1_stride : bool, default True
Whether to use stride in the first or the second convolution layer in units.
width_scale : float, default 1.0
Scale factor for width of layers.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
if bottleneck is None:
bottleneck = (blocks >= 50)
if blocks == 10:
layers = [1, 1, 1, 1]
elif blocks == 12:
layers = [2, 1, 1, 1]
    elif (blocks == 14) and not bottleneck:
layers = [2, 2, 1, 1]
elif (blocks == 14) and bottleneck:
layers = [1, 1, 1, 1]
elif blocks == 16:
layers = [2, 2, 2, 1]
elif blocks == 18:
layers = [2, 2, 2, 2]
elif (blocks == 26) and not bottleneck:
layers = [3, 3, 3, 3]
elif (blocks == 26) and bottleneck:
layers = [2, 2, 2, 2]
elif blocks == 34:
layers = [3, 4, 6, 3]
elif (blocks == 38) and bottleneck:
layers = [3, 3, 3, 3]
elif blocks == 50:
layers = [3, 4, 6, 3]
elif blocks == 101:
layers = [3, 4, 23, 3]
elif blocks == 152:
layers = [3, 8, 36, 3]
elif blocks == 200:
layers = [3, 24, 36, 3]
elif blocks == 269:
layers = [3, 30, 48, 8]
else:
raise ValueError("Unsupported ResNet with number of blocks: {}".format(blocks))
if bottleneck:
assert (sum(layers) * 3 + 2 == blocks)
else:
assert (sum(layers) * 2 + 2 == blocks)
init_block_channels = 64
channels_per_layers = [64, 128, 256, 512]
if bottleneck:
bottleneck_factor = 4
channels_per_layers = [ci * bottleneck_factor for ci in channels_per_layers]
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
if width_scale != 1.0:
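        # Scale the width of every unit except the very last one, which keeps its
        # original width, presumably so the final feature dimension (the classifier
        # input) matches the unscaled model.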
channels = [[int(cij * width_scale) if (i != len(channels) - 1) or (j != len(ci) - 1) else cij
for j, cij in enumerate(ci)] for i, ci in enumerate(channels)]
init_block_channels = int(init_block_channels * width_scale)
net = ResNet(
channels=channels,
init_block_channels=init_block_channels,
bottleneck=bottleneck,
conv1_stride=conv1_stride,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def resnet10(**kwargs):
"""
ResNet-10 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
It's an experimental model.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=10, model_name="resnet10", **kwargs)
def resnet12(**kwargs):
"""
ResNet-12 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
It's an experimental model.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=12, model_name="resnet12", **kwargs)
def resnet14(**kwargs):
"""
ResNet-14 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
It's an experimental model.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=14, model_name="resnet14", **kwargs)
def resnetbc14b(**kwargs):
"""
ResNet-BC-14b model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
It's an experimental model (bottleneck compressed).
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=14, bottleneck=True, conv1_stride=False, model_name="resnetbc14b", **kwargs)
def resnet16(**kwargs):
"""
ResNet-16 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
It's an experimental model.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=16, model_name="resnet16", **kwargs)
def resnet18_wd4(**kwargs):
"""
ResNet-18 model with 0.25 width scale from 'Deep Residual Learning for Image Recognition,'
https://arxiv.org/abs/1512.03385. It's an experimental model.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=18, width_scale=0.25, model_name="resnet18_wd4", **kwargs)
def resnet18_wd2(**kwargs):
"""
ResNet-18 model with 0.5 width scale from 'Deep Residual Learning for Image Recognition,'
https://arxiv.org/abs/1512.03385. It's an experimental model.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=18, width_scale=0.5, model_name="resnet18_wd2", **kwargs)
def resnet18_w3d4(**kwargs):
"""
ResNet-18 model with 0.75 width scale from 'Deep Residual Learning for Image Recognition,'
https://arxiv.org/abs/1512.03385. It's an experimental model.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=18, width_scale=0.75, model_name="resnet18_w3d4", **kwargs)
def resnet18(**kwargs):
"""
ResNet-18 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=18, model_name="resnet18", **kwargs)
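# Hedged usage sketch (not part of the library API; the helper name below is
# hypothetical and added for illustration). It builds an untrained ResNet-18
# and runs a dummy forward pass on CPU.
def _example_resnet18_inference():
    import mxnet as mx
    net = resnet18(pretrained=False)
    net.initialize(ctx=mx.cpu())
    x = mx.nd.zeros((1, 3, 224, 224))
    y = net(x)
    assert y.shape == (1, 1000)  # 1000 ImageNet classes by default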
def resnet26(**kwargs):
"""
ResNet-26 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
It's an experimental model.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=26, bottleneck=False, model_name="resnet26", **kwargs)
def resnetbc26b(**kwargs):
"""
ResNet-BC-26b model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
It's an experimental model (bottleneck compressed).
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=26, bottleneck=True, conv1_stride=False, model_name="resnetbc26b", **kwargs)
def resnet34(**kwargs):
"""
ResNet-34 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=34, model_name="resnet34", **kwargs)
def resnetbc38b(**kwargs):
"""
ResNet-BC-38b model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
It's an experimental model (bottleneck compressed).
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=38, bottleneck=True, conv1_stride=False, model_name="resnetbc38b", **kwargs)
def resnet50(**kwargs):
"""
ResNet-50 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=50, model_name="resnet50", **kwargs)
def resnet50b(**kwargs):
"""
ResNet-50 model with stride at the second convolution in bottleneck block from 'Deep Residual Learning for Image
Recognition,' https://arxiv.org/abs/1512.03385.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=50, conv1_stride=False, model_name="resnet50b", **kwargs)
def resnet101(**kwargs):
"""
ResNet-101 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=101, model_name="resnet101", **kwargs)
def resnet101b(**kwargs):
"""
ResNet-101 model with stride at the second convolution in bottleneck block from 'Deep Residual Learning for Image
Recognition,' https://arxiv.org/abs/1512.03385.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=101, conv1_stride=False, model_name="resnet101b", **kwargs)
def resnet152(**kwargs):
"""
ResNet-152 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=152, model_name="resnet152", **kwargs)
def resnet152b(**kwargs):
"""
ResNet-152 model with stride at the second convolution in bottleneck block from 'Deep Residual Learning for Image
Recognition,' https://arxiv.org/abs/1512.03385.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=152, conv1_stride=False, model_name="resnet152b", **kwargs)
def resnet200(**kwargs):
"""
ResNet-200 model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
It's an experimental model.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=200, model_name="resnet200", **kwargs)
def resnet200b(**kwargs):
"""
ResNet-200 model with stride at the second convolution in bottleneck block from 'Deep Residual Learning for Image
Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=200, conv1_stride=False, model_name="resnet200b", **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
resnet10,
resnet12,
resnet14,
resnetbc14b,
resnet16,
resnet18_wd4,
resnet18_wd2,
resnet18_w3d4,
resnet18,
resnet26,
resnetbc26b,
resnet34,
resnetbc38b,
resnet50,
resnet50b,
resnet101,
resnet101b,
resnet152,
resnet152b,
resnet200,
resnet200b,
]
for model in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != resnet10 or weight_count == 5418792)
assert (model != resnet12 or weight_count == 5492776)
assert (model != resnet14 or weight_count == 5788200)
assert (model != resnetbc14b or weight_count == 10064936)
assert (model != resnet16 or weight_count == 6968872)
assert (model != resnet18_wd4 or weight_count == 3937400)
assert (model != resnet18_wd2 or weight_count == 5804296)
assert (model != resnet18_w3d4 or weight_count == 8476056)
assert (model != resnet18 or weight_count == 11689512)
assert (model != resnet26 or weight_count == 17960232)
assert (model != resnetbc26b or weight_count == 15995176)
assert (model != resnet34 or weight_count == 21797672)
assert (model != resnetbc38b or weight_count == 21925416)
assert (model != resnet50 or weight_count == 25557032)
assert (model != resnet50b or weight_count == 25557032)
assert (model != resnet101 or weight_count == 44549160)
assert (model != resnet101b or weight_count == 44549160)
assert (model != resnet152 or weight_count == 60192808)
assert (model != resnet152b or weight_count == 60192808)
assert (model != resnet200 or weight_count == 64673832)
assert (model != resnet200b or weight_count == 64673832)
x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
y = net(x)
assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
| 30,943 | 34.123723 | 120 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/simpleposemobile_coco.py | """
SimplePose(Mobile) for COCO Keypoint, implemented in Gluon.
Original paper: 'Simple Baselines for Human Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208.
"""
__all__ = ['SimplePoseMobile', 'simplepose_mobile_resnet18_coco', 'simplepose_mobile_resnet50b_coco',
'simplepose_mobile_mobilenet_w1_coco', 'simplepose_mobile_mobilenetv2b_w1_coco',
'simplepose_mobile_mobilenetv3_small_w1_coco', 'simplepose_mobile_mobilenetv3_large_w1_coco']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1, DucBlock, HeatmapMaxDetBlock
from .resnet import resnet18, resnet50b
from .mobilenet import mobilenet_w1
from .mobilenetv2 import mobilenetv2b_w1
from .mobilenetv3 import mobilenetv3_small_w1, mobilenetv3_large_w1
class SimplePoseMobile(HybridBlock):
"""
SimplePose(Mobile) model from 'Simple Baselines for Human Pose Estimation and Tracking,'
https://arxiv.org/abs/1804.06208.
Parameters:
----------
backbone : nn.Sequential
Feature extractor.
backbone_out_channels : int
Number of output channels for the backbone.
channels : list of int
Number of output channels for each decoder unit.
decoder_init_block_channels : int
Number of output channels for the initial unit of the decoder.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
bn_cudnn_off : bool, default True
Whether to disable CUDNN batch normalization operator.
return_heatmap : bool, default False
Whether to return only heatmap.
fixed_size : bool, default True
Whether to expect fixed spatial size of input image.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (256, 192)
Spatial size of the expected input image.
keypoints : int, default 17
Number of keypoints.
"""
def __init__(self,
backbone,
backbone_out_channels,
channels,
decoder_init_block_channels,
bn_use_global_stats=False,
bn_cudnn_off=True,
return_heatmap=False,
fixed_size=True,
in_channels=3,
in_size=(256, 192),
keypoints=17,
**kwargs):
super(SimplePoseMobile, self).__init__(**kwargs)
assert (in_channels == 3)
self.in_size = in_size
self.keypoints = keypoints
self.return_heatmap = return_heatmap
with self.name_scope():
self.backbone = backbone
self.decoder = nn.HybridSequential(prefix="")
in_channels = backbone_out_channels
self.decoder.add(conv1x1(
in_channels=in_channels,
out_channels=decoder_init_block_channels))
in_channels = decoder_init_block_channels
for out_channels in channels:
self.decoder.add(DucBlock(
in_channels=in_channels,
out_channels=out_channels,
scale_factor=2,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off))
in_channels = out_channels
self.decoder.add(conv1x1(
in_channels=in_channels,
out_channels=keypoints))
self.heatmap_max_det = HeatmapMaxDetBlock(
channels=keypoints,
in_size=(in_size[0] // 4, in_size[1] // 4),
fixed_size=fixed_size)
def hybrid_forward(self, F, x):
x = self.backbone(x)
heatmap = self.decoder(x)
if self.return_heatmap:
return heatmap
else:
keypoints = self.heatmap_max_det(heatmap)
return keypoints
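# Resolution note (illustration only): with a stride-32 backbone, as for the
# ResNet/MobileNet feature extractors used below, the DucBlocks in the decoder
# (three of them for the default channels below) upsample by 2 each, i.e. 8x
# overall, so the heatmap has stride 4; this is why `in_size // 4` is passed
# to HeatmapMaxDetBlock above.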
def get_simpleposemobile(backbone,
backbone_out_channels,
keypoints,
bn_cudnn_off,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create SimplePose(Mobile) model with specific parameters.
Parameters:
----------
backbone : nn.Sequential
Feature extractor.
backbone_out_channels : int
Number of output channels for the backbone.
keypoints : int
Number of keypoints.
bn_cudnn_off : bool
Whether to disable CUDNN batch normalization operator.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
channels = [128, 64, 32]
decoder_init_block_channels = 256
net = SimplePoseMobile(
backbone=backbone,
backbone_out_channels=backbone_out_channels,
channels=channels,
decoder_init_block_channels=decoder_init_block_channels,
bn_cudnn_off=bn_cudnn_off,
keypoints=keypoints,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def simplepose_mobile_resnet18_coco(pretrained_backbone=False, keypoints=17, bn_cudnn_off=True, **kwargs):
"""
SimplePose(Mobile) model on the base of ResNet-18 for COCO Keypoint from 'Simple Baselines for Human Pose Estimation
and Tracking,' https://arxiv.org/abs/1804.06208.
Parameters:
----------
pretrained_backbone : bool, default False
Whether to load the pretrained weights for feature extractor.
keypoints : int, default 17
Number of keypoints.
bn_cudnn_off : bool, default True
Whether to disable CUDNN batch normalization operator.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
backbone = resnet18(pretrained=pretrained_backbone).features[:-1]
return get_simpleposemobile(backbone=backbone, backbone_out_channels=512, keypoints=keypoints,
bn_cudnn_off=bn_cudnn_off, model_name="simplepose_mobile_resnet18_coco", **kwargs)
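# Hedged usage sketch (not part of the library API; the helper name below is
# hypothetical). It runs an untrained SimplePose(Mobile) model in heatmap mode
# and checks the stride-4 output resolution.
def _example_simplepose_heatmap():
    import mxnet as mx
    net = simplepose_mobile_resnet18_coco(pretrained=False, return_heatmap=True)
    net.initialize(ctx=mx.cpu())
    x = mx.nd.zeros((1, 3, 256, 192))
    heatmap = net(x)
    assert heatmap.shape == (1, 17, 64, 48)  # one stride-4 map per keypoint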
def simplepose_mobile_resnet50b_coco(pretrained_backbone=False, keypoints=17, bn_cudnn_off=True, **kwargs):
"""
SimplePose(Mobile) model on the base of ResNet-50b for COCO Keypoint from 'Simple Baselines for Human Pose
Estimation and Tracking,' https://arxiv.org/abs/1804.06208.
Parameters:
----------
pretrained_backbone : bool, default False
Whether to load the pretrained weights for feature extractor.
keypoints : int, default 17
Number of keypoints.
bn_cudnn_off : bool, default True
Whether to disable CUDNN batch normalization operator.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
backbone = resnet50b(pretrained=pretrained_backbone).features[:-1]
return get_simpleposemobile(backbone=backbone, backbone_out_channels=2048, keypoints=keypoints,
bn_cudnn_off=bn_cudnn_off, model_name="simplepose_mobile_resnet50b_coco", **kwargs)
def simplepose_mobile_mobilenet_w1_coco(pretrained_backbone=False, keypoints=17, bn_cudnn_off=True, **kwargs):
"""
SimplePose(Mobile) model on the base of 1.0 MobileNet-224 for COCO Keypoint from 'Simple Baselines for Human Pose
Estimation and Tracking,' https://arxiv.org/abs/1804.06208.
Parameters:
----------
pretrained_backbone : bool, default False
Whether to load the pretrained weights for feature extractor.
keypoints : int, default 17
Number of keypoints.
bn_cudnn_off : bool, default True
Whether to disable CUDNN batch normalization operator.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
backbone = mobilenet_w1(pretrained=pretrained_backbone).features[:-1]
return get_simpleposemobile(backbone=backbone, backbone_out_channels=1024, keypoints=keypoints,
bn_cudnn_off=bn_cudnn_off, model_name="simplepose_mobile_mobilenet_w1_coco", **kwargs)
def simplepose_mobile_mobilenetv2b_w1_coco(pretrained_backbone=False, keypoints=17, bn_cudnn_off=True, **kwargs):
"""
SimplePose(Mobile) model on the base of 1.0 MobileNetV2b-224 for COCO Keypoint from 'Simple Baselines for Human Pose
Estimation and Tracking,' https://arxiv.org/abs/1804.06208.
Parameters:
----------
pretrained_backbone : bool, default False
Whether to load the pretrained weights for feature extractor.
keypoints : int, default 17
Number of keypoints.
bn_cudnn_off : bool, default True
Whether to disable CUDNN batch normalization operator.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
backbone = mobilenetv2b_w1(pretrained=pretrained_backbone).features[:-1]
return get_simpleposemobile(backbone=backbone, backbone_out_channels=1280, keypoints=keypoints,
bn_cudnn_off=bn_cudnn_off, model_name="simplepose_mobile_mobilenetv2b_w1_coco", **kwargs)
def simplepose_mobile_mobilenetv3_small_w1_coco(pretrained_backbone=False, keypoints=17, bn_cudnn_off=True, **kwargs):
"""
SimplePose(Mobile) model on the base of MobileNetV3 Small 224/1.0 for COCO Keypoint from 'Simple Baselines for Human
Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208.
Parameters:
----------
pretrained_backbone : bool, default False
Whether to load the pretrained weights for feature extractor.
keypoints : int, default 17
Number of keypoints.
bn_cudnn_off : bool, default True
Whether to disable CUDNN batch normalization operator.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
backbone = mobilenetv3_small_w1(pretrained=pretrained_backbone).features[:-1]
return get_simpleposemobile(backbone=backbone, backbone_out_channels=576, keypoints=keypoints,
bn_cudnn_off=bn_cudnn_off, model_name="simplepose_mobile_mobilenetv3_small_w1_coco",
**kwargs)
def simplepose_mobile_mobilenetv3_large_w1_coco(pretrained_backbone=False, keypoints=17, bn_cudnn_off=True, **kwargs):
"""
SimplePose(Mobile) model on the base of MobileNetV3 Large 224/1.0 for COCO Keypoint from 'Simple Baselines for Human
Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208.
Parameters:
----------
pretrained_backbone : bool, default False
Whether to load the pretrained weights for feature extractor.
keypoints : int, default 17
Number of keypoints.
bn_cudnn_off : bool, default True
Whether to disable CUDNN batch normalization operator.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
backbone = mobilenetv3_large_w1(pretrained=pretrained_backbone).features[:-1]
return get_simpleposemobile(backbone=backbone, backbone_out_channels=960, keypoints=keypoints,
bn_cudnn_off=bn_cudnn_off, model_name="simplepose_mobile_mobilenetv3_large_w1_coco",
**kwargs)
def _test():
import numpy as np
import mxnet as mx
in_size = (256, 192)
keypoints = 17
return_heatmap = True
pretrained = False
models = [
simplepose_mobile_resnet18_coco,
simplepose_mobile_resnet50b_coco,
simplepose_mobile_mobilenet_w1_coco,
simplepose_mobile_mobilenetv2b_w1_coco,
simplepose_mobile_mobilenetv3_small_w1_coco,
simplepose_mobile_mobilenetv3_large_w1_coco,
]
for model in models:
net = model(pretrained=pretrained, in_size=in_size, return_heatmap=return_heatmap)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != simplepose_mobile_resnet18_coco or weight_count == 12858208)
assert (model != simplepose_mobile_resnet50b_coco or weight_count == 25582944)
assert (model != simplepose_mobile_mobilenet_w1_coco or weight_count == 5019744)
# assert (model != simplepose_mobile_mobilenetv2b_w1_coco or weight_count == 4102176)
assert (model != simplepose_mobile_mobilenetv3_small_w1_coco or weight_count == 2625088)
assert (model != simplepose_mobile_mobilenetv3_large_w1_coco or weight_count == 4768336)
batch = 14
x = mx.nd.random.normal(shape=(batch, 3, in_size[0], in_size[1]), ctx=ctx)
y = net(x)
assert ((y.shape[0] == batch) and (y.shape[1] == keypoints))
if return_heatmap:
assert ((y.shape[2] == x.shape[2] // 4) and (y.shape[3] == x.shape[3] // 4))
else:
assert (y.shape[2] == 3)
if __name__ == "__main__":
_test()
| 15,126 | 40.217984 | 121 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/cbamresnet.py | """
CBAM-ResNet for ImageNet-1K, implemented in Gluon.
Original paper: 'CBAM: Convolutional Block Attention Module,' https://arxiv.org/abs/1807.06521.
"""
__all__ = ['CbamResNet', 'cbam_resnet18', 'cbam_resnet34', 'cbam_resnet50', 'cbam_resnet101', 'cbam_resnet152']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1_block, conv7x7_block
from .resnet import ResInitBlock, ResBlock, ResBottleneck
class MLP(HybridBlock):
"""
Multilayer perceptron block.
Parameters:
----------
channels : int
Number of input/output channels.
reduction_ratio : int, default 16
Channel reduction ratio.
"""
def __init__(self,
channels,
reduction_ratio=16,
**kwargs):
super(MLP, self).__init__(**kwargs)
mid_channels = channels // reduction_ratio
with self.name_scope():
self.flatten = nn.Flatten()
self.fc1 = nn.Dense(
units=mid_channels,
in_units=channels)
self.activ = nn.Activation("relu")
self.fc2 = nn.Dense(
units=channels,
in_units=mid_channels)
def hybrid_forward(self, F, x):
x = self.flatten(x)
x = self.fc1(x)
x = self.activ(x)
x = self.fc2(x)
return x
class ChannelGate(HybridBlock):
"""
CBAM channel gate block.
Parameters:
----------
channels : int
Number of input/output channels.
reduction_ratio : int, default 16
Channel reduction ratio.
"""
def __init__(self,
channels,
reduction_ratio=16,
**kwargs):
super(ChannelGate, self).__init__(**kwargs)
with self.name_scope():
self.avg_pool = nn.GlobalAvgPool2D()
self.max_pool = nn.GlobalMaxPool2D()
self.mlp = MLP(
channels=channels,
reduction_ratio=reduction_ratio)
self.sigmoid = nn.Activation("sigmoid")
def hybrid_forward(self, F, x):
att1 = self.avg_pool(x)
att1 = self.mlp(att1)
att2 = self.max_pool(x)
att2 = self.mlp(att2)
att = att1 + att2
att = self.sigmoid(att)
att = att.expand_dims(2).expand_dims(3).broadcast_like(x)
x = x * att
return x
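# Shape note (illustration only): both pooled descriptors go through the same
# MLP (shared weights), their sum is squashed by a sigmoid into an (N, C)
# attention vector, which is then expanded to (N, C, 1, 1) and broadcast over
# the spatial dimensions before the elementwise multiplication.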
class SpatialGate(HybridBlock):
"""
CBAM spatial gate block.
Parameters:
----------
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
bn_use_global_stats,
**kwargs):
super(SpatialGate, self).__init__(**kwargs)
with self.name_scope():
self.conv = conv7x7_block(
in_channels=2,
out_channels=1,
bn_use_global_stats=bn_use_global_stats,
activation=None)
self.sigmoid = nn.Activation("sigmoid")
def hybrid_forward(self, F, x):
att1 = x.max(axis=1).expand_dims(1)
att2 = x.mean(axis=1).expand_dims(1)
att = F.concat(att1, att2, dim=1)
att = self.conv(att)
att = self.sigmoid(att).broadcast_like(x)
x = x * att
return x
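# Shape note (illustration only): max(axis=1) and mean(axis=1) each collapse
# the channel dimension to one map; concatenated they form a 2-channel spatial
# descriptor, which the 7x7 convolution turns into a single-channel attention
# map that is broadcast back over all channels.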
class CbamBlock(HybridBlock):
"""
CBAM attention block for CBAM-ResNet.
Parameters:
----------
channels : int
Number of input/output channels.
reduction_ratio : int, default 16
Channel reduction ratio.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
channels,
reduction_ratio=16,
bn_use_global_stats=False,
**kwargs):
super(CbamBlock, self).__init__(**kwargs)
with self.name_scope():
self.ch_gate = ChannelGate(
channels=channels,
reduction_ratio=reduction_ratio)
self.sp_gate = SpatialGate(bn_use_global_stats=bn_use_global_stats)
def hybrid_forward(self, F, x):
x = self.ch_gate(x)
x = self.sp_gate(x)
return x
class CbamResUnit(HybridBlock):
"""
CBAM-ResNet unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
"""
def __init__(self,
in_channels,
out_channels,
strides,
bn_use_global_stats,
bottleneck,
**kwargs):
super(CbamResUnit, self).__init__(**kwargs)
self.resize_identity = (in_channels != out_channels) or (strides != 1)
with self.name_scope():
if bottleneck:
self.body = ResBottleneck(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats,
conv1_stride=False)
else:
self.body = ResBlock(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats)
if self.resize_identity:
self.identity_conv = conv1x1_block(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats,
activation=None)
self.cbam = CbamBlock(channels=out_channels)
self.activ = nn.Activation("relu")
def hybrid_forward(self, F, x):
if self.resize_identity:
identity = self.identity_conv(x)
else:
identity = x
x = self.body(x)
x = self.cbam(x)
x = x + identity
x = self.activ(x)
return x
class CbamResNet(HybridBlock):
"""
CBAM-ResNet model from 'CBAM: Convolutional Block Attention Module,' https://arxiv.org/abs/1807.06521.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
bottleneck,
bn_use_global_stats=False,
in_channels=3,
in_size=(224, 224),
classes=1000,
**kwargs):
super(CbamResNet, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
self.features.add(ResInitBlock(
in_channels=in_channels,
out_channels=init_block_channels,
bn_use_global_stats=bn_use_global_stats))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
with stage.name_scope():
for j, out_channels in enumerate(channels_per_stage):
strides = 2 if (j == 0) and (i != 0) else 1
stage.add(CbamResUnit(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats,
bottleneck=bottleneck))
in_channels = out_channels
self.features.add(stage)
self.features.add(nn.AvgPool2D(
pool_size=7,
strides=1))
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
self.output.add(nn.Dense(
units=classes,
in_units=in_channels))
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_resnet(blocks,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create CBAM-ResNet model with specific parameters.
Parameters:
----------
blocks : int
Number of blocks.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
if blocks == 18:
layers = [2, 2, 2, 2]
elif blocks == 34:
layers = [3, 4, 6, 3]
elif blocks == 50:
layers = [3, 4, 6, 3]
elif blocks == 101:
layers = [3, 4, 23, 3]
elif blocks == 152:
layers = [3, 8, 36, 3]
else:
raise ValueError("Unsupported CBAM-ResNet with number of blocks: {}".format(blocks))
init_block_channels = 64
if blocks < 50:
channels_per_layers = [64, 128, 256, 512]
bottleneck = False
else:
channels_per_layers = [256, 512, 1024, 2048]
bottleneck = True
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
net = CbamResNet(
channels=channels,
init_block_channels=init_block_channels,
bottleneck=bottleneck,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def cbam_resnet18(**kwargs):
"""
CBAM-ResNet-18 model from 'CBAM: Convolutional Block Attention Module,' https://arxiv.org/abs/1807.06521.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=18, model_name="cbam_resnet18", **kwargs)
def cbam_resnet34(**kwargs):
"""
CBAM-ResNet-34 model from 'CBAM: Convolutional Block Attention Module,' https://arxiv.org/abs/1807.06521.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=34, model_name="cbam_resnet34", **kwargs)
def cbam_resnet50(**kwargs):
"""
CBAM-ResNet-50 model from 'CBAM: Convolutional Block Attention Module,' https://arxiv.org/abs/1807.06521.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=50, model_name="cbam_resnet50", **kwargs)
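# Hedged usage sketch (not part of the library API; the helper name below is
# hypothetical). It builds an untrained CBAM-ResNet-50 and runs a dummy
# forward pass on CPU.
def _example_cbam_resnet50_inference():
    import mxnet as mx
    net = cbam_resnet50(pretrained=False)
    net.initialize(ctx=mx.cpu())
    x = mx.nd.zeros((1, 3, 224, 224))
    y = net(x)
    assert y.shape == (1, 1000)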
def cbam_resnet101(**kwargs):
"""
CBAM-ResNet-101 model from 'CBAM: Convolutional Block Attention Module,' https://arxiv.org/abs/1807.06521.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=101, model_name="cbam_resnet101", **kwargs)
def cbam_resnet152(**kwargs):
"""
CBAM-ResNet-152 model from 'CBAM: Convolutional Block Attention Module,' https://arxiv.org/abs/1807.06521.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resnet(blocks=152, model_name="cbam_resnet152", **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
# cbam_resnet18,
# cbam_resnet34,
cbam_resnet50,
# cbam_resnet101,
# cbam_resnet152,
]
for model in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != cbam_resnet18 or weight_count == 11779392)
assert (model != cbam_resnet34 or weight_count == 21960468)
assert (model != cbam_resnet50 or weight_count == 28089624)
assert (model != cbam_resnet101 or weight_count == 49330172)
assert (model != cbam_resnet152 or weight_count == 66826848)
x = mx.nd.zeros((2, 3, 224, 224), ctx=ctx)
y = net(x)
assert (y.shape == (2, 1000))
if __name__ == "__main__":
_test()
| 15,289 | 30.987448 | 115 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/diracnetv2.py | """
DiracNetV2 for ImageNet-1K, implemented in Gluon.
Original paper: 'DiracNets: Training Very Deep Neural Networks Without Skip-Connections,'
https://arxiv.org/abs/1706.00388.
"""
__all__ = ['DiracNetV2', 'diracnet18v2', 'diracnet34v2']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
class DiracConv(HybridBlock):
"""
DiracNetV2 specific convolution block with pre-activation.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of 2 int
Convolution window size.
strides : int or tuple/list of 2 int
Strides of the convolution.
padding : int or tuple/list of 2 int
Padding value for convolution layer.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
strides,
padding,
**kwargs):
super(DiracConv, self).__init__(**kwargs)
with self.name_scope():
self.activ = nn.Activation("relu")
self.conv = nn.Conv2D(
channels=out_channels,
kernel_size=kernel_size,
strides=strides,
padding=padding,
use_bias=True,
in_channels=in_channels)
def hybrid_forward(self, F, x):
x = self.activ(x)
x = self.conv(x)
return x
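# Ordering note (illustration only): DiracConv applies ReLU before the
# convolution (pre-activation), so stacked blocks form plain
# activation-convolution chains with biases and no batch normalization.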
def dirac_conv3x3(in_channels,
out_channels,
**kwargs):
"""
3x3 version of the DiracNetV2 specific convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
"""
return DiracConv(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
strides=1,
padding=1,
**kwargs)
class DiracInitBlock(HybridBlock):
"""
DiracNetV2 specific initial block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
"""
def __init__(self,
in_channels,
out_channels,
**kwargs):
super(DiracInitBlock, self).__init__(**kwargs)
with self.name_scope():
self.conv = nn.Conv2D(
channels=out_channels,
kernel_size=7,
strides=2,
padding=3,
use_bias=True,
in_channels=in_channels)
self.pool = nn.MaxPool2D(
pool_size=3,
strides=2,
padding=1)
def hybrid_forward(self, F, x):
x = self.conv(x)
x = self.pool(x)
return x
class DiracNetV2(HybridBlock):
"""
DiracNetV2 model from 'DiracNets: Training Very Deep Neural Networks Without Skip-Connections,'
https://arxiv.org/abs/1706.00388.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
in_channels=3,
in_size=(224, 224),
classes=1000,
**kwargs):
super(DiracNetV2, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
self.features.add(DiracInitBlock(
in_channels=in_channels,
out_channels=init_block_channels))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
with stage.name_scope():
for j, out_channels in enumerate(channels_per_stage):
stage.add(dirac_conv3x3(
in_channels=in_channels,
out_channels=out_channels))
in_channels = out_channels
if i != len(channels) - 1:
stage.add(nn.MaxPool2D(
pool_size=2,
strides=2,
padding=0))
self.features.add(stage)
self.features.add(nn.Activation("relu"))
self.features.add(nn.AvgPool2D(
pool_size=7,
strides=1))
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
self.output.add(nn.Dense(
units=classes,
in_units=in_channels))
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_diracnetv2(blocks,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create DiracNetV2 model with specific parameters.
Parameters:
----------
blocks : int
Number of blocks.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
if blocks == 18:
layers = [4, 4, 4, 4]
elif blocks == 34:
layers = [6, 8, 12, 6]
else:
raise ValueError("Unsupported DiracNetV2 with number of blocks: {}".format(blocks))
channels_per_layers = [64, 128, 256, 512]
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
init_block_channels = 64
net = DiracNetV2(
channels=channels,
init_block_channels=init_block_channels,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def diracnet18v2(**kwargs):
"""
    DiracNetV2-18 model from 'DiracNets: Training Very Deep Neural Networks Without Skip-Connections,'
https://arxiv.org/abs/1706.00388.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_diracnetv2(blocks=18, model_name="diracnet18v2", **kwargs)
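# Hedged usage sketch (not part of the library API; the helper name below is
# hypothetical). It builds an untrained DiracNetV2-18 and runs a dummy forward
# pass on CPU.
def _example_diracnet18v2_inference():
    import mxnet as mx
    net = diracnet18v2(pretrained=False)
    net.initialize(ctx=mx.cpu())
    x = mx.nd.zeros((1, 3, 224, 224))
    y = net(x)
    assert y.shape == (1, 1000)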
def diracnet34v2(**kwargs):
"""
    DiracNetV2-34 model from 'DiracNets: Training Very Deep Neural Networks Without Skip-Connections,'
https://arxiv.org/abs/1706.00388.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_diracnetv2(blocks=34, model_name="diracnet34v2", **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
diracnet18v2,
diracnet34v2,
]
for model in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != diracnet18v2 or weight_count == 11511784)
assert (model != diracnet34v2 or weight_count == 21616232)
x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
y = net(x)
assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
| 9,008 | 29.03 | 115 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/sepreresnet_cifar.py | """
SE-PreResNet for CIFAR/SVHN, implemented in Gluon.
Original paper: 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
"""
__all__ = ['CIFARSEPreResNet', 'sepreresnet20_cifar10', 'sepreresnet20_cifar100', 'sepreresnet20_svhn',
'sepreresnet56_cifar10', 'sepreresnet56_cifar100', 'sepreresnet56_svhn',
'sepreresnet110_cifar10', 'sepreresnet110_cifar100', 'sepreresnet110_svhn',
'sepreresnet164bn_cifar10', 'sepreresnet164bn_cifar100', 'sepreresnet164bn_svhn',
'sepreresnet272bn_cifar10', 'sepreresnet272bn_cifar100', 'sepreresnet272bn_svhn',
'sepreresnet542bn_cifar10', 'sepreresnet542bn_cifar100', 'sepreresnet542bn_svhn',
'sepreresnet1001_cifar10', 'sepreresnet1001_cifar100', 'sepreresnet1001_svhn',
'sepreresnet1202_cifar10', 'sepreresnet1202_cifar100', 'sepreresnet1202_svhn']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv3x3_block
from .sepreresnet import SEPreResUnit
class CIFARSEPreResNet(HybridBlock):
"""
SE-PreResNet model for CIFAR from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (32, 32)
Spatial size of the expected input image.
classes : int, default 10
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
bottleneck,
bn_use_global_stats=False,
in_channels=3,
in_size=(32, 32),
classes=10,
**kwargs):
super(CIFARSEPreResNet, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
self.features.add(conv3x3_block(
in_channels=in_channels,
out_channels=init_block_channels,
bn_use_global_stats=bn_use_global_stats))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
with stage.name_scope():
for j, out_channels in enumerate(channels_per_stage):
strides = 2 if (j == 0) and (i != 0) else 1
stage.add(SEPreResUnit(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats,
bottleneck=bottleneck,
conv1_stride=False))
in_channels = out_channels
self.features.add(stage)
self.features.add(nn.AvgPool2D(
pool_size=8,
strides=1))
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
self.output.add(nn.Dense(
units=classes,
in_units=in_channels))
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_sepreresnet_cifar(classes,
blocks,
bottleneck,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create SE-PreResNet model for CIFAR with specific parameters.
Parameters:
----------
classes : int
Number of classification classes.
blocks : int
Number of blocks.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
assert (classes in [10, 100])
if bottleneck:
assert ((blocks - 2) % 9 == 0)
layers = [(blocks - 2) // 9] * 3
else:
assert ((blocks - 2) % 6 == 0)
layers = [(blocks - 2) // 6] * 3
channels_per_layers = [16, 32, 64]
init_block_channels = 16
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
if bottleneck:
channels = [[cij * 4 for cij in ci] for ci in channels]
net = CIFARSEPreResNet(
channels=channels,
init_block_channels=init_block_channels,
bottleneck=bottleneck,
classes=classes,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
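# Depth bookkeeping sketch (illustration only): (blocks - 2) must be divisible
# by 6 for basic units or by 9 for bottleneck units. E.g. blocks=20 gives
# layers = [3, 3, 3] with channels [16, 32, 64], while blocks=164 gives
# layers = [18, 18, 18] with channels [64, 128, 256] after the 4x bottleneck
# expansion.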
def sepreresnet20_cifar10(classes=10, **kwargs):
"""
SE-PreResNet-20 model for CIFAR-10 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_sepreresnet_cifar(classes=classes, blocks=20, bottleneck=False, model_name="sepreresnet20_cifar10",
**kwargs)
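# Hedged usage sketch (not part of the library API; the helper name below is
# hypothetical). It builds an untrained SE-PreResNet-20 for CIFAR-10 and runs
# a dummy forward pass on a 32x32 input.
def _example_sepreresnet20_cifar10_inference():
    import mxnet as mx
    net = sepreresnet20_cifar10(pretrained=False)
    net.initialize(ctx=mx.cpu())
    x = mx.nd.zeros((1, 3, 32, 32))
    y = net(x)
    assert y.shape == (1, 10)  # 10 CIFAR-10 classes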
def sepreresnet20_cifar100(classes=100, **kwargs):
"""
SE-PreResNet-20 model for CIFAR-100 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_sepreresnet_cifar(classes=classes, blocks=20, bottleneck=False, model_name="sepreresnet20_cifar100",
**kwargs)
def sepreresnet20_svhn(classes=10, **kwargs):
"""
SE-PreResNet-20 model for SVHN from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_sepreresnet_cifar(classes=classes, blocks=20, bottleneck=False, model_name="sepreresnet20_svhn",
**kwargs)
def sepreresnet56_cifar10(classes=10, **kwargs):
"""
SE-PreResNet-56 model for CIFAR-10 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_sepreresnet_cifar(classes=classes, blocks=56, bottleneck=False, model_name="sepreresnet56_cifar10",
**kwargs)
def sepreresnet56_cifar100(classes=100, **kwargs):
"""
SE-PreResNet-56 model for CIFAR-100 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_sepreresnet_cifar(classes=classes, blocks=56, bottleneck=False, model_name="sepreresnet56_cifar100",
**kwargs)
def sepreresnet56_svhn(classes=10, **kwargs):
"""
SE-PreResNet-56 model for SVHN from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_sepreresnet_cifar(classes=classes, blocks=56, bottleneck=False, model_name="sepreresnet56_svhn",
**kwargs)
def sepreresnet110_cifar10(classes=10, **kwargs):
"""
SE-PreResNet-110 model for CIFAR-10 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_sepreresnet_cifar(classes=classes, blocks=110, bottleneck=False, model_name="sepreresnet110_cifar10",
**kwargs)
def sepreresnet110_cifar100(classes=100, **kwargs):
"""
SE-PreResNet-110 model for CIFAR-100 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_sepreresnet_cifar(classes=classes, blocks=110, bottleneck=False, model_name="sepreresnet110_cifar100",
**kwargs)
def sepreresnet110_svhn(classes=10, **kwargs):
"""
SE-PreResNet-110 model for SVHN from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_sepreresnet_cifar(classes=classes, blocks=110, bottleneck=False, model_name="sepreresnet110_svhn",
**kwargs)
def sepreresnet164bn_cifar10(classes=10, **kwargs):
"""
SE-PreResNet-164(BN) model for CIFAR-10 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_sepreresnet_cifar(classes=classes, blocks=164, bottleneck=True, model_name="sepreresnet164bn_cifar10",
**kwargs)
def sepreresnet164bn_cifar100(classes=100, **kwargs):
"""
SE-PreResNet-164(BN) model for CIFAR-100 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_sepreresnet_cifar(classes=classes, blocks=164, bottleneck=True, model_name="sepreresnet164bn_cifar100",
**kwargs)
def sepreresnet164bn_svhn(classes=10, **kwargs):
"""
SE-PreResNet-164(BN) model for SVHN from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_sepreresnet_cifar(classes=classes, blocks=164, bottleneck=True, model_name="sepreresnet164bn_svhn",
**kwargs)
def sepreresnet272bn_cifar10(classes=10, **kwargs):
"""
SE-PreResNet-272(BN) model for CIFAR-10 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_sepreresnet_cifar(classes=classes, blocks=272, bottleneck=True, model_name="sepreresnet272bn_cifar10",
**kwargs)
def sepreresnet272bn_cifar100(classes=100, **kwargs):
"""
SE-PreResNet-272(BN) model for CIFAR-100 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_sepreresnet_cifar(classes=classes, blocks=272, bottleneck=True, model_name="sepreresnet272bn_cifar100",
**kwargs)
def sepreresnet272bn_svhn(classes=10, **kwargs):
"""
SE-PreResNet-272(BN) model for SVHN from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_sepreresnet_cifar(classes=classes, blocks=272, bottleneck=True, model_name="sepreresnet272bn_svhn",
**kwargs)
def sepreresnet542bn_cifar10(classes=10, **kwargs):
"""
SE-PreResNet-542(BN) model for CIFAR-10 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_sepreresnet_cifar(classes=classes, blocks=542, bottleneck=True, model_name="sepreresnet542bn_cifar10",
**kwargs)
def sepreresnet542bn_cifar100(classes=100, **kwargs):
"""
SE-PreResNet-542(BN) model for CIFAR-100 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_sepreresnet_cifar(classes=classes, blocks=542, bottleneck=True, model_name="sepreresnet542bn_cifar100",
**kwargs)
def sepreresnet542bn_svhn(classes=10, **kwargs):
"""
SE-PreResNet-542(BN) model for SVHN from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_sepreresnet_cifar(classes=classes, blocks=542, bottleneck=True, model_name="sepreresnet542bn_svhn",
**kwargs)
def sepreresnet1001_cifar10(classes=10, **kwargs):
"""
SE-PreResNet-1001 model for CIFAR-10 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_sepreresnet_cifar(classes=classes, blocks=1001, bottleneck=True, model_name="sepreresnet1001_cifar10",
**kwargs)
def sepreresnet1001_cifar100(classes=100, **kwargs):
"""
SE-PreResNet-1001 model for CIFAR-100 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_sepreresnet_cifar(classes=classes, blocks=1001, bottleneck=True, model_name="sepreresnet1001_cifar100",
**kwargs)
def sepreresnet1001_svhn(classes=10, **kwargs):
"""
SE-PreResNet-1001 model for SVHN from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_sepreresnet_cifar(classes=classes, blocks=1001, bottleneck=True, model_name="sepreresnet1001_svhn",
**kwargs)
def sepreresnet1202_cifar10(classes=10, **kwargs):
"""
SE-PreResNet-1202 model for CIFAR-10 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_sepreresnet_cifar(classes=classes, blocks=1202, bottleneck=False, model_name="sepreresnet1202_cifar10",
**kwargs)
def sepreresnet1202_cifar100(classes=100, **kwargs):
"""
SE-PreResNet-1202 model for CIFAR-100 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_sepreresnet_cifar(classes=classes, blocks=1202, bottleneck=False, model_name="sepreresnet1202_cifar100",
**kwargs)
def sepreresnet1202_svhn(classes=10, **kwargs):
"""
SE-PreResNet-1202 model for SVHN from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_sepreresnet_cifar(classes=classes, blocks=1202, bottleneck=False, model_name="sepreresnet1202_svhn",
**kwargs)
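# Hedged usage sketch (added for illustration; not part of the original file):
# any constructor above can be driven the same way. The model choice, context,
# and batch size below are arbitrary assumptions.
def _usage_sketch():
    import mxnet as mx
    net = sepreresnet164bn_cifar10(pretrained=False)
    net.initialize(ctx=mx.cpu())
    x = mx.nd.zeros((2, 3, 32, 32))  # two CIFAR-10 sized images
    y = net(x)
    assert y.shape == (2, 10)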
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
(sepreresnet20_cifar10, 10),
(sepreresnet20_cifar100, 100),
(sepreresnet20_svhn, 10),
(sepreresnet56_cifar10, 10),
(sepreresnet56_cifar100, 100),
(sepreresnet56_svhn, 10),
(sepreresnet110_cifar10, 10),
(sepreresnet110_cifar100, 100),
(sepreresnet110_svhn, 10),
(sepreresnet164bn_cifar10, 10),
(sepreresnet164bn_cifar100, 100),
(sepreresnet164bn_svhn, 10),
(sepreresnet272bn_cifar10, 10),
(sepreresnet272bn_cifar100, 100),
(sepreresnet272bn_svhn, 10),
(sepreresnet542bn_cifar10, 10),
(sepreresnet542bn_cifar100, 100),
(sepreresnet542bn_svhn, 10),
(sepreresnet1001_cifar10, 10),
(sepreresnet1001_cifar100, 100),
(sepreresnet1001_svhn, 10),
(sepreresnet1202_cifar10, 10),
(sepreresnet1202_cifar100, 100),
(sepreresnet1202_svhn, 10),
]
for model, classes in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != sepreresnet20_cifar10 or weight_count == 274559)
assert (model != sepreresnet20_cifar100 or weight_count == 280409)
assert (model != sepreresnet20_svhn or weight_count == 274559)
assert (model != sepreresnet56_cifar10 or weight_count == 862601)
assert (model != sepreresnet56_cifar100 or weight_count == 868451)
assert (model != sepreresnet56_svhn or weight_count == 862601)
assert (model != sepreresnet110_cifar10 or weight_count == 1744664)
assert (model != sepreresnet110_cifar100 or weight_count == 1750514)
assert (model != sepreresnet110_svhn or weight_count == 1744664)
assert (model != sepreresnet164bn_cifar10 or weight_count == 1904882)
assert (model != sepreresnet164bn_cifar100 or weight_count == 1928012)
assert (model != sepreresnet164bn_svhn or weight_count == 1904882)
assert (model != sepreresnet272bn_cifar10 or weight_count == 3152450)
assert (model != sepreresnet272bn_cifar100 or weight_count == 3175580)
assert (model != sepreresnet272bn_svhn or weight_count == 3152450)
assert (model != sepreresnet542bn_cifar10 or weight_count == 6271370)
assert (model != sepreresnet542bn_cifar100 or weight_count == 6294500)
assert (model != sepreresnet542bn_svhn or weight_count == 6271370)
assert (model != sepreresnet1001_cifar10 or weight_count == 11573534)
assert (model != sepreresnet1001_cifar100 or weight_count == 11596664)
assert (model != sepreresnet1001_svhn or weight_count == 11573534)
assert (model != sepreresnet1202_cifar10 or weight_count == 19581938)
assert (model != sepreresnet1202_cifar100 or weight_count == 19587788)
assert (model != sepreresnet1202_svhn or weight_count == 19581938)
x = mx.nd.zeros((1, 3, 32, 32), ctx=ctx)
y = net(x)
assert (y.shape == (1, classes))
if __name__ == "__main__":
_test()
| 26,868 | 37.604885 | 119 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/danet.py | """
DANet for image segmentation, implemented in Gluon.
Original paper: 'Dual Attention Network for Scene Segmentation,' https://arxiv.org/abs/1809.02983.
"""
__all__ = ['DANet', 'danet_resnetd50b_cityscapes', 'danet_resnetd101b_cityscapes', 'ScaleBlock']
import os
import mxnet as mx
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1, conv3x3_block
from .resnetd import resnetd50b, resnetd101b
class ScaleBlock(HybridBlock):
"""
Simple scale block.
"""
def __init__(self,
**kwargs):
super(ScaleBlock, self).__init__(**kwargs)
with self.name_scope():
self.alpha = self.params.get(
"alpha",
shape=(1,),
init=mx.init.Zero(),
allow_deferred_init=True)
def hybrid_forward(self, F, x, alpha):
return F.broadcast_mul(alpha, x)
def __repr__(self):
s = '{name}(alpha={alpha})'
return s.format(
name=self.__class__.__name__,
            alpha=self.alpha.shape[0])
def calc_flops(self, x):
assert (x.shape[0] == 1)
num_flops = x.size
num_macs = 0
return num_flops, num_macs
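# Minimal sketch (added for illustration): ScaleBlock multiplies its input by a
# single learned scalar that starts at zero, so at initialization the attention
# term vanishes and the residual sums in the blocks below reduce to identity.
def _scale_block_sketch():
    block = ScaleBlock()
    block.initialize()
    y = block(mx.nd.ones((1, 4, 8, 8)))
    assert float(y.abs().sum().asscalar()) == 0.0  # alpha is zero-initialized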
class PosAttBlock(HybridBlock):
"""
Position attention block from 'Dual Attention Network for Scene Segmentation,' https://arxiv.org/abs/1809.02983.
It captures long-range spatial contextual information.
Parameters:
----------
channels : int
Number of channels.
reduction : int, default 8
Squeeze reduction value.
"""
def __init__(self,
channels,
reduction=8,
**kwargs):
super(PosAttBlock, self).__init__(**kwargs)
mid_channels = channels // reduction
with self.name_scope():
self.query_conv = conv1x1(
in_channels=channels,
out_channels=mid_channels,
use_bias=True)
self.key_conv = conv1x1(
in_channels=channels,
out_channels=mid_channels,
use_bias=True)
self.value_conv = conv1x1(
in_channels=channels,
out_channels=channels,
use_bias=True)
self.scale = ScaleBlock()
def hybrid_forward(self, F, x):
proj_query = self.query_conv(x).reshape((0, 0, -1))
proj_key = self.key_conv(x).reshape((0, 0, -1))
proj_value = self.value_conv(x).reshape((0, 0, -1))
energy = F.batch_dot(proj_query, proj_key, transpose_a=True)
w = F.softmax(energy)
y = F.batch_dot(proj_value, w, transpose_b=True)
y = F.reshape_like(y, x, lhs_begin=2, lhs_end=None, rhs_begin=2, rhs_end=None)
y = self.scale(y) + x
return y
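# Shape sketch (added for illustration; sizes are arbitrary assumptions): for an
# NxCxHxW input the queries/keys are reduced to C // 8 channels, the energy
# matrix is (H*W)x(H*W), and the output keeps the input shape so the scaled
# attention result can be added back onto x.
def _pos_att_sketch():
    att = PosAttBlock(channels=16)
    att.initialize()
    x = mx.nd.random.normal(shape=(1, 16, 6, 6))
    assert att(x).shape == x.shape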
class ChaAttBlock(HybridBlock):
"""
Channel attention block from 'Dual Attention Network for Scene Segmentation,' https://arxiv.org/abs/1809.02983.
It explicitly models interdependencies between channels.
"""
def __init__(self,
**kwargs):
super(ChaAttBlock, self).__init__(**kwargs)
with self.name_scope():
self.scale = ScaleBlock()
def hybrid_forward(self, F, x):
proj_query = x.reshape((0, 0, -1))
proj_key = x.reshape((0, 0, -1))
proj_value = x.reshape((0, 0, -1))
energy = F.batch_dot(proj_query, proj_key, transpose_b=True)
energy_new = energy.max(axis=-1, keepdims=True).broadcast_like(energy) - energy
w = F.softmax(energy_new)
y = F.batch_dot(w, proj_value)
y = F.reshape_like(y, x, lhs_begin=2, lhs_end=None, rhs_begin=2, rhs_end=None)
y = self.scale(y) + x
return y
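# Companion sketch (added for illustration): channel attention forms a CxC
# energy matrix straight from the flattened features, so apart from its
# ScaleBlock it is parameter-free and preserves the input shape.
def _cha_att_sketch():
    att = ChaAttBlock()
    att.initialize()
    x = mx.nd.random.normal(shape=(1, 16, 6, 6))
    assert att(x).shape == x.shape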
class DANetHeadBranch(HybridBlock):
"""
DANet head branch.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
pose_att : bool, default True
Whether to use position attention instead of channel one.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
"""
def __init__(self,
in_channels,
out_channels,
pose_att=True,
bn_use_global_stats=False,
bn_cudnn_off=False,
**kwargs):
super(DANetHeadBranch, self).__init__(**kwargs)
mid_channels = in_channels // 4
dropout_rate = 0.1
with self.name_scope():
self.conv1 = conv3x3_block(
in_channels=in_channels,
out_channels=mid_channels,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off)
if pose_att:
self.att = PosAttBlock(mid_channels)
else:
self.att = ChaAttBlock()
self.conv2 = conv3x3_block(
in_channels=mid_channels,
out_channels=mid_channels,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off)
self.conv3 = conv1x1(
in_channels=mid_channels,
out_channels=out_channels,
use_bias=True)
self.dropout = nn.Dropout(rate=dropout_rate)
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.att(x)
y = self.conv2(x)
x = self.conv3(y)
x = self.dropout(x)
return x, y
class DANetHead(HybridBlock):
"""
DANet head block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
"""
def __init__(self,
in_channels,
out_channels,
bn_use_global_stats=False,
bn_cudnn_off=False,
**kwargs):
super(DANetHead, self).__init__(**kwargs)
mid_channels = in_channels // 4
dropout_rate = 0.1
with self.name_scope():
self.branch_pa = DANetHeadBranch(
in_channels=in_channels,
out_channels=out_channels,
pose_att=True,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off)
self.branch_ca = DANetHeadBranch(
in_channels=in_channels,
out_channels=out_channels,
pose_att=False,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off)
self.conv = conv1x1(
in_channels=mid_channels,
out_channels=out_channels,
use_bias=True)
self.dropout = nn.Dropout(rate=dropout_rate)
def hybrid_forward(self, F, x):
pa_x, pa_y = self.branch_pa(x)
ca_x, ca_y = self.branch_ca(x)
y = pa_y + ca_y
x = self.conv(y)
x = self.dropout(x)
return x, pa_x, ca_x
class DANet(HybridBlock):
"""
DANet model from 'Dual Attention Network for Scene Segmentation,' https://arxiv.org/abs/1809.02983.
Parameters:
----------
backbone : nn.Sequential
Feature extractor.
backbone_out_channels : int, default 2048
        Number of output channels from the feature extractor.
aux : bool, default False
Whether to output an auxiliary result.
fixed_size : bool, default True
Whether to expect fixed spatial size of input image.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (480, 480)
Spatial size of the expected input image.
classes : int, default 19
Number of segmentation classes.
"""
def __init__(self,
backbone,
backbone_out_channels=2048,
aux=False,
fixed_size=True,
bn_use_global_stats=False,
bn_cudnn_off=False,
in_channels=3,
in_size=(480, 480),
classes=19,
**kwargs):
super(DANet, self).__init__(**kwargs)
assert (in_channels > 0)
assert ((in_size[0] % 8 == 0) and (in_size[1] % 8 == 0))
self.in_size = in_size
self.classes = classes
self.aux = aux
self.fixed_size = fixed_size
with self.name_scope():
self.backbone = backbone
self.head = DANetHead(
in_channels=backbone_out_channels,
out_channels=classes,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off)
def hybrid_forward(self, F, x):
in_size = self.in_size if self.fixed_size else x.shape[2:]
x, _ = self.backbone(x)
x, y, z = self.head(x)
x = F.contrib.BilinearResize2D(x, height=in_size[0], width=in_size[1])
if self.aux:
y = F.contrib.BilinearResize2D(y, height=in_size[0], width=in_size[1])
z = F.contrib.BilinearResize2D(z, height=in_size[0], width=in_size[1])
return x, y, z
else:
return x
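# Note (added for clarity): with aux == True the two upsampled branch logits are
# returned alongside the fused prediction, which lets a training script attach
# auxiliary losses to the position- and channel-attention branches separately.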
def get_danet(backbone,
classes,
aux=False,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create DANet model with specific parameters.
Parameters:
----------
backbone : nn.Sequential
Feature extractor.
classes : int
Number of segmentation classes.
aux : bool, default False
Whether to output an auxiliary result.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
net = DANet(
backbone=backbone,
classes=classes,
aux=aux,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx,
ignore_extra=True)
return net
def danet_resnetd50b_cityscapes(pretrained_backbone=False, classes=19, aux=True, **kwargs):
"""
DANet model on the base of ResNet(D)-50b for Cityscapes from 'Dual Attention Network for Scene Segmentation,'
https://arxiv.org/abs/1809.02983.
Parameters:
----------
pretrained_backbone : bool, default False
Whether to load the pretrained weights for feature extractor.
classes : int, default 19
Number of segmentation classes.
aux : bool, default True
Whether to output an auxiliary result.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
backbone = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features[:-1]
return get_danet(backbone=backbone, classes=classes, aux=aux, model_name="danet_resnetd50b_cityscapes",
**kwargs)
def danet_resnetd101b_cityscapes(pretrained_backbone=False, classes=19, aux=True, **kwargs):
"""
DANet model on the base of ResNet(D)-101b for Cityscapes from 'Dual Attention Network for Scene Segmentation,'
https://arxiv.org/abs/1809.02983.
Parameters:
----------
pretrained_backbone : bool, default False
Whether to load the pretrained weights for feature extractor.
classes : int, default 19
Number of segmentation classes.
aux : bool, default True
Whether to output an auxiliary result.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
backbone = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features[:-1]
return get_danet(backbone=backbone, classes=classes, aux=aux, model_name="danet_resnetd101b_cityscapes",
**kwargs)
def _test():
import numpy as np
import mxnet as mx
in_size = (480, 480)
aux = False
pretrained = False
models = [
danet_resnetd50b_cityscapes,
danet_resnetd101b_cityscapes,
]
for model in models:
net = model(pretrained=pretrained, in_size=in_size, aux=aux)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != danet_resnetd50b_cityscapes or weight_count == 47586427)
assert (model != danet_resnetd101b_cityscapes or weight_count == 66578555)
batch = 14
classes = 19
x = mx.nd.zeros((batch, 3, in_size[0], in_size[1]), ctx=ctx)
ys = net(x)
y = ys[0] if aux else ys
assert ((y.shape[0] == x.shape[0]) and (y.shape[1] == classes) and (y.shape[2] == x.shape[2]) and
(y.shape[3] == x.shape[3]))
if __name__ == "__main__":
_test()
| 14,852 | 32.680272 | 116 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/mobilenetv2.py | """
MobileNetV2 for ImageNet-1K, implemented in Gluon.
Original paper: 'MobileNetV2: Inverted Residuals and Linear Bottlenecks,' https://arxiv.org/abs/1801.04381.
"""
__all__ = ['MobileNetV2', 'mobilenetv2_w1', 'mobilenetv2_w3d4', 'mobilenetv2_wd2', 'mobilenetv2_wd4', 'mobilenetv2b_w1',
'mobilenetv2b_w3d4', 'mobilenetv2b_wd2', 'mobilenetv2b_wd4']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import ReLU6, conv1x1, conv1x1_block, conv3x3_block, dwconv3x3_block
class LinearBottleneck(HybridBlock):
"""
So-called 'Linear Bottleneck' layer. It is used as a MobileNetV2 unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the second convolution layer.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
expansion : bool
        Whether to do expansion of channels.
remove_exp_conv : bool
Whether to remove expansion convolution.
"""
def __init__(self,
in_channels,
out_channels,
strides,
bn_use_global_stats,
expansion,
remove_exp_conv,
**kwargs):
super(LinearBottleneck, self).__init__(**kwargs)
self.residual = (in_channels == out_channels) and (strides == 1)
mid_channels = in_channels * 6 if expansion else in_channels
self.use_exp_conv = (expansion or (not remove_exp_conv))
with self.name_scope():
if self.use_exp_conv:
self.conv1 = conv1x1_block(
in_channels=in_channels,
out_channels=mid_channels,
bn_use_global_stats=bn_use_global_stats,
activation=ReLU6())
self.conv2 = dwconv3x3_block(
in_channels=mid_channels,
out_channels=mid_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats,
activation=ReLU6())
self.conv3 = conv1x1_block(
in_channels=mid_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats,
activation=None)
def hybrid_forward(self, F, x):
if self.residual:
identity = x
if self.use_exp_conv:
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
if self.residual:
x = x + identity
return x
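# Illustrative sketch (channel/stride values are assumptions): with expansion
# enabled the block widens to 6x the input channels internally, and the identity
# shortcut is taken only when strides == 1 and the channel count is unchanged.
def _linear_bottleneck_sketch():
    import mxnet as mx
    block = LinearBottleneck(
        in_channels=16,
        out_channels=16,
        strides=1,
        bn_use_global_stats=False,
        expansion=True,
        remove_exp_conv=False)
    block.initialize()
    x = mx.nd.zeros((1, 16, 14, 14))
    assert block(x).shape == x.shape  # residual branch is active here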
class MobileNetV2(HybridBlock):
"""
MobileNetV2 model from 'MobileNetV2: Inverted Residuals and Linear Bottlenecks,' https://arxiv.org/abs/1801.04381.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
final_block_channels : int
Number of output channels for the final block of the feature extractor.
remove_exp_conv : bool
Whether to remove expansion convolution.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
final_block_channels,
remove_exp_conv,
bn_use_global_stats=False,
in_channels=3,
in_size=(224, 224),
classes=1000,
**kwargs):
super(MobileNetV2, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
self.features.add(conv3x3_block(
in_channels=in_channels,
out_channels=init_block_channels,
strides=2,
bn_use_global_stats=bn_use_global_stats,
activation=ReLU6()))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
with stage.name_scope():
for j, out_channels in enumerate(channels_per_stage):
strides = 2 if (j == 0) and (i != 0) else 1
expansion = (i != 0) or (j != 0)
stage.add(LinearBottleneck(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats,
expansion=expansion,
remove_exp_conv=remove_exp_conv))
in_channels = out_channels
self.features.add(stage)
self.features.add(conv1x1_block(
in_channels=in_channels,
out_channels=final_block_channels,
bn_use_global_stats=bn_use_global_stats,
activation=ReLU6()))
in_channels = final_block_channels
self.features.add(nn.AvgPool2D(
pool_size=7,
strides=1))
self.output = nn.HybridSequential(prefix="")
self.output.add(conv1x1(
in_channels=in_channels,
out_channels=classes,
use_bias=False))
self.output.add(nn.Flatten())
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_mobilenetv2(width_scale,
remove_exp_conv=False,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create MobileNetV2 model with specific parameters.
Parameters:
----------
width_scale : float
Scale factor for width of layers.
remove_exp_conv : bool, default False
Whether to remove expansion convolution.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
init_block_channels = 32
final_block_channels = 1280
layers = [1, 2, 3, 4, 3, 3, 1]
downsample = [0, 1, 1, 1, 0, 1, 0]
channels_per_layers = [16, 24, 32, 64, 96, 160, 320]
from functools import reduce
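    # Explanatory note (added): the fold below groups units into stages; a unit
    # whose downsample flag is 1 opens a new stage list, while a 0 flag appends
    # its units to the stage currently being built.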
channels = reduce(lambda x, y: x + [[y[0]] * y[1]] if y[2] != 0 else x[:-1] + [x[-1] + [y[0]] * y[1]],
zip(channels_per_layers, layers, downsample), [[]])
if width_scale != 1.0:
channels = [[int(cij * width_scale) for cij in ci] for ci in channels]
init_block_channels = int(init_block_channels * width_scale)
if width_scale > 1.0:
final_block_channels = int(final_block_channels * width_scale)
net = MobileNetV2(
channels=channels,
init_block_channels=init_block_channels,
final_block_channels=final_block_channels,
remove_exp_conv=remove_exp_conv,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
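# Worked example (added for illustration): with width_scale == 0.5 the channel
# counts above are floored to half, e.g. [16, 24, 32, ...] -> [8, 12, 16, ...],
# while final_block_channels stays at 1280 because the scale is not above 1.0.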
def mobilenetv2_w1(**kwargs):
"""
1.0 MobileNetV2-224 model from 'MobileNetV2: Inverted Residuals and Linear Bottlenecks,'
https://arxiv.org/abs/1801.04381.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_mobilenetv2(width_scale=1.0, model_name="mobilenetv2_w1", **kwargs)
def mobilenetv2_w3d4(**kwargs):
"""
0.75 MobileNetV2-224 model from 'MobileNetV2: Inverted Residuals and Linear Bottlenecks,'
https://arxiv.org/abs/1801.04381.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_mobilenetv2(width_scale=0.75, model_name="mobilenetv2_w3d4", **kwargs)
def mobilenetv2_wd2(**kwargs):
"""
0.5 MobileNetV2-224 model from 'MobileNetV2: Inverted Residuals and Linear Bottlenecks,'
https://arxiv.org/abs/1801.04381.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_mobilenetv2(width_scale=0.5, model_name="mobilenetv2_wd2", **kwargs)
def mobilenetv2_wd4(**kwargs):
"""
0.25 MobileNetV2-224 model from 'MobileNetV2: Inverted Residuals and Linear Bottlenecks,'
https://arxiv.org/abs/1801.04381.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_mobilenetv2(width_scale=0.25, model_name="mobilenetv2_wd4", **kwargs)
def mobilenetv2b_w1(**kwargs):
"""
1.0 MobileNetV2b-224 model from 'MobileNetV2: Inverted Residuals and Linear Bottlenecks,'
https://arxiv.org/abs/1801.04381.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_mobilenetv2(width_scale=1.0, remove_exp_conv=True, model_name="mobilenetv2b_w1", **kwargs)
def mobilenetv2b_w3d4(**kwargs):
"""
0.75 MobileNetV2b-224 model from 'MobileNetV2: Inverted Residuals and Linear Bottlenecks,'
https://arxiv.org/abs/1801.04381.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_mobilenetv2(width_scale=0.75, remove_exp_conv=True, model_name="mobilenetv2b_w3d4", **kwargs)
def mobilenetv2b_wd2(**kwargs):
"""
0.5 MobileNetV2b-224 model from 'MobileNetV2: Inverted Residuals and Linear Bottlenecks,'
https://arxiv.org/abs/1801.04381.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_mobilenetv2(width_scale=0.5, remove_exp_conv=True, model_name="mobilenetv2b_wd2", **kwargs)
def mobilenetv2b_wd4(**kwargs):
"""
0.25 MobileNetV2b-224 model from 'MobileNetV2: Inverted Residuals and Linear Bottlenecks,'
https://arxiv.org/abs/1801.04381.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_mobilenetv2(width_scale=0.25, remove_exp_conv=True, model_name="mobilenetv2b_wd4", **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
mobilenetv2_w1,
mobilenetv2_w3d4,
mobilenetv2_wd2,
mobilenetv2_wd4,
mobilenetv2b_w1,
mobilenetv2b_w3d4,
mobilenetv2b_wd2,
mobilenetv2b_wd4,
]
for model in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != mobilenetv2_w1 or weight_count == 3504960)
assert (model != mobilenetv2_w3d4 or weight_count == 2627592)
assert (model != mobilenetv2_wd2 or weight_count == 1964736)
assert (model != mobilenetv2_wd4 or weight_count == 1516392)
assert (model != mobilenetv2b_w1 or weight_count == 3503872)
assert (model != mobilenetv2b_w3d4 or weight_count == 2626968)
assert (model != mobilenetv2b_wd2 or weight_count == 1964448)
assert (model != mobilenetv2b_wd4 or weight_count == 1516312)
x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
y = net(x)
assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
| 14,563 | 34.696078 | 120 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/squeezenet.py | """
SqueezeNet for ImageNet-1K, implemented in Gluon.
Original paper: 'SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and <0.5MB model size,'
https://arxiv.org/abs/1602.07360.
"""
__all__ = ['SqueezeNet', 'squeezenet_v1_0', 'squeezenet_v1_1', 'squeezeresnet_v1_0', 'squeezeresnet_v1_1']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
class FireConv(HybridBlock):
"""
SqueezeNet specific convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of 2 int
Convolution window size.
padding : int or tuple/list of 2 int
Padding value for convolution layer.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
padding,
**kwargs):
super(FireConv, self).__init__(**kwargs)
with self.name_scope():
self.conv = nn.Conv2D(
channels=out_channels,
kernel_size=kernel_size,
padding=padding,
in_channels=in_channels)
self.activ = nn.Activation("relu")
def hybrid_forward(self, F, x):
x = self.conv(x)
x = self.activ(x)
return x
class FireUnit(HybridBlock):
"""
SqueezeNet unit, so-called 'Fire' unit.
Parameters:
----------
in_channels : int
Number of input channels.
squeeze_channels : int
Number of output channels for squeeze convolution blocks.
expand1x1_channels : int
Number of output channels for expand 1x1 convolution blocks.
expand3x3_channels : int
Number of output channels for expand 3x3 convolution blocks.
residual : bool
        Whether to use a residual connection.
"""
def __init__(self,
in_channels,
squeeze_channels,
expand1x1_channels,
expand3x3_channels,
residual,
**kwargs):
super(FireUnit, self).__init__(**kwargs)
self.residual = residual
with self.name_scope():
self.squeeze = FireConv(
in_channels=in_channels,
out_channels=squeeze_channels,
kernel_size=1,
padding=0)
self.expand1x1 = FireConv(
in_channels=squeeze_channels,
out_channels=expand1x1_channels,
kernel_size=1,
padding=0)
self.expand3x3 = FireConv(
in_channels=squeeze_channels,
out_channels=expand3x3_channels,
kernel_size=3,
padding=1)
def hybrid_forward(self, F, x):
if self.residual:
identity = x
x = self.squeeze(x)
y1 = self.expand1x1(x)
y2 = self.expand3x3(x)
out = F.concat(y1, y2, dim=1)
if self.residual:
out = out + identity
return out
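# Minimal sketch (added for illustration; channel numbers are assumptions): the
# unit squeezes to a narrow 1x1 bottleneck, then expands through parallel 1x1
# and 3x3 branches whose concatenation forms the output.
def _fire_unit_sketch():
    import mxnet as mx
    unit = FireUnit(
        in_channels=128,
        squeeze_channels=16,
        expand1x1_channels=64,
        expand3x3_channels=64,
        residual=True)
    unit.initialize()
    x = mx.nd.zeros((1, 128, 28, 28))
    assert unit(x).shape == x.shape  # 64 + 64 concatenated channels, plus identity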
class SqueezeInitBlock(HybridBlock):
"""
SqueezeNet specific initial block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of 2 int
Convolution window size.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
**kwargs):
super(SqueezeInitBlock, self).__init__(**kwargs)
with self.name_scope():
self.conv = nn.Conv2D(
channels=out_channels,
kernel_size=kernel_size,
strides=2,
in_channels=in_channels)
self.activ = nn.Activation("relu")
def hybrid_forward(self, F, x):
x = self.conv(x)
x = self.activ(x)
return x
class SqueezeNet(HybridBlock):
"""
SqueezeNet model from 'SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and <0.5MB model size,'
https://arxiv.org/abs/1602.07360.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
    residuals : list of list of int or None
        Usage of residual connections in each unit (1 enables the residual branch).
init_block_kernel_size : int or tuple/list of 2 int
The dimensions of the convolution window for the initial unit.
init_block_channels : int
Number of output channels for the initial unit.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
residuals,
init_block_kernel_size,
init_block_channels,
in_channels=3,
in_size=(224, 224),
classes=1000,
**kwargs):
super(SqueezeNet, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
self.features.add(SqueezeInitBlock(
in_channels=in_channels,
out_channels=init_block_channels,
kernel_size=init_block_kernel_size))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
with stage.name_scope():
stage.add(nn.MaxPool2D(
pool_size=3,
strides=2,
ceil_mode=True))
for j, out_channels in enumerate(channels_per_stage):
expand_channels = out_channels // 2
squeeze_channels = out_channels // 8
stage.add(FireUnit(
in_channels=in_channels,
squeeze_channels=squeeze_channels,
expand1x1_channels=expand_channels,
expand3x3_channels=expand_channels,
residual=((residuals is not None) and (residuals[i][j] == 1))))
in_channels = out_channels
self.features.add(stage)
self.features.add(nn.Dropout(rate=0.5))
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Conv2D(
channels=classes,
kernel_size=1,
in_channels=in_channels))
self.output.add(nn.Activation("relu"))
self.output.add(nn.AvgPool2D(
pool_size=13,
strides=1))
self.output.add(nn.Flatten())
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_squeezenet(version,
residual=False,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create SqueezeNet model with specific parameters.
Parameters:
----------
version : str
Version of SqueezeNet ('1.0' or '1.1').
residual : bool, default False
Whether to use residual connections.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
if version == "1.0":
channels = [[128, 128, 256], [256, 384, 384, 512], [512]]
residuals = [[0, 1, 0], [1, 0, 1, 0], [1]]
init_block_kernel_size = 7
init_block_channels = 96
elif version == "1.1":
channels = [[128, 128], [256, 256], [384, 384, 512, 512]]
residuals = [[0, 1], [0, 1], [0, 1, 0, 1]]
init_block_kernel_size = 3
init_block_channels = 64
else:
raise ValueError("Unsupported SqueezeNet version {}".format(version))
if not residual:
residuals = None
net = SqueezeNet(
channels=channels,
residuals=residuals,
init_block_kernel_size=init_block_kernel_size,
init_block_channels=init_block_channels,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
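# Note (added for clarity): relative to v1.0, v1.1 uses a smaller 3x3 stem and
# pools earlier, which the official SqueezeNet repository reports as roughly
# 2.4x less computation at comparable accuracy; the two channel configurations
# above encode exactly that layout difference.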
def squeezenet_v1_0(**kwargs):
"""
SqueezeNet 'vanilla' model from 'SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and <0.5MB model
size,' https://arxiv.org/abs/1602.07360.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_squeezenet(version="1.0", residual=False, model_name="squeezenet_v1_0", **kwargs)
def squeezenet_v1_1(**kwargs):
"""
SqueezeNet v1.1 model from 'SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and <0.5MB model
size,' https://arxiv.org/abs/1602.07360.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_squeezenet(version="1.1", residual=False, model_name="squeezenet_v1_1", **kwargs)
def squeezeresnet_v1_0(**kwargs):
"""
SqueezeNet model with residual connections from 'SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and
<0.5MB model size,' https://arxiv.org/abs/1602.07360.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_squeezenet(version="1.0", residual=True, model_name="squeezeresnet_v1_0", **kwargs)
def squeezeresnet_v1_1(**kwargs):
"""
SqueezeNet v1.1 model with residual connections from 'SqueezeNet: AlexNet-level accuracy with 50x fewer parameters
and <0.5MB model size,' https://arxiv.org/abs/1602.07360.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_squeezenet(version="1.1", residual=True, model_name="squeezeresnet_v1_1", **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
squeezenet_v1_0,
squeezenet_v1_1,
# squeezeresnet_v1_0,
# squeezeresnet_v1_1,
]
for model in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != squeezenet_v1_0 or weight_count == 1248424)
assert (model != squeezenet_v1_1 or weight_count == 1235496)
assert (model != squeezeresnet_v1_0 or weight_count == 1248424)
assert (model != squeezeresnet_v1_1 or weight_count == 1235496)
x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
y = net(x)
assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
| 12,810 | 32.018041 | 118 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/octresnet_cifar.py | """
Oct-ResNet for CIFAR/SVHN, implemented in Gluon.
Original paper: 'Drop an Octave: Reducing Spatial Redundancy in Convolutional Neural Networks with Octave
Convolution,' https://arxiv.org/abs/1904.05049.
"""
__all__ = ['CIFAROctResNet', 'octresnet20_ad2_cifar10', 'octresnet20_ad2_cifar100', 'octresnet20_ad2_svhn',
'octresnet56_ad2_cifar10', 'octresnet56_ad2_cifar100', 'octresnet56_ad2_svhn']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv3x3_block, DualPathSequential
from .octresnet import OctResUnit
class CIFAROctResNet(HybridBlock):
"""
Oct-ResNet model for CIFAR from 'Drop an Octave: Reducing Spatial Redundancy in Convolutional Neural Networks with
Octave Convolution,' https://arxiv.org/abs/1904.05049.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
oct_alpha : float, default 0.5
Octave alpha coefficient.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (32, 32)
Spatial size of the expected input image.
classes : int, default 10
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
bottleneck,
oct_alpha=0.5,
bn_use_global_stats=False,
in_channels=3,
in_size=(32, 32),
classes=10,
**kwargs):
super(CIFAROctResNet, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
with self.name_scope():
self.features = DualPathSequential(
return_two=False,
first_ordinals=1,
last_ordinals=1,
prefix="")
self.features.add(conv3x3_block(
in_channels=in_channels,
out_channels=init_block_channels,
bn_use_global_stats=bn_use_global_stats))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = DualPathSequential(prefix="stage{}_".format(i + 1))
with stage.name_scope():
for j, out_channels in enumerate(channels_per_stage):
strides = 2 if (j == 0) and (i != 0) else 1
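                        # Octave mode per unit (explanatory note, added):
                        # "first" splits the stream into high-/low-frequency
                        # paths, "norm" keeps both paths, "last" merges them
                        # back into one, and "std" is a plain unit after the
                        # merge.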
if (i == 0) and (j == 0):
oct_mode = "first"
elif (i == len(channels) - 1) and (j == 0):
oct_mode = "last"
elif (i == len(channels) - 1) and (j != 0):
oct_mode = "std"
else:
oct_mode = "norm"
stage.add(OctResUnit(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
oct_alpha=oct_alpha,
oct_mode=oct_mode,
bn_use_global_stats=bn_use_global_stats,
bottleneck=bottleneck,
conv1_stride=False))
in_channels = out_channels
self.features.add(stage)
self.features.add(nn.AvgPool2D(
pool_size=8,
strides=1))
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
self.output.add(nn.Dense(
units=classes,
in_units=in_channels))
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_octresnet_cifar(classes,
blocks,
bottleneck,
oct_alpha=0.5,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create Oct-ResNet model for CIFAR with specific parameters.
Parameters:
----------
classes : int
Number of classification classes.
blocks : int
Number of blocks.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
oct_alpha : float, default 0.5
Octave alpha coefficient.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
assert (classes in [10, 100])
if bottleneck:
assert ((blocks - 2) % 9 == 0)
layers = [(blocks - 2) // 9] * 3
else:
assert ((blocks - 2) % 6 == 0)
layers = [(blocks - 2) // 6] * 3
channels_per_layers = [16, 32, 64]
init_block_channels = 16
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
if bottleneck:
channels = [[cij * 4 for cij in ci] for ci in channels]
net = CIFAROctResNet(
channels=channels,
init_block_channels=init_block_channels,
bottleneck=bottleneck,
oct_alpha=oct_alpha,
classes=classes,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def octresnet20_ad2_cifar10(classes=10, **kwargs):
"""
Oct-ResNet-20 (alpha=1/2) model for CIFAR-10 from 'Drop an Octave: Reducing Spatial Redundancy in Convolutional
Neural Networks with Octave Convolution,' https://arxiv.org/abs/1904.05049.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_octresnet_cifar(classes=classes, blocks=20, bottleneck=False, oct_alpha=0.5,
model_name="octresnet20_ad2_cifar10", **kwargs)
def octresnet20_ad2_cifar100(classes=100, **kwargs):
"""
Oct-ResNet-20 (alpha=1/2) model for CIFAR-100 from 'Drop an Octave: Reducing Spatial Redundancy in Convolutional
Neural Networks with Octave Convolution,' https://arxiv.org/abs/1904.05049.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_octresnet_cifar(classes=classes, blocks=20, bottleneck=False, oct_alpha=0.5,
model_name="octresnet20_ad2_cifar100", **kwargs)
def octresnet20_ad2_svhn(classes=10, **kwargs):
"""
Oct-ResNet-20 (alpha=1/2) model for SVHN from 'Drop an Octave: Reducing Spatial Redundancy in Convolutional
Neural Networks with Octave Convolution,' https://arxiv.org/abs/1904.05049.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_octresnet_cifar(classes=classes, blocks=20, bottleneck=False, oct_alpha=0.5,
model_name="octresnet20_ad2_svhn", **kwargs)
def octresnet56_ad2_cifar10(classes=10, **kwargs):
"""
Oct-ResNet-56 (alpha=1/2) model for CIFAR-10 from 'Drop an Octave: Reducing Spatial Redundancy in Convolutional
Neural Networks with Octave Convolution,' https://arxiv.org/abs/1904.05049.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_octresnet_cifar(classes=classes, blocks=56, bottleneck=False, oct_alpha=0.5,
model_name="octresnet56_ad2_cifar10", **kwargs)
def octresnet56_ad2_cifar100(classes=100, **kwargs):
"""
Oct-ResNet-56 (alpha=1/2) model for CIFAR-100 from 'Drop an Octave: Reducing Spatial Redundancy in Convolutional
Neural Networks with Octave Convolution,' https://arxiv.org/abs/1904.05049.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_octresnet_cifar(classes=classes, blocks=56, bottleneck=False, oct_alpha=0.5,
model_name="octresnet56_ad2_cifar100", **kwargs)
def octresnet56_ad2_svhn(classes=10, **kwargs):
"""
Oct-ResNet-56 (alpha=1/2) model for SVHN from 'Drop an Octave: Reducing Spatial Redundancy in Convolutional
Neural Networks with Octave Convolution,' https://arxiv.org/abs/1904.05049.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_octresnet_cifar(classes=classes, blocks=56, bottleneck=False, oct_alpha=0.5,
model_name="octresnet56_ad2_svhn", **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
(octresnet20_ad2_cifar10, 10),
(octresnet20_ad2_cifar100, 100),
(octresnet20_ad2_svhn, 10),
(octresnet56_ad2_cifar10, 10),
(octresnet56_ad2_cifar100, 100),
(octresnet56_ad2_svhn, 10),
]
for model, classes in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != octresnet20_ad2_cifar10 or weight_count == 272762)
assert (model != octresnet20_ad2_cifar100 or weight_count == 278612)
assert (model != octresnet20_ad2_svhn or weight_count == 272762)
assert (model != octresnet56_ad2_cifar10 or weight_count == 856058)
assert (model != octresnet56_ad2_cifar100 or weight_count == 861908)
assert (model != octresnet56_ad2_svhn or weight_count == 856058)
x = mx.nd.zeros((1, 3, 32, 32), ctx=ctx)
y = net(x)
assert (y.shape == (1, classes))
if __name__ == "__main__":
_test()
| 12,716 | 36.293255 | 118 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/nin_cifar.py | """
NIN for CIFAR/SVHN, implemented in Gluon.
Original paper: 'Network In Network,' https://arxiv.org/abs/1312.4400.
"""
__all__ = ['CIFARNIN', 'nin_cifar10', 'nin_cifar100', 'nin_svhn']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
class NINConv(HybridBlock):
"""
NIN specific convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of 2 int
Convolution window size.
strides : int or tuple/list of 2 int, default 1
Strides of the convolution.
padding : int or tuple/list of 2 int, default 0
Padding value for convolution layer.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
strides=1,
padding=0,
**kwargs):
super(NINConv, self).__init__(**kwargs)
with self.name_scope():
self.conv = nn.Conv2D(
channels=out_channels,
kernel_size=kernel_size,
strides=strides,
padding=padding,
use_bias=True,
in_channels=in_channels)
self.activ = nn.Activation("relu")
def hybrid_forward(self, F, x):
x = self.conv(x)
x = self.activ(x)
return x
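# Context sketch (added for illustration): NIN stacks 1x1 convolutions after
# each spatial convolution, so a NINConv with kernel_size=1 acts as a per-pixel
# fully-connected layer over channels.
def _nin_conv_sketch():
    import mxnet as mx
    conv = NINConv(in_channels=192, out_channels=160, kernel_size=1)
    conv.initialize()
    x = mx.nd.zeros((1, 192, 32, 32))
    assert conv(x).shape == (1, 160, 32, 32)  # spatial size untouched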
class CIFARNIN(HybridBlock):
"""
NIN model for CIFAR from 'Network In Network,' https://arxiv.org/abs/1312.4400.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
first_kernel_sizes : list of int
Convolution window sizes for the first units in each stage.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (32, 32)
Spatial size of the expected input image.
classes : int, default 10
Number of classification classes.
"""
def __init__(self,
channels,
first_kernel_sizes,
in_channels=3,
in_size=(32, 32),
classes=10,
**kwargs):
super(CIFARNIN, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
for i, channels_per_stage in enumerate(channels):
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
with stage.name_scope():
for j, out_channels in enumerate(channels_per_stage):
if (j == 0) and (i != 0):
if i == 1:
stage.add(nn.MaxPool2D(
pool_size=3,
strides=2,
padding=1))
else:
stage.add(nn.AvgPool2D(
pool_size=3,
strides=2,
padding=1))
stage.add(nn.Dropout(rate=0.5))
kernel_size = first_kernel_sizes[i] if j == 0 else 1
padding = (kernel_size - 1) // 2
stage.add(NINConv(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
padding=padding))
in_channels = out_channels
self.features.add(stage)
self.output = nn.HybridSequential(prefix="")
self.output.add(NINConv(
in_channels=in_channels,
out_channels=classes,
kernel_size=1))
self.output.add(nn.AvgPool2D(
pool_size=8,
strides=1))
self.output.add(nn.Flatten())
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
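# Note (added for clarity): the head above is itself a 1x1 NINConv followed by
# global 8x8 average pooling, i.e. the paper's replacement of fully-connected
# classifier layers with global average pooling over class feature maps.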
def get_nin_cifar(classes,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create NIN model for CIFAR with specific parameters.
Parameters:
----------
classes : int
Number of classification classes.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
channels = [[192, 160, 96], [192, 192, 192], [192, 192]]
first_kernel_sizes = [5, 5, 3]
net = CIFARNIN(
channels=channels,
first_kernel_sizes=first_kernel_sizes,
classes=classes,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def nin_cifar10(classes=10, **kwargs):
"""
NIN model for CIFAR-10 from 'Network In Network,' https://arxiv.org/abs/1312.4400.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_nin_cifar(classes=classes, model_name="nin_cifar10", **kwargs)
def nin_cifar100(classes=100, **kwargs):
"""
NIN model for CIFAR-100 from 'Network In Network,' https://arxiv.org/abs/1312.4400.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_nin_cifar(classes=classes, model_name="nin_cifar100", **kwargs)
def nin_svhn(classes=10, **kwargs):
"""
NIN model for SVHN from 'Network In Network,' https://arxiv.org/abs/1312.4400.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_nin_cifar(classes=classes, model_name="nin_svhn", **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
(nin_cifar10, 10),
(nin_cifar100, 100),
(nin_svhn, 10),
]
for model, classes in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != nin_cifar10 or weight_count == 966986)
assert (model != nin_cifar100 or weight_count == 984356)
assert (model != nin_svhn or weight_count == 966986)
x = mx.nd.zeros((1, 3, 32, 32), ctx=ctx)
y = net(x)
assert (y.shape == (1, classes))
if __name__ == "__main__":
_test()
| 8,489 | 31.037736 | 115 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/vgg.py | """
VGG for ImageNet-1K, implemented in Gluon.
Original paper: 'Very Deep Convolutional Networks for Large-Scale Image Recognition,'
https://arxiv.org/abs/1409.1556.
"""
__all__ = ['VGG', 'vgg11', 'vgg13', 'vgg16', 'vgg19', 'bn_vgg11', 'bn_vgg13', 'bn_vgg16', 'bn_vgg19', 'bn_vgg11b',
'bn_vgg13b', 'bn_vgg16b', 'bn_vgg19b']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv3x3_block
class VGGDense(HybridBlock):
"""
VGG specific dense block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
"""
def __init__(self,
in_channels,
out_channels,
**kwargs):
super(VGGDense, self).__init__(**kwargs)
with self.name_scope():
self.fc = nn.Dense(
units=out_channels,
in_units=in_channels)
self.activ = nn.Activation("relu")
self.dropout = nn.Dropout(rate=0.5)
def hybrid_forward(self, F, x):
x = self.fc(x)
x = self.activ(x)
x = self.dropout(x)
return x
class VGGOutputBlock(HybridBlock):
"""
VGG specific output block.
Parameters:
----------
in_channels : int
Number of input channels.
classes : int
Number of classification classes.
"""
def __init__(self,
in_channels,
classes,
**kwargs):
super(VGGOutputBlock, self).__init__(**kwargs)
mid_channels = 4096
with self.name_scope():
self.fc1 = VGGDense(
in_channels=in_channels,
out_channels=mid_channels)
self.fc2 = VGGDense(
in_channels=mid_channels,
out_channels=mid_channels)
self.fc3 = nn.Dense(
units=classes,
in_units=mid_channels)
def hybrid_forward(self, F, x):
x = self.fc1(x)
x = self.fc2(x)
x = self.fc3(x)
return x
class VGG(HybridBlock):
"""
VGG models from 'Very Deep Convolutional Networks for Large-Scale Image Recognition,'
https://arxiv.org/abs/1409.1556.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
use_bias : bool, default True
Whether the convolution layer uses a bias vector.
use_bn : bool, default False
Whether to use BatchNorm layers.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
use_bias=True,
use_bn=False,
bn_use_global_stats=False,
in_channels=3,
in_size=(224, 224),
classes=1000,
**kwargs):
super(VGG, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
for i, channels_per_stage in enumerate(channels):
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
with stage.name_scope():
for j, out_channels in enumerate(channels_per_stage):
stage.add(conv3x3_block(
in_channels=in_channels,
out_channels=out_channels,
use_bias=use_bias,
use_bn=use_bn,
bn_use_global_stats=bn_use_global_stats))
in_channels = out_channels
stage.add(nn.MaxPool2D(
pool_size=2,
strides=2,
padding=0))
self.features.add(stage)
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
in_channels = in_channels * 7 * 7
self.output.add(VGGOutputBlock(
in_channels=in_channels,
classes=classes))
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
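# Shape sketch (hedged, added for clarity): with the default 224x224 input,
# five stride-2 max pools reduce the spatial size to 7x7, so the flattened
# classifier input is `in_channels * 7 * 7` (25088 for the 512-channel last
# stage), matching the `in_channels = in_channels * 7 * 7` wiring above.
def _example_vgg_classifier_input():
    last_stage_channels = 512
    spatial = 224
    for _ in range(5):          # one 2x2/stride-2 max pool per stage
        spatial //= 2
    flat = last_stage_channels * spatial * spatial
    assert (spatial, flat) == (7, 25088)
    return flat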
def get_vgg(blocks,
use_bias=True,
use_bn=False,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create VGG model with specific parameters.
Parameters:
----------
blocks : int
Number of blocks.
use_bias : bool, default True
Whether the convolution layer uses a bias vector.
use_bn : bool, default False
Whether to use BatchNorm layers.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
if blocks == 11:
layers = [1, 1, 2, 2, 2]
elif blocks == 13:
layers = [2, 2, 2, 2, 2]
elif blocks == 16:
layers = [2, 2, 3, 3, 3]
elif blocks == 19:
layers = [2, 2, 4, 4, 4]
else:
raise ValueError("Unsupported VGG with number of blocks: {}".format(blocks))
channels_per_layers = [64, 128, 256, 512, 512]
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
net = VGG(
channels=channels,
use_bias=use_bias,
use_bn=use_bn,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
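# Config sketch (hedged, illustrative): how the per-stage channel lists come
# out of `get_vgg` for VGG-16, using the same zip/replication idiom as above.
def _example_vgg16_channels():
    layers = [2, 2, 3, 3, 3]
    channels_per_layers = [64, 128, 256, 512, 512]
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
    assert channels == [[64, 64], [128, 128], [256, 256, 256],
                        [512, 512, 512], [512, 512, 512]]
    return channels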
def vgg11(**kwargs):
"""
VGG-11 model from 'Very Deep Convolutional Networks for Large-Scale Image Recognition,'
https://arxiv.org/abs/1409.1556.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_vgg(blocks=11, model_name="vgg11", **kwargs)
def vgg13(**kwargs):
"""
VGG-13 model from 'Very Deep Convolutional Networks for Large-Scale Image Recognition,'
https://arxiv.org/abs/1409.1556.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_vgg(blocks=13, model_name="vgg13", **kwargs)
def vgg16(**kwargs):
"""
VGG-16 model from 'Very Deep Convolutional Networks for Large-Scale Image Recognition,'
https://arxiv.org/abs/1409.1556.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_vgg(blocks=16, model_name="vgg16", **kwargs)
def vgg19(**kwargs):
"""
VGG-19 model from 'Very Deep Convolutional Networks for Large-Scale Image Recognition,'
https://arxiv.org/abs/1409.1556.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_vgg(blocks=19, model_name="vgg19", **kwargs)
def bn_vgg11(**kwargs):
"""
VGG-11 model with batch normalization from 'Very Deep Convolutional Networks for Large-Scale Image Recognition,'
https://arxiv.org/abs/1409.1556.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_vgg(blocks=11, use_bias=False, use_bn=True, model_name="bn_vgg11", **kwargs)
def bn_vgg13(**kwargs):
"""
VGG-13 model with batch normalization from 'Very Deep Convolutional Networks for Large-Scale Image Recognition,'
https://arxiv.org/abs/1409.1556.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_vgg(blocks=13, use_bias=False, use_bn=True, model_name="bn_vgg13", **kwargs)
def bn_vgg16(**kwargs):
"""
VGG-16 model with batch normalization from 'Very Deep Convolutional Networks for Large-Scale Image Recognition,'
https://arxiv.org/abs/1409.1556.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_vgg(blocks=16, use_bias=False, use_bn=True, model_name="bn_vgg16", **kwargs)
def bn_vgg19(**kwargs):
"""
VGG-19 model with batch normalization from 'Very Deep Convolutional Networks for Large-Scale Image Recognition,'
https://arxiv.org/abs/1409.1556.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_vgg(blocks=19, use_bias=False, use_bn=True, model_name="bn_vgg19", **kwargs)
def bn_vgg11b(**kwargs):
"""
VGG-11 model with batch normalization and biases in convolution layers from 'Very Deep Convolutional Networks for
Large-Scale Image Recognition,' https://arxiv.org/abs/1409.1556.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_vgg(blocks=11, use_bias=True, use_bn=True, model_name="bn_vgg11b", **kwargs)
def bn_vgg13b(**kwargs):
"""
VGG-13 model with batch normalization and biases in convolution layers from 'Very Deep Convolutional Networks for
Large-Scale Image Recognition,' https://arxiv.org/abs/1409.1556.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_vgg(blocks=13, use_bias=True, use_bn=True, model_name="bn_vgg13b", **kwargs)
def bn_vgg16b(**kwargs):
"""
VGG-16 model with batch normalization and biases in convolution layers from 'Very Deep Convolutional Networks for
Large-Scale Image Recognition,' https://arxiv.org/abs/1409.1556.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_vgg(blocks=16, use_bias=True, use_bn=True, model_name="bn_vgg16b", **kwargs)
def bn_vgg19b(**kwargs):
"""
VGG-19 model with batch normalization and biases in convolution layers from 'Very Deep Convolutional Networks for
Large-Scale Image Recognition,' https://arxiv.org/abs/1409.1556.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_vgg(blocks=19, use_bias=True, use_bn=True, model_name="bn_vgg19b", **kwargs)
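# Variant sketch (hedged, illustrative): the factory grid above varies two
# `get_vgg` flags -- `use_bn` inserts BatchNorm after each convolution, and
# `use_bias` keeps the convolution biases (the trailing 'b' in the name).
def _example_vgg_variant_flags():
    variants = {
        "vgg16": dict(use_bias=True, use_bn=False),
        "bn_vgg16": dict(use_bias=False, use_bn=True),
        "bn_vgg16b": dict(use_bias=True, use_bn=True),
    }
    assert variants["bn_vgg16b"]["use_bias"] is True
    return variants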
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
vgg11,
vgg13,
vgg16,
vgg19,
bn_vgg11,
bn_vgg13,
bn_vgg16,
bn_vgg19,
bn_vgg11b,
bn_vgg13b,
bn_vgg16b,
bn_vgg19b,
]
for model in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != vgg11 or weight_count == 132863336)
assert (model != vgg13 or weight_count == 133047848)
assert (model != vgg16 or weight_count == 138357544)
assert (model != vgg19 or weight_count == 143667240)
assert (model != bn_vgg11 or weight_count == 132866088)
assert (model != bn_vgg13 or weight_count == 133050792)
assert (model != bn_vgg16 or weight_count == 138361768)
assert (model != bn_vgg19 or weight_count == 143672744)
assert (model != bn_vgg11b or weight_count == 132868840)
assert (model != bn_vgg13b or weight_count == 133053736)
assert (model != bn_vgg16b or weight_count == 138365992)
assert (model != bn_vgg19b or weight_count == 143678248)
x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
y = net(x)
assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
| 15,326 | 31.541401 | 117 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/resnet_cub.py | """
ResNet for CUB-200-2011, implemented in Gluon.
Original paper: 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
"""
__all__ = ['resnet10_cub', 'resnet12_cub', 'resnet14_cub', 'resnetbc14b_cub', 'resnet16_cub', 'resnet18_cub',
'resnet26_cub', 'resnetbc26b_cub', 'resnet34_cub', 'resnetbc38b_cub', 'resnet50_cub', 'resnet50b_cub',
'resnet101_cub', 'resnet101b_cub', 'resnet152_cub', 'resnet152b_cub', 'resnet200_cub', 'resnet200b_cub']
from .resnet import get_resnet
def resnet10_cub(classes=200, **kwargs):
"""
ResNet-10 model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,'
https://arxiv.org/abs/1512.03385. It's an experimental model.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resnet(classes=classes, blocks=10, model_name="resnet10_cub", **kwargs)
def resnet12_cub(classes=200, **kwargs):
"""
ResNet-12 model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,'
https://arxiv.org/abs/1512.03385. It's an experimental model.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resnet(classes=classes, blocks=12, model_name="resnet12_cub", **kwargs)
def resnet14_cub(classes=200, **kwargs):
"""
ResNet-14 model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,'
https://arxiv.org/abs/1512.03385. It's an experimental model.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resnet(classes=classes, blocks=14, model_name="resnet14_cub", **kwargs)
def resnetbc14b_cub(classes=200, **kwargs):
"""
ResNet-BC-14b model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,'
https://arxiv.org/abs/1512.03385. It's an experimental model (bottleneck compressed).
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resnet(classes=classes, blocks=14, bottleneck=True, conv1_stride=False, model_name="resnetbc14b_cub",
**kwargs)
def resnet16_cub(classes=200, **kwargs):
"""
ResNet-16 model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,'
https://arxiv.org/abs/1512.03385. It's an experimental model.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resnet(classes=classes, blocks=16, model_name="resnet16_cub", **kwargs)
def resnet18_cub(classes=200, **kwargs):
"""
ResNet-18 model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,'
https://arxiv.org/abs/1512.03385.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resnet(classes=classes, blocks=18, model_name="resnet18_cub", **kwargs)
def resnet26_cub(classes=200, **kwargs):
"""
ResNet-26 model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,'
https://arxiv.org/abs/1512.03385. It's an experimental model.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resnet(classes=classes, blocks=26, bottleneck=False, model_name="resnet26_cub", **kwargs)
def resnetbc26b_cub(classes=200, **kwargs):
"""
ResNet-BC-26b model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,'
https://arxiv.org/abs/1512.03385. It's an experimental model (bottleneck compressed).
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resnet(classes=classes, blocks=26, bottleneck=True, conv1_stride=False, model_name="resnetbc26b_cub",
**kwargs)
def resnet34_cub(classes=200, **kwargs):
"""
ResNet-34 model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,'
https://arxiv.org/abs/1512.03385.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resnet(classes=classes, blocks=34, model_name="resnet34_cub", **kwargs)
def resnetbc38b_cub(classes=200, **kwargs):
"""
ResNet-BC-38b model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,'
https://arxiv.org/abs/1512.03385. It's an experimental model (bottleneck compressed).
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resnet(classes=classes, blocks=38, bottleneck=True, conv1_stride=False, model_name="resnetbc38b_cub",
**kwargs)
def resnet50_cub(classes=200, **kwargs):
"""
ResNet-50 model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,'
https://arxiv.org/abs/1512.03385.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resnet(classes=classes, blocks=50, model_name="resnet50_cub", **kwargs)
def resnet50b_cub(classes=200, **kwargs):
"""
ResNet-50 model with stride at the second convolution in bottleneck block from 'Deep Residual Learning for Image
Recognition,' https://arxiv.org/abs/1512.03385.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resnet(classes=classes, blocks=50, conv1_stride=False, model_name="resnet50b_cub", **kwargs)
def resnet101_cub(classes=200, **kwargs):
"""
ResNet-101 model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,'
https://arxiv.org/abs/1512.03385.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resnet(classes=classes, blocks=101, model_name="resnet101_cub", **kwargs)
def resnet101b_cub(classes=200, **kwargs):
"""
ResNet-101 model with stride at the second convolution in bottleneck block from 'Deep Residual Learning for Image
Recognition,' https://arxiv.org/abs/1512.03385.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resnet(classes=classes, blocks=101, conv1_stride=False, model_name="resnet101b_cub", **kwargs)
def resnet152_cub(classes=200, **kwargs):
"""
ResNet-152 model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,'
https://arxiv.org/abs/1512.03385.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resnet(classes=classes, blocks=152, model_name="resnet152_cub", **kwargs)
def resnet152b_cub(classes=200, **kwargs):
"""
ResNet-152 model with stride at the second convolution in bottleneck block from 'Deep Residual Learning for Image
Recognition,' https://arxiv.org/abs/1512.03385.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resnet(classes=classes, blocks=152, conv1_stride=False, model_name="resnet152b_cub", **kwargs)
def resnet200_cub(classes=200, **kwargs):
"""
ResNet-200 model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,'
https://arxiv.org/abs/1512.03385. It's an experimental model.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resnet(classes=classes, blocks=200, model_name="resnet200_cub", **kwargs)
def resnet200b_cub(classes=200, **kwargs):
"""
ResNet-200 model with stride at the second convolution in bottleneck block from 'Deep Residual Learning for Image
Recognition,' https://arxiv.org/abs/1512.03385. It's an experimental model.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_resnet(classes=classes, blocks=200, conv1_stride=False, model_name="resnet200b_cub", **kwargs)
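# Usage sketch (hedged, not part of the original file): these wrappers only
# pin `classes=200` and a model name onto `get_resnet`, so a forward pass
# works exactly like the ImageNet ResNets, just with a 200-way output.
def _example_resnet_cub_forward():
    import mxnet as mx
    net = resnet18_cub(pretrained=False)
    net.initialize(ctx=mx.cpu())
    x = mx.nd.random.uniform(shape=(2, 3, 224, 224))
    y = net(x)
    assert y.shape == (2, 200)
    return y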
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
resnet10_cub,
resnet12_cub,
resnet14_cub,
resnetbc14b_cub,
resnet16_cub,
resnet18_cub,
resnet26_cub,
resnetbc26b_cub,
resnet34_cub,
resnetbc38b_cub,
resnet50_cub,
resnet50b_cub,
resnet101_cub,
resnet101b_cub,
resnet152_cub,
resnet152b_cub,
resnet200_cub,
resnet200b_cub,
]
for model in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != resnet10_cub or weight_count == 5008392)
assert (model != resnet12_cub or weight_count == 5082376)
assert (model != resnet14_cub or weight_count == 5377800)
assert (model != resnetbc14b_cub or weight_count == 8425736)
assert (model != resnet16_cub or weight_count == 6558472)
assert (model != resnet18_cub or weight_count == 11279112)
assert (model != resnet26_cub or weight_count == 17549832)
assert (model != resnetbc26b_cub or weight_count == 14355976)
assert (model != resnet34_cub or weight_count == 21387272)
assert (model != resnetbc38b_cub or weight_count == 20286216)
assert (model != resnet50_cub or weight_count == 23917832)
assert (model != resnet50b_cub or weight_count == 23917832)
assert (model != resnet101_cub or weight_count == 42909960)
assert (model != resnet101b_cub or weight_count == 42909960)
assert (model != resnet152_cub or weight_count == 58553608)
assert (model != resnet152b_cub or weight_count == 58553608)
assert (model != resnet200_cub or weight_count == 63034632)
assert (model != resnet200b_cub or weight_count == 63034632)
x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
y = net(x)
assert (y.shape == (1, 200))
if __name__ == "__main__":
_test()
| 15,639 | 35.627635 | 117 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/bagnet.py | """
BagNet for ImageNet-1K, implemented in Gluon.
Original paper: 'Approximating CNNs with Bag-of-local-Features models works surprisingly well on ImageNet,'
https://openreview.net/pdf?id=SkfMWhAqYQ.
"""
__all__ = ['BagNet', 'bagnet9', 'bagnet17', 'bagnet33']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1, conv1x1_block, conv3x3_block, ConvBlock
class BagNetBottleneck(HybridBlock):
"""
BagNet bottleneck block for residual path in BagNet unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of 2 int
Convolution window size of the second convolution.
strides : int or tuple/list of 2 int
Strides of the second convolution.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
bottleneck_factor : int, default 4
Bottleneck factor.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
strides,
bn_use_global_stats,
bottleneck_factor=4,
**kwargs):
super(BagNetBottleneck, self).__init__(**kwargs)
mid_channels = out_channels // bottleneck_factor
with self.name_scope():
self.conv1 = conv1x1_block(
in_channels=in_channels,
out_channels=mid_channels,
bn_use_global_stats=bn_use_global_stats)
self.conv2 = ConvBlock(
in_channels=mid_channels,
out_channels=mid_channels,
kernel_size=kernel_size,
strides=strides,
padding=0,
bn_use_global_stats=bn_use_global_stats)
self.conv3 = conv1x1_block(
in_channels=mid_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats,
activation=None)
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
return x
class BagNetUnit(HybridBlock):
"""
BagNet unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of 2 int
Convolution window size of the second body convolution.
strides : int or tuple/list of 2 int
Strides of the second body convolution.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
strides,
bn_use_global_stats,
**kwargs):
super(BagNetUnit, self).__init__(**kwargs)
self.resize_identity = (in_channels != out_channels) or (strides != 1)
with self.name_scope():
self.body = BagNetBottleneck(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
strides=strides,
bn_use_global_stats=bn_use_global_stats)
if self.resize_identity:
self.identity_conv = conv1x1_block(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats,
activation=None)
self.activ = nn.Activation("relu")
def hybrid_forward(self, F, x):
if self.resize_identity:
identity = self.identity_conv(x)
else:
identity = x
x = self.body(x)
if self.resize_identity:
identity = F.slice_like(identity, x, axes=(2, 3))
x = x + identity
x = self.activ(x)
return x
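# Shape note (hedged sketch): the body uses valid padding (padding=0), so a
# 3x3 unit shrinks the feature map by 2 pixels per axis; on the resize
# branch, `F.slice_like(identity, x, axes=(2, 3))` crops the identity to the
# body's output size before the residual addition. A quick numeric check:
def _example_bagnet_crop():
    h_in, kernel_size = 27, 3
    h_body = h_in - (kernel_size - 1)  # valid 3x3 conv, stride 1
    assert h_body == 25                # identity is cropped from 27 to 25
    return h_body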
class BagNetInitBlock(HybridBlock):
"""
BagNet specific initial block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels,
out_channels,
bn_use_global_stats,
**kwargs):
super(BagNetInitBlock, self).__init__(**kwargs)
with self.name_scope():
self.conv1 = conv1x1(
in_channels=in_channels,
out_channels=out_channels)
self.conv2 = conv3x3_block(
in_channels=out_channels,
out_channels=out_channels,
padding=0,
bn_use_global_stats=bn_use_global_stats)
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.conv2(x)
return x
class BagNet(HybridBlock):
"""
BagNet model from 'Approximating CNNs with Bag-of-local-Features models works surprisingly well on ImageNet,'
https://openreview.net/pdf?id=SkfMWhAqYQ.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
final_pool_size : int
Size of the pooling windows for final pool.
    normal_kernel_sizes : list of int
        Number of leading units in each stage that use a 3x3 (instead of 1x1) convolution window.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
final_pool_size,
normal_kernel_sizes,
bn_use_global_stats=False,
in_channels=3,
in_size=(224, 224),
classes=1000,
**kwargs):
super(BagNet, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
self.features.add(BagNetInitBlock(
in_channels=in_channels,
out_channels=init_block_channels,
bn_use_global_stats=bn_use_global_stats))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
with stage.name_scope():
for j, out_channels in enumerate(channels_per_stage):
strides = 2 if (j == 0) and (i != len(channels) - 1) else 1
kernel_size = 3 if j < normal_kernel_sizes[i] else 1
stage.add(BagNetUnit(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
strides=strides,
bn_use_global_stats=bn_use_global_stats))
in_channels = out_channels
self.features.add(stage)
self.features.add(nn.AvgPool2D(
pool_size=final_pool_size,
strides=1))
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
self.output.add(nn.Dense(
units=classes,
in_units=in_channels))
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_bagnet(field,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create BagNet model with specific parameters.
Parameters:
----------
    field : int
        Size of the receptive field (9, 17 or 33).
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
layers = [3, 4, 6, 3]
if field == 9:
normal_kernel_sizes = [1, 1, 0, 0]
final_pool_size = 27
elif field == 17:
normal_kernel_sizes = [1, 1, 1, 0]
final_pool_size = 26
elif field == 33:
normal_kernel_sizes = [1, 1, 1, 1]
final_pool_size = 24
else:
raise ValueError("Unsupported BagNet with field: {}".format(field))
init_block_channels = 64
channels_per_layers = [256, 512, 1024, 2048]
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
net = BagNet(
channels=channels,
init_block_channels=init_block_channels,
final_pool_size=final_pool_size,
normal_kernel_sizes=normal_kernel_sizes,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
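# Config sketch (hedged, collected for reference): the `field` argument picks
# how many leading units per stage keep a 3x3 window, which sets the model's
# receptive field and therefore the final feature-map/pool size at 224x224.
def _example_bagnet_field_configs():
    configs = {   # field -> (normal_kernel_sizes, final_pool_size)
        9: ([1, 1, 0, 0], 27),
        17: ([1, 1, 1, 0], 26),
        33: ([1, 1, 1, 1], 24),
    }
    assert configs[17][1] == 26  # mirrors the branch in get_bagnet above
    return configs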
def bagnet9(**kwargs):
"""
BagNet-9 model from 'Approximating CNNs with Bag-of-local-Features models works surprisingly well on ImageNet,'
https://openreview.net/pdf?id=SkfMWhAqYQ.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_bagnet(field=9, model_name="bagnet9", **kwargs)
def bagnet17(**kwargs):
"""
BagNet-17 model from 'Approximating CNNs with Bag-of-local-Features models works surprisingly well on ImageNet,'
https://openreview.net/pdf?id=SkfMWhAqYQ.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_bagnet(field=17, model_name="bagnet17", **kwargs)
def bagnet33(**kwargs):
"""
BagNet-33 model from 'Approximating CNNs with Bag-of-local-Features models works surprisingly well on ImageNet,'
https://openreview.net/pdf?id=SkfMWhAqYQ.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_bagnet(field=33, model_name="bagnet33", **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
bagnet9,
bagnet17,
bagnet33,
]
for model in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != bagnet9 or weight_count == 15688744)
assert (model != bagnet17 or weight_count == 16213032)
assert (model != bagnet33 or weight_count == 18310184)
x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
y = net(x)
assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
| 12,862 | 32.066838 | 116 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/airnet.py | """
AirNet for ImageNet-1K, implemented in Gluon.
Original paper: 'Attention Inspiring Receptive-Fields Network for Learning Invariant Representations,'
https://ieeexplore.ieee.org/document/8510896.
"""
__all__ = ['AirNet', 'airnet50_1x64d_r2', 'airnet50_1x64d_r16', 'airnet101_1x64d_r2', 'AirBlock', 'AirInitBlock']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1_block, conv3x3_block
class AirBlock(HybridBlock):
"""
AirNet attention block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
groups : int, default 1
Number of groups.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    ratio : int, default 2
Air compression ratio.
in_size : tuple of 2 int, default (None, None)
Spatial size of the input tensor for the bilinear upsampling operation.
"""
def __init__(self,
in_channels,
out_channels,
groups=1,
bn_use_global_stats=False,
ratio=2,
in_size=(None, None),
**kwargs):
super(AirBlock, self).__init__(**kwargs)
assert (out_channels % ratio == 0)
mid_channels = out_channels // ratio
self.in_size = in_size
with self.name_scope():
self.conv1 = conv1x1_block(
in_channels=in_channels,
out_channels=mid_channels,
bn_use_global_stats=bn_use_global_stats)
self.pool = nn.MaxPool2D(
pool_size=3,
strides=2,
padding=1)
self.conv2 = conv3x3_block(
in_channels=mid_channels,
out_channels=mid_channels,
groups=groups,
bn_use_global_stats=bn_use_global_stats)
self.conv3 = conv1x1_block(
in_channels=mid_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats,
activation=None)
self.sigmoid = nn.Activation("sigmoid")
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.pool(x)
x = self.conv2(x)
x = F.contrib.BilinearResize2D(x, height=self.in_size[0], width=self.in_size[1])
x = self.conv3(x)
x = self.sigmoid(x)
return x
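# Gating sketch (hedged, not part of the original file): AirBlock produces a
# sigmoid mask at the input's spatial size (downsample -> 3x3 conv ->
# bilinear upsample back to `in_size`), which AirBottleneck multiplies into
# its features. A minimal standalone check of the mask shape:
def _example_airblock_mask():
    import mxnet as mx
    blk = AirBlock(in_channels=64, out_channels=64, ratio=2, in_size=(56, 56))
    blk.initialize(ctx=mx.cpu())
    x = mx.nd.zeros((1, 64, 56, 56))
    att = blk(x)
    assert att.shape == (1, 64, 56, 56)  # same spatial size, values in (0, 1)
    return att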
class AirBottleneck(HybridBlock):
"""
AirNet bottleneck block for residual path in AirNet unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    ratio : int
Air compression ratio.
in_size : tuple of 2 int
Spatial size of the input tensor for the bilinear upsampling operation.
"""
def __init__(self,
in_channels,
out_channels,
strides,
bn_use_global_stats,
ratio,
in_size,
**kwargs):
super(AirBottleneck, self).__init__(**kwargs)
mid_channels = out_channels // 4
self.use_air_block = (strides == 1 and mid_channels < 512)
with self.name_scope():
self.conv1 = conv1x1_block(
in_channels=in_channels,
out_channels=mid_channels,
bn_use_global_stats=bn_use_global_stats)
self.conv2 = conv3x3_block(
in_channels=mid_channels,
out_channels=mid_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats)
self.conv3 = conv1x1_block(
in_channels=mid_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats,
activation=None)
if self.use_air_block:
self.air = AirBlock(
in_channels=in_channels,
out_channels=mid_channels,
bn_use_global_stats=bn_use_global_stats,
ratio=ratio,
in_size=in_size)
def hybrid_forward(self, F, x):
if self.use_air_block:
att = self.air(x)
x = self.conv1(x)
x = self.conv2(x)
if self.use_air_block:
x = x * att
x = self.conv3(x)
return x
class AirUnit(HybridBlock):
"""
AirNet unit with residual connection.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    ratio : int
Air compression ratio.
in_size : tuple of 2 int
Spatial size of the input tensor for the bilinear upsampling operation.
"""
def __init__(self,
in_channels,
out_channels,
strides,
bn_use_global_stats,
ratio,
in_size,
**kwargs):
super(AirUnit, self).__init__(**kwargs)
self.resize_identity = (in_channels != out_channels) or (strides != 1)
with self.name_scope():
self.body = AirBottleneck(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats,
ratio=ratio,
in_size=in_size)
if self.resize_identity:
self.identity_conv = conv1x1_block(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats,
activation=None)
self.activ = nn.Activation("relu")
def hybrid_forward(self, F, x):
if self.resize_identity:
identity = self.identity_conv(x)
else:
identity = x
x = self.body(x)
x = x + identity
x = self.activ(x)
return x
class AirInitBlock(HybridBlock):
"""
AirNet specific initial block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels,
out_channels,
bn_use_global_stats,
**kwargs):
super(AirInitBlock, self).__init__(**kwargs)
mid_channels = out_channels // 2
with self.name_scope():
self.conv1 = conv3x3_block(
in_channels=in_channels,
out_channels=mid_channels,
strides=2,
bn_use_global_stats=bn_use_global_stats)
self.conv2 = conv3x3_block(
in_channels=mid_channels,
out_channels=mid_channels,
bn_use_global_stats=bn_use_global_stats)
self.conv3 = conv3x3_block(
in_channels=mid_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats)
self.pool = nn.MaxPool2D(
pool_size=3,
strides=2,
padding=1)
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
x = self.pool(x)
return x
class AirNet(HybridBlock):
"""
AirNet model from 'Attention Inspiring Receptive-Fields Network for Learning Invariant Representations,'
https://ieeexplore.ieee.org/document/8510896.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
    ratio : int
Air compression ratio.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
ratio,
bn_use_global_stats=False,
in_channels=3,
in_size=(224, 224),
classes=1000,
**kwargs):
super(AirNet, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
self.features.add(AirInitBlock(
in_channels=in_channels,
out_channels=init_block_channels,
bn_use_global_stats=bn_use_global_stats))
in_channels = init_block_channels
in_size = tuple([x // 4 for x in in_size])
for i, channels_per_stage in enumerate(channels):
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
with stage.name_scope():
for j, out_channels in enumerate(channels_per_stage):
strides = 2 if (j == 0) and (i != 0) else 1
stage.add(AirUnit(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats,
ratio=ratio,
in_size=in_size))
in_channels = out_channels
in_size = tuple([x // strides for x in in_size])
self.features.add(stage)
self.features.add(nn.AvgPool2D(
pool_size=7,
strides=1))
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
self.output.add(nn.Dense(
units=classes,
in_units=in_channels))
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_airnet(blocks,
base_channels,
ratio,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create AirNet model with specific parameters.
Parameters:
----------
blocks : int
Number of blocks.
    base_channels : int
        Base number of channels.
    ratio : int
        Air compression ratio.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
if blocks == 50:
layers = [3, 4, 6, 3]
elif blocks == 101:
layers = [3, 4, 23, 3]
else:
raise ValueError("Unsupported AirNet with number of blocks: {}".format(blocks))
bottleneck_expansion = 4
init_block_channels = base_channels
channels_per_layers = [base_channels * (2 ** i) * bottleneck_expansion for i in range(len(layers))]
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
net = AirNet(
channels=channels,
init_block_channels=init_block_channels,
ratio=ratio,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def airnet50_1x64d_r2(**kwargs):
"""
AirNet50-1x64d (r=2) model from 'Attention Inspiring Receptive-Fields Network for Learning Invariant
Representations,' https://ieeexplore.ieee.org/document/8510896.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_airnet(blocks=50, base_channels=64, ratio=2, model_name="airnet50_1x64d_r2", **kwargs)
def airnet50_1x64d_r16(**kwargs):
"""
AirNet50-1x64d (r=16) model from 'Attention Inspiring Receptive-Fields Network for Learning Invariant
Representations,' https://ieeexplore.ieee.org/document/8510896.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_airnet(blocks=50, base_channels=64, ratio=16, model_name="airnet50_1x64d_r16", **kwargs)
def airnet101_1x64d_r2(**kwargs):
"""
AirNet101-1x64d (r=2) model from 'Attention Inspiring Receptive-Fields Network for Learning Invariant
Representations,' https://ieeexplore.ieee.org/document/8510896.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_airnet(blocks=101, base_channels=64, ratio=2, model_name="airnet101_1x64d_r2", **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
airnet50_1x64d_r2,
airnet50_1x64d_r16,
airnet101_1x64d_r2,
]
for model in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != airnet50_1x64d_r2 or weight_count == 27425864)
assert (model != airnet50_1x64d_r16 or weight_count == 25714952)
assert (model != airnet101_1x64d_r2 or weight_count == 51727432)
x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
y = net(x)
assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
| 15,893 | 32.461053 | 115 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/mnasnet.py | """
MnasNet for ImageNet-1K, implemented in Gluon.
Original paper: 'MnasNet: Platform-Aware Neural Architecture Search for Mobile,' https://arxiv.org/abs/1807.11626.
"""
__all__ = ['MnasNet', 'mnasnet_b1', 'mnasnet_a1', 'mnasnet_small']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import round_channels, conv1x1_block, conv3x3_block, dwconv3x3_block, dwconv5x5_block, SEBlock
class DwsExpSEResUnit(HybridBlock):
"""
    Depthwise separable expanded residual unit with SE-block. Here it is used as the MnasNet unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int, default 1
Strides of the second convolution layer.
use_kernel3 : bool, default True
Whether to use 3x3 (instead of 5x5) kernel.
exp_factor : int, default 1
Expansion factor for each unit.
se_factor : int, default 0
SE reduction factor for each unit.
use_skip : bool, default True
Whether to use skip connection.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
activation : str, default 'relu'
Activation function or name of activation function.
"""
def __init__(self,
in_channels,
out_channels,
strides=1,
use_kernel3=True,
exp_factor=1,
se_factor=0,
use_skip=True,
bn_use_global_stats=False,
activation="relu",
**kwargs):
super(DwsExpSEResUnit, self).__init__(**kwargs)
assert (exp_factor >= 1)
self.residual = (in_channels == out_channels) and (strides == 1) and use_skip
self.use_exp_conv = exp_factor > 1
self.use_se = se_factor > 0
mid_channels = exp_factor * in_channels
dwconv_block_fn = dwconv3x3_block if use_kernel3 else dwconv5x5_block
with self.name_scope():
if self.use_exp_conv:
self.exp_conv = conv1x1_block(
in_channels=in_channels,
out_channels=mid_channels,
bn_use_global_stats=bn_use_global_stats,
activation=activation)
self.dw_conv = dwconv_block_fn(
in_channels=mid_channels,
out_channels=mid_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats,
activation=activation)
if self.use_se:
self.se = SEBlock(
channels=mid_channels,
reduction=(exp_factor * se_factor),
round_mid=False,
mid_activation=activation)
self.pw_conv = conv1x1_block(
in_channels=mid_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats,
activation=None)
def hybrid_forward(self, F, x):
if self.residual:
identity = x
if self.use_exp_conv:
x = self.exp_conv(x)
x = self.dw_conv(x)
if self.use_se:
x = self.se(x)
x = self.pw_conv(x)
if self.residual:
x = x + identity
return x
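# Unit sketch (hedged): this is the usual inverted-residual pattern --
# optional 1x1 expansion to `exp_factor * in_channels`, a depthwise 3x3/5x5,
# an optional SE gate on the expanded channels, then a linear 1x1 projection;
# the skip is used only when shapes allow it and `use_skip` is set. The
# numbers below just trace the channel arithmetic for one configuration.
def _example_mnas_unit_channels():
    in_channels, exp_factor, se_factor = 24, 6, 4
    mid_channels = exp_factor * in_channels   # 144 expanded channels
    se_reduction = exp_factor * se_factor     # SEBlock reduction = 24 here
    assert (mid_channels, mid_channels // se_reduction) == (144, 6)
    return mid_channels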
class MnasInitBlock(HybridBlock):
"""
MnasNet specific initial block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
mid_channels : int
Number of middle channels.
use_skip : bool
Whether to use skip connection in the second block.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
"""
def __init__(self,
in_channels,
out_channels,
mid_channels,
use_skip,
bn_use_global_stats=False,
**kwargs):
super(MnasInitBlock, self).__init__(**kwargs)
with self.name_scope():
self.conv1 = conv3x3_block(
in_channels=in_channels,
out_channels=mid_channels,
strides=2,
bn_use_global_stats=bn_use_global_stats)
self.conv2 = DwsExpSEResUnit(
in_channels=mid_channels,
out_channels=out_channels,
use_skip=use_skip,
bn_use_global_stats=bn_use_global_stats)
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.conv2(x)
return x
class MnasFinalBlock(HybridBlock):
"""
MnasNet specific final block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
mid_channels : int
Number of middle channels.
use_skip : bool
Whether to use skip connection in the second block.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
"""
def __init__(self,
in_channels,
out_channels,
mid_channels,
use_skip,
bn_use_global_stats=False,
**kwargs):
super(MnasFinalBlock, self).__init__(**kwargs)
with self.name_scope():
self.conv1 = DwsExpSEResUnit(
in_channels=in_channels,
out_channels=mid_channels,
exp_factor=6,
use_skip=use_skip,
bn_use_global_stats=bn_use_global_stats)
self.conv2 = conv1x1_block(
in_channels=mid_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats)
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.conv2(x)
return x
class MnasNet(HybridBlock):
"""
MnasNet model from 'MnasNet: Platform-Aware Neural Architecture Search for Mobile,'
https://arxiv.org/abs/1807.11626.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : list of 2 int
Number of output channels for the initial unit.
final_block_channels : list of 2 int
Number of output channels for the final block of the feature extractor.
kernels3 : list of list of int/bool
Using 3x3 (instead of 5x5) kernel for each unit.
exp_factors : list of list of int
Expansion factor for each unit.
se_factors : list of list of int
SE reduction factor for each unit.
init_block_use_skip : bool
Whether to use skip connection in the initial unit.
final_block_use_skip : bool
Whether to use skip connection in the final block of the feature extractor.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
final_block_channels,
kernels3,
exp_factors,
se_factors,
init_block_use_skip,
final_block_use_skip,
bn_use_global_stats=False,
in_channels=3,
in_size=(224, 224),
classes=1000,
**kwargs):
super(MnasNet, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
self.features.add(MnasInitBlock(
in_channels=in_channels,
out_channels=init_block_channels[1],
mid_channels=init_block_channels[0],
use_skip=init_block_use_skip,
bn_use_global_stats=bn_use_global_stats))
in_channels = init_block_channels[1]
for i, channels_per_stage in enumerate(channels):
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
with stage.name_scope():
for j, out_channels in enumerate(channels_per_stage):
strides = 2 if (j == 0) else 1
use_kernel3 = kernels3[i][j] == 1
exp_factor = exp_factors[i][j]
se_factor = se_factors[i][j]
stage.add(DwsExpSEResUnit(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
use_kernel3=use_kernel3,
exp_factor=exp_factor,
se_factor=se_factor,
bn_use_global_stats=bn_use_global_stats))
in_channels = out_channels
self.features.add(stage)
self.features.add(MnasFinalBlock(
in_channels=in_channels,
out_channels=final_block_channels[1],
mid_channels=final_block_channels[0],
use_skip=final_block_use_skip,
bn_use_global_stats=bn_use_global_stats))
in_channels = final_block_channels[1]
self.features.add(nn.AvgPool2D(
pool_size=7,
strides=1))
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
self.output.add(nn.Dense(
units=classes,
in_units=in_channels))
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_mnasnet(version,
width_scale,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create MnasNet model with specific parameters.
Parameters:
----------
version : str
        Version of MnasNet ('b1', 'a1' or 'small').
width_scale : float
Scale factor for width of layers.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
if version == "b1":
init_block_channels = [32, 16]
final_block_channels = [320, 1280]
channels = [[24, 24, 24], [40, 40, 40], [80, 80, 80, 96, 96], [192, 192, 192, 192]]
kernels3 = [[1, 1, 1], [0, 0, 0], [0, 0, 0, 1, 1], [0, 0, 0, 0]]
exp_factors = [[3, 3, 3], [3, 3, 3], [6, 6, 6, 6, 6], [6, 6, 6, 6]]
se_factors = [[0, 0, 0], [0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0]]
init_block_use_skip = False
final_block_use_skip = False
elif version == "a1":
init_block_channels = [32, 16]
final_block_channels = [320, 1280]
channels = [[24, 24], [40, 40, 40], [80, 80, 80, 80, 112, 112], [160, 160, 160]]
kernels3 = [[1, 1], [0, 0, 0], [1, 1, 1, 1, 1, 1], [0, 0, 0]]
exp_factors = [[6, 6], [3, 3, 3], [6, 6, 6, 6, 6, 6], [6, 6, 6]]
se_factors = [[0, 0], [4, 4, 4], [0, 0, 0, 0, 4, 4], [4, 4, 4]]
init_block_use_skip = False
final_block_use_skip = True
elif version == "small":
init_block_channels = [8, 8]
final_block_channels = [144, 1280]
channels = [[16], [16, 16], [32, 32, 32, 32, 32, 32, 32], [88, 88, 88]]
kernels3 = [[1], [1, 1], [0, 0, 0, 0, 1, 1, 1], [0, 0, 0]]
exp_factors = [[3], [6, 6], [6, 6, 6, 6, 6, 6, 6], [6, 6, 6]]
se_factors = [[0], [0, 0], [4, 4, 4, 4, 4, 4, 4], [4, 4, 4]]
init_block_use_skip = True
final_block_use_skip = True
else:
raise ValueError("Unsupported MnasNet version {}".format(version))
if width_scale != 1.0:
channels = [[round_channels(cij * width_scale) for cij in ci] for ci in channels]
        init_block_channels = [round_channels(ic * width_scale) for ic in init_block_channels]
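    # Illustrative note (assuming the common `round_channels` helper with a
    # divisor of 8): width_scale=0.75 maps a stage width of 40 to
    # round_channels(30), i.e. 32, keeping all widths divisible by 8.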
net = MnasNet(
channels=channels,
init_block_channels=init_block_channels,
final_block_channels=final_block_channels,
kernels3=kernels3,
exp_factors=exp_factors,
se_factors=se_factors,
init_block_use_skip=init_block_use_skip,
final_block_use_skip=final_block_use_skip,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def mnasnet_b1(**kwargs):
"""
MnasNet-B1 model from 'MnasNet: Platform-Aware Neural Architecture Search for Mobile,'
https://arxiv.org/abs/1807.11626.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_mnasnet(version="b1", width_scale=1.0, model_name="mnasnet_b1", **kwargs)
def mnasnet_a1(**kwargs):
"""
MnasNet-A1 model from 'MnasNet: Platform-Aware Neural Architecture Search for Mobile,'
https://arxiv.org/abs/1807.11626.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_mnasnet(version="a1", width_scale=1.0, model_name="mnasnet_a1", **kwargs)
def mnasnet_small(**kwargs):
"""
MnasNet-Small model from 'MnasNet: Platform-Aware Neural Architecture Search for Mobile,'
https://arxiv.org/abs/1807.11626.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_mnasnet(version="small", width_scale=1.0, model_name="mnasnet_small", **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
mnasnet_b1,
mnasnet_a1,
mnasnet_small,
]
for model in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != mnasnet_b1 or weight_count == 4383312)
assert (model != mnasnet_a1 or weight_count == 3887038)
assert (model != mnasnet_small or weight_count == 2030264)
x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
y = net(x)
assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
| 16,501 | 34.95207 | 118 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/pyramidnet_cifar.py | """
PyramidNet for CIFAR/SVHN, implemented in Gluon.
Original paper: 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.
"""
__all__ = ['CIFARPyramidNet', 'pyramidnet110_a48_cifar10', 'pyramidnet110_a48_cifar100', 'pyramidnet110_a48_svhn',
'pyramidnet110_a84_cifar10', 'pyramidnet110_a84_cifar100', 'pyramidnet110_a84_svhn',
'pyramidnet110_a270_cifar10', 'pyramidnet110_a270_cifar100', 'pyramidnet110_a270_svhn',
'pyramidnet164_a270_bn_cifar10', 'pyramidnet164_a270_bn_cifar100', 'pyramidnet164_a270_bn_svhn',
'pyramidnet200_a240_bn_cifar10', 'pyramidnet200_a240_bn_cifar100', 'pyramidnet200_a240_bn_svhn',
'pyramidnet236_a220_bn_cifar10', 'pyramidnet236_a220_bn_cifar100', 'pyramidnet236_a220_bn_svhn',
'pyramidnet272_a200_bn_cifar10', 'pyramidnet272_a200_bn_cifar100', 'pyramidnet272_a200_bn_svhn']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv3x3_block
from .preresnet import PreResActivation
from .pyramidnet import PyrUnit
class CIFARPyramidNet(HybridBlock):
"""
PyramidNet model for CIFAR from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (32, 32)
Spatial size of the expected input image.
classes : int, default 10
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
bottleneck,
bn_use_global_stats=False,
in_channels=3,
in_size=(32, 32),
classes=10,
**kwargs):
super(CIFARPyramidNet, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
self.features.add(conv3x3_block(
in_channels=in_channels,
out_channels=init_block_channels,
bn_use_global_stats=bn_use_global_stats,
activation=None))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
with stage.name_scope():
for j, out_channels in enumerate(channels_per_stage):
strides = 2 if (j == 0) and (i != 0) else 1
stage.add(PyrUnit(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats,
bottleneck=bottleneck))
in_channels = out_channels
self.features.add(stage)
self.features.add(PreResActivation(
in_channels=in_channels,
bn_use_global_stats=bn_use_global_stats))
self.features.add(nn.AvgPool2D(
pool_size=8,
strides=1))
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
self.output.add(nn.Dense(
units=classes,
in_units=in_channels))
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_pyramidnet_cifar(classes,
blocks,
alpha,
bottleneck,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create PyramidNet for CIFAR model with specific parameters.
Parameters:
----------
classes : int
Number of classification classes.
blocks : int
Number of blocks.
alpha : int
PyramidNet's alpha value.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
assert (classes in [10, 100])
if bottleneck:
assert ((blocks - 2) % 9 == 0)
layers = [(blocks - 2) // 9] * 3
else:
assert ((blocks - 2) % 6 == 0)
layers = [(blocks - 2) // 6] * 3
init_block_channels = 16
growth_add = float(alpha) / float(sum(layers))
from functools import reduce
channels = reduce(
lambda xi, yi: xi + [[(i + 1) * growth_add + xi[-1][-1] for i in list(range(yi))]],
layers,
[[init_block_channels]])[1:]
channels = [[int(round(cij)) for cij in ci] for ci in channels]
if bottleneck:
channels = [[cij * 4 for cij in ci] for ci in channels]
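    # Worked example (illustrative): blocks=110, alpha=48, bottleneck=False
    # gives layers=[18, 18, 18] and growth_add = 48 / 54 ~= 0.89, so unit
    # widths grow linearly from 16 before rounding; with bottleneck=True each
    # rounded width is additionally multiplied by 4.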
net = CIFARPyramidNet(
channels=channels,
init_block_channels=init_block_channels,
bottleneck=bottleneck,
classes=classes,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def pyramidnet110_a48_cifar10(classes=10, **kwargs):
"""
PyramidNet-110 (a=48) model for CIFAR-10 from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_pyramidnet_cifar(
classes=classes,
blocks=110,
alpha=48,
bottleneck=False,
model_name="pyramidnet110_a48_cifar10",
**kwargs)
def pyramidnet110_a48_cifar100(classes=100, **kwargs):
"""
PyramidNet-110 (a=48) model for CIFAR-100 from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_pyramidnet_cifar(
classes=classes,
blocks=110,
alpha=48,
bottleneck=False,
model_name="pyramidnet110_a48_cifar100",
**kwargs)
def pyramidnet110_a48_svhn(classes=10, **kwargs):
"""
PyramidNet-110 (a=48) model for SVHN from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_pyramidnet_cifar(
classes=classes,
blocks=110,
alpha=48,
bottleneck=False,
model_name="pyramidnet110_a48_svhn",
**kwargs)
def pyramidnet110_a84_cifar10(classes=10, **kwargs):
"""
PyramidNet-110 (a=84) model for CIFAR-10 from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_pyramidnet_cifar(
classes=classes,
blocks=110,
alpha=84,
bottleneck=False,
model_name="pyramidnet110_a84_cifar10",
**kwargs)
def pyramidnet110_a84_cifar100(classes=100, **kwargs):
"""
PyramidNet-110 (a=84) model for CIFAR-100 from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_pyramidnet_cifar(
classes=classes,
blocks=110,
alpha=84,
bottleneck=False,
model_name="pyramidnet110_a84_cifar100",
**kwargs)
def pyramidnet110_a84_svhn(classes=10, **kwargs):
"""
PyramidNet-110 (a=84) model for SVHN from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_pyramidnet_cifar(
classes=classes,
blocks=110,
alpha=84,
bottleneck=False,
model_name="pyramidnet110_a84_svhn",
**kwargs)
def pyramidnet110_a270_cifar10(classes=10, **kwargs):
"""
PyramidNet-110 (a=270) model for CIFAR-10 from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_pyramidnet_cifar(
classes=classes,
blocks=110,
alpha=270,
bottleneck=False,
model_name="pyramidnet110_a270_cifar10",
**kwargs)
def pyramidnet110_a270_cifar100(classes=100, **kwargs):
"""
PyramidNet-110 (a=270) model for CIFAR-100 from 'Deep Pyramidal Residual Networks,'
https://arxiv.org/abs/1610.02915.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_pyramidnet_cifar(
classes=classes,
blocks=110,
alpha=270,
bottleneck=False,
model_name="pyramidnet110_a270_cifar100",
**kwargs)
def pyramidnet110_a270_svhn(classes=10, **kwargs):
"""
PyramidNet-110 (a=270) model for SVHN from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_pyramidnet_cifar(
classes=classes,
blocks=110,
alpha=270,
bottleneck=False,
model_name="pyramidnet110_a270_svhn",
**kwargs)
def pyramidnet164_a270_bn_cifar10(classes=10, **kwargs):
"""
PyramidNet-164 (a=270, bn) model for CIFAR-10 from 'Deep Pyramidal Residual Networks,'
https://arxiv.org/abs/1610.02915.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_pyramidnet_cifar(
classes=classes,
blocks=164,
alpha=270,
bottleneck=True,
model_name="pyramidnet164_a270_bn_cifar10",
**kwargs)
def pyramidnet164_a270_bn_cifar100(classes=100, **kwargs):
"""
PyramidNet-164 (a=270, bn) model for CIFAR-100 from 'Deep Pyramidal Residual Networks,'
https://arxiv.org/abs/1610.02915.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_pyramidnet_cifar(
classes=classes,
blocks=164,
alpha=270,
bottleneck=True,
model_name="pyramidnet164_a270_bn_cifar100",
**kwargs)
def pyramidnet164_a270_bn_svhn(classes=10, **kwargs):
"""
PyramidNet-164 (a=270, bn) model for SVHN from 'Deep Pyramidal Residual Networks,'
https://arxiv.org/abs/1610.02915.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_pyramidnet_cifar(
classes=classes,
blocks=164,
alpha=270,
bottleneck=True,
model_name="pyramidnet164_a270_bn_svhn",
**kwargs)
def pyramidnet200_a240_bn_cifar10(classes=10, **kwargs):
"""
PyramidNet-200 (a=240, bn) model for CIFAR-10 from 'Deep Pyramidal Residual Networks,'
https://arxiv.org/abs/1610.02915.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_pyramidnet_cifar(
classes=classes,
blocks=200,
alpha=240,
bottleneck=True,
model_name="pyramidnet200_a240_bn_cifar10",
**kwargs)
def pyramidnet200_a240_bn_cifar100(classes=100, **kwargs):
"""
PyramidNet-200 (a=240, bn) model for CIFAR-100 from 'Deep Pyramidal Residual Networks,'
https://arxiv.org/abs/1610.02915.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_pyramidnet_cifar(
classes=classes,
blocks=200,
alpha=240,
bottleneck=True,
model_name="pyramidnet200_a240_bn_cifar100",
**kwargs)
def pyramidnet200_a240_bn_svhn(classes=10, **kwargs):
"""
PyramidNet-200 (a=240, bn) model for SVHN from 'Deep Pyramidal Residual Networks,'
https://arxiv.org/abs/1610.02915.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_pyramidnet_cifar(
classes=classes,
blocks=200,
alpha=240,
bottleneck=True,
model_name="pyramidnet200_a240_bn_svhn",
**kwargs)
def pyramidnet236_a220_bn_cifar10(classes=10, **kwargs):
"""
PyramidNet-236 (a=220, bn) model for CIFAR-10 from 'Deep Pyramidal Residual Networks,'
https://arxiv.org/abs/1610.02915.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_pyramidnet_cifar(
classes=classes,
blocks=236,
alpha=220,
bottleneck=True,
model_name="pyramidnet236_a220_bn_cifar10",
**kwargs)
def pyramidnet236_a220_bn_cifar100(classes=100, **kwargs):
"""
PyramidNet-236 (a=220, bn) model for CIFAR-100 from 'Deep Pyramidal Residual Networks,'
https://arxiv.org/abs/1610.02915.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_pyramidnet_cifar(
classes=classes,
blocks=236,
alpha=220,
bottleneck=True,
model_name="pyramidnet236_a220_bn_cifar100",
**kwargs)
def pyramidnet236_a220_bn_svhn(classes=10, **kwargs):
"""
PyramidNet-236 (a=220, bn) model for SVHN from 'Deep Pyramidal Residual Networks,'
https://arxiv.org/abs/1610.02915.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_pyramidnet_cifar(
classes=classes,
blocks=236,
alpha=220,
bottleneck=True,
model_name="pyramidnet236_a220_bn_svhn",
**kwargs)
def pyramidnet272_a200_bn_cifar10(classes=10, **kwargs):
"""
PyramidNet-272 (a=200, bn) model for CIFAR-10 from 'Deep Pyramidal Residual Networks,'
https://arxiv.org/abs/1610.02915.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_pyramidnet_cifar(
classes=classes,
blocks=272,
alpha=200,
bottleneck=True,
model_name="pyramidnet272_a200_bn_cifar10",
**kwargs)
def pyramidnet272_a200_bn_cifar100(classes=100, **kwargs):
"""
PyramidNet-272 (a=200, bn) model for CIFAR-100 from 'Deep Pyramidal Residual Networks,'
https://arxiv.org/abs/1610.02915.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_pyramidnet_cifar(
classes=classes,
blocks=272,
alpha=200,
bottleneck=True,
model_name="pyramidnet272_a200_bn_cifar100",
**kwargs)
def pyramidnet272_a200_bn_svhn(classes=10, **kwargs):
"""
PyramidNet-272 (a=200, bn) model for SVHN from 'Deep Pyramidal Residual Networks,'
https://arxiv.org/abs/1610.02915.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_pyramidnet_cifar(
classes=classes,
blocks=272,
alpha=200,
bottleneck=True,
model_name="pyramidnet272_a200_bn_svhn",
**kwargs)
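# A minimal usage sketch (illustrative; assumes an MXNet/Gluon environment):
#
#     import mxnet as mx
#     net = pyramidnet110_a48_cifar10()
#     net.initialize(ctx=mx.cpu())
#     x = mx.nd.zeros((1, 3, 32, 32))
#     y = net(x)  # y.shape == (1, 10)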
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
(pyramidnet110_a48_cifar10, 10),
(pyramidnet110_a48_cifar100, 100),
(pyramidnet110_a48_svhn, 10),
(pyramidnet110_a84_cifar10, 10),
(pyramidnet110_a84_cifar100, 100),
(pyramidnet110_a84_svhn, 10),
(pyramidnet110_a270_cifar10, 10),
(pyramidnet110_a270_cifar100, 100),
(pyramidnet110_a270_svhn, 10),
(pyramidnet164_a270_bn_cifar10, 10),
(pyramidnet164_a270_bn_cifar100, 100),
(pyramidnet164_a270_bn_svhn, 10),
(pyramidnet200_a240_bn_cifar10, 10),
(pyramidnet200_a240_bn_cifar100, 100),
(pyramidnet200_a240_bn_svhn, 10),
(pyramidnet236_a220_bn_cifar10, 10),
(pyramidnet236_a220_bn_cifar100, 100),
(pyramidnet236_a220_bn_svhn, 10),
(pyramidnet272_a200_bn_cifar10, 10),
(pyramidnet272_a200_bn_cifar100, 100),
(pyramidnet272_a200_bn_svhn, 10),
]
for model, classes in models:
net = model(pretrained=pretrained, classes=classes)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != pyramidnet110_a48_cifar10 or weight_count == 1772706)
assert (model != pyramidnet110_a48_cifar100 or weight_count == 1778556)
assert (model != pyramidnet110_a48_svhn or weight_count == 1772706)
assert (model != pyramidnet110_a84_cifar10 or weight_count == 3904446)
assert (model != pyramidnet110_a84_cifar100 or weight_count == 3913536)
assert (model != pyramidnet110_a84_svhn or weight_count == 3904446)
assert (model != pyramidnet110_a270_cifar10 or weight_count == 28485477)
assert (model != pyramidnet110_a270_cifar100 or weight_count == 28511307)
assert (model != pyramidnet110_a270_svhn or weight_count == 28485477)
assert (model != pyramidnet164_a270_bn_cifar10 or weight_count == 27216021)
assert (model != pyramidnet164_a270_bn_cifar100 or weight_count == 27319071)
assert (model != pyramidnet164_a270_bn_svhn or weight_count == 27216021)
assert (model != pyramidnet200_a240_bn_cifar10 or weight_count == 26752702)
assert (model != pyramidnet200_a240_bn_cifar100 or weight_count == 26844952)
assert (model != pyramidnet200_a240_bn_svhn or weight_count == 26752702)
assert (model != pyramidnet236_a220_bn_cifar10 or weight_count == 26969046)
assert (model != pyramidnet236_a220_bn_cifar100 or weight_count == 27054096)
assert (model != pyramidnet236_a220_bn_svhn or weight_count == 26969046)
assert (model != pyramidnet272_a200_bn_cifar10 or weight_count == 26210842)
assert (model != pyramidnet272_a200_bn_cifar100 or weight_count == 26288692)
assert (model != pyramidnet272_a200_bn_svhn or weight_count == 26210842)
x = mx.nd.zeros((1, 3, 32, 32), ctx=ctx)
y = net(x)
assert (y.shape == (1, classes))
if __name__ == "__main__":
_test()
| 25,957 | 33.110381 | 120 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/preresnet_cifar.py | """
PreResNet for CIFAR/SVHN, implemented in Gluon.
Original paper: 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.
"""
__all__ = ['CIFARPreResNet', 'preresnet20_cifar10', 'preresnet20_cifar100', 'preresnet20_svhn',
'preresnet56_cifar10', 'preresnet56_cifar100', 'preresnet56_svhn',
'preresnet110_cifar10', 'preresnet110_cifar100', 'preresnet110_svhn',
'preresnet164bn_cifar10', 'preresnet164bn_cifar100', 'preresnet164bn_svhn',
'preresnet272bn_cifar10', 'preresnet272bn_cifar100', 'preresnet272bn_svhn',
'preresnet542bn_cifar10', 'preresnet542bn_cifar100', 'preresnet542bn_svhn',
'preresnet1001_cifar10', 'preresnet1001_cifar100', 'preresnet1001_svhn',
'preresnet1202_cifar10', 'preresnet1202_cifar100', 'preresnet1202_svhn']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv3x3
from .preresnet import PreResUnit, PreResActivation
class CIFARPreResNet(HybridBlock):
"""
PreResNet model for CIFAR from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (32, 32)
Spatial size of the expected input image.
classes : int, default 10
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
bottleneck,
bn_use_global_stats=False,
in_channels=3,
in_size=(32, 32),
classes=10,
**kwargs):
super(CIFARPreResNet, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
self.features.add(conv3x3(
in_channels=in_channels,
out_channels=init_block_channels))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
with stage.name_scope():
for j, out_channels in enumerate(channels_per_stage):
strides = 2 if (j == 0) and (i != 0) else 1
stage.add(PreResUnit(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats,
bottleneck=bottleneck,
conv1_stride=False))
in_channels = out_channels
self.features.add(stage)
self.features.add(PreResActivation(
in_channels=in_channels,
bn_use_global_stats=bn_use_global_stats))
self.features.add(nn.AvgPool2D(
pool_size=8,
strides=1))
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
self.output.add(nn.Dense(
units=classes,
in_units=in_channels))
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_preresnet_cifar(classes,
blocks,
bottleneck,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create PreResNet model for CIFAR with specific parameters.
Parameters:
----------
classes : int
Number of classification classes.
blocks : int
Number of blocks.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
assert (classes in [10, 100])
if bottleneck:
assert ((blocks - 2) % 9 == 0)
layers = [(blocks - 2) // 9] * 3
else:
assert ((blocks - 2) % 6 == 0)
layers = [(blocks - 2) // 6] * 3
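    # Example: blocks=164 with bottleneck gives (164 - 2) // 9 = 18 units per
    # stage, while blocks=20 without bottleneck gives (20 - 2) // 6 = 3.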
channels_per_layers = [16, 32, 64]
init_block_channels = 16
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
if bottleneck:
channels = [[cij * 4 for cij in ci] for ci in channels]
net = CIFARPreResNet(
channels=channels,
init_block_channels=init_block_channels,
bottleneck=bottleneck,
classes=classes,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def preresnet20_cifar10(classes=10, **kwargs):
"""
PreResNet-20 model for CIFAR-10 from 'Identity Mappings in Deep Residual Networks,'
https://arxiv.org/abs/1603.05027.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_preresnet_cifar(classes=classes, blocks=20, bottleneck=False, model_name="preresnet20_cifar10", **kwargs)
def preresnet20_cifar100(classes=100, **kwargs):
"""
PreResNet-20 model for CIFAR-100 from 'Identity Mappings in Deep Residual Networks,'
https://arxiv.org/abs/1603.05027.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_preresnet_cifar(classes=classes, blocks=20, bottleneck=False, model_name="preresnet20_cifar100",
**kwargs)
def preresnet20_svhn(classes=10, **kwargs):
"""
PreResNet-20 model for SVHN from 'Identity Mappings in Deep Residual Networks,'
https://arxiv.org/abs/1603.05027.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_preresnet_cifar(classes=classes, blocks=20, bottleneck=False, model_name="preresnet20_svhn", **kwargs)
def preresnet56_cifar10(classes=10, **kwargs):
"""
PreResNet-56 model for CIFAR-10 from 'Identity Mappings in Deep Residual Networks,'
https://arxiv.org/abs/1603.05027.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_preresnet_cifar(classes=classes, blocks=56, bottleneck=False, model_name="preresnet56_cifar10", **kwargs)
def preresnet56_cifar100(classes=100, **kwargs):
"""
PreResNet-56 model for CIFAR-100 from 'Identity Mappings in Deep Residual Networks,'
https://arxiv.org/abs/1603.05027.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_preresnet_cifar(classes=classes, blocks=56, bottleneck=False, model_name="preresnet56_cifar100",
**kwargs)
def preresnet56_svhn(classes=10, **kwargs):
"""
PreResNet-56 model for SVHN from 'Identity Mappings in Deep Residual Networks,'
https://arxiv.org/abs/1603.05027.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_preresnet_cifar(classes=classes, blocks=56, bottleneck=False, model_name="preresnet56_svhn", **kwargs)
def preresnet110_cifar10(classes=10, **kwargs):
"""
PreResNet-110 model for CIFAR-10 from 'Identity Mappings in Deep Residual Networks,'
https://arxiv.org/abs/1603.05027.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_preresnet_cifar(classes=classes, blocks=110, bottleneck=False, model_name="preresnet110_cifar10",
**kwargs)
def preresnet110_cifar100(classes=100, **kwargs):
"""
PreResNet-110 model for CIFAR-100 from 'Identity Mappings in Deep Residual Networks,'
https://arxiv.org/abs/1603.05027.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_preresnet_cifar(classes=classes, blocks=110, bottleneck=False, model_name="preresnet110_cifar100",
**kwargs)
def preresnet110_svhn(classes=10, **kwargs):
"""
PreResNet-110 model for SVHN from 'Identity Mappings in Deep Residual Networks,'
https://arxiv.org/abs/1603.05027.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_preresnet_cifar(classes=classes, blocks=110, bottleneck=False, model_name="preresnet110_svhn",
**kwargs)
def preresnet164bn_cifar10(classes=10, **kwargs):
"""
PreResNet-164(BN) model for CIFAR-10 from 'Identity Mappings in Deep Residual Networks,'
https://arxiv.org/abs/1603.05027.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_preresnet_cifar(classes=classes, blocks=164, bottleneck=True, model_name="preresnet164bn_cifar10",
**kwargs)
def preresnet164bn_cifar100(classes=100, **kwargs):
"""
PreResNet-164(BN) model for CIFAR-100 from 'Identity Mappings in Deep Residual Networks,'
https://arxiv.org/abs/1603.05027.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_preresnet_cifar(classes=classes, blocks=164, bottleneck=True, model_name="preresnet164bn_cifar100",
**kwargs)
def preresnet164bn_svhn(classes=10, **kwargs):
"""
PreResNet-164(BN) model for SVHN from 'Identity Mappings in Deep Residual Networks,'
https://arxiv.org/abs/1603.05027.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_preresnet_cifar(classes=classes, blocks=164, bottleneck=True, model_name="preresnet164bn_svhn",
**kwargs)
def preresnet272bn_cifar10(classes=10, **kwargs):
"""
PreResNet-272(BN) model for CIFAR-10 from 'Identity Mappings in Deep Residual Networks,'
https://arxiv.org/abs/1603.05027.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_preresnet_cifar(classes=classes, blocks=272, bottleneck=True, model_name="preresnet272bn_cifar10",
**kwargs)
def preresnet272bn_cifar100(classes=100, **kwargs):
"""
PreResNet-272(BN) model for CIFAR-100 from 'Identity Mappings in Deep Residual Networks,'
https://arxiv.org/abs/1603.05027.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_preresnet_cifar(classes=classes, blocks=272, bottleneck=True, model_name="preresnet272bn_cifar100",
**kwargs)
def preresnet272bn_svhn(classes=10, **kwargs):
"""
PreResNet-272(BN) model for SVHN from 'Identity Mappings in Deep Residual Networks,'
https://arxiv.org/abs/1603.05027.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_preresnet_cifar(classes=classes, blocks=272, bottleneck=True, model_name="preresnet272bn_svhn",
**kwargs)
def preresnet542bn_cifar10(classes=10, **kwargs):
"""
PreResNet-542(BN) model for CIFAR-10 from 'Identity Mappings in Deep Residual Networks,'
https://arxiv.org/abs/1603.05027.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_preresnet_cifar(classes=classes, blocks=542, bottleneck=True, model_name="preresnet542bn_cifar10",
**kwargs)
def preresnet542bn_cifar100(classes=100, **kwargs):
"""
PreResNet-542(BN) model for CIFAR-100 from 'Identity Mappings in Deep Residual Networks,'
https://arxiv.org/abs/1603.05027.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_preresnet_cifar(classes=classes, blocks=542, bottleneck=True, model_name="preresnet542bn_cifar100",
**kwargs)
def preresnet542bn_svhn(classes=10, **kwargs):
"""
PreResNet-542(BN) model for SVHN from 'Identity Mappings in Deep Residual Networks,'
https://arxiv.org/abs/1603.05027.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_preresnet_cifar(classes=classes, blocks=542, bottleneck=True, model_name="preresnet542bn_svhn",
**kwargs)
def preresnet1001_cifar10(classes=10, **kwargs):
"""
PreResNet-1001 model for CIFAR-10 from 'Identity Mappings in Deep Residual Networks,'
https://arxiv.org/abs/1603.05027.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_preresnet_cifar(classes=classes, blocks=1001, bottleneck=True, model_name="preresnet1001_cifar10",
**kwargs)
def preresnet1001_cifar100(classes=100, **kwargs):
"""
PreResNet-1001 model for CIFAR-100 from 'Identity Mappings in Deep Residual Networks,'
https://arxiv.org/abs/1603.05027.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_preresnet_cifar(classes=classes, blocks=1001, bottleneck=True, model_name="preresnet1001_cifar100",
**kwargs)
def preresnet1001_svhn(classes=10, **kwargs):
"""
PreResNet-1001 model for SVHN from 'Identity Mappings in Deep Residual Networks,'
https://arxiv.org/abs/1603.05027.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_preresnet_cifar(classes=classes, blocks=1001, bottleneck=True, model_name="preresnet1001_svhn",
**kwargs)
def preresnet1202_cifar10(classes=10, **kwargs):
"""
PreResNet-1202 model for CIFAR-10 from 'Identity Mappings in Deep Residual Networks,'
https://arxiv.org/abs/1603.05027.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_preresnet_cifar(classes=classes, blocks=1202, bottleneck=False, model_name="preresnet1202_cifar10",
**kwargs)
def preresnet1202_cifar100(classes=100, **kwargs):
"""
PreResNet-1202 model for CIFAR-100 from 'Identity Mappings in Deep Residual Networks,'
https://arxiv.org/abs/1603.05027.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_preresnet_cifar(classes=classes, blocks=1202, bottleneck=False, model_name="preresnet1202_cifar100",
**kwargs)
def preresnet1202_svhn(classes=10, **kwargs):
"""
PreResNet-1202 model for SVHN from 'Identity Mappings in Deep Residual Networks,'
https://arxiv.org/abs/1603.05027.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_preresnet_cifar(classes=classes, blocks=1202, bottleneck=False, model_name="preresnet1202_svhn",
**kwargs)
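# Pretrained weights follow the common gluoncv2 loading pattern (a sketch;
# weights are fetched into '~/.mxnet/models' by default):
#
#     net = preresnet20_cifar10(pretrained=True)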
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
(preresnet20_cifar10, 10),
(preresnet20_cifar100, 100),
(preresnet20_svhn, 10),
(preresnet56_cifar10, 10),
(preresnet56_cifar100, 100),
(preresnet56_svhn, 10),
(preresnet110_cifar10, 10),
(preresnet110_cifar100, 100),
(preresnet110_svhn, 10),
(preresnet164bn_cifar10, 10),
(preresnet164bn_cifar100, 100),
(preresnet164bn_svhn, 10),
(preresnet272bn_cifar10, 10),
(preresnet272bn_cifar100, 100),
(preresnet272bn_svhn, 10),
(preresnet542bn_cifar10, 10),
(preresnet542bn_cifar100, 100),
(preresnet542bn_svhn, 10),
(preresnet1001_cifar10, 10),
(preresnet1001_cifar100, 100),
(preresnet1001_svhn, 10),
(preresnet1202_cifar10, 10),
(preresnet1202_cifar100, 100),
(preresnet1202_svhn, 10),
]
for model, classes in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != preresnet20_cifar10 or weight_count == 272282)
assert (model != preresnet20_cifar100 or weight_count == 278132)
assert (model != preresnet20_svhn or weight_count == 272282)
assert (model != preresnet56_cifar10 or weight_count == 855578)
assert (model != preresnet56_cifar100 or weight_count == 861428)
assert (model != preresnet56_svhn or weight_count == 855578)
assert (model != preresnet110_cifar10 or weight_count == 1730522)
assert (model != preresnet110_cifar100 or weight_count == 1736372)
assert (model != preresnet110_svhn or weight_count == 1730522)
assert (model != preresnet164bn_cifar10 or weight_count == 1703258)
assert (model != preresnet164bn_cifar100 or weight_count == 1726388)
assert (model != preresnet164bn_svhn or weight_count == 1703258)
assert (model != preresnet272bn_cifar10 or weight_count == 2816090)
assert (model != preresnet272bn_cifar100 or weight_count == 2839220)
assert (model != preresnet272bn_svhn or weight_count == 2816090)
assert (model != preresnet542bn_cifar10 or weight_count == 5598170)
assert (model != preresnet542bn_cifar100 or weight_count == 5621300)
assert (model != preresnet542bn_svhn or weight_count == 5598170)
assert (model != preresnet1001_cifar10 or weight_count == 10327706)
assert (model != preresnet1001_cifar100 or weight_count == 10350836)
assert (model != preresnet1001_svhn or weight_count == 10327706)
assert (model != preresnet1202_cifar10 or weight_count == 19423834)
assert (model != preresnet1202_cifar100 or weight_count == 19429684)
assert (model != preresnet1202_svhn or weight_count == 19423834)
x = mx.nd.zeros((1, 3, 32, 32), ctx=ctx)
y = net(x)
assert (y.shape == (1, classes))
if __name__ == "__main__":
_test()
| 26,802 | 36.330084 | 120 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/alphapose_coco.py | """
AlphaPose for COCO Keypoint, implemented in Gluon.
Original paper: 'RMPE: Regional Multi-person Pose Estimation,' https://arxiv.org/abs/1612.00137.
"""
__all__ = ['AlphaPose', 'alphapose_fastseresnet101b_coco']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from mxnet.gluon.contrib.nn import PixelShuffle2D
from .common import conv3x3, DucBlock, HeatmapMaxDetBlock
from .fastseresnet import fastseresnet101b
class AlphaPose(HybridBlock):
"""
AlphaPose model from 'RMPE: Regional Multi-person Pose Estimation,' https://arxiv.org/abs/1612.00137.
Parameters:
----------
backbone : nn.Sequential
Feature extractor.
backbone_out_channels : int
Number of output channels for the backbone.
channels : list of int
Number of output channels for each decoder unit.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
bn_cudnn_off : bool, default True
Whether to disable CUDNN batch normalization operator.
return_heatmap : bool, default False
Whether to return only heatmap.
fixed_size : bool, default True
Whether to expect fixed spatial size of input image.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (256, 192)
Spatial size of the expected input image.
keypoints : int, default 17
Number of keypoints.
"""
def __init__(self,
backbone,
backbone_out_channels,
channels,
bn_use_global_stats=False,
bn_cudnn_off=False,
return_heatmap=False,
fixed_size=True,
in_channels=3,
in_size=(256, 192),
keypoints=17,
**kwargs):
super(AlphaPose, self).__init__(**kwargs)
assert (in_channels == 3)
self.in_size = in_size
self.keypoints = keypoints
self.return_heatmap = return_heatmap
with self.name_scope():
self.backbone = backbone
self.decoder = nn.HybridSequential(prefix="")
self.decoder.add(PixelShuffle2D(factor=2))
in_channels = backbone_out_channels // 4
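            # PixelShuffle2D(factor=2) trades channels for spatial size:
            # (N, C, H, W) -> (N, C // 4, 2 * H, 2 * W), hence the `// 4`.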
for out_channels in channels:
self.decoder.add(DucBlock(
in_channels=in_channels,
out_channels=out_channels,
scale_factor=2,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off))
in_channels = out_channels
self.decoder.add(conv3x3(
in_channels=in_channels,
out_channels=keypoints,
use_bias=True))
self.heatmap_max_det = HeatmapMaxDetBlock(
channels=keypoints,
in_size=(in_size[0] // 4, in_size[1] // 4),
fixed_size=fixed_size)
def hybrid_forward(self, F, x):
x = self.backbone(x)
heatmap = self.decoder(x)
if self.return_heatmap:
return heatmap
else:
keypoints = self.heatmap_max_det(heatmap)
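            # Each detected keypoint is an (x, y, score) triple taken from the
            # heatmap maxima, so the output has shape (batch, keypoints, 3).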
return keypoints
def get_alphapose(backbone,
backbone_out_channels,
keypoints,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create AlphaPose model with specific parameters.
Parameters:
----------
backbone : nn.Sequential
Feature extractor.
backbone_out_channels : int
Number of output channels for the backbone.
keypoints : int
Number of keypoints.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
channels = [256, 128]
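    # Decoder widths for the two DUC upsampling stages that follow the initial
    # pixel shuffle; each stage doubles the spatial resolution.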
net = AlphaPose(
backbone=backbone,
backbone_out_channels=backbone_out_channels,
channels=channels,
keypoints=keypoints,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def alphapose_fastseresnet101b_coco(pretrained_backbone=False, keypoints=17, **kwargs):
"""
    AlphaPose model on the base of Fast-SE-ResNet-101b for COCO Keypoint from 'RMPE: Regional Multi-person Pose
    Estimation,' https://arxiv.org/abs/1612.00137.
Parameters:
----------
pretrained_backbone : bool, default False
Whether to load the pretrained weights for feature extractor.
keypoints : int, default 17
Number of keypoints.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
backbone = fastseresnet101b(pretrained=pretrained_backbone).features[:-1]
return get_alphapose(backbone=backbone, backbone_out_channels=2048, keypoints=keypoints,
model_name="alphapose_fastseresnet101b_coco", **kwargs)
def _test():
import numpy as np
import mxnet as mx
in_size = (256, 192)
keypoints = 17
return_heatmap = False
pretrained = False
models = [
alphapose_fastseresnet101b_coco,
]
for model in models:
net = model(pretrained=pretrained, in_size=in_size, return_heatmap=return_heatmap)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != alphapose_fastseresnet101b_coco or weight_count == 59569873)
batch = 14
x = mx.nd.random.normal(shape=(batch, 3, in_size[0], in_size[1]), ctx=ctx)
y = net(x)
assert ((y.shape[0] == batch) and (y.shape[1] == keypoints))
if return_heatmap:
assert ((y.shape[2] == x.shape[2] // 4) and (y.shape[3] == x.shape[3] // 4))
else:
assert (y.shape[2] == 3)
if __name__ == "__main__":
_test()
| 7,299 | 32.953488 | 116 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/pyramidnet.py | """
PyramidNet for ImageNet-1K, implemented in Gluon.
Original paper: 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.
"""
__all__ = ['PyramidNet', 'pyramidnet101_a360', 'PyrUnit']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import pre_conv1x1_block, pre_conv3x3_block
from .preresnet import PreResActivation
class PyrBlock(HybridBlock):
"""
Simple PyramidNet block for residual path in PyramidNet unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels,
out_channels,
strides,
bn_use_global_stats,
**kwargs):
super(PyrBlock, self).__init__(**kwargs)
with self.name_scope():
self.conv1 = pre_conv3x3_block(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats,
activate=False)
self.conv2 = pre_conv3x3_block(
in_channels=out_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats)
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.conv2(x)
return x
class PyrBottleneck(HybridBlock):
"""
PyramidNet bottleneck block for residual path in PyramidNet unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels,
out_channels,
strides,
bn_use_global_stats,
**kwargs):
super(PyrBottleneck, self).__init__(**kwargs)
mid_channels = out_channels // 4
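        # The 3x3 convolution runs at a quarter of the unit's output width,
        # mirroring the usual 4x bottleneck expansion of the channel plan.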
with self.name_scope():
self.conv1 = pre_conv1x1_block(
in_channels=in_channels,
out_channels=mid_channels,
bn_use_global_stats=bn_use_global_stats,
activate=False)
self.conv2 = pre_conv3x3_block(
in_channels=mid_channels,
out_channels=mid_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats)
self.conv3 = pre_conv1x1_block(
in_channels=mid_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats)
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
return x
class PyrUnit(HybridBlock):
"""
PyramidNet unit with residual connection.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
"""
def __init__(self,
in_channels,
out_channels,
strides,
bn_use_global_stats,
bottleneck,
**kwargs):
super(PyrUnit, self).__init__(**kwargs)
assert (out_channels >= in_channels)
self.resize_identity = (strides != 1)
self.identity_pad_width = out_channels - in_channels
with self.name_scope():
if bottleneck:
self.body = PyrBottleneck(
in_channels=in_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats,
strides=strides)
else:
self.body = PyrBlock(
in_channels=in_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats,
strides=strides)
self.bn = nn.BatchNorm(
in_channels=out_channels,
use_global_stats=bn_use_global_stats)
if self.resize_identity:
self.identity_pool = nn.AvgPool2D(
pool_size=2,
strides=strides,
ceil_mode=True)
def hybrid_forward(self, F, x):
identity = x
x = self.body(x)
x = self.bn(x)
if self.resize_identity:
identity = self.identity_pool(identity)
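        # Sketch of the PyramidNet shortcut (illustrative numbers): the body
        # widens e.g. 64 -> 75 channels while the identity keeps 64, so the
        # concat below pads the identity with 11 zero channels; when strides
        # != 1 the average pooling above also downsamples the identity first.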
if self.identity_pad_width > 0:
identity = F.concat(
identity,
F.zeros_like(F.slice_axis(x, axis=1, begin=0, end=self.identity_pad_width)),
dim=1)
x = x + identity
return x
class PyrInitBlock(HybridBlock):
"""
PyramidNet specific initial block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels,
out_channels,
bn_use_global_stats,
**kwargs):
super(PyrInitBlock, self).__init__(**kwargs)
with self.name_scope():
self.conv = nn.Conv2D(
channels=out_channels,
kernel_size=7,
strides=2,
padding=3,
use_bias=False,
in_channels=in_channels)
self.bn = nn.BatchNorm(
in_channels=out_channels,
use_global_stats=bn_use_global_stats)
self.activ = nn.Activation("relu")
self.pool = nn.MaxPool2D(
pool_size=3,
strides=2,
padding=1)
def hybrid_forward(self, F, x):
x = self.conv(x)
x = self.bn(x)
x = self.activ(x)
x = self.pool(x)
return x
class PyramidNet(HybridBlock):
"""
PyramidNet model from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
bottleneck,
bn_use_global_stats=False,
in_channels=3,
in_size=(224, 224),
classes=1000,
**kwargs):
super(PyramidNet, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
self.features.add(PyrInitBlock(
in_channels=in_channels,
out_channels=init_block_channels,
bn_use_global_stats=bn_use_global_stats))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
with stage.name_scope():
for j, out_channels in enumerate(channels_per_stage):
strides = 2 if (j == 0) and (i != 0) else 1
stage.add(PyrUnit(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats,
bottleneck=bottleneck))
in_channels = out_channels
self.features.add(stage)
self.features.add(PreResActivation(
in_channels=in_channels,
bn_use_global_stats=bn_use_global_stats))
self.features.add(nn.AvgPool2D(
pool_size=7,
strides=1))
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
self.output.add(nn.Dense(
units=classes,
in_units=in_channels))
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_pyramidnet(blocks,
alpha,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create PyramidNet model with specific parameters.
Parameters:
----------
blocks : int
Number of blocks.
alpha : int
PyramidNet's alpha value.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
if blocks == 10:
layers = [1, 1, 1, 1]
elif blocks == 12:
layers = [2, 1, 1, 1]
elif blocks == 14:
layers = [2, 2, 1, 1]
elif blocks == 16:
layers = [2, 2, 2, 1]
elif blocks == 18:
layers = [2, 2, 2, 2]
elif blocks == 34:
layers = [3, 4, 6, 3]
elif blocks == 50:
layers = [3, 4, 6, 3]
elif blocks == 101:
layers = [3, 4, 23, 3]
elif blocks == 152:
layers = [3, 8, 36, 3]
elif blocks == 200:
layers = [3, 24, 36, 3]
else:
raise ValueError("Unsupported ResNet with number of blocks: {}".format(blocks))
init_block_channels = 64
growth_add = float(alpha) / float(sum(layers))
from functools import reduce
channels = reduce(
lambda xi, yi: xi + [[(i + 1) * growth_add + xi[-1][-1] for i in list(range(yi))]],
layers,
[[init_block_channels]])[1:]
channels = [[int(round(cij)) for cij in ci] for ci in channels]
if blocks < 50:
bottleneck = False
else:
bottleneck = True
channels = [[cij * 4 for cij in ci] for ci in channels]
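    # Worked example (illustrative): blocks=101 with alpha=360 gives layers
    # [3, 4, 23, 3] and growth_add = 360 / 33 ~= 10.91, so the first stage
    # rounds to [75, 86, 97] channels, widened 4x to [300, 344, 388] by the
    # bottleneck branch above.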
net = PyramidNet(
channels=channels,
init_block_channels=init_block_channels,
bottleneck=bottleneck,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def pyramidnet101_a360(**kwargs):
"""
PyramidNet-101 model from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_pyramidnet(blocks=101, alpha=360, model_name="pyramidnet101_a360", **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
pyramidnet101_a360,
]
for model in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != pyramidnet101_a360 or weight_count == 42455070)
x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
y = net(x)
assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
| 13,534 | 31.149644 | 115 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/seresnet.py | """
SE-ResNet for ImageNet-1K, implemented in Gluon.
Original paper: 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
"""
__all__ = ['SEResNet', 'seresnet10', 'seresnet12', 'seresnet14', 'seresnet16', 'seresnet18', 'seresnet26',
'seresnetbc26b', 'seresnet34', 'seresnetbc38b', 'seresnet50', 'seresnet50b', 'seresnet101', 'seresnet101b',
'seresnet152', 'seresnet152b', 'seresnet200', 'seresnet200b', 'SEResUnit', 'get_seresnet']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1_block, SEBlock
from .resnet import ResBlock, ResBottleneck, ResInitBlock
class SEResUnit(HybridBlock):
"""
SE-ResNet unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
conv1_stride : bool
Whether to use stride in the first or the second convolution layer of the block.
"""
def __init__(self,
in_channels,
out_channels,
strides,
bn_use_global_stats,
bottleneck,
conv1_stride,
**kwargs):
super(SEResUnit, self).__init__(**kwargs)
self.resize_identity = (in_channels != out_channels) or (strides != 1)
with self.name_scope():
if bottleneck:
self.body = ResBottleneck(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats,
conv1_stride=conv1_stride)
else:
self.body = ResBlock(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats)
self.se = SEBlock(channels=out_channels)
if self.resize_identity:
self.identity_conv = conv1x1_block(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats,
activation=None)
self.activ = nn.Activation("relu")
def hybrid_forward(self, F, x):
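        # Flow sketch (illustrative): residual body -> SE channel
        # recalibration (SEBlock is assumed to gate channels via global
        # pooling and sigmoid excitation, per the referenced paper) ->
        # identity addition -> ReLU.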
if self.resize_identity:
identity = self.identity_conv(x)
else:
identity = x
x = self.body(x)
x = self.se(x)
x = x + identity
x = self.activ(x)
return x
class SEResNet(HybridBlock):
"""
SE-ResNet model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
conv1_stride : bool
Whether to use stride in the first or the second convolution layer in units.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
bottleneck,
conv1_stride,
bn_use_global_stats=False,
in_channels=3,
in_size=(224, 224),
classes=1000,
**kwargs):
super(SEResNet, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
self.features.add(ResInitBlock(
in_channels=in_channels,
out_channels=init_block_channels,
bn_use_global_stats=bn_use_global_stats))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
with stage.name_scope():
for j, out_channels in enumerate(channels_per_stage):
strides = 2 if (j == 0) and (i != 0) else 1
stage.add(SEResUnit(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats,
bottleneck=bottleneck,
conv1_stride=conv1_stride))
in_channels = out_channels
self.features.add(stage)
self.features.add(nn.AvgPool2D(
pool_size=7,
strides=1))
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
self.output.add(nn.Dense(
units=classes,
in_units=in_channels))
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_seresnet(blocks,
bottleneck=None,
conv1_stride=True,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create SE-ResNet model with specific parameters.
Parameters:
----------
blocks : int
Number of blocks.
bottleneck : bool, default None
Whether to use a bottleneck or simple block in units.
conv1_stride : bool, default True
Whether to use stride in the first or the second convolution layer in units.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
if bottleneck is None:
bottleneck = (blocks >= 50)
if blocks == 10:
layers = [1, 1, 1, 1]
elif blocks == 12:
layers = [2, 1, 1, 1]
    elif (blocks == 14) and not bottleneck:
layers = [2, 2, 1, 1]
elif (blocks == 14) and bottleneck:
layers = [1, 1, 1, 1]
elif blocks == 16:
layers = [2, 2, 2, 1]
elif blocks == 18:
layers = [2, 2, 2, 2]
elif (blocks == 26) and not bottleneck:
layers = [3, 3, 3, 3]
elif (blocks == 26) and bottleneck:
layers = [2, 2, 2, 2]
elif blocks == 34:
layers = [3, 4, 6, 3]
elif (blocks == 38) and bottleneck:
layers = [3, 3, 3, 3]
elif blocks == 50:
layers = [3, 4, 6, 3]
elif blocks == 101:
layers = [3, 4, 23, 3]
elif blocks == 152:
layers = [3, 8, 36, 3]
elif blocks == 200:
layers = [3, 24, 36, 3]
else:
raise ValueError("Unsupported SE-ResNet with number of blocks: {}".format(blocks))
if bottleneck:
assert (sum(layers) * 3 + 2 == blocks)
else:
assert (sum(layers) * 2 + 2 == blocks)
init_block_channels = 64
channels_per_layers = [64, 128, 256, 512]
if bottleneck:
bottleneck_factor = 4
channels_per_layers = [ci * bottleneck_factor for ci in channels_per_layers]
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
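    # Worked example (illustrative): blocks=50 gives layers [3, 4, 6, 3]
    # (sum(layers) * 3 + 2 == 50) with bottleneck channels [256, 512, 1024,
    # 2048], so channels == [[256] * 3, [512] * 4, [1024] * 6, [2048] * 3].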
net = SEResNet(
channels=channels,
init_block_channels=init_block_channels,
bottleneck=bottleneck,
conv1_stride=conv1_stride,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def seresnet10(**kwargs):
"""
SE-ResNet-10 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
It's an experimental model.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet(blocks=10, model_name="seresnet10", **kwargs)
def seresnet12(**kwargs):
"""
SE-ResNet-12 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
It's an experimental model.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet(blocks=12, model_name="seresnet12", **kwargs)
def seresnet14(**kwargs):
"""
SE-ResNet-14 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
It's an experimental model.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet(blocks=14, model_name="seresnet14", **kwargs)
def seresnet16(**kwargs):
"""
SE-ResNet-16 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
It's an experimental model.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet(blocks=16, model_name="seresnet16", **kwargs)
def seresnet18(**kwargs):
"""
SE-ResNet-18 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet(blocks=18, model_name="seresnet18", **kwargs)
def seresnet26(**kwargs):
"""
SE-ResNet-26 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
It's an experimental model.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet(blocks=26, bottleneck=False, model_name="seresnet26", **kwargs)
def seresnetbc26b(**kwargs):
"""
SE-ResNet-BC-26b model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
It's an experimental model (bottleneck compressed).
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet(blocks=26, bottleneck=True, conv1_stride=False, model_name="seresnetbc26b", **kwargs)
def seresnet34(**kwargs):
"""
SE-ResNet-34 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet(blocks=34, model_name="seresnet34", **kwargs)
def seresnetbc38b(**kwargs):
"""
SE-ResNet-BC-38b model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
It's an experimental model (bottleneck compressed).
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet(blocks=38, bottleneck=True, conv1_stride=False, model_name="seresnetbc38b", **kwargs)
def seresnet50(**kwargs):
"""
SE-ResNet-50 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet(blocks=50, model_name="seresnet50", **kwargs)
def seresnet50b(**kwargs):
"""
SE-ResNet-50 model with stride at the second convolution in bottleneck block from 'Squeeze-and-Excitation
Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet(blocks=50, conv1_stride=False, model_name="seresnet50b", **kwargs)
def seresnet101(**kwargs):
"""
SE-ResNet-101 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet(blocks=101, model_name="seresnet101", **kwargs)
def seresnet101b(**kwargs):
"""
SE-ResNet-101 model with stride at the second convolution in bottleneck block from 'Squeeze-and-Excitation
Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet(blocks=101, conv1_stride=False, model_name="seresnet101b", **kwargs)
def seresnet152(**kwargs):
"""
SE-ResNet-152 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet(blocks=152, model_name="seresnet152", **kwargs)
def seresnet152b(**kwargs):
"""
SE-ResNet-152 model with stride at the second convolution in bottleneck block from 'Squeeze-and-Excitation
Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet(blocks=152, conv1_stride=False, model_name="seresnet152b", **kwargs)
def seresnet200(**kwargs):
"""
SE-ResNet-200 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
It's an experimental model.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet(blocks=200, model_name="seresnet200", **kwargs)
def seresnet200b(**kwargs):
"""
SE-ResNet-200 model with stride at the second convolution in bottleneck block from 'Squeeze-and-Excitation
Networks,' https://arxiv.org/abs/1709.01507. It's an experimental model.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet(blocks=200, conv1_stride=False, model_name="seresnet200b", **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
seresnet10,
seresnet12,
seresnet14,
seresnet16,
seresnet18,
seresnet26,
seresnetbc26b,
seresnet34,
seresnetbc38b,
seresnet50,
seresnet50b,
seresnet101,
seresnet101b,
seresnet152,
seresnet152b,
seresnet200,
seresnet200b,
]
for model in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != seresnet10 or weight_count == 5463332)
assert (model != seresnet12 or weight_count == 5537896)
assert (model != seresnet14 or weight_count == 5835504)
assert (model != seresnet16 or weight_count == 7024640)
assert (model != seresnet18 or weight_count == 11778592)
assert (model != seresnet26 or weight_count == 18093852)
assert (model != seresnetbc26b or weight_count == 17395976)
assert (model != seresnet34 or weight_count == 21958868)
assert (model != seresnetbc38b or weight_count == 24026616)
assert (model != seresnet50 or weight_count == 28088024)
assert (model != seresnet50b or weight_count == 28088024)
assert (model != seresnet101 or weight_count == 49326872)
assert (model != seresnet101b or weight_count == 49326872)
assert (model != seresnet152 or weight_count == 66821848)
assert (model != seresnet152b or weight_count == 66821848)
assert (model != seresnet200 or weight_count == 71835864)
assert (model != seresnet200b or weight_count == 71835864)
x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
y = net(x)
assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
| 20,802 | 33.385124 | 118 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/seresnet_cub.py | """
SE-ResNet for CUB-200-2011, implemented in Gluon.
Original paper: 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
"""
__all__ = ['seresnet10_cub', 'seresnet12_cub', 'seresnet14_cub', 'seresnetbc14b_cub', 'seresnet16_cub',
'seresnet18_cub', 'seresnet26_cub', 'seresnetbc26b_cub', 'seresnet34_cub', 'seresnetbc38b_cub',
'seresnet50_cub', 'seresnet50b_cub', 'seresnet101_cub', 'seresnet101b_cub', 'seresnet152_cub',
'seresnet152b_cub', 'seresnet200_cub', 'seresnet200b_cub']
from .seresnet import get_seresnet
def seresnet10_cub(classes=200, **kwargs):
"""
SE-ResNet-10 model for CUB-200-2011 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
It's an experimental model.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet(classes=classes, blocks=10, model_name="seresnet10_cub", **kwargs)
def seresnet12_cub(classes=200, **kwargs):
"""
SE-ResNet-12 model for CUB-200-2011 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
It's an experimental model.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet(classes=classes, blocks=12, model_name="seresnet12_cub", **kwargs)
def seresnet14_cub(classes=200, **kwargs):
"""
SE-ResNet-14 model for CUB-200-2011 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
It's an experimental model.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet(classes=classes, blocks=14, model_name="seresnet14_cub", **kwargs)
def seresnetbc14b_cub(classes=200, **kwargs):
"""
SE-ResNet-BC-14b model for CUB-200-2011 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
It's an experimental model (bottleneck compressed).
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet(classes=classes, blocks=14, bottleneck=True, conv1_stride=False, model_name="seresnetbc14b_cub",
**kwargs)
def seresnet16_cub(classes=200, **kwargs):
"""
SE-ResNet-16 model for CUB-200-2011 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
It's an experimental model.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet(classes=classes, blocks=16, model_name="seresnet16_cub", **kwargs)
def seresnet18_cub(classes=200, **kwargs):
"""
SE-ResNet-18 model for CUB-200-2011 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet(classes=classes, blocks=18, model_name="seresnet18_cub", **kwargs)
def seresnet26_cub(classes=200, **kwargs):
"""
SE-ResNet-26 model for CUB-200-2011 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
It's an experimental model.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet(classes=classes, blocks=26, bottleneck=False, model_name="seresnet26_cub", **kwargs)
def seresnetbc26b_cub(classes=200, **kwargs):
"""
SE-ResNet-BC-26b model for CUB-200-2011 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
It's an experimental model (bottleneck compressed).
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet(classes=classes, blocks=26, bottleneck=True, conv1_stride=False, model_name="seresnetbc26b_cub",
**kwargs)
def seresnet34_cub(classes=200, **kwargs):
"""
SE-ResNet-34 model for CUB-200-2011 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet(classes=classes, blocks=34, model_name="seresnet34_cub", **kwargs)
def seresnetbc38b_cub(classes=200, **kwargs):
"""
SE-ResNet-BC-38b model for CUB-200-2011 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
It's an experimental model (bottleneck compressed).
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet(classes=classes, blocks=38, bottleneck=True, conv1_stride=False, model_name="seresnetbc38b_cub",
**kwargs)
def seresnet50_cub(classes=200, **kwargs):
"""
SE-ResNet-50 model for CUB-200-2011 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet(classes=classes, blocks=50, model_name="seresnet50_cub", **kwargs)
def seresnet50b_cub(classes=200, **kwargs):
"""
SE-ResNet-50 model with stride at the second convolution in bottleneck block from 'Squeeze-and-Excitation Networks,'
https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet(classes=classes, blocks=50, conv1_stride=False, model_name="seresnet50b_cub", **kwargs)
def seresnet101_cub(classes=200, **kwargs):
"""
SE-ResNet-101 model for CUB-200-2011 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet(classes=classes, blocks=101, model_name="seresnet101_cub", **kwargs)
def seresnet101b_cub(classes=200, **kwargs):
"""
SE-ResNet-101 model with stride at the second convolution in bottleneck block from 'Squeeze-and-Excitation
Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet(classes=classes, blocks=101, conv1_stride=False, model_name="seresnet101b_cub", **kwargs)
def seresnet152_cub(classes=200, **kwargs):
"""
SE-ResNet-152 model for CUB-200-2011 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet(classes=classes, blocks=152, model_name="seresnet152_cub", **kwargs)
def seresnet152b_cub(classes=200, **kwargs):
"""
SE-ResNet-152 model with stride at the second convolution in bottleneck block from 'Squeeze-and-Excitation
Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet(classes=classes, blocks=152, conv1_stride=False, model_name="seresnet152b_cub", **kwargs)
def seresnet200_cub(classes=200, **kwargs):
"""
SE-ResNet-200 model for CUB-200-2011 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
It's an experimental model.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet(classes=classes, blocks=200, model_name="seresnet200_cub", **kwargs)
def seresnet200b_cub(classes=200, **kwargs):
"""
SE-ResNet-200 model with stride at the second convolution in bottleneck block from 'Squeeze-and-Excitation
Networks,' https://arxiv.org/abs/1709.01507. It's an experimental model.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet(classes=classes, blocks=200, conv1_stride=False, model_name="seresnet200b_cub", **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
seresnet10_cub,
seresnet12_cub,
seresnet14_cub,
seresnetbc14b_cub,
seresnet16_cub,
seresnet18_cub,
seresnet26_cub,
seresnetbc26b_cub,
seresnet34_cub,
seresnetbc38b_cub,
seresnet50_cub,
seresnet50b_cub,
seresnet101_cub,
seresnet101b_cub,
seresnet152_cub,
seresnet152b_cub,
seresnet200_cub,
seresnet200b_cub,
]
for model in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != seresnet10_cub or weight_count == 5052932)
assert (model != seresnet12_cub or weight_count == 5127496)
assert (model != seresnet14_cub or weight_count == 5425104)
assert (model != seresnetbc14b_cub or weight_count == 9126136)
assert (model != seresnet16_cub or weight_count == 6614240)
assert (model != seresnet18_cub or weight_count == 11368192)
assert (model != seresnet26_cub or weight_count == 17683452)
assert (model != seresnetbc26b_cub or weight_count == 15756776)
assert (model != seresnet34_cub or weight_count == 21548468)
assert (model != seresnetbc38b_cub or weight_count == 22387416)
assert (model != seresnet50_cub or weight_count == 26448824)
assert (model != seresnet50b_cub or weight_count == 26448824)
assert (model != seresnet101_cub or weight_count == 47687672)
assert (model != seresnet101b_cub or weight_count == 47687672)
assert (model != seresnet152_cub or weight_count == 65182648)
assert (model != seresnet152b_cub or weight_count == 65182648)
assert (model != seresnet200_cub or weight_count == 70196664)
assert (model != seresnet200b_cub or weight_count == 70196664)
x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
y = net(x)
assert (y.shape == (1, 200))
if __name__ == "__main__":
_test()
| 15,666 | 36.037825 | 120 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/densenet.py | """
DenseNet for ImageNet-1K, implemented in Gluon.
Original paper: 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993.
"""
__all__ = ['DenseNet', 'densenet121', 'densenet161', 'densenet169', 'densenet201', 'DenseUnit', 'TransitionBlock']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import pre_conv1x1_block, pre_conv3x3_block
from .preresnet import PreResInitBlock, PreResActivation
class DenseUnit(HybridBlock):
"""
DenseNet unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
dropout_rate : float
        Parameter of Dropout layer. Fraction of the input units to drop.
"""
def __init__(self,
in_channels,
out_channels,
bn_use_global_stats,
dropout_rate,
**kwargs):
super(DenseUnit, self).__init__(**kwargs)
self.use_dropout = (dropout_rate != 0.0)
bn_size = 4
inc_channels = out_channels - in_channels
mid_channels = inc_channels * bn_size
with self.name_scope():
self.conv1 = pre_conv1x1_block(
in_channels=in_channels,
out_channels=mid_channels,
bn_use_global_stats=bn_use_global_stats)
self.conv2 = pre_conv3x3_block(
in_channels=mid_channels,
out_channels=inc_channels,
bn_use_global_stats=bn_use_global_stats)
if self.use_dropout:
self.dropout = nn.Dropout(rate=dropout_rate)
def hybrid_forward(self, F, x):
identity = x
x = self.conv1(x)
x = self.conv2(x)
if self.use_dropout:
x = self.dropout(x)
x = F.concat(identity, x, dim=1)
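        # Shape sketch (illustrative): with in_channels=64 and out_channels=96
        # (growth rate 32, bn_size 4), conv1 expands to 128 channels, conv2
        # reduces to 32, and the concat above restores 64 + 32 = 96 channels.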
return x
class TransitionBlock(HybridBlock):
"""
DenseNet's auxiliary block, which can be treated as the initial part of the DenseNet unit, triggered only in the
first unit of each stage.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels,
out_channels,
bn_use_global_stats,
**kwargs):
super(TransitionBlock, self).__init__(**kwargs)
with self.name_scope():
self.conv = pre_conv1x1_block(
in_channels=in_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats)
self.pool = nn.AvgPool2D(
pool_size=2,
strides=2,
padding=0)
def hybrid_forward(self, F, x):
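        # Shape sketch (illustrative): an input of (1, 256, 56, 56) with
        # out_channels=128 becomes (1, 128, 56, 56) after the 1x1 convolution
        # and (1, 128, 28, 28) after the stride-2 average pooling.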
x = self.conv(x)
x = self.pool(x)
return x
class DenseNet(HybridBlock):
"""
DenseNet model from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
dropout_rate : float, default 0.0
        Parameter of Dropout layer. Fraction of the input units to drop.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
bn_use_global_stats=False,
dropout_rate=0.0,
in_channels=3,
in_size=(224, 224),
classes=1000,
**kwargs):
super(DenseNet, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
self.features.add(PreResInitBlock(
in_channels=in_channels,
out_channels=init_block_channels,
bn_use_global_stats=bn_use_global_stats))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
with stage.name_scope():
if i != 0:
stage.add(TransitionBlock(
in_channels=in_channels,
out_channels=(in_channels // 2),
bn_use_global_stats=bn_use_global_stats))
in_channels = in_channels // 2
for j, out_channels in enumerate(channels_per_stage):
stage.add(DenseUnit(
in_channels=in_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats,
dropout_rate=dropout_rate))
in_channels = out_channels
self.features.add(stage)
self.features.add(PreResActivation(
in_channels=in_channels,
bn_use_global_stats=bn_use_global_stats))
self.features.add(nn.AvgPool2D(
pool_size=7,
strides=1))
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
self.output.add(nn.Dense(
units=classes,
in_units=in_channels))
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_densenet(blocks,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create DenseNet model with specific parameters.
Parameters:
----------
blocks : int
Number of blocks.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
if blocks == 121:
init_block_channels = 64
growth_rate = 32
layers = [6, 12, 24, 16]
elif blocks == 161:
init_block_channels = 96
growth_rate = 48
layers = [6, 12, 36, 24]
elif blocks == 169:
init_block_channels = 64
growth_rate = 32
layers = [6, 12, 32, 32]
elif blocks == 201:
init_block_channels = 64
growth_rate = 32
layers = [6, 12, 48, 32]
else:
raise ValueError("Unsupported DenseNet version with number of layers {}".format(blocks))
from functools import reduce
channels = reduce(lambda xi, yi:
xi + [reduce(lambda xj, yj:
xj + [xj[-1] + yj],
[growth_rate] * yi,
[xi[-1][-1] // 2])[1:]],
layers,
[[init_block_channels * 2]])[1:]
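    # Worked example (illustrative): for DenseNet-121 the first stage starts
    # from 128 // 2 = 64 channels and adds growth_rate=32 per unit, giving
    # [96, 128, 160, 192, 224, 256]; each transition block then halves the
    # channel count before the next stage.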
net = DenseNet(
channels=channels,
init_block_channels=init_block_channels,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def densenet121(**kwargs):
"""
DenseNet-121 model from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_densenet(blocks=121, model_name="densenet121", **kwargs)
def densenet161(**kwargs):
"""
DenseNet-161 model from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_densenet(blocks=161, model_name="densenet161", **kwargs)
def densenet169(**kwargs):
"""
DenseNet-169 model from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_densenet(blocks=169, model_name="densenet169", **kwargs)
def densenet201(**kwargs):
"""
DenseNet-201 model from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_densenet(blocks=201, model_name="densenet201", **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
densenet121,
densenet161,
densenet169,
densenet201,
]
for model in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != densenet121 or weight_count == 7978856)
assert (model != densenet161 or weight_count == 28681000)
assert (model != densenet169 or weight_count == 14149480)
assert (model != densenet201 or weight_count == 20013928)
x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
y = net(x)
assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
| 11,737 | 32.346591 | 116 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/seresnext.py | """
SE-ResNeXt for ImageNet-1K, implemented in Gluon.
Original paper: 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
"""
__all__ = ['SEResNeXt', 'seresnext50_32x4d', 'seresnext101_32x4d', 'seresnext101_64x4d']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1_block, SEBlock
from .resnet import ResInitBlock
from .resnext import ResNeXtBottleneck
class SEResNeXtUnit(HybridBlock):
"""
SE-ResNeXt unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
cardinality: int
Number of groups.
bottleneck_width: int
Width of bottleneck block.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels,
out_channels,
strides,
cardinality,
bottleneck_width,
bn_use_global_stats,
**kwargs):
super(SEResNeXtUnit, self).__init__(**kwargs)
self.resize_identity = (in_channels != out_channels) or (strides != 1)
with self.name_scope():
self.body = ResNeXtBottleneck(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
cardinality=cardinality,
bottleneck_width=bottleneck_width,
bn_use_global_stats=bn_use_global_stats)
self.se = SEBlock(channels=out_channels)
if self.resize_identity:
self.identity_conv = conv1x1_block(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats,
activation=None)
self.activ = nn.Activation("relu")
def hybrid_forward(self, F, x):
if self.resize_identity:
identity = self.identity_conv(x)
else:
identity = x
x = self.body(x)
x = self.se(x)
x = x + identity
x = self.activ(x)
return x
class SEResNeXt(HybridBlock):
"""
SE-ResNeXt model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
cardinality: int
Number of groups.
bottleneck_width: int
Width of bottleneck block.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
cardinality,
bottleneck_width,
bn_use_global_stats=False,
in_channels=3,
in_size=(224, 224),
classes=1000,
**kwargs):
super(SEResNeXt, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
self.features.add(ResInitBlock(
in_channels=in_channels,
out_channels=init_block_channels,
bn_use_global_stats=bn_use_global_stats))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
with stage.name_scope():
for j, out_channels in enumerate(channels_per_stage):
strides = 2 if (j == 0) and (i != 0) else 1
stage.add(SEResNeXtUnit(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
cardinality=cardinality,
bottleneck_width=bottleneck_width,
bn_use_global_stats=bn_use_global_stats))
in_channels = out_channels
self.features.add(stage)
self.features.add(nn.AvgPool2D(
pool_size=7,
strides=1))
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
self.output.add(nn.Dense(
units=classes,
in_units=in_channels))
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_seresnext(blocks,
cardinality,
bottleneck_width,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create SE-ResNeXt model with specific parameters.
Parameters:
----------
blocks : int
Number of blocks.
cardinality: int
Number of groups.
bottleneck_width: int
Width of bottleneck block.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
if blocks == 50:
layers = [3, 4, 6, 3]
elif blocks == 101:
layers = [3, 4, 23, 3]
else:
raise ValueError("Unsupported SE-ResNeXt with number of blocks: {}".format(blocks))
init_block_channels = 64
channels_per_layers = [256, 512, 1024, 2048]
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
net = SEResNeXt(
channels=channels,
init_block_channels=init_block_channels,
cardinality=cardinality,
bottleneck_width=bottleneck_width,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def seresnext50_32x4d(**kwargs):
"""
SE-ResNeXt-50 (32x4d) model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnext(blocks=50, cardinality=32, bottleneck_width=4, model_name="seresnext50_32x4d", **kwargs)
def seresnext101_32x4d(**kwargs):
"""
SE-ResNeXt-101 (32x4d) model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnext(blocks=101, cardinality=32, bottleneck_width=4, model_name="seresnext101_32x4d", **kwargs)
def seresnext101_64x4d(**kwargs):
"""
SE-ResNeXt-101 (64x4d) model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnext(blocks=101, cardinality=64, bottleneck_width=4, model_name="seresnext101_64x4d", **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
seresnext50_32x4d,
seresnext101_32x4d,
seresnext101_64x4d,
]
for model in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != seresnext50_32x4d or weight_count == 27559896)
assert (model != seresnext101_32x4d or weight_count == 48955416)
assert (model != seresnext101_64x4d or weight_count == 88232984)
x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
y = net(x)
assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
| 9,912 | 32.265101 | 115 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/darts.py | """
DARTS for ImageNet-1K, implemented in Gluon.
Original paper: 'DARTS: Differentiable Architecture Search,' https://arxiv.org/abs/1806.09055.
"""
__all__ = ['DARTS', 'darts']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from mxnet.gluon.contrib.nn import Identity
from .common import conv1x1
from .nasnet import nasnet_dual_path_sequential
class DwsConv(HybridBlock):
"""
    Standard dilated depthwise separable convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of 2 int
Convolution window size.
strides : int or tuple/list of 2 int
Strides of the convolution.
padding : int or tuple/list of 2 int
Padding value for convolution layer.
dilation : int or tuple/list of 2 int
Dilation value for convolution layer.
use_bias : bool, default False
Whether the layers use a bias vector.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
strides,
padding,
dilation,
use_bias=False,
**kwargs):
super(DwsConv, self).__init__(**kwargs)
with self.name_scope():
self.dw_conv = nn.Conv2D(
channels=in_channels,
kernel_size=kernel_size,
strides=strides,
padding=padding,
dilation=dilation,
groups=in_channels,
use_bias=use_bias,
in_channels=in_channels)
self.pw_conv = conv1x1(
in_channels=in_channels,
out_channels=out_channels,
use_bias=use_bias)
def hybrid_forward(self, F, x):
x = self.dw_conv(x)
x = self.pw_conv(x)
return x
class DartsConv(HybridBlock):
"""
DARTS specific convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of 2 int
Convolution window size.
strides : int or tuple/list of 2 int
Strides of the convolution.
padding : int or tuple/list of 2 int
Padding value for convolution layer.
activate : bool, default True
Whether activate the convolution block.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
strides,
padding,
activate=True,
**kwargs):
super(DartsConv, self).__init__(**kwargs)
self.activate = activate
with self.name_scope():
if self.activate:
self.activ = nn.Activation("relu")
self.conv = nn.Conv2D(
channels=out_channels,
kernel_size=kernel_size,
strides=strides,
padding=padding,
use_bias=False,
in_channels=in_channels)
self.bn = nn.BatchNorm(in_channels=out_channels)
def hybrid_forward(self, F, x):
if self.activate:
x = self.activ(x)
x = self.conv(x)
x = self.bn(x)
return x
def darts_conv1x1(in_channels,
out_channels,
activate=True):
"""
1x1 version of the DARTS specific convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
activate : bool, default True
Whether activate the convolution block.
"""
return DartsConv(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=1,
strides=1,
padding=0,
activate=activate)
def darts_conv3x3_s2(in_channels,
out_channels,
activate=True):
"""
3x3 version of the DARTS specific convolution block with stride 2.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
activate : bool, default True
Whether activate the convolution block.
"""
return DartsConv(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
strides=2,
padding=1,
activate=activate)
class DartsDwsConv(HybridBlock):
"""
    DARTS specific dilated depthwise separable convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of 2 int
Convolution window size.
strides : int or tuple/list of 2 int
Strides of the convolution.
padding : int or tuple/list of 2 int
Padding value for convolution layer.
dilation : int or tuple/list of 2 int
Dilation value for convolution layer.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
strides,
padding,
dilation,
**kwargs):
super(DartsDwsConv, self).__init__(**kwargs)
with self.name_scope():
self.activ = nn.Activation("relu")
self.conv = DwsConv(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
strides=strides,
padding=padding,
dilation=dilation,
use_bias=False)
self.bn = nn.BatchNorm(in_channels=out_channels)
def hybrid_forward(self, F, x):
x = self.activ(x)
x = self.conv(x)
x = self.bn(x)
return x
class DartsDwsBranch(HybridBlock):
"""
DARTS specific block with depthwise separable convolution layers.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of 2 int
Convolution window size.
strides : int or tuple/list of 2 int
Strides of the convolution.
padding : int or tuple/list of 2 int
Padding value for convolution layer.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
strides,
padding,
**kwargs):
super(DartsDwsBranch, self).__init__(**kwargs)
mid_channels = in_channels
with self.name_scope():
self.conv1 = DartsDwsConv(
in_channels=in_channels,
out_channels=mid_channels,
kernel_size=kernel_size,
strides=strides,
padding=padding,
dilation=1)
self.conv2 = DartsDwsConv(
in_channels=mid_channels,
out_channels=out_channels,
kernel_size=kernel_size,
strides=1,
padding=padding,
dilation=1)
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.conv2(x)
return x
class DartsReduceBranch(HybridBlock):
"""
DARTS specific factorized reduce block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int, default 2
Strides of the convolution.
"""
def __init__(self,
in_channels,
out_channels,
strides=2,
**kwargs):
super(DartsReduceBranch, self).__init__(**kwargs)
assert (out_channels % 2 == 0)
mid_channels = out_channels // 2
with self.name_scope():
self.activ = nn.Activation("relu")
self.conv1 = conv1x1(
in_channels=in_channels,
out_channels=mid_channels,
strides=strides)
self.conv2 = conv1x1(
in_channels=in_channels,
out_channels=mid_channels,
strides=strides)
self.bn = nn.BatchNorm(in_channels=out_channels)
def hybrid_forward(self, F, x):
x = self.activ(x)
x1 = self.conv1(x)
x = F.slice(x, begin=(None, None, 1, 1), end=(None, None, None, None))
x2 = self.conv2(x)
x = F.concat(x1, x2, dim=1)
x = self.bn(x)
return x
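# Shape sketch (demo only, helper name is ours) for the factorized reduce
# above: two stride-2 1x1 convolutions each emit out_channels // 2 maps, the
# second one on the input shifted by one pixel, and their concatenation
# restores out_channels at half the spatial resolution (even input assumed).
def _factorized_reduce_shape_demo(in_shape=(1, 64, 56, 56), out_channels=64):
    n, c, h, w = in_shape          # NCHW layout, h and w assumed even
    mid_channels = out_channels // 2
    h_out, w_out = h // 2, w // 2  # each stride-2 1x1 convolution halves a dim
    return (n, 2 * mid_channels, h_out, w_out)   # -> (1, 64, 28, 28)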
class Stem1Unit(HybridBlock):
"""
DARTS Stem1 unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
"""
def __init__(self,
in_channels,
out_channels,
**kwargs):
super(Stem1Unit, self).__init__(**kwargs)
mid_channels = out_channels // 2
with self.name_scope():
self.conv1 = darts_conv3x3_s2(
in_channels=in_channels,
out_channels=mid_channels,
activate=False)
self.conv2 = darts_conv3x3_s2(
in_channels=mid_channels,
out_channels=out_channels,
activate=True)
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.conv2(x)
return x
def stem2_unit(in_channels,
out_channels):
"""
DARTS Stem2 unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
"""
return darts_conv3x3_s2(
in_channels=in_channels,
out_channels=out_channels,
activate=True)
def darts_maxpool3x3(channels,
strides):
"""
DARTS specific 3x3 Max pooling layer.
Parameters:
----------
channels : int
Number of input/output channels. Unused parameter.
strides : int or tuple/list of 2 int
Strides of the convolution.
"""
assert (channels > 0)
return nn.MaxPool2D(
pool_size=3,
strides=strides,
padding=1)
def darts_skip_connection(channels,
strides):
"""
DARTS specific skip connection layer.
Parameters:
----------
channels : int
Number of input/output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
"""
assert (channels > 0)
if strides == 1:
return Identity()
else:
assert (strides == 2)
return DartsReduceBranch(
in_channels=channels,
out_channels=channels,
strides=strides)
def darts_dws_conv3x3(channels,
strides):
"""
3x3 version of DARTS specific dilated convolution block.
Parameters:
----------
channels : int
Number of input/output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
"""
return DartsDwsConv(
in_channels=channels,
out_channels=channels,
kernel_size=3,
strides=strides,
padding=2,
dilation=2)
def darts_dws_branch3x3(channels,
strides):
"""
    3x3 version of the DARTS specific depthwise separable convolution branch.
Parameters:
----------
channels : int
Number of input/output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
"""
return DartsDwsBranch(
in_channels=channels,
out_channels=channels,
kernel_size=3,
strides=strides,
padding=1)
# Set of operations in genotype.
GENOTYPE_OPS = {
'max_pool_3x3': darts_maxpool3x3,
'skip_connect': darts_skip_connection,
'dil_conv_3x3': darts_dws_conv3x3,
'sep_conv_3x3': darts_dws_branch3x3,
}
class DartsMainBlock(HybridBlock):
"""
DARTS main block, described by genotype.
Parameters:
----------
genotype : list of tuples (str, int)
List of genotype elements (operations and linked indices).
channels : int
Number of input/output channels.
reduction : bool
Whether use reduction.
"""
def __init__(self,
genotype,
channels,
reduction,
**kwargs):
super(DartsMainBlock, self).__init__(**kwargs)
self.concat = [2, 3, 4, 5]
op_names, indices = zip(*genotype)
self.indices = indices
self.steps = len(op_names) // 2
with self.name_scope():
for i, (name, index) in enumerate(zip(op_names, indices)):
stride = 2 if reduction and index < 2 else 1
setattr(self, "ops{}".format(i + 1), GENOTYPE_OPS[name](channels, stride))
def hybrid_forward(self, F, x, x_prev):
s0 = x_prev
s1 = x
states = [s0, s1]
for i in range(self.steps):
j1 = 2 * i
j2 = 2 * i + 1
op1 = getattr(self, "ops{}".format(j1 + 1))
op2 = getattr(self, "ops{}".format(j2 + 1))
y1 = states[self.indices[j1]]
y2 = states[self.indices[j2]]
y1 = op1(y1)
y2 = op2(y2)
s = y1 + y2
states += [s]
x_out = F.concat(*[states[i] for i in self.concat], dim=1)
return x_out
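# A framework-free sketch (demo helper, not part of the model) of how
# DartsMainBlock consumes a genotype: operations come in pairs, each pair
# reads from two previously computed states, and the intermediate states
# (indices 2 and up; four of them for the 8-op genotypes used here) are
# concatenated at the end.
def _genotype_walkthrough(genotype):
    op_names, indices = zip(*genotype)
    states = ["x_prev", "x"]  # states[0] and states[1] are the two unit inputs
    for i in range(len(op_names) // 2):
        op1, op2 = op_names[2 * i], op_names[2 * i + 1]
        in1, in2 = states[indices[2 * i]], states[indices[2 * i + 1]]
        states.append("{}({}) + {}({})".format(op1, in1, op2, in2))
    # e.g. _genotype_walkthrough([("sep_conv_3x3", 0), ("sep_conv_3x3", 1),
    #                             ("skip_connect", 0), ("dil_conv_3x3", 2)])
    return states[2:]  # the states that DartsMainBlock concatenates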
class DartsUnit(HybridBlock):
"""
DARTS unit.
Parameters:
----------
in_channels : int
Number of input channels.
prev_in_channels : int
Number of input channels in previous input.
out_channels : int
Number of output channels.
genotype : list of tuples (str, int)
List of genotype elements (operations and linked indices).
reduction : bool
Whether use reduction.
prev_reduction : bool
Whether use previous reduction.
"""
def __init__(self,
in_channels,
prev_in_channels,
out_channels,
genotype,
reduction,
prev_reduction,
**kwargs):
super(DartsUnit, self).__init__(**kwargs)
mid_channels = out_channels // 4
with self.name_scope():
if prev_reduction:
self.preprocess_prev = DartsReduceBranch(
in_channels=prev_in_channels,
out_channels=mid_channels)
else:
self.preprocess_prev = darts_conv1x1(
in_channels=prev_in_channels,
out_channels=mid_channels)
self.preprocess = darts_conv1x1(
in_channels=in_channels,
out_channels=mid_channels)
self.body = DartsMainBlock(
genotype=genotype,
channels=mid_channels,
reduction=reduction)
def hybrid_forward(self, F, x, x_prev):
x = self.preprocess(x)
x_prev = self.preprocess_prev(x_prev)
x_out = self.body(x, x_prev)
return x_out
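# Channel bookkeeping sketch for DartsUnit (demo helper, not part of the
# network): both inputs are preprocessed to out_channels // 4, and the main
# block concatenates its four intermediate states, so the unit output has
# exactly out_channels channels.
def _darts_unit_channels_demo(out_channels=192):
    mid_channels = out_channels // 4
    concat_states = 4  # states 2..5 in DartsMainBlock
    assert concat_states * mid_channels == out_channels
    return mid_channels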
class DARTS(HybridBlock):
"""
DARTS model from 'DARTS: Differentiable Architecture Search,' https://arxiv.org/abs/1806.09055.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
stem_blocks_channels : int
Number of output channels for the Stem units.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
stem_blocks_channels,
normal_genotype,
reduce_genotype,
in_channels=3,
in_size=(224, 224),
classes=1000,
**kwargs):
super(DARTS, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
with self.name_scope():
self.features = nasnet_dual_path_sequential(
return_two=False,
first_ordinals=2,
last_ordinals=1)
self.features.add(Stem1Unit(
in_channels=in_channels,
out_channels=stem_blocks_channels))
in_channels = stem_blocks_channels
self.features.add(stem2_unit(
in_channels=in_channels,
out_channels=stem_blocks_channels))
prev_in_channels = in_channels
in_channels = stem_blocks_channels
for i, channels_per_stage in enumerate(channels):
stage = nasnet_dual_path_sequential(prefix="stage{}_".format(i + 1))
for j, out_channels in enumerate(channels_per_stage):
reduction = (i != 0) and (j == 0)
prev_reduction = ((i == 0) and (j == 0)) or ((i != 0) and (j == 1))
genotype = reduce_genotype if reduction else normal_genotype
stage.add(DartsUnit(
in_channels=in_channels,
prev_in_channels=prev_in_channels,
out_channels=out_channels,
genotype=genotype,
reduction=reduction,
prev_reduction=prev_reduction))
prev_in_channels = in_channels
in_channels = out_channels
self.features.add(stage)
self.features.add(nn.AvgPool2D(
pool_size=7,
strides=1))
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
self.output.add(nn.Dense(
units=classes,
in_units=in_channels))
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_darts(model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create DARTS model with specific parameters.
Parameters:
----------
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
stem_blocks_channels = 48
layers = [4, 5, 5]
channels_per_layers = [192, 384, 768]
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
normal_genotype = [
('sep_conv_3x3', 0),
('sep_conv_3x3', 1),
('sep_conv_3x3', 0),
('sep_conv_3x3', 1),
('sep_conv_3x3', 1),
('skip_connect', 0),
('skip_connect', 0),
('dil_conv_3x3', 2)]
reduce_genotype = [
('max_pool_3x3', 0),
('max_pool_3x3', 1),
('skip_connect', 2),
('max_pool_3x3', 1),
('max_pool_3x3', 0),
('skip_connect', 2),
('skip_connect', 2),
('max_pool_3x3', 1)]
net = DARTS(
channels=channels,
stem_blocks_channels=stem_blocks_channels,
normal_genotype=normal_genotype,
reduce_genotype=reduce_genotype,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def darts(**kwargs):
"""
DARTS model from 'DARTS: Differentiable Architecture Search,' https://arxiv.org/abs/1806.09055.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_darts(model_name="darts", **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
darts,
]
for model in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != darts or weight_count == 4718752)
x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
y = net(x)
assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
| 21,552 | 27.969086 | 115 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/drn.py | """
DRN for ImageNet-1K, implemented in Gluon.
Original paper: 'Dilated Residual Networks,' https://arxiv.org/abs/1705.09914.
"""
__all__ = ['DRN', 'drnc26', 'drnc42', 'drnc58', 'drnd22', 'drnd38', 'drnd54', 'drnd105']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
class DRNConv(HybridBlock):
"""
DRN specific convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of 2 int
Convolution window size.
strides : int or tuple/list of 2 int
Strides of the convolution.
padding : int or tuple/list of 2 int
Padding value for convolution layer.
dilation : int or tuple/list of 2 int
Dilation value for convolution layer.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
activate : bool
Whether activate the convolution block.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
strides,
padding,
dilation,
bn_use_global_stats,
activate,
**kwargs):
super(DRNConv, self).__init__(**kwargs)
self.activate = activate
with self.name_scope():
self.conv = nn.Conv2D(
channels=out_channels,
kernel_size=kernel_size,
strides=strides,
padding=padding,
dilation=dilation,
use_bias=False,
in_channels=in_channels)
self.bn = nn.BatchNorm(
in_channels=out_channels,
use_global_stats=bn_use_global_stats)
if self.activate:
self.activ = nn.Activation("relu")
def hybrid_forward(self, F, x):
x = self.conv(x)
x = self.bn(x)
if self.activate:
x = self.activ(x)
return x
def drn_conv1x1(in_channels,
out_channels,
strides,
bn_use_global_stats,
activate,
**kwargs):
"""
1x1 version of the DRN specific convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
activate : bool
Whether activate the convolution block.
"""
return DRNConv(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=1,
strides=strides,
padding=0,
dilation=1,
bn_use_global_stats=bn_use_global_stats,
activate=activate,
**kwargs)
def drn_conv3x3(in_channels,
out_channels,
strides,
dilation,
bn_use_global_stats,
activate,
**kwargs):
"""
3x3 version of the DRN specific convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
dilation : int or tuple/list of 2 int
Padding/dilation value for convolution layer.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
activate : bool
Whether activate the convolution block.
"""
return DRNConv(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
strides=strides,
padding=dilation,
dilation=dilation,
bn_use_global_stats=bn_use_global_stats,
activate=activate,
**kwargs)
class DRNBlock(HybridBlock):
"""
Simple DRN block for residual path in DRN unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
dilation : int or tuple/list of 2 int
Padding/dilation value for convolution layers.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels,
out_channels,
strides,
dilation,
bn_use_global_stats,
**kwargs):
super(DRNBlock, self).__init__(**kwargs)
with self.name_scope():
self.conv1 = drn_conv3x3(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
dilation=dilation,
bn_use_global_stats=bn_use_global_stats,
activate=True)
self.conv2 = drn_conv3x3(
in_channels=out_channels,
out_channels=out_channels,
strides=1,
dilation=dilation,
bn_use_global_stats=bn_use_global_stats,
activate=False)
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.conv2(x)
return x
class DRNBottleneck(HybridBlock):
"""
DRN bottleneck block for residual path in DRN unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
dilation : int or tuple/list of 2 int
Padding/dilation value for 3x3 convolution layer.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels,
out_channels,
strides,
dilation,
bn_use_global_stats,
**kwargs):
super(DRNBottleneck, self).__init__(**kwargs)
mid_channels = out_channels // 4
with self.name_scope():
self.conv1 = drn_conv1x1(
in_channels=in_channels,
out_channels=mid_channels,
strides=1,
bn_use_global_stats=bn_use_global_stats,
activate=True)
self.conv2 = drn_conv3x3(
in_channels=mid_channels,
out_channels=mid_channels,
strides=strides,
dilation=dilation,
bn_use_global_stats=bn_use_global_stats,
activate=True)
self.conv3 = drn_conv1x1(
in_channels=mid_channels,
out_channels=out_channels,
strides=1,
bn_use_global_stats=bn_use_global_stats,
activate=False)
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
return x
class DRNUnit(HybridBlock):
"""
DRN unit with residual connection.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
dilation : int or tuple/list of 2 int
Padding/dilation value for 3x3 convolution layers.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
simplified : bool
        Whether to use a simplified block (a single convolution) instead of a simple block in units.
residual : bool
Whether do residual calculations.
"""
def __init__(self,
in_channels,
out_channels,
strides,
dilation,
bn_use_global_stats,
bottleneck,
simplified,
residual,
**kwargs):
super(DRNUnit, self).__init__(**kwargs)
assert residual or (not bottleneck)
assert (not (bottleneck and simplified))
assert (not (residual and simplified))
self.residual = residual
self.resize_identity = ((in_channels != out_channels) or (strides != 1)) and self.residual and (not simplified)
with self.name_scope():
if bottleneck:
self.body = DRNBottleneck(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
dilation=dilation,
bn_use_global_stats=bn_use_global_stats)
elif simplified:
self.body = drn_conv3x3(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
dilation=dilation,
bn_use_global_stats=bn_use_global_stats,
activate=False)
else:
self.body = DRNBlock(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
dilation=dilation,
bn_use_global_stats=bn_use_global_stats)
if self.resize_identity:
self.identity_conv = drn_conv1x1(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats,
activate=False)
self.activ = nn.Activation("relu")
def hybrid_forward(self, F, x):
if self.resize_identity:
identity = self.identity_conv(x)
else:
identity = x
x = self.body(x)
if self.residual:
x = x + identity
x = self.activ(x)
return x
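# Enumeration sketch (demo only, helper name is ours) of the block-type flags
# accepted by DRNUnit: the three assertions in __init__ admit exactly four
# combinations of (bottleneck, simplified, residual).
def _drn_unit_flags_demo():
    valid = []
    for bottleneck in (False, True):
        for simplified in (False, True):
            for residual in (False, True):
                if ((residual or not bottleneck) and
                        not (bottleneck and simplified) and
                        not (residual and simplified)):
                    valid.append((bottleneck, simplified, residual))
    # -> plain block (with or without residual), simplified non-residual
    #    block, and bottleneck residual block.
    return valid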
def drn_init_block(in_channels,
out_channels,
bn_use_global_stats,
**kwargs):
"""
DRN specific initial block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
return DRNConv(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=7,
strides=1,
padding=3,
dilation=1,
bn_use_global_stats=bn_use_global_stats,
activate=True,
**kwargs)
class DRN(HybridBlock):
"""
DRN-C&D model from 'Dilated Residual Networks,' https://arxiv.org/abs/1705.09914.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
dilations : list of list of int
Dilation values for 3x3 convolution layers for each unit.
bottlenecks : list of list of int
Whether to use a bottleneck or simple block in each unit.
simplifieds : list of list of int
        Whether to use a simplified block instead of a simple block in each unit.
residuals : list of list of int
Whether to use residual block in each unit.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
dilations,
bottlenecks,
simplifieds,
residuals,
bn_use_global_stats=False,
in_channels=3,
in_size=(224, 224),
classes=1000,
**kwargs):
super(DRN, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
self.features.add(drn_init_block(
in_channels=in_channels,
out_channels=init_block_channels,
bn_use_global_stats=bn_use_global_stats))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
with stage.name_scope():
for j, out_channels in enumerate(channels_per_stage):
strides = 2 if (j == 0) and (i != 0) else 1
stage.add(DRNUnit(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
dilation=dilations[i][j],
bn_use_global_stats=bn_use_global_stats,
bottleneck=(bottlenecks[i][j] == 1),
simplified=(simplifieds[i][j] == 1),
residual=(residuals[i][j] == 1)))
in_channels = out_channels
self.features.add(stage)
self.features.add(nn.AvgPool2D(
pool_size=28,
strides=1))
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Conv2D(
channels=classes,
kernel_size=1,
in_channels=in_channels))
self.output.add(nn.Flatten())
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_drn(blocks,
simplified=False,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create DRN-C or DRN-D model with specific parameters.
Parameters:
----------
blocks : int
Number of blocks.
simplified : bool, default False
Whether to use simplified scheme (D architecture).
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
if blocks == 22:
assert simplified
layers = [1, 1, 2, 2, 2, 2, 1, 1]
elif blocks == 26:
layers = [1, 1, 2, 2, 2, 2, 1, 1]
elif blocks == 38:
assert simplified
layers = [1, 1, 3, 4, 6, 3, 1, 1]
elif blocks == 42:
layers = [1, 1, 3, 4, 6, 3, 1, 1]
elif blocks == 54:
assert simplified
layers = [1, 1, 3, 4, 6, 3, 1, 1]
elif blocks == 58:
layers = [1, 1, 3, 4, 6, 3, 1, 1]
elif blocks == 105:
assert simplified
layers = [1, 1, 3, 4, 23, 3, 1, 1]
else:
raise ValueError("Unsupported DRN with number of blocks: {}".format(blocks))
if blocks < 50:
channels_per_layers = [16, 32, 64, 128, 256, 512, 512, 512]
bottlenecks_per_layers = [0, 0, 0, 0, 0, 0, 0, 0]
else:
channels_per_layers = [16, 32, 256, 512, 1024, 2048, 512, 512]
bottlenecks_per_layers = [0, 0, 1, 1, 1, 1, 0, 0]
if simplified:
simplifieds_per_layers = [1, 1, 0, 0, 0, 0, 1, 1]
residuals_per_layers = [0, 0, 1, 1, 1, 1, 0, 0]
else:
simplifieds_per_layers = [0, 0, 0, 0, 0, 0, 0, 0]
residuals_per_layers = [1, 1, 1, 1, 1, 1, 0, 0]
dilations_per_layers = [1, 1, 1, 1, 2, 4, 2, 1]
downsample = [0, 1, 1, 1, 0, 0, 0, 0]
def expand(property_per_layers):
from functools import reduce
return reduce(
lambda x, y: x + [[y[0]] * y[1]] if y[2] != 0 else x[:-1] + [x[-1] + [y[0]] * y[1]],
zip(property_per_layers, layers, downsample),
[[]])
channels = expand(channels_per_layers)
dilations = expand(dilations_per_layers)
bottlenecks = expand(bottlenecks_per_layers)
residuals = expand(residuals_per_layers)
simplifieds = expand(simplifieds_per_layers)
init_block_channels = channels_per_layers[0]
net = DRN(
channels=channels,
init_block_channels=init_block_channels,
dilations=dilations,
bottlenecks=bottlenecks,
simplifieds=simplifieds,
residuals=residuals,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
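# A standalone sketch (demo only, mirrors the `expand` helper inside
# `get_drn`) showing how per-layer properties are regrouped into stages: a
# layer with downsample == 1 opens a new stage, while downsample == 0 merges
# the layer into the previous stage.
def _drn_expand_demo():
    from functools import reduce
    layers = [1, 1, 2, 2, 2, 2, 1, 1]          # DRN-C-26 layout
    downsample = [0, 1, 1, 1, 0, 0, 0, 0]
    channels_per_layers = [16, 32, 64, 128, 256, 512, 512, 512]
    channels = reduce(
        lambda x, y: x + [[y[0]] * y[1]] if y[2] != 0 else x[:-1] + [x[-1] + [y[0]] * y[1]],
        zip(channels_per_layers, layers, downsample),
        [[]])
    # -> [[16], [32], [64, 64], [128, 128, 256, 256, 512, 512, 512, 512]]
    return channels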
def drnc26(**kwargs):
"""
DRN-C-26 model from 'Dilated Residual Networks,' https://arxiv.org/abs/1705.09914.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_drn(blocks=26, model_name="drnc26", **kwargs)
def drnc42(**kwargs):
"""
DRN-C-42 model from 'Dilated Residual Networks,' https://arxiv.org/abs/1705.09914.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_drn(blocks=42, model_name="drnc42", **kwargs)
def drnc58(**kwargs):
"""
DRN-C-58 model from 'Dilated Residual Networks,' https://arxiv.org/abs/1705.09914.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_drn(blocks=58, model_name="drnc58", **kwargs)
def drnd22(**kwargs):
"""
    DRN-D-22 model from 'Dilated Residual Networks,' https://arxiv.org/abs/1705.09914.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_drn(blocks=22, simplified=True, model_name="drnd22", **kwargs)
def drnd38(**kwargs):
"""
DRN-D-38 model from 'Dilated Residual Networks,' https://arxiv.org/abs/1705.09914.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_drn(blocks=38, simplified=True, model_name="drnd38", **kwargs)
def drnd54(**kwargs):
"""
DRN-D-54 model from 'Dilated Residual Networks,' https://arxiv.org/abs/1705.09914.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_drn(blocks=54, simplified=True, model_name="drnd54", **kwargs)
def drnd105(**kwargs):
"""
DRN-D-105 model from 'Dilated Residual Networks,' https://arxiv.org/abs/1705.09914.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_drn(blocks=105, simplified=True, model_name="drnd105", **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
drnc26,
drnc42,
drnc58,
drnd22,
drnd38,
drnd54,
drnd105,
]
for model in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != drnc26 or weight_count == 21126584)
assert (model != drnc42 or weight_count == 31234744)
assert (model != drnc58 or weight_count == 40542008) # 41591608
assert (model != drnd22 or weight_count == 16393752)
assert (model != drnd38 or weight_count == 26501912)
assert (model != drnd54 or weight_count == 35809176)
assert (model != drnd105 or weight_count == 54801304)
x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
y = net(x)
assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
| 22,644 | 31.35 | 119 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/mixnet.py | """
MixNet for ImageNet-1K, implemented in Gluon.
Original paper: 'MixConv: Mixed Depthwise Convolutional Kernels,' https://arxiv.org/abs/1907.09595.
"""
__all__ = ['MixNet', 'mixnet_s', 'mixnet_m', 'mixnet_l']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import round_channels, get_activation_layer, split, conv1x1_block, conv3x3_block, dwconv3x3_block, SEBlock
class MixConv(HybridBlock):
"""
Mixed convolution layer from 'MixConv: Mixed Depthwise Convolutional Kernels,' https://arxiv.org/abs/1907.09595.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of int, or tuple/list of tuple/list of 2 int
Convolution window size.
strides : int or tuple/list of 2 int
Strides of the convolution.
padding : int or tuple/list of int, or tuple/list of tuple/list of 2 int
Padding value for convolution layer.
dilation : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
groups : int, default 1
Number of groups.
use_bias : bool, default False
Whether the layer uses a bias vector.
axis : int, default 1
The axis on which to concatenate the outputs.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
strides,
padding,
dilation=1,
groups=1,
use_bias=False,
axis=1,
**kwargs):
super(MixConv, self).__init__(**kwargs)
kernel_size = kernel_size if isinstance(kernel_size, list) else [kernel_size]
padding = padding if isinstance(padding, list) else [padding]
kernel_count = len(kernel_size)
self.splitted_in_channels = self.split_channels(in_channels, kernel_count)
splitted_out_channels = self.split_channels(out_channels, kernel_count)
self.axis = axis
with self.name_scope():
for i, kernel_size_i in enumerate(kernel_size):
in_channels_i = self.splitted_in_channels[i]
out_channels_i = splitted_out_channels[i]
padding_i = padding[i]
self.register_child(
nn.Conv2D(
channels=out_channels_i,
kernel_size=kernel_size_i,
strides=strides,
padding=padding_i,
dilation=dilation,
groups=(out_channels_i if out_channels == groups else groups),
use_bias=use_bias,
in_channels=in_channels_i))
def hybrid_forward(self, F, x):
xx = split(x, sizes=self.splitted_in_channels, axis=self.axis)
out = [conv_i(x_i) for x_i, conv_i in zip(xx, self._children.values())]
x = F.concat(*out, dim=self.axis)
return x
@staticmethod
def split_channels(channels, kernel_count):
splitted_channels = [channels // kernel_count] * kernel_count
splitted_channels[0] += channels - sum(splitted_channels)
return splitted_channels
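# Worked example (demo only, helper name is ours) of the channel splitting
# above: channels that do not divide evenly across kernels are assigned to
# the first group, so split_channels(15, 4) -> [6, 3, 3, 3].
def _split_channels_demo():
    splits = MixConv.split_channels(15, 4)
    assert splits == [6, 3, 3, 3] and sum(splits) == 15
    return splits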
class MixConvBlock(HybridBlock):
"""
Mixed convolution block with Batch normalization and activation.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of int, or tuple/list of tuple/list of 2 int
Convolution window size.
strides : int or tuple/list of 2 int
Strides of the convolution.
padding : int or tuple/list of int, or tuple/list of tuple/list of 2 int
Padding value for convolution layer.
dilation : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
groups : int, default 1
Number of groups.
use_bias : bool, default False
Whether the layer uses a bias vector.
use_bn : bool, default True
Whether to use BatchNorm layer.
bn_epsilon : float, default 1e-5
Small float added to variance in Batch norm.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
activation : function or str or None, default nn.Activation("relu")
Activation function or name of activation function.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
strides,
padding,
dilation=1,
groups=1,
use_bias=False,
use_bn=True,
bn_epsilon=1e-5,
bn_use_global_stats=False,
activation=(lambda: nn.Activation("relu")),
**kwargs):
super(MixConvBlock, self).__init__(**kwargs)
self.activate = (activation is not None)
self.use_bn = use_bn
with self.name_scope():
self.conv = MixConv(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
strides=strides,
padding=padding,
dilation=dilation,
groups=groups,
use_bias=use_bias)
if self.use_bn:
self.bn = nn.BatchNorm(
in_channels=out_channels,
epsilon=bn_epsilon,
use_global_stats=bn_use_global_stats)
if self.activate:
self.activ = get_activation_layer(activation)
def hybrid_forward(self, F, x):
x = self.conv(x)
if self.use_bn:
x = self.bn(x)
if self.activate:
x = self.activ(x)
return x
def mixconv1x1_block(in_channels,
out_channels,
kernel_count,
strides=1,
groups=1,
use_bias=False,
use_bn=True,
bn_epsilon=1e-5,
bn_use_global_stats=False,
activation=(lambda: nn.Activation("relu")),
**kwargs):
"""
1x1 version of the mixed convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_count : int
Kernel count.
strides : int or tuple/list of 2 int, default 1
Strides of the convolution.
groups : int, default 1
Number of groups.
use_bias : bool, default False
Whether the layer uses a bias vector.
use_bn : bool, default True
Whether to use BatchNorm layer.
bn_epsilon : float, default 1e-5
Small float added to variance in Batch norm.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
activation : function or str, or None, default nn.Activation("relu")
Activation function or name of activation function.
"""
return MixConvBlock(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=([1] * kernel_count),
strides=strides,
padding=([0] * kernel_count),
groups=groups,
use_bias=use_bias,
use_bn=use_bn,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
activation=activation,
**kwargs)
class MixUnit(HybridBlock):
"""
MixNet unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
exp_channels : int
Number of middle (expanded) channels.
strides : int or tuple/list of 2 int
Strides of the second convolution layer.
exp_kernel_count : int
Expansion convolution kernel count for each unit.
conv1_kernel_count : int
Conv1 kernel count for each unit.
conv2_kernel_count : int
Conv2 kernel count for each unit.
exp_factor : int
Expansion factor for each unit.
se_factor : int
SE reduction factor for each unit.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
activation : str
Activation function or name of activation function.
"""
def __init__(self,
in_channels,
out_channels,
strides,
exp_kernel_count,
conv1_kernel_count,
conv2_kernel_count,
exp_factor,
se_factor,
bn_use_global_stats,
activation,
**kwargs):
super(MixUnit, self).__init__(**kwargs)
assert (exp_factor >= 1)
assert (se_factor >= 0)
self.residual = (in_channels == out_channels) and (strides == 1)
self.use_se = se_factor > 0
mid_channels = exp_factor * in_channels
self.use_exp_conv = exp_factor > 1
with self.name_scope():
if self.use_exp_conv:
if exp_kernel_count == 1:
self.exp_conv = conv1x1_block(
in_channels=in_channels,
out_channels=mid_channels,
bn_use_global_stats=bn_use_global_stats,
activation=activation)
else:
self.exp_conv = mixconv1x1_block(
in_channels=in_channels,
out_channels=mid_channels,
kernel_count=exp_kernel_count,
bn_use_global_stats=bn_use_global_stats,
activation=activation)
if conv1_kernel_count == 1:
self.conv1 = dwconv3x3_block(
in_channels=mid_channels,
out_channels=mid_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats,
activation=activation)
else:
self.conv1 = MixConvBlock(
in_channels=mid_channels,
out_channels=mid_channels,
kernel_size=[3 + 2 * i for i in range(conv1_kernel_count)],
strides=strides,
padding=[1 + i for i in range(conv1_kernel_count)],
groups=mid_channels,
bn_use_global_stats=bn_use_global_stats,
activation=activation)
if self.use_se:
self.se = SEBlock(
channels=mid_channels,
reduction=(exp_factor * se_factor),
round_mid=False,
mid_activation=activation)
if conv2_kernel_count == 1:
self.conv2 = conv1x1_block(
in_channels=mid_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats,
activation=None)
else:
self.conv2 = mixconv1x1_block(
in_channels=mid_channels,
out_channels=out_channels,
kernel_count=conv2_kernel_count,
bn_use_global_stats=bn_use_global_stats,
activation=None)
def hybrid_forward(self, F, x):
if self.residual:
identity = x
if self.use_exp_conv:
x = self.exp_conv(x)
x = self.conv1(x)
if self.use_se:
x = self.se(x)
x = self.conv2(x)
if self.residual:
x = x + identity
return x
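# A small sketch (demo helper, not used by the model) of the kernel/padding
# lists built inside MixUnit for MixConvBlock: kernel i gets size 3 + 2*i and
# padding 1 + i, i.e. padding == (kernel - 1) // 2, so every branch preserves
# the spatial size at stride 1.
def _mix_kernels_demo(kernel_count=3):
    kernel_sizes = [3 + 2 * i for i in range(kernel_count)]   # [3, 5, 7]
    paddings = [1 + i for i in range(kernel_count)]           # [1, 2, 3]
    assert all(p == (k - 1) // 2 for k, p in zip(kernel_sizes, paddings))
    return kernel_sizes, paddings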
class MixInitBlock(HybridBlock):
"""
MixNet specific initial block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels,
out_channels,
bn_use_global_stats,
**kwargs):
super(MixInitBlock, self).__init__(**kwargs)
with self.name_scope():
self.conv1 = conv3x3_block(
in_channels=in_channels,
out_channels=out_channels,
strides=2,
bn_use_global_stats=bn_use_global_stats)
self.conv2 = MixUnit(
in_channels=out_channels,
out_channels=out_channels,
strides=1,
exp_kernel_count=1,
conv1_kernel_count=1,
conv2_kernel_count=1,
exp_factor=1,
se_factor=0,
bn_use_global_stats=bn_use_global_stats,
activation="relu")
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.conv2(x)
return x
class MixNet(HybridBlock):
"""
MixNet model from 'MixConv: Mixed Depthwise Convolutional Kernels,' https://arxiv.org/abs/1907.09595.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
final_block_channels : int
Number of output channels for the final block of the feature extractor.
exp_kernel_counts : list of list of int
Expansion convolution kernel count for each unit.
conv1_kernel_counts : list of list of int
Conv1 kernel count for each unit.
conv2_kernel_counts : list of list of int
Conv2 kernel count for each unit.
exp_factors : list of list of int
Expansion factor for each unit.
se_factors : list of list of int
SE reduction factor for each unit.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
final_block_channels,
exp_kernel_counts,
conv1_kernel_counts,
conv2_kernel_counts,
exp_factors,
se_factors,
bn_use_global_stats=False,
in_channels=3,
in_size=(224, 224),
classes=1000,
**kwargs):
super(MixNet, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
self.features.add(MixInitBlock(
in_channels=in_channels,
out_channels=init_block_channels,
bn_use_global_stats=bn_use_global_stats))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
with stage.name_scope():
for j, out_channels in enumerate(channels_per_stage):
strides = 2 if ((j == 0) and (i != 3)) or\
((j == len(channels_per_stage) // 2) and (i == 3)) else 1
exp_kernel_count = exp_kernel_counts[i][j]
conv1_kernel_count = conv1_kernel_counts[i][j]
conv2_kernel_count = conv2_kernel_counts[i][j]
exp_factor = exp_factors[i][j]
se_factor = se_factors[i][j]
activation = "relu" if i == 0 else "swish"
stage.add(MixUnit(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
exp_kernel_count=exp_kernel_count,
conv1_kernel_count=conv1_kernel_count,
conv2_kernel_count=conv2_kernel_count,
exp_factor=exp_factor,
se_factor=se_factor,
bn_use_global_stats=bn_use_global_stats,
activation=activation))
in_channels = out_channels
self.features.add(stage)
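            # Note: `activation` below reuses the value left by the last loop
            # iteration above, i.e. "swish" from the final stage.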
self.features.add(conv1x1_block(
in_channels=in_channels,
out_channels=final_block_channels,
bn_use_global_stats=bn_use_global_stats,
activation=activation))
in_channels = final_block_channels
self.features.add(nn.AvgPool2D(
pool_size=7,
strides=1))
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
self.output.add(nn.Dense(
units=classes,
in_units=in_channels))
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
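# A small sketch (demo only, helper name is ours) of the stride rule in the
# stage loop above, shown for the MixNet-S channel layout: the first unit of
# every stage downsamples except in the last stage, where the downsampling
# unit sits in the middle of the stage.
def _mixnet_stride_demo(channels=([24, 24], [40, 40, 40, 40], [80, 80, 80],
                                  [120, 120, 120, 200, 200, 200])):
    strides = []
    for i, stage in enumerate(channels):
        strides.append([2 if ((j == 0) and (i != 3)) or
                        ((j == len(stage) // 2) and (i == 3)) else 1
                        for j in range(len(stage))])
    # -> [[2, 1], [2, 1, 1, 1], [2, 1, 1], [1, 1, 1, 2, 1, 1]]
    return strides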
def get_mixnet(version,
width_scale,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create MixNet model with specific parameters.
Parameters:
----------
version : str
        Version of MixNet ('s' or 'm').
width_scale : float
Scale factor for width of layers.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
if version == "s":
init_block_channels = 16
channels = [[24, 24], [40, 40, 40, 40], [80, 80, 80], [120, 120, 120, 200, 200, 200]]
exp_kernel_counts = [[2, 2], [1, 2, 2, 2], [1, 1, 1], [2, 2, 2, 1, 1, 1]]
conv1_kernel_counts = [[1, 1], [3, 2, 2, 2], [3, 2, 2], [3, 4, 4, 5, 4, 4]]
conv2_kernel_counts = [[2, 2], [1, 2, 2, 2], [2, 2, 2], [2, 2, 2, 1, 2, 2]]
exp_factors = [[6, 3], [6, 6, 6, 6], [6, 6, 6], [6, 3, 3, 6, 6, 6]]
se_factors = [[0, 0], [2, 2, 2, 2], [4, 4, 4], [2, 2, 2, 2, 2, 2]]
elif version == "m":
init_block_channels = 24
channels = [[32, 32], [40, 40, 40, 40], [80, 80, 80, 80], [120, 120, 120, 120, 200, 200, 200, 200]]
exp_kernel_counts = [[2, 2], [1, 2, 2, 2], [1, 2, 2, 2], [1, 2, 2, 2, 1, 1, 1, 1]]
conv1_kernel_counts = [[3, 1], [4, 2, 2, 2], [3, 4, 4, 4], [1, 4, 4, 4, 4, 4, 4, 4]]
conv2_kernel_counts = [[2, 2], [1, 2, 2, 2], [1, 2, 2, 2], [1, 2, 2, 2, 1, 2, 2, 2]]
exp_factors = [[6, 3], [6, 6, 6, 6], [6, 6, 6, 6], [6, 3, 3, 3, 6, 6, 6, 6]]
se_factors = [[0, 0], [2, 2, 2, 2], [4, 4, 4, 4], [2, 2, 2, 2, 2, 2, 2, 2]]
else:
raise ValueError("Unsupported MixNet version {}".format(version))
final_block_channels = 1536
if width_scale != 1.0:
channels = [[round_channels(cij * width_scale) for cij in ci] for ci in channels]
init_block_channels = round_channels(init_block_channels * width_scale)
net = MixNet(
channels=channels,
init_block_channels=init_block_channels,
final_block_channels=final_block_channels,
exp_kernel_counts=exp_kernel_counts,
conv1_kernel_counts=conv1_kernel_counts,
conv2_kernel_counts=conv2_kernel_counts,
exp_factors=exp_factors,
se_factors=se_factors,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def mixnet_s(**kwargs):
"""
MixNet-S model from 'MixConv: Mixed Depthwise Convolutional Kernels,' https://arxiv.org/abs/1907.09595.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_mixnet(version="s", width_scale=1.0, model_name="mixnet_s", **kwargs)
def mixnet_m(**kwargs):
"""
MixNet-M model from 'MixConv: Mixed Depthwise Convolutional Kernels,' https://arxiv.org/abs/1907.09595.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_mixnet(version="m", width_scale=1.0, model_name="mixnet_m", **kwargs)
def mixnet_l(**kwargs):
"""
MixNet-L model from 'MixConv: Mixed Depthwise Convolutional Kernels,' https://arxiv.org/abs/1907.09595.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_mixnet(version="m", width_scale=1.3, model_name="mixnet_l", **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
mixnet_s,
mixnet_m,
mixnet_l,
]
for model in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != mixnet_s or weight_count == 4134606)
assert (model != mixnet_m or weight_count == 5014382)
assert (model != mixnet_l or weight_count == 7329252)
x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
y = net(x)
assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
| 23,494 | 35.826019 | 119 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/dabnet.py | """
DABNet for image segmentation, implemented in Gluon.
Original paper: 'DABNet: Depth-wise Asymmetric Bottleneck for Real-time Semantic Segmentation,'
https://arxiv.org/abs/1907.11357.
"""
__all__ = ['DABNet', 'dabnet_cityscapes']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1, conv3x3, conv3x3_block, ConvBlock, NormActivation, Concurrent, InterpolationBlock,\
DualPathSequential, PReLU2
class DwaConvBlock(HybridBlock):
"""
Depthwise asymmetric separable convolution block.
Parameters:
----------
channels : int
Number of input/output channels.
kernel_size : int
Convolution window size.
strides : int or tuple/list of 2 int
Strides of the convolution.
padding : int
Padding value for convolution layer.
dilation : int, default 1
Dilation value for convolution layer.
use_bias : bool, default False
Whether the layer uses a bias vector.
use_bn : bool, default True
Whether to use BatchNorm layer.
bn_epsilon : float, default 1e-5
Small float added to variance in Batch norm.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
activation : function or str or None, default nn.Activation('relu')
Activation function or name of activation function.
"""
def __init__(self,
channels,
kernel_size,
strides,
padding,
dilation=1,
use_bias=False,
use_bn=True,
bn_epsilon=1e-5,
bn_use_global_stats=False,
bn_cudnn_off=False,
activation=(lambda: nn.Activation("relu")),
**kwargs):
super(DwaConvBlock, self).__init__(**kwargs)
with self.name_scope():
self.conv1 = ConvBlock(
in_channels=channels,
out_channels=channels,
kernel_size=(kernel_size, 1),
strides=strides,
padding=(padding, 0),
dilation=(dilation, 1),
groups=channels,
use_bias=use_bias,
use_bn=use_bn,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
activation=activation)
self.conv2 = ConvBlock(
in_channels=channels,
out_channels=channels,
kernel_size=(1, kernel_size),
strides=strides,
padding=(0, padding),
dilation=(1, dilation),
groups=channels,
use_bias=use_bias,
use_bn=use_bn,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
activation=activation)
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.conv2(x)
return x
def dwa_conv3x3_block(channels,
strides=1,
padding=1,
dilation=1,
use_bias=False,
use_bn=True,
bn_epsilon=1e-5,
bn_use_global_stats=False,
bn_cudnn_off=False,
activation=(lambda: nn.Activation("relu")),
**kwargs):
"""
3x3 version of the depthwise asymmetric separable convolution block.
Parameters:
----------
channels : int
Number of input/output channels.
strides : int, default 1
Strides of the convolution.
padding : int, default 1
Padding value for convolution layer.
dilation : int, default 1
Dilation value for convolution layer.
use_bias : bool, default False
Whether the layer uses a bias vector.
use_bn : bool, default True
Whether to use BatchNorm layer.
bn_epsilon : float, default 1e-5
Small float added to variance in Batch norm.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
activation : function or str or None, default nn.Activation('relu')
Activation function or name of activation function.
"""
return DwaConvBlock(
channels=channels,
kernel_size=3,
strides=strides,
padding=padding,
dilation=dilation,
use_bias=use_bias,
use_bn=use_bn,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
activation=activation,
**kwargs)
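# Parameter-count sketch (demo only, helper name is ours) for the asymmetric
# depthwise factorization above: a depthwise (k, 1) + (1, k) pair uses 2*k
# weights per channel instead of k*k for a full k x k depthwise kernel.
def _dwa_param_demo(channels=64, kernel_size=3):
    full = channels * kernel_size * kernel_size
    factorized = 2 * channels * kernel_size
    return full, factorized   # e.g. (576, 384) for 64 channels, k = 3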
class DABBlock(HybridBlock):
"""
DABNet specific base block.
Parameters:
----------
channels : int
Number of input/output channels.
dilation : int
Dilation value for a dilated branch in the unit.
bn_epsilon : float
Small float added to variance in Batch norm.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
"""
def __init__(self,
channels,
dilation,
bn_epsilon,
bn_use_global_stats=False,
bn_cudnn_off=False,
**kwargs):
super(DABBlock, self).__init__(**kwargs)
mid_channels = channels // 2
with self.name_scope():
self.norm_activ1 = NormActivation(
in_channels=channels,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
activation=(lambda: PReLU2(channels)))
self.conv1 = conv3x3_block(
in_channels=channels,
out_channels=mid_channels,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
activation=(lambda: PReLU2(mid_channels)))
self.branches = Concurrent(stack=True)
self.branches.add(dwa_conv3x3_block(
channels=mid_channels,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
activation=(lambda: PReLU2(mid_channels))))
self.branches.add(dwa_conv3x3_block(
channels=mid_channels,
padding=dilation,
dilation=dilation,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
activation=(lambda: PReLU2(mid_channels))))
self.norm_activ2 = NormActivation(
in_channels=mid_channels,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
activation=(lambda: PReLU2(mid_channels)))
self.conv2 = conv1x1(
in_channels=mid_channels,
out_channels=channels)
def hybrid_forward(self, F, x):
identity = x
x = self.norm_activ1(x)
x = self.conv1(x)
x = self.branches(x)
x = x.sum(axis=1)
x = self.norm_activ2(x)
x = self.conv2(x)
x = x + identity
return x
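# A minimal sketch (assumption: demo with mxnet.ndarray, independent of the
# model) of the branch fusion above: Concurrent(stack=True) stacks the two
# branch outputs along a new axis 1, and sum(axis=1) adds them elementwise,
# which is equivalent to y1 + y2.
def _dab_branch_sum_demo():
    import mxnet as mx
    y1 = mx.nd.ones((1, 8, 4, 4))
    y2 = 2 * mx.nd.ones((1, 8, 4, 4))
    stacked = mx.nd.stack(y1, y2, axis=1)      # shape (1, 2, 8, 4, 4)
    fused = stacked.sum(axis=1)                # shape (1, 8, 4, 4)
    assert (fused.asnumpy() == 3).all()
    return fused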
class DownBlock(HybridBlock):
"""
DABNet specific downsample block for the main branch.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
bn_epsilon : float
Small float added to variance in Batch norm.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
"""
def __init__(self,
in_channels,
out_channels,
bn_epsilon,
bn_use_global_stats=False,
bn_cudnn_off=False,
**kwargs):
super(DownBlock, self).__init__(**kwargs)
self.expand = (in_channels < out_channels)
mid_channels = out_channels - in_channels if self.expand else out_channels
with self.name_scope():
self.conv = conv3x3(
in_channels=in_channels,
out_channels=mid_channels,
strides=2)
if self.expand:
self.pool = nn.MaxPool2D(
pool_size=2,
strides=2)
self.norm_activ = NormActivation(
in_channels=out_channels,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
activation=(lambda: PReLU2(out_channels)))
def hybrid_forward(self, F, x):
y = self.conv(x)
if self.expand:
z = self.pool(x)
y = F.concat(y, z, dim=1)
y = self.norm_activ(y)
return y
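# Channel arithmetic sketch for DownBlock (demo only, helper name is ours):
# when expanding, the stride-2 convolution produces out_channels - in_channels
# maps and the max-pooled identity contributes the remaining in_channels, so
# the concatenation yields exactly out_channels.
def _down_block_channels_demo(in_channels=3, out_channels=32):
    expand = in_channels < out_channels
    mid_channels = out_channels - in_channels if expand else out_channels
    total = mid_channels + (in_channels if expand else 0)
    assert total == out_channels
    return mid_channels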
class DABUnit(HybridBlock):
"""
DABNet unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
dilations : list of int
Dilations for blocks.
bn_epsilon : float
Small float added to variance in Batch norm.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
"""
def __init__(self,
in_channels,
out_channels,
dilations,
bn_epsilon,
bn_use_global_stats=False,
bn_cudnn_off=False,
**kwargs):
super(DABUnit, self).__init__(**kwargs)
mid_channels = out_channels // 2
with self.name_scope():
self.down = DownBlock(
in_channels=in_channels,
out_channels=mid_channels,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off)
self.blocks = nn.HybridSequential(prefix="")
for i, dilation in enumerate(dilations):
self.blocks.add(DABBlock(
channels=mid_channels,
dilation=dilation,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off))
def hybrid_forward(self, F, x):
x = self.down(x)
y = self.blocks(x)
x = F.concat(y, x, dim=1)
return x
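# Shape sketch (added comment): DABUnit halves the resolution, runs the DAB
# blocks on out_channels // 2 features, then concatenates the block output
# with the downsampled features, restoring out_channels (this assumes
# out_channels is even, which holds for all configurations below).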
class DABStage(HybridBlock):
"""
DABNet stage.
Parameters:
----------
x_channels : int
Number of input/output channels for x.
y_in_channels : int
Number of input channels for y.
y_out_channels : int
Number of output channels for y.
dilations : list of int
Dilations for blocks.
bn_epsilon : float
Small float added to variance in Batch norm.
bn_use_global_stats : bool, default False
        Whether global moving statistics are used instead of local batch statistics in BatchNorm layers.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
"""
def __init__(self,
x_channels,
y_in_channels,
y_out_channels,
dilations,
bn_epsilon,
bn_use_global_stats=False,
bn_cudnn_off=False,
**kwargs):
super(DABStage, self).__init__(**kwargs)
self.use_unit = (len(dilations) > 0)
with self.name_scope():
self.x_down = nn.AvgPool2D(
pool_size=3,
strides=2,
padding=1)
if self.use_unit:
self.unit = DABUnit(
in_channels=y_in_channels,
out_channels=(y_out_channels - x_channels),
dilations=dilations,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off)
self.norm_activ = NormActivation(
in_channels=y_out_channels,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
activation=(lambda: PReLU2(y_out_channels)))
def hybrid_forward(self, F, y, x):
x = self.x_down(x)
if self.use_unit:
y = self.unit(y)
y = F.concat(y, x, dim=1)
y = self.norm_activ(y)
return y, x
class DABInitBlock(HybridBlock):
"""
DABNet specific initial block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
bn_epsilon : float
Small float added to variance in Batch norm.
bn_use_global_stats : bool, default False
        Whether global moving statistics are used instead of local batch statistics in BatchNorm layers.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
"""
def __init__(self,
in_channels,
out_channels,
bn_epsilon,
bn_use_global_stats=False,
bn_cudnn_off=False,
**kwargs):
super(DABInitBlock, self).__init__(**kwargs)
with self.name_scope():
self.conv1 = conv3x3_block(
in_channels=in_channels,
out_channels=out_channels,
strides=2,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
activation=(lambda: PReLU2(out_channels)))
self.conv2 = conv3x3_block(
in_channels=out_channels,
out_channels=out_channels,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
activation=(lambda: PReLU2(out_channels)))
self.conv3 = conv3x3_block(
in_channels=out_channels,
out_channels=out_channels,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
activation=(lambda: PReLU2(out_channels)))
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
return x
class DABNet(HybridBlock):
"""
DABNet model from 'DABNet: Depth-wise Asymmetric Bottleneck for Real-time Semantic Segmentation,'
https://arxiv.org/abs/1907.11357.
Parameters:
----------
channels : list of int
Number of output channels for each unit (for y-branch).
init_block_channels : int
Number of output channels for the initial unit.
dilations : list of list of int
Dilations for blocks.
bn_epsilon : float, default 1e-5
Small float added to variance in Batch norm.
bn_use_global_stats : bool, default False
        Whether global moving statistics are used instead of local batch statistics in BatchNorm layers.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
aux : bool, default False
Whether to output an auxiliary result.
fixed_size : bool, default False
Whether to expect fixed spatial size of input image.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (1024, 2048)
Spatial size of the expected input image.
classes : int, default 19
Number of segmentation classes.
"""
def __init__(self,
channels,
init_block_channels,
dilations,
bn_epsilon=1e-5,
bn_use_global_stats=False,
bn_cudnn_off=False,
aux=False,
fixed_size=False,
in_channels=3,
in_size=(1024, 2048),
classes=19,
**kwargs):
super(DABNet, self).__init__(**kwargs)
assert (aux is not None)
assert (fixed_size is not None)
assert ((in_size[0] % 8 == 0) and (in_size[1] % 8 == 0))
self.in_size = in_size
self.classes = classes
self.fixed_size = fixed_size
with self.name_scope():
self.features = DualPathSequential(
return_two=False,
first_ordinals=1,
last_ordinals=0)
self.features.add(DABInitBlock(
in_channels=in_channels,
out_channels=init_block_channels,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off))
y_in_channels = init_block_channels
for i, (y_out_channels, dilations_i) in enumerate(zip(channels, dilations)):
self.features.add(DABStage(
x_channels=in_channels,
y_in_channels=y_in_channels,
y_out_channels=y_out_channels,
dilations=dilations_i,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off))
y_in_channels = y_out_channels
self.classifier = conv1x1(
in_channels=y_in_channels,
out_channels=classes)
self.up = InterpolationBlock(scale_factor=8)
def hybrid_forward(self, F, x):
in_size = self.in_size if self.fixed_size else x.shape[2:]
y = self.features(x, x)
y = self.classifier(y)
y = self.up(y, in_size)
return y
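# Construction sketch (added comment, values taken from get_dabnet below):
# every stage concatenates the 3-channel downsampled image, which is why the
# per-stage widths are 35/131/259 rather than round numbers.
#
#   import mxnet as mx
#   net = DABNet(channels=[35, 131, 259], init_block_channels=32,
#                dilations=[[], [2, 2, 2], [4, 4, 8, 8, 16, 16]],
#                bn_epsilon=1e-3)
#   net.initialize()
#   assert net(mx.nd.zeros((1, 3, 1024, 2048))).shape == (1, 19, 1024, 2048)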
def get_dabnet(model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create DABNet model with specific parameters.
Parameters:
----------
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
init_block_channels = 32
channels = [35, 131, 259]
dilations = [[], [2, 2, 2], [4, 4, 8, 8, 16, 16]]
bn_epsilon = 1e-3
net = DABNet(
channels=channels,
init_block_channels=init_block_channels,
dilations=dilations,
bn_epsilon=bn_epsilon,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx,
ignore_extra=True)
return net
def dabnet_cityscapes(classes=19, **kwargs):
"""
DABNet model for Cityscapes from 'DABNet: Depth-wise Asymmetric Bottleneck for Real-time Semantic Segmentation,'
https://arxiv.org/abs/1907.11357.
Parameters:
----------
classes : int, default 19
Number of segmentation classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_dabnet(classes=classes, model_name="dabnet_cityscapes", **kwargs)
def _calc_width(net):
import numpy as np
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
return weight_count
def _test():
import mxnet as mx
pretrained = False
fixed_size = True
in_size = (1024, 2048)
classes = 19
models = [
dabnet_cityscapes,
]
for model in models:
net = model(pretrained=pretrained, in_size=in_size, fixed_size=fixed_size)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
weight_count = _calc_width(net)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != dabnet_cityscapes or weight_count == 756643)
batch = 4
x = mx.nd.random.normal(shape=(batch, 3, in_size[0], in_size[1]), ctx=ctx)
y = net(x)
assert (y.shape == (batch, classes, in_size[0], in_size[1]))
if __name__ == "__main__":
_test()
| 22,095 | 32.682927 | 116 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/cgnet.py | """
CGNet for image segmentation, implemented in Gluon.
Original paper: 'CGNet: A Light-weight Context Guided Network for Semantic Segmentation,'
https://arxiv.org/abs/1811.08201.
"""
__all__ = ['CGNet', 'cgnet_cityscapes']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import NormActivation, conv1x1, conv1x1_block, conv3x3_block, depthwise_conv3x3, SEBlock, Concurrent,\
DualPathSequential, InterpolationBlock, PReLU2
class CGBlock(HybridBlock):
"""
CGNet block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
dilation : int
Dilation value.
se_reduction : int
SE-block reduction value.
down : bool
Whether to downsample.
bn_epsilon : float
Small float added to variance in Batch norm.
bn_use_global_stats : bool, default False
        Whether global moving statistics are used instead of local batch statistics in BatchNorm layers.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
"""
def __init__(self,
in_channels,
out_channels,
dilation,
se_reduction,
down,
bn_epsilon,
bn_use_global_stats=False,
bn_cudnn_off=False,
**kwargs):
super(CGBlock, self).__init__(**kwargs)
self.down = down
if self.down:
mid1_channels = out_channels
mid2_channels = 2 * out_channels
else:
mid1_channels = out_channels // 2
mid2_channels = out_channels
with self.name_scope():
if self.down:
self.conv1 = conv3x3_block(
in_channels=in_channels,
out_channels=out_channels,
strides=2,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
activation=(lambda: PReLU2(out_channels)))
else:
self.conv1 = conv1x1_block(
in_channels=in_channels,
out_channels=mid1_channels,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
activation=(lambda: PReLU2(mid1_channels)))
self.branches = Concurrent()
self.branches.add(depthwise_conv3x3(channels=mid1_channels))
self.branches.add(depthwise_conv3x3(
channels=mid1_channels,
padding=dilation,
dilation=dilation))
self.norm_activ = NormActivation(
in_channels=mid2_channels,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
activation=(lambda: PReLU2(mid2_channels)))
if self.down:
self.conv2 = conv1x1(
in_channels=mid2_channels,
out_channels=out_channels)
self.se = SEBlock(
channels=out_channels,
reduction=se_reduction,
use_conv=False)
def hybrid_forward(self, F, x):
if not self.down:
identity = x
x = self.conv1(x)
x = self.branches(x)
x = self.norm_activ(x)
if self.down:
x = self.conv2(x)
x = self.se(x)
if not self.down:
x = x + identity
return x
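# Shape sketch (added comment): the two depthwise branches (plain 3x3
# "local" and dilated 3x3 "surrounding") both run on mid1_channels features
# and are concatenated, doubling the width before norm_activ; without
# downsampling the block is residual, so input and output shapes match.
#
#   import mxnet as mx
#   blk = CGBlock(in_channels=64, out_channels=64, dilation=2,
#                 se_reduction=8, down=False, bn_epsilon=1e-3)
#   blk.initialize()
#   assert blk(mx.nd.zeros((1, 64, 32, 64))).shape == (1, 64, 32, 64)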
class CGUnit(HybridBlock):
"""
CGNet unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
layers : int
Number of layers.
dilation : int
Dilation value.
se_reduction : int
SE-block reduction value.
bn_epsilon : float
Small float added to variance in Batch norm.
bn_use_global_stats : bool, default False
        Whether global moving statistics are used instead of local batch statistics in BatchNorm layers.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
"""
def __init__(self,
in_channels,
out_channels,
layers,
dilation,
se_reduction,
bn_epsilon,
bn_use_global_stats=False,
bn_cudnn_off=False,
**kwargs):
super(CGUnit, self).__init__(**kwargs)
mid_channels = out_channels // 2
with self.name_scope():
self.down = CGBlock(
in_channels=in_channels,
out_channels=mid_channels,
dilation=dilation,
se_reduction=se_reduction,
down=True,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off)
self.blocks = nn.HybridSequential(prefix="")
for i in range(layers - 1):
self.blocks.add(CGBlock(
in_channels=mid_channels,
out_channels=mid_channels,
dilation=dilation,
se_reduction=se_reduction,
down=False,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off))
def hybrid_forward(self, F, x):
x = self.down(x)
y = self.blocks(x)
x = F.concat(y, x, dim=1) # NB: This differs from the original implementation.
return x
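# Shape sketch (added comment): `down` halves the resolution and emits
# out_channels // 2 features, the block stack keeps that width, and the
# final concat of the block output with its own input restores out_channels,
# giving the DenseNet-like skip noted in the NB above.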
class CGStage(HybridBlock):
"""
CGNet stage.
Parameters:
----------
x_channels : int
Number of input/output channels for x.
y_in_channels : int
Number of input channels for y.
y_out_channels : int
Number of output channels for y.
layers : int
Number of layers in the unit.
dilation : int
Dilation for blocks.
se_reduction : int
SE-block reduction value for blocks.
bn_epsilon : float
Small float added to variance in Batch norm.
bn_use_global_stats : bool, default False
        Whether global moving statistics are used instead of local batch statistics in BatchNorm layers.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
"""
def __init__(self,
x_channels,
y_in_channels,
y_out_channels,
layers,
dilation,
se_reduction,
bn_epsilon,
bn_use_global_stats=False,
bn_cudnn_off=False,
**kwargs):
super(CGStage, self).__init__(**kwargs)
self.use_x = (x_channels > 0)
self.use_unit = (layers > 0)
with self.name_scope():
if self.use_x:
self.x_down = nn.AvgPool2D(
pool_size=3,
strides=2,
padding=1)
if self.use_unit:
self.unit = CGUnit(
in_channels=y_in_channels,
out_channels=(y_out_channels - x_channels),
layers=layers,
dilation=dilation,
se_reduction=se_reduction,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off)
self.norm_activ = NormActivation(
in_channels=y_out_channels,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
activation=(lambda: PReLU2(y_out_channels)))
def hybrid_forward(self, F, y, x=None):
if self.use_unit:
y = self.unit(y)
if self.use_x:
x = self.x_down(x)
y = F.concat(y, x, dim=1)
y = self.norm_activ(y)
return y, x
class CGInitBlock(HybridBlock):
"""
CGNet specific initial block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
bn_epsilon : float
Small float added to variance in Batch norm.
bn_use_global_stats : bool, default False
        Whether global moving statistics are used instead of local batch statistics in BatchNorm layers.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
"""
def __init__(self,
in_channels,
out_channels,
bn_epsilon,
bn_use_global_stats=False,
bn_cudnn_off=False,
**kwargs):
super(CGInitBlock, self).__init__(**kwargs)
with self.name_scope():
self.conv1 = conv3x3_block(
in_channels=in_channels,
out_channels=out_channels,
strides=2,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
activation=(lambda: PReLU2(out_channels)))
self.conv2 = conv3x3_block(
in_channels=out_channels,
out_channels=out_channels,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
activation=(lambda: PReLU2(out_channels)))
self.conv3 = conv3x3_block(
in_channels=out_channels,
out_channels=out_channels,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
activation=(lambda: PReLU2(out_channels)))
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
return x
class CGNet(HybridBlock):
"""
CGNet model from 'CGNet: A Light-weight Context Guided Network for Semantic Segmentation,'
https://arxiv.org/abs/1811.08201.
Parameters:
----------
layers : list of int
Number of layers for each unit.
channels : list of int
Number of output channels for each unit (for y-branch).
init_block_channels : int
Number of output channels for the initial unit.
dilations : list of int
Dilations for each unit.
se_reductions : list of int
SE-block reduction value for each unit.
cut_x : list of int
Whether to concatenate with x-branch for each unit.
bn_epsilon : float, default 1e-5
Small float added to variance in Batch norm.
bn_use_global_stats : bool, default False
        Whether global moving statistics are used instead of local batch statistics in BatchNorm layers.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
aux : bool, default False
Whether to output an auxiliary result.
fixed_size : bool, default False
Whether to expect fixed spatial size of input image.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (1024, 2048)
Spatial size of the expected input image.
classes : int, default 19
Number of segmentation classes.
"""
def __init__(self,
layers,
channels,
init_block_channels,
dilations,
se_reductions,
cut_x,
bn_epsilon=1e-5,
bn_use_global_stats=False,
bn_cudnn_off=False,
aux=False,
fixed_size=False,
in_channels=3,
in_size=(1024, 2048),
classes=19,
**kwargs):
super(CGNet, self).__init__(**kwargs)
assert (aux is not None)
assert (fixed_size is not None)
assert ((in_size[0] % 8 == 0) and (in_size[1] % 8 == 0))
self.in_size = in_size
self.classes = classes
self.fixed_size = fixed_size
with self.name_scope():
self.features = DualPathSequential(
return_two=False,
first_ordinals=1,
last_ordinals=0)
self.features.add(CGInitBlock(
in_channels=in_channels,
out_channels=init_block_channels,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off))
y_in_channels = init_block_channels
for i, (layers_i, y_out_channels) in enumerate(zip(layers, channels)):
self.features.add(CGStage(
x_channels=in_channels if cut_x[i] == 1 else 0,
y_in_channels=y_in_channels,
y_out_channels=y_out_channels,
layers=layers_i,
dilation=dilations[i],
se_reduction=se_reductions[i],
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off))
y_in_channels = y_out_channels
self.classifier = conv1x1(
in_channels=y_in_channels,
out_channels=classes)
self.up = InterpolationBlock(scale_factor=8)
def hybrid_forward(self, F, x):
in_size = self.in_size if self.fixed_size else x.shape[2:]
y = self.features(x, x)
y = self.classifier(y)
y = self.up(y, in_size)
return y
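# Construction sketch (added comment, values taken from get_cgnet below):
#
#   import mxnet as mx
#   net = CGNet(layers=[0, 3, 21], channels=[35, 131, 256],
#               init_block_channels=32, dilations=[0, 2, 4],
#               se_reductions=[0, 8, 16], cut_x=[1, 1, 0], bn_epsilon=1e-3)
#   net.initialize()
#   assert net(mx.nd.zeros((1, 3, 1024, 2048))).shape == (1, 19, 1024, 2048)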
def get_cgnet(model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create CGNet model with specific parameters.
Parameters:
----------
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
init_block_channels = 32
layers = [0, 3, 21]
channels = [35, 131, 256]
dilations = [0, 2, 4]
se_reductions = [0, 8, 16]
cut_x = [1, 1, 0]
bn_epsilon = 1e-3
net = CGNet(
layers=layers,
channels=channels,
init_block_channels=init_block_channels,
dilations=dilations,
se_reductions=se_reductions,
cut_x=cut_x,
bn_epsilon=bn_epsilon,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx,
ignore_extra=True)
return net
def cgnet_cityscapes(classes=19, **kwargs):
"""
CGNet model for Cityscapes from 'CGNet: A Light-weight Context Guided Network for Semantic Segmentation,'
https://arxiv.org/abs/1811.08201.
Parameters:
----------
classes : int, default 19
Number of segmentation classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_cgnet(classes=classes, model_name="cgnet_cityscapes", **kwargs)
def _calc_width(net):
import numpy as np
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
return weight_count
def _test():
import mxnet as mx
pretrained = False
fixed_size = True
in_size = (1024, 2048)
classes = 19
models = [
cgnet_cityscapes,
]
for model in models:
net = model(pretrained=pretrained, in_size=in_size, fixed_size=fixed_size)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
weight_count = _calc_width(net)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != cgnet_cityscapes or weight_count == 496306)
batch = 4
x = mx.nd.random.normal(shape=(batch, 3, in_size[0], in_size[1]), ctx=ctx)
y = net(x)
assert (y.shape == (batch, classes, in_size[0], in_size[1]))
if __name__ == "__main__":
_test()
| 17,360 | 32.068571 | 115 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/wrn1bit_cifar.py | """
WRN-1bit for CIFAR/SVHN, implemented in Gluon.
Original paper: 'Training wide residual networks for deployment using a single bit for each weight,'
https://arxiv.org/abs/1802.08530.
"""
__all__ = ['CIFARWRN1bit', 'wrn20_10_1bit_cifar10', 'wrn20_10_1bit_cifar100', 'wrn20_10_1bit_svhn',
'wrn20_10_32bit_cifar10', 'wrn20_10_32bit_cifar100', 'wrn20_10_32bit_svhn']
import os
import math
import mxnet as mx
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
class Binarize(mx.autograd.Function):
"""
Fake sign op for 1-bit weights.
"""
def forward(self, x):
return math.sqrt(2.0 / (x.shape[1] * x.shape[2] * x.shape[3])) * x.sign()
def backward(self, dy):
return dy
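# Numeric sketch (added comment): the forward pass replaces each weight with
# its sign scaled by sqrt(2 / fan_in), where fan_in = shape[1] * shape[2] *
# shape[3] for a conv kernel; the backward pass copies the gradient through
# unchanged (a straight-through estimator).
#
#   w = mx.nd.array([[[[0.3, -1.2], [0.0, 2.0]]]])   # fan_in = 1 * 2 * 2
#   wb = Binarize()(w)
#   # wb == sqrt(2 / 4) * sign(w) ~= 0.7071 * [[1, -1], [0, 1]]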
class Conv2D1bit(nn.Conv2D):
"""
Standard convolution block with binarization.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of 2 int
Convolution window size.
strides : int or tuple/list of 2 int
Strides of the convolution.
padding : int or tuple/list of 2 int, default 1
Padding value for convolution layer.
dilation : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
groups : int, default 1
Number of groups.
use_bias : bool, default False
Whether the layer uses a bias vector.
binarized : bool, default False
Whether to use binarization.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
strides,
padding=1,
dilation=1,
groups=1,
use_bias=False,
binarized=False,
**kwargs):
super(Conv2D1bit, self).__init__(
in_channels=in_channels,
channels=out_channels,
kernel_size=kernel_size,
strides=strides,
padding=padding,
dilation=dilation,
groups=groups,
use_bias=use_bias,
**kwargs)
self.binarized = binarized
def hybrid_forward(self, F, x, weight, bias=None):
weight_1bit = Binarize()(weight) if self.binarized else weight
bias_1bit = Binarize()(bias) if bias is not None and self.binarized else bias
return super(Conv2D1bit, self).hybrid_forward(F, x, weight=weight_1bit, bias=bias_1bit)
def conv1x1_1bit(in_channels,
out_channels,
strides=1,
groups=1,
use_bias=False,
binarized=False):
"""
Convolution 1x1 layer with binarization.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int, default 1
Strides of the convolution.
groups : int, default 1
Number of groups.
use_bias : bool, default False
Whether the layer uses a bias vector.
binarized : bool, default False
Whether to use binarization.
"""
return Conv2D1bit(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=1,
strides=strides,
groups=groups,
use_bias=use_bias,
binarized=binarized)
def conv3x3_1bit(in_channels,
out_channels,
strides=1,
padding=1,
dilation=1,
groups=1,
use_bias=False,
binarized=False):
"""
Convolution 3x3 layer with binarization.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int, default 1
Strides of the convolution.
padding : int or tuple/list of 2 int, default 1
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
groups : int, default 1
Number of groups.
use_bias : bool, default False
Whether the layer uses a bias vector.
binarized : bool, default False
Whether to use binarization.
"""
return Conv2D1bit(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
strides=strides,
padding=padding,
dilation=dilation,
groups=groups,
use_bias=use_bias,
binarized=binarized)
class ConvBlock1bit(HybridBlock):
"""
Standard convolution block with Batch normalization and ReLU activation, and binarization.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of 2 int
Convolution window size.
strides : int or tuple/list of 2 int
Strides of the convolution.
padding : int or tuple/list of 2 int
Padding value for convolution layer.
dilation : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
groups : int, default 1
Number of groups.
use_bias : bool, default False
Whether the layer uses a bias vector.
bn_affine : bool, default True
Whether the BatchNorm layer learns affine parameters.
bn_use_global_stats : bool, default False
        Whether global moving statistics are used instead of local batch statistics in BatchNorm layers.
activate : bool, default True
        Whether to activate the convolution block.
binarized : bool, default False
Whether to use binarization.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
strides,
padding,
dilation=1,
groups=1,
use_bias=False,
bn_affine=True,
bn_use_global_stats=False,
activate=True,
binarized=False,
**kwargs):
super(ConvBlock1bit, self).__init__(**kwargs)
self.activate = activate
with self.name_scope():
self.conv = Conv2D1bit(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
strides=strides,
padding=padding,
dilation=dilation,
groups=groups,
use_bias=use_bias,
binarized=binarized)
self.bn = nn.BatchNorm(
in_channels=out_channels,
center=bn_affine,
scale=bn_affine,
use_global_stats=bn_use_global_stats)
if self.activate:
self.activ = nn.Activation("relu")
def hybrid_forward(self, F, x):
x = self.conv(x)
x = self.bn(x)
if self.activate:
x = self.activ(x)
return x
def conv1x1_block_1bit(in_channels,
out_channels,
strides=1,
padding=0,
groups=1,
use_bias=False,
bn_affine=True,
bn_use_global_stats=False,
activate=True,
binarized=False):
"""
1x1 version of the standard convolution block with binarization.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int, default 1
Strides of the convolution.
padding : int or tuple/list of 2 int, default 0
Padding value for convolution layer.
groups : int, default 1
Number of groups.
use_bias : bool, default False
Whether the layer uses a bias vector.
bn_affine : bool, default True
Whether the BatchNorm layer learns affine parameters.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
activate : bool, default True
        Whether to activate the convolution block.
binarized : bool, default False
Whether to use binarization.
"""
return ConvBlock1bit(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=1,
strides=strides,
padding=padding,
groups=groups,
use_bias=use_bias,
bn_affine=bn_affine,
bn_use_global_stats=bn_use_global_stats,
activate=activate,
binarized=binarized)
class PreConvBlock1bit(HybridBlock):
"""
Convolution block with Batch normalization and ReLU pre-activation, and binarization.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of 2 int
Convolution window size.
strides : int or tuple/list of 2 int
Strides of the convolution.
padding : int or tuple/list of 2 int
Padding value for convolution layer.
dilation : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
use_bias : bool, default False
Whether the layer uses a bias vector.
bn_affine : bool, default True
Whether the BatchNorm layer learns affine parameters.
bn_use_global_stats : bool, default False
        Whether global moving statistics are used instead of local batch statistics in BatchNorm layers.
return_preact : bool, default False
        Whether to return the pre-activation. It's used by PreResNet.
activate : bool, default True
        Whether to activate the convolution block.
binarized : bool, default False
Whether to use binarization.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
strides,
padding,
dilation=1,
use_bias=False,
bn_affine=True,
bn_use_global_stats=False,
return_preact=False,
activate=True,
binarized=False,
**kwargs):
super(PreConvBlock1bit, self).__init__(**kwargs)
self.return_preact = return_preact
self.activate = activate
with self.name_scope():
self.bn = nn.BatchNorm(
in_channels=in_channels,
center=bn_affine,
scale=bn_affine,
use_global_stats=bn_use_global_stats)
if self.activate:
self.activ = nn.Activation("relu")
self.conv = Conv2D1bit(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
strides=strides,
padding=padding,
dilation=dilation,
use_bias=use_bias,
binarized=binarized)
def hybrid_forward(self, F, x):
x = self.bn(x)
if self.activate:
x = self.activ(x)
if self.return_preact:
x_pre_activ = x
x = self.conv(x)
if self.return_preact:
return x, x_pre_activ
else:
return x
def pre_conv3x3_block_1bit(in_channels,
out_channels,
strides=1,
padding=1,
dilation=1,
bn_affine=True,
bn_use_global_stats=False,
return_preact=False,
activate=True,
binarized=False):
"""
3x3 version of the pre-activated convolution block with binarization.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int, default 1
Strides of the convolution.
padding : int or tuple/list of 2 int, default 1
Padding value for convolution layer.
dilation : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
bn_affine : bool, default True
Whether the BatchNorm layer learns affine parameters.
bn_use_global_stats : bool, default False
        Whether global moving statistics are used instead of local batch statistics in BatchNorm layers.
return_preact : bool, default False
        Whether to return the pre-activation.
activate : bool, default True
        Whether to activate the convolution block.
binarized : bool, default False
Whether to use binarization.
"""
return PreConvBlock1bit(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
strides=strides,
padding=padding,
dilation=dilation,
bn_affine=bn_affine,
bn_use_global_stats=bn_use_global_stats,
return_preact=return_preact,
activate=activate,
binarized=binarized)
class PreResBlock1bit(HybridBlock):
"""
Simple PreResNet block for residual path in ResNet unit (with binarization).
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
bn_use_global_stats : bool
        Whether global moving statistics are used instead of local batch statistics in BatchNorm layers.
binarized : bool, default False
Whether to use binarization.
"""
def __init__(self,
in_channels,
out_channels,
strides,
bn_use_global_stats,
binarized=False,
**kwargs):
super(PreResBlock1bit, self).__init__(**kwargs)
with self.name_scope():
self.conv1 = pre_conv3x3_block_1bit(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_affine=False,
bn_use_global_stats=bn_use_global_stats,
return_preact=False,
binarized=binarized)
self.conv2 = pre_conv3x3_block_1bit(
in_channels=out_channels,
out_channels=out_channels,
bn_affine=False,
bn_use_global_stats=bn_use_global_stats,
binarized=binarized)
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.conv2(x)
return x
class PreResUnit1bit(HybridBlock):
"""
PreResNet unit with residual connection (with binarization).
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
bn_use_global_stats : bool
        Whether global moving statistics are used instead of local batch statistics in BatchNorm layers.
binarized : bool, default False
Whether to use binarization.
"""
def __init__(self,
in_channels,
out_channels,
strides,
bn_use_global_stats,
binarized=False,
**kwargs):
super(PreResUnit1bit, self).__init__(**kwargs)
self.resize_identity = (strides != 1)
with self.name_scope():
self.body = PreResBlock1bit(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats,
binarized=binarized)
if self.resize_identity:
self.identity_pool = nn.AvgPool2D(
pool_size=3,
strides=2,
padding=1)
def hybrid_forward(self, F, x):
identity = x
x = self.body(x)
if self.resize_identity:
identity = self.identity_pool(identity)
identity = F.concat(identity, F.zeros_like(identity), dim=1)
x = x + identity
return x
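# Identity-path sketch (added comment): when strides != 1 the shortcut has
# no learned projection; the input is average-pooled to the new resolution
# and its channels are doubled by concatenating zeros, matching the doubled
# width of the residual body in the WRN configs used here, e.g.
# (N, 160, 32, 32) -> (N, 320, 16, 16).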
class PreResActivation(HybridBlock):
"""
PreResNet pure pre-activation block without convolution layer. It's used by itself as the final block.
Parameters:
----------
in_channels : int
Number of input channels.
bn_affine : bool, default True
Whether the BatchNorm layer learns affine parameters.
bn_use_global_stats : bool, default False
        Whether global moving statistics are used instead of local batch statistics in BatchNorm layers.
"""
def __init__(self,
in_channels,
bn_affine=True,
bn_use_global_stats=False,
**kwargs):
super(PreResActivation, self).__init__(**kwargs)
with self.name_scope():
self.bn = nn.BatchNorm(
in_channels=in_channels,
center=bn_affine,
scale=bn_affine,
use_global_stats=bn_use_global_stats)
self.activ = nn.Activation("relu")
def hybrid_forward(self, F, x):
x = self.bn(x)
x = self.activ(x)
return x
class CIFARWRN1bit(HybridBlock):
"""
WRN-1bit model for CIFAR from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
binarized : bool, default True
Whether to use binarization.
bn_use_global_stats : bool, default False
        Whether global moving statistics are used instead of local batch statistics in BatchNorm layers.
Useful for fine-tuning.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (32, 32)
Spatial size of the expected input image.
classes : int, default 10
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
binarized=True,
bn_use_global_stats=False,
in_channels=3,
in_size=(32, 32),
classes=10,
**kwargs):
super(CIFARWRN1bit, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
self.features.add(conv3x3_1bit(
in_channels=in_channels,
out_channels=init_block_channels,
binarized=binarized))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
with stage.name_scope():
for j, out_channels in enumerate(channels_per_stage):
strides = 2 if (j == 0) and (i != 0) else 1
stage.add(PreResUnit1bit(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats,
binarized=binarized))
in_channels = out_channels
self.features.add(stage)
self.features.add(PreResActivation(
in_channels=in_channels,
bn_use_global_stats=bn_use_global_stats,
bn_affine=False))
self.output = nn.HybridSequential(prefix="")
self.output.add(conv1x1_block_1bit(
in_channels=in_channels,
out_channels=classes,
activate=False,
binarized=binarized))
self.output.add(nn.AvgPool2D(
pool_size=8,
strides=1))
self.output.add(nn.Flatten())
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_wrn1bit_cifar(classes,
blocks,
width_factor,
binarized=True,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create WRN-1bit model for CIFAR with specific parameters.
Parameters:
----------
classes : int
Number of classification classes.
blocks : int
Number of blocks.
width_factor : int
Wide scale factor for width of layers.
binarized : bool, default True
Whether to use binarization.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
assert ((blocks - 2) % 6 == 0)
layers = [(blocks - 2) // 6] * 3
channels_per_layers = [16, 32, 64]
init_block_channels = 16
channels = [[ci * width_factor] * li for (ci, li) in zip(channels_per_layers, layers)]
init_block_channels *= width_factor
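    # Worked example (added comment): blocks=20 and width_factor=10 give
    # layers=[3, 3, 3], channels=[[160]*3, [320]*3, [640]*3] and
    # init_block_channels=160, i.e. the WRN-20-10 configuration used below.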
net = CIFARWRN1bit(
channels=channels,
init_block_channels=init_block_channels,
binarized=binarized,
classes=classes,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def wrn20_10_1bit_cifar10(classes=10, **kwargs):
"""
WRN-20-10-1bit model for CIFAR-10 from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_wrn1bit_cifar(classes=classes, blocks=20, width_factor=10, binarized=True,
model_name="wrn20_10_1bit_cifar10", **kwargs)
def wrn20_10_1bit_cifar100(classes=100, **kwargs):
"""
WRN-20-10-1bit model for CIFAR-100 from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_wrn1bit_cifar(classes=classes, blocks=20, width_factor=10, binarized=True,
model_name="wrn20_10_1bit_cifar100", **kwargs)
def wrn20_10_1bit_svhn(classes=10, **kwargs):
"""
WRN-20-10-1bit model for SVHN from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_wrn1bit_cifar(classes=classes, blocks=20, width_factor=10, binarized=True,
model_name="wrn20_10_1bit_svhn", **kwargs)
def wrn20_10_32bit_cifar10(classes=10, **kwargs):
"""
WRN-20-10-32bit model for CIFAR-10 from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_wrn1bit_cifar(classes=classes, blocks=20, width_factor=10, binarized=False,
model_name="wrn20_10_32bit_cifar10", **kwargs)
def wrn20_10_32bit_cifar100(classes=100, **kwargs):
"""
WRN-20-10-32bit model for CIFAR-100 from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_wrn1bit_cifar(classes=classes, blocks=20, width_factor=10, binarized=False,
model_name="wrn20_10_32bit_cifar100", **kwargs)
def wrn20_10_32bit_svhn(classes=10, **kwargs):
"""
WRN-20-10-32bit model for SVHN from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_wrn1bit_cifar(classes=classes, blocks=20, width_factor=10, binarized=False,
model_name="wrn20_10_32bit_svhn", **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
(wrn20_10_1bit_cifar10, 10),
(wrn20_10_1bit_cifar100, 100),
(wrn20_10_1bit_svhn, 10),
(wrn20_10_32bit_cifar10, 10),
(wrn20_10_32bit_cifar100, 100),
(wrn20_10_32bit_svhn, 10),
]
for model, classes in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != wrn20_10_1bit_cifar10 or weight_count == 26737140)
assert (model != wrn20_10_1bit_cifar100 or weight_count == 26794920)
assert (model != wrn20_10_1bit_svhn or weight_count == 26737140)
assert (model != wrn20_10_32bit_cifar10 or weight_count == 26737140)
assert (model != wrn20_10_32bit_cifar100 or weight_count == 26794920)
assert (model != wrn20_10_32bit_svhn or weight_count == 26737140)
x = mx.nd.zeros((1, 3, 32, 32), ctx=ctx)
y = net(x)
assert (y.shape == (1, classes))
if __name__ == "__main__":
_test()
| 28,170 | 32.657109 | 115 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/condensenet.py | """
CondenseNet for ImageNet-1K, implemented in Gluon.
Original paper: 'CondenseNet: An Efficient DenseNet using Learned Group Convolutions,'
https://arxiv.org/abs/1711.09224.
"""
__all__ = ['CondenseNet', 'condensenet74_c4_g4', 'condensenet74_c8_g8']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import ChannelShuffle
class CondenseSimpleConv(HybridBlock):
"""
CondenseNet specific simple convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of 2 int
Convolution window size.
strides : int or tuple/list of 2 int
Strides of the convolution.
padding : int or tuple/list of 2 int
Padding value for convolution layer.
groups : int
Number of groups.
bn_use_global_stats : bool
        Whether global moving statistics are used instead of local batch statistics in BatchNorm layers.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
strides,
padding,
groups,
bn_use_global_stats,
**kwargs):
super(CondenseSimpleConv, self).__init__(**kwargs)
with self.name_scope():
self.bn = nn.BatchNorm(
in_channels=in_channels,
use_global_stats=bn_use_global_stats)
self.activ = nn.Activation("relu")
self.conv = nn.Conv2D(
channels=out_channels,
kernel_size=kernel_size,
strides=strides,
padding=padding,
groups=groups,
use_bias=False,
in_channels=in_channels)
def hybrid_forward(self, F, x):
x = self.bn(x)
x = self.activ(x)
x = self.conv(x)
return x
def condense_simple_conv3x3(in_channels,
out_channels,
groups,
bn_use_global_stats):
"""
3x3 version of the CondenseNet specific simple convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
groups : int
Number of groups.
bn_use_global_stats : bool
        Whether global moving statistics are used instead of local batch statistics in BatchNorm layers.
"""
return CondenseSimpleConv(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
strides=1,
padding=1,
groups=groups,
bn_use_global_stats=bn_use_global_stats)
class CondenseComplexConv(HybridBlock):
"""
CondenseNet specific complex convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of 2 int
Convolution window size.
strides : int or tuple/list of 2 int
Strides of the convolution.
padding : int or tuple/list of 2 int
Padding value for convolution layer.
groups : int
Number of groups.
bn_use_global_stats : bool
        Whether global moving statistics are used instead of local batch statistics in BatchNorm layers.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
strides,
padding,
groups,
bn_use_global_stats,
**kwargs):
super(CondenseComplexConv, self).__init__(**kwargs)
with self.name_scope():
self.bn = nn.BatchNorm(
in_channels=in_channels,
use_global_stats=bn_use_global_stats)
self.activ = nn.Activation("relu")
self.conv = nn.Conv2D(
channels=out_channels,
kernel_size=kernel_size,
strides=strides,
padding=padding,
groups=groups,
use_bias=False,
in_channels=in_channels)
self.c_shuffle = ChannelShuffle(
channels=out_channels,
groups=groups)
self.index = self.params.get(
"index",
grad_req="null",
shape=(in_channels,),
init="zeros",
allow_deferred_init=True,
differentiable=False)
def hybrid_forward(self, F, x, index):
x = F.take(x, index, axis=1)
x = self.bn(x)
x = self.activ(x)
x = self.conv(x)
x = self.c_shuffle(x)
return x
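# Gather sketch (added comment): `index` is a non-trainable parameter meant
# to hold the channel selection/permutation produced by condensation
# training (it arrives with the pretrained weights; here it is only applied,
# not learned). F.take(x, index, axis=1) reorders the input channels before
# the grouped convolution, and ChannelShuffle remixes the groups afterwards.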
def condense_complex_conv1x1(in_channels,
out_channels,
groups,
bn_use_global_stats):
"""
1x1 version of the CondenseNet specific complex convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
groups : int
Number of groups.
bn_use_global_stats : bool
        Whether global moving statistics are used instead of local batch statistics in BatchNorm layers.
"""
return CondenseComplexConv(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=1,
strides=1,
padding=0,
groups=groups,
bn_use_global_stats=bn_use_global_stats)
class CondenseUnit(HybridBlock):
"""
CondenseNet unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
groups : int
Number of groups.
bn_use_global_stats : bool
        Whether global moving statistics are used instead of local batch statistics in BatchNorm layers.
"""
def __init__(self,
in_channels,
out_channels,
groups,
bn_use_global_stats,
**kwargs):
super(CondenseUnit, self).__init__(**kwargs)
bottleneck_size = 4
inc_channels = out_channels - in_channels
mid_channels = inc_channels * bottleneck_size
with self.name_scope():
self.conv1 = condense_complex_conv1x1(
in_channels=in_channels,
out_channels=mid_channels,
groups=groups,
bn_use_global_stats=bn_use_global_stats)
self.conv2 = condense_simple_conv3x3(
in_channels=mid_channels,
out_channels=inc_channels,
groups=groups,
bn_use_global_stats=bn_use_global_stats)
def hybrid_forward(self, F, x):
identity = x
x = self.conv1(x)
x = self.conv2(x)
x = F.concat(identity, x, dim=1)
return x
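# Shape sketch (added comment): a CondenseUnit is DenseNet-like; it computes
# inc_channels = out_channels - in_channels new features through a grouped
# 1x1 bottleneck (4x expansion) and a grouped 3x3 conv, then concatenates
# them with the unchanged input.
#
#   import mxnet as mx
#   unit = CondenseUnit(in_channels=16, out_channels=24, groups=4,
#                       bn_use_global_stats=False)
#   unit.initialize()
#   assert unit(mx.nd.zeros((1, 16, 56, 56))).shape == (1, 24, 56, 56)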
class TransitionBlock(HybridBlock):
"""
CondenseNet's auxiliary block, which can be treated as the initial part of the DenseNet unit, triggered only in the
first unit of each stage.
"""
def __init__(self,
**kwargs):
super(TransitionBlock, self).__init__(**kwargs)
with self.name_scope():
self.pool = nn.AvgPool2D(
pool_size=2,
strides=2,
padding=0)
def hybrid_forward(self, F, x):
x = self.pool(x)
return x
class CondenseInitBlock(HybridBlock):
"""
CondenseNet specific initial block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
"""
def __init__(self,
in_channels,
out_channels,
**kwargs):
super(CondenseInitBlock, self).__init__(**kwargs)
with self.name_scope():
self.conv = nn.Conv2D(
channels=out_channels,
kernel_size=3,
strides=2,
padding=1,
use_bias=False,
in_channels=in_channels)
def hybrid_forward(self, F, x):
x = self.conv(x)
return x
class PostActivation(HybridBlock):
"""
    CondenseNet final block, which performs the same function as the post-activation in PreResNet.
Parameters:
----------
in_channels : int
Number of input channels.
bn_use_global_stats : bool
        Whether global moving statistics are used instead of local batch statistics in BatchNorm layers.
"""
def __init__(self,
in_channels,
bn_use_global_stats,
**kwargs):
super(PostActivation, self).__init__(**kwargs)
with self.name_scope():
self.bn = nn.BatchNorm(
in_channels=in_channels,
use_global_stats=bn_use_global_stats)
self.activ = nn.Activation("relu")
def hybrid_forward(self, F, x):
x = self.bn(x)
x = self.activ(x)
return x
class CondenseDense(HybridBlock):
"""
CondenseNet specific dense block.
Parameters:
----------
units : int
Number of output channels.
in_units : int
Number of input channels.
drop_rate : float
        Fraction of input channels to drop.
"""
def __init__(self,
units,
in_units,
drop_rate=0.5,
**kwargs):
super(CondenseDense, self).__init__(**kwargs)
drop_in_units = int(in_units * drop_rate)
with self.name_scope():
self.dense = nn.Dense(
units=units,
in_units=drop_in_units)
self.index = self.params.get(
"index",
grad_req="null",
shape=(drop_in_units,),
init="zeros",
allow_deferred_init=True,
differentiable=False)
def hybrid_forward(self, F, x, index):
x = F.take(x, index, axis=1)
x = self.dense(x)
return x
class CondenseNet(HybridBlock):
"""
CondenseNet model (converted) from 'CondenseNet: An Efficient DenseNet using Learned Group Convolutions,'
https://arxiv.org/abs/1711.09224.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
groups : int
Number of groups in convolution layers.
bn_use_global_stats : bool, default False
        Whether global moving statistics are used instead of local batch statistics in BatchNorm layers.
Useful for fine-tuning.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
groups,
bn_use_global_stats=False,
in_channels=3,
in_size=(224, 224),
classes=1000,
**kwargs):
super(CondenseNet, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
self.features.add(CondenseInitBlock(
in_channels=in_channels,
out_channels=init_block_channels))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
with stage.name_scope():
if i != 0:
stage.add(TransitionBlock())
for j, out_channels in enumerate(channels_per_stage):
stage.add(CondenseUnit(
in_channels=in_channels,
out_channels=out_channels,
groups=groups,
bn_use_global_stats=bn_use_global_stats))
in_channels = out_channels
self.features.add(stage)
self.features.add(PostActivation(
in_channels=in_channels,
bn_use_global_stats=bn_use_global_stats))
self.features.add(nn.AvgPool2D(
pool_size=7,
strides=1))
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
self.output.add(CondenseDense(
units=classes,
in_units=in_channels))
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_condensenet(num_layers,
groups=4,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create CondenseNet (converted) model with specific parameters.
Parameters:
----------
num_layers : int
Number of layers.
groups : int
Number of groups in convolution layers.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
if num_layers == 74:
init_block_channels = 16
layers = [4, 6, 8, 10, 8]
growth_rates = [8, 16, 32, 64, 128]
else:
raise ValueError("Unsupported CondenseNet version with number of layers {}".format(num_layers))
from functools import reduce
channels = reduce(lambda xi, yi:
xi + [reduce(lambda xj, yj:
xj + [xj[-1] + yj],
[yi[1]] * yi[0],
[xi[-1][-1]])[1:]],
zip(layers, growth_rates),
[[init_block_channels]])[1:]
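    # Worked example (added comment): the nested reduce turns the per-stage
    # (layers, growth_rate) pairs into cumulative channel counts. For
    # num_layers=74 the first stage is [24, 32, 40, 48] (16 plus 8 per unit),
    # the second continues from 48 with growth 16 ([64, 80, ..., 144]), and
    # the final unit of the last stage reaches 2064 channels.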
net = CondenseNet(
channels=channels,
init_block_channels=init_block_channels,
groups=groups,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def condensenet74_c4_g4(**kwargs):
"""
CondenseNet-74 (C=G=4) model (converted) from 'CondenseNet: An Efficient DenseNet using Learned Group Convolutions,'
https://arxiv.org/abs/1711.09224.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_condensenet(num_layers=74, groups=4, model_name="condensenet74_c4_g4", **kwargs)
def condensenet74_c8_g8(**kwargs):
"""
CondenseNet-74 (C=G=8) model (converted) from 'CondenseNet: An Efficient DenseNet using Learned Group Convolutions,'
https://arxiv.org/abs/1711.09224.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_condensenet(num_layers=74, groups=8, model_name="condensenet74_c8_g8", **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
condensenet74_c4_g4,
condensenet74_c8_g8,
]
for model in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
assert (model != condensenet74_c4_g4 or weight_count == 4773944)
assert (model != condensenet74_c8_g8 or weight_count == 2935416)
x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
y = net(x)
assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
| 17,278 | 30.245931 | 120 | py |