imgclsmob | imgclsmob-master/gluon/gluoncv2/models/zfnet.py
"""
ZFNet for ImageNet-1K, implemented in Gluon.
Original paper: 'Visualizing and Understanding Convolutional Networks,' https://arxiv.org/abs/1311.2901.
"""
__all__ = ['zfnet', 'zfnetb']
import os
from mxnet import cpu
from .alexnet import AlexNet
def get_zfnet(version="a",
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create ZFNet model with specific parameters.
Parameters:
----------
version : str, default 'a'
Version of ZFNet ('a' or 'b').
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
if version == "a":
channels = [[96], [256], [384, 384, 256]]
kernel_sizes = [[7], [5], [3, 3, 3]]
strides = [[2], [2], [1, 1, 1]]
paddings = [[1], [0], [1, 1, 1]]
use_lrn = True
elif version == "b":
channels = [[96], [256], [512, 1024, 512]]
kernel_sizes = [[7], [5], [3, 3, 3]]
strides = [[2], [2], [1, 1, 1]]
paddings = [[1], [0], [1, 1, 1]]
use_lrn = True
else:
raise ValueError("Unsupported ZFNet version {}".format(version))
net = AlexNet(
channels=channels,
kernel_sizes=kernel_sizes,
strides=strides,
paddings=paddings,
use_lrn=use_lrn,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def zfnet(**kwargs):
"""
ZFNet model from 'Visualizing and Understanding Convolutional Networks,' https://arxiv.org/abs/1311.2901.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_zfnet(model_name="zfnet", **kwargs)
def zfnetb(**kwargs):
"""
ZFNet-b model from 'Visualizing and Understanding Convolutional Networks,' https://arxiv.org/abs/1311.2901.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_zfnet(version="b", model_name="zfnetb", **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
zfnet,
zfnetb,
]
for model in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != zfnet or weight_count == 62357608)
assert (model != zfnetb or weight_count == 107627624)
x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
y = net(x)
assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
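# Usage sketch (not part of the original file; assumes the standard Gluon
# workflow with `mxnet` installed):
#
#   import mxnet as mx
#   net = zfnet(pretrained=False)
#   net.initialize(ctx=mx.cpu())
#   net.hybridize()  # cache a symbolic graph for faster inference
#   y = net(mx.nd.zeros((1, 3, 224, 224)))  # (1, 1000) class logits
#   net.export("zfnet", epoch=0)  # writes zfnet-symbol.json / zfnet-0000.params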
[file_length: 4,058 | avg_line_length: 28.201439 | max_line_length: 115 | extension: py]
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/peleenet.py
"""
PeleeNet for ImageNet-1K, implemented in Gluon.
Original paper: 'Pelee: A Real-Time Object Detection System on Mobile Devices,' https://arxiv.org/abs/1804.06882.
"""
__all__ = ['PeleeNet', 'peleenet']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from mxnet.gluon.contrib.nn import HybridConcurrent
from .common import conv1x1_block, conv3x3_block
class PeleeBranch1(HybridBlock):
"""
PeleeNet branch type 1 block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
mid_channels : int
Number of intermediate channels.
strides : int or tuple/list of 2 int, default 1
Strides of the second convolution.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels,
out_channels,
mid_channels,
strides=1,
bn_use_global_stats=False,
**kwargs):
super(PeleeBranch1, self).__init__(**kwargs)
with self.name_scope():
self.conv1 = conv1x1_block(
in_channels=in_channels,
out_channels=mid_channels,
bn_use_global_stats=bn_use_global_stats)
self.conv2 = conv3x3_block(
in_channels=mid_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats)
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.conv2(x)
return x
class PeleeBranch2(HybridBlock):
"""
PeleeNet branch type 2 block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
mid_channels : int
Number of intermediate channels.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels,
out_channels,
mid_channels,
bn_use_global_stats,
**kwargs):
super(PeleeBranch2, self).__init__(**kwargs)
with self.name_scope():
self.conv1 = conv1x1_block(
in_channels=in_channels,
out_channels=mid_channels,
bn_use_global_stats=bn_use_global_stats)
self.conv2 = conv3x3_block(
in_channels=mid_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats)
self.conv3 = conv3x3_block(
in_channels=out_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats)
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
return x
class StemBlock(HybridBlock):
"""
PeleeNet stem block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels,
out_channels,
bn_use_global_stats,
**kwargs):
super(StemBlock, self).__init__(**kwargs)
mid1_channels = out_channels // 2
mid2_channels = out_channels * 2
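# The two stem branches (the strided-conv branch and the max-pool branch)
# each keep out_channels maps, so their concatenation feeds
# mid2_channels = 2 * out_channels into last_conv.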
with self.name_scope():
self.first_conv = conv3x3_block(
in_channels=in_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats,
strides=2)
self.branches = HybridConcurrent(axis=1, prefix="")
self.branches.add(PeleeBranch1(
in_channels=out_channels,
out_channels=out_channels,
mid_channels=mid1_channels,
strides=2,
bn_use_global_stats=bn_use_global_stats))
self.branches.add(nn.MaxPool2D(
pool_size=2,
strides=2,
padding=0))
self.last_conv = conv1x1_block(
in_channels=mid2_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats)
def hybrid_forward(self, F, x):
x = self.first_conv(x)
x = self.branches(x)
x = self.last_conv(x)
return x
class DenseBlock(HybridBlock):
"""
PeleeNet dense block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
bottleneck_size : int
Bottleneck width.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels,
out_channels,
bottleneck_size,
bn_use_global_stats,
**kwargs):
super(DenseBlock, self).__init__(**kwargs)
inc_channels = (out_channels - in_channels) // 2
mid_channels = inc_channels * bottleneck_size
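# Each of the two branches emits `inc_channels` feature maps, so after the
# concatenation in hybrid_forward the output has
# in_channels + 2 * inc_channels = out_channels channels.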
with self.name_scope():
self.branch1 = PeleeBranch1(
in_channels=in_channels,
out_channels=inc_channels,
mid_channels=mid_channels,
bn_use_global_stats=bn_use_global_stats)
self.branch2 = PeleeBranch2(
in_channels=in_channels,
out_channels=inc_channels,
mid_channels=mid_channels,
bn_use_global_stats=bn_use_global_stats)
def hybrid_forward(self, F, x):
x1 = self.branch1(x)
x2 = self.branch2(x)
x = F.concat(x, x1, x2, dim=1)
return x
class TransitionBlock(HybridBlock):
"""
PeleeNet's transition block, like in DenseNet, but with an ordinary convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels,
out_channels,
bn_use_global_stats,
**kwargs):
super(TransitionBlock, self).__init__(**kwargs)
with self.name_scope():
self.conv = conv1x1_block(
in_channels=in_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats)
self.pool = nn.AvgPool2D(
pool_size=2,
strides=2,
padding=0)
def hybrid_forward(self, F, x):
x = self.conv(x)
x = self.pool(x)
return x
class PeleeNet(HybridBlock):
"""
PeleeNet model from 'Pelee: A Real-Time Object Detection System on Mobile Devices,'
https://arxiv.org/abs/1804.06882.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
bottleneck_sizes : list of int
Bottleneck sizes for each stage.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
dropout_rate : float, default 0.5
Parameter of Dropout layer. Fraction of the input units to drop.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
bottleneck_sizes,
bn_use_global_stats=False,
dropout_rate=0.5,
in_channels=3,
in_size=(224, 224),
classes=1000,
**kwargs):
super(PeleeNet, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
self.features.add(StemBlock(
in_channels=in_channels,
out_channels=init_block_channels,
bn_use_global_stats=bn_use_global_stats))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
bottleneck_size = bottleneck_sizes[i]
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
with stage.name_scope():
if i != 0:
stage.add(TransitionBlock(
in_channels=in_channels,
out_channels=in_channels,
bn_use_global_stats=bn_use_global_stats))
for j, out_channels in enumerate(channels_per_stage):
stage.add(DenseBlock(
in_channels=in_channels,
out_channels=out_channels,
bottleneck_size=bottleneck_size,
bn_use_global_stats=bn_use_global_stats))
in_channels = out_channels
self.features.add(stage)
self.features.add(conv1x1_block(
in_channels=in_channels,
out_channels=in_channels,
bn_use_global_stats=bn_use_global_stats))
self.features.add(nn.AvgPool2D(
pool_size=7,
strides=1))
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
self.output.add(nn.Dropout(rate=dropout_rate))
self.output.add(nn.Dense(
units=classes,
in_units=in_channels))
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_peleenet(model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create PeleeNet model with specific parameters.
Parameters:
----------
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
init_block_channels = 32
growth_rate = 32
layers = [3, 4, 8, 6]
bottleneck_sizes = [1, 2, 4, 4]
from functools import reduce
channels = reduce(
lambda xi, yi: xi + [reduce(
lambda xj, yj: xj + [xj[-1] + yj],
[growth_rate] * yi,
[xi[-1][-1]])[1:]],
layers,
[[init_block_channels]])[1:]
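# The reduce above is equivalent to this explicit loop (shown for clarity):
#
#   channels = []
#   prev = init_block_channels
#   for num_layers in layers:
#       stage = []
#       for _ in range(num_layers):
#           prev += growth_rate
#           stage.append(prev)
#       channels.append(stage)
#
# i.e. every dense block adds `growth_rate` channels on top of the previous
# block's output, and each stage continues from where the previous one ended.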
net = PeleeNet(
channels=channels,
init_block_channels=init_block_channels,
bottleneck_sizes=bottleneck_sizes,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def peleenet(**kwargs):
"""
PeleeNet model from 'Pelee: A Real-Time Object Detection System on Mobile Devices,'
https://arxiv.org/abs/1804.06882.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_peleenet(model_name="peleenet", **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
peleenet,
]
for model in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != peleenet or weight_count == 2802248)
x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
y = net(x)
assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
[file_length: 13,545 | avg_line_length: 31.252381 | max_line_length: 117 | extension: py]
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/__init__.py
[file_length: 0 | avg_line_length: 0 | max_line_length: 0 | extension: py] (empty file)
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/regnetv.py
"""
RegNetV for ImageNet-1K, implemented in Gluon.
Original paper: 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.
"""
__all__ = ['RegNetV', 'regnetv002', 'regnetv004', 'regnetv006', 'regnetv008', 'regnetv016', 'regnetv032', 'regnetv040',
'regnetv064', 'regnetv080', 'regnetv120', 'regnetv160', 'regnetv320']
import os
import numpy as np
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1_block, conv3x3_block, dwsconv3x3_block
class DownBlock(HybridBlock):
"""
ResNet(A)-like downsample block for the identity branch of a residual unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
"""
def __init__(self,
in_channels,
out_channels,
strides,
bn_use_global_stats=False,
bn_cudnn_off=False,
**kwargs):
super(DownBlock, self).__init__(**kwargs)
with self.name_scope():
self.pool = nn.AvgPool2D(
pool_size=strides,
strides=strides,
ceil_mode=True,
count_include_pad=False)
self.conv = conv1x1_block(
in_channels=in_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
activation=None)
def hybrid_forward(self, F, x):
x = self.pool(x)
x = self.conv(x)
return x
class RegNetVUnit(HybridBlock):
"""
RegNetV unit with residual connection.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
downscale : bool
Whether to downscale tensor.
dw_use_bn : bool
Whether to use BatchNorm layer (depthwise convolution block).
dw_activation : function or str or None
Activation function after the depthwise convolution block.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
"""
def __init__(self,
in_channels,
out_channels,
downscale,
dw_use_bn,
dw_activation,
bn_use_global_stats=False,
bn_cudnn_off=False,
**kwargs):
super(RegNetVUnit, self).__init__(**kwargs)
self.downscale = downscale
with self.name_scope():
self.conv1 = conv1x1_block(
in_channels=in_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off)
if self.downscale:
self.pool = nn.AvgPool2D(
pool_size=3,
strides=2,
padding=1)
self.conv2 = dwsconv3x3_block(
in_channels=out_channels,
out_channels=out_channels,
dw_use_bn=dw_use_bn,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
dw_activation=dw_activation,
pw_activation=None)
if self.downscale:
self.identity_block = DownBlock(
in_channels=in_channels,
out_channels=out_channels,
strides=2,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off)
self.activ = nn.Activation("relu")
def hybrid_forward(self, F, x):
if self.downscale:
identity = self.identity_block(x)
else:
identity = x
x = self.conv1(x)
if self.downscale:
x = self.pool(x)
x = self.conv2(x)
x = x + identity
x = self.activ(x)
return x
class RegNetVInitBlock(HybridBlock):
"""
RegNetV specific initial block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
"""
def __init__(self,
in_channels,
out_channels,
bn_use_global_stats=False,
bn_cudnn_off=False,
**kwargs):
super(RegNetVInitBlock, self).__init__(**kwargs)
mid_channels = out_channels // 2
with self.name_scope():
self.conv1 = conv3x3_block(
in_channels=in_channels,
out_channels=mid_channels,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off)
self.pool = nn.MaxPool2D(
pool_size=3,
strides=2,
padding=1)
self.conv2 = conv3x3_block(
in_channels=mid_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off)
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.pool(x)
x = self.conv2(x)
return x
class RegNetV(HybridBlock):
"""
RegNet model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
dw_use_bn : bool, default True
Whether to use BatchNorm layer (depthwise convolution block).
dw_activation : function or str or None, default nn.Activation('relu')
Activation function after the depthwise convolution block.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
dw_use_bn=True,
dw_activation=(lambda: nn.Activation("relu")),
bn_use_global_stats=False,
bn_cudnn_off=False,
in_channels=3,
in_size=(224, 224),
classes=1000,
**kwargs):
super(RegNetV, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
self.features.add(RegNetVInitBlock(
in_channels=in_channels,
out_channels=init_block_channels,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
with stage.name_scope():
for j, out_channels in enumerate(channels_per_stage):
downscale = (j == 0)
stage.add(RegNetVUnit(
in_channels=in_channels,
out_channels=out_channels,
downscale=downscale,
dw_use_bn=dw_use_bn,
dw_activation=dw_activation,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off))
in_channels = out_channels
self.features.add(stage)
self.features.add(nn.AvgPool2D(
pool_size=7,
strides=1))
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
self.output.add(nn.Dense(
units=classes,
in_units=in_channels))
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_regnet(channels_init,
channels_slope,
channels_mult,
depth,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create RegNet model with specific parameters.
Parameters:
----------
channels_init : float
Initial value for channels/widths.
channels_slope : float
Slope value for channels/widths.
channels_mult : float
Width multiplier value.
depth : int
Depth value.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
divisor = 8
assert (channels_slope >= 0) and (channels_init > 0) and (channels_mult > 1) and (channels_init % divisor == 0)
# Generate continuous per-block channels/widths:
channels_cont = np.arange(depth) * channels_slope + channels_init
# Generate quantized per-block channels/widths:
channels_exps = np.round(np.log(channels_cont / channels_init) / np.log(channels_mult))
channels = channels_init * np.power(channels_mult, channels_exps)
channels = (np.round(channels / divisor) * divisor).astype(int)
# Generate per stage channels/widths and layers/depths:
channels_per_stage, layers = np.unique(channels, return_counts=True)
channels = [[ci] * li for (ci, li) in zip(channels_per_stage, layers)]
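# Worked example (regnetv002: channels_init=24, channels_slope=36.44,
# channels_mult=2.49, depth=13): the 13 continuous widths are quantized to
# powers 24 * 2.49**e with e in {0, 1, 2, 3}, which after rounding to a
# multiple of 8 gives per-stage widths [24, 56, 152, 368] with per-stage
# depths [1, 1, 4, 7].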
init_block_channels = 32
dws_simplified = True
if dws_simplified:
dw_use_bn = False
dw_activation = None
else:
dw_use_bn = True
dw_activation = (lambda: nn.Activation("relu"))
net = RegNetV(
channels=channels,
init_block_channels=init_block_channels,
dw_use_bn=dw_use_bn,
dw_activation=dw_activation,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def regnetv002(**kwargs):
"""
RegNetV-200MF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_regnet(channels_init=24, channels_slope=36.44, channels_mult=2.49, depth=13,
model_name="regnetv002", **kwargs)
def regnetv004(**kwargs):
"""
RegNetV-400MF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_regnet(channels_init=24, channels_slope=24.48, channels_mult=2.54, depth=22,
model_name="regnetv004", **kwargs)
def regnetv006(**kwargs):
"""
RegNetV-600MF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_regnet(channels_init=48, channels_slope=36.97, channels_mult=2.24, depth=16,
model_name="regnetv006", **kwargs)
def regnetv008(**kwargs):
"""
RegNetV-800MF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_regnet(channels_init=56, channels_slope=35.73, channels_mult=2.28, depth=16,
model_name="regnetv008", **kwargs)
def regnetv016(**kwargs):
"""
RegNetV-1.6GF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_regnet(channels_init=80, channels_slope=34.01, channels_mult=2.25, depth=18,
model_name="regnetv016", **kwargs)
def regnetv032(**kwargs):
"""
RegNetV-3.2GF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_regnet(channels_init=88, channels_slope=26.31, channels_mult=2.25, depth=25,
model_name="regnetv032", **kwargs)
def regnetv040(**kwargs):
"""
RegNetV-4.0GF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_regnet(channels_init=96, channels_slope=38.65, channels_mult=2.43, depth=23,
model_name="regnetv040", **kwargs)
def regnetv064(**kwargs):
"""
RegNetV-6.4GF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_regnet(channels_init=184, channels_slope=60.83, channels_mult=2.07, depth=17,
model_name="regnetv064", **kwargs)
def regnetv080(**kwargs):
"""
RegNetV-8.0GF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_regnet(channels_init=80, channels_slope=49.56, channels_mult=2.88, depth=23,
model_name="regnetv080", **kwargs)
def regnetv120(**kwargs):
"""
RegNetV-12GF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_regnet(channels_init=168, channels_slope=73.36, channels_mult=2.37, depth=19,
model_name="regnetv120", **kwargs)
def regnetv160(**kwargs):
"""
RegNetV-16GF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_regnet(channels_init=216, channels_slope=55.59, channels_mult=2.1, depth=22,
model_name="regnetv160", **kwargs)
def regnetv320(**kwargs):
"""
RegNetV-32GF model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_regnet(channels_init=320, channels_slope=69.86, channels_mult=2.0, depth=23,
model_name="regnetv320", **kwargs)
def _test():
import numpy as np
import mxnet as mx
dws_simplified = True
pretrained = False
models = [
regnetv002,
regnetv004,
regnetv006,
regnetv008,
regnetv016,
regnetv032,
regnetv040,
regnetv064,
regnetv080,
regnetv120,
regnetv160,
regnetv320,
]
for model in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
if dws_simplified:
assert (model != regnetv002 or weight_count == 2476840)
assert (model != regnetv004 or weight_count == 4467080)
assert (model != regnetv006 or weight_count == 5242936)
assert (model != regnetv008 or weight_count == 6353000)
assert (model != regnetv016 or weight_count == 7824440)
assert (model != regnetv032 or weight_count == 11540536)
assert (model != regnetv040 or weight_count == 18323824)
assert (model != regnetv064 or weight_count == 20854680)
assert (model != regnetv080 or weight_count == 21930224)
assert (model != regnetv120 or weight_count == 32833720)
assert (model != regnetv160 or weight_count == 36213360)
assert (model != regnetv320 or weight_count == 64659576)
else:
assert (model != regnetv002 or weight_count == 2479160)
assert (model != regnetv004 or weight_count == 4474712)
assert (model != regnetv006 or weight_count == 5249352)
assert (model != regnetv008 or weight_count == 6360344)
assert (model != regnetv016 or weight_count == 7833768)
assert (model != regnetv032 or weight_count == 11556520)
assert (model != regnetv040 or weight_count == 18343728)
assert (model != regnetv064 or weight_count == 20873384)
assert (model != regnetv080 or weight_count == 21952400)
assert (model != regnetv120 or weight_count == 32859432)
assert (model != regnetv160 or weight_count == 36244240)
assert (model != regnetv320 or weight_count == 64704008)
batch = 14
size = 224
x = mx.nd.zeros((batch, 3, size, size), ctx=ctx)
y = net(x)
assert (y.shape == (batch, 1000))
if __name__ == "__main__":
_test()
[file_length: 21,904 | avg_line_length: 34.444984 | max_line_length: 119 | extension: py]
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/sharesnet.py
"""
ShaResNet for ImageNet-1K, implemented in Gluon.
Original paper: 'ShaResNet: reducing residual network parameter number by sharing weights,'
https://arxiv.org/abs/1702.08782.
"""
__all__ = ['ShaResNet', 'sharesnet18', 'sharesnet34', 'sharesnet50', 'sharesnet50b', 'sharesnet101', 'sharesnet101b',
'sharesnet152', 'sharesnet152b']
import os
from inspect import isfunction
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import ReLU6, conv1x1_block, conv3x3_block
from .resnet import ResInitBlock
class ShaConvBlock(HybridBlock):
"""
Shared convolution block with Batch normalization and ReLU/ReLU6 activation.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of 2 int
Convolution window size.
strides : int or tuple/list of 2 int
Strides of the convolution.
padding : int or tuple/list of 2 int
Padding value for convolution layer.
dilation : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
groups : int, default 1
Number of groups.
use_bias : bool, default False
Whether the layer uses a bias vector.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
activation : function or str or None, default nn.Activation("relu")
Activation function or name of activation function.
activate : bool, default True
Whether to activate the convolution block.
shared_conv : HybridBlock, default None
Shared convolution layer.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
strides,
padding,
dilation=1,
groups=1,
use_bias=False,
bn_use_global_stats=False,
activation=(lambda: nn.Activation("relu")),
activate=True,
shared_conv=None,
**kwargs):
super(ShaConvBlock, self).__init__(**kwargs)
self.activate = activate
with self.name_scope():
if shared_conv is None:
self.conv = nn.Conv2D(
channels=out_channels,
kernel_size=kernel_size,
strides=strides,
padding=padding,
dilation=dilation,
groups=groups,
use_bias=use_bias,
in_channels=in_channels)
else:
self.conv = shared_conv
self.bn = nn.BatchNorm(
in_channels=out_channels,
use_global_stats=bn_use_global_stats)
if self.activate:
assert (activation is not None)
if isfunction(activation):
self.activ = activation()
elif isinstance(activation, str):
if activation == "relu6":
self.activ = ReLU6()
else:
self.activ = nn.Activation(activation)
else:
self.activ = activation
def hybrid_forward(self, F, x):
x = self.conv(x)
x = self.bn(x)
if self.activate:
x = self.activ(x)
return x
def sha_conv3x3_block(in_channels,
out_channels,
strides=1,
padding=1,
dilation=1,
groups=1,
use_bias=False,
bn_use_global_stats=False,
activation=(lambda: nn.Activation("relu")),
activate=True,
shared_conv=None,
**kwargs):
"""
3x3 version of the shared convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int, default 1
Strides of the convolution.
padding : int or tuple/list of 2 int, default 1
Padding value for convolution layer.
dilation : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
groups : int, default 1
Number of groups.
use_bias : bool, default False
Whether the layer uses a bias vector.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
activation : function or str or None, default nn.Activation("relu")
Activation function or name of activation function.
activate : bool, default True
Whether to activate the convolution block.
shared_conv : HybridBlock, default None
Shared convolution layer.
"""
return ShaConvBlock(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
strides=strides,
padding=padding,
dilation=dilation,
groups=groups,
use_bias=use_bias,
bn_use_global_stats=bn_use_global_stats,
activation=activation,
activate=activate,
shared_conv=shared_conv,
**kwargs)
class ShaResBlock(HybridBlock):
"""
Simple ShaResNet block for residual path in ShaResNet unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
shared_conv : HybridBlock, default None
Shared convolution layer.
"""
def __init__(self,
in_channels,
out_channels,
strides,
bn_use_global_stats,
shared_conv=None,
**kwargs):
super(ShaResBlock, self).__init__(**kwargs)
with self.name_scope():
self.conv1 = conv3x3_block(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats)
self.conv2 = sha_conv3x3_block(
in_channels=out_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats,
activation=None,
activate=False,
shared_conv=shared_conv)
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.conv2(x)
return x
class ShaResBottleneck(HybridBlock):
"""
ShaResNet bottleneck block for residual path in ShaResNet unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
bottleneck_factor : int, default 4
Bottleneck factor.
conv1_stride : bool, default False
Whether to use stride in the first or the second convolution layer of the block.
shared_conv : HybridBlock, default None
Shared convolution layer.
"""
def __init__(self,
in_channels,
out_channels,
strides,
bn_use_global_stats=False,
conv1_stride=False,
bottleneck_factor=4,
shared_conv=None,
**kwargs):
super(ShaResBottleneck, self).__init__(**kwargs)
assert (conv1_stride or not ((strides > 1) and (shared_conv is not None)))
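# A shared 3x3 convolution is created with stride 1, so it cannot be reused
# in a unit whose conv2 needs stride > 1; with conv1_stride=True the stride
# is applied in conv1 instead, which keeps conv2 shareable.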
mid_channels = out_channels // bottleneck_factor
with self.name_scope():
self.conv1 = conv1x1_block(
in_channels=in_channels,
out_channels=mid_channels,
strides=(strides if conv1_stride else 1),
bn_use_global_stats=bn_use_global_stats)
self.conv2 = sha_conv3x3_block(
in_channels=mid_channels,
out_channels=mid_channels,
strides=(1 if conv1_stride else strides),
bn_use_global_stats=bn_use_global_stats,
shared_conv=shared_conv)
self.conv3 = conv1x1_block(
in_channels=mid_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats,
activation=None)
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
return x
class ShaResUnit(HybridBlock):
"""
ShaResNet unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
conv1_stride : bool
Whether to use stride in the first or the second convolution layer of the block.
shared_conv : HybridBlock, default None
Shared convolution layer.
"""
def __init__(self,
in_channels,
out_channels,
strides,
bn_use_global_stats,
bottleneck,
conv1_stride,
shared_conv=None,
**kwargs):
super(ShaResUnit, self).__init__(**kwargs)
self.resize_identity = (in_channels != out_channels) or (strides != 1)
with self.name_scope():
if bottleneck:
self.body = ShaResBottleneck(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats,
conv1_stride=conv1_stride,
shared_conv=shared_conv)
else:
self.body = ShaResBlock(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats,
shared_conv=shared_conv)
if self.resize_identity:
self.identity_conv = conv1x1_block(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats,
activation=None)
self.activ = nn.Activation("relu")
def hybrid_forward(self, F, x):
if self.resize_identity:
identity = self.identity_conv(x)
else:
identity = x
x = self.body(x)
x = x + identity
x = self.activ(x)
return x
class ShaResNet(HybridBlock):
"""
ShaResNet model from 'ShaResNet: reducing residual network parameter number by sharing weights,'
https://arxiv.org/abs/1702.08782.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
conv1_stride : bool
Whether to use stride in the first or the second convolution layer in units.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
bottleneck,
conv1_stride,
bn_use_global_stats=False,
in_channels=3,
in_size=(224, 224),
classes=1000,
**kwargs):
super(ShaResNet, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
self.features.add(ResInitBlock(
in_channels=in_channels,
out_channels=init_block_channels,
bn_use_global_stats=bn_use_global_stats))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
shared_conv = None
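# Weight sharing: the first eligible unit in a stage creates its 3x3
# convolution, and every later unit in the same stage reuses that very
# Conv2D object (see the shared_conv plumbing below).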
with stage.name_scope():
for j, out_channels in enumerate(channels_per_stage):
strides = 2 if (j == 0) and (i != 0) else 1
unit = ShaResUnit(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats,
bottleneck=bottleneck,
conv1_stride=conv1_stride,
shared_conv=shared_conv)
if (shared_conv is None) and not (bottleneck and not conv1_stride and strides > 1):
shared_conv = unit.body.conv2.conv
stage.add(unit)
in_channels = out_channels
self.features.add(stage)
self.features.add(nn.AvgPool2D(
pool_size=7,
strides=1))
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
self.output.add(nn.Dense(
units=classes,
in_units=in_channels))
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_sharesnet(blocks,
conv1_stride=True,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create ShaResNet model with specific parameters.
Parameters:
----------
blocks : int
Number of blocks.
conv1_stride : bool, default True
Whether to use stride in the first or the second convolution layer in units.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
if blocks == 18:
layers = [2, 2, 2, 2]
elif blocks == 34:
layers = [3, 4, 6, 3]
elif blocks == 50:
layers = [3, 4, 6, 3]
elif blocks == 101:
layers = [3, 4, 23, 3]
elif blocks == 152:
layers = [3, 8, 36, 3]
elif blocks == 200:
layers = [3, 24, 36, 3]
else:
raise ValueError("Unsupported ShaResNet with number of blocks: {}".format(blocks))
init_block_channels = 64
if blocks < 50:
channels_per_layers = [64, 128, 256, 512]
bottleneck = False
else:
channels_per_layers = [256, 512, 1024, 2048]
bottleneck = True
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
net = ShaResNet(
channels=channels,
init_block_channels=init_block_channels,
bottleneck=bottleneck,
conv1_stride=conv1_stride,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def sharesnet18(**kwargs):
"""
ShaResNet-18 model from 'ShaResNet: reducing residual network parameter number by sharing weights,'
https://arxiv.org/abs/1702.08782.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_sharesnet(blocks=18, model_name="sharesnet18", **kwargs)
def sharesnet34(**kwargs):
"""
ShaResNet-34 model from 'ShaResNet: reducing residual network parameter number by sharing weights,'
https://arxiv.org/abs/1702.08782.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_sharesnet(blocks=34, model_name="sharesnet34", **kwargs)
def sharesnet50(**kwargs):
"""
ShaResNet-50 model from 'ShaResNet: reducing residual network parameter number by sharing weights,'
https://arxiv.org/abs/1702.08782.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_sharesnet(blocks=50, model_name="sharesnet50", **kwargs)
def sharesnet50b(**kwargs):
"""
ShaResNet-50b model with stride at the second convolution in bottleneck block from 'ShaResNet: reducing residual
network parameter number by sharing weights,' https://arxiv.org/abs/1702.08782.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_sharesnet(blocks=50, conv1_stride=False, model_name="sharesnet50b", **kwargs)
def sharesnet101(**kwargs):
"""
ShaResNet-101 model from 'ShaResNet: reducing residual network parameter number by sharing weights,'
https://arxiv.org/abs/1702.08782.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_sharesnet(blocks=101, model_name="sharesnet101", **kwargs)
def sharesnet101b(**kwargs):
"""
ShaResNet-101b model with stride at the second convolution in bottleneck block from 'ShaResNet: reducing residual
network parameter number by sharing weights,' https://arxiv.org/abs/1702.08782.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_sharesnet(blocks=101, conv1_stride=False, model_name="sharesnet101b", **kwargs)
def sharesnet152(**kwargs):
"""
ShaResNet-152 model from 'ShaResNet: reducing residual network parameter number by sharing weights,'
https://arxiv.org/abs/1702.08782.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_sharesnet(blocks=152, model_name="sharesnet152", **kwargs)
def sharesnet152b(**kwargs):
"""
ShaResNet-152b model with stride at the second convolution in bottleneck block from 'ShaResNet: reducing residual
network parameter number by sharing weights,' https://arxiv.org/abs/1702.08782.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_sharesnet(blocks=152, conv1_stride=False, model_name="sharesnet152b", **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
sharesnet18,
sharesnet34,
sharesnet50,
sharesnet50b,
sharesnet101,
sharesnet101b,
sharesnet152,
sharesnet152b,
]
for model in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != sharesnet18 or weight_count == 8556072)
assert (model != sharesnet34 or weight_count == 13613864)
assert (model != sharesnet50 or weight_count == 17373224)
assert (model != sharesnet50b or weight_count == 20469800)
assert (model != sharesnet101 or weight_count == 26338344)
assert (model != sharesnet101b or weight_count == 29434920)
assert (model != sharesnet152 or weight_count == 33724456)
assert (model != sharesnet152b or weight_count == 36821032)
x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
y = net(x)
assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
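# Sharing check sketch (not part of the original file; assumes Gluon's
# HybridSequential indexing and that net.features[1] is the first stage of
# ShaResUnit blocks):
#
#   net = sharesnet18()
#   units = list(net.features[1]._children.values())
#   convs = [u.body.conv2.conv for u in units]
#   assert all(c is convs[0] for c in convs)  # one Conv2D object per stage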
[file_length: 23,240 | avg_line_length: 33.73991 | max_line_length: 117 | extension: py]
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/ibppose_coco.py
"""
IBPPose for COCO Keypoint, implemented in Gluon.
Original paper: 'Simple Pose: Rethinking and Improving a Bottom-up Approach for Multi-Person Pose Estimation,'
https://arxiv.org/abs/1911.10529.
"""
__all__ = ['IbpPose', 'ibppose_coco']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import get_activation_layer, conv1x1_block, conv3x3_block, conv7x7_block, SEBlock, Hourglass,\
InterpolationBlock
class IbpResBottleneck(HybridBlock):
"""
Bottleneck block for residual path in the residual unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
use_bias : bool, default False
Whether the layer uses a bias vector.
bottleneck_factor : int, default 2
Bottleneck factor.
activation : function or str or None, default nn.Activation('relu')
Activation function or name of activation function.
"""
def __init__(self,
in_channels,
out_channels,
strides,
use_bias=False,
bottleneck_factor=2,
activation=(lambda: nn.Activation("relu")),
**kwargs):
super(IbpResBottleneck, self).__init__(**kwargs)
mid_channels = out_channels // bottleneck_factor
with self.name_scope():
self.conv1 = conv1x1_block(
in_channels=in_channels,
out_channels=mid_channels,
use_bias=use_bias,
activation=activation)
self.conv2 = conv3x3_block(
in_channels=mid_channels,
out_channels=mid_channels,
strides=strides,
use_bias=use_bias,
activation=activation)
self.conv3 = conv1x1_block(
in_channels=mid_channels,
out_channels=out_channels,
use_bias=use_bias,
activation=None)
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
return x
class IbpResUnit(HybridBlock):
"""
ResNet-like residual unit with residual connection.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int, default 1
Strides of the convolution.
use_bias : bool, default False
Whether the layer uses a bias vector.
bottleneck_factor : int, default 2
Bottleneck factor.
activation : function or str or None, default nn.Activation('relu')
Activation function or name of activation function.
"""
def __init__(self,
in_channels,
out_channels,
strides=1,
use_bias=False,
bottleneck_factor=2,
activation=(lambda: nn.Activation("relu")),
**kwargs):
super(IbpResUnit, self).__init__(**kwargs)
self.resize_identity = (in_channels != out_channels) or (strides != 1)
with self.name_scope():
self.body = IbpResBottleneck(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
use_bias=use_bias,
bottleneck_factor=bottleneck_factor,
activation=activation)
if self.resize_identity:
self.identity_conv = conv1x1_block(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
use_bias=use_bias,
activation=None)
self.activ = get_activation_layer(activation)
def hybrid_forward(self, F, x):
if self.resize_identity:
identity = self.identity_conv(x)
else:
identity = x
x = self.body(x)
x = x + identity
x = self.activ(x)
return x
class IbpBackbone(HybridBlock):
"""
IBPPose backbone.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
activation : function or str or None
Activation function or name of activation function.
"""
def __init__(self,
in_channels,
out_channels,
activation,
**kwargs):
super(IbpBackbone, self).__init__(**kwargs)
dilations = (3, 3, 4, 4, 5, 5)
mid1_channels = out_channels // 4
mid2_channels = out_channels // 2
with self.name_scope():
self.conv1 = conv7x7_block(
in_channels=in_channels,
out_channels=mid1_channels,
strides=2,
activation=activation)
self.res1 = IbpResUnit(
in_channels=mid1_channels,
out_channels=mid2_channels,
activation=activation)
self.pool = nn.MaxPool2D(
pool_size=2,
strides=2)
self.res2 = IbpResUnit(
in_channels=mid2_channels,
out_channels=mid2_channels,
activation=activation)
self.dilation_branch = nn.HybridSequential(prefix="")
for dilation in dilations:
self.dilation_branch.add(conv3x3_block(
in_channels=mid2_channels,
out_channels=mid2_channels,
padding=dilation,
dilation=dilation,
activation=activation))
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.res1(x)
x = self.pool(x)
x = self.res2(x)
y = self.dilation_branch(x)
x = F.concat(x, y, dim=1)
return x
class IbpDownBlock(HybridBlock):
"""
IBPPose down block for the hourglass.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
activation : function or str or None
Activation function or name of activation function.
"""
def __init__(self,
in_channels,
out_channels,
activation,
**kwargs):
super(IbpDownBlock, self).__init__(**kwargs)
with self.name_scope():
self.down = nn.MaxPool2D(
pool_size=2,
strides=2)
self.res = IbpResUnit(
in_channels=in_channels,
out_channels=out_channels,
activation=activation)
def hybrid_forward(self, F, x):
x = self.down(x)
x = self.res(x)
return x
class IbpUpBlock(HybridBlock):
"""
IBPPose up block for the hourglass.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
use_bn : bool
Whether to use BatchNorm layer.
activation : function or str or None
Activation function or name of activation function.
"""
def __init__(self,
in_channels,
out_channels,
use_bn,
activation,
**kwargs):
super(IbpUpBlock, self).__init__(**kwargs)
with self.name_scope():
self.res = IbpResUnit(
in_channels=in_channels,
out_channels=out_channels,
activation=activation)
self.up = InterpolationBlock(
scale_factor=2,
bilinear=False)
self.conv = conv3x3_block(
in_channels=out_channels,
out_channels=out_channels,
use_bias=(not use_bn),
use_bn=use_bn,
activation=activation)
def hybrid_forward(self, F, x):
x = self.res(x)
x = self.up(x)
x = self.conv(x)
return x
class MergeBlock(HybridBlock):
"""
IBPPose merge block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
use_bn : bool
Whether to use BatchNorm layer.
"""
def __init__(self,
in_channels,
out_channels,
use_bn,
**kwargs):
super(MergeBlock, self).__init__(**kwargs)
with self.name_scope():
self.conv = conv1x1_block(
in_channels=in_channels,
out_channels=out_channels,
use_bias=(not use_bn),
use_bn=use_bn,
activation=None)
def hybrid_forward(self, F, x):
return self.conv(x)
class IbpPreBlock(HybridBlock):
"""
IBPPose preliminary decoder block.
Parameters:
----------
out_channels : int
Number of output channels.
use_bn : bool
Whether to use BatchNorm layer.
activation : function or str or None
Activation function or name of activation function.
"""
def __init__(self,
out_channels,
use_bn,
activation,
**kwargs):
super(IbpPreBlock, self).__init__(**kwargs)
with self.name_scope():
self.conv1 = conv3x3_block(
in_channels=out_channels,
out_channels=out_channels,
use_bias=(not use_bn),
use_bn=use_bn,
activation=activation)
self.conv2 = conv3x3_block(
in_channels=out_channels,
out_channels=out_channels,
use_bias=(not use_bn),
use_bn=use_bn,
activation=activation)
self.se = SEBlock(
channels=out_channels,
use_conv=False,
mid_activation=activation)
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.se(x)
return x
class IbpPass(HybridBlock):
"""
IBPPose single pass decoder block.
Parameters:
----------
channels : int
Number of input/output channels.
mid_channels : int
Number of middle channels.
depth : int
Depth of hourglass.
growth_rate : int
Increment of the number of channels for each hourglass level.
merge : bool
Whether to merge the output into feature maps for the next pass.
use_bn : bool
Whether to use BatchNorm layer.
activation : function or str or None
Activation function or name of activation function.
"""
def __init__(self,
channels,
mid_channels,
depth,
growth_rate,
merge,
use_bn,
activation,
**kwargs):
super(IbpPass, self).__init__(**kwargs)
self.merge = merge
with self.name_scope():
down_seq = nn.HybridSequential(prefix="")
up_seq = nn.HybridSequential(prefix="")
skip_seq = nn.HybridSequential(prefix="")
top_channels = channels
bottom_channels = channels
for i in range(depth + 1):
skip_seq.add(IbpResUnit(
in_channels=top_channels,
out_channels=top_channels,
activation=activation))
bottom_channels += growth_rate
if i < depth:
down_seq.add(IbpDownBlock(
in_channels=top_channels,
out_channels=bottom_channels,
activation=activation))
up_seq.add(IbpUpBlock(
in_channels=bottom_channels,
out_channels=top_channels,
use_bn=use_bn,
activation=activation))
top_channels = bottom_channels
self.hg = Hourglass(
down_seq=down_seq,
up_seq=up_seq,
skip_seq=skip_seq)
self.pre_block = IbpPreBlock(
out_channels=channels,
use_bn=use_bn,
activation=activation)
self.post_block = conv1x1_block(
in_channels=channels,
out_channels=mid_channels,
use_bias=True,
use_bn=False,
activation=None)
if self.merge:
self.pre_merge_block = MergeBlock(
in_channels=channels,
out_channels=channels,
use_bn=use_bn)
self.post_merge_block = MergeBlock(
in_channels=mid_channels,
out_channels=channels,
use_bn=use_bn)
def hybrid_forward(self, F, x, x_prev):
x = self.hg(x)
if x_prev is not None:
x = x + x_prev
y = self.pre_block(x)
z = self.post_block(y)
if self.merge:
z = self.post_merge_block(z) + self.pre_merge_block(y)
return z
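# A minimal illustrative sketch (not part of the original module): it replays
# the channel bookkeeping from `IbpPass.__init__` to show how the hourglass
# widens by `growth_rate` at every level. The defaults below match the
# ibppose_coco settings; the helper name is hypothetical.
def _ibp_pass_channel_sketch(channels=256, depth=4, growth_rate=128):
    top_channels = channels
    bottom_channels = channels
    down_transitions = []
    for i in range(depth + 1):
        bottom_channels += growth_rate
        if i < depth:
            down_transitions.append((top_channels, bottom_channels))
            top_channels = bottom_channels
    # For the defaults: [(256, 384), (384, 512), (512, 640), (640, 768)].
    return down_transitions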
class IbpPose(HybridBlock):
"""
IBPPose model from 'Simple Pose: Rethinking and Improving a Bottom-up Approach for Multi-Person Pose Estimation,'
https://arxiv.org/abs/1911.10529.
Parameters:
----------
passes : int
Number of passes.
backbone_out_channels : int
Number of output channels for the backbone.
outs_channels : int
        Number of output channels for each decoder pass.
depth : int
Depth of hourglass.
growth_rate : int
        Number of channels added at each hourglass level.
use_bn : bool
Whether to use BatchNorm layer.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (256, 256)
Spatial size of the expected input image.
"""
def __init__(self,
passes,
backbone_out_channels,
outs_channels,
depth,
growth_rate,
use_bn,
in_channels=3,
in_size=(256, 256),
**kwargs):
super(IbpPose, self).__init__(**kwargs)
self.in_size = in_size
activation = (lambda: nn.LeakyReLU(alpha=0.01))
with self.name_scope():
self.backbone = IbpBackbone(
in_channels=in_channels,
out_channels=backbone_out_channels,
activation=activation)
self.decoder = nn.HybridSequential(prefix="")
for i in range(passes):
merge = (i != passes - 1)
self.decoder.add(IbpPass(
channels=backbone_out_channels,
mid_channels=outs_channels,
depth=depth,
growth_rate=growth_rate,
merge=merge,
use_bn=use_bn,
activation=activation))
def hybrid_forward(self, F, x):
x = self.backbone(x)
x_prev = None
for block in self.decoder._children.values():
if x_prev is not None:
x = x + x_prev
x_prev = block(x, x_prev)
return x_prev
def get_ibppose(model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create IBPPose model with specific parameters.
Parameters:
----------
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
passes = 4
backbone_out_channels = 256
outs_channels = 50
depth = 4
growth_rate = 128
use_bn = True
net = IbpPose(
passes=passes,
backbone_out_channels=backbone_out_channels,
outs_channels=outs_channels,
depth=depth,
growth_rate=growth_rate,
use_bn=use_bn,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def ibppose_coco(**kwargs):
"""
IBPPose model for COCO Keypoint from 'Simple Pose: Rethinking and Improving a Bottom-up Approach for Multi-Person
Pose Estimation,' https://arxiv.org/abs/1911.10529.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_ibppose(model_name="ibppose_coco", **kwargs)
def _test():
import numpy as np
import mxnet as mx
in_size = (256, 256)
pretrained = False
models = [
ibppose_coco,
]
for model in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != ibppose_coco or weight_count == 95827784)
batch = 14
x = mx.nd.random.normal(shape=(batch, 3, in_size[0], in_size[1]), ctx=ctx)
y = net(x)
        assert (y.shape == (batch, 50, in_size[0] // 4, in_size[1] // 4))
if __name__ == "__main__":
_test()
| 18,529
| 29.883333
| 117
|
py
|
imgclsmob
|
imgclsmob-master/gluon/gluoncv2/models/xception.py
|
"""
Xception for ImageNet-1K, implemented in Gluon.
Original paper: 'Xception: Deep Learning with Depthwise Separable Convolutions,' https://arxiv.org/abs/1610.02357.
"""
__all__ = ['Xception', 'xception']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1_block, conv3x3_block
class DwsConv(HybridBlock):
"""
Depthwise separable convolution layer.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of 2 int
Convolution window size.
strides : int or tuple/list of 2 int, default 1
Strides of the convolution.
padding : int or tuple/list of 2 int, default 0
Padding value for convolution layer.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
strides=1,
padding=0,
**kwargs):
super(DwsConv, self).__init__(**kwargs)
with self.name_scope():
self.dw_conv = nn.Conv2D(
channels=in_channels,
kernel_size=kernel_size,
strides=strides,
padding=padding,
groups=in_channels,
use_bias=False,
in_channels=in_channels)
self.pw_conv = nn.Conv2D(
channels=out_channels,
kernel_size=1,
use_bias=False,
in_channels=in_channels)
def hybrid_forward(self, F, x):
x = self.dw_conv(x)
x = self.pw_conv(x)
return x
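# A minimal illustrative sketch (not part of the original module) of why the
# factorization above is cheap: a depthwise KxK plus a pointwise 1x1 needs
# K*K*C_in + C_in*C_out weights, versus K*K*C_in*C_out for a dense convolution.
# The helper name is hypothetical; biases are ignored.
def _dws_weight_count_sketch(in_channels=128, out_channels=256, kernel_size=3):
    k = kernel_size
    dws = k * k * in_channels + in_channels * out_channels
    dense = k * k * in_channels * out_channels
    # For the defaults: dws=33920, dense=294912, ratio ~ 0.115.
    return dws, dense, dws / dense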
class DwsConvBlock(HybridBlock):
"""
Depthwise separable convolution block with batchnorm and ReLU pre-activation.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of 2 int
Convolution window size.
strides : int or tuple/list of 2 int
Strides of the convolution.
padding : int or tuple/list of 2 int
Padding value for convolution layer.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
activate : bool
        Whether to activate the convolution block.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
strides,
padding,
bn_use_global_stats,
activate,
**kwargs):
super(DwsConvBlock, self).__init__(**kwargs)
self.activate = activate
with self.name_scope():
if self.activate:
self.activ = nn.Activation("relu")
self.conv = DwsConv(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
strides=strides,
padding=padding)
self.bn = nn.BatchNorm(
in_channels=out_channels,
use_global_stats=bn_use_global_stats)
def hybrid_forward(self, F, x):
if self.activate:
x = self.activ(x)
x = self.conv(x)
x = self.bn(x)
return x
def dws_conv3x3_block(in_channels,
out_channels,
bn_use_global_stats,
activate):
"""
3x3 version of the depthwise separable convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
activate : bool
        Whether to activate the convolution block.
"""
return DwsConvBlock(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
strides=1,
padding=1,
bn_use_global_stats=bn_use_global_stats,
activate=activate)
class XceptionUnit(HybridBlock):
"""
Xception unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
        Strides of the downsample pooling.
reps : int
Number of repetitions.
    start_with_relu : bool, default True
        Whether to start with ReLU activation.
    grow_first : bool, default True
        Whether to grow channels at the first repetition (instead of the last one).
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels,
out_channels,
strides,
reps,
start_with_relu=True,
grow_first=True,
bn_use_global_stats=False,
**kwargs):
super(XceptionUnit, self).__init__(**kwargs)
self.resize_identity = (in_channels != out_channels) or (strides != 1)
with self.name_scope():
if self.resize_identity:
self.identity_conv = conv1x1_block(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats,
activation=None)
self.body = nn.HybridSequential(prefix="")
for i in range(reps):
if (grow_first and (i == 0)) or ((not grow_first) and (i == reps - 1)):
in_channels_i = in_channels
out_channels_i = out_channels
else:
if grow_first:
in_channels_i = out_channels
out_channels_i = out_channels
else:
in_channels_i = in_channels
out_channels_i = in_channels
activate = start_with_relu if (i == 0) else True
self.body.add(dws_conv3x3_block(
in_channels=in_channels_i,
out_channels=out_channels_i,
bn_use_global_stats=bn_use_global_stats,
activate=activate))
if strides != 1:
self.body.add(nn.MaxPool2D(
pool_size=3,
strides=strides,
padding=1))
def hybrid_forward(self, F, x):
if self.resize_identity:
identity = self.identity_conv(x)
else:
identity = F.identity(x)
x = self.body(x)
x = x + identity
return x
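# A minimal illustrative sketch (not part of the original module) that mirrors
# the repetition loop in `XceptionUnit.__init__`: with `grow_first` the first
# repetition widens to `out_channels`, otherwise the last one does. The helper
# name is hypothetical.
def _xception_rep_channels_sketch(in_channels, out_channels, reps, grow_first):
    pairs = []
    for i in range(reps):
        if (grow_first and (i == 0)) or ((not grow_first) and (i == reps - 1)):
            pairs.append((in_channels, out_channels))
        elif grow_first:
            pairs.append((out_channels, out_channels))
        else:
            pairs.append((in_channels, in_channels))
    # _xception_rep_channels_sketch(728, 1024, 2, False) -> [(728, 728), (728, 1024)]
    return pairs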
class XceptionInitBlock(HybridBlock):
"""
Xception specific initial block.
Parameters:
----------
in_channels : int
Number of input channels.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels,
bn_use_global_stats,
**kwargs):
super(XceptionInitBlock, self).__init__(**kwargs)
with self.name_scope():
self.conv1 = conv3x3_block(
in_channels=in_channels,
out_channels=32,
strides=2,
padding=0,
bn_use_global_stats=bn_use_global_stats)
self.conv2 = conv3x3_block(
in_channels=32,
out_channels=64,
strides=1,
padding=0,
bn_use_global_stats=bn_use_global_stats)
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.conv2(x)
return x
class XceptionFinalBlock(HybridBlock):
"""
Xception specific final block.
Parameters:
----------
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
bn_use_global_stats,
**kwargs):
super(XceptionFinalBlock, self).__init__(**kwargs)
with self.name_scope():
self.conv1 = dws_conv3x3_block(
in_channels=1024,
out_channels=1536,
bn_use_global_stats=bn_use_global_stats,
activate=False)
self.conv2 = dws_conv3x3_block(
in_channels=1536,
out_channels=2048,
bn_use_global_stats=bn_use_global_stats,
activate=True)
self.activ = nn.Activation("relu")
self.pool = nn.AvgPool2D(
pool_size=10,
strides=1)
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.activ(x)
x = self.pool(x)
return x
class Xception(HybridBlock):
"""
Xception model from 'Xception: Deep Learning with Depthwise Separable Convolutions,'
https://arxiv.org/abs/1610.02357.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
in_channels : int, default 3
Number of input channels.
    in_size : tuple of two ints, default (299, 299)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
bn_use_global_stats=False,
in_channels=3,
in_size=(299, 299),
classes=1000,
**kwargs):
super(Xception, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
self.features.add(XceptionInitBlock(
in_channels=in_channels,
bn_use_global_stats=bn_use_global_stats))
in_channels = 64
for i, channels_per_stage in enumerate(channels):
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
with stage.name_scope():
for j, out_channels in enumerate(channels_per_stage):
stage.add(XceptionUnit(
in_channels=in_channels,
out_channels=out_channels,
strides=(2 if (j == 0) else 1),
reps=(2 if (j == 0) else 3),
start_with_relu=((i != 0) or (j != 0)),
grow_first=((i != len(channels) - 1) or (j != len(channels_per_stage) - 1)),
bn_use_global_stats=bn_use_global_stats))
in_channels = out_channels
self.features.add(stage)
self.features.add(XceptionFinalBlock(bn_use_global_stats=bn_use_global_stats))
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
self.output.add(nn.Dense(
units=classes,
in_units=2048))
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_xception(model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create Xception model with specific parameters.
Parameters:
----------
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
channels = [[128], [256], [728] * 9, [1024]]
net = Xception(
channels=channels,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def xception(**kwargs):
"""
Xception model from 'Xception: Deep Learning with Depthwise Separable Convolutions,'
https://arxiv.org/abs/1610.02357.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_xception(model_name="xception", **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
xception,
]
for model in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != xception or weight_count == 22855952)
x = mx.nd.zeros((1, 3, 299, 299), ctx=ctx)
y = net(x)
assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
| 14,182
| 30.87191
| 118
|
py
|
imgclsmob
|
imgclsmob-master/gluon/gluoncv2/models/darknet53.py
|
"""
DarkNet-53 for ImageNet-1K, implemented in Gluon.
Original paper: 'YOLOv3: An Incremental Improvement,' https://arxiv.org/abs/1804.02767.
"""
__all__ = ['DarkNet53', 'darknet53']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1_block, conv3x3_block
class DarkUnit(HybridBlock):
"""
DarkNet unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
alpha : float
Slope coefficient for Leaky ReLU activation.
"""
def __init__(self,
in_channels,
out_channels,
bn_use_global_stats,
alpha,
**kwargs):
super(DarkUnit, self).__init__(**kwargs)
assert (out_channels % 2 == 0)
mid_channels = out_channels // 2
with self.name_scope():
self.conv1 = conv1x1_block(
in_channels=in_channels,
out_channels=mid_channels,
bn_use_global_stats=bn_use_global_stats,
activation=nn.LeakyReLU(alpha=alpha))
self.conv2 = conv3x3_block(
in_channels=mid_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats,
activation=nn.LeakyReLU(alpha=alpha))
def hybrid_forward(self, F, x):
identity = x
x = self.conv1(x)
x = self.conv2(x)
return x + identity
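# A minimal illustrative sketch (not part of the original module) of the
# bottleneck above: squeezing to out_channels // 2 with the 1x1 before the 3x3
# costs far fewer weights than stacking two dense 3x3 convolutions at full
# width. The helper name is hypothetical; biases and BatchNorm are ignored.
def _dark_unit_weight_sketch(channels=256):
    mid = channels // 2
    bottleneck = channels * mid + 3 * 3 * mid * channels
    two_dense_3x3 = 2 * (3 * 3 * channels * channels)
    # For channels=256: bottleneck=327680 vs two_dense_3x3=1179648.
    return bottleneck, two_dense_3x3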
class DarkNet53(HybridBlock):
"""
DarkNet-53 model from 'YOLOv3: An Incremental Improvement,' https://arxiv.org/abs/1804.02767.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
alpha : float, default 0.1
Slope coefficient for Leaky ReLU activation.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
alpha=0.1,
bn_use_global_stats=False,
in_channels=3,
in_size=(224, 224),
classes=1000,
**kwargs):
super(DarkNet53, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
self.features.add(conv3x3_block(
in_channels=in_channels,
out_channels=init_block_channels,
bn_use_global_stats=bn_use_global_stats,
activation=nn.LeakyReLU(alpha=alpha)))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
with stage.name_scope():
for j, out_channels in enumerate(channels_per_stage):
if j == 0:
stage.add(conv3x3_block(
in_channels=in_channels,
out_channels=out_channels,
strides=2,
bn_use_global_stats=bn_use_global_stats,
activation=nn.LeakyReLU(alpha=alpha)))
else:
stage.add(DarkUnit(
in_channels=in_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats,
alpha=alpha))
in_channels = out_channels
self.features.add(stage)
self.features.add(nn.AvgPool2D(
pool_size=7,
strides=1))
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
self.output.add(nn.Dense(
units=classes,
in_units=in_channels))
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_darknet53(model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create DarkNet model with specific parameters.
Parameters:
----------
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
init_block_channels = 32
layers = [2, 3, 9, 9, 5]
channels_per_layers = [64, 128, 256, 512, 1024]
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
net = DarkNet53(
channels=channels,
init_block_channels=init_block_channels,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
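# A minimal illustrative sketch (not part of the original module) of where the
# "53" in DarkNet-53 comes from: one stem conv, one strided conv per stage, two
# convs per DarkUnit, plus the final Dense layer. The helper name is
# hypothetical.
def _darknet53_layer_count_sketch():
    layers = [2, 3, 9, 9, 5]  # first entry of each stage is the strided conv
    stem = 1
    strided_convs = len(layers)
    unit_convs = 2 * sum(li - 1 for li in layers)
    classifier = 1
    return stem + strided_convs + unit_convs + classifier  # 1 + 5 + 46 + 1 == 53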
def darknet53(**kwargs):
"""
DarkNet-53 'Reference' model from 'YOLOv3: An Incremental Improvement,' https://arxiv.org/abs/1804.02767.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_darknet53(model_name="darknet53", **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
darknet53,
]
for model in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != darknet53 or weight_count == 41609928)
x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
y = net(x)
assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
| 7,537
| 31.917031
| 115
|
py
|
imgclsmob
|
imgclsmob-master/gluon/gluoncv2/models/mobilenet.py
|
"""
MobileNet for ImageNet-1K, implemented in Gluon.
Original paper: 'MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications,'
https://arxiv.org/abs/1704.04861.
"""
__all__ = ['MobileNet', 'mobilenet_w1', 'mobilenet_w3d4', 'mobilenet_wd2', 'mobilenet_wd4', 'get_mobilenet']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv3x3_block, dwsconv3x3_block
class MobileNet(HybridBlock):
"""
MobileNet model from 'MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications,'
https://arxiv.org/abs/1704.04861.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
first_stage_stride : bool
Whether stride is used at the first stage.
dw_use_bn : bool, default True
Whether to use BatchNorm layer (depthwise convolution block).
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
dw_activation : function or str or None, default nn.Activation('relu')
Activation function after the depthwise convolution block.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
first_stage_stride,
dw_use_bn=True,
bn_use_global_stats=False,
bn_cudnn_off=False,
dw_activation=(lambda: nn.Activation("relu")),
in_channels=3,
in_size=(224, 224),
classes=1000,
**kwargs):
super(MobileNet, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
init_block_channels = channels[0][0]
self.features.add(conv3x3_block(
in_channels=in_channels,
out_channels=init_block_channels,
strides=2,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels[1:]):
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
with stage.name_scope():
for j, out_channels in enumerate(channels_per_stage):
strides = 2 if (j == 0) and ((i != 0) or first_stage_stride) else 1
stage.add(dwsconv3x3_block(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
dw_use_bn=dw_use_bn,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
dw_activation=dw_activation))
in_channels = out_channels
self.features.add(stage)
self.features.add(nn.AvgPool2D(
pool_size=7,
strides=1))
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
self.output.add(nn.Dense(
units=classes,
in_units=in_channels))
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_mobilenet(width_scale,
dws_simplified=False,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create MobileNet model with specific parameters.
Parameters:
----------
width_scale : float
Scale factor for width of layers.
dws_simplified : bool, default False
Whether to use simplified depthwise separable convolution block.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
channels = [[32], [64], [128, 128], [256, 256], [512, 512, 512, 512, 512, 512], [1024, 1024]]
first_stage_stride = False
if width_scale != 1.0:
channels = [[int(cij * width_scale) for cij in ci] for ci in channels]
if dws_simplified:
dw_use_bn = False
dw_activation = None
else:
dw_use_bn = True
dw_activation = (lambda: nn.Activation("relu"))
net = MobileNet(
channels=channels,
first_stage_stride=first_stage_stride,
dw_use_bn=dw_use_bn,
dw_activation=dw_activation,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
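# A minimal illustrative sketch (not part of the original module) of the width
# scaling in `get_mobilenet`: every channel count is multiplied by
# `width_scale` and truncated with `int`. The helper name is hypothetical.
def _mobilenet_width_scale_sketch(width_scale=0.25):
    channels = [[32], [64], [128, 128], [256, 256],
                [512, 512, 512, 512, 512, 512], [1024, 1024]]
    if width_scale != 1.0:
        channels = [[int(cij * width_scale) for cij in ci] for ci in channels]
    # width_scale=0.25 -> [[8], [16], [32, 32], [64, 64], [128] * 6, [256, 256]]
    return channels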
def mobilenet_w1(**kwargs):
"""
1.0 MobileNet-224 model from 'MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications,'
https://arxiv.org/abs/1704.04861.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_mobilenet(width_scale=1.0, model_name="mobilenet_w1", **kwargs)
def mobilenet_w3d4(**kwargs):
"""
0.75 MobileNet-224 model from 'MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications,'
https://arxiv.org/abs/1704.04861.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_mobilenet(width_scale=0.75, model_name="mobilenet_w3d4", **kwargs)
def mobilenet_wd2(**kwargs):
"""
0.5 MobileNet-224 model from 'MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications,'
https://arxiv.org/abs/1704.04861.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_mobilenet(width_scale=0.5, model_name="mobilenet_wd2", **kwargs)
def mobilenet_wd4(**kwargs):
"""
0.25 MobileNet-224 model from 'MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications,'
https://arxiv.org/abs/1704.04861.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_mobilenet(width_scale=0.25, model_name="mobilenet_wd4", **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
mobilenet_w1,
mobilenet_w3d4,
mobilenet_wd2,
mobilenet_wd4,
]
for model in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != mobilenet_w1 or weight_count == 4231976)
assert (model != mobilenet_w3d4 or weight_count == 2585560)
assert (model != mobilenet_wd2 or weight_count == 1331592)
assert (model != mobilenet_wd4 or weight_count == 470072)
x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
y = net(x)
assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
| 9,265
| 34.098485
| 119
|
py
|
imgclsmob
|
imgclsmob-master/gluon/gluoncv2/models/dpn.py
|
"""
DPN for ImageNet-1K, implemented in Gluon.
Original paper: 'Dual Path Networks,' https://arxiv.org/abs/1707.01629.
"""
__all__ = ['DPN', 'dpn68', 'dpn68b', 'dpn98', 'dpn107', 'dpn131']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1, DualPathSequential
class GlobalAvgMaxPool2D(HybridBlock):
"""
Global average+max pooling operation for spatial data.
"""
def __init__(self,
**kwargs):
super(GlobalAvgMaxPool2D, self).__init__(**kwargs)
with self.name_scope():
self.avg_pool = nn.GlobalAvgPool2D()
self.max_pool = nn.GlobalMaxPool2D()
def hybrid_forward(self, F, x):
x_avg = self.avg_pool(x)
x_max = self.max_pool(x)
x = 0.5 * (x_avg + x_max)
return x
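# A minimal illustrative numpy sketch (not part of the original module) of the
# pooling above: each channel is reduced to the mean of its global average and
# its global maximum. The helper name is hypothetical.
def _avg_max_pool_sketch():
    import numpy as np
    x = np.arange(2 * 3 * 4 * 4, dtype=np.float32).reshape(2, 3, 4, 4)
    x_avg = x.mean(axis=(2, 3), keepdims=True)
    x_max = x.max(axis=(2, 3), keepdims=True)
    return 0.5 * (x_avg + x_max)  # shape (2, 3, 1, 1)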
def dpn_batch_norm(channels):
"""
DPN specific Batch normalization layer.
Parameters:
----------
channels : int
Number of channels in input data.
"""
return nn.BatchNorm(
epsilon=0.001,
in_channels=channels)
class PreActivation(HybridBlock):
"""
    DPN specific block, which performs the preactivation like in PreResNet.
Parameters:
----------
channels : int
Number of channels.
"""
def __init__(self,
channels,
**kwargs):
super(PreActivation, self).__init__(**kwargs)
with self.name_scope():
self.bn = dpn_batch_norm(channels=channels)
self.activ = nn.Activation("relu")
def hybrid_forward(self, F, x):
x = self.bn(x)
x = self.activ(x)
return x
class DPNConv(HybridBlock):
"""
DPN specific convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of 2 int
Convolution window size.
strides : int or tuple/list of 2 int
Strides of the convolution.
padding : int or tuple/list of 2 int
Padding value for convolution layer.
groups : int
Number of groups.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
strides,
padding,
groups,
**kwargs):
super(DPNConv, self).__init__(**kwargs)
with self.name_scope():
self.bn = dpn_batch_norm(channels=in_channels)
self.activ = nn.Activation("relu")
self.conv = nn.Conv2D(
channels=out_channels,
kernel_size=kernel_size,
strides=strides,
padding=padding,
groups=groups,
use_bias=False,
in_channels=in_channels)
def hybrid_forward(self, F, x):
x = self.bn(x)
x = self.activ(x)
x = self.conv(x)
return x
def dpn_conv1x1(in_channels,
out_channels,
strides=1):
"""
1x1 version of the DPN specific convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int, default 1
Strides of the convolution.
"""
return DPNConv(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=1,
strides=strides,
padding=0,
groups=1)
def dpn_conv3x3(in_channels,
out_channels,
strides,
groups):
"""
3x3 version of the DPN specific convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
groups : int
Number of groups.
"""
return DPNConv(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
strides=strides,
padding=1,
groups=groups)
class DPNUnit(HybridBlock):
"""
DPN unit.
Parameters:
----------
in_channels : int
Number of input channels.
mid_channels : int
Number of intermediate channels.
bw : int
Number of residual channels.
inc : int
Incrementing step for channels.
groups : int
Number of groups in the units.
has_proj : bool
Whether to use projection.
    key_strides : int
        Strides of the projection and middle convolutions.
b_case : bool, default False
Whether to use B-case model.
"""
def __init__(self,
in_channels,
mid_channels,
bw,
inc,
groups,
has_proj,
key_strides,
b_case=False,
**kwargs):
super(DPNUnit, self).__init__(**kwargs)
self.bw = bw
self.has_proj = has_proj
self.b_case = b_case
with self.name_scope():
if self.has_proj:
self.conv_proj = dpn_conv1x1(
in_channels=in_channels,
out_channels=bw + 2 * inc,
strides=key_strides)
self.conv1 = dpn_conv1x1(
in_channels=in_channels,
out_channels=mid_channels)
self.conv2 = dpn_conv3x3(
in_channels=mid_channels,
out_channels=mid_channels,
strides=key_strides,
groups=groups)
if b_case:
self.preactiv = PreActivation(channels=mid_channels)
self.conv3a = conv1x1(
in_channels=mid_channels,
out_channels=bw)
self.conv3b = conv1x1(
in_channels=mid_channels,
out_channels=inc)
else:
self.conv3 = dpn_conv1x1(
in_channels=mid_channels,
out_channels=bw + inc)
def hybrid_forward(self, F, x1, x2=None):
x_in = F.concat(x1, x2, dim=1) if x2 is not None else x1
if self.has_proj:
x_s = self.conv_proj(x_in)
x_s1 = F.slice_axis(x_s, axis=1, begin=0, end=self.bw)
x_s2 = F.slice_axis(x_s, axis=1, begin=self.bw, end=None)
else:
assert (x2 is not None)
x_s1 = x1
x_s2 = x2
x_in = self.conv1(x_in)
x_in = self.conv2(x_in)
if self.b_case:
x_in = self.preactiv(x_in)
y1 = self.conv3a(x_in)
y2 = self.conv3b(x_in)
else:
x_in = self.conv3(x_in)
y1 = F.slice_axis(x_in, axis=1, begin=0, end=self.bw)
y2 = F.slice_axis(x_in, axis=1, begin=self.bw, end=None)
residual = x_s1 + y1
dense = F.concat(x_s2, y2, dim=1)
return residual, dense
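# A minimal illustrative sketch (not part of the original module) of the dual
# path split in `hybrid_forward` above: the last conv emits `bw + inc`
# channels, the first `bw` feed the residual sum and the trailing `inc` are
# concatenated onto the dense path, which therefore grows by `inc` per unit.
# The helper name is hypothetical; `init_dense` is 2 * inc, as emitted by the
# projection.
def _dpn_dense_growth_sketch(bw=64, inc=16, num_units=3, init_dense=32):
    dense_width = init_dense
    total_widths = []
    for _ in range(num_units):
        dense_width += inc
        total_widths.append(bw + dense_width)  # channels entering the next unit
    # bw=64, inc=16 -> [112, 128, 144], matching the dpn68 first stage.
    return total_widths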
class DPNInitBlock(HybridBlock):
"""
DPN specific initial block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of 2 int
Convolution window size.
padding : int or tuple/list of 2 int
Padding value for convolution layer.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
padding,
**kwargs):
super(DPNInitBlock, self).__init__(**kwargs)
with self.name_scope():
self.conv = nn.Conv2D(
channels=out_channels,
kernel_size=kernel_size,
strides=2,
padding=padding,
use_bias=False,
in_channels=in_channels)
self.bn = dpn_batch_norm(channels=out_channels)
self.activ = nn.Activation("relu")
self.pool = nn.MaxPool2D(
pool_size=3,
strides=2,
padding=1)
def hybrid_forward(self, F, x):
x = self.conv(x)
x = self.bn(x)
x = self.activ(x)
x = self.pool(x)
return x
class DPNFinalBlock(HybridBlock):
"""
DPN final block, which performs the preactivation with cutting.
Parameters:
----------
channels : int
Number of channels.
"""
def __init__(self,
channels,
**kwargs):
super(DPNFinalBlock, self).__init__(**kwargs)
with self.name_scope():
self.activ = PreActivation(channels=channels)
def hybrid_forward(self, F, x1, x2):
assert (x2 is not None)
x = F.concat(x1, x2, dim=1)
x = self.activ(x)
return x, None
class DPN(HybridBlock):
"""
DPN model from 'Dual Path Networks,' https://arxiv.org/abs/1707.01629.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
init_block_kernel_size : int or tuple/list of 2 int
Convolution window size for the initial unit.
init_block_padding : int or tuple/list of 2 int
Padding value for convolution layer in the initial unit.
    rs : list of int
        Number of intermediate channels for each stage.
    bws : list of int
        Number of residual channels for each stage.
    incs : list of int
        Incrementing step for channels for each stage.
groups : int
Number of groups in the units.
b_case : bool
Whether to use B-case model.
for_training : bool
Whether to use model for training.
test_time_pool : bool
        Whether to use avg-max pooling in inference mode.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
init_block_kernel_size,
init_block_padding,
rs,
bws,
incs,
groups,
b_case,
for_training,
test_time_pool,
in_channels=3,
in_size=(224, 224),
classes=1000,
**kwargs):
super(DPN, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
with self.name_scope():
self.features = DualPathSequential(
return_two=False,
first_ordinals=1,
last_ordinals=0,
prefix="")
self.features.add(DPNInitBlock(
in_channels=in_channels,
out_channels=init_block_channels,
kernel_size=init_block_kernel_size,
padding=init_block_padding))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = DualPathSequential(prefix="stage{}_".format(i + 1))
r = rs[i]
bw = bws[i]
inc = incs[i]
with stage.name_scope():
for j, out_channels in enumerate(channels_per_stage):
has_proj = (j == 0)
key_strides = 2 if (j == 0) and (i != 0) else 1
stage.add(DPNUnit(
in_channels=in_channels,
mid_channels=r,
bw=bw,
inc=inc,
groups=groups,
has_proj=has_proj,
key_strides=key_strides,
b_case=b_case))
in_channels = out_channels
self.features.add(stage)
self.features.add(DPNFinalBlock(channels=in_channels))
self.output = nn.HybridSequential(prefix="")
if for_training or not test_time_pool:
self.output.add(nn.GlobalAvgPool2D())
self.output.add(conv1x1(
in_channels=in_channels,
out_channels=classes,
use_bias=True))
self.output.add(nn.Flatten())
else:
self.output.add(nn.AvgPool2D(
pool_size=7,
strides=1))
self.output.add(conv1x1(
in_channels=in_channels,
out_channels=classes,
use_bias=True))
self.output.add(GlobalAvgMaxPool2D())
self.output.add(nn.Flatten())
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_dpn(num_layers,
b_case=False,
for_training=False,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create DPN model with specific parameters.
Parameters:
----------
num_layers : int
Number of layers.
b_case : bool, default False
Whether to use B-case model.
    for_training : bool, default False
Whether to use model for training.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
if num_layers == 68:
init_block_channels = 10
init_block_kernel_size = 3
init_block_padding = 1
bw_factor = 1
k_r = 128
groups = 32
k_sec = (3, 4, 12, 3)
incs = (16, 32, 32, 64)
test_time_pool = True
elif num_layers == 98:
init_block_channels = 96
init_block_kernel_size = 7
init_block_padding = 3
bw_factor = 4
k_r = 160
groups = 40
k_sec = (3, 6, 20, 3)
incs = (16, 32, 32, 128)
test_time_pool = True
elif num_layers == 107:
init_block_channels = 128
init_block_kernel_size = 7
init_block_padding = 3
bw_factor = 4
k_r = 200
groups = 50
k_sec = (4, 8, 20, 3)
incs = (20, 64, 64, 128)
test_time_pool = True
elif num_layers == 131:
init_block_channels = 128
init_block_kernel_size = 7
init_block_padding = 3
bw_factor = 4
k_r = 160
groups = 40
k_sec = (4, 8, 28, 3)
incs = (16, 32, 32, 128)
test_time_pool = True
else:
raise ValueError("Unsupported DPN version with number of layers {}".format(num_layers))
    channels = [[0] * li for li in k_sec]
    rs = [0] * len(k_sec)
    bws = [0] * len(k_sec)
for i in range(len(k_sec)):
rs[i] = (2 ** i) * k_r
bws[i] = (2 ** i) * 64 * bw_factor
inc = incs[i]
channels[i][0] = bws[i] + 3 * inc
for j in range(1, k_sec[i]):
channels[i][j] = channels[i][j - 1] + inc
net = DPN(
channels=channels,
init_block_channels=init_block_channels,
init_block_kernel_size=init_block_kernel_size,
init_block_padding=init_block_padding,
rs=rs,
bws=bws,
incs=incs,
groups=groups,
b_case=b_case,
for_training=for_training,
test_time_pool=test_time_pool,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
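# A minimal illustrative sketch (not part of the original module) replaying the
# channel computation above for dpn68 (bw_factor=1, k_sec=(3, 4, 12, 3),
# incs=(16, 32, 32, 64)). The helper name is hypothetical.
def _dpn68_channels_sketch():
    bw_factor = 1
    k_sec = (3, 4, 12, 3)
    incs = (16, 32, 32, 64)
    channels = [[0] * li for li in k_sec]
    for i in range(len(k_sec)):
        bw = (2 ** i) * 64 * bw_factor
        channels[i][0] = bw + 3 * incs[i]
        for j in range(1, k_sec[i]):
            channels[i][j] = channels[i][j - 1] + incs[i]
    # channels[0] == [112, 128, 144]; channels[3] == [704, 768, 832]
    return channels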
def dpn68(**kwargs):
"""
DPN-68 model from 'Dual Path Networks,' https://arxiv.org/abs/1707.01629.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_dpn(num_layers=68, b_case=False, model_name="dpn68", **kwargs)
def dpn68b(**kwargs):
"""
DPN-68b model from 'Dual Path Networks,' https://arxiv.org/abs/1707.01629.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_dpn(num_layers=68, b_case=True, model_name="dpn68b", **kwargs)
def dpn98(**kwargs):
"""
DPN-98 model from 'Dual Path Networks,' https://arxiv.org/abs/1707.01629.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_dpn(num_layers=98, b_case=False, model_name="dpn98", **kwargs)
def dpn107(**kwargs):
"""
DPN-107 model from 'Dual Path Networks,' https://arxiv.org/abs/1707.01629.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_dpn(num_layers=107, b_case=False, model_name="dpn107", **kwargs)
def dpn131(**kwargs):
"""
DPN-131 model from 'Dual Path Networks,' https://arxiv.org/abs/1707.01629.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_dpn(num_layers=131, b_case=False, model_name="dpn131", **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
for_training = False
models = [
dpn68,
# dpn68b,
dpn98,
# dpn107,
dpn131,
]
for model in models:
net = model(pretrained=pretrained, for_training=for_training)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != dpn68 or weight_count == 12611602)
assert (model != dpn68b or weight_count == 12611602)
assert (model != dpn98 or weight_count == 61570728)
assert (model != dpn107 or weight_count == 86917800)
assert (model != dpn131 or weight_count == 79254504)
# net.hybridize()
x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
y = net(x)
assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
| 20,220
| 28.957037
| 115
|
py
|
imgclsmob
|
imgclsmob-master/gluon/gluoncv2/models/sknet.py
|
"""
SKNet for ImageNet-1K, implemented in Gluon.
Original paper: 'Selective Kernel Networks,' https://arxiv.org/abs/1903.06586.
"""
__all__ = ['SKNet', 'sknet50', 'sknet101', 'sknet152']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1, conv1x1_block, conv3x3_block, Concurrent
from .resnet import ResInitBlock
class SKConvBlock(HybridBlock):
"""
SKNet specific convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
groups : int, default 32
Number of groups in branches.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
num_branches : int, default 2
Number of branches (`M` parameter in the paper).
reduction : int, default 16
Reduction value for intermediate channels (`r` parameter in the paper).
min_channels : int, default 32
Minimal number of intermediate channels (`L` parameter in the paper).
"""
def __init__(self,
in_channels,
out_channels,
strides,
groups=32,
bn_use_global_stats=False,
num_branches=2,
reduction=16,
min_channels=32,
**kwargs):
super(SKConvBlock, self).__init__(**kwargs)
self.num_branches = num_branches
self.out_channels = out_channels
mid_channels = max(in_channels // reduction, min_channels)
with self.name_scope():
self.branches = Concurrent(stack=True, prefix="")
for i in range(num_branches):
dilation = 1 + i
self.branches.add(conv3x3_block(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
padding=dilation,
dilation=dilation,
groups=groups,
bn_use_global_stats=bn_use_global_stats))
self.fc1 = conv1x1_block(
in_channels=out_channels,
out_channels=mid_channels,
bn_use_global_stats=bn_use_global_stats)
self.fc2 = conv1x1(
in_channels=mid_channels,
out_channels=(out_channels * num_branches))
def hybrid_forward(self, F, x):
y = self.branches(x)
u = y.sum(axis=1)
s = F.contrib.AdaptiveAvgPooling2D(u, output_size=1)
z = self.fc1(s)
w = self.fc2(z)
w = w.reshape((0, self.num_branches, self.out_channels))
w = F.softmax(w, axis=1)
w = w.expand_dims(3).expand_dims(4)
y = F.broadcast_mul(y, w)
y = y.sum(axis=1)
return y
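# A minimal illustrative numpy sketch (not part of the original module) of the
# selective-kernel attention in `hybrid_forward` above: a softmax across the
# branch axis turns the fc2 output into per-channel weights that mix the
# stacked branch outputs. The helper name is hypothetical.
def _sk_attention_sketch():
    import numpy as np
    n, m, c, h, w = 1, 2, 4, 3, 3              # batch, branches, channels, H, W
    y = np.random.rand(n, m, c, h, w)          # stacked branch outputs
    logits = np.random.rand(n, m, c)           # stands in for the fc2 output
    e = np.exp(logits - logits.max(axis=1, keepdims=True))
    weights = e / e.sum(axis=1, keepdims=True)          # softmax over branches
    return (y * weights[..., None, None]).sum(axis=1)   # shape (n, c, h, w)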
class SKNetBottleneck(HybridBlock):
"""
SKNet bottleneck block for residual path in SKNet unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
bottleneck_factor : int, default 2
Bottleneck factor.
"""
def __init__(self,
in_channels,
out_channels,
strides,
bn_use_global_stats=False,
bottleneck_factor=2,
**kwargs):
super(SKNetBottleneck, self).__init__(**kwargs)
mid_channels = out_channels // bottleneck_factor
with self.name_scope():
self.conv1 = conv1x1_block(
in_channels=in_channels,
out_channels=mid_channels,
bn_use_global_stats=bn_use_global_stats)
self.conv2 = SKConvBlock(
in_channels=mid_channels,
out_channels=mid_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats)
self.conv3 = conv1x1_block(
in_channels=mid_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats,
activation=None)
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
return x
class SKNetUnit(HybridBlock):
"""
SKNet unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels,
out_channels,
strides,
bn_use_global_stats=False,
**kwargs):
super(SKNetUnit, self).__init__(**kwargs)
self.resize_identity = (in_channels != out_channels) or (strides != 1)
with self.name_scope():
self.body = SKNetBottleneck(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats)
if self.resize_identity:
self.identity_conv = conv1x1_block(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats,
activation=None)
self.activ = nn.Activation("relu")
def hybrid_forward(self, F, x):
if self.resize_identity:
identity = self.identity_conv(x)
else:
identity = x
x = self.body(x)
x = x + identity
x = self.activ(x)
return x
class SKNet(HybridBlock):
"""
SKNet model from 'Selective Kernel Networks,' https://arxiv.org/abs/1903.06586.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
bn_use_global_stats=False,
in_channels=3,
in_size=(224, 224),
classes=1000,
**kwargs):
super(SKNet, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
self.features.add(ResInitBlock(
in_channels=in_channels,
out_channels=init_block_channels,
bn_use_global_stats=bn_use_global_stats))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
with stage.name_scope():
for j, out_channels in enumerate(channels_per_stage):
strides = 2 if (j == 0) and (i != 0) else 1
stage.add(SKNetUnit(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats))
in_channels = out_channels
self.features.add(stage)
self.features.add(nn.AvgPool2D(
pool_size=7,
strides=1))
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
self.output.add(nn.Dense(
units=classes,
in_units=in_channels))
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_sknet(blocks,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create SKNet model with specific parameters.
Parameters:
----------
blocks : int
Number of blocks.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
if blocks == 50:
layers = [3, 4, 6, 3]
elif blocks == 101:
layers = [3, 4, 23, 3]
elif blocks == 152:
layers = [3, 8, 36, 3]
else:
raise ValueError("Unsupported SKNet with number of blocks: {}".format(blocks))
init_block_channels = 64
channels_per_layers = [256, 512, 1024, 2048]
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
net = SKNet(
channels=channels,
init_block_channels=init_block_channels,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def sknet50(**kwargs):
"""
SKNet-50 model from 'Selective Kernel Networks,' https://arxiv.org/abs/1903.06586.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_sknet(blocks=50, model_name="sknet50", **kwargs)
def sknet101(**kwargs):
"""
SKNet-101 model from 'Selective Kernel Networks,' https://arxiv.org/abs/1903.06586.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_sknet(blocks=101, model_name="sknet101", **kwargs)
def sknet152(**kwargs):
"""
    SKNet-152 model from 'Selective Kernel Networks,' https://arxiv.org/abs/1903.06586.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_sknet(blocks=152, model_name="sknet152", **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
sknet50,
sknet101,
sknet152,
]
for model in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != sknet50 or weight_count == 27479784)
assert (model != sknet101 or weight_count == 48736040)
assert (model != sknet152 or weight_count == 66295656)
x = mx.nd.zeros((14, 3, 224, 224), ctx=ctx)
y = net(x)
assert (y.shape == (14, 1000))
if __name__ == "__main__":
_test()
| 12,972
| 31.595477
| 115
|
py
|
imgclsmob
|
imgclsmob-master/gluon/gluoncv2/models/spnasnet.py
|
"""
Single-Path NASNet for ImageNet-1K, implemented in Gluon.
Original paper: 'Single-Path NAS: Designing Hardware-Efficient ConvNets in less than 4 Hours,'
https://arxiv.org/abs/1904.02877.
"""
__all__ = ['SPNASNet', 'spnasnet']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1_block, conv3x3_block, dwconv3x3_block, dwconv5x5_block
class SPNASUnit(HybridBlock):
"""
Single-Path NASNet unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the second convolution layer.
use_kernel3 : bool
Whether to use 3x3 (instead of 5x5) kernel.
exp_factor : int
Expansion factor for each unit.
use_skip : bool, default True
Whether to use skip connection.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
activation : str, default 'relu'
Activation function or name of activation function.
"""
def __init__(self,
in_channels,
out_channels,
strides,
use_kernel3,
exp_factor,
use_skip=True,
bn_use_global_stats=False,
activation="relu",
**kwargs):
super(SPNASUnit, self).__init__(**kwargs)
assert (exp_factor >= 1)
self.residual = (in_channels == out_channels) and (strides == 1) and use_skip
self.use_exp_conv = exp_factor > 1
mid_channels = exp_factor * in_channels
with self.name_scope():
if self.use_exp_conv:
self.exp_conv = conv1x1_block(
in_channels=in_channels,
out_channels=mid_channels,
bn_use_global_stats=bn_use_global_stats,
activation=activation)
if use_kernel3:
self.conv1 = dwconv3x3_block(
in_channels=mid_channels,
out_channels=mid_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats,
activation=activation)
else:
self.conv1 = dwconv5x5_block(
in_channels=mid_channels,
out_channels=mid_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats,
activation=activation)
self.conv2 = conv1x1_block(
in_channels=mid_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats,
activation=None)
def hybrid_forward(self, F, x):
if self.residual:
identity = x
if self.use_exp_conv:
x = self.exp_conv(x)
x = self.conv1(x)
x = self.conv2(x)
if self.residual:
x = x + identity
return x
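# A minimal illustrative sketch (not part of the original module) of the
# inverted-residual flow above: channels expand to `exp_factor * in_channels`
# for the depthwise conv and are then projected to `out_channels`; the identity
# is added only when shapes are preserved. The helper name is hypothetical.
def _spnas_unit_flow_sketch(in_channels=24, out_channels=24, strides=1,
                            exp_factor=6, use_skip=True):
    mid_channels = exp_factor * in_channels  # 144 for the defaults
    residual = (in_channels == out_channels) and (strides == 1) and use_skip
    return mid_channels, residual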
class SPNASInitBlock(HybridBlock):
"""
Single-Path NASNet specific initial block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
mid_channels : int
Number of middle channels.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
"""
def __init__(self,
in_channels,
out_channels,
mid_channels,
bn_use_global_stats=False,
**kwargs):
super(SPNASInitBlock, self).__init__(**kwargs)
with self.name_scope():
self.conv1 = conv3x3_block(
in_channels=in_channels,
out_channels=mid_channels,
strides=2,
bn_use_global_stats=bn_use_global_stats)
self.conv2 = SPNASUnit(
in_channels=mid_channels,
out_channels=out_channels,
strides=1,
use_kernel3=True,
exp_factor=1,
use_skip=False,
bn_use_global_stats=bn_use_global_stats)
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.conv2(x)
return x
class SPNASFinalBlock(HybridBlock):
"""
Single-Path NASNet specific final block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
mid_channels : int
Number of middle channels.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
"""
def __init__(self,
in_channels,
out_channels,
mid_channels,
bn_use_global_stats=False,
**kwargs):
super(SPNASFinalBlock, self).__init__(**kwargs)
with self.name_scope():
self.conv1 = SPNASUnit(
in_channels=in_channels,
out_channels=mid_channels,
strides=1,
use_kernel3=True,
exp_factor=6,
use_skip=False,
bn_use_global_stats=bn_use_global_stats)
self.conv2 = conv1x1_block(
in_channels=mid_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats)
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.conv2(x)
return x
class SPNASNet(HybridBlock):
"""
Single-Path NASNet model from 'Single-Path NAS: Designing Hardware-Efficient ConvNets in less than 4 Hours,'
https://arxiv.org/abs/1904.02877.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : list of 2 int
Number of output channels for the initial unit.
final_block_channels : list of 2 int
Number of output channels for the final block of the feature extractor.
kernels3 : list of list of int/bool
Whether to use a 3x3 (instead of 5x5) kernel for each unit.
exp_factors : list of list of int
Expansion factor for each unit.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
final_block_channels,
kernels3,
exp_factors,
bn_use_global_stats=False,
in_channels=3,
in_size=(224, 224),
classes=1000,
**kwargs):
super(SPNASNet, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
self.features.add(SPNASInitBlock(
in_channels=in_channels,
out_channels=init_block_channels[1],
mid_channels=init_block_channels[0],
bn_use_global_stats=bn_use_global_stats))
in_channels = init_block_channels[1]
for i, channels_per_stage in enumerate(channels):
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
with stage.name_scope():
for j, out_channels in enumerate(channels_per_stage):
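# Downsampling rule: stages 1-3 stride at their first unit, while stage 4
# (i == 3) strides at its midpoint instead, i.e. between its 96- and
# 192-channel halves.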
strides = 2 if ((j == 0) and (i != 3)) or\
((j == len(channels_per_stage) // 2) and (i == 3)) else 1
use_kernel3 = kernels3[i][j] == 1
exp_factor = exp_factors[i][j]
stage.add(SPNASUnit(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
use_kernel3=use_kernel3,
exp_factor=exp_factor,
bn_use_global_stats=bn_use_global_stats))
in_channels = out_channels
self.features.add(stage)
self.features.add(SPNASFinalBlock(
in_channels=in_channels,
out_channels=final_block_channels[1],
mid_channels=final_block_channels[0],
bn_use_global_stats=bn_use_global_stats))
in_channels = final_block_channels[1]
self.features.add(nn.AvgPool2D(
pool_size=7,
strides=1))
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
self.output.add(nn.Dense(
units=classes,
in_units=in_channels))
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_spnasnet(model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create Single-Path NASNet model with specific parameters.
Parameters:
----------
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
init_block_channels = [32, 16]
final_block_channels = [320, 1280]
channels = [[24, 24, 24], [40, 40, 40, 40], [80, 80, 80, 80], [96, 96, 96, 96, 192, 192, 192, 192]]
kernels3 = [[1, 1, 1], [0, 1, 1, 1], [0, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0]]
exp_factors = [[3, 3, 3], [6, 3, 3, 3], [6, 3, 3, 3], [6, 3, 3, 3, 6, 6, 6, 6]]
net = SPNASNet(
channels=channels,
init_block_channels=init_block_channels,
final_block_channels=final_block_channels,
kernels3=kernels3,
exp_factors=exp_factors,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def spnasnet(**kwargs):
"""
Single-Path NASNet model from 'Single-Path NAS: Designing Hardware-Efficient ConvNets in less than 4 Hours,'
https://arxiv.org/abs/1904.02877.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_spnasnet(model_name="spnasnet", **kwargs)
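# Usage sketch (assuming `import mxnet as mx`); the expected output shape
# mirrors the checks in _test() below:
#
#   net = spnasnet()
#   net.initialize()
#   y = net(mx.nd.zeros((1, 3, 224, 224)))  # y.shape == (1, 1000)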
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
spnasnet,
]
for model in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != spnasnet or weight_count == 4421616)
x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
y = net(x)
assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
| 12,626
| 33.405995
| 115
|
py
|
imgclsmob
|
imgclsmob-master/gluon/gluoncv2/models/fastscnn.py
|
"""
Fast-SCNN for image segmentation, implemented in Gluon.
Original paper: 'Fast-SCNN: Fast Semantic Segmentation Network,' https://arxiv.org/abs/1902.04502.
"""
__all__ = ['FastSCNN', 'fastscnn_cityscapes']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from mxnet.gluon.contrib.nn import Identity
from .common import conv1x1, conv1x1_block, conv3x3_block, dwconv3x3_block, dwsconv3x3_block, Concurrent,\
InterpolationBlock
class Stem(HybridBlock):
"""
Fast-SCNN specific stem block.
Parameters:
----------
in_channels : int
Number of input channels.
channels : tuple/list of 3 int
Number of output channels.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
"""
def __init__(self,
in_channels,
channels,
bn_use_global_stats=False,
bn_cudnn_off=False,
**kwargs):
super(Stem, self).__init__(**kwargs)
assert (len(channels) == 3)
with self.name_scope():
self.conv1 = conv3x3_block(
in_channels=in_channels,
out_channels=channels[0],
strides=2,
padding=0,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off)
self.conv2 = dwsconv3x3_block(
in_channels=channels[0],
out_channels=channels[1],
strides=2,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off)
self.conv3 = dwsconv3x3_block(
in_channels=channels[1],
out_channels=channels[2],
strides=2,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off)
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
return x
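# The stem halves the resolution three times (one stride-2 conv plus two
# stride-2 depthwise-separable convs), so a 1024x2048 input leaves the stem at
# roughly 1/8 resolution (128x256 here; the padding=0 first conv trims a pixel
# before rounding).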
class LinearBottleneck(HybridBlock):
"""
Fast-SCNN specific Linear Bottleneck layer from MobileNetV2.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the second convolution layer.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
"""
def __init__(self,
in_channels,
out_channels,
strides,
bn_use_global_stats=False,
bn_cudnn_off=False,
**kwargs):
super(LinearBottleneck, self).__init__(**kwargs)
self.residual = (in_channels == out_channels) and (strides == 1)
mid_channels = in_channels * 6
with self.name_scope():
self.conv1 = conv1x1_block(
in_channels=in_channels,
out_channels=mid_channels,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off)
self.conv2 = dwconv3x3_block(
in_channels=mid_channels,
out_channels=mid_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off)
self.conv3 = conv1x1_block(
in_channels=mid_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
activation=None)
def hybrid_forward(self, F, x):
if self.residual:
identity = x
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
if self.residual:
x = x + identity
return x
class FeatureExtractor(HybridBlock):
"""
Fast-SCNN specific feature extractor/encoder.
Parameters:
----------
in_channels : int
Number of input channels.
channels : list of list of int
Number of output channels for each unit.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
"""
def __init__(self,
in_channels,
channels,
bn_use_global_stats=False,
bn_cudnn_off=False,
**kwargs):
super(FeatureExtractor, self).__init__(**kwargs)
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
for i, channels_per_stage in enumerate(channels):
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
with stage.name_scope():
for j, out_channels in enumerate(channels_per_stage):
strides = 2 if (j == 0) and (i != len(channels) - 1) else 1
stage.add(LinearBottleneck(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off))
in_channels = out_channels
self.features.add(stage)
def hybrid_forward(self, F, x):
x = self.features(x)
return x
class PoolingBranch(HybridBlock):
"""
Fast-SCNN specific pooling branch.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
in_size : tuple of 2 int or None
Spatial size of input image.
down_size : int
Spatial size of downscaled image.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
"""
def __init__(self,
in_channels,
out_channels,
in_size,
down_size,
bn_use_global_stats=False,
bn_cudnn_off=False,
**kwargs):
super(PoolingBranch, self).__init__(**kwargs)
self.in_size = in_size
self.down_size = down_size
with self.name_scope():
self.conv = conv1x1_block(
in_channels=in_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off)
self.up = InterpolationBlock(
scale_factor=None,
out_size=in_size)
def hybrid_forward(self, F, x):
in_size = self.in_size if self.in_size is not None else x.shape[2:]
x = F.contrib.AdaptiveAvgPooling2D(x, output_size=self.down_size)
x = self.conv(x)
x = self.up(x, in_size)
return x
class FastPyramidPooling(HybridBlock):
"""
Fast-SCNN specific fast pyramid pooling block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
in_size : tuple of 2 int or None
Spatial size of input image.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
"""
def __init__(self,
in_channels,
out_channels,
in_size,
bn_use_global_stats=False,
bn_cudnn_off=False,
**kwargs):
super(FastPyramidPooling, self).__init__(**kwargs)
down_sizes = [1, 2, 3, 6]
mid_channels = in_channels // 4
with self.name_scope():
self.branches = Concurrent()
self.branches.add(Identity())
for down_size in down_sizes:
self.branches.add(PoolingBranch(
in_channels=in_channels,
out_channels=mid_channels,
in_size=in_size,
down_size=down_size,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off))
self.conv = conv1x1_block(
in_channels=(in_channels * 2),
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off)
def hybrid_forward(self, F, x):
x = self.branches(x)
x = self.conv(x)
return x
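# Channel arithmetic (for illustration): with in_channels=128 the identity
# branch keeps 128 maps and each of the four pooled branches (output sizes 1,
# 2, 3, 6) contributes 128 // 4 = 32, so the concatenation carries
# 128 + 4 * 32 = 256 = 2 * in_channels, which the final 1x1 conv maps to
# out_channels.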
class FeatureFusion(HybridBlock):
"""
Fast-SCNN specific feature fusion block.
Parameters:
----------
x_in_channels : int
Number of high resolution (x) input channels.
y_in_channels : int
Number of low resolution (y) input channels.
out_channels : int
Number of output channels.
x_in_size : tuple of 2 int or None
Spatial size of high resolution (x) input image.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
"""
def __init__(self,
x_in_channels,
y_in_channels,
out_channels,
x_in_size,
bn_use_global_stats=False,
bn_cudnn_off=False,
**kwargs):
super(FeatureFusion, self).__init__(**kwargs)
self.x_in_size = x_in_size
with self.name_scope():
self.up = InterpolationBlock(
scale_factor=None,
out_size=x_in_size)
self.low_dw_conv = dwconv3x3_block(
in_channels=y_in_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off)
self.low_pw_conv = conv1x1_block(
in_channels=out_channels,
out_channels=out_channels,
use_bias=True,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
activation=None)
self.high_conv = conv1x1_block(
in_channels=x_in_channels,
out_channels=out_channels,
use_bias=True,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
activation=None)
self.activ = nn.Activation("relu")
def hybrid_forward(self, F, x, y):
x_in_size = self.x_in_size if self.x_in_size is not None else x.shape[2:]
y = self.up(y, x_in_size)
y = self.low_dw_conv(y)
y = self.low_pw_conv(y)
x = self.high_conv(x)
out = x + y
return self.activ(out)
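# Fusion sketch: the low-resolution input y (1/32 of the image after the
# feature extractor) is upsampled to the 1/8-resolution size of the stem
# output x, refined by a depthwise + pointwise pair, and summed with a
# 1x1-projected x before the ReLU.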
class Head(HybridBlock):
"""
Fast-SCNN head (classifier) block.
Parameters:
----------
in_channels : int
Number of input channels.
classes : int
Number of classification classes.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
"""
def __init__(self,
in_channels,
classes,
bn_use_global_stats=False,
bn_cudnn_off=False,
**kwargs):
super(Head, self).__init__(**kwargs)
with self.name_scope():
self.conv1 = dwsconv3x3_block(
in_channels=in_channels,
out_channels=in_channels,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off)
self.conv2 = dwsconv3x3_block(
in_channels=in_channels,
out_channels=in_channels,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off)
self.dropout = nn.Dropout(rate=0.1)
self.conv3 = conv1x1(
in_channels=in_channels,
out_channels=classes,
use_bias=True)
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.dropout(x)
x = self.conv3(x)
return x
class AuxHead(HybridBlock):
"""
Fast-SCNN auxiliary (after stem) head (classifier) block.
Parameters:
----------
in_channels : int
Number of input channels.
mid_channels : int
Number of middle channels.
classes : int
Number of classification classes.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
"""
def __init__(self,
in_channels,
mid_channels,
classes,
bn_use_global_stats=False,
bn_cudnn_off=False,
**kwargs):
super(AuxHead, self).__init__(**kwargs)
with self.name_scope():
self.conv1 = conv3x3_block(
in_channels=in_channels,
out_channels=mid_channels,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off)
self.dropout = nn.Dropout(rate=0.1)
self.conv2 = conv1x1(
in_channels=mid_channels,
out_channels=classes,
use_bias=True)
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.dropout(x)
x = self.conv2(x)
return x
class FastSCNN(HybridBlock):
"""
Fast-SCNN from 'Fast-SCNN: Fast Semantic Segmentation Network,' https://arxiv.org/abs/1902.04502.
Parameters:
----------
aux : bool, default False
Whether to output an auxiliary result.
fixed_size : bool, default True
Whether to expect fixed spatial size of input image.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (1024, 1024)
Spatial size of the expected input image.
classes : int, default 19
Number of segmentation classes.
"""
def __init__(self,
aux=False,
fixed_size=True,
bn_use_global_stats=False,
bn_cudnn_off=False,
in_channels=3,
in_size=(1024, 1024),
classes=19,
**kwargs):
super(FastSCNN, self).__init__(**kwargs)
assert (in_channels > 0)
assert ((in_size[0] % 32 == 0) and (in_size[1] % 32 == 0))
self.in_size = in_size
self.classes = classes
self.aux = aux
self.fixed_size = fixed_size
with self.name_scope():
stem_channels = [32, 48, 64]
self.stem = Stem(
in_channels=in_channels,
channels=stem_channels,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off)
in_channels = stem_channels[-1]
feature_channels = [[64, 64, 64], [96, 96, 96], [128, 128, 128]]
self.features = FeatureExtractor(
in_channels=in_channels,
channels=feature_channels,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off)
pool_out_size = (in_size[0] // 32, in_size[1] // 32) if fixed_size else None
self.pool = FastPyramidPooling(
in_channels=feature_channels[-1][-1],
out_channels=feature_channels[-1][-1],
in_size=pool_out_size,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off)
fusion_out_size = (in_size[0] // 8, in_size[1] // 8) if fixed_size else None
fusion_out_channels = 128
self.fusion = FeatureFusion(
x_in_channels=stem_channels[-1],
y_in_channels=feature_channels[-1][-1],
out_channels=fusion_out_channels,
x_in_size=fusion_out_size,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off)
self.head = Head(
in_channels=fusion_out_channels,
classes=classes,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off)
self.up = InterpolationBlock(
scale_factor=None,
out_size=in_size)
if self.aux:
self.aux_head = AuxHead(
in_channels=64,
mid_channels=64,
classes=classes,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off)
def hybrid_forward(self, F, x):
in_size = self.in_size if self.fixed_size else x.shape[2:]
x = self.stem(x)
y = self.features(x)
y = self.pool(y)
y = self.fusion(x, y)
y = self.head(y)
y = self.up(y, in_size)
if self.aux:
x = self.aux_head(x)
x = self.up(x, in_size)
return y, x
return y
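# Forward-pass summary: stem (1/8) -> feature extractor + pyramid pooling
# (1/32) -> fusion back at 1/8 -> head -> upsampling to the input size; with
# aux=True a second classifier runs directly on the stem output and is
# upsampled the same way.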
def get_fastscnn(model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create Fast-SCNN model with specific parameters.
Parameters:
----------
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
net = FastSCNN(
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx,
ignore_extra=True)
return net
def fastscnn_cityscapes(classes=19, aux=True, **kwargs):
"""
Fast-SCNN model for Cityscapes from 'Fast-SCNN: Fast Semantic Segmentation Network,'
https://arxiv.org/abs/1902.04502.
Parameters:
----------
classes : int, default 19
Number of segmentation classes.
aux : bool, default True
Whether to output an auxiliary result.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_fastscnn(classes=classes, aux=aux, model_name="fastscnn_cityscapes", **kwargs)
def _test():
import numpy as np
import mxnet as mx
# in_size = (1024, 1024)
in_size = (1024, 2048)
aux = True
pretrained = False
fixed_size = False
models = [
(fastscnn_cityscapes, 19),
]
for model, classes in models:
net = model(pretrained=pretrained, in_size=in_size, fixed_size=fixed_size, aux=aux)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
if aux:
assert (model != fastscnn_cityscapes or weight_count == 1176278)
else:
assert (model != fastscnn_cityscapes or weight_count == 1138051)
x = mx.nd.zeros((1, 3, in_size[0], in_size[1]), ctx=ctx)
ys = net(x)
y = ys[0] if aux else ys
assert ((y.shape[0] == x.shape[0]) and (y.shape[1] == classes) and (y.shape[2] == x.shape[2]) and
(y.shape[3] == x.shape[3]))
if __name__ == "__main__":
_test()
| 21,678
| 33.520701
| 115
|
py
|
imgclsmob
|
imgclsmob-master/gluon/gluoncv2/models/res2net.py
|
"""
Res2Net for ImageNet-1K, implemented in Gluon.
Original paper: 'Res2Net: A New Multi-scale Backbone Architecture,' https://arxiv.org/abs/1904.01169.
"""
__all__ = ['Res2Net', 'res2net50_w14_s8', 'res2net50_w26_s8']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from mxnet.gluon.contrib.nn import Identity
from .common import conv1x1, conv3x3, conv1x1_block
from .resnet import ResInitBlock
from .preresnet import PreResActivation
class HierarchicalConcurrent(nn.HybridSequential):
"""
A container for hierarchical concatenation of blocks with parameters.
Parameters:
----------
axis : int, default 1
The axis on which to concatenate the outputs.
multi_input : bool, default False
Whether the input is split into chunks, one per block.
"""
def __init__(self,
axis=1,
multi_input=False,
**kwargs):
super(HierarchicalConcurrent, self).__init__(**kwargs)
self.axis = axis
self.multi_input = multi_input
def hybrid_forward(self, F, x):
out = []
y_prev = None
if self.multi_input:
xs = F.split(x, axis=self.axis, num_outputs=len(self._children.values()))
for i, block in enumerate(self._children.values()):
if self.multi_input:
y = block(xs[i])
else:
y = block(x)
if y_prev is not None:
y = y + y_prev
out.append(y)
y_prev = y
out = F.concat(*out, dim=self.axis)
return out
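# Hierarchical concatenation sketch: with multi_input=True and blocks
# (b0, b1, b2), an input split into chunks (x0, x1, x2) yields
#   y0 = b0(x0), y1 = b1(x1) + y0, y2 = b2(x2) + y1,
# concatenated along `axis`; the running sum is what gives Res2Net its
# multi-scale receptive fields.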
class Res2NetUnit(HybridBlock):
"""
Res2Net unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the branch convolution layers.
width : int
Width of filters.
scale : int
Number of scales.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels,
out_channels,
strides,
width,
scale,
bn_use_global_stats,
**kwargs):
super(Res2NetUnit, self).__init__(**kwargs)
self.scale = scale
downsample = (strides != 1)
self.resize_identity = (in_channels != out_channels) or downsample
mid_channels = width * scale
brn_channels = width
with self.name_scope():
self.reduce_conv = conv1x1_block(
in_channels=in_channels,
out_channels=mid_channels,
bn_use_global_stats=bn_use_global_stats)
self.branches = HierarchicalConcurrent(axis=1, multi_input=True, prefix="")
if downsample:
self.branches.add(conv1x1(
in_channels=brn_channels,
out_channels=brn_channels,
strides=strides))
else:
self.branches.add(Identity())
for i in range(scale - 1):
self.branches.add(conv3x3(
in_channels=brn_channels,
out_channels=brn_channels,
strides=strides))
self.preactiv = PreResActivation(in_channels=mid_channels)
self.merge_conv = conv1x1_block(
in_channels=mid_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats,
activation=None)
if self.resize_identity:
self.identity_conv = conv1x1_block(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats,
activation=None)
self.activ = nn.Activation("relu")
def hybrid_forward(self, F, x):
if self.resize_identity:
identity = self.identity_conv(x)
else:
identity = x
y = self.reduce_conv(x)
y = self.branches(y)
y = self.preactiv(y)
y = self.merge_conv(y)
y = y + identity
y = self.activ(y)
return y
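# Width arithmetic (illustrative): res2net50_w14_s8 uses
# mid_channels = width * scale = 14 * 8 = 112 per unit, split into 8 branches
# of 14 channels each before merging back to the bottleneck output width.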
class Res2Net(HybridBlock):
"""
Res2Net model from 'Res2Net: A New Multi-scale Backbone Architecture,' https://arxiv.org/abs/1904.01169.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
width : int
Width of filters.
scale : int
Number of scales.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
width,
scale,
bn_use_global_stats=False,
in_channels=3,
in_size=(224, 224),
classes=1000,
**kwargs):
super(Res2Net, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
self.features.add(ResInitBlock(
in_channels=in_channels,
out_channels=init_block_channels,
bn_use_global_stats=bn_use_global_stats))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
with stage.name_scope():
for j, out_channels in enumerate(channels_per_stage):
strides = 2 if (j == 0) and (i != 0) else 1
stage.add(Res2NetUnit(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
width=width,
scale=scale,
bn_use_global_stats=bn_use_global_stats))
in_channels = out_channels
self.features.add(stage)
self.features.add(nn.AvgPool2D(
pool_size=7,
strides=1))
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
self.output.add(nn.Dense(
units=classes,
in_units=in_channels))
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_res2net(blocks,
width,
scale,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create Res2Net model with specific parameters.
Parameters:
----------
blocks : int
Number of blocks.
width : int
Width of filters.
scale : int
Number of scales.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
bottleneck = True
if blocks == 50:
layers = [3, 4, 6, 3]
elif blocks == 101:
layers = [3, 4, 23, 3]
elif blocks == 152:
layers = [3, 8, 36, 3]
else:
raise ValueError("Unsupported Res2Net with number of blocks: {}".format(blocks))
assert (sum(layers) * 3 + 2 == blocks)
init_block_channels = 64
channels_per_layers = [64, 128, 256, 512]
if bottleneck:
bottleneck_factor = 4
channels_per_layers = [ci * bottleneck_factor for ci in channels_per_layers]
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
net = Res2Net(
channels=channels,
init_block_channels=init_block_channels,
width=width,
scale=scale,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def res2net50_w14_s8(**kwargs):
"""
Res2Net-50 (14wx8s) model from 'Res2Net: A New Multi-scale Backbone Architecture,' https://arxiv.org/abs/1904.01169.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_res2net(blocks=50, width=14, scale=8, model_name="res2net50_w14_s8", **kwargs)
def res2net50_w26_s8(**kwargs):
"""
Res2Net-50 (26wx8s) model from 'Res2Net: A New Multi-scale Backbone Architecture,' https://arxiv.org/abs/1904.01169.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_res2net(blocks=50, width=26, scale=8, model_name="res2net50_w26_s8", **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
res2net50_w14_s8,
res2net50_w26_s8,
]
for model in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != res2net50_w14_s8 or weight_count == 8231732)
assert (model != res2net50_w26_s8 or weight_count == 11432660)
x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
y = net(x)
assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
| 11,307
| 31.401146
| 120
|
py
|
imgclsmob
|
imgclsmob-master/gluon/gluoncv2/models/darknet.py
|
"""
DarkNet for ImageNet-1K, implemented in Gluon.
Original source: 'Darknet: Open source neural networks in c,' https://github.com/pjreddie/darknet.
"""
__all__ = ['DarkNet', 'darknet_ref', 'darknet_tiny', 'darknet19']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1_block, conv3x3_block
def dark_convYxY(in_channels,
out_channels,
bn_use_global_stats,
alpha,
pointwise):
"""
DarkNet unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
alpha : float
Slope coefficient for Leaky ReLU activation.
pointwise : bool
Whether to use 1x1 (pointwise) convolution instead of 3x3 convolution.
"""
if pointwise:
return conv1x1_block(
in_channels=in_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats,
activation=nn.LeakyReLU(alpha=alpha))
else:
return conv3x3_block(
in_channels=in_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats,
activation=nn.LeakyReLU(alpha=alpha))
class DarkNet(HybridBlock):
"""
DarkNet model from 'Darknet: Open source neural networks in c,' https://github.com/pjreddie/darknet.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
odd_pointwise : bool
Whether pointwise convolution layer is used for each odd unit.
avg_pool_size : int
Window size of the final average pooling.
cls_activ : bool
Whether classification convolution layer uses an activation.
alpha : float, default 0.1
Slope coefficient for Leaky ReLU activation.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
odd_pointwise,
avg_pool_size,
cls_activ,
alpha=0.1,
bn_use_global_stats=False,
in_channels=3,
in_size=(224, 224),
classes=1000,
**kwargs):
super(DarkNet, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
for i, channels_per_stage in enumerate(channels):
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
with stage.name_scope():
for j, out_channels in enumerate(channels_per_stage):
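# Pointwise convolutions appear only in stages with more than one unit, on
# alternating units; odd_pointwise selects whether the odd-numbered (1-based)
# units are the 1x1 ones.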
stage.add(dark_convYxY(
in_channels=in_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats,
alpha=alpha,
pointwise=(len(channels_per_stage) > 1) and not (((j + 1) % 2 == 1) ^ odd_pointwise)))
in_channels = out_channels
if i != len(channels) - 1:
stage.add(nn.MaxPool2D(
pool_size=2,
strides=2))
self.features.add(stage)
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Conv2D(
channels=classes,
kernel_size=1,
in_channels=in_channels))
if cls_activ:
self.output.add(nn.LeakyReLU(alpha=alpha))
self.output.add(nn.AvgPool2D(
pool_size=avg_pool_size,
strides=1))
self.output.add(nn.Flatten())
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_darknet(version,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create DarkNet model with specific parameters.
Parameters:
----------
version : str
Version of DarkNet ('ref', 'tiny' or '19').
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
if version == 'ref':
channels = [[16], [32], [64], [128], [256], [512], [1024]]
odd_pointwise = False
avg_pool_size = 3
cls_activ = True
elif version == 'tiny':
channels = [[16], [32], [16, 128, 16, 128], [32, 256, 32, 256], [64, 512, 64, 512, 128]]
odd_pointwise = True
avg_pool_size = 14
cls_activ = False
elif version == '19':
channels = [[32], [64], [128, 64, 128], [256, 128, 256], [512, 256, 512, 256, 512],
[1024, 512, 1024, 512, 1024]]
odd_pointwise = False
avg_pool_size = 7
cls_activ = False
else:
raise ValueError("Unsupported DarkNet version {}".format(version))
net = DarkNet(
channels=channels,
odd_pointwise=odd_pointwise,
avg_pool_size=avg_pool_size,
cls_activ=cls_activ,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def darknet_ref(**kwargs):
"""
DarkNet 'Reference' model from 'Darknet: Open source neural networks in c,' https://github.com/pjreddie/darknet.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_darknet(version="ref", model_name="darknet_ref", **kwargs)
def darknet_tiny(**kwargs):
"""
DarkNet Tiny model from 'Darknet: Open source neural networks in c,' https://github.com/pjreddie/darknet.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_darknet(version="tiny", model_name="darknet_tiny", **kwargs)
def darknet19(**kwargs):
"""
DarkNet-19 model from 'Darknet: Open source neural networks in c,' https://github.com/pjreddie/darknet.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_darknet(version="19", model_name="darknet19", **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
darknet_ref,
darknet_tiny,
darknet19,
]
for model in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != darknet_ref or weight_count == 7319416)
assert (model != darknet_tiny or weight_count == 1042104)
assert (model != darknet19 or weight_count == 20842376)
x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
y = net(x)
assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
| 9,154
| 32.17029
| 116
|
py
|
imgclsmob
|
imgclsmob-master/gluon/gluoncv2/models/ror_cifar.py
|
"""
RoR-3 for CIFAR/SVHN, implemented in Gluon.
Original paper: 'Residual Networks of Residual Networks: Multilevel Residual Networks,'
https://arxiv.org/abs/1608.02908.
"""
__all__ = ['CIFARRoR', 'ror3_56_cifar10', 'ror3_56_cifar100', 'ror3_56_svhn', 'ror3_110_cifar10', 'ror3_110_cifar100',
'ror3_110_svhn', 'ror3_164_cifar10', 'ror3_164_cifar100', 'ror3_164_svhn']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1_block, conv3x3_block
class RoRBlock(HybridBlock):
"""
RoR-3 block for residual path in residual unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
dropout_rate : float
Parameter of Dropout layer. Fraction of the input units to drop.
"""
def __init__(self,
in_channels,
out_channels,
bn_use_global_stats,
dropout_rate,
**kwargs):
super(RoRBlock, self).__init__(**kwargs)
self.use_dropout = (dropout_rate != 0.0)
with self.name_scope():
self.conv1 = conv3x3_block(
in_channels=in_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats)
self.conv2 = conv3x3_block(
in_channels=out_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats,
activation=None)
if self.use_dropout:
self.dropout = nn.Dropout(rate=dropout_rate)
def hybrid_forward(self, F, x):
x = self.conv1(x)
if self.use_dropout:
x = self.dropout(x)
x = self.conv2(x)
return x
class RoRResUnit(HybridBlock):
"""
RoR-3 residual unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
dropout_rate : float
Parameter of Dropout layer. Fraction of the input units to drop.
last_activate : bool, default True
Whether to activate the output.
"""
def __init__(self,
in_channels,
out_channels,
bn_use_global_stats,
dropout_rate,
last_activate=True,
**kwargs):
super(RoRResUnit, self).__init__(**kwargs)
self.last_activate = last_activate
self.resize_identity = (in_channels != out_channels)
with self.name_scope():
self.body = RoRBlock(
in_channels=in_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats,
dropout_rate=dropout_rate)
if self.resize_identity:
self.identity_conv = conv1x1_block(
in_channels=in_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats,
activation=None)
self.activ = nn.Activation("relu")
def hybrid_forward(self, F, x):
if self.resize_identity:
identity = self.identity_conv(x)
else:
identity = x
x = self.body(x)
x = x + identity
if self.last_activate:
x = self.activ(x)
return x
class RoRResStage(HybridBlock):
"""
RoR-3 residual stage.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels_list : list of int
Number of output channels for each unit.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
dropout_rate : float
Parameter of Dropout layer. Fraction of the input units to drop.
downsample : bool, default True
Whether to downsample the output.
"""
def __init__(self,
in_channels,
out_channels_list,
bn_use_global_stats,
dropout_rate,
downsample=True,
**kwargs):
super(RoRResStage, self).__init__(**kwargs)
self.downsample = downsample
with self.name_scope():
self.shortcut = conv1x1_block(
in_channels=in_channels,
out_channels=out_channels_list[-1],
bn_use_global_stats=bn_use_global_stats,
activation=None)
self.units = nn.HybridSequential(prefix="")
with self.units.name_scope():
for i, out_channels in enumerate(out_channels_list):
last_activate = (i != len(out_channels_list) - 1)
self.units.add(RoRResUnit(
in_channels=in_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats,
dropout_rate=dropout_rate,
last_activate=last_activate))
in_channels = out_channels
if self.downsample:
self.activ = nn.Activation("relu")
self.pool = nn.MaxPool2D(
pool_size=2,
strides=2,
padding=0)
def hybrid_forward(self, F, x):
identity = self.shortcut(x)
x = self.units(x)
x = x + identity
if self.downsample:
x = self.activ(x)
x = self.pool(x)
return x
class RoRResBody(HybridBlock):
"""
RoR-3 residual body (main feature path).
Parameters:
----------
in_channels : int
Number of input channels.
out_channels_lists : list of list of int
Number of output channels for each stage.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
dropout_rate : float
Parameter of Dropout layer. Fraction of the input units to drop.
"""
def __init__(self,
in_channels,
out_channels_lists,
bn_use_global_stats,
dropout_rate,
**kwargs):
super(RoRResBody, self).__init__(**kwargs)
with self.name_scope():
self.shortcut = conv1x1_block(
in_channels=in_channels,
out_channels=out_channels_lists[-1][-1],
strides=4,
bn_use_global_stats=bn_use_global_stats,
activation=None)
self.stages = nn.HybridSequential(prefix="")
with self.stages.name_scope():
for i, channels_per_stage in enumerate(out_channels_lists):
downsample = (i != len(out_channels_lists) - 1)
self.stages.add(RoRResStage(
in_channels=in_channels,
out_channels_list=channels_per_stage,
bn_use_global_stats=bn_use_global_stats,
dropout_rate=dropout_rate,
downsample=downsample))
in_channels = channels_per_stage[-1]
self.activ = nn.Activation("relu")
def hybrid_forward(self, F, x):
identity = self.shortcut(x)
x = self.stages(x)
x = x + identity
x = self.activ(x)
return x
class CIFARRoR(HybridBlock):
"""
RoR-3 model for CIFAR from 'Residual Networks of Residual Networks: Multilevel Residual Networks,'
https://arxiv.org/abs/1608.02908.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
dropout_rate : float, default 0.0
Parameter of Dropout layer. Fraction of the input units to drop.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (32, 32)
Spatial size of the expected input image.
classes : int, default 10
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
bn_use_global_stats=False,
dropout_rate=0.0,
in_channels=3,
in_size=(32, 32),
classes=10,
**kwargs):
super(CIFARRoR, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
self.features.add(conv3x3_block(
in_channels=in_channels,
out_channels=init_block_channels,
bn_use_global_stats=bn_use_global_stats))
in_channels = init_block_channels
self.features.add(RoRResBody(
in_channels=in_channels,
out_channels_lists=channels,
bn_use_global_stats=bn_use_global_stats,
dropout_rate=dropout_rate))
in_channels = channels[-1][-1]
self.features.add(nn.AvgPool2D(
pool_size=8,
strides=1))
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
self.output.add(nn.Dense(
units=classes,
in_units=in_channels))
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_ror_cifar(classes,
blocks,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create RoR-3 model for CIFAR with specific parameters.
Parameters:
----------
classes : int
Number of classification classes.
blocks : int
Number of blocks.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
assert (classes in [10, 100])
assert ((blocks - 8) % 6 == 0)
layers = [(blocks - 8) // 6] * 3
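# Depth arithmetic: each of the three stages holds (blocks - 8) // 6 two-conv
# residual units, e.g. blocks=56 -> 8 units per stage, blocks=110 -> 17,
# blocks=164 -> 26.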
channels_per_layers = [16, 32, 64]
init_block_channels = 16
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
net = CIFARRoR(
channels=channels,
init_block_channels=init_block_channels,
classes=classes,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def ror3_56_cifar10(classes=10, **kwargs):
"""
RoR-3-56 model for CIFAR-10 from 'Residual Networks of Residual Networks: Multilevel Residual Networks,'
https://arxiv.org/abs/1608.02908.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_ror_cifar(classes=classes, blocks=56, model_name="ror3_56_cifar10", **kwargs)
def ror3_56_cifar100(classes=100, **kwargs):
"""
RoR-3-56 model for CIFAR-100 from 'Residual Networks of Residual Networks: Multilevel Residual Networks,'
https://arxiv.org/abs/1608.02908.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_ror_cifar(classes=classes, blocks=56, model_name="ror3_56_cifar100", **kwargs)
def ror3_56_svhn(classes=10, **kwargs):
"""
RoR-3-56 model for SVHN from 'Residual Networks of Residual Networks: Multilevel Residual Networks,'
https://arxiv.org/abs/1608.02908.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_ror_cifar(classes=classes, blocks=56, model_name="ror3_56_svhn", **kwargs)
def ror3_110_cifar10(classes=10, **kwargs):
"""
RoR-3-110 model for CIFAR-10 from 'Residual Networks of Residual Networks: Multilevel Residual Networks,'
https://arxiv.org/abs/1608.02908.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_ror_cifar(classes=classes, blocks=110, model_name="ror3_110_cifar10", **kwargs)
def ror3_110_cifar100(classes=100, **kwargs):
"""
RoR-3-110 model for CIFAR-100 from 'Residual Networks of Residual Networks: Multilevel Residual Networks,'
https://arxiv.org/abs/1608.02908.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_ror_cifar(classes=classes, blocks=110, model_name="ror3_110_cifar100", **kwargs)
def ror3_110_svhn(classes=10, **kwargs):
"""
RoR-3-110 model for SVHN from 'Residual Networks of Residual Networks: Multilevel Residual Networks,'
https://arxiv.org/abs/1608.02908.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_ror_cifar(classes=classes, blocks=110, model_name="ror3_110_svhn", **kwargs)
def ror3_164_cifar10(classes=10, **kwargs):
"""
RoR-3-164 model for CIFAR-10 from 'Residual Networks of Residual Networks: Multilevel Residual Networks,'
https://arxiv.org/abs/1608.02908.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_ror_cifar(classes=classes, blocks=164, model_name="ror3_164_cifar10", **kwargs)
def ror3_164_cifar100(classes=100, **kwargs):
"""
RoR-3-164 model for CIFAR-100 from 'Residual Networks of Residual Networks: Multilevel Residual Networks,'
https://arxiv.org/abs/1608.02908.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_ror_cifar(classes=classes, blocks=164, model_name="ror3_164_cifar100", **kwargs)
def ror3_164_svhn(classes=10, **kwargs):
"""
RoR-3-164 model for SVHN from 'Residual Networks of Residual Networks: Multilevel Residual Networks,'
https://arxiv.org/abs/1608.02908.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_ror_cifar(classes=classes, blocks=164, model_name="ror3_164_svhn", **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
(ror3_56_cifar10, 10),
(ror3_56_cifar100, 100),
(ror3_56_svhn, 10),
(ror3_110_cifar10, 10),
(ror3_110_cifar100, 100),
(ror3_110_svhn, 10),
(ror3_164_cifar10, 10),
(ror3_164_cifar100, 100),
(ror3_164_svhn, 10),
]
for model, classes in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != ror3_56_cifar10 or weight_count == 762746)
assert (model != ror3_56_cifar100 or weight_count == 768596)
assert (model != ror3_56_svhn or weight_count == 762746)
assert (model != ror3_110_cifar10 or weight_count == 1637690)
assert (model != ror3_110_cifar100 or weight_count == 1643540)
assert (model != ror3_110_svhn or weight_count == 1637690)
assert (model != ror3_164_cifar10 or weight_count == 2512634)
assert (model != ror3_164_cifar100 or weight_count == 2518484)
assert (model != ror3_164_svhn or weight_count == 2512634)
x = mx.nd.zeros((1, 3, 32, 32), ctx=ctx)
y = net(x)
assert (y.shape == (1, classes))
if __name__ == "__main__":
_test()
| 19,573
| 33.522046
| 118
|
py
|
imgclsmob
|
imgclsmob-master/gluon/gluoncv2/models/dicenet.py
|
"""
DiCENet for ImageNet-1K, implemented in Gluon.
Original paper: 'DiCENet: Dimension-wise Convolutions for Efficient Networks,' https://arxiv.org/abs/1906.03516.
"""
__all__ = ['DiceNet', 'dicenet_wd5', 'dicenet_wd2', 'dicenet_w3d4', 'dicenet_w1', 'dicenet_w5d4', 'dicenet_w3d2',
'dicenet_w7d8', 'dicenet_w2']
import os
import math
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1, conv3x3, conv1x1_block, conv3x3_block, NormActivation, ChannelShuffle, Concurrent, PReLU2
class SpatialDiceBranch(HybridBlock):
"""
Spatial element of DiCE block for selected dimension.
Parameters:
----------
sp_size : int
Desired size for selected spatial dimension.
is_height : bool
Whether the selected dimension is height.
fixed_size : bool
Whether to expect fixed spatial size of input image.
"""
def __init__(self,
sp_size,
is_height,
fixed_size,
**kwargs):
super(SpatialDiceBranch, self).__init__(**kwargs)
self.is_height = is_height
self.fixed_size = fixed_size
self.index = 2 if is_height else 3
self.base_sp_size = sp_size
with self.name_scope():
self.conv = conv3x3(
in_channels=self.base_sp_size,
out_channels=self.base_sp_size,
groups=self.base_sp_size)
def hybrid_forward(self, F, x):
if not self.fixed_size:
height, width = x.shape[2:]
if self.is_height:
real_sp_size = height
real_in_size = (real_sp_size, width)
base_in_size = (self.base_sp_size, width)
else:
real_sp_size = width
real_in_size = (height, real_sp_size)
base_in_size = (height, self.base_sp_size)
if real_sp_size != self.base_sp_size:
if real_sp_size < self.base_sp_size:
x = F.contrib.BilinearResize2D(x, height=base_in_size[0], width=base_in_size[1])
else:
x = F.contrib.AdaptiveAvgPooling2D(x, output_size=base_in_size)
x = x.swapaxes(1, self.index)
x = self.conv(x)
x = x.swapaxes(1, self.index)
if not self.fixed_size:
changed_sp_size = x.shape[self.index]
if real_sp_size != changed_sp_size:
if changed_sp_size < real_sp_size:
x = F.contrib.BilinearResize2D(x, height=real_in_size[0], width=real_in_size[1])
else:
x = F.contrib.AdaptiveAvgPooling2D(x, output_size=real_in_size)
return x
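# Dimension-wise trick (as implemented above): swapping the channel axis with
# the height (or width) axis lets the grouped 3x3 convolution slide along the
# chosen spatial dimension; inputs whose size differs from base_sp_size are
# resized beforehand and restored afterwards.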
class DiceBaseBlock(HybridBlock):
"""
Base part of DiCE block (without attention).
Parameters:
----------
channels : int
Number of input/output channels.
in_size : tuple of two ints
Spatial size of the expected input image.
fixed_size : bool
Whether to expect fixed spatial size of input image.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
"""
def __init__(self,
channels,
in_size,
fixed_size,
bn_use_global_stats=False,
bn_cudnn_off=False,
**kwargs):
super(DiceBaseBlock, self).__init__(**kwargs)
mid_channels = 3 * channels
with self.name_scope():
self.convs = Concurrent()
self.convs.add(conv3x3(
in_channels=channels,
out_channels=channels,
groups=channels))
self.convs.add(SpatialDiceBranch(
sp_size=in_size[0],
is_height=True,
fixed_size=fixed_size))
self.convs.add(SpatialDiceBranch(
sp_size=in_size[1],
is_height=False,
fixed_size=fixed_size))
self.norm_activ = NormActivation(
in_channels=mid_channels,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
activation=(lambda: PReLU2(in_channels=mid_channels)))
self.shuffle = ChannelShuffle(
channels=mid_channels,
groups=3)
self.squeeze_conv = conv1x1_block(
in_channels=mid_channels,
out_channels=channels,
groups=channels,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
activation=(lambda: PReLU2(in_channels=channels)))
def hybrid_forward(self, F, x):
x = self.convs(x)
x = self.norm_activ(x)
x = self.shuffle(x)
x = self.squeeze_conv(x)
return x
class DiceAttBlock(HybridBlock):
"""
Pure attention part of DiCE block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
reduction : int, default 4
Squeeze reduction value.
"""
def __init__(self,
in_channels,
out_channels,
reduction=4,
**kwargs):
super(DiceAttBlock, self).__init__(**kwargs)
mid_channels = in_channels // reduction
with self.name_scope():
self.pool = nn.GlobalAvgPool2D()
self.conv1 = conv1x1(
in_channels=in_channels,
out_channels=mid_channels,
use_bias=False)
self.activ = nn.Activation("relu")
self.conv2 = conv1x1(
in_channels=mid_channels,
out_channels=out_channels,
use_bias=False)
self.sigmoid = nn.Activation("sigmoid")
def hybrid_forward(self, F, x):
w = self.pool(x)
w = self.conv1(w)
w = self.activ(w)
w = self.conv2(w)
w = self.sigmoid(w)
return w
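# Hedged usage sketch (assumed shapes): DiceAttBlock is a squeeze-and-excite
# style gate; it pools globally and emits one sigmoid weight per output
# channel, which DiceBlock later broadcasts over the projected feature map.
def _example_dice_att_block():
    import mxnet as mx
    att = DiceAttBlock(in_channels=32, out_channels=32)
    att.initialize()
    x = mx.nd.zeros((1, 32, 28, 28))
    w = att(x)
    assert w.shape == (1, 32, 1, 1)  # one gate value per output channel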
class DiceBlock(HybridBlock):
"""
DiCE block (volume-wise separable convolutions).
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
in_size : tuple of two ints
Spatial size of the expected input image.
fixed_size : bool
Whether to expect fixed spatial size of input image.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
"""
def __init__(self,
in_channels,
out_channels,
in_size,
fixed_size,
bn_use_global_stats=False,
bn_cudnn_off=False,
**kwargs):
super(DiceBlock, self).__init__(**kwargs)
proj_groups = math.gcd(in_channels, out_channels)
with self.name_scope():
self.base_block = DiceBaseBlock(
channels=in_channels,
in_size=in_size,
fixed_size=fixed_size,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off)
self.att = DiceAttBlock(
in_channels=in_channels,
out_channels=out_channels)
# assert (in_channels == out_channels)
self.proj_conv = conv3x3_block(
in_channels=in_channels,
out_channels=out_channels,
groups=proj_groups,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
activation=(lambda: PReLU2(in_channels=out_channels)))
def hybrid_forward(self, F, x):
x = self.base_block(x)
w = self.att(x)
x = self.proj_conv(x)
x = F.broadcast_mul(x, w)
return x
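# Hedged usage sketch (assumed shapes): a standalone DiceBlock preserves the
# spatial size while mapping in_channels to out_channels; the channel-wise
# attention computed from the fused dimension-wise features gates the output.
def _example_dice_block():
    import mxnet as mx
    block = DiceBlock(in_channels=32, out_channels=32, in_size=(28, 28), fixed_size=True)
    block.initialize()
    x = mx.nd.zeros((1, 32, 28, 28))
    y = block(x)
    assert y.shape == (1, 32, 28, 28)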
class StridedDiceLeftBranch(HybridBlock):
"""
Left branch of the strided DiCE block.
Parameters:
----------
channels : int
Number of input/output channels.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
"""
def __init__(self,
channels,
bn_use_global_stats=False,
bn_cudnn_off=False,
**kwargs):
super(StridedDiceLeftBranch, self).__init__(**kwargs)
with self.name_scope():
self.conv1 = conv3x3_block(
in_channels=channels,
out_channels=channels,
strides=2,
groups=channels,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
activation=(lambda: PReLU2(in_channels=channels)))
self.conv2 = conv1x1_block(
in_channels=channels,
out_channels=channels,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
activation=(lambda: PReLU2(in_channels=channels)))
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.conv2(x)
return x
class StridedDiceRightBranch(HybridBlock):
"""
Right branch of the strided DiCE block.
Parameters:
----------
channels : int
Number of input/output channels.
in_size : tuple of two ints
Spatial size of the expected input image.
fixed_size : bool
Whether to expect fixed spatial size of input image.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
"""
def __init__(self,
channels,
in_size,
fixed_size,
bn_use_global_stats=False,
bn_cudnn_off=False,
**kwargs):
super(StridedDiceRightBranch, self).__init__(**kwargs)
with self.name_scope():
self.pool = nn.AvgPool2D(
pool_size=3,
strides=2,
padding=1)
self.dice = DiceBlock(
in_channels=channels,
out_channels=channels,
in_size=(in_size[0] // 2, in_size[1] // 2),
fixed_size=fixed_size,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off)
self.conv = conv1x1_block(
in_channels=channels,
out_channels=channels,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
activation=(lambda: PReLU2(in_channels=channels)))
def hybrid_forward(self, F, x):
x = self.pool(x)
x = self.dice(x)
x = self.conv(x)
return x
class StridedDiceBlock(HybridBlock):
"""
Strided DiCE block (strided volume-wise separable convolutions).
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
in_size : tuple of two ints
Spatial size of the expected input image.
fixed_size : bool
Whether to expect fixed spatial size of input image.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
"""
def __init__(self,
in_channels,
out_channels,
in_size,
fixed_size,
bn_use_global_stats=False,
bn_cudnn_off=False,
**kwargs):
super(StridedDiceBlock, self).__init__(**kwargs)
assert (out_channels == 2 * in_channels)
with self.name_scope():
self.branches = Concurrent()
self.branches.add(StridedDiceLeftBranch(
channels=in_channels,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off))
self.branches.add(StridedDiceRightBranch(
channels=in_channels,
in_size=in_size,
fixed_size=fixed_size,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off))
self.shuffle = ChannelShuffle(
channels=out_channels,
groups=2)
def hybrid_forward(self, F, x):
x = self.branches(x)
x = self.shuffle(x)
return x
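# Hedged usage sketch (assumed shapes): the strided block halves the spatial
# resolution in both branches and concatenates them, which is why it asserts
# out_channels == 2 * in_channels before shuffling the merged channels.
def _example_strided_dice_block():
    import mxnet as mx
    block = StridedDiceBlock(in_channels=24, out_channels=48, in_size=(56, 56), fixed_size=True)
    block.initialize()
    x = mx.nd.zeros((1, 24, 56, 56))
    y = block(x)
    assert y.shape == (1, 48, 28, 28)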
class ShuffledDiceRightBranch(HybridBlock):
"""
Right branch of the shuffled DiCE block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
in_size : tuple of two ints
Spatial size of the expected input image.
fixed_size : bool
Whether to expect fixed spatial size of input image.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
"""
def __init__(self,
in_channels,
out_channels,
in_size,
fixed_size,
bn_use_global_stats=False,
bn_cudnn_off=False,
**kwargs):
super(ShuffledDiceRightBranch, self).__init__(**kwargs)
with self.name_scope():
self.conv = conv1x1_block(
in_channels=in_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
activation=(lambda: PReLU2(in_channels=out_channels)))
self.dice = DiceBlock(
in_channels=out_channels,
out_channels=out_channels,
in_size=in_size,
fixed_size=fixed_size,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off)
def hybrid_forward(self, F, x):
x = self.conv(x)
x = self.dice(x)
return x
class ShuffledDiceBlock(HybridBlock):
"""
Shuffled DiCE block (shuffled volume-wise separable convolutions).
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
in_size : tuple of two ints
Spatial size of the expected input image.
fixed_size : bool
Whether to expect fixed spatial size of input image.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
"""
def __init__(self,
in_channels,
out_channels,
in_size,
fixed_size,
bn_use_global_stats=False,
bn_cudnn_off=False,
**kwargs):
super(ShuffledDiceBlock, self).__init__(**kwargs)
self.left_part = in_channels - in_channels // 2
right_in_channels = in_channels - self.left_part
right_out_channels = out_channels - self.left_part
with self.name_scope():
self.right_branch = ShuffledDiceRightBranch(
in_channels=right_in_channels,
out_channels=right_out_channels,
in_size=in_size,
fixed_size=fixed_size,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off)
self.shuffle = ChannelShuffle(
channels=(2 * right_out_channels),
groups=2)
def hybrid_forward(self, F, x):
x1, x2 = F.split(x, axis=1, num_outputs=2)
x2 = self.right_branch(x2)
x = F.concat(x1, x2, dim=1)
x = self.shuffle(x)
return x
class DiceInitBlock(HybridBlock):
"""
DiceNet specific initial block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
"""
def __init__(self,
in_channels,
out_channels,
bn_use_global_stats=False,
bn_cudnn_off=False,
**kwargs):
super(DiceInitBlock, self).__init__(**kwargs)
with self.name_scope():
self.conv = conv3x3_block(
in_channels=in_channels,
out_channels=out_channels,
strides=2,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off,
activation=(lambda: PReLU2(in_channels=out_channels)))
self.pool = nn.MaxPool2D(
pool_size=3,
strides=2,
padding=1)
def hybrid_forward(self, F, x):
x = self.conv(x)
x = self.pool(x)
return x
class DiceClassifier(HybridBlock):
"""
DiceNet specific classifier block.
Parameters:
----------
in_channels : int
Number of input channels.
mid_channels : int
Number of middle channels.
classes : int, default 1000
Number of classification classes.
dropout_rate : float
        Parameter of Dropout layer. Fraction of the input units to drop.
"""
def __init__(self,
in_channels,
mid_channels,
classes,
dropout_rate,
**kwargs):
super(DiceClassifier, self).__init__(**kwargs)
with self.name_scope():
self.conv1 = conv1x1(
in_channels=in_channels,
out_channels=mid_channels,
groups=4)
self.dropout = nn.Dropout(rate=dropout_rate)
self.conv2 = conv1x1(
in_channels=mid_channels,
out_channels=classes,
use_bias=True)
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.dropout(x)
x = self.conv2(x)
return x
class DiceNet(HybridBlock):
"""
DiCENet model from 'DiCENet: Dimension-wise Convolutions for Efficient Networks,' https://arxiv.org/abs/1906.03516.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
classifier_mid_channels : int
Number of middle channels for classifier.
dropout_rate : float
        Parameter of Dropout layer in classifier. Fraction of the input units to drop.
fixed_size : bool, default True
Whether to expect fixed spatial size of input image.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
bn_cudnn_off : bool, default False
Whether to disable CUDNN batch normalization operator.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
classifier_mid_channels,
dropout_rate,
fixed_size=True,
bn_use_global_stats=False,
bn_cudnn_off=False,
in_channels=3,
in_size=(224, 224),
classes=1000,
**kwargs):
super(DiceNet, self).__init__(**kwargs)
assert ((in_size[0] % 32 == 0) and (in_size[1] % 32 == 0))
self.in_size = in_size
self.classes = classes
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
self.features.add(DiceInitBlock(
in_channels=in_channels,
out_channels=init_block_channels,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off))
in_channels = init_block_channels
in_size = (in_size[0] // 4, in_size[1] // 4)
for i, channels_per_stage in enumerate(channels):
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
with stage.name_scope():
for j, out_channels in enumerate(channels_per_stage):
unit_class = StridedDiceBlock if j == 0 else ShuffledDiceBlock
stage.add(unit_class(
in_channels=in_channels,
out_channels=out_channels,
in_size=in_size,
fixed_size=fixed_size,
bn_use_global_stats=bn_use_global_stats,
bn_cudnn_off=bn_cudnn_off))
in_channels = out_channels
in_size = (in_size[0] // 2, in_size[1] // 2) if j == 0 else in_size
self.features.add(stage)
self.features.add(nn.GlobalAvgPool2D())
self.output = nn.HybridSequential(prefix="")
self.output.add(DiceClassifier(
in_channels=in_channels,
mid_channels=classifier_mid_channels,
classes=classes,
dropout_rate=dropout_rate))
self.output.add(nn.Flatten())
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_dicenet(width_scale,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create DiCENet model with specific parameters.
Parameters:
----------
width_scale : float
Scale factor for width of layers.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
channels_per_layers_dict = {
0.2: [32, 64, 128],
0.5: [48, 96, 192],
0.75: [86, 172, 344],
1.0: [116, 232, 464],
1.25: [144, 288, 576],
1.5: [176, 352, 704],
1.75: [210, 420, 840],
2.0: [244, 488, 976],
2.4: [278, 556, 1112],
}
    if width_scale not in channels_per_layers_dict:
raise ValueError("Unsupported DiceNet with width scale: {}".format(width_scale))
channels_per_layers = channels_per_layers_dict[width_scale]
layers = [3, 7, 3]
if width_scale > 0.2:
init_block_channels = 24
else:
init_block_channels = 16
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
for i in range(len(channels)):
pred_channels = channels[i - 1][-1] if i != 0 else init_block_channels
channels[i] = [pred_channels * 2] + channels[i]
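    # Worked example (width_scale=1.0): channels_per_layers = [116, 232, 464]
    # and layers = [3, 7, 3] expand to [[116] * 3, [232] * 7, [464] * 3];
    # prepending the doubled predecessor width yields [48, 116, 116, 116],
    # [232, 232, ..., 232] and [464, 464, 464, 464], so the first (strided)
    # unit of every stage satisfies out_channels == 2 * in_channels.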
if width_scale > 2.0:
classifier_mid_channels = 1280
else:
classifier_mid_channels = 1024
if width_scale > 1.0:
dropout_rate = 0.2
else:
dropout_rate = 0.1
net = DiceNet(
channels=channels,
init_block_channels=init_block_channels,
classifier_mid_channels=classifier_mid_channels,
dropout_rate=dropout_rate,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def dicenet_wd5(**kwargs):
"""
DiCENet x0.2 model from 'DiCENet: Dimension-wise Convolutions for Efficient Networks,'
https://arxiv.org/abs/1906.03516.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_dicenet(width_scale=0.2, model_name="dicenet_wd5", **kwargs)
def dicenet_wd2(**kwargs):
"""
DiCENet x0.5 model from 'DiCENet: Dimension-wise Convolutions for Efficient Networks,'
https://arxiv.org/abs/1906.03516.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_dicenet(width_scale=0.5, model_name="dicenet_wd2", **kwargs)
def dicenet_w3d4(**kwargs):
"""
DiCENet x0.75 model from 'DiCENet: Dimension-wise Convolutions for Efficient Networks,'
https://arxiv.org/abs/1906.03516.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_dicenet(width_scale=0.75, model_name="dicenet_w3d4", **kwargs)
def dicenet_w1(**kwargs):
"""
DiCENet x1.0 model from 'DiCENet: Dimension-wise Convolutions for Efficient Networks,'
https://arxiv.org/abs/1906.03516.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_dicenet(width_scale=1.0, model_name="dicenet_w1", **kwargs)
def dicenet_w5d4(**kwargs):
"""
DiCENet x1.25 model from 'DiCENet: Dimension-wise Convolutions for Efficient Networks,'
https://arxiv.org/abs/1906.03516.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_dicenet(width_scale=1.25, model_name="dicenet_w5d4", **kwargs)
def dicenet_w3d2(**kwargs):
"""
DiCENet x1.5 model from 'DiCENet: Dimension-wise Convolutions for Efficient Networks,'
https://arxiv.org/abs/1906.03516.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_dicenet(width_scale=1.5, model_name="dicenet_w3d2", **kwargs)
def dicenet_w7d8(**kwargs):
"""
DiCENet x1.75 model from 'DiCENet: Dimension-wise Convolutions for Efficient Networks,'
https://arxiv.org/abs/1906.03516.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_dicenet(width_scale=1.75, model_name="dicenet_w7d8", **kwargs)
def dicenet_w2(**kwargs):
"""
DiCENet x2.0 model from 'DiCENet: Dimension-wise Convolutions for Efficient Networks,'
https://arxiv.org/abs/1906.03516.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_dicenet(width_scale=2.0, model_name="dicenet_w2", **kwargs)
def _calc_width(net):
import numpy as np
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
return weight_count
def _test():
import mxnet as mx
pretrained = False
fixed_size = True
models = [
dicenet_wd5,
dicenet_wd2,
dicenet_w3d4,
dicenet_w1,
dicenet_w5d4,
dicenet_w3d2,
dicenet_w7d8,
dicenet_w2,
]
for model in models:
net = model(pretrained=pretrained, fixed_size=fixed_size)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
weight_count = _calc_width(net)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != dicenet_wd5 or weight_count == 1130704)
assert (model != dicenet_wd2 or weight_count == 1214120)
assert (model != dicenet_w3d4 or weight_count == 1495676)
assert (model != dicenet_w1 or weight_count == 1805604)
assert (model != dicenet_w5d4 or weight_count == 2162888)
assert (model != dicenet_w3d2 or weight_count == 2652200)
assert (model != dicenet_w7d8 or weight_count == 3264932)
assert (model != dicenet_w2 or weight_count == 3979044)
x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
y = net(x)
assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
# File: imgclsmob-master/gluon/gluoncv2/models/nvpattexp.py
"""
Neural Voice Puppetry Audio-to-Expression net for speech-driven facial animation, implemented in Gluon.
Original paper: 'Neural Voice Puppetry: Audio-driven Facial Reenactment,' https://arxiv.org/abs/1912.05566.
"""
__all__ = ['NvpAttExp', 'nvpattexp116bazel76']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import Softmax, DenseBlock, ConvBlock, ConvBlock1d, SelectableDense
class NvpAttExpEncoder(HybridBlock):
"""
Neural Voice Puppetry Audio-to-Expression encoder.
Parameters:
----------
audio_features : int
Number of audio features (characters/sounds).
audio_window_size : int
Size of audio window (for time related audio features).
    seq_len : int
Size of feature window.
encoder_features : int
Number of encoder features.
"""
def __init__(self,
audio_features,
audio_window_size,
seq_len,
encoder_features,
**kwargs):
super(NvpAttExpEncoder, self).__init__(**kwargs)
self.audio_features = audio_features
self.audio_window_size = audio_window_size
self.seq_len = seq_len
conv_channels = (32, 32, 64, 64)
conv_slopes = (0.02, 0.02, 0.2, 0.2)
fc_channels = (128, 64, encoder_features)
fc_slopes = (0.02, 0.02, None)
att_conv_channels = (16, 8, 4, 2, 1)
att_conv_slopes = 0.02
with self.name_scope():
in_channels = audio_features
self.conv_branch = nn.HybridSequential(prefix="")
with self.conv_branch.name_scope():
for i, (out_channels, slope) in enumerate(zip(conv_channels, conv_slopes)):
self.conv_branch.add(ConvBlock(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=(3, 1),
strides=(2, 1),
padding=(1, 0),
use_bias=True,
use_bn=False,
activation=nn.LeakyReLU(alpha=slope)))
in_channels = out_channels
self.fc_branch = nn.HybridSequential(prefix="")
with self.fc_branch.name_scope():
for i, (out_channels, slope) in enumerate(zip(fc_channels, fc_slopes)):
activation = nn.LeakyReLU(alpha=slope) if slope is not None else nn.Activation("tanh")
self.fc_branch.add(DenseBlock(
in_channels=in_channels,
out_channels=out_channels,
use_bias=True,
use_bn=False,
activation=activation))
in_channels = out_channels
self.att_conv_branch = nn.HybridSequential(prefix="")
with self.att_conv_branch.name_scope():
                for i, out_channels in enumerate(att_conv_channels):
self.att_conv_branch.add(ConvBlock1d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
strides=1,
padding=1,
use_bias=True,
use_bn=False,
activation=nn.LeakyReLU(alpha=att_conv_slopes)))
in_channels = out_channels
self.att_fc = DenseBlock(
in_channels=seq_len,
out_channels=seq_len,
use_bias=True,
use_bn=False,
activation=Softmax(axis=1))
def hybrid_forward(self, F, x):
x = x.reshape((-3, 1, self.audio_window_size, self.audio_features))
x = x.swapaxes(1, 3)
x = self.conv_branch(x)
x = x.reshape((0, 1, -1))
x = self.fc_branch(x)
x = x.reshape((-4, -1, self.seq_len, 0))
x = x.swapaxes(1, 2)
y = x.slice_axis(axis=-1, begin=(self.seq_len // 2), end=(self.seq_len // 2) + 1).squeeze(axis=-1)
w = self.att_conv_branch(x)
w = w.reshape((0, -1))
w = self.att_fc(w)
w = w.expand_dims(axis=-1)
x = F.batch_dot(x, w)
x = x.squeeze(axis=-1)
return x, y
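# Shape walkthrough for the encoder above (batch b, seq_len t = 8, window
# w = 16, features f = 29): (b, t, w, f) is reshaped to (b*t, 1, w, f) and
# transposed to (b*t, f, w, 1); four strided (3, 1) convolutions halve w
# (16 -> 8 -> 4 -> 2 -> 1), the fc branch maps the result to
# encoder_features, the sequence axis is restored to (b, encoder_features, t),
# and the attention branch produces t softmax weights that batch_dot uses to
# pool the sequence into a single (b, encoder_features) vector; y is the
# un-pooled center frame.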
class NvpAttExp(HybridBlock):
"""
Neural Voice Puppetry Audio-to-Expression model from 'Neural Voice Puppetry: Audio-driven Facial Reenactment,'
https://arxiv.org/abs/1912.05566.
Parameters:
----------
audio_features : int, default 29
Number of audio features (characters/sounds).
audio_window_size : int, default 16
Size of audio window (for time related audio features).
seq_len : int, default 8
Size of feature window.
base_persons : int, default 116
Number of base persons (identities).
blendshapes : int, default 76
Number of 3D model blendshapes.
encoder_features : int, default 32
Number of encoder features.
"""
def __init__(self,
audio_features=29,
audio_window_size=16,
seq_len=8,
base_persons=116,
blendshapes=76,
encoder_features=32,
**kwargs):
super(NvpAttExp, self).__init__(**kwargs)
self.base_persons = base_persons
with self.name_scope():
self.encoder = NvpAttExpEncoder(
audio_features=audio_features,
audio_window_size=audio_window_size,
seq_len=seq_len,
encoder_features=encoder_features)
self.decoder = SelectableDense(
in_channels=encoder_features,
out_channels=blendshapes,
use_bias=False,
num_options=base_persons)
def hybrid_forward(self, F, x, pid):
x, y = self.encoder(x)
x = self.decoder(x, pid)
y = self.decoder(y, pid)
return x, y
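# Hedged usage sketch (assumed shapes, mirroring the self-test below): pid
# selects a per-person decoder matrix in SelectableDense, so the same audio
# encoding yields person-specific blendshape predictions.
def _example_nvpattexp():
    import mxnet as mx
    net = NvpAttExp()
    net.initialize()
    x = mx.nd.zeros((2, 8, 16, 29))
    pid = mx.nd.array([0, 5])
    expr_pooled, expr_center = net(x, pid)
    assert expr_pooled.shape == expr_center.shape == (2, 76)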
def get_nvpattexp(base_persons,
blendshapes,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create Neural Voice Puppetry Audio-to-Expression model with specific parameters.
Parameters:
----------
base_persons : int
Number of base persons (subjects).
blendshapes : int
Number of 3D model blendshapes.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
net = NvpAttExp(
base_persons=base_persons,
blendshapes=blendshapes,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def nvpattexp116bazel76(**kwargs):
"""
Neural Voice Puppetry Audio-to-Expression model for 116 base persons and Bazel topology with 76 blendshapes from
'Neural Voice Puppetry: Audio-driven Facial Reenactment,' https://arxiv.org/abs/1912.05566.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_nvpattexp(base_persons=116, blendshapes=76, model_name="nvpattexp116bazel76", **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
nvpattexp116bazel76,
]
for model in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != nvpattexp116bazel76 or weight_count == 327397)
batch = 14
seq_len = 8
audio_window_size = 16
audio_features = 29
blendshapes = 76
x = mx.nd.random.normal(shape=(batch, seq_len, audio_window_size, audio_features), ctx=ctx)
pid = mx.nd.array(np.full(shape=(batch,), fill_value=3), ctx=ctx)
y1, y2 = net(x, pid)
assert (y1.shape == y2.shape == (batch, blendshapes))
if __name__ == "__main__":
_test()
# File: imgclsmob-master/gluon/gluoncv2/models/octresnet.py
"""
Oct-ResNet for ImageNet-1K, implemented in Gluon.
Original paper: 'Drop an Octave: Reducing Spatial Redundancy in Convolutional Neural Networks with Octave
Convolution,' https://arxiv.org/abs/1904.05049.
"""
__all__ = ['OctResNet', 'octresnet10_ad2', 'octresnet50b_ad2', 'OctResUnit']
import os
from inspect import isfunction
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import ReLU6, DualPathSequential
from .resnet import ResInitBlock
class OctConv(nn.Conv2D):
"""
Octave convolution layer.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of 2 int
Convolution window size.
strides : int or tuple/list of 2 int
Strides of the convolution.
padding : int or tuple/list of 2 int, default 1
Padding value for convolution layer.
dilation : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
groups : int, default 1
Number of groups.
use_bias : bool, default False
Whether the layer uses a bias vector.
oct_alpha : float, default 0.0
Octave alpha coefficient.
oct_mode : str, default 'std'
Octave convolution mode. It can be 'first', 'norm', 'last', or 'std'.
oct_value : int, default 2
Octave value.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
strides,
padding=1,
dilation=1,
groups=1,
use_bias=False,
oct_alpha=0.0,
oct_mode="std",
oct_value=2,
**kwargs):
if isinstance(strides, int):
strides = (strides, strides)
self.downsample = (strides[0] > 1) or (strides[1] > 1)
assert (strides[0] in [1, oct_value]) and (strides[1] in [1, oct_value])
strides = (1, 1)
if oct_mode == "first":
in_alpha = 0.0
out_alpha = oct_alpha
elif oct_mode == "norm":
in_alpha = oct_alpha
out_alpha = oct_alpha
elif oct_mode == "last":
in_alpha = oct_alpha
out_alpha = 0.0
elif oct_mode == "std":
in_alpha = 0.0
out_alpha = 0.0
else:
raise ValueError("Unsupported octave convolution mode: {}".format(oct_mode))
self.h_in_channels = int(in_channels * (1.0 - in_alpha))
self.h_out_channels = int(out_channels * (1.0 - out_alpha))
self.l_out_channels = out_channels - self.h_out_channels
self.oct_alpha = oct_alpha
self.oct_mode = oct_mode
self.oct_value = oct_value
super(OctConv, self).__init__(
in_channels=in_channels,
channels=out_channels,
kernel_size=kernel_size,
strides=strides,
padding=padding,
dilation=dilation,
groups=groups,
use_bias=use_bias,
**kwargs)
self.conv_kwargs = self._kwargs.copy()
del self.conv_kwargs["num_filter"]
def hybrid_forward(self, F, hx, lx=None, weight=None, bias=None):
if self.oct_mode == "std":
return super(OctConv, self).hybrid_forward(F, hx, weight=weight, bias=bias), None
if self.downsample:
hx = F.Pooling(
hx,
kernel=(self.oct_value, self.oct_value),
stride=(self.oct_value, self.oct_value),
pool_type="avg")
hhy = F.Convolution(
hx,
weight=weight.slice(begin=(None, None), end=(self.h_out_channels, self.h_in_channels)),
bias=bias.slice(begin=(None,), end=(self.h_out_channels,)) if bias is not None else None,
num_filter=self.h_out_channels,
**self.conv_kwargs)
if self.oct_mode != "first":
hlx = F.Convolution(
lx,
weight=weight.slice(begin=(None, self.h_in_channels), end=(self.h_out_channels, None)),
bias=bias.slice(begin=(None,), end=(self.h_out_channels,)) if bias is not None else None,
num_filter=self.h_out_channels,
**self.conv_kwargs)
if self.oct_mode == "last":
hy = hhy + hlx
ly = None
return hy, ly
lhx = F.Pooling(
hx,
kernel=(self.oct_value, self.oct_value),
stride=(self.oct_value, self.oct_value),
pool_type="avg")
lhy = F.Convolution(
lhx,
weight=weight.slice(begin=(self.h_out_channels, None), end=(None, self.h_in_channels)),
bias=bias.slice(begin=(self.h_out_channels,), end=(None,)) if bias is not None else None,
num_filter=self.l_out_channels,
**self.conv_kwargs)
if self.oct_mode == "first":
hy = hhy
ly = lhy
return hy, ly
if self.downsample:
hly = hlx
llx = F.Pooling(
lx,
kernel=(self.oct_value, self.oct_value),
stride=(self.oct_value, self.oct_value),
pool_type="avg")
else:
hly = F.UpSampling(hlx, scale=self.oct_value, sample_type="nearest")
llx = lx
lly = F.Convolution(
llx,
weight=weight.slice(begin=(self.h_out_channels, self.h_in_channels), end=(None, None)),
bias=bias.slice(begin=(self.h_out_channels,), end=(None,)) if bias is not None else None,
num_filter=self.l_out_channels,
**self.conv_kwargs)
hy = hhy + hly
ly = lhy + lly
return hy, ly
def __repr__(self):
s = '{name}({mapping}, kernel_size={kernel}, stride={stride}'
len_kernel_size = len(self._kwargs['kernel'])
if self._kwargs['pad'] != (0,) * len_kernel_size:
s += ', padding={pad}'
if self._kwargs['dilate'] != (1,) * len_kernel_size:
s += ', dilation={dilate}'
if hasattr(self, 'out_pad') and self.out_pad != (0,) * len_kernel_size:
s += ', output_padding={out_pad}'.format(out_pad=self.out_pad)
if self._kwargs['num_group'] != 1:
s += ', groups={num_group}'
if self.bias is None:
s += ', bias=False'
if self.act:
s += ', {}'.format(self.act)
s += ', oct_alpha={}'.format(self.oct_alpha)
s += ', oct_mode={}'.format(self.oct_mode)
s += ')'
shape = self.weight.shape
return s.format(name=self.__class__.__name__,
mapping='{0} -> {1}'.format(shape[1] if shape[1] else None, shape[0]),
**self._kwargs)
class OctConvBlock(HybridBlock):
"""
Octave convolution block with Batch normalization and ReLU/ReLU6 activation.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of 2 int
Convolution window size.
strides : int or tuple/list of 2 int
Strides of the convolution.
padding : int or tuple/list of 2 int
Padding value for convolution layer.
dilation : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
groups : int, default 1
Number of groups.
use_bias : bool, default False
Whether the layer uses a bias vector.
oct_alpha : float, default 0.0
Octave alpha coefficient.
oct_mode : str, default 'std'
Octave convolution mode. It can be 'first', 'norm', 'last', or 'std'.
bn_epsilon : float, default 1e-5
Small float added to variance in Batch norm.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
activation : function or str or None, default nn.Activation("relu")
Activation function or name of activation function.
activate : bool, default True
        Whether to activate the convolution block.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
strides,
padding,
dilation=1,
groups=1,
use_bias=False,
oct_alpha=0.0,
oct_mode="std",
bn_epsilon=1e-5,
bn_use_global_stats=False,
activation=(lambda: nn.Activation("relu")),
activate=True,
**kwargs):
super(OctConvBlock, self).__init__(**kwargs)
self.activate = activate
self.last = (oct_mode == "last") or (oct_mode == "std")
out_alpha = 0.0 if self.last else oct_alpha
h_out_channels = int(out_channels * (1.0 - out_alpha))
l_out_channels = out_channels - h_out_channels
with self.name_scope():
self.conv = OctConv(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
strides=strides,
padding=padding,
dilation=dilation,
groups=groups,
use_bias=use_bias,
oct_alpha=oct_alpha,
oct_mode=oct_mode)
self.h_bn = nn.BatchNorm(
in_channels=h_out_channels,
epsilon=bn_epsilon,
use_global_stats=bn_use_global_stats)
if not self.last:
self.l_bn = nn.BatchNorm(
in_channels=l_out_channels,
epsilon=bn_epsilon,
use_global_stats=bn_use_global_stats)
if self.activate:
assert (activation is not None)
if isfunction(activation):
self.activ = activation()
elif isinstance(activation, str):
if activation == "relu6":
self.activ = ReLU6()
else:
self.activ = nn.Activation(activation)
else:
self.activ = activation
def hybrid_forward(self, F, hx, lx=None):
hx, lx = self.conv(hx, lx)
hx = self.h_bn(hx)
if self.activate:
hx = self.activ(hx)
if not self.last:
lx = self.l_bn(lx)
if self.activate:
lx = self.activ(lx)
return hx, lx
def oct_conv1x1_block(in_channels,
out_channels,
strides=1,
groups=1,
use_bias=False,
oct_alpha=0.0,
oct_mode="std",
bn_epsilon=1e-5,
bn_use_global_stats=False,
activation=(lambda: nn.Activation("relu")),
activate=True,
**kwargs):
"""
1x1 version of the octave convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int, default 1
Strides of the convolution.
groups : int, default 1
Number of groups.
use_bias : bool, default False
Whether the layer uses a bias vector.
oct_alpha : float, default 0.0
Octave alpha coefficient.
oct_mode : str, default 'std'
Octave convolution mode. It can be 'first', 'norm', 'last', or 'std'.
bn_epsilon : float, default 1e-5
Small float added to variance in Batch norm.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
activation : function or str or None, default nn.Activation("relu")
Activation function or name of activation function.
activate : bool, default True
        Whether to activate the convolution block.
"""
return OctConvBlock(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=1,
strides=strides,
padding=0,
groups=groups,
use_bias=use_bias,
oct_alpha=oct_alpha,
oct_mode=oct_mode,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
activation=activation,
activate=activate,
**kwargs)
def oct_conv3x3_block(in_channels,
out_channels,
strides=1,
padding=1,
dilation=1,
groups=1,
use_bias=False,
oct_alpha=0.0,
oct_mode="std",
bn_epsilon=1e-5,
bn_use_global_stats=False,
activation=(lambda: nn.Activation("relu")),
activate=True,
**kwargs):
"""
3x3 version of the octave convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int, default 1
Strides of the convolution.
padding : int or tuple/list of 2 int, default 1
Padding value for convolution layer.
dilation : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
groups : int, default 1
Number of groups.
use_bias : bool, default False
Whether the layer uses a bias vector.
oct_alpha : float, default 0.0
Octave alpha coefficient.
oct_mode : str, default 'std'
Octave convolution mode. It can be 'first', 'norm', 'last', or 'std'.
bn_epsilon : float, default 1e-5
Small float added to variance in Batch norm.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
activation : function or str or None, default nn.Activation("relu")
Activation function or name of activation function.
activate : bool, default True
        Whether to activate the convolution block.
"""
return OctConvBlock(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
strides=strides,
padding=padding,
dilation=dilation,
groups=groups,
use_bias=use_bias,
oct_alpha=oct_alpha,
oct_mode=oct_mode,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats,
activation=activation,
activate=activate,
**kwargs)
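# Hedged usage sketch (assumed sizes): in 'first' mode an octave block splits
# its output into a high-frequency map at full resolution and a low-frequency
# map at half resolution; with oct_alpha=0.5 the channels split evenly.
def _example_oct_conv_block():
    import mxnet as mx
    block = oct_conv3x3_block(in_channels=64, out_channels=64, oct_alpha=0.5, oct_mode="first")
    block.initialize()
    x = mx.nd.zeros((1, 64, 56, 56))
    hy, ly = block(x)
    assert hy.shape == (1, 32, 56, 56)
    assert ly.shape == (1, 32, 28, 28)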
class OctResBlock(HybridBlock):
"""
Simple Oct-ResNet block for residual path in Oct-ResNet unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
oct_alpha : float, default 0.0
Octave alpha coefficient.
oct_mode : str, default 'std'
Octave convolution mode. It can be 'first', 'norm', 'last', or 'std'.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels,
out_channels,
strides,
oct_alpha=0.0,
oct_mode="std",
bn_use_global_stats=False,
**kwargs):
super(OctResBlock, self).__init__(**kwargs)
with self.name_scope():
self.conv1 = oct_conv3x3_block(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
oct_alpha=oct_alpha,
oct_mode=oct_mode,
bn_use_global_stats=bn_use_global_stats)
self.conv2 = oct_conv3x3_block(
in_channels=out_channels,
out_channels=out_channels,
oct_alpha=oct_alpha,
oct_mode=("std" if oct_mode == "last" else (oct_mode if oct_mode != "first" else "norm")),
bn_use_global_stats=bn_use_global_stats,
activation=None,
activate=False)
def hybrid_forward(self, F, hx, lx=None):
hx, lx = self.conv1(hx, lx)
hx, lx = self.conv2(hx, lx)
return hx, lx
class OctResBottleneck(HybridBlock):
"""
Oct-ResNet bottleneck block for residual path in Oct-ResNet unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
padding : int or tuple/list of 2 int, default 1
Padding value for the second convolution layer.
dilation : int or tuple/list of 2 int, default 1
Dilation value for the second convolution layer.
oct_alpha : float, default 0.0
Octave alpha coefficient.
oct_mode : str, default 'std'
Octave convolution mode. It can be 'first', 'norm', 'last', or 'std'.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
conv1_stride : bool, default False
Whether to use stride in the first or the second convolution layer of the block.
bottleneck_factor : int, default 4
Bottleneck factor.
"""
def __init__(self,
in_channels,
out_channels,
strides,
padding=1,
dilation=1,
oct_alpha=0.0,
oct_mode="std",
bn_use_global_stats=False,
conv1_stride=False,
bottleneck_factor=4,
**kwargs):
super(OctResBottleneck, self).__init__(**kwargs)
mid_channels = out_channels // bottleneck_factor
with self.name_scope():
self.conv1 = oct_conv1x1_block(
in_channels=in_channels,
out_channels=mid_channels,
strides=(strides if conv1_stride else 1),
oct_alpha=oct_alpha,
oct_mode=(oct_mode if oct_mode != "last" else "norm"),
bn_use_global_stats=bn_use_global_stats)
self.conv2 = oct_conv3x3_block(
in_channels=mid_channels,
out_channels=mid_channels,
strides=(1 if conv1_stride else strides),
padding=padding,
dilation=dilation,
oct_alpha=oct_alpha,
oct_mode=(oct_mode if oct_mode != "first" else "norm"),
bn_use_global_stats=bn_use_global_stats)
self.conv3 = oct_conv1x1_block(
in_channels=mid_channels,
out_channels=out_channels,
oct_alpha=oct_alpha,
oct_mode=("std" if oct_mode == "last" else (oct_mode if oct_mode != "first" else "norm")),
bn_use_global_stats=bn_use_global_stats,
activation=None,
activate=False)
def hybrid_forward(self, F, hx, lx=None):
hx, lx = self.conv1(hx, lx)
hx, lx = self.conv2(hx, lx)
hx, lx = self.conv3(hx, lx)
return hx, lx
class OctResUnit(HybridBlock):
"""
Oct-ResNet unit with residual connection.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
padding : int or tuple/list of 2 int, default 1
Padding value for the second convolution layer in bottleneck.
dilation : int or tuple/list of 2 int, default 1
Dilation value for the second convolution layer in bottleneck.
oct_alpha : float, default 0.0
Octave alpha coefficient.
oct_mode : str, default 'std'
Octave convolution mode. It can be 'first', 'norm', 'last', or 'std'.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
bottleneck : bool, default True
Whether to use a bottleneck or simple block in units.
conv1_stride : bool, default False
Whether to use stride in the first or the second convolution layer of the block.
"""
def __init__(self,
in_channels,
out_channels,
strides,
padding=1,
dilation=1,
oct_alpha=0.0,
oct_mode="std",
bn_use_global_stats=False,
bottleneck=True,
conv1_stride=False,
**kwargs):
super(OctResUnit, self).__init__(**kwargs)
self.resize_identity = (in_channels != out_channels) or (strides != 1) or\
((oct_mode == "first") and (oct_alpha != 0.0))
with self.name_scope():
if bottleneck:
self.body = OctResBottleneck(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
padding=padding,
dilation=dilation,
oct_alpha=oct_alpha,
oct_mode=oct_mode,
bn_use_global_stats=bn_use_global_stats,
conv1_stride=conv1_stride)
else:
self.body = OctResBlock(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
oct_alpha=oct_alpha,
oct_mode=oct_mode,
bn_use_global_stats=bn_use_global_stats)
if self.resize_identity:
self.identity_conv = oct_conv1x1_block(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
oct_alpha=oct_alpha,
oct_mode=oct_mode,
bn_use_global_stats=bn_use_global_stats,
activation=None,
activate=False)
self.activ = nn.Activation("relu")
def hybrid_forward(self, F, hx, lx=None):
if self.resize_identity:
h_identity, l_identity = self.identity_conv(hx, lx)
else:
h_identity, l_identity = hx, lx
hx, lx = self.body(hx, lx)
hx = hx + h_identity
hx = self.activ(hx)
if lx is not None:
lx = lx + l_identity
lx = self.activ(lx)
return hx, lx
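# Hedged usage sketch (assumed sizes): a 'first'-mode bottleneck unit takes a
# plain tensor and returns the high/low frequency pair; the identity branch is
# resized because both the channel count and the octave split change.
def _example_oct_res_unit():
    import mxnet as mx
    unit = OctResUnit(in_channels=64, out_channels=256, strides=1, oct_alpha=0.5, oct_mode="first")
    unit.initialize()
    x = mx.nd.zeros((1, 64, 56, 56))
    hy, ly = unit(x)
    assert hy.shape == (1, 128, 56, 56)
    assert ly.shape == (1, 128, 28, 28)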
class OctResNet(HybridBlock):
"""
Oct-ResNet model from 'Drop an Octave: Reducing Spatial Redundancy in Convolutional Neural Networks with Octave
Convolution,' https://arxiv.org/abs/1904.05049.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
conv1_stride : bool
Whether to use stride in the first or the second convolution layer in units.
oct_alpha : float, default 0.5
Octave alpha coefficient.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
bottleneck,
conv1_stride,
oct_alpha=0.5,
bn_use_global_stats=False,
in_channels=3,
in_size=(224, 224),
classes=1000,
**kwargs):
super(OctResNet, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
with self.name_scope():
self.features = DualPathSequential(
return_two=False,
first_ordinals=1,
last_ordinals=1,
prefix="")
self.features.add(ResInitBlock(
in_channels=in_channels,
out_channels=init_block_channels,
bn_use_global_stats=bn_use_global_stats))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = DualPathSequential(prefix="stage{}_".format(i + 1))
with stage.name_scope():
for j, out_channels in enumerate(channels_per_stage):
strides = 2 if (j == 0) and (i != 0) else 1
if (i == 0) and (j == 0):
oct_mode = "first"
elif (i == len(channels) - 1) and (j == 0):
oct_mode = "last"
elif (i == len(channels) - 1) and (j != 0):
oct_mode = "std"
else:
oct_mode = "norm"
stage.add(OctResUnit(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
oct_alpha=oct_alpha,
oct_mode=oct_mode,
bn_use_global_stats=bn_use_global_stats,
bottleneck=bottleneck,
conv1_stride=conv1_stride))
in_channels = out_channels
self.features.add(stage)
self.features.add(nn.AvgPool2D(
pool_size=7,
strides=1))
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
self.output.add(nn.Dense(
units=classes,
in_units=in_channels))
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_octresnet(blocks,
bottleneck=None,
conv1_stride=True,
oct_alpha=0.5,
width_scale=1.0,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create Oct-ResNet model with specific parameters.
Parameters:
----------
blocks : int
Number of blocks.
bottleneck : bool, default None
Whether to use a bottleneck or simple block in units.
conv1_stride : bool, default True
Whether to use stride in the first or the second convolution layer in units.
oct_alpha : float, default 0.5
Octave alpha coefficient.
width_scale : float, default 1.0
Scale factor for width of layers.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
if bottleneck is None:
bottleneck = (blocks >= 50)
if blocks == 10:
layers = [1, 1, 1, 1]
elif blocks == 12:
layers = [2, 1, 1, 1]
    elif (blocks == 14) and (not bottleneck):
layers = [2, 2, 1, 1]
elif (blocks == 14) and bottleneck:
layers = [1, 1, 1, 1]
elif blocks == 16:
layers = [2, 2, 2, 1]
elif blocks == 18:
layers = [2, 2, 2, 2]
elif (blocks == 26) and not bottleneck:
layers = [3, 3, 3, 3]
elif (blocks == 26) and bottleneck:
layers = [2, 2, 2, 2]
elif blocks == 34:
layers = [3, 4, 6, 3]
elif blocks == 50:
layers = [3, 4, 6, 3]
elif blocks == 101:
layers = [3, 4, 23, 3]
elif blocks == 152:
layers = [3, 8, 36, 3]
elif blocks == 200:
layers = [3, 24, 36, 3]
elif blocks == 269:
layers = [3, 30, 48, 8]
else:
raise ValueError("Unsupported Oct-ResNet with number of blocks: {}".format(blocks))
if bottleneck:
assert (sum(layers) * 3 + 2 == blocks)
else:
assert (sum(layers) * 2 + 2 == blocks)
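    # Sanity-check example: ResNet-50 uses layers = [3, 4, 6, 3], so
    # sum(layers) * 3 + 2 = 16 * 3 + 2 = 50 (three convolutions per
    # bottleneck unit plus the initial block and the final classifier).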
init_block_channels = 64
channels_per_layers = [64, 128, 256, 512]
if bottleneck:
bottleneck_factor = 4
channels_per_layers = [ci * bottleneck_factor for ci in channels_per_layers]
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
if width_scale != 1.0:
channels = [[int(cij * width_scale) if (i != len(channels) - 1) or (j != len(ci) - 1) else cij
for j, cij in enumerate(ci)] for i, ci in enumerate(channels)]
init_block_channels = int(init_block_channels * width_scale)
net = OctResNet(
channels=channels,
init_block_channels=init_block_channels,
bottleneck=bottleneck,
conv1_stride=conv1_stride,
oct_alpha=oct_alpha,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def octresnet10_ad2(**kwargs):
"""
Oct-ResNet-10 (alpha=1/2) model from 'Drop an Octave: Reducing Spatial Redundancy in Convolutional Neural Networks
with Octave Convolution,' https://arxiv.org/abs/1904.05049.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_octresnet(blocks=10, oct_alpha=0.5, model_name="octresnet10_ad2", **kwargs)
def octresnet50b_ad2(**kwargs):
"""
Oct-ResNet-50b (alpha=1/2) model from 'Drop an Octave: Reducing Spatial Redundancy in Convolutional Neural Networks
with Octave Convolution,' https://arxiv.org/abs/1904.05049.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_octresnet(blocks=50, conv1_stride=False, oct_alpha=0.5, model_name="octresnet50b_ad2", **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
octresnet10_ad2,
octresnet50b_ad2,
]
for model in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != octresnet10_ad2 or weight_count == 5423016)
assert (model != octresnet50b_ad2 or weight_count == 25557032)
x = mx.nd.zeros((14, 3, 224, 224), ctx=ctx)
y = net(x)
assert (y.shape == (14, 1000))
if __name__ == "__main__":
_test()
# File: imgclsmob-master/gluon/gluoncv2/models/alexnet.py
"""
AlexNet for ImageNet-1K, implemented in Gluon.
Original paper: 'One weird trick for parallelizing convolutional neural networks,'
https://arxiv.org/abs/1404.5997.
"""
__all__ = ['AlexNet', 'alexnet', 'alexnetb']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import ConvBlock
class AlexConv(ConvBlock):
"""
AlexNet specific convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of 2 int
Convolution window size.
strides : int or tuple/list of 2 int
Strides of the convolution.
padding : int or tuple/list of 2 int
Padding value for convolution layer.
use_lrn : bool
Whether to use LRN layer.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
strides,
padding,
use_lrn,
**kwargs):
super(AlexConv, self).__init__(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
strides=strides,
padding=padding,
use_bias=True,
use_bn=False,
**kwargs)
self.use_lrn = use_lrn
def hybrid_forward(self, F, x):
x = super(AlexConv, self).hybrid_forward(F, x)
if self.use_lrn:
x = F.LRN(x, nsize=5)
return x
class AlexDense(HybridBlock):
"""
AlexNet specific dense block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
"""
def __init__(self,
in_channels,
out_channels,
**kwargs):
super(AlexDense, self).__init__(**kwargs)
with self.name_scope():
self.fc = nn.Dense(
units=out_channels,
in_units=in_channels)
self.activ = nn.Activation("relu")
self.dropout = nn.Dropout(rate=0.5)
def hybrid_forward(self, F, x):
x = self.fc(x)
x = self.activ(x)
x = self.dropout(x)
return x
class AlexOutputBlock(HybridBlock):
"""
AlexNet specific output block.
Parameters:
----------
in_channels : int
Number of input channels.
classes : int
Number of classification classes.
"""
def __init__(self,
in_channels,
classes,
**kwargs):
super(AlexOutputBlock, self).__init__(**kwargs)
mid_channels = 4096
with self.name_scope():
self.fc1 = AlexDense(
in_channels=in_channels,
out_channels=mid_channels)
self.fc2 = AlexDense(
in_channels=mid_channels,
out_channels=mid_channels)
self.fc3 = nn.Dense(
units=classes,
in_units=mid_channels)
def hybrid_forward(self, F, x):
x = self.fc1(x)
x = self.fc2(x)
x = self.fc3(x)
return x
class AlexNet(HybridBlock):
"""
AlexNet model from 'One weird trick for parallelizing convolutional neural networks,'
https://arxiv.org/abs/1404.5997.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
kernel_sizes : list of list of int
Convolution window sizes for each unit.
strides : list of list of int or tuple/list of 2 int
Strides of the convolution for each unit.
paddings : list of list of int or tuple/list of 2 int
Padding value for convolution layer for each unit.
use_lrn : bool
Whether to use LRN layer.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
kernel_sizes,
strides,
paddings,
use_lrn,
in_channels=3,
in_size=(224, 224),
classes=1000,
**kwargs):
super(AlexNet, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
for i, channels_per_stage in enumerate(channels):
use_lrn_i = use_lrn and (i in [0, 1])
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
with stage.name_scope():
for j, out_channels in enumerate(channels_per_stage):
stage.add(AlexConv(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_sizes[i][j],
strides=strides[i][j],
padding=paddings[i][j],
use_lrn=use_lrn_i))
in_channels = out_channels
stage.add(nn.MaxPool2D(
pool_size=3,
strides=2,
padding=0,
ceil_mode=True))
self.features.add(stage)
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
in_channels = in_channels * 6 * 6
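            # For a 224x224 input both versions end with a 6x6 feature map
            # (version 'a': 224 -> 54 -> 27 -> 27 -> 13 -> 13 -> 6 with
            # ceil-mode pooling), hence the flattened size 256 * 6 * 6 = 9216.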
self.output.add(AlexOutputBlock(
in_channels=in_channels,
classes=classes))
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_alexnet(version="a",
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create AlexNet model with specific parameters.
Parameters:
----------
version : str, default 'a'
Version of AlexNet ('a' or 'b').
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
if version == "a":
channels = [[96], [256], [384, 384, 256]]
kernel_sizes = [[11], [5], [3, 3, 3]]
strides = [[4], [1], [1, 1, 1]]
paddings = [[0], [2], [1, 1, 1]]
use_lrn = True
elif version == "b":
channels = [[64], [192], [384, 256, 256]]
kernel_sizes = [[11], [5], [3, 3, 3]]
strides = [[4], [1], [1, 1, 1]]
paddings = [[2], [2], [1, 1, 1]]
use_lrn = False
else:
raise ValueError("Unsupported AlexNet version {}".format(version))
net = AlexNet(
channels=channels,
kernel_sizes=kernel_sizes,
strides=strides,
paddings=paddings,
use_lrn=use_lrn,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def alexnet(**kwargs):
"""
AlexNet model from 'One weird trick for parallelizing convolutional neural networks,'
https://arxiv.org/abs/1404.5997.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_alexnet(model_name="alexnet", **kwargs)
def alexnetb(**kwargs):
"""
AlexNet-b model from 'One weird trick for parallelizing convolutional neural networks,'
https://arxiv.org/abs/1404.5997. Non-standard version.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_alexnet(version="b", model_name="alexnetb", **kwargs)
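# A minimal usage sketch (editorial addition; `_demo_alexnet_usage` is not part
# of the original API). It mirrors the checks in `_test()` below: build a
# model, initialize its weights, and classify a dummy ImageNet-sized batch.
def _demo_alexnet_usage():
    import mxnet as mx
    net = alexnetb()  # or alexnet() for the version-"a" configuration
    net.initialize(ctx=mx.cpu())
    x = mx.nd.zeros((1, 3, 224, 224))
    y = net(x)
    assert (y.shape == (1, 1000))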
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
alexnet,
alexnetb,
]
for model in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != alexnet or weight_count == 62378344)
assert (model != alexnetb or weight_count == 61100840)
x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
y = net(x)
assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
| 9,854
| 29.137615
| 115
|
py
|
imgclsmob
|
imgclsmob-master/gluon/gluoncv2/models/mobilenet_cub.py
|
"""
MobileNet & FD-MobileNet for CUB-200-2011, implemented in Gluon.
Original papers:
- 'MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications,'
https://arxiv.org/abs/1704.04861.
- 'FD-MobileNet: Improved MobileNet with A Fast Downsampling Strategy,' https://arxiv.org/abs/1802.03750.
"""
__all__ = ['mobilenet_w1_cub', 'mobilenet_w3d4_cub', 'mobilenet_wd2_cub', 'mobilenet_wd4_cub', 'fdmobilenet_w1_cub',
'fdmobilenet_w3d4_cub', 'fdmobilenet_wd2_cub', 'fdmobilenet_wd4_cub']
from .mobilenet import get_mobilenet
from .fdmobilenet import get_fdmobilenet
def mobilenet_w1_cub(classes=200, **kwargs):
"""
1.0 MobileNet-224 model for CUB-200-2011 from 'MobileNets: Efficient Convolutional Neural Networks for Mobile
Vision Applications,' https://arxiv.org/abs/1704.04861.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_mobilenet(classes=classes, width_scale=1.0, model_name="mobilenet_w1_cub", **kwargs)
def mobilenet_w3d4_cub(classes=200, **kwargs):
"""
0.75 MobileNet-224 model for CUB-200-2011 from 'MobileNets: Efficient Convolutional Neural Networks for Mobile
Vision Applications,' https://arxiv.org/abs/1704.04861.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_mobilenet(classes=classes, width_scale=0.75, model_name="mobilenet_w3d4_cub", **kwargs)
def mobilenet_wd2_cub(classes=200, **kwargs):
"""
0.5 MobileNet-224 model for CUB-200-2011 from 'MobileNets: Efficient Convolutional Neural Networks for Mobile
Vision Applications,' https://arxiv.org/abs/1704.04861.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_mobilenet(classes=classes, width_scale=0.5, model_name="mobilenet_wd2_cub", **kwargs)
def mobilenet_wd4_cub(classes=200, **kwargs):
"""
0.25 MobileNet-224 model for CUB-200-2011 from 'MobileNets: Efficient Convolutional Neural Networks for Mobile
Vision Applications,' https://arxiv.org/abs/1704.04861.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_mobilenet(classes=classes, width_scale=0.25, model_name="mobilenet_wd4_cub", **kwargs)
def fdmobilenet_w1_cub(classes=200, **kwargs):
"""
FD-MobileNet 1.0x model for CUB-200-2011 from 'FD-MobileNet: Improved MobileNet with A Fast Downsampling Strategy,'
https://arxiv.org/abs/1802.03750.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_fdmobilenet(classes=classes, width_scale=1.0, model_name="fdmobilenet_w1_cub", **kwargs)
def fdmobilenet_w3d4_cub(classes=200, **kwargs):
"""
FD-MobileNet 0.75x model for CUB-200-2011 from 'FD-MobileNet: Improved MobileNet with A Fast Downsampling Strategy,'
https://arxiv.org/abs/1802.03750.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_fdmobilenet(classes=classes, width_scale=0.75, model_name="fdmobilenet_w3d4_cub", **kwargs)
def fdmobilenet_wd2_cub(classes=200, **kwargs):
"""
FD-MobileNet 0.5x model for CUB-200-2011 from 'FD-MobileNet: Improved MobileNet with A Fast Downsampling Strategy,'
https://arxiv.org/abs/1802.03750.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_fdmobilenet(classes=classes, width_scale=0.5, model_name="fdmobilenet_wd2_cub", **kwargs)
def fdmobilenet_wd4_cub(classes=200, **kwargs):
"""
FD-MobileNet 0.25x model for CUB-200-2011 from 'FD-MobileNet: Improved MobileNet with A Fast Downsampling Strategy,'
https://arxiv.org/abs/1802.03750.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_fdmobilenet(classes=classes, width_scale=0.25, model_name="fdmobilenet_wd4_cub", **kwargs)
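# A naming-convention sketch (editorial addition; the `_demo_*` helper is
# hypothetical). The builder suffix encodes the width scale (w1=1.0,
# w3d4=0.75, wd2=0.5, wd4=0.25), and every CUB variant shares the
# 200-class head.
def _demo_cub_builders():
    import mxnet as mx
    for builder in (mobilenet_w1_cub, fdmobilenet_wd4_cub):
        net = builder()
        net.initialize(ctx=mx.cpu())
        y = net(mx.nd.zeros((1, 3, 224, 224)))
        assert (y.shape == (1, 200))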
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
mobilenet_w1_cub,
mobilenet_w3d4_cub,
mobilenet_wd2_cub,
mobilenet_wd4_cub,
fdmobilenet_w1_cub,
fdmobilenet_w3d4_cub,
fdmobilenet_wd2_cub,
fdmobilenet_wd4_cub,
]
for model in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != mobilenet_w1_cub or weight_count == 3411976)
assert (model != mobilenet_w3d4_cub or weight_count == 1970360)
assert (model != mobilenet_wd2_cub or weight_count == 921192)
assert (model != mobilenet_wd4_cub or weight_count == 264472)
assert (model != fdmobilenet_w1_cub or weight_count == 2081288)
assert (model != fdmobilenet_w3d4_cub or weight_count == 1218104)
assert (model != fdmobilenet_wd2_cub or weight_count == 583528)
assert (model != fdmobilenet_wd4_cub or weight_count == 177560)
x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
y = net(x)
assert (y.shape == (1, 200))
if __name__ == "__main__":
_test()
| 7,904
| 35.597222
| 120
|
py
|
imgclsmob
|
imgclsmob-master/gluon/gluoncv2/models/wrn.py
|
"""
WRN for ImageNet-1K, implemented in Gluon.
Original paper: 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.
"""
__all__ = ['WRN', 'wrn50_2']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
class WRNConv(HybridBlock):
"""
WRN specific convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of 2 int
Convolution window size.
strides : int or tuple/list of 2 int
Strides of the convolution.
padding : int or tuple/list of 2 int
Padding value for convolution layer.
activate : bool
        Whether to activate the convolution block.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
strides,
padding,
activate,
**kwargs):
super(WRNConv, self).__init__(**kwargs)
self.activate = activate
with self.name_scope():
self.conv = nn.Conv2D(
channels=out_channels,
kernel_size=kernel_size,
strides=strides,
padding=padding,
use_bias=True,
in_channels=in_channels)
if self.activate:
self.activ = nn.Activation("relu")
def hybrid_forward(self, F, x):
x = self.conv(x)
if self.activate:
x = self.activ(x)
return x
def wrn_conv1x1(in_channels,
out_channels,
strides,
activate):
"""
1x1 version of the WRN specific convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
activate : bool
        Whether to activate the convolution block.
"""
return WRNConv(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=1,
strides=strides,
padding=0,
activate=activate)
def wrn_conv3x3(in_channels,
out_channels,
strides,
activate):
"""
3x3 version of the WRN specific convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
activate : bool
        Whether to activate the convolution block.
"""
return WRNConv(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
strides=strides,
padding=1,
activate=activate)
class WRNBottleneck(HybridBlock):
"""
WRN bottleneck block for residual path in WRN unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
width_factor : float
Wide scale factor for width of layers.
"""
def __init__(self,
in_channels,
out_channels,
strides,
width_factor,
**kwargs):
super(WRNBottleneck, self).__init__(**kwargs)
mid_channels = int(round(out_channels // 4 * width_factor))
with self.name_scope():
self.conv1 = wrn_conv1x1(
in_channels=in_channels,
out_channels=mid_channels,
strides=1,
activate=True)
self.conv2 = wrn_conv3x3(
in_channels=mid_channels,
out_channels=mid_channels,
strides=strides,
activate=True)
self.conv3 = wrn_conv1x1(
in_channels=mid_channels,
out_channels=out_channels,
strides=1,
activate=False)
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
return x
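# A worked example of the bottleneck width (editorial sketch, not part of the
# original module): for WRN-50-2 the first-stage unit has out_channels=256 and
# width_factor=2.0, so mid_channels = round(256 // 4 * 2.0) = 128, twice the
# 64 middle channels of the corresponding ResNet-50 bottleneck, hence "wide".
def _demo_wrn_bottleneck_width():
    assert (int(round(256 // 4 * 2.0)) == 128)
    assert (int(round(2048 // 4 * 2.0)) == 1024)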
class WRNUnit(HybridBlock):
"""
WRN unit with residual connection.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
width_factor : float
Wide scale factor for width of layers.
"""
def __init__(self,
in_channels,
out_channels,
strides,
width_factor,
**kwargs):
super(WRNUnit, self).__init__(**kwargs)
self.resize_identity = (in_channels != out_channels) or (strides != 1)
with self.name_scope():
self.body = WRNBottleneck(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
width_factor=width_factor)
if self.resize_identity:
self.identity_conv = wrn_conv1x1(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
activate=False)
self.activ = nn.Activation("relu")
def hybrid_forward(self, F, x):
if self.resize_identity:
identity = self.identity_conv(x)
else:
identity = x
x = self.body(x)
x = x + identity
x = self.activ(x)
return x
class WRNInitBlock(HybridBlock):
"""
WRN specific initial block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
"""
def __init__(self,
in_channels,
out_channels,
**kwargs):
super(WRNInitBlock, self).__init__(**kwargs)
with self.name_scope():
self.conv = WRNConv(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=7,
strides=2,
padding=3,
activate=True)
self.pool = nn.MaxPool2D(
pool_size=3,
strides=2,
padding=1)
def hybrid_forward(self, F, x):
x = self.conv(x)
x = self.pool(x)
return x
class WRN(HybridBlock):
"""
WRN model from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
width_factor : float
Wide scale factor for width of layers.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
width_factor,
in_channels=3,
in_size=(224, 224),
classes=1000,
**kwargs):
super(WRN, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
self.features.add(WRNInitBlock(
in_channels=in_channels,
out_channels=init_block_channels))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
with stage.name_scope():
for j, out_channels in enumerate(channels_per_stage):
strides = 2 if (j == 0) and (i != 0) else 1
stage.add(WRNUnit(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
width_factor=width_factor))
in_channels = out_channels
self.features.add(stage)
self.features.add(nn.AvgPool2D(
pool_size=7,
strides=1))
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
self.output.add(nn.Dense(
units=classes,
in_units=in_channels))
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_wrn(blocks,
width_factor,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create WRN model with specific parameters.
Parameters:
----------
blocks : int
Number of blocks.
width_factor : float
Wide scale factor for width of layers.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
if blocks == 50:
layers = [3, 4, 6, 3]
elif blocks == 101:
layers = [3, 4, 23, 3]
elif blocks == 152:
layers = [3, 8, 36, 3]
elif blocks == 200:
layers = [3, 24, 36, 3]
else:
raise ValueError("Unsupported WRN with number of blocks: {}".format(blocks))
init_block_channels = 64
channels_per_layers = [256, 512, 1024, 2048]
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
net = WRN(
channels=channels,
init_block_channels=init_block_channels,
width_factor=width_factor,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def wrn50_2(**kwargs):
"""
WRN-50-2 model from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_wrn(blocks=50, width_factor=2.0, model_name="wrn50_2", **kwargs)
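# A minimal usage sketch (editorial addition; `_demo_wrn_usage` is
# hypothetical). WRN is a HybridBlock, so it can also be hybridized into a
# static graph before inference.
def _demo_wrn_usage():
    import mxnet as mx
    net = wrn50_2()
    net.initialize(ctx=mx.cpu())
    net.hybridize()
    y = net(mx.nd.zeros((1, 3, 224, 224)))
    assert (y.shape == (1, 1000))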
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
wrn50_2,
]
for model in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != wrn50_2 or weight_count == 68849128)
x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
y = net(x)
assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
| 12,149
| 27.723404
| 115
|
py
|
imgclsmob
|
imgclsmob-master/gluon/gluoncv2/models/inceptionv3.py
|
"""
InceptionV3 for ImageNet-1K, implemented in Gluon.
Original paper: 'Rethinking the Inception Architecture for Computer Vision,'
https://arxiv.org/abs/1512.00567.
"""
__all__ = ['InceptionV3', 'inceptionv3', 'inceptionv3_gl', 'MaxPoolBranch', 'AvgPoolBranch', 'Conv1x1Branch',
'ConvSeqBranch']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from mxnet.gluon.contrib.nn import HybridConcurrent
from .common import ConvBlock, conv1x1_block, conv3x3_block
class MaxPoolBranch(HybridBlock):
"""
Inception specific max pooling branch block.
"""
def __init__(self,
**kwargs):
super(MaxPoolBranch, self).__init__(**kwargs)
with self.name_scope():
self.pool = nn.MaxPool2D(
pool_size=3,
strides=2,
padding=0)
def hybrid_forward(self, F, x):
x = self.pool(x)
return x
class AvgPoolBranch(HybridBlock):
"""
Inception specific average pooling branch block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
bn_epsilon : float
Small float added to variance in Batch norm.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
count_include_pad : bool, default True
Whether to include the zero-padding in the averaging calculation.
"""
def __init__(self,
in_channels,
out_channels,
bn_epsilon,
bn_use_global_stats,
count_include_pad=True,
**kwargs):
super(AvgPoolBranch, self).__init__(**kwargs)
with self.name_scope():
self.pool = nn.AvgPool2D(
pool_size=3,
strides=1,
padding=1,
count_include_pad=count_include_pad)
self.conv = conv1x1_block(
in_channels=in_channels,
out_channels=out_channels,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats)
def hybrid_forward(self, F, x):
x = self.pool(x)
x = self.conv(x)
return x
class Conv1x1Branch(HybridBlock):
"""
Inception specific convolutional 1x1 branch block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
bn_epsilon : float
Small float added to variance in Batch norm.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels,
out_channels,
bn_epsilon,
bn_use_global_stats,
**kwargs):
super(Conv1x1Branch, self).__init__(**kwargs)
with self.name_scope():
self.conv = conv1x1_block(
in_channels=in_channels,
out_channels=out_channels,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats)
def hybrid_forward(self, F, x):
x = self.conv(x)
return x
class ConvSeqBranch(HybridBlock):
"""
Inception specific convolutional sequence branch block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels_list : list of tuple of int
List of numbers of output channels.
kernel_size_list : list of tuple of int or tuple of tuple/list of 2 int
List of convolution window sizes.
strides_list : list of tuple of int or tuple of tuple/list of 2 int
List of strides of the convolution.
padding_list : list of tuple of int or tuple of tuple/list of 2 int
List of padding values for convolution layers.
bn_epsilon : float
Small float added to variance in Batch norm.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels,
out_channels_list,
kernel_size_list,
strides_list,
padding_list,
bn_epsilon,
bn_use_global_stats,
**kwargs):
super(ConvSeqBranch, self).__init__(**kwargs)
assert (len(out_channels_list) == len(kernel_size_list))
assert (len(out_channels_list) == len(strides_list))
assert (len(out_channels_list) == len(padding_list))
with self.name_scope():
self.conv_list = nn.HybridSequential(prefix="")
for out_channels, kernel_size, strides, padding in zip(
out_channels_list, kernel_size_list, strides_list, padding_list):
self.conv_list.add(ConvBlock(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
strides=strides,
padding=padding,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats))
in_channels = out_channels
def hybrid_forward(self, F, x):
x = self.conv_list(x)
return x
class ConvSeq3x3Branch(HybridBlock):
"""
InceptionV3 specific convolutional sequence branch block with splitting by 3x3.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels_list : list of tuple of int
List of numbers of output channels.
kernel_size_list : list of tuple of int or tuple of tuple/list of 2 int
List of convolution window sizes.
strides_list : list of tuple of int or tuple of tuple/list of 2 int
List of strides of the convolution.
padding_list : list of tuple of int or tuple of tuple/list of 2 int
List of padding values for convolution layers.
bn_epsilon : float
Small float added to variance in Batch norm.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels,
out_channels_list,
kernel_size_list,
strides_list,
padding_list,
bn_epsilon,
bn_use_global_stats,
**kwargs):
super(ConvSeq3x3Branch, self).__init__(**kwargs)
with self.name_scope():
self.conv_list = nn.HybridSequential(prefix="")
for out_channels, kernel_size, strides, padding in zip(
out_channels_list, kernel_size_list, strides_list, padding_list):
self.conv_list.add(ConvBlock(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
strides=strides,
padding=padding,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats))
in_channels = out_channels
self.conv1x3 = ConvBlock(
in_channels=in_channels,
out_channels=in_channels,
kernel_size=(1, 3),
strides=1,
padding=(0, 1),
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats)
self.conv3x1 = ConvBlock(
in_channels=in_channels,
out_channels=in_channels,
kernel_size=(3, 1),
strides=1,
padding=(1, 0),
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats)
def hybrid_forward(self, F, x):
x = self.conv_list(x)
y1 = self.conv1x3(x)
y2 = self.conv3x1(x)
x = F.concat(y1, y2, dim=1)
return x
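# A channel-accounting sketch (editorial addition; `_demo_*` is hypothetical).
# The trailing 1x3 and 3x1 convolutions each preserve the width of the
# preceding sequence and are concatenated, so the branch emits twice its
# internal width, e.g. 384 internal channels yield 768 outputs in Inception-C.
def _demo_conv_seq3x3_branch():
    import mxnet as mx
    branch = ConvSeq3x3Branch(
        in_channels=1280,
        out_channels_list=(384,),
        kernel_size_list=(1,),
        strides_list=(1,),
        padding_list=(0,),
        bn_epsilon=1e-5,
        bn_use_global_stats=False)
    branch.initialize()
    y = branch(mx.nd.zeros((1, 1280, 8, 8)))
    assert (y.shape == (1, 768, 8, 8))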
class InceptionAUnit(HybridBlock):
"""
InceptionV3 type Inception-A unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
bn_epsilon : float
Small float added to variance in Batch norm.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels,
out_channels,
bn_epsilon,
bn_use_global_stats,
**kwargs):
super(InceptionAUnit, self).__init__(**kwargs)
assert (out_channels > 224)
pool_out_channels = out_channels - 224
with self.name_scope():
self.branches = HybridConcurrent(axis=1, prefix="")
self.branches.add(Conv1x1Branch(
in_channels=in_channels,
out_channels=64,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats))
self.branches.add(ConvSeqBranch(
in_channels=in_channels,
out_channels_list=(48, 64),
kernel_size_list=(1, 5),
strides_list=(1, 1),
padding_list=(0, 2),
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats))
self.branches.add(ConvSeqBranch(
in_channels=in_channels,
out_channels_list=(64, 96, 96),
kernel_size_list=(1, 3, 3),
strides_list=(1, 1, 1),
padding_list=(0, 1, 1),
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats))
self.branches.add(AvgPoolBranch(
in_channels=in_channels,
out_channels=pool_out_channels,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats))
def hybrid_forward(self, F, x):
x = self.branches(x)
return x
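# A channel-accounting sketch (editorial): the four branches emit
# 64 + 64 + 96 + (out_channels - 224) channels, which concatenate back to
# out_channels; e.g. the first A-unit maps 192 inputs to 64 + 64 + 96 + 32 = 256.
def _demo_inception_a_channels():
    out_channels = 256
    pool_out_channels = out_channels - 224
    assert (64 + 64 + 96 + pool_out_channels == out_channels)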
class ReductionAUnit(HybridBlock):
"""
InceptionV3 type Reduction-A unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
bn_epsilon : float
Small float added to variance in Batch norm.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels,
out_channels,
bn_epsilon,
bn_use_global_stats,
**kwargs):
super(ReductionAUnit, self).__init__(**kwargs)
assert (in_channels == 288)
assert (out_channels == 768)
with self.name_scope():
self.branches = HybridConcurrent(axis=1, prefix="")
self.branches.add(ConvSeqBranch(
in_channels=in_channels,
out_channels_list=(384,),
kernel_size_list=(3,),
strides_list=(2,),
padding_list=(0,),
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats))
self.branches.add(ConvSeqBranch(
in_channels=in_channels,
out_channels_list=(64, 96, 96),
kernel_size_list=(1, 3, 3),
strides_list=(1, 1, 2),
padding_list=(0, 1, 0),
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats))
self.branches.add(MaxPoolBranch())
def hybrid_forward(self, F, x):
x = self.branches(x)
return x
class InceptionBUnit(HybridBlock):
"""
InceptionV3 type Inception-B unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
mid_channels : int
Number of output channels in the 7x7 branches.
bn_epsilon : float
Small float added to variance in Batch norm.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels,
out_channels,
mid_channels,
bn_epsilon,
bn_use_global_stats,
**kwargs):
super(InceptionBUnit, self).__init__(**kwargs)
assert (in_channels == 768)
assert (out_channels == 768)
with self.name_scope():
self.branches = HybridConcurrent(axis=1, prefix="")
self.branches.add(Conv1x1Branch(
in_channels=in_channels,
out_channels=192,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats))
self.branches.add(ConvSeqBranch(
in_channels=in_channels,
out_channels_list=(mid_channels, mid_channels, 192),
kernel_size_list=(1, (1, 7), (7, 1)),
strides_list=(1, 1, 1),
padding_list=(0, (0, 3), (3, 0)),
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats))
self.branches.add(ConvSeqBranch(
in_channels=in_channels,
out_channels_list=(mid_channels, mid_channels, mid_channels, mid_channels, 192),
kernel_size_list=(1, (7, 1), (1, 7), (7, 1), (1, 7)),
strides_list=(1, 1, 1, 1, 1),
padding_list=(0, (3, 0), (0, 3), (3, 0), (0, 3)),
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats))
self.branches.add(AvgPoolBranch(
in_channels=in_channels,
out_channels=192,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats))
def hybrid_forward(self, F, x):
x = self.branches(x)
return x
class ReductionBUnit(HybridBlock):
"""
InceptionV3 type Reduction-B unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
bn_epsilon : float
Small float added to variance in Batch norm.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels,
out_channels,
bn_epsilon,
bn_use_global_stats,
**kwargs):
super(ReductionBUnit, self).__init__(**kwargs)
assert (in_channels == 768)
assert (out_channels == 1280)
with self.name_scope():
self.branches = HybridConcurrent(axis=1, prefix="")
self.branches.add(ConvSeqBranch(
in_channels=in_channels,
out_channels_list=(192, 320),
kernel_size_list=(1, 3),
strides_list=(1, 2),
padding_list=(0, 0),
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats))
self.branches.add(ConvSeqBranch(
in_channels=in_channels,
out_channels_list=(192, 192, 192, 192),
kernel_size_list=(1, (1, 7), (7, 1), 3),
strides_list=(1, 1, 1, 2),
padding_list=(0, (0, 3), (3, 0), 0),
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats))
self.branches.add(MaxPoolBranch())
def hybrid_forward(self, F, x):
x = self.branches(x)
return x
class InceptionCUnit(HybridBlock):
"""
InceptionV3 type Inception-C unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
bn_epsilon : float
Small float added to variance in Batch norm.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels,
out_channels,
bn_epsilon,
bn_use_global_stats,
**kwargs):
super(InceptionCUnit, self).__init__(**kwargs)
assert (out_channels == 2048)
with self.name_scope():
self.branches = HybridConcurrent(axis=1, prefix="")
self.branches.add(Conv1x1Branch(
in_channels=in_channels,
out_channels=320,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats))
self.branches.add(ConvSeq3x3Branch(
in_channels=in_channels,
out_channels_list=(384,),
kernel_size_list=(1,),
strides_list=(1,),
padding_list=(0,),
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats))
self.branches.add(ConvSeq3x3Branch(
in_channels=in_channels,
out_channels_list=(448, 384),
kernel_size_list=(1, 3),
strides_list=(1, 1),
padding_list=(0, 1),
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats))
self.branches.add(AvgPoolBranch(
in_channels=in_channels,
out_channels=192,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats))
def hybrid_forward(self, F, x):
x = self.branches(x)
return x
class InceptInitBlock(HybridBlock):
"""
InceptionV3 specific initial block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
bn_epsilon : float
Small float added to variance in Batch norm.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
"""
def __init__(self,
in_channels,
out_channels,
bn_epsilon,
bn_use_global_stats,
**kwargs):
super(InceptInitBlock, self).__init__(**kwargs)
assert (out_channels == 192)
with self.name_scope():
self.conv1 = conv3x3_block(
in_channels=in_channels,
out_channels=32,
strides=2,
padding=0,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats)
self.conv2 = conv3x3_block(
in_channels=32,
out_channels=32,
strides=1,
padding=0,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats)
self.conv3 = conv3x3_block(
in_channels=32,
out_channels=64,
strides=1,
padding=1,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats)
self.pool1 = nn.MaxPool2D(
pool_size=3,
strides=2,
padding=0)
self.conv4 = conv1x1_block(
in_channels=64,
out_channels=80,
strides=1,
padding=0,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats)
self.conv5 = conv3x3_block(
in_channels=80,
out_channels=192,
strides=1,
padding=0,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats)
self.pool2 = nn.MaxPool2D(
pool_size=3,
strides=2,
padding=0)
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
x = self.pool1(x)
x = self.conv4(x)
x = self.conv5(x)
x = self.pool2(x)
return x
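# A spatial-size sketch (editorial addition; `_demo_*` is hypothetical). For a
# 299x299 input the init block produces a 192x35x35 tensor
# (299 -> 149 -> 147 -> 147 -> 73 -> 73 -> 71 -> 35), matching the 35x35 grid
# expected by the first Inception-A stage.
def _demo_incept_init_block():
    import mxnet as mx
    block = InceptInitBlock(
        in_channels=3,
        out_channels=192,
        bn_epsilon=1e-5,
        bn_use_global_stats=False)
    block.initialize()
    y = block(mx.nd.zeros((1, 3, 299, 299)))
    assert (y.shape == (1, 192, 35, 35))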
class InceptionV3(HybridBlock):
"""
InceptionV3 model from 'Rethinking the Inception Architecture for Computer Vision,'
https://arxiv.org/abs/1512.00567.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
b_mid_channels : list of int
Number of middle channels for each Inception-B unit.
    dropout_rate : float, default 0.5
Fraction of the input units to drop. Must be a number between 0 and 1.
bn_epsilon : float, default 1e-5
Small float added to variance in Batch norm.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (299, 299)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
b_mid_channels,
dropout_rate=0.5,
bn_epsilon=1e-5,
bn_use_global_stats=False,
in_channels=3,
in_size=(299, 299),
classes=1000,
**kwargs):
super(InceptionV3, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
normal_units = [InceptionAUnit, InceptionBUnit, InceptionCUnit]
reduction_units = [ReductionAUnit, ReductionBUnit]
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
self.features.add(InceptInitBlock(
in_channels=in_channels,
out_channels=init_block_channels,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
with stage.name_scope():
for j, out_channels in enumerate(channels_per_stage):
if (j == 0) and (i != 0):
unit = reduction_units[i - 1]
else:
unit = normal_units[i]
if unit == InceptionBUnit:
stage.add(unit(
in_channels=in_channels,
out_channels=out_channels,
mid_channels=b_mid_channels[j - 1],
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats))
else:
stage.add(unit(
in_channels=in_channels,
out_channels=out_channels,
bn_epsilon=bn_epsilon,
bn_use_global_stats=bn_use_global_stats))
in_channels = out_channels
self.features.add(stage)
self.features.add(nn.AvgPool2D(
pool_size=8,
strides=1))
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
self.output.add(nn.Dropout(rate=dropout_rate))
self.output.add(nn.Dense(
units=classes,
in_units=in_channels))
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_inceptionv3(model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create InceptionV3 model with specific parameters.
Parameters:
----------
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
init_block_channels = 192
channels = [[256, 288, 288],
[768, 768, 768, 768, 768],
[1280, 2048, 2048]]
b_mid_channels = [128, 160, 160, 192]
net = InceptionV3(
channels=channels,
init_block_channels=init_block_channels,
b_mid_channels=b_mid_channels,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def inceptionv3(**kwargs):
"""
InceptionV3 model from 'Rethinking the Inception Architecture for Computer Vision,'
https://arxiv.org/abs/1512.00567.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_inceptionv3(model_name="inceptionv3", bn_epsilon=1e-3, **kwargs)
def inceptionv3_gl(**kwargs):
"""
InceptionV3 model (Gluon-like) from 'Rethinking the Inception Architecture for Computer Vision,'
https://arxiv.org/abs/1512.00567.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_inceptionv3(model_name="inceptionv3_gl", bn_epsilon=1e-5, **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
inceptionv3,
inceptionv3_gl,
]
for model in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != inceptionv3 or weight_count == 23834568)
assert (model != inceptionv3_gl or weight_count == 23834568)
x = mx.nd.zeros((1, 3, 299, 299), ctx=ctx)
y = net(x)
assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
| 27,784
| 33.644638
| 115
|
py
|
imgclsmob
|
imgclsmob-master/gluon/gluoncv2/models/fdmobilenet.py
|
"""
FD-MobileNet for ImageNet-1K, implemented in Gluon.
Original paper: 'FD-MobileNet: Improved MobileNet with A Fast Downsampling Strategy,'
https://arxiv.org/abs/1802.03750.
"""
__all__ = ['fdmobilenet_w1', 'fdmobilenet_w3d4', 'fdmobilenet_wd2', 'fdmobilenet_wd4', 'get_fdmobilenet']
import os
from mxnet import cpu
from .mobilenet import MobileNet
def get_fdmobilenet(width_scale,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create FD-MobileNet model with specific parameters.
Parameters:
----------
width_scale : float
Scale factor for width of layers.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
channels = [[32], [64], [128, 128], [256, 256], [512, 512, 512, 512, 512, 1024]]
first_stage_stride = True
if width_scale != 1.0:
channels = [[int(cij * width_scale) for cij in ci] for ci in channels]
net = MobileNet(
channels=channels,
first_stage_stride=first_stage_stride,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def fdmobilenet_w1(**kwargs):
"""
FD-MobileNet 1.0x model from 'FD-MobileNet: Improved MobileNet with A Fast Downsampling Strategy,'
https://arxiv.org/abs/1802.03750.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_fdmobilenet(width_scale=1.0, model_name="fdmobilenet_w1", **kwargs)
def fdmobilenet_w3d4(**kwargs):
"""
FD-MobileNet 0.75x model from 'FD-MobileNet: Improved MobileNet with A Fast Downsampling Strategy,'
https://arxiv.org/abs/1802.03750.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_fdmobilenet(width_scale=0.75, model_name="fdmobilenet_w3d4", **kwargs)
def fdmobilenet_wd2(**kwargs):
"""
FD-MobileNet 0.5x model from 'FD-MobileNet: Improved MobileNet with A Fast Downsampling Strategy,'
https://arxiv.org/abs/1802.03750.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_fdmobilenet(width_scale=0.5, model_name="fdmobilenet_wd2", **kwargs)
def fdmobilenet_wd4(**kwargs):
"""
FD-MobileNet 0.25x model from 'FD-MobileNet: Improved MobileNet with A Fast Downsampling Strategy,'
https://arxiv.org/abs/1802.03750.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_fdmobilenet(width_scale=0.25, model_name="fdmobilenet_wd4", **kwargs)
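# A width-scaling sketch (editorial): non-unit scales shrink every stage width
# by plain truncation, e.g. at width_scale=0.75 the stem goes 32 -> 24 and the
# final stage 1024 -> 768, matching the channel rule in get_fdmobilenet.
def _demo_fdmobilenet_scaling():
    width_scale = 0.75
    assert (int(32 * width_scale) == 24)
    assert (int(1024 * width_scale) == 768)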
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
fdmobilenet_w1,
fdmobilenet_w3d4,
fdmobilenet_wd2,
fdmobilenet_wd4,
]
for model in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != fdmobilenet_w1 or weight_count == 2901288)
assert (model != fdmobilenet_w3d4 or weight_count == 1833304)
assert (model != fdmobilenet_wd2 or weight_count == 993928)
assert (model != fdmobilenet_wd4 or weight_count == 383160)
x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
y = net(x)
assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
| 5,360
| 30.910714
| 115
|
py
|
imgclsmob
|
imgclsmob-master/gluon/gluoncv2/models/others/__init__.py
| 0
| 0
| 0
|
py
|
|
imgclsmob
|
imgclsmob-master/gluon/metrics/seg_metrics_np.py
|
"""
Routines for segmentation metrics on numpy.
"""
import numpy as np
__all__ = ['seg_pixel_accuracy_np', 'segm_mean_accuracy_hmasks', 'segm_mean_accuracy', 'seg_mean_iou_np',
'segm_mean_iou2', 'seg_mean_iou_imasks_np', 'segm_fw_iou_hmasks', 'segm_fw_iou']
def seg_pixel_accuracy_np(label_imask,
pred_imask,
vague_idx=-1,
use_vague=False,
macro_average=True,
empty_result=0.0):
"""
The segmentation pixel accuracy.
Parameters:
----------
label_imask : np.array
Ground truth index mask (maybe batch of).
pred_imask : np.array
Predicted index mask (maybe batch of).
vague_idx : int, default -1
Index of masked pixels.
use_vague : bool, default False
Whether to use pixel masking.
macro_average : bool, default True
Whether to use micro or macro averaging.
empty_result : float, default 0.0
Result value for an image without any classes.
Returns:
-------
float or tuple of two ints
PA metric value.
"""
assert (label_imask.shape == pred_imask.shape)
if use_vague:
sum_u_ij = np.sum(label_imask.flat != vague_idx)
if sum_u_ij == 0:
if macro_average:
return empty_result
else:
return 0, 0
sum_u_ii = np.sum(np.logical_and(pred_imask.flat == label_imask.flat, label_imask.flat != vague_idx))
else:
sum_u_ii = np.sum(pred_imask.flat == label_imask.flat)
sum_u_ij = pred_imask.size
if macro_average:
return float(sum_u_ii) / sum_u_ij
else:
return sum_u_ii, sum_u_ij
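# A toy example (editorial addition; `_demo_*` is hypothetical). Two of four
# pixels match, and with vague masking enabled the pixel labeled -1 is
# excluded from both numerator and denominator.
def _demo_seg_pixel_accuracy_np():
    label_imask = np.array([[0, 1], [2, -1]])
    pred_imask = np.array([[0, 1], [1, 0]])
    assert (seg_pixel_accuracy_np(label_imask, pred_imask) == 0.5)
    assert (seg_pixel_accuracy_np(label_imask, pred_imask, use_vague=True) == 2.0 / 3)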
def segm_mean_accuracy_hmasks(label_hmask,
pred_hmask):
"""
The segmentation mean accuracy.
Parameters:
----------
label_hmask : np.array
Ground truth one-hot mask.
pred_hmask : np.array
Predicted one-hot mask.
Returns:
-------
float
MA metric value.
"""
assert (pred_hmask.shape == label_hmask.shape)
assert (len(pred_hmask.shape) == 3)
n = label_hmask.shape[0]
i_sum = 0
acc_sum = 0.0
for i in range(n):
class_i_pred_mask = pred_hmask[i, :, :]
class_i_label_mask = label_hmask[i, :, :]
u_i = np.sum(class_i_label_mask)
if u_i == 0:
continue
u_ii = np.sum(np.logical_and(class_i_pred_mask, class_i_label_mask))
class_acc = float(u_ii) / u_i
acc_sum += class_acc
i_sum += 1
if i_sum > 0:
mean_acc = acc_sum / i_sum
else:
mean_acc = 1.0
return mean_acc
def segm_mean_accuracy(label_hmask,
pred_imask):
"""
The segmentation mean accuracy.
Parameters:
----------
label_hmask : np.array
Ground truth one-hot mask.
pred_imask : np.array
Predicted index mask.
Returns:
-------
float
MA metric value.
"""
assert (len(label_hmask.shape) == 3)
assert (len(pred_imask.shape) == 2)
assert (pred_imask.shape == label_hmask.shape[1:])
n = label_hmask.shape[0]
i_sum = 0
acc_sum = 0.0
for i in range(n):
class_i_pred_mask = (pred_imask == i)
class_i_label_mask = label_hmask[i, :, :]
u_i = np.sum(class_i_label_mask)
if u_i == 0:
continue
u_ii = np.sum(np.logical_and(class_i_pred_mask, class_i_label_mask))
class_acc = float(u_ii) / u_i
acc_sum += class_acc
i_sum += 1
if i_sum > 0:
mean_acc = acc_sum / i_sum
else:
mean_acc = 1.0
return mean_acc
def segm_mean_iou_hmasks(label_hmask,
                         pred_hmask):
    """
    The segmentation mean intersection over union (for a pair of one-hot masks).
    Parameters:
    ----------
    label_hmask : np.array
        Ground truth one-hot mask.
    pred_hmask : np.array
        Predicted one-hot mask.
    Returns:
    -------
    float
        MIoU metric value.
    """
    assert (pred_hmask.shape == label_hmask.shape)
    assert (len(pred_hmask.shape) == 3)
    n = label_hmask.shape[0]
    i_sum = 0
    acc_iou = 0.0
    for i in range(n):
        class_i_pred_mask = pred_hmask[i, :, :]
        class_i_label_mask = label_hmask[i, :, :]
        u_i = np.sum(class_i_label_mask)
        u_ji_sj = np.sum(class_i_pred_mask)
        if (u_i + u_ji_sj) == 0:
            continue
        u_ii = np.sum(np.logical_and(class_i_pred_mask, class_i_label_mask))
        acc_iou += float(u_ii) / (u_i + u_ji_sj - u_ii)
        i_sum += 1
    if i_sum > 0:
        mean_iou = acc_iou / i_sum
    else:
        mean_iou = 1.0
    return mean_iou
def seg_mean_iou_np(label_hmask,
pred_imask):
"""
The segmentation mean intersection over union.
Parameters:
----------
label_hmask : np.array
Ground truth one-hot mask.
pred_imask : np.array
Predicted index mask.
Returns:
-------
float
MIoU metric value.
"""
assert (len(label_hmask.shape) == 3)
assert (len(pred_imask.shape) == 2)
assert (pred_imask.shape == label_hmask.shape[1:])
n = label_hmask.shape[0]
i_sum = 0
acc_iou = 0.0
for i in range(n):
class_i_pred_mask = (pred_imask == i)
class_i_label_mask = label_hmask[i, :, :]
u_i = np.sum(class_i_label_mask)
u_ji_sj = np.sum(class_i_pred_mask)
if (u_i + u_ji_sj) == 0:
continue
u_ii = np.sum(np.logical_and(class_i_pred_mask, class_i_label_mask))
acc_iou += float(u_ii) / (u_i + u_ji_sj - u_ii)
i_sum += 1
if i_sum > 0:
mean_iou = acc_iou / i_sum
else:
mean_iou = 1.0
return mean_iou
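# A toy example (editorial addition; `_demo_*` is hypothetical). Two classes on
# a 2x2 grid: class 0 has IoU = 2/3 and class 1 has IoU = 1/2, so the mean is
# (2/3 + 1/2) / 2 = 7/12.
def _demo_seg_mean_iou_np():
    label_hmask = np.array([[[1, 1], [0, 0]],
                            [[0, 0], [1, 1]]])
    pred_imask = np.array([[0, 0], [0, 1]])
    assert (abs(seg_mean_iou_np(label_hmask, pred_imask) - 7.0 / 12) < 1e-9)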
def segm_mean_iou2(label_hmask,
pred_hmask):
"""
The segmentation mean intersection over union.
Parameters:
----------
label_hmask : nd.array
Ground truth one-hot mask (batch of).
pred_hmask : nd.array
Predicted one-hot mask (batch of).
Returns:
-------
float
MIoU metric value.
"""
assert (len(label_hmask.shape) == 4)
assert (len(pred_hmask.shape) == 4)
assert (pred_hmask.shape == label_hmask.shape)
eps = np.finfo(np.float32).eps
class_axis = 1 # The axis that represents classes
inter_hmask = label_hmask * pred_hmask
u_i = label_hmask.sum(axis=[2, 3])
u_ji_sj = pred_hmask.sum(axis=[2, 3])
u_ii = inter_hmask.sum(axis=[2, 3])
class_count = (u_i + u_ji_sj > 0.0).sum(axis=class_axis) + eps
class_acc = u_ii / (u_i + u_ji_sj - u_ii + eps)
acc_iou = class_acc.sum(axis=class_axis) + eps
mean_iou = (acc_iou / class_count).mean().asscalar()
return mean_iou
def seg_mean_iou_imasks_np(label_imask,
pred_imask,
num_classes,
vague_idx=-1,
use_vague=False,
bg_idx=-1,
ignore_bg=False,
macro_average=True,
empty_result=0.0):
"""
The segmentation mean intersection over union.
Parameters:
----------
label_imask : nd.array
Ground truth index mask (batch of).
pred_imask : nd.array
Predicted index mask (batch of).
num_classes : int
Number of classes.
vague_idx : int, default -1
Index of masked pixels.
use_vague : bool, default False
Whether to use pixel masking.
bg_idx : int, default -1
Index of background class.
ignore_bg : bool, default False
Whether to ignore background class.
macro_average : bool, default True
Whether to use micro or macro averaging.
empty_result : float, default 0.0
Result value for an image without any classes.
Returns:
-------
float or tuple of two np.arrays of int
MIoU metric value.
"""
assert (len(label_imask.shape) == 2)
assert (len(pred_imask.shape) == 2)
assert (pred_imask.shape == label_imask.shape)
min_i = 1
max_i = num_classes
n_bins = num_classes
if ignore_bg:
n_bins -= 1
if bg_idx != 0:
assert (bg_idx == num_classes - 1)
max_i -= 1
if not (ignore_bg and (bg_idx == 0)):
        # use copies so the caller's arrays are not mutated in place
        label_imask = label_imask + 1
        pred_imask = pred_imask + 1
vague_idx += 1
if use_vague:
label_imask = label_imask * (label_imask != vague_idx)
pred_imask = pred_imask * (pred_imask != vague_idx)
intersection = pred_imask * (pred_imask == label_imask)
area_inter, _ = np.histogram(intersection, bins=n_bins, range=(min_i, max_i))
area_pred, _ = np.histogram(pred_imask, bins=n_bins, range=(min_i, max_i))
area_label, _ = np.histogram(label_imask, bins=n_bins, range=(min_i, max_i))
area_union = area_pred + area_label - area_inter
assert ((not ignore_bg) or (len(area_inter) == num_classes - 1))
assert (ignore_bg or (len(area_inter) == num_classes))
if macro_average:
class_count = (area_union > 0).sum()
if class_count == 0:
return empty_result
eps = np.finfo(np.float32).eps
area_union = area_union + eps
mean_iou = (area_inter / area_union).sum() / class_count
return mean_iou
else:
return area_inter.astype(np.uint64), area_union.astype(np.uint64)
def segm_fw_iou_hmasks(label_hmask,
pred_hmask):
"""
The segmentation frequency weighted intersection over union.
Parameters:
----------
label_hmask : np.array
Ground truth one-hot mask.
pred_hmask : np.array
Predicted one-hot mask.
Returns:
-------
float
FrIoU metric value.
"""
assert (pred_hmask.shape == label_hmask.shape)
assert (len(pred_hmask.shape) == 3)
n = label_hmask.shape[0]
acc_iou = 0.0
for i in range(n):
class_i_pred_mask = pred_hmask[i, :, :]
class_i_label_mask = label_hmask[i, :, :]
u_i = np.sum(class_i_label_mask)
u_ji_sj = np.sum(class_i_pred_mask)
if (u_i + u_ji_sj) == 0:
continue
u_ii = np.sum(np.logical_and(class_i_pred_mask, class_i_label_mask))
acc_iou += float(u_i * u_ii) / (u_i + u_ji_sj - u_ii)
fw_factor = pred_hmask[0].size
return acc_iou / fw_factor
def segm_fw_iou(label_hmask,
pred_imask):
"""
The segmentation frequency weighted intersection over union.
Parameters:
----------
label_hmask : np.array
Ground truth one-hot mask.
pred_imask : np.array
Predicted index mask.
Returns:
-------
float
FrIoU metric value.
"""
assert (len(label_hmask.shape) == 3)
assert (len(pred_imask.shape) == 2)
assert (pred_imask.shape == label_hmask.shape[1:])
n = label_hmask.shape[0]
acc_iou = 0.0
for i in range(n):
class_i_pred_mask = (pred_imask == i)
class_i_label_mask = label_hmask[i, :, :]
u_i = np.sum(class_i_label_mask)
u_ji_sj = np.sum(class_i_pred_mask)
if (u_i + u_ji_sj) == 0:
continue
u_ii = np.sum(np.logical_and(class_i_pred_mask, class_i_label_mask))
acc_iou += float(u_i * u_ii) / (u_i + u_ji_sj - u_ii)
fw_factor = pred_imask.size
return acc_iou / fw_factor
| 11,447
| 25.5
| 109
|
py
|
imgclsmob
|
imgclsmob-master/gluon/metrics/seg_metrics_nd.py
|
"""
Routines for segmentation metrics on mx.ndarray.
"""
import numpy as np
import mxnet as mx
__all__ = ['seg_pixel_accuracy_nd', 'segm_mean_accuracy', 'segm_mean_iou', 'seg_mean_iou2_nd', 'segm_fw_iou',
'segm_fw_iou2']
def seg_pixel_accuracy_nd(label_imask,
pred_imask,
vague_idx=-1,
use_vague=False,
macro_average=True,
empty_result=0.0):
"""
The segmentation pixel accuracy (for MXNet nd-arrays).
Parameters:
----------
label_imask : mx.nd.array
Ground truth index mask (maybe batch of).
pred_imask : mx.nd.array
Predicted index mask (maybe batch of).
vague_idx : int, default -1
Index of masked pixels.
use_vague : bool, default False
Whether to use pixel masking.
macro_average : bool, default True
Whether to use micro or macro averaging.
empty_result : float, default 0.0
Result value for an image without any classes.
Returns:
-------
float or tuple of two floats
PA metric value.
"""
assert (label_imask.shape == pred_imask.shape)
if use_vague:
mask = (label_imask != vague_idx)
sum_u_ij = mask.sum().asscalar()
if sum_u_ij == 0:
if macro_average:
return empty_result
else:
return 0, 0
sum_u_ii = ((label_imask == pred_imask) * mask).sum().asscalar()
else:
sum_u_ii = (label_imask == pred_imask).sum().asscalar()
sum_u_ij = pred_imask.size
if macro_average:
return float(sum_u_ii) / sum_u_ij
else:
return sum_u_ii, sum_u_ij
def segm_mean_accuracy(label_hmask,
pred_imask):
"""
The segmentation mean accuracy.
Parameters:
----------
label_hmask : nd.array
Ground truth one-hot mask.
pred_imask : nd.array
Predicted index mask.
Returns:
-------
float
MA metric value.
"""
assert (len(label_hmask.shape) == 3)
assert (len(pred_imask.shape) == 2)
assert (pred_imask.shape == label_hmask.shape[1:])
n = label_hmask.shape[0]
i_sum = 0
acc_sum = 0.0
for i in range(n):
class_i_pred_mask = (pred_imask == i)
class_i_label_mask = label_hmask[i, :, :]
u_i = class_i_label_mask.sum().asscalar()
if u_i == 0:
continue
u_ii = (class_i_pred_mask * class_i_label_mask).sum().asscalar()
class_acc = float(u_ii) / u_i
acc_sum += class_acc
i_sum += 1
if i_sum > 0:
mean_acc = acc_sum / i_sum
else:
mean_acc = 1.0
return mean_acc
def segm_mean_iou(label_hmask,
pred_imask):
"""
The segmentation mean intersection over union.
Parameters:
----------
label_hmask : nd.array
Ground truth one-hot mask.
pred_imask : nd.array
Predicted index mask.
Returns:
-------
float
MIoU metric value.
"""
assert (len(label_hmask.shape) == 3)
assert (len(pred_imask.shape) == 2)
assert (pred_imask.shape == label_hmask.shape[1:])
n = label_hmask.shape[0]
i_sum = 0
acc_iou = 0.0
for i in range(n):
class_i_pred_mask = (pred_imask == i)
class_i_label_mask = label_hmask[i, :, :]
u_i = class_i_label_mask.sum().asscalar()
u_ji_sj = class_i_pred_mask.sum().asscalar()
if (u_i + u_ji_sj) == 0:
continue
u_ii = (class_i_pred_mask * class_i_label_mask).sum().asscalar()
acc_iou += float(u_ii) / (u_i + u_ji_sj - u_ii)
i_sum += 1
if i_sum > 0:
mean_iou = acc_iou / i_sum
else:
mean_iou = 1.0
return mean_iou
def seg_mean_iou2_nd(label_hmask,
pred_hmask):
"""
The segmentation mean intersection over union.
Parameters:
----------
label_hmask : nd.array
Ground truth one-hot mask (batch of).
pred_hmask : nd.array
Predicted one-hot mask (batch of).
Returns:
-------
float
MIoU metric value.
"""
assert (len(label_hmask.shape) == 4)
assert (len(pred_hmask.shape) == 4)
assert (pred_hmask.shape == label_hmask.shape)
eps = np.finfo(np.float32).eps
batch_axis = 0 # The axis that represents mini-batch
class_axis = 1 # The axis that represents classes
inter_hmask = label_hmask * pred_hmask
u_i = label_hmask.sum(axis=[batch_axis, class_axis], exclude=True)
u_ji_sj = pred_hmask.sum(axis=[batch_axis, class_axis], exclude=True)
u_ii = inter_hmask.sum(axis=[batch_axis, class_axis], exclude=True)
class_count = (u_i + u_ji_sj > 0.0).sum(axis=class_axis) + eps
class_acc = u_ii / (u_i + u_ji_sj - u_ii + eps)
acc_iou = class_acc.sum(axis=class_axis) + eps
mean_iou = (acc_iou / class_count).mean().asscalar()
return mean_iou
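# A toy example (editorial addition; `_demo_*` is hypothetical). A perfect
# prediction on a batched one-hot mask scores an mIoU of ~1.0 (up to the eps
# smoothing used above).
def _demo_seg_mean_iou2_nd():
    imask = mx.nd.array([[[0, 1], [1, 0]]])
    hmask = mx.nd.transpose(mx.nd.one_hot(imask, depth=2), axes=(0, 3, 1, 2))
    assert (abs(seg_mean_iou2_nd(hmask, hmask) - 1.0) < 1e-3)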
def segm_fw_iou(label_hmask,
pred_imask):
"""
The segmentation frequency weighted intersection over union.
Parameters:
----------
label_hmask : nd.array
Ground truth one-hot mask.
pred_imask : nd.array
Predicted index mask.
Returns:
-------
float
FrIoU metric value.
"""
assert (len(label_hmask.shape) == 3)
assert (len(pred_imask.shape) == 2)
assert (pred_imask.shape == label_hmask.shape[1:])
n = label_hmask.shape[0]
acc_iou = 0.0
for i in range(n):
class_i_pred_mask = (pred_imask == i)
class_i_label_mask = label_hmask[i, :, :]
u_i = class_i_label_mask.sum().asscalar()
u_ji_sj = class_i_pred_mask.sum().asscalar()
if (u_i + u_ji_sj) == 0:
continue
u_ii = (class_i_pred_mask * class_i_label_mask).sum().asscalar()
acc_iou += float(u_i) * float(u_ii) / (u_i + u_ji_sj - u_ii)
fw_factor = pred_imask.size
return acc_iou / fw_factor
def segm_fw_iou2(label_hmask,
pred_imask):
"""
The segmentation frequency weighted intersection over union.
Parameters:
----------
label_hmask : nd.array
Ground truth one-hot mask.
pred_imask : nd.array
Predicted index mask.
Returns:
-------
float
FrIoU metric value.
"""
assert (len(label_hmask.shape) == 3)
assert (len(pred_imask.shape) == 2)
assert (pred_imask.shape == label_hmask.shape[1:])
n = label_hmask.shape[0]
acc_iou = mx.nd.array([0.0], ctx=label_hmask.context)
for i in range(n):
class_i_pred_mask = (pred_imask == i)
class_i_label_mask = label_hmask[i, :, :]
u_i = class_i_label_mask.sum()
u_ji_sj = class_i_pred_mask.sum()
if (u_i + u_ji_sj).asscalar() == 0:
continue
u_ii = (class_i_pred_mask * class_i_label_mask).sum()
acc_iou += mx.nd.cast(u_i, dtype=np.float32) *\
mx.nd.cast(u_ii, dtype=np.float32) / mx.nd.cast(u_i + u_ji_sj - u_ii, dtype=np.float32)
fw_factor = pred_imask.size
return acc_iou.asscalar() / fw_factor
| 7,161
| 25.924812
| 109
|
py
|
imgclsmob
|
imgclsmob-master/gluon/metrics/seg_metrics.py
|
"""
Evaluation Metrics for Semantic Segmentation.
"""
__all__ = ['PixelAccuracyMetric', 'MeanIoUMetric']
import numpy as np
import mxnet as mx
from .seg_metrics_np import seg_pixel_accuracy_np, seg_mean_iou_imasks_np
from .seg_metrics_nd import seg_pixel_accuracy_nd
class PixelAccuracyMetric(mx.metric.EvalMetric):
"""
Computes the pixel-wise accuracy.
Parameters:
----------
axis : int, default 1
The axis that represents classes.
name : str, default 'pix_acc'
Name of this metric instance for display.
output_names : list of str, or None, default None
Name of predictions that should be used when updating with update_dict.
By default include all predictions.
label_names : list of str, or None, default None
Name of labels that should be used when updating with update_dict.
By default include all labels.
on_cpu : bool, default True
Calculate on CPU.
sparse_label : bool, default True
Whether label is an integer array instead of probability distribution.
vague_idx : int, default -1
Index of masked pixels.
use_vague : bool, default False
Whether to use pixel masking.
macro_average : bool, default True
        Whether to use macro (True) or micro (False) averaging.
aux : bool, default False
Whether to support auxiliary predictions.
"""
def __init__(self,
axis=1,
name="pix_acc",
output_names=None,
label_names=None,
on_cpu=True,
sparse_label=True,
vague_idx=-1,
use_vague=False,
macro_average=True,
aux=False):
if name == "pix_acc":
name = "{}-pix_acc".format("macro" if macro_average else "micro")
self.macro_average = macro_average
super(PixelAccuracyMetric, self).__init__(
name,
axis=axis,
output_names=output_names,
label_names=label_names)
self.axis = axis
self.on_cpu = on_cpu
self.sparse_label = sparse_label
self.vague_idx = vague_idx
self.use_vague = use_vague
self.aux = aux
def update(self, labels, preds):
"""
Updates the internal evaluation result.
Parameters:
----------
labels : list of `NDArray`
The labels of the data.
preds : list of `NDArray`
Predicted values.
"""
if self.aux:
preds = [p[0] for p in preds]
assert (len(labels) == len(preds))
if self.on_cpu:
for label, pred in zip(labels, preds):
if self.sparse_label:
label_imask = label.asnumpy().astype(np.int32)
else:
label_imask = mx.nd.argmax(label, axis=self.axis).asnumpy().astype(np.int32)
pred_imask = mx.nd.argmax(pred, axis=self.axis).asnumpy().astype(np.int32)
acc = seg_pixel_accuracy_np(
label_imask=label_imask,
pred_imask=pred_imask,
vague_idx=self.vague_idx,
use_vague=self.use_vague,
macro_average=self.macro_average)
if self.macro_average:
self.sum_metric += acc
self.num_inst += 1
else:
self.sum_metric += acc[0]
self.num_inst += acc[1]
else:
for label, pred in zip(labels, preds):
if self.sparse_label:
label_imask = mx.nd.cast(label, dtype=np.int32)
else:
label_imask = mx.nd.cast(mx.nd.argmax(label, axis=self.axis), dtype=np.int32)
pred_imask = mx.nd.cast(mx.nd.argmax(pred, axis=self.axis), dtype=np.int32)
acc = seg_pixel_accuracy_nd(
label_imask=label_imask,
pred_imask=pred_imask,
vague_idx=self.vague_idx,
use_vague=self.use_vague,
macro_average=self.macro_average)
if self.macro_average:
self.sum_metric += acc
self.num_inst += 1
else:
self.sum_metric += acc[0]
self.num_inst += acc[1]
def reset(self):
"""
Resets the internal evaluation result to initial state.
"""
if self.macro_average:
self.num_inst = 0
self.sum_metric = 0.0
else:
self.num_inst = 0
self.sum_metric = 0
def get(self):
"""
Gets the current evaluation result.
Returns:
-------
names : list of str
Name of the metrics.
values : list of float
Value of the evaluations.
"""
if self.macro_average:
if self.num_inst == 0:
return self.name, float("nan")
else:
return self.name, self.sum_metric / self.num_inst
else:
if self.num_inst == 0:
return self.name, float("nan")
else:
return self.name, float(self.sum_metric) / self.num_inst
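# Usage sketch for PixelAccuracyMetric (shapes and variable names are
# assumptions): labels are (N, H, W) index masks, preds are (N, classes, H, W)
# class scores.
def _demo_pixel_accuracy_metric(label_batch, pred_batch):
    metric = PixelAccuracyMetric()
    metric.update(labels=[label_batch], preds=[pred_batch])
    return metric.get()  # -> ("macro-pix_acc", value)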
class MeanIoUMetric(mx.metric.EvalMetric):
"""
Computes the mean intersection over union.
Parameters:
----------
axis : int, default 1
        The axis that represents classes.
name : str, default 'mean_iou'
Name of this metric instance for display.
output_names : list of str, or None, default None
Name of predictions that should be used when updating with update_dict.
By default include all predictions.
label_names : list of str, or None, default None
Name of labels that should be used when updating with update_dict.
By default include all labels.
on_cpu : bool, default True
Calculate on CPU.
sparse_label : bool, default True
Whether label is an integer array instead of probability distribution.
num_classes : int
        Number of classes.
vague_idx : int, default -1
Index of masked pixels.
use_vague : bool, default False
Whether to use pixel masking.
bg_idx : int, default -1
Index of background class.
ignore_bg : bool, default False
Whether to ignore background class.
macro_average : bool, default True
        Whether to use macro (True) or micro (False) averaging.
"""
def __init__(self,
axis=1,
name="mean_iou",
output_names=None,
label_names=None,
on_cpu=True,
sparse_label=True,
num_classes=None,
vague_idx=-1,
use_vague=False,
bg_idx=-1,
ignore_bg=False,
macro_average=True):
if name == "pix_acc":
name = "{}-pix_acc".format("macro" if macro_average else "micro")
self.macro_average = macro_average
self.num_classes = num_classes
self.ignore_bg = ignore_bg
super(MeanIoUMetric, self).__init__(
name,
axis=axis,
output_names=output_names,
label_names=label_names)
assert ((not ignore_bg) or (bg_idx in (0, num_classes - 1)))
self.axis = axis
self.on_cpu = on_cpu
self.sparse_label = sparse_label
self.vague_idx = vague_idx
self.use_vague = use_vague
self.bg_idx = bg_idx
assert (on_cpu and sparse_label)
def update(self, labels, preds):
"""
Updates the internal evaluation result.
Parameters:
----------
labels : list of `NDArray`
The labels of the data.
preds : list of `NDArray`
Predicted values.
"""
assert (len(labels) == len(preds))
if self.on_cpu:
for label, pred in zip(labels, preds):
if self.sparse_label:
label_imask = label.asnumpy().astype(np.int32)
# else:
# label_hmask = label.asnumpy().astype(np.int32)
pred_imask = mx.nd.argmax(pred, axis=self.axis).asnumpy().astype(np.int32)
batch_size = label.shape[0]
for k in range(batch_size):
if self.sparse_label:
acc = seg_mean_iou_imasks_np(
label_imask=label_imask[k, :, :],
pred_imask=pred_imask[k, :, :],
num_classes=self.num_classes,
vague_idx=self.vague_idx,
use_vague=self.use_vague,
bg_idx=self.bg_idx,
ignore_bg=self.ignore_bg,
macro_average=self.macro_average)
# else:
# acc = seg_mean_iou_np(
# label_hmask=label_hmask[k, :, :, :],
# pred_imask=pred_imask[k, :, :])
if self.macro_average:
self.sum_metric += acc
self.num_inst += 1
else:
self.area_inter += acc[0]
self.area_union += acc[1]
# else:
# for label, pred in zip(labels, preds):
# if self.sparse_label:
# label_imask = label
# n = self.num_classes
# label_hmask = mx.nd.one_hot(label_imask, depth=n).transpose((0, 3, 1, 2))
# else:
# label_hmask = label
# n = label_hmask.shape[1]
# pred_imask = mx.nd.argmax(pred, axis=self.axis)
# pred_hmask = mx.nd.one_hot(pred_imask, depth=n).transpose((0, 3, 1, 2))
# acc = seg_mean_iou2_nd(
# label_hmask=label_hmask,
# pred_hmask=pred_hmask)
# self.sum_metric += acc
# self.num_inst += 1
def reset(self):
"""
Resets the internal evaluation result to initial state.
"""
if self.macro_average:
self.num_inst = 0
self.sum_metric = 0.0
else:
class_count = self.num_classes - 1 if self.ignore_bg else self.num_classes
self.area_inter = np.zeros((class_count,), np.uint64)
self.area_union = np.zeros((class_count,), np.uint64)
def get(self):
"""
Gets the current evaluation result.
Returns:
-------
names : list of str
Name of the metrics.
values : list of float
Value of the evaluations.
"""
if self.macro_average:
if self.num_inst == 0:
return self.name, float("nan")
else:
return self.name, self.sum_metric / self.num_inst
else:
class_count = (self.area_union > 0).sum()
if class_count == 0:
return self.name, float("nan")
eps = np.finfo(np.float32).eps
area_union_eps = self.area_union + eps
mean_iou = (self.area_inter / area_union_eps).sum() / class_count
return self.name, mean_iou
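# Note on micro averaging (a restatement of the code above, not new behavior):
# the per-class intersection and union areas are accumulated over the whole
# validation set, and get() reports
#     mean_iou = sum_c(inter_c / (union_c + eps)) / |{c : union_c > 0}|.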
| 11,492
| 35.485714
| 97
|
py
|
imgclsmob
|
imgclsmob-master/gluon/metrics/cls_metrics.py
|
"""
Evaluation Metrics for Image Classification.
"""
import mxnet as mx
__all__ = ['Top1Error', 'TopKError']
class Top1Error(mx.metric.Accuracy):
"""
Computes top-1 error (inverted accuracy classification score).
Parameters:
----------
axis : int, default 1
The axis that represents classes.
name : str, default 'top_1_error'
Name of this metric instance for display.
output_names : list of str, or None, default None
Name of predictions that should be used when updating with update_dict.
By default include all predictions.
label_names : list of str, or None, default None
Name of labels that should be used when updating with update_dict.
By default include all labels.
"""
def __init__(self,
axis=1,
name="top_1_error",
output_names=None,
label_names=None):
super(Top1Error, self).__init__(
axis=axis,
name=name,
output_names=output_names,
label_names=label_names)
def get(self):
"""
Gets the current evaluation result.
Returns:
-------
names : list of str
Name of the metrics.
values : list of float
Value of the evaluations.
"""
if self.num_inst == 0:
return self.name, float("nan")
else:
return self.name, 1.0 - self.sum_metric / self.num_inst
class TopKError(mx.metric.TopKAccuracy):
"""
Computes top-k error (inverted top k predictions accuracy).
Parameters:
----------
    top_k : int, default 1
        Number of top predictions within which a target counts as correct.
name : str, default 'top_k_error'
Name of this metric instance for display.
output_names : list of str, or None, default None
Name of predictions that should be used when updating with update_dict.
By default include all predictions.
label_names : list of str, or None, default None
Name of labels that should be used when updating with update_dict.
By default include all labels.
"""
def __init__(self,
top_k=1,
name="top_k_error",
output_names=None,
label_names=None):
name_ = name
super(TopKError, self).__init__(
top_k=top_k,
name=name,
output_names=output_names,
label_names=label_names)
self.name = name_.replace("_k_", "_{}_".format(top_k))
def get(self):
"""
Gets the current evaluation result.
Returns:
-------
names : list of str
Name of the metrics.
values : list of float
Value of the evaluations.
"""
if self.num_inst == 0:
return self.name, float("nan")
else:
return self.name, 1.0 - self.sum_metric / self.num_inst
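# Usage sketch (the batch variables are assumptions): top-5 error over one batch.
def _demo_top_k_error(label_batch, logits_batch):
    metric = TopKError(top_k=5)  # display name becomes "top_5_error"
    metric.update(labels=[label_batch], preds=[logits_batch])
    return metric.get()  # -> ("top_5_error", 1.0 - top-5 accuracy)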
| 2,977
| 28.78
| 79
|
py
|
imgclsmob
|
imgclsmob-master/gluon/metrics/metrics.py
|
"""
Evaluation metrics for common tasks.
"""
import mxnet as mx
if mx.__version__ < "2.0.0":
from mxnet.metric import EvalMetric
else:
from mxnet.gluon.metric import EvalMetric
__all__ = ['LossValue']
class LossValue(EvalMetric):
"""
Computes simple loss value fake metric.
Parameters:
----------
name : str
Name of this metric instance for display.
output_names : list of str, or None
Name of predictions that should be used when updating with update_dict.
By default include all predictions.
label_names : list of str, or None
Name of labels that should be used when updating with update_dict.
By default include all labels.
"""
def __init__(self,
name="loss",
output_names=None,
label_names=None):
super(LossValue, self).__init__(
name,
output_names=output_names,
label_names=label_names)
def update(self, labels, preds):
"""
Updates the internal evaluation result.
Parameters:
----------
labels : None
Unused argument.
preds : list of `NDArray`
Loss values.
"""
loss = sum([ll.mean().asscalar() for ll in preds]) / len(preds)
self.sum_metric += loss
self.global_sum_metric += loss
self.num_inst += 1
self.global_num_inst += 1
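# Usage sketch (an assumption): `preds` carries per-batch loss tensors and
# `labels` is unused.
def _demo_loss_value(loss_ndarray):
    metric = LossValue()
    metric.update(labels=None, preds=[loss_ndarray])
    return metric.get()  # -> ("loss", running mean of the batch losses)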
| 1,444
| 25.759259
| 79
|
py
|
imgclsmob
|
imgclsmob-master/gluon/metrics/__init__.py
| 0
| 0
| 0
|
py
|
|
imgclsmob
|
imgclsmob-master/gluon/metrics/det_metrics.py
|
"""
Evaluation Metrics for Object Detection.
"""
import os
import math
import warnings
import numpy as np
import mxnet as mx
from collections import defaultdict
__all__ = ['CocoDetMApMetric', 'VOC07MApMetric', 'WiderfaceDetMetric']
class CocoDetMApMetric(mx.metric.EvalMetric):
"""
Detection metric for COCO bbox task.
Parameters:
----------
img_height : int
Processed image height.
coco_annotations_file_path : str
        COCO annotation file path.
contiguous_id_to_json : list of int
Processed IDs.
    validation_ids : list of int or None, default None
        Subset of image IDs to evaluate on (all images if None).
use_file : bool, default False
Whether to use temporary file for estimation.
score_thresh : float, default 0.05
        Detection results with confidence scores smaller than `score_thresh` will be discarded before saving to results.
data_shape : tuple of int, default is None
If `data_shape` is provided as (height, width), we will rescale bounding boxes when saving the predictions.
This is helpful when SSD/YOLO box predictions cannot be rescaled conveniently. Note that the data_shape must be
fixed for all validation images.
post_affine : a callable function with input signature (orig_w, orig_h, out_w, out_h)
If not None, the bounding boxes will be affine transformed rather than simply scaled.
name : str, default 'mAP'
Name of this metric instance for display.
"""
def __init__(self,
img_height,
coco_annotations_file_path,
contiguous_id_to_json,
validation_ids=None,
use_file=False,
score_thresh=0.05,
data_shape=None,
post_affine=None,
name="mAP"):
super(CocoDetMApMetric, self).__init__(name=name)
self.img_height = img_height
self.coco_annotations_file_path = coco_annotations_file_path
self.contiguous_id_to_json = contiguous_id_to_json
self.validation_ids = validation_ids
self.use_file = use_file
self.score_thresh = score_thresh
self.current_idx = 0
self.coco_result = []
if isinstance(data_shape, (tuple, list)):
assert len(data_shape) == 2, "Data shape must be (height, width)"
elif not data_shape:
data_shape = None
else:
raise ValueError("data_shape must be None or tuple of int as (height, width)")
self._data_shape = data_shape
if post_affine is not None:
assert self._data_shape is not None, "Using post affine transform requires data_shape"
self._post_affine = post_affine
else:
self._post_affine = None
from pycocotools.coco import COCO
self.gt = COCO(self.coco_annotations_file_path)
self._img_ids = sorted(self.gt.getImgIds())
def reset(self):
self.current_idx = 0
self.coco_result = []
def get(self):
"""
Get evaluation metrics.
"""
if self.current_idx != len(self._img_ids):
warnings.warn("Recorded {} out of {} validation images, incomplete results".format(
self.current_idx, len(self._img_ids)))
from pycocotools.coco import COCO
gt = COCO(self.coco_annotations_file_path)
import tempfile
import json
with tempfile.NamedTemporaryFile(mode="w", suffix=".json") as f:
json.dump(self.coco_result, f)
f.flush()
pred = gt.loadRes(f.name)
from pycocotools.cocoeval import COCOeval
coco_eval = COCOeval(gt, pred, "bbox")
if self.validation_ids is not None:
coco_eval.params.imgIds = self.validation_ids
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
return self.name, tuple(coco_eval.stats[:3])
def update2(self,
pred_bboxes,
pred_labels,
pred_scores):
"""
Update internal buffer with latest predictions. Note that the statistics are not available until you call
self.get() to return the metrics.
Parameters:
----------
pred_bboxes : mxnet.NDArray or np.ndarray
Prediction bounding boxes with shape `B, N, 4`.
Where B is the size of mini-batch, N is the number of bboxes.
pred_labels : mxnet.NDArray or np.ndarray
Prediction bounding boxes labels with shape `B, N`.
pred_scores : mxnet.NDArray or np.ndarray
Prediction bounding boxes scores with shape `B, N`.
"""
def as_numpy(a):
"""
Convert a (list of) mx.NDArray into np.ndarray
"""
if isinstance(a, (list, tuple)):
out = [x.asnumpy() if isinstance(x, mx.nd.NDArray) else x for x in a]
return np.concatenate(out, axis=0)
elif isinstance(a, mx.nd.NDArray):
a = a.asnumpy()
return a
for pred_bbox, pred_label, pred_score in zip(*[as_numpy(x) for x in [pred_bboxes, pred_labels, pred_scores]]):
valid_pred = np.where(pred_label.flat >= 0)[0]
            pred_bbox = pred_bbox[valid_pred, :].astype(np.float64)
            pred_label = pred_label.flat[valid_pred].astype(int)
            pred_score = pred_score.flat[valid_pred].astype(np.float64)
imgid = self._img_ids[self.current_idx]
self.current_idx += 1
affine_mat = None
if self._data_shape is not None:
entry = self.gt.loadImgs(imgid)[0]
orig_height = entry["height"]
orig_width = entry["width"]
height_scale = float(orig_height) / self._data_shape[0]
width_scale = float(orig_width) / self._data_shape[1]
if self._post_affine is not None:
affine_mat = self._post_affine(orig_width, orig_height, self._data_shape[1], self._data_shape[0])
else:
height_scale, width_scale = (1.0, 1.0)
# for each bbox detection in each image
for bbox, label, score in zip(pred_bbox, pred_label, pred_score):
if label not in self.contiguous_id_to_json:
# ignore non-exist class
continue
if score < self.score_thresh:
continue
category_id = self.contiguous_id_to_json[label]
# rescale bboxes/affine transform bboxes
if affine_mat is not None:
bbox[0:2] = self.affine_transform(bbox[0:2], affine_mat)
bbox[2:4] = self.affine_transform(bbox[2:4], affine_mat)
else:
bbox[[0, 2]] *= width_scale
bbox[[1, 3]] *= height_scale
# convert [xmin, ymin, xmax, ymax] to [xmin, ymin, w, h]
bbox[2:4] -= (bbox[:2] - 1)
self.coco_result.append({"image_id": imgid,
"category_id": category_id,
"bbox": bbox[:4].tolist(),
"score": score})
def update(self, labels, preds):
"""
Updates the internal evaluation result.
Parameters:
----------
labels : list of `NDArray`
The labels of the data.
preds : list of `NDArray`
Predicted values.
"""
det_bboxes = []
det_ids = []
det_scores = []
for x_rr, y in zip(preds, labels):
bboxes = x_rr.slice_axis(axis=-1, begin=0, end=4)
ids = x_rr.slice_axis(axis=-1, begin=4, end=5).squeeze(axis=2)
scores = x_rr.slice_axis(axis=-1, begin=5, end=6).squeeze(axis=2)
det_ids.append(ids)
det_scores.append(scores)
# clip to image size
det_bboxes.append(bboxes.clip(0, self.img_height))
self.update2(det_bboxes, det_ids, det_scores)
@staticmethod
def affine_transform(pt, t):
"""
        Apply an affine transform to a point given transform matrix t.
        Parameters:
        ----------
        pt : np.ndarray
            Point coordinates (x, y) with shape (2,).
        t : np.ndarray
            Transformation matrix with shape (2, 3).
        Returns:
        -------
        np.ndarray
            Transformed point coordinates with shape (2,).
"""
new_pt = np.array([pt[0], pt[1], 1.], dtype=np.float32).T
new_pt = np.dot(t, new_pt)
return new_pt[:2]
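# Minimal sketch for affine_transform (the values are assumptions): an identity
# 2x3 matrix leaves the point unchanged.
def _demo_affine_transform():
    t = np.array([[1.0, 0.0, 0.0],
                  [0.0, 1.0, 0.0]], dtype=np.float32)
    pt = np.array([10.0, 20.0], dtype=np.float32)
    return CocoDetMApMetric.affine_transform(pt, t)  # -> array([10., 20.])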
class VOCMApMetric(mx.metric.EvalMetric):
"""
Calculate mean AP for object detection task
Parameters:
---------
iou_thresh : float
IOU overlap threshold for TP
class_names : list of str
optional, if provided, will print out AP for each class
name : str, default 'mAP'
Name of this metric instance for display.
"""
def __init__(self,
iou_thresh=0.5,
class_names=None,
name="mAP"):
super(VOCMApMetric, self).__init__(name=name)
if class_names is None:
self.num = None
else:
assert isinstance(class_names, (list, tuple))
for name in class_names:
assert isinstance(name, str), "must provide names as str"
num = len(class_names)
self.name = list(class_names) + ["mAP"]
self.num = num + 1
self.reset()
self.iou_thresh = iou_thresh
self.class_names = class_names
def reset(self):
"""
Clear the internal statistics to initial state.
"""
if getattr(self, 'num', None) is None:
self.num_inst = 0
self.sum_metric = 0.0
else:
self.num_inst = [0] * self.num
self.sum_metric = [0.0] * self.num
self._n_pos = defaultdict(int)
self._score = defaultdict(list)
self._match = defaultdict(list)
def get(self):
"""
Get the current evaluation result.
Returns:
-------
name : str
Name of the metric.
value : float
Value of the evaluation.
"""
self._update() # update metric at this time
if self.num is None:
if self.num_inst == 0:
return self.name, float("nan")
else:
return self.name, self.sum_metric / self.num_inst
else:
names = ["%s" % self.name[i] for i in range(self.num)]
values = [x / y if y != 0 else float("nan") for x, y in zip(self.sum_metric, self.num_inst)]
return names, values
def update(self,
pred_bboxes,
pred_labels,
pred_scores,
gt_bboxes,
gt_labels,
gt_difficults=None):
"""
Update internal buffer with latest prediction and gt pairs.
Parameters:
----------
pred_bboxes : mxnet.NDArray or np.ndarray
Prediction bounding boxes with shape `B, N, 4`.
Where B is the size of mini-batch, N is the number of bboxes.
pred_labels : mxnet.NDArray or np.ndarray
Prediction bounding boxes labels with shape `B, N`.
pred_scores : mxnet.NDArray or np.ndarray
Prediction bounding boxes scores with shape `B, N`.
gt_bboxes : mxnet.NDArray or np.ndarray
Ground-truth bounding boxes with shape `B, M, 4`.
Where B is the size of mini-batch, M is the number of ground-truths.
gt_labels : mxnet.NDArray or np.ndarray
Ground-truth bounding boxes labels with shape `B, M`.
gt_difficults : mxnet.NDArray or np.ndarray, optional, default is None
Ground-truth bounding boxes difficulty labels with shape `B, M`.
"""
def as_numpy(a):
"""
Convert a (list of) mx.NDArray into np.ndarray.
"""
if isinstance(a, (list, tuple)):
out = [x.asnumpy() if isinstance(x, mx.nd.NDArray) else x for x in a]
try:
out = np.concatenate(out, axis=0)
except ValueError:
out = np.array(out)
return out
elif isinstance(a, mx.nd.NDArray):
a = a.asnumpy()
return a
if gt_difficults is None:
gt_difficults = [None for _ in as_numpy(gt_labels)]
if isinstance(gt_labels, list):
gt_diff_shape = gt_difficults[0].shape[0] if hasattr(gt_difficults[0], "shape") else 0
if len(gt_difficults) * gt_diff_shape != \
len(gt_labels) * gt_labels[0].shape[0]:
gt_difficults = [None] * len(gt_labels) * gt_labels[0].shape[0]
for pred_bbox, pred_label, pred_score, gt_bbox, gt_label, gt_difficult in zip(
*[as_numpy(x) for x in [pred_bboxes, pred_labels, pred_scores,
gt_bboxes, gt_labels, gt_difficults]]):
# strip padding -1 for pred and gt
valid_pred = np.where(pred_label.flat >= 0)[0]
pred_bbox = pred_bbox[valid_pred, :]
pred_label = pred_label.flat[valid_pred].astype(int)
pred_score = pred_score.flat[valid_pred]
valid_gt = np.where(gt_label.flat >= 0)[0]
gt_bbox = gt_bbox[valid_gt, :]
gt_label = gt_label.flat[valid_gt].astype(int)
if gt_difficult is None:
gt_difficult = np.zeros(gt_bbox.shape[0])
else:
gt_difficult = gt_difficult.flat[valid_gt]
for ll in np.unique(np.concatenate((pred_label, gt_label)).astype(int)):
pred_mask_l = pred_label == ll
pred_bbox_l = pred_bbox[pred_mask_l]
pred_score_l = pred_score[pred_mask_l]
# sort by score
order = pred_score_l.argsort()[::-1]
pred_bbox_l = pred_bbox_l[order]
pred_score_l = pred_score_l[order]
gt_mask_l = gt_label == ll
gt_bbox_l = gt_bbox[gt_mask_l]
gt_difficult_l = gt_difficult[gt_mask_l]
self._n_pos[ll] += np.logical_not(gt_difficult_l).sum()
self._score[ll].extend(pred_score_l)
if len(pred_bbox_l) == 0:
continue
if len(gt_bbox_l) == 0:
self._match[ll].extend((0,) * pred_bbox_l.shape[0])
continue
# VOC evaluation follows integer typed bounding boxes.
pred_bbox_l = pred_bbox_l.copy()
pred_bbox_l[:, 2:] += 1
gt_bbox_l = gt_bbox_l.copy()
gt_bbox_l[:, 2:] += 1
iou = self.bbox_iou(pred_bbox_l, gt_bbox_l)
gt_index = iou.argmax(axis=1)
# set -1 if there is no matching ground truth
gt_index[iou.max(axis=1) < self.iou_thresh] = -1
del iou
selec = np.zeros(gt_bbox_l.shape[0], dtype=bool)
for gt_idx in gt_index:
if gt_idx >= 0:
if gt_difficult_l[gt_idx]:
self._match[ll].append(-1)
else:
if not selec[gt_idx]:
self._match[ll].append(1)
else:
self._match[ll].append(0)
selec[gt_idx] = True
else:
self._match[ll].append(0)
def _update(self):
"""
Update num_inst and sum_metric.
"""
aps = []
recall, precs = self._recall_prec()
for ll, rec, prec in zip(range(len(precs)), recall, precs):
ap = self._average_precision(rec, prec)
aps.append(ap)
if self.num is not None and ll < (self.num - 1):
self.sum_metric[ll] = ap
self.num_inst[ll] = 1
if self.num is None:
self.num_inst = 1
self.sum_metric = np.nanmean(aps)
else:
self.num_inst[-1] = 1
self.sum_metric[-1] = np.nanmean(aps)
def _recall_prec(self):
"""
Get recall and precision from internal records.
"""
n_fg_class = max(self._n_pos.keys()) + 1
prec = [None] * n_fg_class
rec = [None] * n_fg_class
for ll in self._n_pos.keys():
score_l = np.array(self._score[ll])
match_l = np.array(self._match[ll], dtype=np.int32)
order = score_l.argsort()[::-1]
match_l = match_l[order]
tp = np.cumsum(match_l == 1)
fp = np.cumsum(match_l == 0)
# If an element of fp + tp is 0,
# the corresponding element of prec[ll] is nan.
with np.errstate(divide="ignore", invalid="ignore"):
prec[ll] = tp / (fp + tp)
# If n_pos[ll] is 0, rec[ll] is None.
if self._n_pos[ll] > 0:
rec[ll] = tp / self._n_pos[ll]
return rec, prec
def _average_precision(self,
rec,
prec):
"""
Calculate average precision.
        Parameters:
        ----------
        rec : np.array
            Cumulative recall.
        prec : np.array
            Cumulative precision.
        Returns:
        -------
        float
            AP metric value.
"""
if rec is None or prec is None:
return np.nan
# append sentinel values at both ends
mrec = np.concatenate(([0.0], rec, [1.0]))
mpre = np.concatenate(([0.0], np.nan_to_num(prec), [0.0]))
# compute precision integration ladder
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# look for recall value changes
i = np.where(mrec[1:] != mrec[:-1])[0]
# sum (\delta recall) * prec
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
@staticmethod
def bbox_iou(bbox_a, bbox_b, offset=0):
"""
Calculate Intersection-Over-Union(IOU) of two bounding boxes.
Parameters:
----------
bbox_a : np.ndarray
An ndarray with shape :math:`(N, 4)`.
bbox_b : np.ndarray
An ndarray with shape :math:`(M, 4)`.
offset : float or int, default is 0
            The ``offset`` is used to control whether the width (or height) is computed as
            (right - left + ``offset``).
Note that the offset must be 0 for normalized bboxes, whose ranges are in ``[0, 1]``.
Returns:
-------
np.ndarray
An ndarray with shape :math:`(N, M)` indicates IOU between each pairs of
bounding boxes in `bbox_a` and `bbox_b`.
"""
if bbox_a.shape[1] < 4 or bbox_b.shape[1] < 4:
raise IndexError("Bounding boxes axis 1 must have at least length 4")
tl = np.maximum(bbox_a[:, None, :2], bbox_b[:, :2])
br = np.minimum(bbox_a[:, None, 2:4], bbox_b[:, 2:4])
area_i = np.prod(br - tl + offset, axis=2) * (tl < br).all(axis=2)
area_a = np.prod(bbox_a[:, 2:4] - bbox_a[:, :2] + offset, axis=1)
area_b = np.prod(bbox_b[:, 2:4] - bbox_b[:, :2] + offset, axis=1)
return area_i / (area_a[:, None] + area_b - area_i)
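# Toy check for bbox_iou (the values are assumptions): with offset=0 the
# intersection is 2 and the union is 4 + 4 - 2 = 6, so IoU = 1/3.
def _demo_bbox_iou():
    bbox_a = np.array([[0.0, 0.0, 2.0, 2.0]])
    bbox_b = np.array([[1.0, 0.0, 3.0, 2.0]])
    return VOCMApMetric.bbox_iou(bbox_a, bbox_b)  # -> array([[0.3333...]])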
class VOC07MApMetric(VOCMApMetric):
"""
    Mean average precision metric for PASCAL VOC 07 dataset.
Parameters:
---------
iou_thresh : float
IOU overlap threshold for TP
class_names : list of str
optional, if provided, will print out AP for each class
"""
def __init__(self, *args, **kwargs):
super(VOC07MApMetric, self).__init__(*args, **kwargs)
def _average_precision(self, rec, prec):
"""
        Calculate average precision, overriding the default one with the
        special 11-point metric.
        Parameters:
        ----------
        rec : np.array
            Cumulative recall.
        prec : np.array
            Cumulative precision.
        Returns:
        -------
        float
            AP metric value.
"""
if rec is None or prec is None:
return np.nan
ap = 0.0
for t in np.arange(0.0, 1.1, 0.1):
if np.sum(rec >= t) == 0:
p = 0
else:
p = np.max(np.nan_to_num(prec)[rec >= t])
ap += p / 11.0
return ap
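# Worked example of the 11-point rule above (the values are assumptions):
# precision is sampled at recalls 0.0, 0.1, ..., 1.0 and averaged.
def _demo_voc07_average_precision():
    metric = VOC07MApMetric()
    rec = np.array([0.2, 0.6, 1.0])
    prec = np.array([1.0, 0.5, 0.4])
    # p(t) = 1.0 for t <= 0.2 (3 points), 0.5 for 0.3 <= t <= 0.6 (4 points),
    # 0.4 for 0.7 <= t <= 1.0 (4 points) -> AP = (3 * 1.0 + 4 * 0.5 + 4 * 0.4) / 11.
    return metric._average_precision(rec, prec)  # ~0.6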
class WiderfaceDetMetric(mx.metric.EvalMetric):
"""
Detection metric for WIDER FACE detection task.
Parameters:
----------
receptive_field_center_starts : list of int
The start location of the first receptive field of each scale.
receptive_field_strides : list of int
Receptive field stride for each scale.
bbox_factors : list of float
A half of bbox upper bound for each scale.
output_dir_path : str
Output file path.
name : str, default 'WF'
Name of this metric instance for display.
"""
def __init__(self,
receptive_field_center_starts,
receptive_field_strides,
bbox_factors,
output_dir_path,
name="WF"):
super(WiderfaceDetMetric, self).__init__(name=name)
self.receptive_field_center_starts = receptive_field_center_starts
self.receptive_field_strides = receptive_field_strides
self.bbox_factors = bbox_factors
self.output_dir_path = output_dir_path
self.num_output_scales = len(self.bbox_factors)
self.score_threshold = 0.11
self.nms_threshold = 0.4
self.top_k = 10000
def reset(self):
pass
def get(self):
"""
Get evaluation metrics.
"""
return self.name, 1.0
def update(self, labels, preds):
"""
Updates the internal evaluation result.
Parameters:
----------
labels : list of `NDArray`
The labels of the data.
preds : list of `NDArray`
Predicted values.
"""
for x_rr, label in zip(preds, labels):
outputs = []
for output in x_rr:
outputs.append(output.asnumpy())
label_split = label.split("/")
resize_scale = float(label_split[2])
image_size = (int(label_split[3]), int(label_split[4]))
bboxes, _ = self.predict(outputs, resize_scale, image_size)
event_name = label_split[0]
event_dir_name = os.path.join(self.output_dir_path, event_name)
if not os.path.exists(event_dir_name):
os.makedirs(event_dir_name)
file_stem = label_split[1]
fout = open(os.path.join(event_dir_name, file_stem + ".txt"), "w")
fout.write(file_stem + "\n")
fout.write(str(len(bboxes)) + "\n")
for bbox in bboxes:
fout.write("%d %d %d %d %.03f" % (math.floor(bbox[0]),
math.floor(bbox[1]),
math.ceil(bbox[2] - bbox[0]),
math.ceil(bbox[3] - bbox[1]),
bbox[4] if bbox[4] <= 1 else 1) + "\n")
fout.close()
def predict(self, outputs, resize_scale, image_size):
bbox_collection = []
for i in range(self.num_output_scales):
score_map = np.squeeze(outputs[i * 2], (0, 1))
bbox_map = np.squeeze(outputs[i * 2 + 1], 0)
RF_center_Xs = np.array(
[self.receptive_field_center_starts[i] + self.receptive_field_strides[i] * x for x in
range(score_map.shape[1])])
RF_center_Xs_mat = np.tile(RF_center_Xs, [score_map.shape[0], 1])
RF_center_Ys = np.array(
[self.receptive_field_center_starts[i] + self.receptive_field_strides[i] * y for y in
range(score_map.shape[0])])
RF_center_Ys_mat = np.tile(RF_center_Ys, [score_map.shape[1], 1]).T
x_lt_mat = RF_center_Xs_mat - bbox_map[0, :, :] * self.bbox_factors[i]
y_lt_mat = RF_center_Ys_mat - bbox_map[1, :, :] * self.bbox_factors[i]
x_rb_mat = RF_center_Xs_mat - bbox_map[2, :, :] * self.bbox_factors[i]
y_rb_mat = RF_center_Ys_mat - bbox_map[3, :, :] * self.bbox_factors[i]
x_lt_mat = x_lt_mat / resize_scale
x_lt_mat[x_lt_mat < 0] = 0
y_lt_mat = y_lt_mat / resize_scale
y_lt_mat[y_lt_mat < 0] = 0
x_rb_mat = x_rb_mat / resize_scale
x_rb_mat[x_rb_mat > image_size[1]] = image_size[1]
y_rb_mat = y_rb_mat / resize_scale
y_rb_mat[y_rb_mat > image_size[0]] = image_size[0]
select_index = np.where(score_map > self.score_threshold)
for idx in range(select_index[0].size):
bbox_collection.append((x_lt_mat[select_index[0][idx], select_index[1][idx]],
y_lt_mat[select_index[0][idx], select_index[1][idx]],
x_rb_mat[select_index[0][idx], select_index[1][idx]],
y_rb_mat[select_index[0][idx], select_index[1][idx]],
score_map[select_index[0][idx], select_index[1][idx]]))
# NMS
bbox_collection = sorted(bbox_collection, key=lambda item: item[-1], reverse=True)
if len(bbox_collection) > self.top_k:
bbox_collection = bbox_collection[0:self.top_k]
bbox_collection_numpy = np.array(bbox_collection, dtype=np.float32)
final_bboxes = self.nms(bbox_collection_numpy, self.nms_threshold)
final_bboxes_ = []
for i in range(final_bboxes.shape[0]):
final_bboxes_.append((final_bboxes[i, 0], final_bboxes[i, 1], final_bboxes[i, 2], final_bboxes[i, 3],
final_bboxes[i, 4]))
return final_bboxes_
@staticmethod
def nms(boxes, overlap_threshold):
if boxes.shape[0] == 0:
return boxes
if boxes.dtype != np.float32:
boxes = boxes.astype(np.float32)
pick = []
x1 = boxes[:, 0]
y1 = boxes[:, 1]
x2 = boxes[:, 2]
y2 = boxes[:, 3]
sc = boxes[:, 4]
widths = x2 - x1
heights = y2 - y1
area = heights * widths
idxs = np.argsort(sc)
while len(idxs) > 0:
last = len(idxs) - 1
i = idxs[last]
pick.append(i)
xx1 = np.maximum(x1[i], x1[idxs[:last]])
yy1 = np.maximum(y1[i], y1[idxs[:last]])
xx2 = np.minimum(x2[i], x2[idxs[:last]])
yy2 = np.minimum(y2[i], y2[idxs[:last]])
w = np.maximum(0, xx2 - xx1 + 1)
h = np.maximum(0, yy2 - yy1 + 1)
overlap = (w * h) / area[idxs[:last]]
idxs = np.delete(idxs, np.concatenate(([last], np.where(overlap > overlap_threshold)[0])))
return boxes[pick]
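# NMS sketch (the values are assumptions): two heavily overlapping boxes in
# (x1, y1, x2, y2, score) format; only the higher-scoring one survives.
def _demo_widerface_nms():
    boxes = np.array([[0.0, 0.0, 10.0, 10.0, 0.9],
                      [1.0, 1.0, 10.0, 10.0, 0.8]], dtype=np.float32)
    return WiderfaceDetMetric.nms(boxes, overlap_threshold=0.4)  # keeps only the 0.9 box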
| 27,536
| 36.11186
| 119
|
py
|
imgclsmob
|
imgclsmob-master/gluon/metrics/hpe_metrics.py
|
"""
Evaluation Metrics for Human Pose Estimation.
"""
import mxnet as mx
__all__ = ['CocoHpeOksApMetric']
class CocoHpeOksApMetric(mx.metric.EvalMetric):
"""
    OKS AP metric for the COCO keypoint estimation task.
Parameters:
----------
coco_annotations_file_path : str
        COCO annotation file path.
    pose_postprocessing_fn : func
        A function for pose post-processing.
    validation_ids : list of int or None, default None
        Subset of image IDs to evaluate on (all images if None).
use_file : bool, default False
Whether to use temporary file for estimation.
name : str, default 'CocoOksAp'
Name of this metric instance for display.
"""
def __init__(self,
coco_annotations_file_path,
pose_postprocessing_fn,
validation_ids=None,
use_file=False,
name="CocoOksAp"):
super(CocoHpeOksApMetric, self).__init__(name=name)
self.coco_annotations_file_path = coco_annotations_file_path
self.pose_postprocessing_fn = pose_postprocessing_fn
self.validation_ids = validation_ids
self.use_file = use_file
self.coco_result = []
def reset(self):
self.coco_result = []
def get(self):
"""
Get evaluation metrics.
"""
import copy
from pycocotools.coco import COCO
gt = COCO(self.coco_annotations_file_path)
if self.use_file:
import tempfile
import json
with tempfile.NamedTemporaryFile(mode="w", suffix=".json") as f:
json.dump(self.coco_result, f)
f.flush()
pred = gt.loadRes(f.name)
else:
def calc_pred(coco, anns):
import numpy as np
import copy
pred = COCO()
pred.dataset["images"] = [img for img in coco.dataset["images"]]
annsImgIds = [ann["image_id"] for ann in anns]
assert set(annsImgIds) == (set(annsImgIds) & set(coco.getImgIds()))
pred.dataset["categories"] = copy.deepcopy(coco.dataset["categories"])
for id, ann in enumerate(anns):
s = ann["keypoints"]
x = s[0::3]
y = s[1::3]
x0, x1, y0, y1 = np.min(x), np.max(x), np.min(y), np.max(y)
ann["area"] = (x1 - x0) * (y1 - y0)
ann["id"] = id + 1
ann["bbox"] = [x0, y0, x1 - x0, y1 - y0]
pred.dataset["annotations"] = anns
pred.createIndex()
return pred
pred = calc_pred(gt, copy.deepcopy(self.coco_result))
from pycocotools.cocoeval import COCOeval
coco_eval = COCOeval(gt, pred, "keypoints")
if self.validation_ids is not None:
coco_eval.params.imgIds = self.validation_ids
coco_eval.params.useSegm = None
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
return self.name, tuple(coco_eval.stats[:3])
def update(self, labels, preds):
"""
Updates the internal evaluation result.
Parameters:
----------
labels : list of `NDArray`
The labels of the data.
preds : list of `NDArray`
Predicted values.
"""
for label, pred in zip(labels, preds):
label = label.asnumpy()
pred = pred.asnumpy()
pred_pts_score, pred_person_score, label_img_id = self.pose_postprocessing_fn(pred, label)
for idx in range(len(pred_pts_score)):
image_id = int(label_img_id[idx])
kpt = pred_pts_score[idx].flatten().tolist()
score = float(pred_person_score[idx])
self.coco_result.append({
"image_id": image_id,
"category_id": 1,
"keypoints": kpt,
"score": score})
| 4,037
| 32.371901
| 102
|
py
|
imgclsmob
|
imgclsmob-master/gluon/metrics/asr_metrics.py
|
"""
Evaluation Metrics for Automatic Speech Recognition (ASR).
"""
import mxnet as mx
__all__ = ['WER']
class WER(mx.metric.EvalMetric):
"""
Computes Word Error Rate (WER) for Automatic Speech Recognition (ASR).
Parameters:
----------
vocabulary : list of str
Vocabulary of the dataset.
name : str, default 'wer'
Name of this metric instance for display.
output_names : list of str, or None, default None
Name of predictions that should be used when updating with update_dict.
By default include all predictions.
label_names : list of str, or None, default None
Name of labels that should be used when updating with update_dict.
By default include all labels.
"""
def __init__(self,
vocabulary,
name="wer",
output_names=None,
label_names=None):
super(WER, self).__init__(
name=name,
output_names=output_names,
label_names=label_names,
has_global_stats=True)
self.vocabulary = vocabulary
self.ctc_decoder = CtcDecoder(vocabulary=vocabulary)
def update(self, labels, preds):
"""
Updates the internal evaluation result.
Parameters:
----------
labels : list of `NDArray`
The labels of the data.
preds : list of `NDArray`
Predicted values.
"""
import editdistance
for labels_i, preds_i in zip(labels, preds):
labels_code = labels_i.asnumpy()
labels_i = []
for label_code in labels_code:
label_text = "".join([self.ctc_decoder.labels_map[c] for c in label_code])
labels_i.append(label_text)
preds_i = preds_i[0]
greedy_predictions = preds_i.swapaxes(1, 2).log_softmax(axis=-1).argmax(axis=-1, keepdims=False).asnumpy()
preds_i = self.ctc_decoder(greedy_predictions)
assert (len(labels_i) == len(preds_i))
for pred, label in zip(preds_i, labels_i):
pred = pred.split()
label = label.split()
word_error_count = editdistance.eval(label, pred)
word_count = max(len(label), len(pred))
assert (word_error_count <= word_count)
self.sum_metric += word_error_count
self.global_sum_metric += word_error_count
self.num_inst += word_count
self.global_num_inst += word_count
class CtcDecoder(object):
"""
CTC decoder (to decode a sequence of labels to words).
Parameters:
----------
vocabulary : list of str
Vocabulary of the dataset.
"""
def __init__(self,
vocabulary):
super().__init__()
self.blank_id = len(vocabulary)
self.labels_map = dict([(i, vocabulary[i]) for i in range(len(vocabulary))])
def __call__(self,
predictions):
"""
Decode a sequence of labels to words.
Parameters:
----------
predictions : np.array of int or list of list of int
Tensor with predicted labels.
Returns:
-------
list of str
Words.
"""
hypotheses = []
for prediction in predictions:
decoded_prediction = []
previous = self.blank_id
for p in prediction:
if (p != previous or previous == self.blank_id) and p != self.blank_id:
decoded_prediction.append(p)
previous = p
hypothesis = "".join([self.labels_map[c] for c in decoded_prediction])
hypotheses.append(hypothesis)
return hypotheses
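# Decoding sketch (the values are assumptions): the blank id equals
# len(vocabulary), repeated labels collapse, and blanks separate repeats.
def _demo_ctc_decoder():
    decoder = CtcDecoder(vocabulary=["a", "b", "c"])  # blank id is 3
    return decoder([[0, 0, 3, 1, 1, 3, 3, 2]])  # -> ["abc"]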
| 3,800
| 30.413223
| 118
|
py
|
imgclsmob
|
imgclsmob-master/gluon/datasets/imagenet1k_cls_dataset.py
|
"""
ImageNet-1K classification dataset.
"""
import os
import math
import mxnet as mx
from mxnet.gluon import HybridBlock
from mxnet.gluon.data.vision import ImageFolderDataset
from mxnet.gluon.data.vision import transforms
from .dataset_metainfo import DatasetMetaInfo
class ImageNet1K(ImageFolderDataset):
"""
ImageNet-1K classification dataset.
Refer to MXNet documentation for the description of this dataset and how to prepare it.
Parameters:
----------
root : str, default '~/.mxnet/datasets/imagenet'
Path to the folder stored the dataset.
mode : str, default 'train'
'train', 'val', or 'test'.
transform : function, default None
A function that takes data and label and transforms them.
"""
def __init__(self,
root=os.path.join("~", ".mxnet", "datasets", "imagenet"),
mode="train",
transform=None):
split = "train" if mode == "train" else "val"
root = os.path.join(root, split)
super(ImageNet1K, self).__init__(root=root, flag=1, transform=transform)
class ImageNet1KMetaInfo(DatasetMetaInfo):
"""
Descriptor of ImageNet-1K dataset.
"""
def __init__(self):
super(ImageNet1KMetaInfo, self).__init__()
self.label = "ImageNet1K"
self.short_label = "imagenet"
self.root_dir_name = "imagenet"
self.dataset_class = ImageNet1K
self.num_training_samples = None
self.in_channels = 3
self.num_classes = 1000
self.input_image_size = (224, 224)
self.resize_inv_factor = 0.875
self.aug_type = "aug0"
self.train_metric_capts = ["Train.Top1"]
self.train_metric_names = ["Top1Error"]
self.train_metric_extra_kwargs = [{"name": "err-top1"}]
self.val_metric_capts = ["Val.Top1", "Val.Top5"]
self.val_metric_names = ["Top1Error", "TopKError"]
self.val_metric_extra_kwargs = [{"name": "err-top1"}, {"name": "err-top5", "top_k": 5}]
self.saver_acc_ind = 1
self.train_transform = imagenet_train_transform
self.val_transform = imagenet_val_transform
self.test_transform = imagenet_val_transform
self.ml_type = "imgcls"
self.mean_rgb = (0.485, 0.456, 0.406)
self.std_rgb = (0.229, 0.224, 0.225)
self.interpolation = 1
self.loss_name = "SoftmaxCrossEntropy"
def add_dataset_parser_arguments(self,
parser,
work_dir_path):
"""
Create python script parameters (for ImageNet-1K dataset metainfo).
Parameters:
----------
parser : ArgumentParser
ArgumentParser instance.
work_dir_path : str
Path to working directory.
"""
super(ImageNet1KMetaInfo, self).add_dataset_parser_arguments(parser, work_dir_path)
parser.add_argument(
"--input-size",
type=int,
default=self.input_image_size[0],
help="size of the input for model")
parser.add_argument(
"--resize-inv-factor",
type=float,
default=self.resize_inv_factor,
help="inverted ratio for input image crop")
parser.add_argument(
"--aug-type",
type=str,
default="aug0",
help="augmentation type. options are aug0, aug1, aug2")
parser.add_argument(
"--mean-rgb",
nargs=3,
type=float,
default=self.mean_rgb,
help="Mean of RGB channels in the dataset")
parser.add_argument(
"--std-rgb",
nargs=3,
type=float,
default=self.std_rgb,
help="STD of RGB channels in the dataset")
parser.add_argument(
"--interpolation",
type=int,
default=self.interpolation,
help="Preprocessing interpolation")
def update(self,
args):
"""
Update ImageNet-1K dataset metainfo after user customizing.
Parameters:
----------
args : ArgumentParser
Main script arguments.
"""
super(ImageNet1KMetaInfo, self).update(args)
self.input_image_size = (args.input_size, args.input_size)
self.resize_inv_factor = args.resize_inv_factor
self.aug_type = args.aug_type
self.mean_rgb = args.mean_rgb
self.std_rgb = args.std_rgb
self.interpolation = args.interpolation
class ImgAugTransform(HybridBlock):
"""
ImgAug-like transform (geometric, noise, and blur).
"""
def __init__(self):
super(ImgAugTransform, self).__init__()
from imgaug import augmenters as iaa
from imgaug import parameters as iap
self.seq = iaa.Sequential(
children=[
iaa.Sequential(
children=[
iaa.Sequential(
children=[
iaa.OneOf(
children=[
iaa.Sometimes(
p=0.95,
then_list=iaa.Affine(
scale={"x": (0.9, 1.1), "y": (0.9, 1.1)},
translate_percent={"x": (-0.05, 0.05), "y": (-0.05, 0.05)},
rotate=(-30, 30),
shear=(-15, 15),
order=iap.Choice([0, 1, 3], p=[0.15, 0.80, 0.05]),
mode="reflect",
name="Affine")),
iaa.Sometimes(
p=0.05,
then_list=iaa.PerspectiveTransform(
scale=(0.01, 0.1)))],
name="Blur"),
iaa.Sometimes(
p=0.01,
then_list=iaa.PiecewiseAffine(
scale=(0.0, 0.01),
nb_rows=(4, 20),
nb_cols=(4, 20),
order=iap.Choice([0, 1, 3], p=[0.15, 0.80, 0.05]),
mode="reflect",
name="PiecewiseAffine"))],
random_order=True,
name="GeomTransform"),
iaa.Sequential(
children=[
iaa.Sometimes(
p=0.75,
then_list=iaa.Add(
value=(-10, 10),
per_channel=0.5,
name="Brightness")),
iaa.Sometimes(
p=0.05,
then_list=iaa.Emboss(
alpha=(0.0, 0.5),
strength=(0.5, 1.2),
name="Emboss")),
iaa.Sometimes(
p=0.1,
then_list=iaa.Sharpen(
alpha=(0.0, 0.5),
lightness=(0.5, 1.2),
name="Sharpen")),
iaa.Sometimes(
p=0.25,
then_list=iaa.ContrastNormalization(
alpha=(0.5, 1.5),
per_channel=0.5,
name="ContrastNormalization"))
],
random_order=True,
name="ColorTransform"),
iaa.Sequential(
children=[
iaa.Sometimes(
p=0.5,
then_list=iaa.AdditiveGaussianNoise(
loc=0,
scale=(0.0, 10.0),
per_channel=0.5,
name="AdditiveGaussianNoise")),
iaa.Sometimes(
p=0.1,
then_list=iaa.SaltAndPepper(
p=(0, 0.001),
per_channel=0.5,
name="SaltAndPepper"))],
random_order=True,
name="Noise"),
iaa.OneOf(
children=[
iaa.Sometimes(
p=0.05,
then_list=iaa.MedianBlur(
k=3,
name="MedianBlur")),
iaa.Sometimes(
p=0.05,
then_list=iaa.AverageBlur(
k=(2, 4),
name="AverageBlur")),
iaa.Sometimes(
p=0.5,
then_list=iaa.GaussianBlur(
sigma=(0.0, 2.0),
name="GaussianBlur"))],
name="Blur"),
],
random_order=True,
name="MainProcess")])
def hybrid_forward(self, F, x):
img = x.asnumpy().copy()
# cv2.imshow(winname="imgA", mat=img)
img_aug = self.seq.augment_image(img)
# cv2.imshow(winname="img_augA", mat=img_aug)
# cv2.waitKey()
x = mx.nd.array(img_aug, dtype=x.dtype, ctx=x.context)
return x
def imagenet_train_transform(ds_metainfo,
jitter_param=0.4,
lighting_param=0.1):
"""
Create image transform sequence for training subset.
Parameters:
----------
ds_metainfo : DatasetMetaInfo
ImageNet-1K dataset metainfo.
jitter_param : float
How much to jitter values.
lighting_param : float
How much to noise intensity of the image.
Returns:
-------
Sequential
Image transform sequence.
"""
input_image_size = ds_metainfo.input_image_size
if ds_metainfo.aug_type == "aug0":
interpolation = ds_metainfo.interpolation
transform_list = []
elif ds_metainfo.aug_type == "aug1":
interpolation = 10
transform_list = []
elif ds_metainfo.aug_type == "aug2":
interpolation = 10
transform_list = [
ImgAugTransform()
]
else:
raise RuntimeError("Unknown augmentation type: {}\n".format(ds_metainfo.aug_type))
transform_list += [
transforms.RandomResizedCrop(
size=input_image_size,
interpolation=interpolation),
transforms.RandomFlipLeftRight(),
transforms.RandomColorJitter(
brightness=jitter_param,
contrast=jitter_param,
saturation=jitter_param),
transforms.RandomLighting(lighting_param),
transforms.ToTensor(),
transforms.Normalize(
mean=ds_metainfo.mean_rgb,
std=ds_metainfo.std_rgb)
]
return transforms.Compose(transform_list)
def imagenet_val_transform(ds_metainfo):
"""
Create image transform sequence for validation subset.
Parameters:
----------
ds_metainfo : DatasetMetaInfo
ImageNet-1K dataset metainfo.
Returns:
-------
Sequential
Image transform sequence.
"""
input_image_size = ds_metainfo.input_image_size
resize_value = calc_val_resize_value(
input_image_size=ds_metainfo.input_image_size,
resize_inv_factor=ds_metainfo.resize_inv_factor)
return transforms.Compose([
transforms.Resize(
size=resize_value,
keep_ratio=True,
interpolation=ds_metainfo.interpolation),
transforms.CenterCrop(size=input_image_size),
transforms.ToTensor(),
transforms.Normalize(
mean=ds_metainfo.mean_rgb,
std=ds_metainfo.std_rgb)
])
def calc_val_resize_value(input_image_size=(224, 224),
resize_inv_factor=0.875):
"""
Calculate image resize value for validation subset.
Parameters:
----------
input_image_size : tuple of 2 int
Main script arguments.
resize_inv_factor : float
Resize inverted factor.
Returns:
-------
int
Resize value.
"""
if isinstance(input_image_size, int):
input_image_size = (input_image_size, input_image_size)
resize_value = int(math.ceil(float(input_image_size[0]) / resize_inv_factor))
return resize_value
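# Worked example: with the defaults the validation pipeline resizes the short
# image side to ceil(224 / 0.875) = 256 before taking the 224x224 center crop.
def _demo_calc_val_resize_value():
    return calc_val_resize_value(input_image_size=(224, 224), resize_inv_factor=0.875)  # -> 256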
| 13,686
| 36.705234
| 107
|
py
|
imgclsmob
|
imgclsmob-master/gluon/datasets/coco_hpe1_dataset.py
|
"""
COCO keypoint detection (2D single human pose estimation) dataset.
"""
import os
import copy
import cv2
import numpy as np
import mxnet as mx
from mxnet.gluon.data import dataset
from .dataset_metainfo import DatasetMetaInfo
class CocoHpe1Dataset(dataset.Dataset):
"""
COCO keypoint detection (2D single human pose estimation) dataset.
Parameters:
----------
root : string
Path to `annotations`, `train2017`, and `val2017` folders.
mode : string, default 'train'
'train', 'val', 'test', or 'demo'.
transform : callable, optional
A function that transforms the image.
splits : list of str, default ['person_keypoints_val2017']
Json annotations name.
Candidates can be: person_keypoints_val2017, person_keypoints_train2017.
check_centers : bool, default is False
If true, will force check centers of bbox and keypoints, respectively.
If centers are far away from each other, remove this label.
skip_empty : bool, default is False
Whether skip entire image if no valid label is found. Use `False` if this dataset is
for validation to avoid COCO metric error.
"""
def __init__(self,
root,
mode="train",
transform=None,
splits=("person_keypoints_val2017",),
check_centers=False,
skip_empty=True):
super(CocoHpe1Dataset, self).__init__()
self.root = os.path.expanduser(root)
self.mode = mode
self._transform = transform
self.classes = ["person"]
self.num_class = len(self.classes)
self.keypoint_names = {
0: "nose",
1: "left_eye",
2: "right_eye",
3: "left_ear",
4: "right_ear",
5: "left_shoulder",
6: "right_shoulder",
7: "left_elbow",
8: "right_elbow",
9: "left_wrist",
10: "right_wrist",
11: "left_hip",
12: "right_hip",
13: "left_knee",
14: "right_knee",
15: "left_ankle",
16: "right_ankle"
}
self.skeleton = [
[16, 14], [14, 12], [17, 15], [15, 13], [12, 13], [6, 12], [7, 13], [6, 7], [6, 8],
[7, 9], [8, 10], [9, 11], [2, 3], [1, 2], [1, 3], [2, 4], [3, 5], [4, 6], [5, 7]]
        # Joint pairs that define which joints are swapped when the image is flipped horizontally:
self.joint_pairs = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14], [15, 16]]
self.num_joints = 17
if isinstance(splits, mx.base.string_types):
splits = [splits]
self._splits = splits
self._coco = []
self._check_centers = check_centers
self._skip_empty = skip_empty
self.index_map = dict(zip(self.classes, range(self.num_class)))
self.json_id_to_contiguous = None
self.contiguous_id_to_json = None
self._items, self._labels = self._load_jsons()
mode_name = "train" if mode == "train" else "val"
annotations_dir_path = os.path.join(root, "annotations")
annotations_file_path = os.path.join(annotations_dir_path, "person_keypoints_" + mode_name + "2017.json")
self.annotations_file_path = annotations_file_path
def __str__(self):
detail = ",".join([str(s) for s in self._splits])
return self.__class__.__name__ + "(" + detail + ")"
def __len__(self):
return len(self._items)
def __getitem__(self, idx):
img_path = self._items[idx]
img_id = int(os.path.splitext(os.path.basename(img_path))[0])
label = copy.deepcopy(self._labels[idx])
img = mx.image.imread(img_path, 1)
if self._transform is not None:
img, scale, center, score = self._transform(img, label)
res_label = np.array([float(img_id)] + [float(score)] + list(center) + list(scale), np.float32)
return img, res_label
def _load_jsons(self):
"""
Load all image paths and labels from JSON annotation files into buffer.
"""
items = []
labels = []
from pycocotools.coco import COCO
for split in self._splits:
anno = os.path.join(self.root, "annotations", split) + ".json"
_coco = COCO(anno)
self._coco.append(_coco)
classes = [c["name"] for c in _coco.loadCats(_coco.getCatIds())]
            if classes != self.classes:
                raise ValueError("Incompatible category names with COCO: {}".format(classes))
json_id_to_contiguous = {
v: k for k, v in enumerate(_coco.getCatIds())}
if self.json_id_to_contiguous is None:
self.json_id_to_contiguous = json_id_to_contiguous
self.contiguous_id_to_json = {
v: k for k, v in self.json_id_to_contiguous.items()}
else:
assert self.json_id_to_contiguous == json_id_to_contiguous
# iterate through the annotations
image_ids = sorted(_coco.getImgIds())
for entry in _coco.loadImgs(image_ids):
dirname, filename = entry["coco_url"].split("/")[-2:]
abs_path = os.path.join(self.root, dirname, filename)
if not os.path.exists(abs_path):
raise IOError("Image: {} not exists.".format(abs_path))
label = self._check_load_keypoints(_coco, entry)
if not label:
continue
                # the number of items is relative to persons, not images
for obj in label:
items.append(abs_path)
labels.append(obj)
return items, labels
def _check_load_keypoints(self, coco, entry):
"""
Check and load ground-truth keypoints.
"""
ann_ids = coco.getAnnIds(imgIds=entry["id"], iscrowd=False)
objs = coco.loadAnns(ann_ids)
# check valid bboxes
valid_objs = []
width = entry["width"]
height = entry["height"]
for obj in objs:
contiguous_cid = self.json_id_to_contiguous[obj["category_id"]]
if contiguous_cid >= self.num_class:
# not class of interest
continue
if max(obj["keypoints"]) == 0:
continue
# convert from (x, y, w, h) to (xmin, ymin, xmax, ymax) and clip bound
xmin, ymin, xmax, ymax = self.bbox_clip_xyxy(self.bbox_xywh_to_xyxy(obj["bbox"]), width, height)
# require non-zero box area
if obj['area'] <= 0 or xmax <= xmin or ymax <= ymin:
continue
# joints 3d: (num_joints, 3, 2); 3 is for x, y, z; 2 is for position, visibility
joints_3d = np.zeros((self.num_joints, 3, 2), dtype=np.float32)
for i in range(self.num_joints):
joints_3d[i, 0, 0] = obj["keypoints"][i * 3 + 0]
joints_3d[i, 1, 0] = obj["keypoints"][i * 3 + 1]
# joints_3d[i, 2, 0] = 0
visible = min(1, obj["keypoints"][i * 3 + 2])
joints_3d[i, :2, 1] = visible
# joints_3d[i, 2, 1] = 0
if np.sum(joints_3d[:, 0, 1]) < 1:
# no visible keypoint
continue
if self._check_centers:
bbox_center, bbox_area = self._get_box_center_area((xmin, ymin, xmax, ymax))
kp_center, num_vis = self._get_keypoints_center_count(joints_3d)
ks = np.exp(-2 * np.sum(np.square(bbox_center - kp_center)) / bbox_area)
if (num_vis / 80.0 + 47 / 80.0) > ks:
continue
valid_objs.append({
"bbox": (xmin, ymin, xmax, ymax),
"joints_3d": joints_3d
})
if not valid_objs:
if not self._skip_empty:
# dummy invalid labels if no valid objects are found
valid_objs.append({
"bbox": np.array([-1, -1, 0, 0]),
"joints_3d": np.zeros((self.num_joints, 3, 2), dtype=np.float32)
})
return valid_objs
@staticmethod
def _get_box_center_area(bbox):
"""
Get bbox center.
"""
c = np.array([(bbox[0] + bbox[2]) / 2.0, (bbox[1] + bbox[3]) / 2.0])
area = (bbox[3] - bbox[1]) * (bbox[2] - bbox[0])
return c, area
@staticmethod
def _get_keypoints_center_count(keypoints):
"""
Get geometric center of all keypoints.
"""
keypoint_x = np.sum(keypoints[:, 0, 0] * (keypoints[:, 0, 1] > 0))
keypoint_y = np.sum(keypoints[:, 1, 0] * (keypoints[:, 1, 1] > 0))
num = float(np.sum(keypoints[:, 0, 1]))
return np.array([keypoint_x / num, keypoint_y / num]), num
@staticmethod
def bbox_clip_xyxy(xyxy, width, height):
"""
Clip bounding box with format (xmin, ymin, xmax, ymax) to specified boundary.
All bounding boxes will be clipped to the new region `(0, 0, width, height)`.
Parameters:
----------
xyxy : list, tuple or numpy.ndarray
The bbox in format (xmin, ymin, xmax, ymax).
If numpy.ndarray is provided, we expect multiple bounding boxes with
shape `(N, 4)`.
width : int or float
Boundary width.
height : int or float
Boundary height.
Returns:
-------
tuple or np.array
Description of returned object.
"""
if isinstance(xyxy, (tuple, list)):
if not len(xyxy) == 4:
raise IndexError("Bounding boxes must have 4 elements, given {}".format(len(xyxy)))
x1 = np.minimum(width - 1, np.maximum(0, xyxy[0]))
y1 = np.minimum(height - 1, np.maximum(0, xyxy[1]))
x2 = np.minimum(width - 1, np.maximum(0, xyxy[2]))
y2 = np.minimum(height - 1, np.maximum(0, xyxy[3]))
return x1, y1, x2, y2
elif isinstance(xyxy, np.ndarray):
if not xyxy.size % 4 == 0:
raise IndexError("Bounding boxes must have n * 4 elements, given {}".format(xyxy.shape))
            # Keep 2D column slices so that np.hstack returns shape (N, 4).
            x1 = np.minimum(width - 1, np.maximum(0, xyxy[:, 0:1]))
            y1 = np.minimum(height - 1, np.maximum(0, xyxy[:, 1:2]))
            x2 = np.minimum(width - 1, np.maximum(0, xyxy[:, 2:3]))
            y2 = np.minimum(height - 1, np.maximum(0, xyxy[:, 3:4]))
            return np.hstack((x1, y1, x2, y2))
else:
raise TypeError("Expect input xywh a list, tuple or numpy.ndarray, given {}".format(type(xyxy)))
@staticmethod
def bbox_xywh_to_xyxy(xywh):
"""
Convert bounding boxes from format (xmin, ymin, w, h) to (xmin, ymin, xmax, ymax)
Parameters:
----------
xywh : list, tuple or numpy.ndarray
The bbox in format (x, y, w, h).
If numpy.ndarray is provided, we expect multiple bounding boxes with
shape `(N, 4)`.
Returns:
-------
tuple or np.ndarray
The converted bboxes in format (xmin, ymin, xmax, ymax).
If input is numpy.ndarray, return is numpy.ndarray correspondingly.
"""
if isinstance(xywh, (tuple, list)):
if not len(xywh) == 4:
raise IndexError("Bounding boxes must have 4 elements, given {}".format(len(xywh)))
w, h = np.maximum(xywh[2] - 1, 0), np.maximum(xywh[3] - 1, 0)
return xywh[0], xywh[1], xywh[0] + w, xywh[1] + h
elif isinstance(xywh, np.ndarray):
if not xywh.size % 4 == 0:
raise IndexError("Bounding boxes must have n * 4 elements, given {}".format(xywh.shape))
xyxy = np.hstack((xywh[:, :2], xywh[:, :2] + np.maximum(0, xywh[:, 2:4] - 1)))
return xyxy
else:
raise TypeError("Expect input xywh a list, tuple or numpy.ndarray, given {}".format(type(xywh)))
# ---------------------------------------------------------------------------------------------------------------------
class CocoHpeValTransform1(object):
def __init__(self,
ds_metainfo):
self.ds_metainfo = ds_metainfo
self.image_size = self.ds_metainfo.input_image_size
height = self.image_size[0]
width = self.image_size[1]
self.aspect_ratio = float(width / height)
self.mean = ds_metainfo.mean_rgb
self.std = ds_metainfo.std_rgb
def __call__(self, src, label):
bbox = label["bbox"]
assert len(bbox) == 4
xmin, ymin, xmax, ymax = bbox
center, scale = _box_to_center_scale(xmin, ymin, xmax - xmin, ymax - ymin, self.aspect_ratio)
score = label.get("score", 1)
h, w = self.image_size
trans = get_affine_transform(center, scale, 0, [w, h])
img = cv2.warpAffine(src.asnumpy(), trans, (int(w), int(h)), flags=cv2.INTER_LINEAR)
img = mx.nd.image.to_tensor(mx.nd.array(img))
img = mx.nd.image.normalize(img, mean=self.mean, std=self.std)
return img, scale, center, score
def _box_to_center_scale(x, y, w, h, aspect_ratio=1.0, scale_mult=1.25):
pixel_std = 1
center = np.zeros((2,), dtype=np.float32)
center[0] = x + w * 0.5
center[1] = y + h * 0.5
if w > aspect_ratio * h:
h = w / aspect_ratio
elif w < aspect_ratio * h:
w = h * aspect_ratio
scale = np.array(
[w * 1.0 / pixel_std, h * 1.0 / pixel_std], dtype=np.float32)
if center[0] != -1:
scale = scale * scale_mult
return center, scale
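# Illustrative check (added; not in the original source): _box_to_center_scale
# pads the box to the target aspect ratio before inflating it by `scale_mult`.
# For a 100 x 50 box and aspect_ratio = 0.75 (i.e. 192 / 256), the height is
# grown so that w / h == 0.75, then both sides are multiplied by 1.25.
def _demo_box_to_center_scale():
    center, scale = _box_to_center_scale(x=0, y=0, w=100, h=50, aspect_ratio=0.75)
    # center == (50.0, 25.0); scale == (125.0, ~166.7) after the 1.25 inflation
    return center, scale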
def get_dir(src_point, rot_rad):
sn, cs = np.sin(rot_rad), np.cos(rot_rad)
src_result = [0, 0]
src_result[0] = src_point[0] * cs - src_point[1] * sn
src_result[1] = src_point[0] * sn + src_point[1] * cs
return src_result
def crop(img, center, scale, output_size, rot=0):
trans = get_affine_transform(center, scale, rot, output_size)
dst_img = cv2.warpAffine(
img,
trans,
(int(output_size[0]), int(output_size[1])),
flags=cv2.INTER_LINEAR)
return dst_img
def get_3rd_point(a, b):
direct = a - b
return b + np.array([-direct[1], direct[0]], dtype=np.float32)
def get_affine_transform(center,
scale,
rot,
output_size,
shift=np.array([0, 0], dtype=np.float32),
inv=0):
if not isinstance(scale, np.ndarray) and not isinstance(scale, list):
scale = np.array([scale, scale])
scale_tmp = scale
src_w = scale_tmp[0]
dst_w = output_size[0]
dst_h = output_size[1]
rot_rad = np.pi * rot / 180
src_dir = get_dir([0, src_w * -0.5], rot_rad)
dst_dir = np.array([0, dst_w * -0.5], np.float32)
src = np.zeros((3, 2), dtype=np.float32)
dst = np.zeros((3, 2), dtype=np.float32)
src[0, :] = center + scale_tmp * shift
src[1, :] = center + src_dir + scale_tmp * shift
dst[0, :] = [dst_w * 0.5, dst_h * 0.5]
dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5]) + dst_dir
src[2:, :] = get_3rd_point(src[0, :], src[1, :])
dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :])
if inv:
trans = cv2.getAffineTransform(np.float32(dst), np.float32(src))
else:
trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))
return trans
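# Added sketch (illustration only): get_affine_transform derives the warp from
# three point pairs -- the box center, a point half a box-width above it, and
# a third point placed perpendicular to that direction by get_3rd_point. With
# rot=0 and a scale equal to the output size, the result is the identity map.
def _demo_get_affine_transform():
    trans = get_affine_transform(
        center=np.array([96.0, 128.0], dtype=np.float32),
        scale=np.array([192.0, 256.0], dtype=np.float32),
        rot=0,
        output_size=[192, 256])
    # trans is the 2 x 3 matrix that is passed to cv2.warpAffine above
    return trans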
# ---------------------------------------------------------------------------------------------------------------------
class CocoHpeValTransform2(object):
def __init__(self,
ds_metainfo):
self.ds_metainfo = ds_metainfo
self.image_size = self.ds_metainfo.input_image_size
height = self.image_size[0]
width = self.image_size[1]
self.aspect_ratio = float(width / height)
self.mean = ds_metainfo.mean_rgb
self.std = ds_metainfo.std_rgb
def __call__(self, src, label):
# print(src.shape)
src = src.asnumpy()
bbox = label["bbox"]
assert len(bbox) == 4
score = label.get('score', 1)
img, scale_box = detector_to_alpha_pose(
src,
class_ids=mx.nd.array([[0.]]),
scores=mx.nd.array([[1.]]),
bounding_boxs=mx.nd.array(np.array([bbox])),
output_shape=self.image_size)
if scale_box.shape[0] == 1:
pt1 = np.array(scale_box[0, (0, 1)], dtype=np.float32)
pt2 = np.array(scale_box[0, (2, 3)], dtype=np.float32)
else:
assert scale_box.shape[0] == 4
pt1 = np.array(scale_box[(0, 1)], dtype=np.float32)
pt2 = np.array(scale_box[(2, 3)], dtype=np.float32)
return img[0], pt1, pt2, score
def detector_to_alpha_pose(img,
class_ids,
scores,
bounding_boxs,
output_shape=(256, 192),
thr=0.5):
boxes, scores = alpha_pose_detection_processor(
img=img,
boxes=bounding_boxs,
class_idxs=class_ids,
scores=scores,
thr=thr)
pose_input, upscale_bbox = alpha_pose_image_cropper(
source_img=img,
boxes=boxes,
output_shape=output_shape)
return pose_input, upscale_bbox
def alpha_pose_detection_processor(img,
boxes,
class_idxs,
scores,
thr=0.5):
if len(boxes.shape) == 3:
boxes = boxes.squeeze(axis=0)
if len(class_idxs.shape) == 3:
class_idxs = class_idxs.squeeze(axis=0)
if len(scores.shape) == 3:
scores = scores.squeeze(axis=0)
    # clip coordinates
boxes[:, [0, 2]] = mx.nd.clip(boxes[:, [0, 2]], 0., img.shape[1] - 1)
boxes[:, [1, 3]] = mx.nd.clip(boxes[:, [1, 3]], 0., img.shape[0] - 1)
# select boxes
mask1 = (class_idxs == 0).asnumpy()
mask2 = (scores > thr).asnumpy()
picked_idxs = np.where((mask1 + mask2) > 1)[0]
if picked_idxs.shape[0] == 0:
return None, None
else:
return boxes[picked_idxs], scores[picked_idxs]
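# Note (added for clarity): the selection above relies on `mask1` and `mask2`
# being float arrays (MXNet comparisons converted via .asnumpy()), so that
# `(mask1 + mask2) > 1` acts as a logical AND. A pure-NumPy sketch of the same
# idea with explicit float masks:
def _demo_detection_selection():
    class_idxs = np.array([0, 1, 0])
    scores = np.array([0.9, 0.9, 0.3])
    mask1 = (class_idxs == 0).astype(np.float32)
    mask2 = (scores > 0.5).astype(np.float32)
    picked = np.where((mask1 + mask2) > 1)[0]
    return picked  # array([0]): only the confident "person" detection survives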
def alpha_pose_image_cropper(source_img,
boxes,
output_shape=(256, 192)):
if boxes is None:
return None, boxes
# crop person poses
img_width, img_height = source_img.shape[1], source_img.shape[0]
tensors = mx.nd.zeros([boxes.shape[0], 3, output_shape[0], output_shape[1]])
out_boxes = np.zeros([boxes.shape[0], 4])
for i, box in enumerate(boxes.asnumpy()):
img = source_img.copy()
box_width = box[2] - box[0]
box_height = box[3] - box[1]
if box_width > 100:
scale_rate = 0.2
else:
scale_rate = 0.3
# crop image
left = int(max(0, box[0] - box_width * scale_rate / 2))
up = int(max(0, box[1] - box_height * scale_rate / 2))
right = int(min(img_width - 1, max(left + 5, box[2] + box_width * scale_rate / 2)))
bottom = int(min(img_height - 1, max(up + 5, box[3] + box_height * scale_rate / 2)))
crop_width = right - left
if crop_width < 1:
continue
crop_height = bottom - up
if crop_height < 1:
continue
ul = np.array((left, up))
br = np.array((right, bottom))
img = cv_cropBox(img, ul, br, output_shape[0], output_shape[1])
img = mx.nd.image.to_tensor(mx.nd.array(img))
# img = img.transpose((2, 0, 1))
img[0] = img[0] - 0.406
img[1] = img[1] - 0.457
img[2] = img[2] - 0.480
assert (img.shape[0] == 3)
tensors[i] = img
out_boxes[i] = (left, up, right, bottom)
return tensors, out_boxes
def cv_cropBox(img, ul, br, resH, resW, pad_val=0):
    br = br - 1
# br = br.int()
lenH = max((br[1] - ul[1]).item(), (br[0] - ul[0]).item() * resH / resW)
lenW = lenH * resW / resH
if img.ndim == 2:
        img = img[:, :, np.newaxis]
box_shape = [br[1] - ul[1], br[0] - ul[0]]
pad_size = [(lenH - box_shape[0]) // 2, (lenW - box_shape[1]) // 2]
# Padding Zeros
img[:ul[1], :, :], img[:, :ul[0], :] = pad_val, pad_val
img[br[1] + 1:, :, :], img[:, br[0] + 1:, :] = pad_val, pad_val
src = np.zeros((3, 2), dtype=np.float32)
dst = np.zeros((3, 2), dtype=np.float32)
src[0, :] = np.array([ul[0] - pad_size[1], ul[1] - pad_size[0]], np.float32)
src[1, :] = np.array([br[0] + pad_size[1], br[1] + pad_size[0]], np.float32)
dst[0, :] = 0
dst[1, :] = np.array([resW - 1, resH - 1], np.float32)
src[2:, :] = get_3rd_point(src[0, :], src[1, :])
dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :])
trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))
dst_img = cv2.warpAffine(img, trans, (resW, resH), flags=cv2.INTER_LINEAR)
return dst_img
# ---------------------------------------------------------------------------------------------------------------------
def recalc_pose1(keypoints,
bbs,
image_size):
def transform_preds(coords, center, scale, output_size):
def affine_transform(pt, t):
new_pt = np.array([pt[0], pt[1], 1.]).T
new_pt = np.dot(t, new_pt)
return new_pt[:2]
target_coords = np.zeros(coords.shape)
trans = get_affine_transform(center, scale, 0, output_size, inv=1)
for p in range(coords.shape[0]):
target_coords[p, 0:2] = affine_transform(coords[p, 0:2], trans)
return target_coords
center = bbs[:, :2]
scale = bbs[:, 2:4]
heatmap_height = image_size[0] // 4
heatmap_width = image_size[1] // 4
output_size = [heatmap_width, heatmap_height]
preds = np.zeros_like(keypoints)
for i in range(keypoints.shape[0]):
preds[i] = transform_preds(keypoints[i], center[i], scale[i], output_size)
return preds
def recalc_pose1b(pred,
label,
image_size,
visible_conf_threshold=0.0):
label_img_id = label[:, 0].astype(np.int32)
label_score = label[:, 1]
label_bbs = label[:, 2:6]
pred_keypoints = pred[:, :, :2]
pred_score = pred[:, :, 2]
pred[:, :, :2] = recalc_pose1(pred_keypoints, label_bbs, image_size)
pred_person_score = []
batch = pred_keypoints.shape[0]
num_joints = pred_keypoints.shape[1]
for idx in range(batch):
kpt_score = 0
count = 0
for i in range(num_joints):
mval = float(pred_score[idx][i])
if mval > visible_conf_threshold:
kpt_score += mval
count += 1
if count > 0:
kpt_score /= count
kpt_score = kpt_score * float(label_score[idx])
pred_person_score.append(kpt_score)
return pred, pred_person_score, label_img_id
def recalc_pose2(keypoints,
bbs,
image_size):
def transformBoxInvert(pt, ul, br, resH, resW):
center = np.zeros(2)
center[0] = (br[0] - 1 - ul[0]) / 2
center[1] = (br[1] - 1 - ul[1]) / 2
lenH = max(br[1] - ul[1], (br[0] - ul[0]) * resH / resW)
lenW = lenH * resW / resH
_pt = (pt * lenH) / resH
if bool(((lenW - 1) / 2 - center[0]) > 0):
_pt[0] = _pt[0] - ((lenW - 1) / 2 - center[0])
if bool(((lenH - 1) / 2 - center[1]) > 0):
_pt[1] = _pt[1] - ((lenH - 1) / 2 - center[1])
new_point = np.zeros(2)
new_point[0] = _pt[0] + ul[0]
new_point[1] = _pt[1] + ul[1]
return new_point
pt2 = bbs[:, :2]
pt1 = bbs[:, 2:4]
heatmap_height = image_size[0] // 4
heatmap_width = image_size[1] // 4
preds = np.zeros_like(keypoints)
for i in range(keypoints.shape[0]):
for j in range(keypoints.shape[1]):
preds[i, j] = transformBoxInvert(keypoints[i, j], pt1[i], pt2[i], heatmap_height, heatmap_width)
return preds
def recalc_pose2b(pred,
label,
image_size,
visible_conf_threshold=0.0):
label_img_id = label[:, 0].astype(np.int32)
label_score = label[:, 1]
label_bbs = label[:, 2:6]
pred_keypoints = pred[:, :, :2]
pred_score = pred[:, :, 2]
pred[:, :, :2] = recalc_pose2(pred_keypoints, label_bbs, image_size)
pred_person_score = []
batch = pred_keypoints.shape[0]
num_joints = pred_keypoints.shape[1]
for idx in range(batch):
kpt_score = 0
count = 0
for i in range(num_joints):
mval = float(pred_score[idx][i])
if mval > visible_conf_threshold:
kpt_score += mval
count += 1
if count > 0:
kpt_score /= count
kpt_score = kpt_score * float(label_score[idx])
pred_person_score.append(kpt_score)
return pred, pred_person_score, label_img_id
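# Worked example (added; illustration only) of the person-score aggregation
# shared by recalc_pose1b and recalc_pose2b: joint confidences above the
# threshold are averaged and the mean is rescaled by the detector box score.
def _demo_person_score():
    joint_scores = [0.9, 0.8, 0.0, 0.7]
    visible = [s for s in joint_scores if s > 0.0]
    kpt_score = sum(visible) / len(visible)  # (0.9 + 0.8 + 0.7) / 3 == 0.8
    label_score = 0.5
    return kpt_score * label_score           # 0.4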
# ---------------------------------------------------------------------------------------------------------------------
class CocoHpe1MetaInfo(DatasetMetaInfo):
def __init__(self):
super(CocoHpe1MetaInfo, self).__init__()
self.label = "COCO"
self.short_label = "coco"
self.root_dir_name = "coco"
self.dataset_class = CocoHpe1Dataset
self.num_training_samples = None
self.in_channels = 3
self.num_classes = 17
self.input_image_size = (256, 192)
self.train_metric_capts = None
self.train_metric_names = None
self.train_metric_extra_kwargs = None
self.val_metric_capts = None
self.val_metric_names = None
self.test_metric_capts = ["Val.CocoOksAp"]
self.test_metric_names = ["CocoHpeOksApMetric"]
self.test_metric_extra_kwargs = [
{"name": "OksAp",
"coco_annotations_file_path": None,
"use_file": False,
"pose_postprocessing_fn": lambda x, y: recalc_pose1b(x, y, self.input_image_size)}]
self.saver_acc_ind = 0
self.do_transform = True
self.val_transform = CocoHpeValTransform1
self.test_transform = CocoHpeValTransform1
self.ml_type = "hpe"
self.allow_hybridize = False
self.test_net_extra_kwargs = {"fixed_size": False}
self.mean_rgb = (0.485, 0.456, 0.406)
self.std_rgb = (0.229, 0.224, 0.225)
self.model_type = 1
def add_dataset_parser_arguments(self,
parser,
work_dir_path):
"""
        Create python script parameters (for COCO dataset metainfo).
Parameters:
----------
parser : ArgumentParser
ArgumentParser instance.
work_dir_path : str
Path to working directory.
"""
super(CocoHpe1MetaInfo, self).add_dataset_parser_arguments(parser, work_dir_path)
parser.add_argument(
"--input-size",
type=int,
nargs=2,
default=self.input_image_size,
help="size of the input for model")
parser.add_argument(
"--model-type",
type=int,
default=self.model_type,
help="model type (1=SimplePose, 2=AlphaPose)")
def update(self,
args):
"""
        Update COCO dataset metainfo after user customizing.
Parameters:
----------
args : ArgumentParser
Main script arguments.
"""
super(CocoHpe1MetaInfo, self).update(args)
self.input_image_size = args.input_size
self.model_type = args.model_type
if self.model_type == 1:
self.test_metric_extra_kwargs[0]["pose_postprocessing_fn"] =\
lambda x, y: recalc_pose1b(x, y, self.input_image_size)
self.val_transform = CocoHpeValTransform1
self.test_transform = CocoHpeValTransform1
else:
self.test_metric_extra_kwargs[0]["pose_postprocessing_fn"] =\
lambda x, y: recalc_pose2b(x, y, self.input_image_size)
self.val_transform = CocoHpeValTransform2
self.test_transform = CocoHpeValTransform2
def update_from_dataset(self,
dataset):
"""
Update dataset metainfo after a dataset class instance creation.
Parameters:
----------
        dataset : obj
            A dataset class instance.
"""
self.test_metric_extra_kwargs[0]["coco_annotations_file_path"] = dataset.annotations_file_path
| 28,936
| 34.289024
| 119
|
py
|
imgclsmob
|
imgclsmob-master/gluon/datasets/widerface_det_dataset.py
|
"""
WIDER FACE detection dataset.
"""
import os
import cv2
import mxnet as mx
import numpy as np
from mxnet.gluon.data import dataset
from .dataset_metainfo import DatasetMetaInfo
__all__ = ['WiderfaceDetMetaInfo']
class WiderfaceDetDataset(dataset.Dataset):
"""
WIDER FACE detection dataset.
Parameters:
----------
root : str
Path to folder storing the dataset.
mode : string, default 'train'
'train', 'val', 'test', or 'demo'.
transform : callable, optional
A function that transforms the image.
"""
def __init__(self,
root,
mode="train",
transform=None):
super(WiderfaceDetDataset, self).__init__()
self.root = os.path.expanduser(root)
self.mode = mode
self._transform = transform
self.synsets = []
self.items = []
image_dir_path = "{}/WIDER_{}/images".format(self.root, self.mode)
for folder in sorted(os.listdir(image_dir_path)):
            path = os.path.join(image_dir_path, folder)
if not os.path.isdir(path):
continue
label = len(self.synsets)
self.synsets.append(folder)
for filename in sorted(os.listdir(path)):
filename = os.path.join(path, filename)
ext = os.path.splitext(filename)[1]
if ext.lower() not in (".jpg",):
continue
self.items.append((filename, label))
def __len__(self):
return len(self.items)
def __getitem__(self, idx):
img_path = self.items[idx][0]
# image = cv2.imread(img_path, flags=cv2.IMREAD_COLOR)
image = mx.image.imread(img_path, flag=1).asnumpy()
image_size = image.shape[:2]
shorter_side = min(image.shape[:2])
resize_scale = 1.0
if shorter_side < 128:
resize_scale = 128.0 / shorter_side
image = cv2.resize(image, (0, 0), fx=resize_scale, fy=resize_scale)
image = image.transpose(2, 0, 1).astype(np.float32)
image = mx.nd.array(image)
label = "{}/{}/{}/{}/{}".format(self.synsets[self.items[idx][1]], (img_path.split("/")[1]).split(".")[0],
resize_scale, image_size[0], image_size[1])
label = np.array(label).copy()
if self._transform is not None:
image, label = self._transform(image, label)
return image, label
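# Added sketch (not in the original file) of the resize rule in __getitem__
# above: images whose shorter side is below 128 px are upscaled so that
# min(h, w) becomes exactly 128, keeping the aspect ratio.
def _demo_widerface_resize_scale():
    h, w = 96, 200
    shorter_side = min(h, w)
    resize_scale = 128.0 / shorter_side if shorter_side < 128 else 1.0
    return h * resize_scale, w * resize_scale  # (128.0, ~266.7)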
# ---------------------------------------------------------------------------------------------------------------------
class WiderfaceDetValTransform(object):
def __init__(self,
ds_metainfo):
self.ds_metainfo = ds_metainfo
def __call__(self, image, label):
return image, label
# ---------------------------------------------------------------------------------------------------------------------
class WiderfaceDetMetaInfo(DatasetMetaInfo):
def __init__(self):
super(WiderfaceDetMetaInfo, self).__init__()
self.label = "WiderFace"
self.short_label = "widerface"
self.root_dir_name = "WIDER_FACE"
self.dataset_class = WiderfaceDetDataset
self.num_training_samples = None
self.in_channels = 3
self.input_image_size = (480, 640)
self.train_metric_capts = None
self.train_metric_names = None
self.train_metric_extra_kwargs = None
self.val_metric_capts = None
self.val_metric_names = None
self.test_metric_capts = ["WF"]
self.test_metric_names = ["WiderfaceDetMetric"]
self.test_metric_extra_kwargs = [
{"name": "WF"}]
self.saver_acc_ind = 0
self.do_transform = True
self.do_transform_first = False
self.last_batch = "keep"
self.val_transform = WiderfaceDetValTransform
self.test_transform = WiderfaceDetValTransform
self.ml_type = "det"
self.allow_hybridize = False
self.test_net_extra_kwargs = None
self.model_type = 1
self.receptive_field_center_starts = None
self.receptive_field_strides = None
self.bbox_factors = None
def add_dataset_parser_arguments(self,
parser,
work_dir_path):
"""
        Create python script parameters (for WIDER FACE dataset metainfo).
Parameters:
----------
parser : ArgumentParser
ArgumentParser instance.
work_dir_path : str
Path to working directory.
"""
super(WiderfaceDetMetaInfo, self).add_dataset_parser_arguments(parser, work_dir_path)
parser.add_argument(
"--model-type",
type=int,
default=self.model_type,
help="model type (1=320, 2=560)")
def update(self,
args):
"""
        Update WIDER FACE dataset metainfo after user customizing.
Parameters:
----------
args : ArgumentParser
Main script arguments.
"""
super(WiderfaceDetMetaInfo, self).update(args)
self.model_type = args.model_type
if self.model_type == 1:
self.receptive_field_center_starts = [3, 7, 15, 31, 63]
self.receptive_field_strides = [4, 8, 16, 32, 64]
self.bbox_factors = [10.0, 20.0, 40.0, 80.0, 160.0]
else:
self.receptive_field_center_starts = [3, 3, 7, 7, 15, 31, 31, 31]
self.receptive_field_strides = [4, 4, 8, 8, 16, 32, 32, 32]
self.bbox_factors = [7.5, 10.0, 20.0, 35.0, 55.0, 125.0, 200.0, 280.0]
| 5,676
| 32.791667
| 119
|
py
|
imgclsmob
|
imgclsmob-master/gluon/datasets/coco_det_dataset.py
|
"""
MS COCO object detection dataset.
"""
__all__ = ['CocoDetMetaInfo']
import os
import cv2
import logging
import mxnet as mx
import numpy as np
from PIL import Image
from mxnet.gluon.data import dataset
from .dataset_metainfo import DatasetMetaInfo
class CocoDetDataset(dataset.Dataset):
"""
MS COCO detection dataset.
Parameters:
----------
root : str
Path to folder storing the dataset.
mode : string, default 'train'
'train', 'val', 'test', or 'demo'.
transform : callable, optional
A function that transforms the image.
splits : list of str, default ['instances_val2017']
Json annotations name.
Candidates can be: instances_val2017, instances_train2017.
min_object_area : float
Minimum accepted ground-truth area, if an object's area is smaller than this value,
it will be ignored.
skip_empty : bool, default is True
Whether skip images with no valid object. This should be `True` in training, otherwise
it will cause undefined behavior.
use_crowd : bool, default is True
Whether use boxes labeled as crowd instance.
"""
CLASSES = ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep',
'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella',
'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard',
'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard',
'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork',
'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange',
'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair',
'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv',
'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',
'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
'scissors', 'teddy bear', 'hair drier', 'toothbrush']
def __init__(self,
root,
mode="train",
transform=None,
splits=('instances_val2017',),
min_object_area=0,
skip_empty=True,
use_crowd=True):
super(CocoDetDataset, self).__init__()
self._root = os.path.expanduser(root)
self.mode = mode
self._transform = transform
self.num_class = len(self.CLASSES)
self._min_object_area = min_object_area
self._skip_empty = skip_empty
self._use_crowd = use_crowd
if isinstance(splits, mx.base.string_types):
splits = [splits]
self._splits = splits
self.index_map = dict(zip(type(self).CLASSES, range(self.num_class)))
self.json_id_to_contiguous = None
self.contiguous_id_to_json = None
self._coco = []
self._items, self._labels, self._im_aspect_ratios = self._load_jsons()
mode_name = "train" if mode == "train" else "val"
annotations_dir_path = os.path.join(root, "annotations")
annotations_file_path = os.path.join(annotations_dir_path, "instances_" + mode_name + "2017.json")
self.annotations_file_path = annotations_file_path
def __str__(self):
detail = ','.join([str(s) for s in self._splits])
return self.__class__.__name__ + '(' + detail + ')'
@property
def coco(self):
"""
Return pycocotools object for evaluation purposes.
"""
if not self._coco:
raise ValueError("No coco objects found, dataset not initialized.")
if len(self._coco) > 1:
raise NotImplementedError(
"Currently we don't support evaluating {} JSON files. \
Please use single JSON dataset and evaluate one by one".format(len(self._coco)))
return self._coco[0]
@property
def classes(self):
"""
Category names.
"""
return type(self).CLASSES
@property
def annotation_dir(self):
"""
        The subdirectory for annotations. Default is 'annotations' (COCO default).
        For example, a COCO-format json file will be searched for as
        'root/annotation_dir/xxx.json'.
        You can override this if a custom dataset doesn't follow the same pattern.
"""
return 'annotations'
def get_im_aspect_ratio(self):
"""Return the aspect ratio of each image in the order of the raw data."""
if self._im_aspect_ratios is not None:
return self._im_aspect_ratios
self._im_aspect_ratios = [None] * len(self._items)
for i, img_path in enumerate(self._items):
with Image.open(img_path) as im:
w, h = im.size
self._im_aspect_ratios[i] = 1.0 * w / h
return self._im_aspect_ratios
def _parse_image_path(self, entry):
"""How to parse image dir and path from entry.
Parameters:
----------
entry : dict
COCO entry, e.g. including width, height, image path, etc..
Returns:
-------
abs_path : str
Absolute path for corresponding image.
"""
dirname, filename = entry["coco_url"].split("/")[-2:]
abs_path = os.path.join(self._root, dirname, filename)
return abs_path
def __len__(self):
return len(self._items)
def __getitem__(self, idx):
img_path = self._items[idx]
label = self._labels[idx]
img = mx.image.imread(img_path, 1)
label = np.array(label).copy()
if self._transform is not None:
img, label = self._transform(img, label)
return img, label
def _load_jsons(self):
"""
Load all image paths and labels from JSON annotation files into buffer.
"""
items = []
labels = []
im_aspect_ratios = []
from pycocotools.coco import COCO
for split in self._splits:
anno = os.path.join(self._root, self.annotation_dir, split) + ".json"
_coco = COCO(anno)
self._coco.append(_coco)
classes = [c["name"] for c in _coco.loadCats(_coco.getCatIds())]
            if classes != self.classes:
                raise ValueError("Incompatible category names with COCO: {}".format(classes))
json_id_to_contiguous = {
v: k for k, v in enumerate(_coco.getCatIds())}
if self.json_id_to_contiguous is None:
self.json_id_to_contiguous = json_id_to_contiguous
self.contiguous_id_to_json = {
v: k for k, v in self.json_id_to_contiguous.items()}
else:
assert self.json_id_to_contiguous == json_id_to_contiguous
# iterate through the annotations
image_ids = sorted(_coco.getImgIds())
for entry in _coco.loadImgs(image_ids):
abs_path = self._parse_image_path(entry)
if not os.path.exists(abs_path):
raise IOError("Image: {} not exists.".format(abs_path))
label = self._check_load_bbox(_coco, entry)
if not label:
continue
im_aspect_ratios.append(float(entry["width"]) / entry["height"])
items.append(abs_path)
labels.append(label)
return items, labels, im_aspect_ratios
def _check_load_bbox(self, coco, entry):
"""
Check and load ground-truth labels.
"""
entry_id = entry['id']
        # fix pycocotools _isArrayLike, which doesn't work for str in python3
entry_id = [entry_id] if not isinstance(entry_id, (list, tuple)) else entry_id
ann_ids = coco.getAnnIds(imgIds=entry_id, iscrowd=None)
objs = coco.loadAnns(ann_ids)
# check valid bboxes
valid_objs = []
width = entry["width"]
height = entry["height"]
for obj in objs:
if obj["area"] < self._min_object_area:
continue
if obj.get("ignore", 0) == 1:
continue
if not self._use_crowd and obj.get("iscrowd", 0):
continue
# convert from (x, y, w, h) to (xmin, ymin, xmax, ymax) and clip bound
xmin, ymin, xmax, ymax = self.bbox_clip_xyxy(self.bbox_xywh_to_xyxy(obj["bbox"]), width, height)
# require non-zero box area
if obj["area"] > 0 and xmax > xmin and ymax > ymin:
contiguous_cid = self.json_id_to_contiguous[obj["category_id"]]
valid_objs.append([xmin, ymin, xmax, ymax, contiguous_cid])
if not valid_objs:
if not self._skip_empty:
# dummy invalid labels if no valid objects are found
valid_objs.append([-1, -1, -1, -1, -1])
return valid_objs
@staticmethod
def bbox_clip_xyxy(xyxy, width, height):
"""
Clip bounding box with format (xmin, ymin, xmax, ymax) to specified boundary.
All bounding boxes will be clipped to the new region `(0, 0, width, height)`.
Parameters:
----------
xyxy : list, tuple or numpy.ndarray
The bbox in format (xmin, ymin, xmax, ymax).
If numpy.ndarray is provided, we expect multiple bounding boxes with
shape `(N, 4)`.
width : int or float
Boundary width.
height : int or float
Boundary height.
Returns:
-------
tuple or np.array
            The clipped bboxes in format (xmin, ymin, xmax, ymax).
            If input is numpy.ndarray, return is numpy.ndarray correspondingly.
"""
if isinstance(xyxy, (tuple, list)):
if not len(xyxy) == 4:
raise IndexError("Bounding boxes must have 4 elements, given {}".format(len(xyxy)))
x1 = np.minimum(width - 1, np.maximum(0, xyxy[0]))
y1 = np.minimum(height - 1, np.maximum(0, xyxy[1]))
x2 = np.minimum(width - 1, np.maximum(0, xyxy[2]))
y2 = np.minimum(height - 1, np.maximum(0, xyxy[3]))
return x1, y1, x2, y2
elif isinstance(xyxy, np.ndarray):
if not xyxy.size % 4 == 0:
raise IndexError("Bounding boxes must have n * 4 elements, given {}".format(xyxy.shape))
x1 = np.minimum(width - 1, np.maximum(0, xyxy[:, 0]))
y1 = np.minimum(height - 1, np.maximum(0, xyxy[:, 1]))
x2 = np.minimum(width - 1, np.maximum(0, xyxy[:, 2]))
y2 = np.minimum(height - 1, np.maximum(0, xyxy[:, 3]))
return np.hstack((x1, y1, x2, y2))
else:
raise TypeError("Expect input xywh a list, tuple or numpy.ndarray, given {}".format(type(xyxy)))
@staticmethod
def bbox_xywh_to_xyxy(xywh):
"""
Convert bounding boxes from format (xmin, ymin, w, h) to (xmin, ymin, xmax, ymax)
Parameters:
----------
xywh : list, tuple or numpy.ndarray
The bbox in format (x, y, w, h).
If numpy.ndarray is provided, we expect multiple bounding boxes with
shape `(N, 4)`.
Returns:
-------
tuple or np.ndarray
The converted bboxes in format (xmin, ymin, xmax, ymax).
If input is numpy.ndarray, return is numpy.ndarray correspondingly.
"""
if isinstance(xywh, (tuple, list)):
if not len(xywh) == 4:
raise IndexError("Bounding boxes must have 4 elements, given {}".format(len(xywh)))
w, h = np.maximum(xywh[2] - 1, 0), np.maximum(xywh[3] - 1, 0)
return xywh[0], xywh[1], xywh[0] + w, xywh[1] + h
elif isinstance(xywh, np.ndarray):
if not xywh.size % 4 == 0:
raise IndexError("Bounding boxes must have n * 4 elements, given {}".format(xywh.shape))
xyxy = np.hstack((xywh[:, :2], xywh[:, :2] + np.maximum(0, xywh[:, 2:4] - 1)))
return xyxy
else:
raise TypeError("Expect input xywh a list, tuple or numpy.ndarray, given {}".format(type(xywh)))
# ---------------------------------------------------------------------------------------------------------------------
class CocoDetValTransform(object):
def __init__(self,
ds_metainfo):
self.ds_metainfo = ds_metainfo
self.image_size = self.ds_metainfo.input_image_size
self._height = self.image_size[0]
self._width = self.image_size[1]
self._mean = np.array(ds_metainfo.mean_rgb, dtype=np.float32).reshape(1, 1, 3)
self._std = np.array(ds_metainfo.std_rgb, dtype=np.float32).reshape(1, 1, 3)
def __call__(self, src, label):
# resize
img, bbox = src.asnumpy(), label
input_h, input_w = self._height, self._width
h, w, _ = src.shape
s = max(h, w) * 1.0
c = np.array([w / 2., h / 2.], dtype=np.float32)
trans_input = self.get_affine_transform(c, s, 0, [input_w, input_h])
inp = cv2.warpAffine(img, trans_input, (input_w, input_h), flags=cv2.INTER_LINEAR)
output_w = input_w
output_h = input_h
trans_output = self.get_affine_transform(c, s, 0, [output_w, output_h])
for i in range(bbox.shape[0]):
bbox[i, :2] = self.affine_transform(bbox[i, :2], trans_output)
bbox[i, 2:4] = self.affine_transform(bbox[i, 2:4], trans_output)
bbox[:, :2] = np.clip(bbox[:, :2], 0, output_w - 1)
bbox[:, 2:4] = np.clip(bbox[:, 2:4], 0, output_h - 1)
img = inp
# to tensor
img = img.astype(np.float32) / 255.0
img = (img - self._mean) / self._std
img = img.transpose(2, 0, 1).astype(np.float32)
img = mx.nd.array(img)
return img, bbox.astype(img.dtype)
@staticmethod
def get_affine_transform(center,
scale,
rot,
output_size,
shift=np.array([0, 0], dtype=np.float32),
inv=0):
"""
Get affine transform matrix given center, scale and rotation.
Parameters:
----------
center : tuple of float
Center point.
scale : float
Scaling factor.
rot : float
Rotation degree.
output_size : tuple of int
(width, height) of the output size.
shift : float
Shift factor.
inv : bool
Whether inverse the computation.
Returns:
-------
numpy.ndarray
Affine matrix.
"""
if not isinstance(scale, np.ndarray) and not isinstance(scale, list):
scale = np.array([scale, scale], dtype=np.float32)
scale_tmp = scale
src_w = scale_tmp[0]
dst_w = output_size[0]
dst_h = output_size[1]
rot_rad = np.pi * rot / 180
src_dir = CocoDetValTransform.get_rot_dir([0, src_w * -0.5], rot_rad)
dst_dir = np.array([0, dst_w * -0.5], np.float32)
src = np.zeros((3, 2), dtype=np.float32)
dst = np.zeros((3, 2), dtype=np.float32)
src[0, :] = center + scale_tmp * shift
src[1, :] = center + src_dir + scale_tmp * shift
dst[0, :] = [dst_w * 0.5, dst_h * 0.5]
dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5], np.float32) + dst_dir
src[2:, :] = CocoDetValTransform.get_3rd_point(src[0, :], src[1, :])
dst[2:, :] = CocoDetValTransform.get_3rd_point(dst[0, :], dst[1, :])
if inv:
trans = cv2.getAffineTransform(np.float32(dst), np.float32(src))
else:
trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))
return trans
@staticmethod
def get_rot_dir(src_point, rot_rad):
"""
Get rotation direction.
Parameters:
----------
src_point : tuple of float
Original point.
rot_rad : float
Rotation radian.
Returns:
-------
tuple of float
Rotation.
"""
sn, cs = np.sin(rot_rad), np.cos(rot_rad)
src_result = [0, 0]
src_result[0] = src_point[0] * cs - src_point[1] * sn
src_result[1] = src_point[0] * sn + src_point[1] * cs
return src_result
@staticmethod
def get_3rd_point(a, b):
"""
Get the 3rd point position given first two points.
Parameters:
----------
a : tuple of float
First point.
b : tuple of float
Second point.
Returns:
-------
tuple of float
Third point.
"""
direct = a - b
return b + np.array([-direct[1], direct[0]], dtype=np.float32)
@staticmethod
def affine_transform(pt, t):
"""
        Apply an affine transform to a point given transformation matrix t.
        Parameters:
        ----------
        pt : numpy.ndarray
            Point coordinates with shape (2,).
        t : numpy.ndarray
            Transformation matrix with shape (2, 3).
        Returns:
        -------
        numpy.ndarray
            Transformed point with shape (2,).
"""
new_pt = np.array([pt[0], pt[1], 1.], dtype=np.float32).T
new_pt = np.dot(t, new_pt)
return new_pt[:2]
class Tuple(object):
"""
    Wrap multiple batchify functions to form a single function that applies
    each wrapped function to its corresponding input field.
"""
def __init__(self, fn, *args):
if isinstance(fn, (list, tuple)):
self._fn = fn
else:
self._fn = (fn,) + args
def __call__(self, data):
"""
Batchify the input data.
Parameters:
----------
data : list
            The samples to batchify. Each sample should contain N attributes.
Returns:
-------
tuple
A tuple of length N. Contains the batchified result of each attribute in the input.
"""
ret = []
for i, ele_fn in enumerate(self._fn):
ret.append(ele_fn([ele[i] for ele in data]))
return tuple(ret)
class Stack(object):
"""
Stack the input data samples to construct the batch.
"""
def __call__(self, data):
"""
Batchify the input data.
Parameters:
----------
data : list
The input data samples
Returns:
-------
NDArray
Result.
"""
return self._stack_arrs(data, True)
@staticmethod
def _stack_arrs(arrs, use_shared_mem=False):
"""
        Internal implementation for stacking arrays.
"""
if isinstance(arrs[0], mx.nd.NDArray):
if use_shared_mem:
out = mx.nd.empty((len(arrs),) + arrs[0].shape, dtype=arrs[0].dtype,
ctx=mx.Context("cpu_shared", 0))
return mx.nd.stack(*arrs, out=out)
else:
return mx.nd.stack(*arrs)
else:
out = np.asarray(arrs)
if use_shared_mem:
return mx.nd.array(out, ctx=mx.Context("cpu_shared", 0))
else:
return mx.nd.array(out)
class Pad(object):
"""
Pad the input ndarrays along the specific padding axis and stack them to get the output.
"""
def __init__(self, axis=0, pad_val=0, num_shards=1, ret_length=False):
self._axis = axis
self._pad_val = pad_val
self._num_shards = num_shards
self._ret_length = ret_length
def __call__(self, data):
"""
Batchify the input data.
Parameters:
----------
data : list
A list of N samples. Each sample can be 1) ndarray or
2) a list/tuple of ndarrays
Returns:
-------
NDArray
Data in the minibatch. Shape is (N, ...)
NDArray, optional
The sequences' original lengths at the padded axis. Shape is (N,). This will only be
            returned if `ret_length` is True.
"""
if isinstance(data[0], (mx.nd.NDArray, np.ndarray, list)):
padded_arr, original_length = self._pad_arrs_to_max_length(
data, self._axis, self._pad_val, self._num_shards, True)
if self._ret_length:
return padded_arr, original_length
else:
return padded_arr
else:
raise NotImplementedError
@staticmethod
def _pad_arrs_to_max_length(arrs, pad_axis, pad_val, num_shards=1, use_shared_mem=False):
"""
Inner Implementation of the Pad batchify.
"""
if not isinstance(arrs[0], (mx.nd.NDArray, np.ndarray)):
arrs = [np.asarray(ele) for ele in arrs]
if isinstance(pad_axis, tuple):
original_length = []
for axis in pad_axis:
original_length.append(np.array([ele.shape[axis] for ele in arrs]))
original_length = np.stack(original_length).T
else:
original_length = np.array([ele.shape[pad_axis] for ele in arrs])
pad_axis = [pad_axis]
if len(original_length) % num_shards != 0:
logging.warning(
'Batch size cannot be evenly split. Trying to shard %d items into %d shards',
len(original_length), num_shards)
original_length = np.array_split(original_length, num_shards)
max_lengths = [np.max(ll, axis=0, keepdims=len(pad_axis) == 1) for ll in original_length]
# add batch dimension
ret_shape = [[ll.shape[0], ] + list(arrs[0].shape) for ll in original_length]
for i, shape in enumerate(ret_shape):
for j, axis in enumerate(pad_axis):
shape[1 + axis] = max_lengths[i][j]
if use_shared_mem:
ret = [mx.nd.full(shape=tuple(shape), val=pad_val, ctx=mx.Context('cpu_shared', 0),
dtype=arrs[0].dtype) for shape in ret_shape]
original_length = [mx.nd.array(ll, ctx=mx.Context('cpu_shared', 0),
dtype=np.int32) for ll in original_length]
else:
ret = [mx.nd.full(shape=tuple(shape), val=pad_val, dtype=arrs[0].dtype) for shape in
ret_shape]
original_length = [mx.nd.array(ll, dtype=np.int32) for ll in original_length]
for i, arr in enumerate(arrs):
if ret[i // ret[0].shape[0]].shape[1:] == arr.shape:
ret[i // ret[0].shape[0]][i % ret[0].shape[0]] = arr
else:
slices = [slice(0, ll) for ll in arr.shape]
ret[i // ret[0].shape[0]][i % ret[0].shape[0]][tuple(slices)] = arr
if len(ret) == len(original_length) == 1:
return ret[0], original_length[0]
return ret, original_length
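# Minimal usage sketch (added for illustration) of the batchify helpers above,
# mirroring the `Tuple(Stack(), Pad(pad_val=-1))` combination configured in
# CocoDetMetaInfo below: fixed-shape images are stacked, while variable-length
# label arrays are padded with -1 along axis 0.
def _demo_batchify():
    batchify_fn = Tuple(Stack(), Pad(pad_val=-1))
    samples = [
        (np.zeros((3, 4, 4), np.float32), np.array([[0, 0, 2, 2, 1]], np.float32)),
        (np.zeros((3, 4, 4), np.float32), np.array([[0, 0, 1, 1, 0],
                                                    [1, 1, 3, 3, 2]], np.float32)),
    ]
    imgs, labels = batchify_fn(samples)
    # imgs: NDArray of shape (2, 3, 4, 4); labels: (2, 2, 5), sample 0 padded with -1
    return imgs, labels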
def get_post_transform(orig_w, orig_h, out_w, out_h):
"""Get the post prediction affine transforms. This will be used to adjust the prediction results
according to original coco image resolutions.
Parameters:
----------
orig_w : int
Original width of the image.
orig_h : int
Original height of the image.
out_w : int
Width of the output image after prediction.
out_h : int
Height of the output image after prediction.
Returns:
-------
numpy.ndarray
        Affine transform matrix with shape (2, 3).
"""
s = max(orig_w, orig_h) * 1.0
c = np.array([orig_w / 2., orig_h / 2.], dtype=np.float32)
trans_output = CocoDetValTransform.get_affine_transform(c, s, 0, [out_w, out_h], inv=True)
return trans_output
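# Hypothetical usage (added; not in the original file): map a point predicted
# in the 512 x 512 network space back onto a 640 x 480 source image. The
# output-space center must land on the original image center.
def _demo_post_transform():
    trans = get_post_transform(orig_w=640, orig_h=480, out_w=512, out_h=512)
    pt = CocoDetValTransform.affine_transform(np.array([256.0, 256.0]), trans)
    return pt  # approximately (320.0, 240.0)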
class CocoDetMetaInfo(DatasetMetaInfo):
def __init__(self):
super(CocoDetMetaInfo, self).__init__()
self.label = "COCO"
self.short_label = "coco"
self.root_dir_name = "coco"
self.dataset_class = CocoDetDataset
self.num_training_samples = None
self.in_channels = 3
        self.num_classes = len(CocoDetDataset.CLASSES)
self.input_image_size = (512, 512)
self.train_metric_capts = None
self.train_metric_names = None
self.train_metric_extra_kwargs = None
self.val_metric_capts = None
self.val_metric_names = None
self.test_metric_capts = ["Val.mAP"]
self.test_metric_names = ["CocoDetMApMetric"]
self.test_metric_extra_kwargs = [
{"name": "mAP",
"img_height": 512,
"coco_annotations_file_path": None,
"contiguous_id_to_json": None,
"data_shape": None,
"post_affine": get_post_transform}]
self.dataset_class_extra_kwargs = {"skip_empty": False}
self.saver_acc_ind = 0
self.do_transform = True
self.do_transform_first = False
self.last_batch = "keep"
self.batchify_fn = Tuple(Stack(), Pad(pad_val=-1))
self.val_transform = CocoDetValTransform
self.test_transform = CocoDetValTransform
self.ml_type = "det"
self.allow_hybridize = False
self.test_net_extra_kwargs = None
self.mean_rgb = (0.485, 0.456, 0.406)
self.std_rgb = (0.229, 0.224, 0.225)
def add_dataset_parser_arguments(self,
parser,
work_dir_path):
"""
        Create python script parameters (for COCO dataset metainfo).
Parameters:
----------
parser : ArgumentParser
ArgumentParser instance.
work_dir_path : str
Path to working directory.
"""
super(CocoDetMetaInfo, self).add_dataset_parser_arguments(parser, work_dir_path)
parser.add_argument(
"--input-size",
type=int,
nargs=2,
default=self.input_image_size,
help="size of the input for model")
def update(self,
args):
"""
        Update COCO dataset metainfo after user customizing.
Parameters:
----------
args : ArgumentParser
Main script arguments.
"""
super(CocoDetMetaInfo, self).update(args)
self.input_image_size = args.input_size
self.test_metric_extra_kwargs[0]["img_height"] = self.input_image_size[0]
self.test_metric_extra_kwargs[0]["data_shape"] = self.input_image_size
def update_from_dataset(self,
dataset):
"""
Update dataset metainfo after a dataset class instance creation.
Parameters:
----------
        dataset : obj
            A dataset class instance.
"""
self.test_metric_extra_kwargs[0]["coco_annotations_file_path"] = dataset.annotations_file_path
self.test_metric_extra_kwargs[0]["contiguous_id_to_json"] = dataset.contiguous_id_to_json
| 27,151
| 35.691892
| 119
|
py
|
imgclsmob
|
imgclsmob-master/gluon/datasets/ade20k_seg_dataset.py
|
"""
ADE20K semantic segmentation dataset.
"""
import os
import numpy as np
import mxnet as mx
from PIL import Image
from .seg_dataset import SegDataset
from .voc_seg_dataset import VOCMetaInfo
class ADE20KSegDataset(SegDataset):
"""
ADE20K semantic segmentation dataset.
Parameters:
----------
root : str
Path to a folder with `ADEChallengeData2016` subfolder.
mode : str, default 'train'
'train', 'val', 'test', or 'demo'.
transform : callable, optional
A function that transforms the image.
"""
def __init__(self,
root,
mode="train",
transform=None,
**kwargs):
super(ADE20KSegDataset, self).__init__(
root=root,
mode=mode,
transform=transform,
**kwargs)
base_dir_path = os.path.join(root, "ADEChallengeData2016")
assert os.path.exists(base_dir_path), "Please prepare dataset"
image_dir_path = os.path.join(base_dir_path, "images")
mask_dir_path = os.path.join(base_dir_path, "annotations")
mode_dir_name = "training" if mode == "train" else "validation"
image_dir_path = os.path.join(image_dir_path, mode_dir_name)
mask_dir_path = os.path.join(mask_dir_path, mode_dir_name)
self.images = []
self.masks = []
for image_file_name in os.listdir(image_dir_path):
image_file_stem, _ = os.path.splitext(image_file_name)
if image_file_name.endswith(".jpg"):
image_file_path = os.path.join(image_dir_path, image_file_name)
mask_file_name = image_file_stem + ".png"
mask_file_path = os.path.join(mask_dir_path, mask_file_name)
if os.path.isfile(mask_file_path):
self.images.append(image_file_path)
self.masks.append(mask_file_path)
else:
print("Cannot find the mask: {}".format(mask_file_path))
assert (len(self.images) == len(self.masks))
if len(self.images) == 0:
raise RuntimeError("Found 0 images in subfolders of: {}\n".format(base_dir_path))
def __getitem__(self, index):
image = Image.open(self.images[index]).convert("RGB")
# image = mx.image.imread(self.images[index])
if self.mode == "demo":
image = self._img_transform(image)
if self.transform is not None:
image = self.transform(image)
return image, os.path.basename(self.images[index])
mask = Image.open(self.masks[index])
# mask = mx.image.imread(self.masks[index])
if self.mode == "train":
image, mask = self._train_sync_transform(image, mask)
elif self.mode == "val":
image, mask = self._val_sync_transform(image, mask)
else:
assert (self.mode == "test")
image = self._img_transform(image)
mask = self._mask_transform(mask)
if self.transform is not None:
image = self.transform(image)
return image, mask
classes = 150
vague_idx = 150
use_vague = True
background_idx = -1
ignore_bg = False
@staticmethod
def _mask_transform(mask):
np_mask = np.array(mask).astype(np.int32)
np_mask[np_mask == 0] = ADE20KSegDataset.vague_idx + 1
np_mask -= 1
return mx.nd.array(np_mask, mx.cpu())
def __len__(self):
return len(self.images)
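# Worked example (added for illustration): ADE20K annotations store
# "unlabeled" as 0 and the 150 classes as 1..150; _mask_transform above shifts
# this so classes become 0..149 while unlabeled pixels land on vague_idx (150).
def _demo_ade20k_mask_transform():
    raw = Image.fromarray(np.array([[0, 1], [150, 42]], dtype=np.uint8))
    remapped = ADE20KSegDataset._mask_transform(raw)
    return remapped  # asnumpy() == [[150, 0], [149, 41]]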
class ADE20KMetaInfo(VOCMetaInfo):
def __init__(self):
super(ADE20KMetaInfo, self).__init__()
self.label = "ADE20K"
self.short_label = "voc"
self.root_dir_name = "ade20k"
self.dataset_class = ADE20KSegDataset
self.num_classes = ADE20KSegDataset.classes
self.test_metric_extra_kwargs = [
{"vague_idx": ADE20KSegDataset.vague_idx,
"use_vague": ADE20KSegDataset.use_vague,
"macro_average": False},
{"num_classes": ADE20KSegDataset.classes,
"vague_idx": ADE20KSegDataset.vague_idx,
"use_vague": ADE20KSegDataset.use_vague,
"bg_idx": ADE20KSegDataset.background_idx,
"ignore_bg": ADE20KSegDataset.ignore_bg,
"macro_average": False}]
| 4,339
| 34
| 93
|
py
|
imgclsmob
|
imgclsmob-master/gluon/datasets/dataset_metainfo.py
|
"""
Base dataset metainfo class.
"""
import os
class DatasetMetaInfo(object):
"""
Base descriptor of dataset.
"""
def __init__(self):
self.use_imgrec = False
self.do_transform = False
self.do_transform_first = True
self.last_batch = None
self.batchify_fn = None
self.label = None
self.root_dir_name = None
self.root_dir_path = None
self.dataset_class = None
self.dataset_class_extra_kwargs = None
self.num_training_samples = None
self.in_channels = None
self.num_classes = None
self.input_image_size = None
self.train_metric_capts = None
self.train_metric_names = None
self.train_metric_extra_kwargs = None
self.train_use_weighted_sampler = False
self.val_metric_capts = None
self.val_metric_names = None
self.val_metric_extra_kwargs = None
self.test_metric_capts = None
self.test_metric_names = None
self.test_metric_extra_kwargs = None
self.saver_acc_ind = None
self.ml_type = None
self.allow_hybridize = True
self.train_net_extra_kwargs = {"root": os.path.join("~", ".mxnet", "models")}
self.test_net_extra_kwargs = None
self.load_ignore_extra = False
self.loss_name = None
self.loss_extra_kwargs = None
def add_dataset_parser_arguments(self,
parser,
work_dir_path):
"""
Create python script parameters (for dataset specific metainfo).
Parameters:
----------
parser : ArgumentParser
ArgumentParser instance.
work_dir_path : str
Path to working directory.
"""
parser.add_argument(
"--data-dir",
type=str,
default=os.path.join(work_dir_path, self.root_dir_name),
help="path to directory with {} dataset".format(self.label))
parser.add_argument(
"--num-classes",
type=int,
default=self.num_classes,
help="number of classes")
parser.add_argument(
"--in-channels",
type=int,
default=self.in_channels,
help="number of input channels")
parser.add_argument(
"--net-root",
type=str,
default=os.path.join("~", ".mxnet", "models"),
help="root for pretrained net cache")
def update(self,
args):
"""
Update dataset metainfo after user customizing.
Parameters:
----------
args : ArgumentParser
Main script arguments.
"""
self.root_dir_path = args.data_dir
self.num_classes = args.num_classes
self.in_channels = args.in_channels
self.train_net_extra_kwargs["root"] = args.net_root
def update_from_dataset(self,
dataset):
"""
Update dataset metainfo after a dataset class instance creation.
Parameters:
----------
        dataset : obj
            A dataset class instance.
"""
pass
| 3,226
| 29.158879
| 85
|
py
|
imgclsmob
|
imgclsmob-master/gluon/datasets/seg_dataset.py
|
import random
import numpy as np
import mxnet as mx
from PIL import Image, ImageOps, ImageFilter
from mxnet.gluon.data import dataset
class SegDataset(dataset.Dataset):
"""
Segmentation base dataset.
Parameters:
----------
root : str
Path to data folder.
mode : str
'train', 'val', 'test', or 'demo'.
transform : callable
A function that transforms the image.
"""
def __init__(self,
root,
mode,
transform,
base_size=520,
crop_size=480):
assert (mode in ("train", "val", "test", "demo"))
self.root = root
self.mode = mode
self.transform = transform
self.base_size = base_size
self.crop_size = crop_size
def _val_sync_transform(self, image, mask, ctx=mx.cpu()):
short_size = self.crop_size
w, h = image.size
if w > h:
oh = short_size
ow = int(float(w * oh) / h)
else:
ow = short_size
oh = int(float(h * ow) / w)
image = image.resize((ow, oh), Image.BILINEAR)
mask = mask.resize((ow, oh), Image.NEAREST)
# Center crop:
outsize = self.crop_size
x1 = int(round(0.5 * (ow - outsize)))
y1 = int(round(0.5 * (oh - outsize)))
image = image.crop((x1, y1, x1 + outsize, y1 + outsize))
mask = mask.crop((x1, y1, x1 + outsize, y1 + outsize))
# Final transform:
image, mask = self._img_transform(image, ctx=ctx), self._mask_transform(mask, ctx=ctx)
return image, mask
def _train_sync_transform(self, image, mask, ctx=mx.cpu()):
# Random mirror:
if random.random() < 0.5:
image = image.transpose(Image.FLIP_LEFT_RIGHT)
mask = mask.transpose(Image.FLIP_LEFT_RIGHT)
# Random scale (short edge):
short_size = random.randint(int(self.base_size * 0.5), int(self.base_size * 2.0))
w, h = image.size
if w > h:
oh = short_size
ow = int(float(w * oh) / h)
else:
ow = short_size
oh = int(float(h * ow) / w)
image = image.resize((ow, oh), Image.BILINEAR)
mask = mask.resize((ow, oh), Image.NEAREST)
# Pad crop:
crop_size = self.crop_size
if short_size < crop_size:
padh = crop_size - oh if oh < crop_size else 0
padw = crop_size - ow if ow < crop_size else 0
image = ImageOps.expand(image, border=(0, 0, padw, padh), fill=0)
mask = ImageOps.expand(mask, border=(0, 0, padw, padh), fill=0)
# Random crop crop_size:
w, h = image.size
x1 = random.randint(0, w - crop_size)
y1 = random.randint(0, h - crop_size)
image = image.crop((x1, y1, x1 + crop_size, y1 + crop_size))
mask = mask.crop((x1, y1, x1 + crop_size, y1 + crop_size))
# Gaussian blur as in PSP:
if random.random() < 0.5:
image = image.filter(ImageFilter.GaussianBlur(radius=random.random()))
# Final transform:
image, mask = self._img_transform(image, ctx=ctx), self._mask_transform(mask, ctx=ctx)
return image, mask
@staticmethod
def _img_transform(image, ctx=mx.cpu()):
return mx.nd.array(np.array(image), ctx=ctx)
@staticmethod
def _mask_transform(mask, ctx=mx.cpu()):
return mx.nd.array(np.array(mask), ctx=ctx, dtype=np.int32)
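# Added sketch (illustration only) of the short-side resize used in both sync
# transforms above: the shorter image side is resized to `short_size` and the
# longer side is scaled proportionally.
def _demo_short_side_resize():
    w, h, short_size = 1000, 500, 480
    if w > h:
        oh = short_size
        ow = int(float(w * oh) / h)
    else:
        ow = short_size
        oh = int(float(h * ow) / w)
    return ow, oh  # (960, 480): the shorter side now equals short_size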
| 3,490
| 34.622449
| 94
|
py
|
imgclsmob
|
imgclsmob-master/gluon/datasets/coco_hpe2_dataset.py
|
"""
COCO keypoint detection (2D multiple human pose estimation) dataset (for Lightweight OpenPose).
"""
import os
import json
import math
import cv2
from operator import itemgetter
import numpy as np
from mxnet.gluon.data import dataset
from .dataset_metainfo import DatasetMetaInfo
class CocoHpe2Dataset(dataset.Dataset):
"""
COCO keypoint detection (2D multiple human pose estimation) dataset.
Parameters:
----------
root : string
Path to `annotations`, `train2017`, and `val2017` folders.
mode : string, default 'train'
'train', 'val', 'test', or 'demo'.
transform : callable, optional
A function that transforms the image.
"""
def __init__(self,
root,
mode="train",
transform=None):
super(CocoHpe2Dataset, self).__init__()
self._root = os.path.expanduser(root)
self.mode = mode
self.transform = transform
mode_name = "train" if mode == "train" else "val"
annotations_dir_path = os.path.join(root, "annotations")
annotations_file_path = os.path.join(annotations_dir_path, "person_keypoints_" + mode_name + "2017.json")
with open(annotations_file_path, "r") as f:
self.file_names = json.load(f)["images"]
self.image_dir_path = os.path.join(root, mode_name + "2017")
self.annotations_file_path = annotations_file_path
def __str__(self):
return self.__class__.__name__ + "(" + self._root + ")"
def __len__(self):
return len(self.file_names)
def __getitem__(self, idx):
file_name = self.file_names[idx]["file_name"]
image_file_path = os.path.join(self.image_dir_path, file_name)
image = cv2.imread(image_file_path, flags=cv2.IMREAD_COLOR)
# image = cv2.cvtColor(img, code=cv2.COLOR_BGR2RGB)
img_mean = (128, 128, 128)
img_scale = 1.0 / 256
base_height = 368
stride = 8
pad_value = (0, 0, 0)
height, width, _ = image.shape
image = self.normalize(image, img_mean, img_scale)
ratio = base_height / float(image.shape[0])
image = cv2.resize(image, (0, 0), fx=ratio, fy=ratio, interpolation=cv2.INTER_CUBIC)
min_dims = [base_height, max(image.shape[1], base_height)]
image, pad = self.pad_width(
image,
stride,
pad_value,
min_dims)
image = image.astype(np.float32)
image = image.transpose((2, 0, 1))
# image = torch.from_numpy(image)
# if self.transform is not None:
# image = self.transform(image)
image_id = int(os.path.splitext(os.path.basename(file_name))[0])
label = np.array([image_id, 1.0] + pad + [height, width], np.float32)
# label = torch.from_numpy(label)
return image, label
@staticmethod
def normalize(img,
img_mean,
img_scale):
img = np.array(img, dtype=np.float32)
img = (img - img_mean) * img_scale
return img
@staticmethod
def pad_width(img,
stride,
pad_value,
min_dims):
h, w, _ = img.shape
h = min(min_dims[0], h)
min_dims[0] = math.ceil(min_dims[0] / float(stride)) * stride
min_dims[1] = max(min_dims[1], w)
min_dims[1] = math.ceil(min_dims[1] / float(stride)) * stride
top = int(math.floor((min_dims[0] - h) / 2.0))
left = int(math.floor((min_dims[1] - w) / 2.0))
bottom = int(min_dims[0] - h - top)
right = int(min_dims[1] - w - left)
pad = [top, left, bottom, right]
padded_img = cv2.copyMakeBorder(
src=img,
top=top,
bottom=bottom,
left=left,
right=right,
borderType=cv2.BORDER_CONSTANT,
value=pad_value)
return padded_img, pad
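# Illustrative check (added; not in the original source): pad_width grows each
# spatial dimension to the next multiple of `stride`, splitting the padding
# evenly between opposite borders.
def _demo_pad_width():
    img = np.zeros((368, 401, 3), dtype=np.float32)
    padded, pad = CocoHpe2Dataset.pad_width(
        img, stride=8, pad_value=(0, 0, 0), min_dims=[368, max(401, 368)])
    # 401 -> ceil(401 / 8) * 8 == 408, hence pad == [0, 3, 0, 4]
    return padded.shape, pad  # ((368, 408, 3), [0, 3, 0, 4])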
# ---------------------------------------------------------------------------------------------------------------------
class CocoHpe2ValTransform(object):
def __init__(self,
ds_metainfo):
self.ds_metainfo = ds_metainfo
def __call__(self, src, label):
return src, label
def extract_keypoints(heatmap,
all_keypoints,
total_keypoint_num):
heatmap[heatmap < 0.1] = 0
heatmap_with_borders = np.pad(heatmap, [(2, 2), (2, 2)], mode="constant")
heatmap_center = heatmap_with_borders[1:heatmap_with_borders.shape[0] - 1, 1:heatmap_with_borders.shape[1] - 1]
heatmap_left = heatmap_with_borders[1:heatmap_with_borders.shape[0] - 1, 2:heatmap_with_borders.shape[1]]
heatmap_right = heatmap_with_borders[1:heatmap_with_borders.shape[0] - 1, 0:heatmap_with_borders.shape[1] - 2]
heatmap_up = heatmap_with_borders[2:heatmap_with_borders.shape[0], 1:heatmap_with_borders.shape[1] - 1]
heatmap_down = heatmap_with_borders[0:heatmap_with_borders.shape[0] - 2, 1:heatmap_with_borders.shape[1] - 1]
heatmap_peaks = (heatmap_center > heatmap_left) &\
(heatmap_center > heatmap_right) &\
(heatmap_center > heatmap_up) &\
(heatmap_center > heatmap_down)
heatmap_peaks = heatmap_peaks[1:heatmap_center.shape[0] - 1, 1:heatmap_center.shape[1] - 1]
keypoints = list(zip(np.nonzero(heatmap_peaks)[1], np.nonzero(heatmap_peaks)[0])) # (w, h)
keypoints = sorted(keypoints, key=itemgetter(0))
suppressed = np.zeros(len(keypoints), np.uint8)
keypoints_with_score_and_id = []
keypoint_num = 0
for i in range(len(keypoints)):
if suppressed[i]:
continue
for j in range(i + 1, len(keypoints)):
if math.sqrt((keypoints[i][0] - keypoints[j][0]) ** 2 + (keypoints[i][1] - keypoints[j][1]) ** 2) < 6:
suppressed[j] = 1
keypoint_with_score_and_id = (
keypoints[i][0],
keypoints[i][1],
heatmap[keypoints[i][1], keypoints[i][0]],
total_keypoint_num + keypoint_num)
keypoints_with_score_and_id.append(keypoint_with_score_and_id)
keypoint_num += 1
all_keypoints.append(keypoints_with_score_and_id)
return keypoint_num
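# Added sketch (illustration only): extract_keypoints keeps a pixel if, after
# thresholding at 0.1, it strictly exceeds its four axis-aligned neighbours; a
# single synthetic peak therefore yields exactly one keypoint.
def _demo_extract_keypoints():
    heatmap = np.zeros((10, 10), dtype=np.float32)
    heatmap[4, 5] = 0.9
    all_keypoints = []
    num = extract_keypoints(heatmap, all_keypoints, total_keypoint_num=0)
    # num == 1; all_keypoints[0][0] ~ (5, 4, 0.9, 0) in (x, y, score, id) order
    return num, all_keypoints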
def group_keypoints(all_keypoints_by_type,
pafs,
pose_entry_size=20,
min_paf_score=0.05):
def linspace2d(start, stop, n=10):
points = 1 / (n - 1) * (stop - start)
return points[:, None] * np.arange(n) + start[:, None]
BODY_PARTS_KPT_IDS = [[1, 2], [1, 5], [2, 3], [3, 4], [5, 6], [6, 7], [1, 8], [8, 9], [9, 10], [1, 11],
[11, 12], [12, 13], [1, 0], [0, 14], [14, 16], [0, 15], [15, 17], [2, 16], [5, 17]]
BODY_PARTS_PAF_IDS = ([12, 13], [20, 21], [14, 15], [16, 17], [22, 23], [24, 25], [0, 1], [2, 3], [4, 5],
[6, 7], [8, 9], [10, 11], [28, 29], [30, 31], [34, 35], [32, 33], [36, 37], [18, 19],
[26, 27])
pose_entries = []
all_keypoints = np.array([item for sublist in all_keypoints_by_type for item in sublist])
for part_id in range(len(BODY_PARTS_PAF_IDS)):
part_pafs = pafs[:, :, BODY_PARTS_PAF_IDS[part_id]]
kpts_a = all_keypoints_by_type[BODY_PARTS_KPT_IDS[part_id][0]]
kpts_b = all_keypoints_by_type[BODY_PARTS_KPT_IDS[part_id][1]]
num_kpts_a = len(kpts_a)
num_kpts_b = len(kpts_b)
kpt_a_id = BODY_PARTS_KPT_IDS[part_id][0]
kpt_b_id = BODY_PARTS_KPT_IDS[part_id][1]
if num_kpts_a == 0 and num_kpts_b == 0: # no keypoints for such body part
continue
elif num_kpts_a == 0: # body part has just 'b' keypoints
for i in range(num_kpts_b):
num = 0
for j in range(len(pose_entries)): # check if already in some pose, was added by another body part
if pose_entries[j][kpt_b_id] == kpts_b[i][3]:
num += 1
continue
if num == 0:
pose_entry = np.ones(pose_entry_size) * -1
pose_entry[kpt_b_id] = kpts_b[i][3] # keypoint idx
pose_entry[-1] = 1 # num keypoints in pose
pose_entry[-2] = kpts_b[i][2] # pose score
pose_entries.append(pose_entry)
continue
elif num_kpts_b == 0: # body part has just 'a' keypoints
for i in range(num_kpts_a):
num = 0
for j in range(len(pose_entries)):
if pose_entries[j][kpt_a_id] == kpts_a[i][3]:
num += 1
continue
if num == 0:
pose_entry = np.ones(pose_entry_size) * -1
pose_entry[kpt_a_id] = kpts_a[i][3]
pose_entry[-1] = 1
pose_entry[-2] = kpts_a[i][2]
pose_entries.append(pose_entry)
continue
connections = []
for i in range(num_kpts_a):
kpt_a = np.array(kpts_a[i][0:2])
for j in range(num_kpts_b):
kpt_b = np.array(kpts_b[j][0:2])
mid_point = [(), ()]
mid_point[0] = (int(round((kpt_a[0] + kpt_b[0]) * 0.5)),
int(round((kpt_a[1] + kpt_b[1]) * 0.5)))
mid_point[1] = mid_point[0]
vec = [kpt_b[0] - kpt_a[0], kpt_b[1] - kpt_a[1]]
vec_norm = math.sqrt(vec[0] ** 2 + vec[1] ** 2)
if vec_norm == 0:
continue
vec[0] /= vec_norm
vec[1] /= vec_norm
cur_point_score = (vec[0] * part_pafs[mid_point[0][1], mid_point[0][0], 0] +
vec[1] * part_pafs[mid_point[1][1], mid_point[1][0], 1])
height_n = pafs.shape[0] // 2
success_ratio = 0
                point_num = 10  # number of points to integrate the paf over
if cur_point_score > -100:
passed_point_score = 0
passed_point_num = 0
x, y = linspace2d(kpt_a, kpt_b)
for point_idx in range(point_num):
px = int(round(x[point_idx]))
py = int(round(y[point_idx]))
paf = part_pafs[py, px, 0:2]
cur_point_score = vec[0] * paf[0] + vec[1] * paf[1]
if cur_point_score > min_paf_score:
passed_point_score += cur_point_score
passed_point_num += 1
success_ratio = passed_point_num / point_num
ratio = 0
if passed_point_num > 0:
ratio = passed_point_score / passed_point_num
ratio += min(height_n / vec_norm - 1, 0)
if ratio > 0 and success_ratio > 0.8:
score_all = ratio + kpts_a[i][2] + kpts_b[j][2]
connections.append([i, j, ratio, score_all])
if len(connections) > 0:
connections = sorted(connections, key=itemgetter(2), reverse=True)
num_connections = min(num_kpts_a, num_kpts_b)
has_kpt_a = np.zeros(num_kpts_a, dtype=np.int32)
has_kpt_b = np.zeros(num_kpts_b, dtype=np.int32)
filtered_connections = []
for row in range(len(connections)):
if len(filtered_connections) == num_connections:
break
i, j, cur_point_score = connections[row][0:3]
if not has_kpt_a[i] and not has_kpt_b[j]:
filtered_connections.append([kpts_a[i][3], kpts_b[j][3], cur_point_score])
has_kpt_a[i] = 1
has_kpt_b[j] = 1
connections = filtered_connections
if len(connections) == 0:
continue
if part_id == 0:
pose_entries = [np.ones(pose_entry_size) * -1 for _ in range(len(connections))]
for i in range(len(connections)):
pose_entries[i][BODY_PARTS_KPT_IDS[0][0]] = connections[i][0]
pose_entries[i][BODY_PARTS_KPT_IDS[0][1]] = connections[i][1]
pose_entries[i][-1] = 2
pose_entries[i][-2] = np.sum(all_keypoints[connections[i][0:2], 2]) + connections[i][2]
elif part_id == 17 or part_id == 18:
kpt_a_id = BODY_PARTS_KPT_IDS[part_id][0]
kpt_b_id = BODY_PARTS_KPT_IDS[part_id][1]
for i in range(len(connections)):
for j in range(len(pose_entries)):
if pose_entries[j][kpt_a_id] == connections[i][0] and pose_entries[j][kpt_b_id] == -1:
pose_entries[j][kpt_b_id] = connections[i][1]
elif pose_entries[j][kpt_b_id] == connections[i][1] and pose_entries[j][kpt_a_id] == -1:
pose_entries[j][kpt_a_id] = connections[i][0]
continue
else:
kpt_a_id = BODY_PARTS_KPT_IDS[part_id][0]
kpt_b_id = BODY_PARTS_KPT_IDS[part_id][1]
for i in range(len(connections)):
num = 0
for j in range(len(pose_entries)):
if pose_entries[j][kpt_a_id] == connections[i][0]:
pose_entries[j][kpt_b_id] = connections[i][1]
num += 1
pose_entries[j][-1] += 1
pose_entries[j][-2] += all_keypoints[connections[i][1], 2] + connections[i][2]
if num == 0:
pose_entry = np.ones(pose_entry_size) * -1
pose_entry[kpt_a_id] = connections[i][0]
pose_entry[kpt_b_id] = connections[i][1]
pose_entry[-1] = 2
pose_entry[-2] = np.sum(all_keypoints[connections[i][0:2], 2]) + connections[i][2]
pose_entries.append(pose_entry)
filtered_entries = []
for i in range(len(pose_entries)):
if pose_entries[i][-1] < 3 or (pose_entries[i][-2] / pose_entries[i][-1] < 0.2):
continue
filtered_entries.append(pose_entries[i])
pose_entries = np.asarray(filtered_entries)
return pose_entries, all_keypoints
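# Hedged sketch (not part of the original file): illustrates the pose entry
# layout built above. An entry has pose_entry_size slots (18 keypoint slots
# plus score and count, i.e. 20 here): slots 0..17 hold indices into
# `all_keypoints` (-1 when the joint is missing), slot -2 accumulates the pose
# score and slot -1 counts the found keypoints. `_demo_pose_entry_layout` is a
# hypothetical helper name.
def _demo_pose_entry_layout():
    import numpy as np
    entry = np.ones(20) * -1
    entry[1] = 5       # e.g. 'neck' points at all_keypoints[5]
    entry[-2] = 0.93   # accumulated pose score
    entry[-1] = 1      # one keypoint found so far
    return entry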
def convert_to_coco_format(pose_entries, all_keypoints):
coco_keypoints = []
scores = []
for n in range(len(pose_entries)):
if len(pose_entries[n]) == 0:
continue
keypoints = [0] * 17 * 3
to_coco_map = [0, -1, 6, 8, 10, 5, 7, 9, 12, 14, 16, 11, 13, 15, 2, 1, 4, 3]
person_score = pose_entries[n][-2]
position_id = -1
for keypoint_id in pose_entries[n][:-2]:
position_id += 1
if position_id == 1: # no 'neck' in COCO
continue
cx, cy, score, visibility = 0, 0, 0, 0 # keypoint not found
if keypoint_id != -1:
cx, cy, score = all_keypoints[int(keypoint_id), 0:3]
cx = cx + 0.5
cy = cy + 0.5
visibility = 1
keypoints[to_coco_map[position_id] * 3 + 0] = cx
keypoints[to_coco_map[position_id] * 3 + 1] = cy
keypoints[to_coco_map[position_id] * 3 + 2] = visibility
coco_keypoints.append(keypoints)
scores.append(person_score * max(0, (pose_entries[n][-1] - 1))) # -1 for 'neck'
return coco_keypoints, scores
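# Hedged sketch (not part of the original file): shows how to read back the
# flat COCO keypoint list returned above; each person is 17 (x, y, visibility)
# triples, so the index math below mirrors the `to_coco_map` writes.
# `_demo_read_coco_keypoints` is a hypothetical helper name.
def _demo_read_coco_keypoints(coco_keypoints, person_idx=0, joint_idx=0):
    kpts = coco_keypoints[person_idx]
    x = kpts[joint_idx * 3 + 0]
    y = kpts[joint_idx * 3 + 1]
    visibility = kpts[joint_idx * 3 + 2]
    return x, y, visibility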
def recalc_pose(pred,
label):
label_img_id = label[:, 0].astype(np.int32)
# label_score = label[:, 1]
pads = label[:, 2:6].astype(np.int32)
heights = label[:, 6].astype(np.int32)
widths = label[:, 7].astype(np.int32)
keypoints = 19
stride = 8
heatmap2ds = pred[:, :keypoints]
paf2ds = pred[:, keypoints:(3 * keypoints)]
pred_pts_score = []
pred_person_score = []
label_img_id_ = []
batch = pred.shape[0]
for batch_i in range(batch):
label_img_id_i = label_img_id[batch_i]
pad = list(pads[batch_i])
height = int(heights[batch_i])
width = int(widths[batch_i])
heatmap2d = heatmap2ds[batch_i]
paf2d = paf2ds[batch_i]
heatmaps = np.transpose(heatmap2d, (1, 2, 0))
heatmaps = cv2.resize(heatmaps, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC)
        heatmaps = heatmaps[pad[0]:heatmaps.shape[0] - pad[2], pad[1]:heatmaps.shape[1] - pad[3], :]
heatmaps = cv2.resize(heatmaps, (width, height), interpolation=cv2.INTER_CUBIC)
pafs = np.transpose(paf2d, (1, 2, 0))
pafs = cv2.resize(pafs, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC)
pafs = pafs[pad[0]:pafs.shape[0] - pad[2], pad[1]:pafs.shape[1] - pad[3], :]
pafs = cv2.resize(pafs, (width, height), interpolation=cv2.INTER_CUBIC)
total_keypoints_num = 0
all_keypoints_by_type = []
for kpt_idx in range(18): # 19th for bg
total_keypoints_num += extract_keypoints(
heatmaps[:, :, kpt_idx],
all_keypoints_by_type,
total_keypoints_num)
pose_entries, all_keypoints = group_keypoints(
all_keypoints_by_type,
pafs)
coco_keypoints, scores = convert_to_coco_format(
pose_entries,
all_keypoints)
pred_pts_score.append(coco_keypoints)
pred_person_score.append(scores)
label_img_id_.append([label_img_id_i] * len(scores))
return np.array(pred_pts_score).reshape((-1, 17, 3)), np.array(pred_person_score)[0], np.array(label_img_id_[0])
# ---------------------------------------------------------------------------------------------------------------------
class CocoHpe2MetaInfo(DatasetMetaInfo):
def __init__(self):
super(CocoHpe2MetaInfo, self).__init__()
self.label = "COCO"
self.short_label = "coco"
self.root_dir_name = "coco"
self.dataset_class = CocoHpe2Dataset
self.num_training_samples = None
self.in_channels = 3
self.num_classes = 17
self.input_image_size = (368, 368)
self.train_metric_capts = None
self.train_metric_names = None
self.train_metric_extra_kwargs = None
self.val_metric_capts = None
self.val_metric_names = None
self.test_metric_capts = ["Val.CocoOksAp"]
self.test_metric_names = ["CocoHpeOksApMetric"]
self.test_metric_extra_kwargs = [
{"name": "OksAp",
"coco_annotations_file_path": None,
"use_file": False,
"pose_postprocessing_fn": lambda x, y: recalc_pose(x, y)}]
self.saver_acc_ind = 0
self.do_transform = True
self.val_transform = CocoHpe2ValTransform
self.test_transform = CocoHpe2ValTransform
self.ml_type = "hpe"
self.test_net_extra_kwargs = None
self.mean_rgb = (0.485, 0.456, 0.406)
self.std_rgb = (0.229, 0.224, 0.225)
self.load_ignore_extra = False
def add_dataset_parser_arguments(self,
parser,
work_dir_path):
"""
        Create python script parameters (for COCO dataset metainfo).
Parameters:
----------
parser : ArgumentParser
ArgumentParser instance.
work_dir_path : str
Path to working directory.
"""
super(CocoHpe2MetaInfo, self).add_dataset_parser_arguments(parser, work_dir_path)
parser.add_argument(
"--input-size",
type=int,
nargs=2,
default=self.input_image_size,
help="size of the input for model")
parser.add_argument(
"--load-ignore-extra",
action="store_true",
help="ignore extra layers in the source PyTroch model")
def update(self,
args):
"""
        Update COCO dataset metainfo after user customization.
Parameters:
----------
args : ArgumentParser
Main script arguments.
"""
super(CocoHpe2MetaInfo, self).update(args)
self.input_image_size = args.input_size
self.load_ignore_extra = args.load_ignore_extra
def update_from_dataset(self,
dataset):
"""
Update dataset metainfo after a dataset class instance creation.
Parameters:
----------
        dataset : obj
A dataset class instance.
"""
self.test_metric_extra_kwargs[0]["coco_annotations_file_path"] = dataset.annotations_file_path
| 20,786
| 39.8389
| 119
|
py
|
imgclsmob
|
imgclsmob-master/gluon/datasets/svhn_cls_dataset.py
|
"""
SVHN classification dataset.
"""
import os
import numpy as np
import mxnet as mx
from mxnet import gluon
from mxnet.gluon.utils import download, check_sha1
from .cifar10_cls_dataset import CIFAR10MetaInfo
class SVHN(gluon.data.dataset._DownloadedDataset):
"""
SVHN image classification dataset from http://ufldl.stanford.edu/housenumbers/.
Each sample is an image (in 3D NDArray) with shape (32, 32, 3).
Note: The SVHN dataset assigns the label `10` to the digit `0`. However, in this Dataset,
we assign the label `0` to the digit `0`.
Parameters:
----------
root : str, default $MXNET_HOME/datasets/svhn
Path to temp folder for storing data.
mode : str, default 'train'
'train', 'val', or 'test'.
transform : function, default None
A user defined callback that transforms each sample.
"""
def __init__(self,
root=os.path.join("~", ".mxnet", "datasets", "svhn"),
mode="train",
transform=None):
self._mode = mode
self._train_data = [("http://ufldl.stanford.edu/housenumbers/train_32x32.mat", "train_32x32.mat",
"e6588cae42a1a5ab5efe608cc5cd3fb9aaffd674")]
self._test_data = [("http://ufldl.stanford.edu/housenumbers/test_32x32.mat", "test_32x32.mat",
"29b312382ca6b9fba48d41a7b5c19ad9a5462b20")]
super(SVHN, self).__init__(root, transform)
def _get_data(self):
if any(not os.path.exists(path) or not check_sha1(path, sha1) for path, sha1 in
((os.path.join(self._root, name), sha1) for _, name, sha1 in self._train_data + self._test_data)):
for url, _, sha1 in self._train_data + self._test_data:
download(url=url, path=self._root, sha1_hash=sha1)
if self._mode == "train":
data_files = self._train_data[0]
else:
data_files = self._test_data[0]
import scipy.io as sio
loaded_mat = sio.loadmat(os.path.join(self._root, data_files[1]))
data = loaded_mat["X"]
data = np.transpose(data, (3, 0, 1, 2))
self._data = mx.nd.array(data, dtype=data.dtype)
self._label = loaded_mat["y"].astype(np.int32).squeeze()
np.place(self._label, self._label == 10, 0)
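# Hedged sketch (not part of the original file): demonstrates the in-place
# label remapping performed above, where SVHN's label `10` (the digit zero)
# becomes `0`. `_demo_svhn_label_remap` is a hypothetical helper name.
def _demo_svhn_label_remap():
    import numpy as np
    labels = np.array([10, 1, 2, 10], dtype=np.int32)
    np.place(labels, labels == 10, 0)
    assert labels.tolist() == [0, 1, 2, 0]
    return labels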
class SVHNMetaInfo(CIFAR10MetaInfo):
def __init__(self):
super(SVHNMetaInfo, self).__init__()
self.label = "SVHN"
self.root_dir_name = "svhn"
self.dataset_class = SVHN
self.num_training_samples = 73257
| 2,574
| 35.785714
| 113
|
py
|
imgclsmob
|
imgclsmob-master/gluon/datasets/coco_hpe3_dataset.py
|
"""
COCO keypoint detection (2D multiple human pose estimation) dataset (for IBPPose).
"""
import os
# import json
import math
import cv2
import numpy as np
from mxnet.gluon.data import dataset
from .dataset_metainfo import DatasetMetaInfo
class CocoHpe3Dataset(dataset.Dataset):
"""
COCO keypoint detection (2D multiple human pose estimation) dataset.
Parameters:
----------
root : string
Path to `annotations`, `train2017`, and `val2017` folders.
mode : string, default 'train'
'train', 'val', 'test', or 'demo'.
transform : callable, optional
A function that transforms the image.
"""
def __init__(self,
root,
mode="train",
transform=None):
super(CocoHpe3Dataset, self).__init__()
self._root = os.path.expanduser(root)
self.mode = mode
self.transform = transform
mode_name = "train" if mode == "train" else "val"
annotations_dir_path = os.path.join(root, "annotations")
annotations_file_path = os.path.join(annotations_dir_path, "person_keypoints_" + mode_name + "2017.json")
# with open(annotations_file_path, "r") as f:
# self.file_names = json.load(f)["images"]
self.image_dir_path = os.path.join(root, mode_name + "2017")
self.annotations_file_path = annotations_file_path
from pycocotools.coco import COCO
self.coco_gt = COCO(self.annotations_file_path)
self.validation_ids = self.coco_gt.getImgIds()[:]
def __str__(self):
return self.__class__.__name__ + "(" + self._root + ")"
def __len__(self):
return len(self.validation_ids)
def __getitem__(self, idx):
# file_name = self.file_names[idx]["file_name"]
image_id = self.validation_ids[idx]
file_name = self.coco_gt.imgs[image_id]["file_name"]
image_file_path = os.path.join(self.image_dir_path, file_name)
image = cv2.imread(image_file_path, flags=cv2.IMREAD_COLOR)
# image = cv2.cvtColor(img, code=cv2.COLOR_BGR2RGB)
image_src_shape = image.shape[:2]
boxsize = 512
max_downsample = 64
pad_value = 128
scale = boxsize / image.shape[0]
if scale * image.shape[0] > 2600 or scale * image.shape[1] > 3800:
scale = min(2600 / image.shape[0], 3800 / image.shape[1])
image = cv2.resize(image, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)
image, pad = self.pad_right_down_corner(image, max_downsample, pad_value)
image = np.float32(image / 255)
image = image.transpose((2, 0, 1))
# image_id = int(os.path.splitext(os.path.basename(file_name))[0])
label = np.array([image_id, 1.0] + pad + list(image_src_shape), np.float32)
return image, label
@staticmethod
def pad_right_down_corner(img,
stride,
pad_value):
h = img.shape[0]
w = img.shape[1]
pad = 4 * [None]
pad[0] = 0 # up
pad[1] = 0 # left
pad[2] = 0 if (h % stride == 0) else stride - (h % stride) # down
pad[3] = 0 if (w % stride == 0) else stride - (w % stride) # right
img_padded = img
pad_up = np.tile(img_padded[0:1, :, :] * 0 + pad_value, (pad[0], 1, 1))
img_padded = np.concatenate((pad_up, img_padded), axis=0)
pad_left = np.tile(img_padded[:, 0:1, :] * 0 + pad_value, (1, pad[1], 1))
img_padded = np.concatenate((pad_left, img_padded), axis=1)
pad_down = np.tile(img_padded[-2:-1, :, :] * 0 + pad_value, (pad[2], 1, 1))
img_padded = np.concatenate((img_padded, pad_down), axis=0)
pad_right = np.tile(img_padded[:, -2:-1, :] * 0 + pad_value, (1, pad[3], 1))
img_padded = np.concatenate((img_padded, pad_right), axis=1)
return img_padded, pad
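# Hedged sketch (not part of the original file): shows that
# pad_right_down_corner only grows the bottom/right edges until both spatial
# dims are multiples of `stride`, and that `pad` records the
# [up, left, down, right] amounts. `_demo_pad_right_down_corner` is a
# hypothetical helper name.
def _demo_pad_right_down_corner():
    import numpy as np
    img = np.zeros((100, 130, 3), dtype=np.uint8)
    padded, pad = CocoHpe3Dataset.pad_right_down_corner(img, stride=64, pad_value=128)
    assert padded.shape[0] % 64 == 0 and padded.shape[1] % 64 == 0
    assert pad[0] == 0 and pad[1] == 0  # the top/left edges are never padded
    return padded.shape, pad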
# ---------------------------------------------------------------------------------------------------------------------
class CocoHpe2ValTransform(object):
def __init__(self,
ds_metainfo):
self.ds_metainfo = ds_metainfo
def __call__(self, src, label):
return src, label
def recalc_pose(pred,
label):
dt_gt_mapping = {0: 0, 1: None, 2: 6, 3: 8, 4: 10, 5: 5, 6: 7, 7: 9, 8: 12, 9: 14, 10: 16, 11: 11, 12: 13, 13: 15,
14: 2, 15: 1, 16: 4, 17: 3}
parts = ["nose", "neck", "Rsho", "Relb", "Rwri", "Lsho", "Lelb", "Lwri", "Rhip", "Rkne", "Rank", "Lhip", "Lkne",
"Lank", "Reye", "Leye", "Rear", "Lear"]
num_parts = len(parts)
parts_dict = dict(zip(parts, range(num_parts)))
limb_from = ['neck', 'neck', 'neck', 'neck', 'neck', 'nose', 'nose', 'Reye', 'Leye', 'neck', 'Rsho', 'Relb', 'neck',
'Lsho', 'Lelb', 'neck', 'Rhip', 'Rkne', 'neck', 'Lhip', 'Lkne', 'nose', 'nose', 'Rsho', 'Rhip', 'Lsho',
'Lhip', 'Rear', 'Lear', 'Rhip']
limb_to = ['nose', 'Reye', 'Leye', 'Rear', 'Lear', 'Reye', 'Leye', 'Rear', 'Lear', 'Rsho', 'Relb', 'Rwri', 'Lsho',
'Lelb', 'Lwri', 'Rhip', 'Rkne', 'Rank', 'Lhip', 'Lkne', 'Lank', 'Rsho', 'Lsho', 'Rhip', 'Lkne', 'Lhip',
'Rkne', 'Rsho', 'Lsho', 'Lhip']
limb_from = [parts_dict[n] for n in limb_from]
limb_to = [parts_dict[n] for n in limb_to]
    assert limb_from == [
        1, 1, 1, 1, 1, 0, 0, 14, 15, 1, 2, 3, 1, 5, 6, 1, 8, 9, 1, 11, 12, 0, 0, 2, 8, 5, 11, 16, 17, 8]
    assert limb_to == [
        0, 14, 15, 16, 17, 14, 15, 16, 17, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 2, 5, 8, 12, 11, 9, 2, 5, 11]
limbs_conn = list(zip(limb_from, limb_to))
limb_seq = limbs_conn
paf_layers = 30
num_layers = 50
stride = 4
label_img_id = label[:, 0].astype(np.int32)
# label_score = label[:, 1]
pads = label[:, 2:6].astype(np.int32)
image_src_shapes = label[:, 6:8].astype(np.int32)
pred_pts_score = []
pred_person_score = []
label_img_id_ = []
batch = pred.shape[0]
for batch_i in range(batch):
label_img_id_i = label_img_id[batch_i]
pad = list(pads[batch_i])
image_src_shape = list(image_src_shapes[batch_i])
output_blob = pred[batch_i].transpose((1, 2, 0))
output_paf = output_blob[:, :, :paf_layers]
output_heatmap = output_blob[:, :, paf_layers:num_layers]
heatmap = cv2.resize(output_heatmap, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC)
heatmap = heatmap[
pad[0]:(output_blob.shape[0] * stride - pad[2]),
pad[1]:(output_blob.shape[1] * stride - pad[3]),
:]
heatmap = cv2.resize(heatmap, (image_src_shape[1], image_src_shape[0]), interpolation=cv2.INTER_CUBIC)
paf = cv2.resize(output_paf, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC)
paf = paf[
pad[0]:(output_blob.shape[0] * stride - pad[2]),
pad[1]:(output_blob.shape[1] * stride - pad[3]),
:]
paf = cv2.resize(paf, (image_src_shape[1], image_src_shape[0]), interpolation=cv2.INTER_CUBIC)
all_peaks = find_peaks(heatmap)
connection_all, special_k = find_connections(all_peaks, paf, image_src_shape[0], limb_seq)
subset, candidate = find_people(connection_all, special_k, all_peaks, limb_seq)
for s in subset[..., 0]:
keypoint_indexes = s[:18]
person_keypoint_coordinates = []
for index in keypoint_indexes:
if index == -1:
X, Y, C = 0, 0, 0
else:
X, Y, C = list(candidate[index.astype(int)][:2]) + [1]
person_keypoint_coordinates.append([X, Y, C])
person_keypoint_coordinates_coco = [None] * 17
for dt_index, gt_index in dt_gt_mapping.items():
if gt_index is None:
continue
person_keypoint_coordinates_coco[gt_index] = person_keypoint_coordinates[dt_index]
pred_pts_score.append(person_keypoint_coordinates_coco)
pred_person_score.append(1 - 1.0 / s[18])
label_img_id_.append(label_img_id_i)
return np.array(pred_pts_score).reshape((-1, 17, 3)), np.array(pred_person_score), np.array(label_img_id_)
def find_peaks(heatmap_avg):
import torch
thre1 = 0.1
offset_radius = 2
all_peaks = []
peak_counter = 0
heatmap_avg = heatmap_avg.astype(np.float32)
filter_map = heatmap_avg[:, :, :18].copy().transpose((2, 0, 1))[None, ...]
filter_map = torch.from_numpy(filter_map).cuda()
filter_map = keypoint_heatmap_nms(filter_map, kernel=3, thre=thre1)
filter_map = filter_map.cpu().numpy().squeeze().transpose((1, 2, 0))
for part in range(18):
map_ori = heatmap_avg[:, :, part]
peaks_binary = filter_map[:, :, part]
peaks = list(zip(np.nonzero(peaks_binary)[1], np.nonzero(peaks_binary)[0])) # note reverse
refined_peaks_with_score = [refine_centroid(map_ori, anchor, offset_radius) for anchor in peaks]
id = range(peak_counter, peak_counter + len(refined_peaks_with_score))
peaks_with_score_and_id = [refined_peaks_with_score[i] + (id[i],) for i in range(len(id))]
all_peaks.append(peaks_with_score_and_id)
peak_counter += len(peaks)
return all_peaks
def keypoint_heatmap_nms(heat, kernel=3, thre=0.1):
from torch.nn import functional as F
# keypoint NMS on heatmap (score map)
pad = (kernel - 1) // 2
pad_heat = F.pad(heat, (pad, pad, pad, pad), mode="reflect")
hmax = F.max_pool2d(pad_heat, (kernel, kernel), stride=1, padding=0)
keep = (hmax == heat).float() * (heat >= thre).float()
return heat * keep
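# Hedged sketch (not part of the original file): runs the heatmap NMS above on
# a tiny CPU tensor; only the 3x3-local maximum survives and its weaker
# neighbour is zeroed. `_demo_keypoint_heatmap_nms` is a hypothetical helper
# name.
def _demo_keypoint_heatmap_nms():
    import torch
    heat = torch.zeros(1, 1, 3, 3)
    heat[0, 0, 1, 1] = 0.9  # local peak
    heat[0, 0, 0, 1] = 0.5  # suppressed neighbour
    kept = keypoint_heatmap_nms(heat, kernel=3, thre=0.1)
    assert float(kept[0, 0, 1, 1]) > 0.0
    assert float(kept[0, 0, 0, 1]) == 0.0
    return kept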
def refine_centroid(scorefmp, anchor, radius):
"""
    Refine the centroid coordinate. It does not affect the results after testing.
:param scorefmp: 2-D numpy array, original regressed score map
:param anchor: python tuple, (x,y) coordinates
:param radius: int, range of considered scores
:return: refined anchor, refined score
"""
x_c, y_c = anchor
x_min = x_c - radius
x_max = x_c + radius + 1
y_min = y_c - radius
y_max = y_c + radius + 1
if y_max > scorefmp.shape[0] or y_min < 0 or x_max > scorefmp.shape[1] or x_min < 0:
return anchor + (scorefmp[y_c, x_c], )
score_box = scorefmp[y_min:y_max, x_min:x_max]
x_grid, y_grid = np.mgrid[-radius:radius + 1, -radius:radius + 1]
offset_x = (score_box * x_grid).sum() / score_box.sum()
offset_y = (score_box * y_grid).sum() / score_box.sum()
x_refine = x_c + offset_x
y_refine = y_c + offset_y
refined_anchor = (x_refine, y_refine)
return refined_anchor + (score_box.mean(),)
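# Hedged sketch (not part of the original file): refines an integer peak on a
# tiny score map; with all the mass at the anchor the refinement is a no-op
# and the returned score is the mean over the (2 * radius + 1)^2 window.
# `_demo_refine_centroid` is a hypothetical helper name.
def _demo_refine_centroid():
    import numpy as np
    scoremap = np.zeros((7, 7), dtype=np.float32)
    scoremap[3, 3] = 1.0
    refined_x, refined_y, score = refine_centroid(scoremap, anchor=(3, 3), radius=2)
    assert refined_x == 3.0 and refined_y == 3.0
    assert abs(score - scoremap[1:6, 1:6].mean()) < 1e-6
    return refined_x, refined_y, score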
def find_connections(all_peaks, paf_avg, image_width, limb_seq):
mid_num_ = 20
thre2 = 0.1
connect_ration = 0.8
connection_all = []
special_k = []
for k in range(len(limb_seq)):
score_mid = paf_avg[:, :, k]
candA = all_peaks[limb_seq[k][0]]
candB = all_peaks[limb_seq[k][1]]
nA = len(candA)
nB = len(candB)
if nA != 0 and nB != 0:
connection_candidate = []
for i in range(nA):
for j in range(nB):
vec = np.subtract(candB[j][:2], candA[i][:2])
norm = math.sqrt(vec[0] * vec[0] + vec[1] * vec[1])
mid_num = min(int(round(norm + 1)), mid_num_)
if norm == 0:
continue
startend = list(zip(np.linspace(candA[i][0], candB[j][0], num=mid_num),
np.linspace(candA[i][1], candB[j][1], num=mid_num)))
limb_response = np.array([score_mid[int(round(startend[I][1])), int(round(startend[I][0]))] for
I in range(len(startend))])
score_midpts = limb_response
score_with_dist_prior = sum(score_midpts) / len(score_midpts) + min(0.5 * image_width / norm - 1, 0)
criterion1 = len(np.nonzero(score_midpts > thre2)[0]) >= connect_ration * len(score_midpts)
criterion2 = score_with_dist_prior > 0
if criterion1 and criterion2:
connection_candidate.append([
i,
j,
score_with_dist_prior,
norm,
0.5 * score_with_dist_prior + 0.25 * candA[i][2] + 0.25 * candB[j][2]])
connection_candidate = sorted(connection_candidate, key=lambda x: x[4], reverse=True)
connection = np.zeros((0, 6))
for c in range(len(connection_candidate)):
i, j, s, limb_len = connection_candidate[c][0:4]
if i not in connection[:, 3] and j not in connection[:, 4]:
connection = np.vstack([connection, [candA[i][3], candB[j][3], s, i, j, limb_len]])
if len(connection) >= min(nA, nB):
break
connection_all.append(connection)
else:
special_k.append(k)
connection_all.append([])
return connection_all, special_k
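# Hedged sketch (not part of the original file): illustrates the PAF
# line-integral scoring used by find_connections; the field is sampled at
# points between two candidate joints and dotted with the unit limb vector.
# `_demo_paf_line_score` is a hypothetical helper name.
def _demo_paf_line_score():
    import numpy as np
    paf_x = np.ones((10, 10), dtype=np.float32)  # x-component field along +x
    kpt_a = np.array([1.0, 5.0])  # (x, y)
    kpt_b = np.array([8.0, 5.0])
    vec = (kpt_b - kpt_a) / np.linalg.norm(kpt_b - kpt_a)
    xs = np.linspace(kpt_a[0], kpt_b[0], num=10).round().astype(int)
    ys = np.linspace(kpt_a[1], kpt_b[1], num=10).round().astype(int)
    scores = vec[0] * paf_x[ys, xs]  # dot product with a pure +x field
    assert np.all(scores > 0.99)
    return scores.mean()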
def find_people(connection_all, special_k, all_peaks, limb_seq):
len_rate = 16.0
connection_tole = 0.7
remove_recon = 0
subset = -1 * np.ones((0, 20, 2))
candidate = np.array([item for sublist in all_peaks for item in sublist])
for k in range(len(limb_seq)):
if k not in special_k:
partAs = connection_all[k][:, 0]
partBs = connection_all[k][:, 1]
indexA, indexB = np.array(limb_seq[k])
for i in range(len(connection_all[k])):
found = 0
subset_idx = [-1, -1]
for j in range(len(subset)):
if subset[j][indexA][0].astype(int) == (partAs[i]).astype(int) or subset[j][indexB][0].astype(
int) == partBs[i].astype(int):
if found >= 2:
continue
subset_idx[found] = j
found += 1
if found == 1:
j = subset_idx[0]
if subset[j][indexB][0].astype(int) == -1 and\
len_rate * subset[j][-1][1] > connection_all[k][i][-1]:
subset[j][indexB][0] = partBs[i]
subset[j][indexB][1] = connection_all[k][i][2]
subset[j][-1][0] += 1
subset[j][-2][0] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]
subset[j][-1][1] = max(connection_all[k][i][-1], subset[j][-1][1])
elif subset[j][indexB][0].astype(int) != partBs[i].astype(int):
if subset[j][indexB][1] >= connection_all[k][i][2]:
pass
else:
if len_rate * subset[j][-1][1] <= connection_all[k][i][-1]:
continue
subset[j][-2][0] -= candidate[subset[j][indexB][0].astype(int), 2] + subset[j][indexB][1]
subset[j][indexB][0] = partBs[i]
subset[j][indexB][1] = connection_all[k][i][2]
subset[j][-2][0] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]
subset[j][-1][1] = max(connection_all[k][i][-1], subset[j][-1][1])
elif subset[j][indexB][0].astype(int) == partBs[i].astype(int) and\
subset[j][indexB][1] <= connection_all[k][i][2]:
subset[j][-2][0] -= candidate[subset[j][indexB][0].astype(int), 2] + subset[j][indexB][1]
subset[j][indexB][0] = partBs[i]
subset[j][indexB][1] = connection_all[k][i][2]
subset[j][-2][0] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]
subset[j][-1][1] = max(connection_all[k][i][-1], subset[j][-1][1])
else:
pass
elif found == 2:
j1, j2 = subset_idx
membership1 = ((subset[j1][..., 0] >= 0).astype(int))[:-2]
membership2 = ((subset[j2][..., 0] >= 0).astype(int))[:-2]
membership = membership1 + membership2
if len(np.nonzero(membership == 2)[0]) == 0:
min_limb1 = np.min(subset[j1, :-2, 1][membership1 == 1])
min_limb2 = np.min(subset[j2, :-2, 1][membership2 == 1])
min_tolerance = min(min_limb1, min_limb2)
if connection_all[k][i][2] < connection_tole * min_tolerance or\
len_rate * subset[j1][-1][1] <= connection_all[k][i][-1]:
continue
subset[j1][:-2][...] += (subset[j2][:-2][...] + 1)
subset[j1][-2:][:, 0] += subset[j2][-2:][:, 0]
subset[j1][-2][0] += connection_all[k][i][2]
subset[j1][-1][1] = max(connection_all[k][i][-1], subset[j1][-1][1])
subset = np.delete(subset, j2, 0)
else:
if connection_all[k][i][0] in subset[j1, :-2, 0]:
c1 = np.where(subset[j1, :-2, 0] == connection_all[k][i][0])
c2 = np.where(subset[j2, :-2, 0] == connection_all[k][i][1])
else:
c1 = np.where(subset[j1, :-2, 0] == connection_all[k][i][1])
c2 = np.where(subset[j2, :-2, 0] == connection_all[k][i][0])
c1 = int(c1[0])
c2 = int(c2[0])
                        assert c1 != c2, "a candidate keypoint is used twice, shared by two people"
if connection_all[k][i][2] < subset[j1][c1][1] and connection_all[k][i][2] < subset[j2][c2][1]:
continue
small_j = j1
remove_c = c1
if subset[j1][c1][1] > subset[j2][c2][1]:
small_j = j2
remove_c = c2
if remove_recon > 0:
subset[small_j][-2][0] -= candidate[subset[small_j][remove_c][0].astype(int), 2] + \
subset[small_j][remove_c][1]
subset[small_j][remove_c][0] = -1
subset[small_j][remove_c][1] = -1
subset[small_j][-1][0] -= 1
elif not found and k < len(limb_seq):
row = -1 * np.ones((20, 2))
row[indexA][0] = partAs[i]
row[indexA][1] = connection_all[k][i][2]
row[indexB][0] = partBs[i]
row[indexB][1] = connection_all[k][i][2]
row[-1][0] = 2
row[-1][1] = connection_all[k][i][-1]
row[-2][0] = sum(candidate[connection_all[k][i, :2].astype(int), 2]) + connection_all[k][i][2]
row = row[np.newaxis, :, :]
subset = np.concatenate((subset, row), axis=0)
deleteIdx = []
for i in range(len(subset)):
if subset[i][-1][0] < 2 or subset[i][-2][0] / subset[i][-1][0] < 0.45:
deleteIdx.append(i)
subset = np.delete(subset, deleteIdx, axis=0)
return subset, candidate
# ---------------------------------------------------------------------------------------------------------------------
class CocoHpe3MetaInfo(DatasetMetaInfo):
def __init__(self):
super(CocoHpe3MetaInfo, self).__init__()
self.label = "COCO"
self.short_label = "coco"
self.root_dir_name = "coco"
self.dataset_class = CocoHpe3Dataset
self.num_training_samples = None
self.in_channels = 3
self.num_classes = 17
self.input_image_size = (256, 256)
self.train_metric_capts = None
self.train_metric_names = None
self.train_metric_extra_kwargs = None
self.val_metric_capts = None
self.val_metric_names = None
self.test_metric_capts = ["Val.CocoOksAp"]
self.test_metric_names = ["CocoHpeOksApMetric"]
self.test_metric_extra_kwargs = [
{"name": "OksAp",
"coco_annotations_file_path": None,
"validation_ids": None,
"use_file": False,
"pose_postprocessing_fn": lambda x, y: recalc_pose(x, y)}]
self.saver_acc_ind = 0
self.do_transform = True
self.val_transform = CocoHpe2ValTransform
self.test_transform = CocoHpe2ValTransform
self.ml_type = "hpe"
self.test_net_extra_kwargs = None
self.mean_rgb = (0.485, 0.456, 0.406)
self.std_rgb = (0.229, 0.224, 0.225)
self.load_ignore_extra = False
def add_dataset_parser_arguments(self,
parser,
work_dir_path):
"""
        Create python script parameters (for COCO dataset metainfo).
Parameters:
----------
parser : ArgumentParser
ArgumentParser instance.
work_dir_path : str
Path to working directory.
"""
super(CocoHpe3MetaInfo, self).add_dataset_parser_arguments(parser, work_dir_path)
parser.add_argument(
"--input-size",
type=int,
nargs=2,
default=self.input_image_size,
help="size of the input for model")
parser.add_argument(
"--load-ignore-extra",
action="store_true",
help="ignore extra layers in the source PyTroch model")
def update(self,
args):
"""
        Update COCO dataset metainfo after user customization.
Parameters:
----------
args : ArgumentParser
Main script arguments.
"""
super(CocoHpe3MetaInfo, self).update(args)
self.input_image_size = args.input_size
self.load_ignore_extra = args.load_ignore_extra
def update_from_dataset(self,
dataset):
"""
Update dataset metainfo after a dataset class instance creation.
Parameters:
----------
        dataset : obj
A dataset class instance.
"""
self.test_metric_extra_kwargs[0]["coco_annotations_file_path"] = dataset.annotations_file_path
# self.test_metric_extra_kwargs[0]["validation_ids"] = dataset.validation_ids
| 23,125
| 40.003546
| 120
|
py
|
imgclsmob
|
imgclsmob-master/gluon/datasets/imagenet1k_rec_cls_dataset.py
|
"""
ImageNet-1K classification dataset (via MXNet image record iterators).
"""
import os
import mxnet as mx
from .imagenet1k_cls_dataset import ImageNet1KMetaInfo, calc_val_resize_value
class ImageNet1KRecMetaInfo(ImageNet1KMetaInfo):
def __init__(self):
super(ImageNet1KRecMetaInfo, self).__init__()
self.use_imgrec = True
self.label = "ImageNet1K_rec"
self.root_dir_name = "imagenet_rec"
self.dataset_class = None
self.num_training_samples = 1281167
self.train_imgrec_file_path = "train.rec"
self.train_imgidx_file_path = "train.idx"
self.val_imgrec_file_path = "val.rec"
self.val_imgidx_file_path = "val.idx"
self.train_imgrec_iter = imagenet_train_imgrec_iter
self.val_imgrec_iter = imagenet_val_imgrec_iter
def imagenet_train_imgrec_iter(ds_metainfo,
batch_size,
num_workers,
mean_rgb=(123.68, 116.779, 103.939),
std_rgb=(58.393, 57.12, 57.375),
jitter_param=0.4,
lighting_param=0.1):
assert (isinstance(ds_metainfo.input_image_size, tuple) and len(ds_metainfo.input_image_size) == 2)
imgrec_file_path = os.path.join(ds_metainfo.root_dir_path, ds_metainfo.train_imgrec_file_path)
imgidx_file_path = os.path.join(ds_metainfo.root_dir_path, ds_metainfo.train_imgidx_file_path)
data_shape = (ds_metainfo.in_channels,) + ds_metainfo.input_image_size
kwargs = {
"path_imgrec": imgrec_file_path,
"path_imgidx": imgidx_file_path,
"preprocess_threads": num_workers,
"shuffle": True,
"batch_size": batch_size,
"data_shape": data_shape,
"mean_r": mean_rgb[0],
"mean_g": mean_rgb[1],
"mean_b": mean_rgb[2],
"std_r": std_rgb[0],
"std_g": std_rgb[1],
"std_b": std_rgb[2],
"rand_mirror": True,
"random_resized_crop": True,
"max_aspect_ratio": (4.0 / 3.0),
"min_aspect_ratio": (3.0 / 4.0),
"max_random_area": 1,
"min_random_area": 0.08,
"brightness": jitter_param,
"saturation": jitter_param,
"contrast": jitter_param,
"pca_noise": lighting_param
}
if ds_metainfo.aug_type == "aug0":
pass
elif ds_metainfo.aug_type == "aug1":
kwargs["inter_method"] = 10
elif ds_metainfo.aug_type == "aug2":
kwargs["inter_method"] = 10
kwargs["max_rotate_angle"] = 30
kwargs["max_shear_ratio"] = 0.05
else:
raise RuntimeError("Unknown augmentation type: {}\n".format(ds_metainfo.aug_type))
return mx.io.ImageRecordIter(**kwargs)
def imagenet_val_imgrec_iter(ds_metainfo,
batch_size,
num_workers,
mean_rgb=(123.68, 116.779, 103.939),
std_rgb=(58.393, 57.12, 57.375)):
assert (isinstance(ds_metainfo.input_image_size, tuple) and len(ds_metainfo.input_image_size) == 2)
imgrec_file_path = os.path.join(ds_metainfo.root_dir_path, ds_metainfo.val_imgrec_file_path)
imgidx_file_path = os.path.join(ds_metainfo.root_dir_path, ds_metainfo.val_imgidx_file_path)
data_shape = (ds_metainfo.in_channels,) + ds_metainfo.input_image_size
resize_value = calc_val_resize_value(
input_image_size=ds_metainfo.input_image_size,
resize_inv_factor=ds_metainfo.resize_inv_factor)
return mx.io.ImageRecordIter(
path_imgrec=imgrec_file_path,
path_imgidx=imgidx_file_path,
preprocess_threads=num_workers,
shuffle=False,
batch_size=batch_size,
resize=resize_value,
data_shape=data_shape,
mean_r=mean_rgb[0],
mean_g=mean_rgb[1],
mean_b=mean_rgb[2],
std_r=std_rgb[0],
std_g=std_rgb[1],
std_b=std_rgb[2])
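# Hedged usage sketch (not part of the original file): wires the validation
# record iterator for a metainfo object. It assumes `ds_metainfo` points at
# existing `val.rec`/`val.idx` files; `_demo_val_iter` is a hypothetical
# helper name and is not executed here.
def _demo_val_iter(ds_metainfo, batch_size=32, num_workers=4):
    val_iter = imagenet_val_imgrec_iter(
        ds_metainfo=ds_metainfo,
        batch_size=batch_size,
        num_workers=num_workers)
    batch = next(iter(val_iter))
    return batch.data[0].shape  # (batch_size, 3, H, W) after normalization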
| 3,974
| 38.75
| 103
|
py
|
imgclsmob
|
imgclsmob-master/gluon/datasets/asr_dataset.py
|
"""
Automatic Speech Recognition (ASR) abstract dataset.
"""
__all__ = ['AsrDataset', 'asr_test_transform']
from mxnet.gluon.data import dataset
from mxnet.gluon.data.vision import transforms
from gluon.gluoncv2.models.jasper import NemoAudioReader
class AsrDataset(dataset.Dataset):
"""
Automatic Speech Recognition (ASR) abstract dataset.
Parameters:
----------
root : str
        Path to the folder storing the dataset.
mode : str
'train', 'val', 'test', or 'demo'.
transform : func
A function that takes data and transforms it.
"""
def __init__(self,
root,
mode,
transform):
super(AsrDataset, self).__init__()
assert (mode in ("train", "val", "test", "demo"))
self.root = root
self.mode = mode
self._transform = transform
self.data = []
self.audio_reader = NemoAudioReader()
def __getitem__(self, index):
wav_file_path, label_text = self.data[index]
audio_data = self.audio_reader.read_from_file(wav_file_path)
audio_len = audio_data.shape[0]
return (audio_data, audio_len), label_text
def __len__(self):
return len(self.data)
def asr_test_transform(ds_metainfo):
assert (ds_metainfo is not None)
return transforms.Compose([])
| 1,358
| 26.18
| 68
|
py
|
imgclsmob
|
imgclsmob-master/gluon/datasets/cifar10_cls_dataset.py
|
"""
CIFAR-10 classification dataset.
"""
import os
import numpy as np
import mxnet as mx
from mxnet.gluon import Block
from mxnet.gluon.data.vision import CIFAR10
from mxnet.gluon.data.vision import transforms
from .dataset_metainfo import DatasetMetaInfo
class CIFAR10Fine(CIFAR10):
"""
CIFAR-10 image classification dataset.
Parameters:
----------
root : str, default $MXNET_HOME/datasets/cifar10
Path to temp folder for storing data.
mode : str, default 'train'
'train', 'val', or 'test'.
transform : function, default None
A user defined callback that transforms each sample.
"""
def __init__(self,
root=os.path.join("~", ".mxnet", "datasets", "cifar10"),
mode="train",
transform=None):
super(CIFAR10Fine, self).__init__(
root=root,
train=(mode == "train"),
transform=transform)
class CIFAR10MetaInfo(DatasetMetaInfo):
def __init__(self):
super(CIFAR10MetaInfo, self).__init__()
self.label = "CIFAR10"
self.short_label = "cifar"
self.root_dir_name = "cifar10"
self.dataset_class = CIFAR10Fine
self.num_training_samples = 50000
self.in_channels = 3
self.num_classes = 10
self.input_image_size = (32, 32)
self.train_metric_capts = ["Train.Err"]
self.train_metric_names = ["Top1Error"]
self.train_metric_extra_kwargs = [{"name": "err"}]
self.val_metric_capts = ["Val.Err"]
self.val_metric_names = ["Top1Error"]
self.val_metric_extra_kwargs = [{"name": "err"}]
self.saver_acc_ind = 0
self.train_transform = cifar10_train_transform
self.val_transform = cifar10_val_transform
self.test_transform = cifar10_val_transform
self.ml_type = "imgcls"
self.loss_name = "SoftmaxCrossEntropy"
class RandomCrop(Block):
"""
Randomly crop `src` with `size` (width, height).
Padding is optional.
Upsample result if `src` is smaller than `size`.
Parameters:
----------
size : int or tuple of (W, H)
Size of the final output.
    pad : int or tuple, default None
if int, size of the zero-padding
if tuple, number of values padded to the edges of each axis.
((before_1, after_1), ... (before_N, after_N)) unique pad widths for each axis.
((before, after),) yields same before and after pad for each axis.
(pad,) or int is a shortcut for before = after = pad width for all axes.
interpolation : int, default 2
Interpolation method for resizing. By default uses bilinear
interpolation. See OpenCV's resize function for available choices.
"""
def __init__(self,
size,
pad=None,
interpolation=2):
super(RandomCrop, self).__init__()
numeric_types = (float, int, np.generic)
if isinstance(size, numeric_types):
size = (size, size)
self._args = (size, interpolation)
if isinstance(pad, int):
self.pad = ((pad, pad), (pad, pad), (0, 0))
else:
self.pad = pad
    def forward(self, x):
        if self.pad:
            x = mx.nd.array(np.pad(x.asnumpy(), self.pad, mode="constant", constant_values=0))
        return mx.image.random_crop(x, *self._args)[0]
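# Hedged usage sketch (not part of the original file): pads a CIFAR-sized
# image by 4 pixels on each side and randomly crops it back to 32 x 32.
# `_demo_random_crop` is a hypothetical helper name.
def _demo_random_crop():
    img = mx.nd.zeros((32, 32, 3), dtype="uint8")
    crop = RandomCrop(size=32, pad=4)
    out = crop(img)
    assert out.shape == (32, 32, 3)
    return out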
def cifar10_train_transform(ds_metainfo,
mean_rgb=(0.4914, 0.4822, 0.4465),
std_rgb=(0.2023, 0.1994, 0.2010),
jitter_param=0.4,
lighting_param=0.1):
assert (ds_metainfo is not None)
assert (ds_metainfo.input_image_size[0] == 32)
return transforms.Compose([
RandomCrop(
size=32,
pad=4),
transforms.RandomFlipLeftRight(),
transforms.RandomColorJitter(
brightness=jitter_param,
contrast=jitter_param,
saturation=jitter_param),
transforms.RandomLighting(lighting_param),
transforms.ToTensor(),
transforms.Normalize(
mean=mean_rgb,
std=std_rgb)
])
def cifar10_val_transform(ds_metainfo,
mean_rgb=(0.4914, 0.4822, 0.4465),
std_rgb=(0.2023, 0.1994, 0.2010)):
assert (ds_metainfo is not None)
return transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(
mean=mean_rgb,
std=std_rgb)
])
| 4,585
| 32.474453
| 91
|
py
|
imgclsmob
|
imgclsmob-master/gluon/datasets/__init__.py
| 0
| 0
| 0
|
py
|
|
imgclsmob
|
imgclsmob-master/gluon/datasets/librispeech_asr_dataset.py
|
"""
LibriSpeech ASR dataset.
"""
__all__ = ['LibriSpeech', 'LibriSpeechMetaInfo']
import os
import numpy as np
from .dataset_metainfo import DatasetMetaInfo
from .asr_dataset import AsrDataset, asr_test_transform
class LibriSpeech(AsrDataset):
"""
LibriSpeech dataset for Automatic Speech Recognition (ASR).
Parameters:
----------
root : str
Path to folder storing the dataset.
mode : str, default 'test'
'train', 'val', 'test', or 'demo'.
subset : str, default 'dev-clean'
Data subset.
transform : callable, optional
A function that transforms the image.
"""
def __init__(self,
root,
mode="test",
subset="dev-clean",
transform=None):
super(LibriSpeech, self).__init__(
root=root,
mode=mode,
transform=transform)
self.vocabulary = [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',
'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', "'"]
vocabulary_dict = {c: i for i, c in enumerate(self.vocabulary)}
import soundfile
root_dir_path = os.path.expanduser(root)
assert os.path.exists(root_dir_path)
data_dir_path = os.path.join(root_dir_path, subset)
assert os.path.exists(data_dir_path)
for speaker_id in os.listdir(data_dir_path):
speaker_dir_path = os.path.join(data_dir_path, speaker_id)
for chapter_id in os.listdir(speaker_dir_path):
chapter_dir_path = os.path.join(speaker_dir_path, chapter_id)
transcript_file_path = os.path.join(chapter_dir_path, "{}-{}.trans.txt".format(speaker_id, chapter_id))
with open(transcript_file_path, "r") as f:
transcripts = dict(x.split(" ", maxsplit=1) for x in f.readlines())
for flac_file_name in os.listdir(chapter_dir_path):
if flac_file_name.endswith(".flac"):
wav_file_name = flac_file_name.replace(".flac", ".wav")
wav_file_path = os.path.join(chapter_dir_path, wav_file_name)
if not os.path.exists(wav_file_path):
flac_file_path = os.path.join(chapter_dir_path, flac_file_name)
pcm, sample_rate = soundfile.read(flac_file_path)
soundfile.write(wav_file_path, pcm, sample_rate)
text = transcripts[wav_file_name.replace(".wav", "")]
text = text.strip("\n ").lower()
text = np.array([vocabulary_dict[c] for c in text], dtype=np.long)
self.data.append((wav_file_path, text))
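# Hedged sketch (not part of the original file): shows the character-to-index
# encoding applied to the transcripts above, using the same 28-character
# vocabulary (num_classes above is len(vocabulary) + 1, presumably for the
# CTC blank). `_demo_encode_transcript` is a hypothetical helper name.
def _demo_encode_transcript(text="hello world"):
    import numpy as np
    vocabulary = [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
                  'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', "'"]
    vocabulary_dict = {c: i for i, c in enumerate(vocabulary)}
    return np.array([vocabulary_dict[c] for c in text.lower()], dtype=np.int64)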
class LibriSpeechMetaInfo(DatasetMetaInfo):
def __init__(self):
super(LibriSpeechMetaInfo, self).__init__()
self.label = "LibriSpeech"
self.short_label = "ls"
self.root_dir_name = "LibriSpeech"
self.dataset_class = LibriSpeech
self.dataset_class_extra_kwargs = {"subset": "dev-clean"}
self.ml_type = "asr"
self.num_classes = 29
self.val_metric_extra_kwargs = [{"vocabulary": None}]
self.val_metric_capts = ["Val.WER"]
self.val_metric_names = ["WER"]
self.test_metric_extra_kwargs = [{"vocabulary": None}]
self.test_metric_capts = ["Test.WER"]
self.test_metric_names = ["WER"]
self.val_transform = asr_test_transform
self.test_transform = asr_test_transform
self.test_net_extra_kwargs = {"from_audio": True}
self.allow_hybridize = False
self.saver_acc_ind = 0
def add_dataset_parser_arguments(self,
parser,
work_dir_path):
"""
Create python script parameters (for dataset specific metainfo).
Parameters:
----------
parser : ArgumentParser
ArgumentParser instance.
work_dir_path : str
Path to working directory.
"""
super(LibriSpeechMetaInfo, self).add_dataset_parser_arguments(parser, work_dir_path)
parser.add_argument(
"--subset",
type=str,
default="dev-clean",
help="data subset")
def update(self,
args):
"""
        Update dataset metainfo after user customization.
Parameters:
----------
args : ArgumentParser
Main script arguments.
"""
super(LibriSpeechMetaInfo, self).update(args)
self.dataset_class_extra_kwargs["subset"] = args.subset
def update_from_dataset(self,
dataset):
"""
Update dataset metainfo after a dataset class instance creation.
Parameters:
----------
        dataset : obj
A dataset class instance.
"""
vocabulary = dataset._data.vocabulary
self.num_classes = len(vocabulary) + 1
self.val_metric_extra_kwargs[0]["vocabulary"] = vocabulary
self.test_metric_extra_kwargs[0]["vocabulary"] = vocabulary
| 5,226
| 36.604317
| 119
|
py
|
imgclsmob
|
imgclsmob-master/gluon/datasets/cub200_2011_cls_dataset.py
|
"""
CUB-200-2011 classification dataset.
"""
import os
import numpy as np
import pandas as pd
import mxnet as mx
from mxnet.gluon.data import dataset
from .imagenet1k_cls_dataset import ImageNet1KMetaInfo
class CUB200_2011(dataset.Dataset):
"""
CUB-200-2011 fine-grained classification dataset.
Parameters:
----------
root : str, default '~/.mxnet/datasets/CUB_200_2011'
        Path to the folder storing the dataset.
mode : str, default 'train'
'train', 'val', or 'test'.
transform : function, default None
A function that takes data and label and transforms them.
"""
def __init__(self,
root=os.path.join("~", ".mxnet", "datasets", "CUB_200_2011"),
mode="train",
transform=None):
super(CUB200_2011, self).__init__()
root_dir_path = os.path.expanduser(root)
assert os.path.exists(root_dir_path)
images_file_name = "images.txt"
images_file_path = os.path.join(root_dir_path, images_file_name)
if not os.path.exists(images_file_path):
raise Exception("Images file doesn't exist: {}".format(images_file_name))
class_file_name = "image_class_labels.txt"
class_file_path = os.path.join(root_dir_path, class_file_name)
if not os.path.exists(class_file_path):
raise Exception("Image class file doesn't exist: {}".format(class_file_name))
split_file_name = "train_test_split.txt"
split_file_path = os.path.join(root_dir_path, split_file_name)
if not os.path.exists(split_file_path):
raise Exception("Split file doesn't exist: {}".format(split_file_name))
images_df = pd.read_csv(
images_file_path,
sep="\s+",
header=None,
index_col=False,
names=["image_id", "image_path"],
dtype={"image_id": np.int32, "image_path": np.unicode})
class_df = pd.read_csv(
class_file_path,
sep="\s+",
header=None,
index_col=False,
names=["image_id", "class_id"],
dtype={"image_id": np.int32, "class_id": np.uint8})
split_df = pd.read_csv(
split_file_path,
sep="\s+",
header=None,
index_col=False,
names=["image_id", "split_flag"],
dtype={"image_id": np.int32, "split_flag": np.uint8})
df = images_df.join(class_df, rsuffix="_class_df").join(split_df, rsuffix="_split_df")
split_flag = 1 if mode == "train" else 0
subset_df = df[df.split_flag == split_flag]
self.image_ids = subset_df["image_id"].values.astype(np.int32)
self.class_ids = subset_df["class_id"].values.astype(np.int32) - 1
self.image_file_names = subset_df["image_path"].values.astype(np.unicode)
images_dir_name = "images"
self.images_dir_path = os.path.join(root_dir_path, images_dir_name)
assert os.path.exists(self.images_dir_path)
self._transform = transform
def __getitem__(self, index):
image_file_name = self.image_file_names[index]
image_file_path = os.path.join(self.images_dir_path, image_file_name)
img = mx.image.imread(image_file_path, flag=1)
label = int(self.class_ids[index])
if self._transform is not None:
return self._transform(img, label)
return img, label
def __len__(self):
return len(self.image_ids)
class CUB200MetaInfo(ImageNet1KMetaInfo):
def __init__(self):
super(CUB200MetaInfo, self).__init__()
self.label = "CUB200_2011"
self.short_label = "cub"
self.root_dir_name = "CUB_200_2011"
self.dataset_class = CUB200_2011
self.num_training_samples = None
self.num_classes = 200
self.train_metric_capts = ["Train.Err"]
self.train_metric_names = ["Top1Error"]
self.train_metric_extra_kwargs = [{"name": "err"}]
self.val_metric_capts = ["Val.Err"]
self.val_metric_names = ["Top1Error"]
self.val_metric_extra_kwargs = [{"name": "err"}]
self.saver_acc_ind = 0
self.test_net_extra_kwargs = {"aux": False}
self.load_ignore_extra = True
def add_dataset_parser_arguments(self,
parser,
work_dir_path):
super(CUB200MetaInfo, self).add_dataset_parser_arguments(parser, work_dir_path)
parser.add_argument(
"--no-aux",
dest="no_aux",
action="store_true",
help="no `aux` mode in model")
def update(self,
args):
super(CUB200MetaInfo, self).update(args)
if args.no_aux:
self.test_net_extra_kwargs = None
self.load_ignore_extra = False
| 4,865
| 35.586466
| 94
|
py
|
imgclsmob
|
imgclsmob-master/gluon/datasets/mcv_asr_dataset.py
|
"""
Mozilla Common Voice ASR dataset.
"""
__all__ = ['McvDataset', 'McvMetaInfo']
import os
import re
import numpy as np
import pandas as pd
from .dataset_metainfo import DatasetMetaInfo
from .asr_dataset import AsrDataset, asr_test_transform
class McvDataset(AsrDataset):
"""
Mozilla Common Voice dataset for Automatic Speech Recognition (ASR).
Parameters:
----------
root : str, default '~/.torch/datasets/mcv'
        Path to the folder storing the dataset.
mode : str, default 'test'
'train', 'val', 'test', or 'demo'.
lang : str, default 'en'
Language.
subset : str, default 'dev'
Data subset.
transform : function, default None
A function that takes data and transforms it.
"""
def __init__(self,
root=os.path.join("~", ".torch", "datasets", "mcv"),
mode="test",
lang="en",
subset="dev",
transform=None):
super(McvDataset, self).__init__(
root=root,
mode=mode,
transform=transform)
assert (lang in ("en", "fr", "de", "it", "es", "ca", "pl", "ru", "ru34"))
self.vocabulary = self.get_vocabulary_for_lang(lang=lang)
desired_audio_sample_rate = 16000
vocabulary_dict = {c: i for i, c in enumerate(self.vocabulary)}
import soundfile
import librosa
from librosa.core import resample as lr_resample
import unicodedata
import unidecode
root_dir_path = os.path.expanduser(root)
assert os.path.exists(root_dir_path)
lang_ = lang if lang != "ru34" else "ru"
data_dir_path = os.path.join(root_dir_path, lang_)
assert os.path.exists(data_dir_path)
metainfo_file_path = os.path.join(data_dir_path, subset + ".tsv")
assert os.path.exists(metainfo_file_path)
metainfo_df = pd.read_csv(
metainfo_file_path,
sep="\t",
header=0,
index_col=False)
metainfo_df = metainfo_df[["path", "sentence"]]
self.data_paths = metainfo_df["path"].values
self.data_sentences = metainfo_df["sentence"].values
clips_dir_path = os.path.join(data_dir_path, "clips")
assert os.path.exists(clips_dir_path)
for clip_file_name, sentence in zip(self.data_paths, self.data_sentences):
mp3_file_path = os.path.join(clips_dir_path, clip_file_name)
assert os.path.exists(mp3_file_path)
wav_file_name = clip_file_name.replace(".mp3", ".wav")
wav_file_path = os.path.join(clips_dir_path, wav_file_name)
# print("==> {}".format(sentence))
text = sentence.lower()
if lang == "en":
text = re.sub("\.|-|–|—", " ", text)
text = re.sub("&", " and ", text)
text = re.sub("ō", "o", text)
text = re.sub("â|á", "a", text)
text = re.sub("é", "e", text)
text = re.sub(",|;|:|!|\?|\"|“|”|‘|’|\(|\)", "", text)
text = re.sub("\s+", " ", text)
text = re.sub(" '", " ", text)
text = re.sub("' ", " ", text)
elif lang == "fr":
text = "".join(c for c in text if unicodedata.combining(c) == 0)
text = re.sub("\.|-|–|—|=|×|\*|†|/|ቀ|_|…", " ", text)
text = re.sub(",|;|:|!|\?|ʻ|“|”|\"|„|«|»|\(|\)", "", text)
text = re.sub("먹|삼|생|고|기|집|\$|ʔ|の|ひ", "", text)
text = re.sub("’|´", "'", text)
text = re.sub("&", " and ", text)
text = re.sub("œ", "oe", text)
text = re.sub("æ", "ae", text)
text = re.sub("á|ā|ã|ä|ą|ă|å", "a", text)
text = re.sub("ö|ō|ó|ð|ổ|ø", "o", text)
text = re.sub("ē|ė|ę", "e", text)
text = re.sub("í|ī", "i", text)
text = re.sub("ú|ū", "u", text)
text = re.sub("ý", "y", text)
text = re.sub("š|ś|ș|ş", "s", text)
text = re.sub("ž|ź|ż", "z", text)
text = re.sub("ñ|ń|ṇ", "n", text)
text = re.sub("ł|ľ", "l", text)
text = re.sub("ć|č", "c", text)
text = re.sub("я", "ya", text)
text = re.sub("ř", "r", text)
text = re.sub("đ", "d", text)
text = re.sub("ț", "t", text)
text = re.sub("þ", "th", text)
text = re.sub("ğ", "g", text)
text = re.sub("ß", "ss", text)
text = re.sub("µ", "mu", text)
text = re.sub("\s+", " ", text)
elif lang == "de":
text = re.sub("\.|-|–|—|/|_|…", " ", text)
text = re.sub(",|;|:|!|\?|\"|'|‘|’|ʻ|ʿ|‚|“|”|\"|„|«|»|›|‹|\(|\)", "", text)
text = re.sub("°|幺|乡|辶", "", text)
text = re.sub("&", " and ", text)
text = re.sub("ə", "a", text)
text = re.sub("æ", "ae", text)
text = re.sub("å|ā|á|ã|ă|â|ą", "a", text)
text = re.sub("ó|ð|ø|ọ|ő|ō|ô", "o", text)
text = re.sub("é|ë|ê|ě|ę", "e", text)
text = re.sub("ū|ứ", "u", text)
text = re.sub("í|ï|ı", "i", text)
text = re.sub("š|ș|ś|ş", "s", text)
text = re.sub("č|ć", "c", text)
text = re.sub("đ", "d", text)
text = re.sub("ğ", "g", text)
text = re.sub("ł", "l", text)
text = re.sub("ř", "r", text)
text = re.sub("ñ", "n", text)
text = re.sub("ț", "t", text)
text = re.sub("ž|ź", "z", text)
text = re.sub("\s+", " ", text)
elif lang == "it":
text = re.sub("\.|-|–|—|/|_|…", " ", text)
text = re.sub(",|;|:|!|\?|\"|“|”|\"|„|«|»|›|‹|<|>|\(|\)", "", text)
text = re.sub("\$|#|禅", "", text)
text = re.sub("’|`", "'", text)
text = re.sub("ə", "a", text)
text = "".join((c if c in self.vocabulary else unidecode.unidecode(c)) for c in text)
text = re.sub("\s+", " ", text)
elif lang == "es":
text = re.sub("\.|-|–|—|/|=|_|{|…", " ", text)
text = re.sub(",|;|:|!|\?|\"|“|”|\"|„|«|»|›|‹|<|>|\(|\)|¿|¡", "", text)
text = re.sub("蝦|夷", "", text)
text = "".join((c if c in self.vocabulary else unidecode.unidecode(c)) for c in text)
text = re.sub("\s+", " ", text)
elif lang == "ca":
text = re.sub("\.|-|–|—|/|=|_|·|@|\+|…", " ", text)
text = re.sub(",|;|:|!|\?|\"|“|”|\"|„|«|»|›|‹|<|>|\(|\)|¿|¡", "", text)
text = re.sub("ঃ|ং", "", text)
text = "".join((c if c in self.vocabulary else unidecode.unidecode(c)) for c in text)
text = re.sub("\s+", " ", text)
elif lang == "pl":
text = re.sub("\.|-|–|—|/|=|_|·|@|\+|…", " ", text)
text = re.sub(",|;|:|!|\?|\"|“|”|\"|„|«|»|›|‹|<|>|\(|\)", "", text)
text = re.sub("q", "k", text)
text = re.sub("x", "ks", text)
text = re.sub("v", "w", text)
text = "".join((c if c in self.vocabulary else unidecode.unidecode(c)) for c in text)
text = re.sub("\s+", " ", text)
elif lang in ("ru", "ru34"):
text = re.sub("по-", "по", text)
text = re.sub("во-", "во", text)
text = re.sub("-то", "то", text)
text = re.sub("\.|−|-|–|—|…", " ", text)
text = re.sub(",|;|:|!|\?|‘|’|\"|“|”|«|»|'", "", text)
text = re.sub("m", "м", text)
text = re.sub("o", "о", text)
text = re.sub("z", "з", text)
text = re.sub("i", "и", text)
text = re.sub("l", "л", text)
text = re.sub("a", "а", text)
text = re.sub("f", "ф", text)
text = re.sub("r", "р", text)
text = re.sub("e", "е", text)
text = re.sub("x", "кс", text)
text = re.sub("h", "х", text)
text = re.sub("\s+", " ", text)
if lang == "ru34":
text = re.sub("ё", "е", text)
text = re.sub(" $", "", text)
# print("<== {}".format(text))
text = np.array([vocabulary_dict[c] for c in text], dtype=np.long)
self.data.append((wav_file_path, text))
# continue
if os.path.exists(wav_file_path):
continue
# pass
x, sr = librosa.load(path=mp3_file_path, sr=None)
if desired_audio_sample_rate != sr:
y = lr_resample(y=x, orig_sr=sr, target_sr=desired_audio_sample_rate)
soundfile.write(file=wav_file_path, data=y, samplerate=desired_audio_sample_rate)
@staticmethod
def get_vocabulary_for_lang(lang="en"):
"""
Get the vocabulary for a language.
Parameters:
----------
lang : str, default 'en'
Language.
Returns:
-------
list of str
Vocabulary set.
"""
assert (lang in ("en", "fr", "de", "it", "es", "ca", "pl", "ru", "ru34"))
if lang == "en":
return [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's',
't', 'u', 'v', 'w', 'x', 'y', 'z', "'"]
elif lang == "fr":
return [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's',
't', 'u', 'v', 'w', 'x', 'y', 'z', "'", 'ç', 'é', 'â', 'ê', 'î', 'ô', 'û', 'à', 'è', 'ù', 'ë', 'ï',
'ü', 'ÿ']
elif lang == "de":
return [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's',
't', 'u', 'v', 'w', 'x', 'y', 'z', 'ä', 'ö', 'ü', 'ß']
elif lang == "it":
return [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's',
't', 'u', 'v', 'w', 'x', 'y', 'z', "'", 'à', 'é', 'è', 'í', 'ì', 'î', 'ó', 'ò', 'ú', 'ù']
elif lang == "es":
return [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's',
't', 'u', 'v', 'w', 'x', 'y', 'z', "'", 'á', 'é', 'í', 'ó', 'ú', 'ñ', 'ü']
elif lang == "ca":
return [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's',
't', 'u', 'v', 'w', 'x', 'y', 'z', "'", 'à', 'é', 'è', 'í', 'ï', 'ó', 'ò', 'ú', 'ü', 'ŀ']
elif lang == "pl":
return [' ', 'a', 'ą', 'b', 'c', 'ć', 'd', 'e', 'ę', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'ł', 'm', 'n', 'ń',
'o', 'ó', 'p', 'r', 's', 'ś', 't', 'u', 'w', 'y', 'z', 'ź', 'ż']
elif lang == "ru":
return [' ', 'а', 'б', 'в', 'г', 'д', 'е', 'ё', 'ж', 'з', 'и', 'й', 'к', 'л', 'м', 'н', 'о', 'п', 'р', 'с',
'т', 'у', 'ф', 'х', 'ц', 'ч', 'ш', 'щ', 'ъ', 'ы', 'ь', 'э', 'ю', 'я']
elif lang == "ru34":
return [' ', 'а', 'б', 'в', 'г', 'д', 'е', 'ж', 'з', 'и', 'й', 'к', 'л', 'м', 'н', 'о', 'п', 'р', 'с', 'т',
'у', 'ф', 'х', 'ц', 'ч', 'ш', 'щ', 'ъ', 'ы', 'ь', 'э', 'ю', 'я']
else:
return None
class McvMetaInfo(DatasetMetaInfo):
def __init__(self):
super(McvMetaInfo, self).__init__()
self.label = "MCV"
self.short_label = "mcv"
self.root_dir_name = "cv-corpus-6.1-2020-12-11"
self.dataset_class = McvDataset
self.lang = "en"
self.dataset_class_extra_kwargs = {
"lang": self.lang,
"subset": "dev"}
self.ml_type = "asr"
self.num_classes = None
self.val_metric_extra_kwargs = [{"vocabulary": None}]
self.val_metric_capts = ["Val.WER"]
self.val_metric_names = ["WER"]
self.test_metric_extra_kwargs = [{"vocabulary": None}]
self.test_metric_capts = ["Test.WER"]
self.test_metric_names = ["WER"]
self.val_transform = asr_test_transform
self.test_transform = asr_test_transform
self.saver_acc_ind = 0
def add_dataset_parser_arguments(self,
parser,
work_dir_path):
"""
Create python script parameters (for dataset specific metainfo).
Parameters:
----------
parser : ArgumentParser
ArgumentParser instance.
work_dir_path : str
Path to working directory.
"""
super(McvMetaInfo, self).add_dataset_parser_arguments(parser, work_dir_path)
parser.add_argument(
"--lang",
type=str,
default="en",
help="language")
parser.add_argument(
"--subset",
type=str,
default="dev",
help="data subset")
def update(self,
args):
"""
        Update dataset metainfo after user customization.
Parameters:
----------
args : ArgumentParser
Main script arguments.
"""
super(McvMetaInfo, self).update(args)
self.lang = args.lang
self.dataset_class_extra_kwargs["lang"] = args.lang
self.dataset_class_extra_kwargs["subset"] = args.subset
def update_from_dataset(self,
dataset):
"""
Update dataset metainfo after a dataset class instance creation.
Parameters:
----------
        dataset : obj
A dataset class instance.
"""
vocabulary = dataset._data.vocabulary
self.num_classes = len(vocabulary) + 1
self.val_metric_extra_kwargs[0]["vocabulary"] = vocabulary
self.test_metric_extra_kwargs[0]["vocabulary"] = vocabulary
| 14,293
| 41.924925
| 119
|
py
|
imgclsmob
|
imgclsmob-master/gluon/datasets/cityscapes_seg_dataset.py
|
"""
Cityscapes semantic segmentation dataset.
"""
import os
import numpy as np
import mxnet as mx
from PIL import Image
from .seg_dataset import SegDataset
from .voc_seg_dataset import VOCMetaInfo
class CityscapesSegDataset(SegDataset):
"""
Cityscapes semantic segmentation dataset.
Parameters:
----------
root : str
Path to a folder with `leftImg8bit` and `gtFine` subfolders.
mode : str, default 'train'
'train', 'val', 'test', or 'demo'.
transform : callable, optional
A function that transforms the image.
"""
def __init__(self,
root,
mode="train",
transform=None,
**kwargs):
super(CityscapesSegDataset, self).__init__(
root=root,
mode=mode,
transform=transform,
**kwargs)
image_dir_path = os.path.join(root, "leftImg8bit")
mask_dir_path = os.path.join(root, "gtFine")
assert os.path.exists(image_dir_path) and os.path.exists(mask_dir_path), "Please prepare dataset"
mode_dir_name = "train" if mode == "train" else "val"
image_dir_path = os.path.join(image_dir_path, mode_dir_name)
# mask_dir_path = os.path.join(mask_dir_path, mode_dir_name)
self.images = []
self.masks = []
for image_subdir_path, _, image_file_names in os.walk(image_dir_path):
for image_file_name in image_file_names:
if image_file_name.endswith(".png"):
image_file_path = os.path.join(image_subdir_path, image_file_name)
mask_file_name = image_file_name.replace("leftImg8bit", "gtFine_labelIds")
mask_subdir_path = image_subdir_path.replace("leftImg8bit", "gtFine")
mask_file_path = os.path.join(mask_subdir_path, mask_file_name)
if os.path.isfile(mask_file_path):
self.images.append(image_file_path)
self.masks.append(mask_file_path)
else:
print("Cannot find the mask: {}".format(mask_file_path))
assert (len(self.images) == len(self.masks))
if len(self.images) == 0:
raise RuntimeError("Found 0 images in subfolders of: {}\n".format(image_dir_path))
def __getitem__(self, index):
image = Image.open(self.images[index]).convert("RGB")
if self.mode == "demo":
image = self._img_transform(image)
if self.transform is not None:
image = self.transform(image)
return image, os.path.basename(self.images[index])
mask = Image.open(self.masks[index])
if self.mode == "train":
image, mask = self._train_sync_transform(image, mask)
elif self.mode == "val":
image, mask = self._val_sync_transform(image, mask)
else:
assert (self.mode == "test")
image = self._img_transform(image)
mask = self._mask_transform(mask)
if self.transform is not None:
image = self.transform(image)
return image, mask
classes = 19
vague_idx = 19
use_vague = True
background_idx = -1
ignore_bg = False
_key = np.array([-1, -1, -1, -1, -1, -1,
-1, -1, 0, 1, -1, -1,
2, 3, 4, -1, -1, -1,
5, -1, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15,
-1, -1, 16, 17, 18])
_mapping = np.array(range(-1, len(_key) - 1)).astype(np.int32)
@staticmethod
def _class_to_index(mask):
values = np.unique(mask)
for value in values:
assert(value in CityscapesSegDataset._mapping)
index = np.digitize(mask.ravel(), CityscapesSegDataset._mapping, right=True)
return CityscapesSegDataset._key[index].reshape(mask.shape)
@staticmethod
def _mask_transform(mask):
np_mask = np.array(mask).astype(np.int32)
np_mask = CityscapesSegDataset._class_to_index(np_mask)
np_mask[np_mask == -1] = CityscapesSegDataset.vague_idx
return mx.nd.array(np_mask, mx.cpu())
def __len__(self):
return len(self.images)
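# Illustrative sketch (hypothetical helper, not in the upstream file): how
# `_class_to_index` remaps raw Cityscapes label ids (0..33) to the 19 train
# ids. `np.digitize` locates each pixel value in `_mapping`, and `_key`
# translates that position to a train id, with -1 marking ignored classes.
def _demo_class_to_index():
    raw = np.array([[7, 8], [26, 0]], dtype=np.int32)  # road, sidewalk, car, unlabeled
    train_ids = CityscapesSegDataset._class_to_index(raw)
    # raw 7 -> 0 (road), 8 -> 1 (sidewalk), 26 -> 13 (car), 0 -> -1 (ignored)
    assert (train_ids == np.array([[0, 1], [13, -1]])).all()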
class CityscapesMetaInfo(VOCMetaInfo):
def __init__(self):
super(CityscapesMetaInfo, self).__init__()
self.label = "Cityscapes"
self.short_label = "voc"
self.root_dir_name = "cityscapes"
self.dataset_class = CityscapesSegDataset
self.num_classes = CityscapesSegDataset.classes
self.test_metric_extra_kwargs = [
{"vague_idx": CityscapesSegDataset.vague_idx,
"use_vague": CityscapesSegDataset.use_vague,
"macro_average": False},
{"num_classes": CityscapesSegDataset.classes,
"vague_idx": CityscapesSegDataset.vague_idx,
"use_vague": CityscapesSegDataset.use_vague,
"bg_idx": CityscapesSegDataset.background_idx,
"ignore_bg": CityscapesSegDataset.ignore_bg,
"macro_average": False}]
| 5,110
| 36.306569
| 105
|
py
|
imgclsmob
|
imgclsmob-master/gluon/datasets/coco_seg_dataset.py
|
"""
COCO semantic segmentation dataset.
"""
import os
import logging
import numpy as np
import mxnet as mx
from PIL import Image
from tqdm import trange
from .seg_dataset import SegDataset
from .voc_seg_dataset import VOCMetaInfo
class CocoSegDataset(SegDataset):
"""
COCO semantic segmentation dataset.
Parameters:
----------
    root : str
        Path to `annotations`, `train2017`, and `val2017` folders.
    mode : str, default 'train'
'train', 'val', 'test', or 'demo'.
transform : callable, optional
A function that transforms the image.
"""
def __init__(self,
root,
mode="train",
transform=None,
**kwargs):
super(CocoSegDataset, self).__init__(
root=root,
mode=mode,
transform=transform,
**kwargs)
year = "2017"
mode_name = "train" if mode == "train" else "val"
annotations_dir_path = os.path.join(root, "annotations")
annotations_file_path = os.path.join(annotations_dir_path, "instances_" + mode_name + year + ".json")
idx_file_path = os.path.join(annotations_dir_path, mode_name + "_idx.npy")
self.image_dir_path = os.path.join(root, mode_name + year)
from pycocotools.coco import COCO
from pycocotools import mask as coco_mask
self.coco = COCO(annotations_file_path)
self.coco_mask = coco_mask
if os.path.exists(idx_file_path):
self.idx = np.load(idx_file_path)
else:
idx_list = list(self.coco.imgs.keys())
self.idx = self._filter_idx(idx_list, idx_file_path)
def __getitem__(self, index):
image_id = int(self.idx[index])
image_metadata = self.coco.loadImgs(image_id)[0]
image_file_name = image_metadata["file_name"]
image_file_path = os.path.join(self.image_dir_path, image_file_name)
image = Image.open(image_file_path).convert("RGB")
if self.mode == "demo":
image = self._img_transform(image)
if self.transform is not None:
image = self.transform(image)
return image, os.path.basename(image_file_path)
coco_target = self.coco.loadAnns(self.coco.getAnnIds(imgIds=image_id))
mask = Image.fromarray(self._gen_seg_mask(
target=coco_target,
height=image_metadata["height"],
width=image_metadata["width"]))
if self.mode == "train":
image, mask = self._train_sync_transform(image, mask)
elif self.mode == "val":
image, mask = self._val_sync_transform(image, mask)
else:
assert (self.mode == "test")
image, mask = self._img_transform(image), self._mask_transform(mask)
if self.transform is not None:
image = self.transform(image)
return image, mask
def _gen_seg_mask(self, target, height, width):
cat_list = [0, 5, 2, 16, 9, 44, 6, 3, 17, 62, 21, 67, 18, 19, 4, 1, 64, 20, 63, 7, 72]
mask = np.zeros((height, width), dtype=np.uint8)
for instance in target:
rle = self.coco_mask.frPyObjects(instance["segmentation"], height, width)
m = self.coco_mask.decode(rle)
cat = instance["category_id"]
if cat in cat_list:
c = cat_list.index(cat)
else:
continue
if len(m.shape) < 3:
mask[:, :] += (mask == 0) * (m * c)
else:
mask[:, :] += (mask == 0) * (((np.sum(m, axis=2)) > 0) * c).astype(np.uint8)
return mask
def _filter_idx(self,
idx_list,
idx_file_path,
pixels_thr=1000):
logging.info("Filtering mask index:")
tbar = trange(len(idx_list))
filtered_idx = []
for i in tbar:
img_id = idx_list[i]
coco_target = self.coco.loadAnns(self.coco.getAnnIds(imgIds=img_id))
img_metadata = self.coco.loadImgs(img_id)[0]
mask = self._gen_seg_mask(
coco_target,
img_metadata["height"],
img_metadata["width"])
if (mask > 0).sum() > pixels_thr:
filtered_idx.append(img_id)
tbar.set_description("Doing: {}/{}, got {} qualified images".format(i, len(idx_list), len(filtered_idx)))
logging.info("Found number of qualified images: {}".format(len(filtered_idx)))
np.save(idx_file_path, np.array(filtered_idx, np.int32))
return filtered_idx
classes = 21
vague_idx = -1
use_vague = False
background_idx = 0
ignore_bg = True
@staticmethod
def _mask_transform(mask, ctx=mx.cpu()):
np_mask = np.array(mask).astype(np.int32)
# print("min={}, max={}".format(np_mask.min(), np_mask.max()))
return mx.nd.array(np_mask, ctx=ctx)
def __len__(self):
return len(self.idx)
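# Illustrative sketch (hypothetical helper, not in the upstream file): the
# painting rule inside `_gen_seg_mask`. Multiplying by `(mask == 0)` means a
# pixel keeps the class of the first instance that covered it, so later
# instances never overwrite earlier ones.
def _demo_mask_painting():
    mask = np.zeros((2, 2), dtype=np.uint8)
    m1 = np.array([[1, 1], [0, 0]], dtype=np.uint8)  # first instance, class 3
    mask[:, :] += (mask == 0) * (m1 * 3)
    m2 = np.array([[1, 0], [1, 0]], dtype=np.uint8)  # second instance, class 5
    mask[:, :] += (mask == 0) * (m2 * 5)
    assert (mask == np.array([[3, 3], [5, 0]])).all()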
class CocoSegMetaInfo(VOCMetaInfo):
def __init__(self):
super(CocoSegMetaInfo, self).__init__()
self.label = "COCO"
self.short_label = "coco"
self.root_dir_name = "coco"
self.dataset_class = CocoSegDataset
self.num_classes = CocoSegDataset.classes
self.train_metric_extra_kwargs = [
{"vague_idx": CocoSegDataset.vague_idx,
"use_vague": CocoSegDataset.use_vague,
"macro_average": False,
"aux": self.train_aux}]
self.val_metric_extra_kwargs = [
{"vague_idx": CocoSegDataset.vague_idx,
"use_vague": CocoSegDataset.use_vague,
"macro_average": False},
{"num_classes": CocoSegDataset.classes,
"vague_idx": CocoSegDataset.vague_idx,
"use_vague": CocoSegDataset.use_vague,
"bg_idx": CocoSegDataset.background_idx,
"ignore_bg": CocoSegDataset.ignore_bg,
"macro_average": False}]
self.test_metric_extra_kwargs = self.val_metric_extra_kwargs
| 6,102
| 35.54491
| 117
|
py
|
imgclsmob
|
imgclsmob-master/gluon/datasets/voc_seg_dataset.py
|
"""
Pascal VOC2012 semantic segmentation dataset.
"""
import os
import numpy as np
import mxnet as mx
from PIL import Image
from mxnet.gluon.data.vision import transforms
from .seg_dataset import SegDataset
from .dataset_metainfo import DatasetMetaInfo
class VOCSegDataset(SegDataset):
"""
Pascal VOC2012 semantic segmentation dataset.
Parameters:
----------
root : str
Path to VOCdevkit folder.
mode : str, default 'train'
'train', 'val', 'test', or 'demo'.
transform : callable, optional
A function that transforms the image.
"""
def __init__(self,
root,
mode="train",
transform=None,
**kwargs):
super(VOCSegDataset, self).__init__(
root=root,
mode=mode,
transform=transform,
**kwargs)
base_dir_path = os.path.join(root, "VOC2012")
image_dir_path = os.path.join(base_dir_path, "JPEGImages")
mask_dir_path = os.path.join(base_dir_path, "SegmentationClass")
splits_dir_path = os.path.join(base_dir_path, "ImageSets", "Segmentation")
if mode == "train":
split_file_path = os.path.join(splits_dir_path, "train.txt")
elif mode in ("val", "test", "demo"):
split_file_path = os.path.join(splits_dir_path, "val.txt")
else:
raise RuntimeError("Unknown dataset splitting mode")
self.images = []
self.masks = []
with open(os.path.join(split_file_path), "r") as lines:
for line in lines:
image_file_path = os.path.join(image_dir_path, line.rstrip('\n') + ".jpg")
assert os.path.isfile(image_file_path)
self.images.append(image_file_path)
mask_file_path = os.path.join(mask_dir_path, line.rstrip('\n') + ".png")
assert os.path.isfile(mask_file_path)
self.masks.append(mask_file_path)
assert (len(self.images) == len(self.masks))
def __getitem__(self, index):
image = Image.open(self.images[index]).convert("RGB")
if self.mode == "demo":
image = self._img_transform(image)
if self.transform is not None:
image = self.transform(image)
return image, os.path.basename(self.images[index])
mask = Image.open(self.masks[index])
if self.mode == "train":
image, mask = self._train_sync_transform(image, mask)
elif self.mode == "val":
image, mask = self._val_sync_transform(image, mask)
else:
assert self.mode == "test"
image, mask = self._img_transform(image), self._mask_transform(mask)
if self.transform is not None:
image = self.transform(image)
return image, mask
classes = 21
vague_idx = 255
use_vague = True
background_idx = 0
ignore_bg = True
@staticmethod
def _mask_transform(mask, ctx=mx.cpu()):
np_mask = np.array(mask).astype(np.int32)
# np_mask[np_mask == 255] = VOCSegDataset.vague_idx
return mx.nd.array(np_mask, ctx=ctx)
def __len__(self):
return len(self.images)
def voc_transform(ds_metainfo,
mean_rgb=(0.485, 0.456, 0.406),
std_rgb=(0.229, 0.224, 0.225)):
assert (ds_metainfo is not None)
return transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(
mean=mean_rgb,
std=std_rgb)
])
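# Hedged usage sketch (hypothetical helper): the composed transform maps an
# HWC uint8 NDArray to a normalized CHW float32 tensor. A plain object()
# stands in for the unused `ds_metainfo` argument, purely for illustration.
def _demo_voc_transform():
    transform = voc_transform(ds_metainfo=object())
    x = mx.nd.zeros((480, 480, 3), dtype="uint8")
    y = transform(x)
    assert (y.shape == (3, 480, 480))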
class VOCMetaInfo(DatasetMetaInfo):
def __init__(self):
super(VOCMetaInfo, self).__init__()
self.label = "VOC"
self.short_label = "voc"
self.root_dir_name = "voc"
self.dataset_class = VOCSegDataset
self.num_training_samples = None
self.in_channels = 3
self.num_classes = VOCSegDataset.classes
self.train_aux = False
self.input_image_size = (480, 480)
self.train_metric_capts = ["Train.PixAcc"]
self.train_metric_names = ["PixelAccuracyMetric"]
self.train_metric_extra_kwargs = [
{"vague_idx": VOCSegDataset.vague_idx,
"use_vague": VOCSegDataset.use_vague,
"macro_average": False,
"aux": self.train_aux}]
self.val_metric_capts = ["Val.PixAcc", "Val.IoU"]
self.val_metric_names = ["PixelAccuracyMetric", "MeanIoUMetric"]
self.val_metric_extra_kwargs = [
{"vague_idx": VOCSegDataset.vague_idx,
"use_vague": VOCSegDataset.use_vague,
"macro_average": False},
{"num_classes": VOCSegDataset.classes,
"vague_idx": VOCSegDataset.vague_idx,
"use_vague": VOCSegDataset.use_vague,
"bg_idx": VOCSegDataset.background_idx,
"ignore_bg": VOCSegDataset.ignore_bg,
"macro_average": False}]
self.test_metric_capts = ["Test.PixAcc", "Test.IoU"]
self.test_metric_names = self.val_metric_names
self.test_metric_extra_kwargs = self.val_metric_extra_kwargs
self.saver_acc_ind = 1
self.do_transform = True
self.train_transform = voc_transform
self.val_transform = voc_transform
self.test_transform = voc_transform
self.ml_type = "imgseg"
self.allow_hybridize = False
self.train_net_extra_kwargs = {"aux": self.train_aux}
self.test_net_extra_kwargs = {"aux": False, "fixed_size": False}
self.load_ignore_extra = True
self.image_base_size = 520
self.image_crop_size = 480
self.loss_name = "SegSoftmaxCrossEntropy"
self.loss_extra_kwargs = None
# self.loss_name = "MixSoftmaxCrossEntropy"
# self.loss_extra_kwargs = {"aux": self.train_aux, "aux_weight": 0.5}
def add_dataset_parser_arguments(self,
parser,
work_dir_path):
super(VOCMetaInfo, self).add_dataset_parser_arguments(parser, work_dir_path)
parser.add_argument(
"--image-base-size",
type=int,
default=520,
help="base image size")
parser.add_argument(
"--image-crop-size",
type=int,
default=480,
help="crop image size")
def update(self,
args):
super(VOCMetaInfo, self).update(args)
self.image_base_size = args.image_base_size
self.image_crop_size = args.image_crop_size
| 6,554
| 34.625
| 90
|
py
|
imgclsmob
|
imgclsmob-master/gluon/datasets/cifar100_cls_dataset.py
|
"""
CIFAR-100 classification dataset.
"""
import os
from mxnet.gluon.data.vision import CIFAR100
from .cifar10_cls_dataset import CIFAR10MetaInfo
class CIFAR100Fine(CIFAR100):
"""
CIFAR-100 image classification dataset.
Parameters:
----------
root : str, default $MXNET_HOME/datasets/cifar100
Path to temp folder for storing data.
mode : str, default 'train'
'train', 'val', or 'test'.
transform : function, default None
A user defined callback that transforms each sample.
"""
def __init__(self,
root=os.path.join("~", ".mxnet", "datasets", "cifar100"),
mode="train",
transform=None):
super(CIFAR100Fine, self).__init__(
root=root,
fine_label=True,
train=(mode == "train"),
transform=transform)
class CIFAR100MetaInfo(CIFAR10MetaInfo):
def __init__(self):
super(CIFAR100MetaInfo, self).__init__()
self.label = "CIFAR100"
self.root_dir_name = "cifar100"
self.dataset_class = CIFAR100Fine
self.num_classes = 100
| 1,133
| 26
| 74
|
py
|
imgclsmob
|
imgclsmob-master/gluon/datasets/hpatches_mch_dataset.py
|
"""
HPatches image matching dataset.
"""
import os
import cv2
import numpy as np
import mxnet as mx
from mxnet.gluon.data import dataset
from mxnet.gluon.data.vision import transforms
from .dataset_metainfo import DatasetMetaInfo
class HPatches(dataset.Dataset):
"""
HPatches (full image sequences) image matching dataset.
Info URL: https://github.com/hpatches/hpatches-dataset
Data URL: http://icvl.ee.ic.ac.uk/vbalnt/hpatches/hpatches-sequences-release.tar.gz
Parameters:
----------
root : str, default '~/.mxnet/datasets/hpatches'
        Path to the folder where the dataset is stored.
mode : str, default 'train'
'train', 'val', or 'test'.
alteration : str, default 'all'
'all', 'i' for illumination or 'v' for viewpoint.
transform : function, default None
A function that takes data and label and transforms them.
"""
def __init__(self,
root=os.path.join("~", ".mxnet", "datasets", "hpatches"),
mode="train",
alteration="all",
transform=None):
super(HPatches, self).__init__()
assert os.path.exists(root)
num_images = 5
image_file_ext = ".ppm"
self.mode = mode
self.image_paths = []
self.warped_image_paths = []
self.homographies = []
subdir_names = [name for name in os.listdir(root) if os.path.isdir(os.path.join(root, name))]
if alteration != "all":
subdir_names = [name for name in subdir_names if name[0] == alteration]
for subdir_name in subdir_names:
subdir_path = os.path.join(root, subdir_name)
for i in range(num_images):
k = i + 2
self.image_paths.append(os.path.join(subdir_path, "1" + image_file_ext))
self.warped_image_paths.append(os.path.join(subdir_path, str(k) + image_file_ext))
self.homographies.append(np.loadtxt(os.path.join(subdir_path, "H_1_" + str(k))))
self.transform = transform
def __getitem__(self, index):
# image = cv2.imread(self.image_paths[index], flags=cv2.IMREAD_GRAYSCALE)
# warped_image = cv2.imread(self.warped_image_paths[index], flags=cv2.IMREAD_GRAYSCALE)
# image = mx.image.imread(self.image_paths[index], flag=0)
# warped_image = mx.image.imread(self.warped_image_paths[index], flag=0)
print("Image file name: {}, index: {}".format(self.image_paths[index], index))
image = cv2.imread(self.image_paths[index], flags=0)
if image.shape[0] > 1500:
image = cv2.resize(
src=image,
dsize=None,
fx=0.5,
fy=0.5,
interpolation=cv2.INTER_AREA)
image = mx.nd.array(np.expand_dims(image, axis=2))
print("Image shape: {}".format(image.shape))
warped_image = cv2.imread(self.warped_image_paths[index], flags=0)
if warped_image.shape[0] > 1500:
warped_image = cv2.resize(
src=warped_image,
dsize=None,
fx=0.5,
fy=0.5,
interpolation=cv2.INTER_AREA)
warped_image = mx.nd.array(np.expand_dims(warped_image, axis=2))
print("W-Image shape: {}".format(warped_image.shape))
homography = mx.nd.array(self.homographies[index])
if self.transform is not None:
image = self.transform(image)
warped_image = self.transform(warped_image)
return image, warped_image, homography
def __len__(self):
return len(self.image_paths)
class HPatchesMetaInfo(DatasetMetaInfo):
def __init__(self):
super(HPatchesMetaInfo, self).__init__()
self.label = "hpatches"
self.short_label = "hpatches"
self.root_dir_name = "hpatches"
self.dataset_class = HPatches
self.ml_type = "imgmch"
self.do_transform = True
self.val_transform = hpatches_val_transform
self.test_transform = hpatches_val_transform
self.allow_hybridize = False
self.test_net_extra_kwargs = {"hybridizable": False, "in_size": None}
def hpatches_val_transform(ds_metainfo):
assert (ds_metainfo is not None)
return transforms.Compose([
transforms.ToTensor()
])
def _test():
dataset = HPatches(
root="../imgclsmob_data/hpatches",
mode="train",
alteration="i",
transform=None)
scale_factor = 0.5
for image, warped_image, _ in dataset:
cv2.imshow(
winname="image",
mat=cv2.resize(
                src=image.asnumpy().astype(np.uint8),
dsize=None,
fx=scale_factor,
fy=scale_factor,
interpolation=cv2.INTER_NEAREST))
cv2.imshow(
winname="warped_image",
mat=cv2.resize(
                src=warped_image.asnumpy().astype(np.uint8),
dsize=None,
fx=scale_factor,
fy=scale_factor,
interpolation=cv2.INTER_NEAREST))
cv2.waitKey(0)
assert (dataset is not None)
if __name__ == "__main__":
_test()
| 5,163
| 33.198675
| 101
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/dataset_utils.py
|
"""
Dataset routines.
"""
__all__ = ['get_dataset_metainfo', 'get_train_data_source', 'get_val_data_source', 'get_test_data_source']
from .datasets.imagenet1k_cls_dataset import ImageNet1KMetaInfo
from .datasets.cub200_2011_cls_dataset import CUB200MetaInfo
from .datasets.cifar10_cls_dataset import CIFAR10MetaInfo
from .datasets.cifar100_cls_dataset import CIFAR100MetaInfo
from .datasets.svhn_cls_dataset import SVHNMetaInfo
from .datasets.voc_seg_dataset import VOCMetaInfo
from .datasets.ade20k_seg_dataset import ADE20KMetaInfo
from .datasets.cityscapes_seg_dataset import CityscapesMetaInfo
from .datasets.coco_seg_dataset import CocoSegMetaInfo
from .datasets.coco_det_dataset import CocoDetMetaInfo
from .datasets.coco_hpe1_dataset import CocoHpe1MetaInfo
from .datasets.coco_hpe2_dataset import CocoHpe2MetaInfo
from .datasets.coco_hpe3_dataset import CocoHpe3MetaInfo
from .datasets.hpatches_mch_dataset import HPatchesMetaInfo
from .datasets.librispeech_asr_dataset import LibriSpeechMetaInfo
from .datasets.mcv_asr_dataset import McvMetaInfo
from torch.utils.data import DataLoader
from torch.utils.data.sampler import WeightedRandomSampler
def get_dataset_metainfo(dataset_name):
"""
Get dataset metainfo by name of dataset.
Parameters:
----------
dataset_name : str
Dataset name.
Returns:
-------
DatasetMetaInfo
Dataset metainfo.
"""
dataset_metainfo_map = {
"ImageNet1K": ImageNet1KMetaInfo,
"CUB200_2011": CUB200MetaInfo,
"CIFAR10": CIFAR10MetaInfo,
"CIFAR100": CIFAR100MetaInfo,
"SVHN": SVHNMetaInfo,
"VOC": VOCMetaInfo,
"ADE20K": ADE20KMetaInfo,
"Cityscapes": CityscapesMetaInfo,
"CocoSeg": CocoSegMetaInfo,
"CocoDet": CocoDetMetaInfo,
"CocoHpe1": CocoHpe1MetaInfo,
"CocoHpe2": CocoHpe2MetaInfo,
"CocoHpe3": CocoHpe3MetaInfo,
"HPatches": HPatchesMetaInfo,
"LibriSpeech": LibriSpeechMetaInfo,
"MCV": McvMetaInfo,
}
if dataset_name in dataset_metainfo_map.keys():
return dataset_metainfo_map[dataset_name]()
else:
raise Exception("Unrecognized dataset: {}".format(dataset_name))
def get_train_data_source(ds_metainfo,
batch_size,
num_workers):
"""
Get data source for training subset.
Parameters:
----------
ds_metainfo : DatasetMetaInfo
Dataset metainfo.
batch_size : int
Batch size.
num_workers : int
Number of background workers.
Returns:
-------
DataLoader
Data source.
"""
transform_train = ds_metainfo.train_transform(ds_metainfo=ds_metainfo)
kwargs = ds_metainfo.dataset_class_extra_kwargs if ds_metainfo.dataset_class_extra_kwargs is not None else {}
dataset = ds_metainfo.dataset_class(
root=ds_metainfo.root_dir_path,
mode="train",
transform=transform_train,
**kwargs)
ds_metainfo.update_from_dataset(dataset)
if not ds_metainfo.train_use_weighted_sampler:
return DataLoader(
dataset=dataset,
batch_size=batch_size,
shuffle=True,
num_workers=num_workers,
pin_memory=True)
else:
sampler = WeightedRandomSampler(
weights=dataset.sample_weights,
num_samples=len(dataset))
return DataLoader(
dataset=dataset,
batch_size=batch_size,
# shuffle=True,
sampler=sampler,
num_workers=num_workers,
pin_memory=True)
def get_val_data_source(ds_metainfo,
batch_size,
num_workers):
"""
Get data source for validation subset.
Parameters:
----------
ds_metainfo : DatasetMetaInfo
Dataset metainfo.
batch_size : int
Batch size.
num_workers : int
Number of background workers.
Returns:
-------
DataLoader
Data source.
"""
transform_val = ds_metainfo.val_transform(ds_metainfo=ds_metainfo)
kwargs = ds_metainfo.dataset_class_extra_kwargs if ds_metainfo.dataset_class_extra_kwargs is not None else {}
dataset = ds_metainfo.dataset_class(
root=ds_metainfo.root_dir_path,
mode="val",
transform=transform_val,
**kwargs)
ds_metainfo.update_from_dataset(dataset)
return DataLoader(
dataset=dataset,
batch_size=batch_size,
shuffle=False,
num_workers=num_workers,
pin_memory=True)
def get_test_data_source(ds_metainfo,
batch_size,
num_workers):
"""
Get data source for testing subset.
Parameters:
----------
ds_metainfo : DatasetMetaInfo
Dataset metainfo.
batch_size : int
Batch size.
num_workers : int
Number of background workers.
Returns:
-------
DataLoader
Data source.
"""
transform_test = ds_metainfo.test_transform(ds_metainfo=ds_metainfo)
kwargs = ds_metainfo.dataset_class_extra_kwargs if ds_metainfo.dataset_class_extra_kwargs is not None else {}
dataset = ds_metainfo.dataset_class(
root=ds_metainfo.root_dir_path,
mode="test",
transform=transform_test,
**kwargs)
ds_metainfo.update_from_dataset(dataset)
return DataLoader(
dataset=dataset,
batch_size=batch_size,
shuffle=False,
num_workers=num_workers,
pin_memory=True)
| 5,563
| 29.404372
| 113
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/model_stats.py
|
"""
Routines for model statistics calculation.
"""
import logging
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
from .pytorchcv.models.common import ChannelShuffle, ChannelShuffle2, Identity, Flatten, Swish, HSigmoid, HSwish,\
InterpolationBlock, HeatmapMaxDetBlock
from .pytorchcv.models.fishnet import ChannelSqueeze
from .pytorchcv.models.irevnet import IRevDownscale, IRevSplitBlock, IRevMergeBlock
from .pytorchcv.models.rir_cifar import RiRFinalBlock
from .pytorchcv.models.proxylessnas import ProxylessUnit
from .pytorchcv.models.lwopenpose_cmupan import LwopDecoderFinalBlock
from .pytorchcv.models.centernet import CenterNetHeatmapMaxDet
from .pytorchcv.models.danet import ScaleBlock
from .pytorchcv.models.jasper import MaskConv1d, NemoMelSpecExtractor
__all__ = ['measure_model']
def calc_block_num_params2(net):
"""
    Calculate the number of trainable parameters in a model/block, recursively over all submodules.
Parameters:
----------
net : Module
Model/block.
Returns:
-------
int
Number of parameters.
"""
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def calc_block_num_params(module):
"""
    Calculate the number of trainable parameters directly owned by the block (its own parameters only, non-recursive).
Parameters:
----------
module : Module
Model/block.
Returns:
-------
int
Number of parameters.
"""
assert isinstance(module, nn.Module)
net_params = filter(lambda p: isinstance(p[1], nn.parameter.Parameter) and p[1].requires_grad,
module._parameters.items())
weight_count = 0
for param in net_params:
weight_count += np.prod(param[1].size())
return weight_count
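# Illustrative sketch (hypothetical helper): on a leaf module the two counters
# agree; on a container, `calc_block_num_params` sees only directly owned
# parameters while `calc_block_num_params2` descends into children.
def _demo_param_counting():
    fc = nn.Linear(4, 3)  # 4 * 3 weights + 3 biases = 15 parameters
    assert (calc_block_num_params(fc) == calc_block_num_params2(fc) == 15)
    seq = nn.Sequential(fc)
    assert (calc_block_num_params(seq) == 0)
    assert (calc_block_num_params2(seq) == 15)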
def measure_model(model,
in_shapes):
"""
Calculate model statistics.
Parameters:
----------
    model : Module
Tested model.
in_shapes : list of tuple of ints
Shapes of the input tensors.
"""
global num_flops
global num_macs
global num_params
# global names
num_flops = 0
num_macs = 0
num_params = 0
# names = {}
def call_hook(module, x, y):
if not (isinstance(module, IRevSplitBlock) or isinstance(module, IRevMergeBlock) or
isinstance(module, RiRFinalBlock) or isinstance(module, InterpolationBlock) or
isinstance(module, MaskConv1d) or isinstance(module, NemoMelSpecExtractor)):
assert (len(x) == 1)
assert (len(module._modules) == 0)
if isinstance(module, nn.Linear):
batch = x[0].shape[0]
in_units = module.in_features
out_units = module.out_features
extra_num_macs = in_units * out_units
if module.bias is None:
extra_num_flops = (2 * in_units - 1) * out_units
else:
extra_num_flops = 2 * in_units * out_units
extra_num_flops *= batch
extra_num_macs *= batch
elif isinstance(module, nn.ReLU):
extra_num_flops = x[0].numel()
extra_num_macs = 0
elif isinstance(module, nn.ELU):
extra_num_flops = 3 * x[0].numel()
extra_num_macs = 0
elif isinstance(module, nn.Sigmoid):
extra_num_flops = 4 * x[0].numel()
extra_num_macs = 0
elif isinstance(module, nn.LeakyReLU):
extra_num_flops = 2 * x[0].numel()
extra_num_macs = 0
elif isinstance(module, nn.ReLU6):
extra_num_flops = x[0].numel()
extra_num_macs = 0
elif isinstance(module, nn.PReLU):
extra_num_flops = 3 * x[0].numel()
extra_num_macs = 0
elif isinstance(module, Swish):
extra_num_flops = 5 * x[0].numel()
extra_num_macs = 0
elif isinstance(module, HSigmoid):
extra_num_flops = x[0].numel()
extra_num_macs = 0
elif isinstance(module, HSwish):
extra_num_flops = 2 * x[0].numel()
extra_num_macs = 0
elif type(module) in [nn.ConvTranspose2d]:
extra_num_flops = 4 * x[0].numel()
extra_num_macs = 0
elif type(module) in [nn.Conv2d]:
batch = x[0].shape[0]
x_h = x[0].shape[2]
x_w = x[0].shape[3]
kernel_size = module.kernel_size
stride = module.stride
dilation = module.dilation
padding = module.padding
groups = module.groups
in_channels = module.in_channels
out_channels = module.out_channels
y_h = (x_h + 2 * padding[0] - dilation[0] * (kernel_size[0] - 1) - 1) // stride[0] + 1
y_w = (x_w + 2 * padding[1] - dilation[1] * (kernel_size[1] - 1) - 1) // stride[1] + 1
assert (out_channels == y.shape[1])
assert (y_h == y.shape[2])
assert (y_w == y.shape[3])
kernel_total_size = kernel_size[0] * kernel_size[1]
y_size = y_h * y_w
extra_num_macs = kernel_total_size * in_channels * y_size * out_channels // groups
if module.bias is None:
extra_num_flops = (2 * kernel_total_size * y_size - 1) * in_channels * out_channels // groups
else:
extra_num_flops = 2 * kernel_total_size * in_channels * y_size * out_channels // groups
extra_num_flops *= batch
extra_num_macs *= batch
elif isinstance(module, nn.BatchNorm2d):
extra_num_flops = 4 * x[0].numel()
extra_num_macs = 0
elif isinstance(module, nn.InstanceNorm2d):
extra_num_flops = 4 * x[0].numel()
extra_num_macs = 0
elif isinstance(module, nn.BatchNorm1d):
extra_num_flops = 4 * x[0].numel()
extra_num_macs = 0
elif type(module) in [nn.MaxPool2d, nn.AvgPool2d]:
assert (x[0].shape[1] == y.shape[1])
batch = x[0].shape[0]
kernel_size = module.kernel_size if isinstance(module.kernel_size, tuple) else\
(module.kernel_size, module.kernel_size)
y_h = y.shape[2]
y_w = y.shape[3]
channels = x[0].shape[1]
y_size = y_h * y_w
pool_total_size = kernel_size[0] * kernel_size[1]
extra_num_flops = channels * y_size * pool_total_size
extra_num_macs = 0
extra_num_flops *= batch
extra_num_macs *= batch
elif type(module) in [nn.AdaptiveAvgPool2d, nn.AdaptiveMaxPool2d]:
assert (x[0].shape[1] == y.shape[1])
batch = x[0].shape[0]
x_h = x[0].shape[2]
x_w = x[0].shape[3]
y_h = y.shape[2]
y_w = y.shape[3]
channels = x[0].shape[1]
y_size = y_h * y_w
pool_total_size = x_h * x_w
extra_num_flops = channels * y_size * pool_total_size
extra_num_macs = 0
extra_num_flops *= batch
extra_num_macs *= batch
elif isinstance(module, nn.Dropout):
extra_num_flops = 0
extra_num_macs = 0
elif isinstance(module, nn.Sequential):
assert (len(module._modules) == 0)
extra_num_flops = 0
extra_num_macs = 0
elif type(module) in [ChannelShuffle, ChannelShuffle2]:
extra_num_flops = x[0].numel()
extra_num_macs = 0
elif isinstance(module, nn.ZeroPad2d):
extra_num_flops = 0
extra_num_macs = 0
elif isinstance(module, Identity):
extra_num_flops = 0
extra_num_macs = 0
elif isinstance(module, nn.PixelShuffle):
extra_num_flops = x[0].numel()
extra_num_macs = 0
elif isinstance(module, Flatten):
extra_num_flops = 0
extra_num_macs = 0
elif isinstance(module, nn.Upsample):
extra_num_flops = 4 * x[0].numel()
extra_num_macs = 0
elif isinstance(module, ChannelSqueeze):
extra_num_flops = x[0].numel()
extra_num_macs = 0
elif isinstance(module, IRevDownscale):
extra_num_flops = 5 * x[0].numel()
extra_num_macs = 0
elif isinstance(module, IRevSplitBlock):
extra_num_flops = x[0].numel()
extra_num_macs = 0
elif isinstance(module, IRevMergeBlock):
extra_num_flops = x[0].numel()
extra_num_macs = 0
elif isinstance(module, RiRFinalBlock):
extra_num_flops = x[0].numel()
extra_num_macs = 0
elif isinstance(module, ProxylessUnit):
extra_num_flops = x[0].numel()
extra_num_macs = 0
elif type(module) in [nn.Softmax2d, nn.Softmax]:
extra_num_flops = 4 * x[0].numel()
extra_num_macs = 0
elif type(module) in [MaskConv1d, nn.Conv1d]:
if isinstance(y, tuple):
assert isinstance(module, MaskConv1d)
y = y[0]
batch = x[0].shape[0]
x_h = x[0].shape[2]
kernel_size = module.kernel_size
stride = module.stride
dilation = module.dilation
padding = module.padding
groups = module.groups
in_channels = module.in_channels
out_channels = module.out_channels
y_h = (x_h + 2 * padding[0] - dilation[0] * (kernel_size[0] - 1) - 1) // stride[0] + 1
assert (out_channels == y.shape[1])
assert (y_h == y.shape[2])
kernel_total_size = kernel_size[0]
y_size = y_h
extra_num_macs = kernel_total_size * in_channels * y_size * out_channels // groups
if module.bias is None:
extra_num_flops = (2 * kernel_total_size * y_size - 1) * in_channels * out_channels // groups
else:
extra_num_flops = 2 * kernel_total_size * in_channels * y_size * out_channels // groups
extra_num_flops *= batch
extra_num_macs *= batch
elif type(module) in [InterpolationBlock, HeatmapMaxDetBlock, CenterNetHeatmapMaxDet, ScaleBlock,
NemoMelSpecExtractor]:
extra_num_flops, extra_num_macs = module.calc_flops(x[0])
elif isinstance(module, LwopDecoderFinalBlock):
if not module.calc_3d_features:
extra_num_flops = 0
extra_num_macs = 0
else:
raise TypeError("LwopDecoderFinalBlock!")
else:
raise TypeError("Unknown layer type: {}".format(type(module)))
global num_flops
global num_macs
global num_params
# global names
num_flops += extra_num_flops
num_macs += extra_num_macs
# if module.name not in names:
# names[module.name] = 1
# num_params += calc_block_num_params(module)
num_params += calc_block_num_params(module)
def register_forward_hooks(a_module):
if len(a_module._modules) > 0:
assert (calc_block_num_params(a_module) == 0)
children_handles = []
for child_module in a_module._modules.values():
child_handles = register_forward_hooks(child_module)
children_handles += child_handles
return children_handles
else:
handle = a_module.register_forward_hook(call_hook)
return [handle]
hook_handles = register_forward_hooks(model)
model.eval()
if len(in_shapes) == 1:
x = Variable(torch.zeros(*in_shapes[0]))
model(x)
elif len(in_shapes) == 2:
x1 = Variable(torch.zeros(*in_shapes[0]))
x2 = Variable(torch.zeros(*in_shapes[1]))
model(x1, x2)
else:
raise NotImplementedError()
num_params1 = calc_block_num_params2(model)
if num_params != num_params1:
logging.warning(
"Calculated numbers of parameters are different: standard method: {},\tper-leaf method: {}".format(
num_params1, num_params))
[h.remove() for h in hook_handles]
return num_flops, num_macs, num_params1
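# Hedged usage sketch (hypothetical helper, not in the upstream file): measure
# a tiny throwaway CNN assembled only from layer types that `call_hook` above
# knows how to count.
def _demo_measure_model():
    model = nn.Sequential(
        nn.Conv2d(3, 8, kernel_size=3, padding=1),
        nn.BatchNorm2d(8),
        nn.ReLU(),
        nn.AdaptiveAvgPool2d(1),
        Flatten(),
        nn.Linear(8, 10))
    flops, macs, params = measure_model(model, in_shapes=[(1, 3, 32, 32)])
    print("FLOPs={}, MACs={}, params={}".format(flops, macs, params))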
| 12,391
| 37.01227
| 114
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/setup.py
|
from setuptools import setup, find_packages
from os import path
from io import open
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='pytorchcv',
version='0.0.67',
description='Image classification and segmentation models for PyTorch',
license='MIT',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/osmr/imgclsmob',
author='Oleg Sémery',
author_email='osemery@gmail.com',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Scientific/Engineering :: Image Recognition',
],
keywords='machine-learning deep-learning neuralnetwork image-classification pytorch imagenet cifar svhn vgg resnet '
'pyramidnet diracnet densenet condensenet wrn drn dpn darknet fishnet espnetv2 xdensnet squeezenet '
'squeezenext shufflenet menet mobilenet igcv3 mnasnet darts xception inception polynet nasnet pnasnet ror '
'proxylessnas dianet efficientnet mixnet image-segmentation voc ade20k cityscapes coco pspnet deeplabv3 '
'fcn',
packages=find_packages(exclude=['datasets', 'metrics', 'others', '*.others', 'others.*', '*.others.*']),
include_package_data=True,
install_requires=['numpy', 'requests'],
)
| 1,571
| 42.666667
| 120
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/utils.py
|
"""
Main routines shared between training and evaluation scripts.
"""
import logging
import os
import numpy as np
import torch.utils.data
from .pytorchcv.model_provider import get_model
from .metrics.metric import EvalMetric, CompositeEvalMetric
from .metrics.cls_metrics import Top1Error, TopKError
from .metrics.seg_metrics import PixelAccuracyMetric, MeanIoUMetric
from .metrics.det_metrics import CocoDetMApMetric
from .metrics.hpe_metrics import CocoHpeOksApMetric
from .metrics.asr_metrics import WER
def prepare_pt_context(num_gpus,
batch_size):
"""
Correct batch size.
Parameters:
----------
num_gpus : int
Number of GPU.
batch_size : int
Batch size for each GPU.
Returns:
-------
bool
Whether to use CUDA.
int
Batch size for all GPUs.
"""
use_cuda = (num_gpus > 0)
batch_size *= max(1, num_gpus)
return use_cuda, batch_size
def prepare_model(model_name,
use_pretrained,
pretrained_model_file_path,
use_cuda,
use_data_parallel=True,
net_extra_kwargs=None,
load_ignore_extra=False,
num_classes=None,
in_channels=None,
remap_to_cpu=False,
remove_module=False):
"""
Create and initialize model by name.
Parameters:
----------
model_name : str
Model name.
use_pretrained : bool
Whether to use pretrained weights.
pretrained_model_file_path : str
Path to file with pretrained weights.
use_cuda : bool
Whether to use CUDA.
use_data_parallel : bool, default True
Whether to use parallelization.
net_extra_kwargs : dict, default None
Extra parameters for model.
load_ignore_extra : bool, default False
Whether to ignore extra layers in pretrained model.
num_classes : int, default None
Number of classes.
in_channels : int, default None
Number of input channels.
remap_to_cpu : bool, default False
        Whether to remap the model to CPU during loading.
remove_module : bool, default False
        Whether to strip the DataParallel wrapper ('module.' prefix) from the loaded model.
Returns:
-------
Module
Model.
"""
kwargs = {"pretrained": use_pretrained}
if num_classes is not None:
kwargs["num_classes"] = num_classes
if in_channels is not None:
kwargs["in_channels"] = in_channels
if net_extra_kwargs is not None:
kwargs.update(net_extra_kwargs)
net = get_model(model_name, **kwargs)
if pretrained_model_file_path:
assert (os.path.isfile(pretrained_model_file_path))
logging.info("Loading model: {}".format(pretrained_model_file_path))
checkpoint = torch.load(
pretrained_model_file_path,
map_location=(None if use_cuda and not remap_to_cpu else "cpu"))
if (type(checkpoint) == dict) and ("state_dict" in checkpoint):
checkpoint = checkpoint["state_dict"]
if load_ignore_extra:
pretrained_state = checkpoint
model_dict = net.state_dict()
pretrained_state = {k: v for k, v in pretrained_state.items() if k in model_dict}
net.load_state_dict(pretrained_state)
else:
if remove_module:
net_tmp = torch.nn.DataParallel(net)
net_tmp.load_state_dict(checkpoint)
net.load_state_dict(net_tmp.module.cpu().state_dict())
else:
net.load_state_dict(checkpoint)
if use_data_parallel and use_cuda:
net = torch.nn.DataParallel(net)
if use_cuda:
net = net.cuda()
return net
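# Hedged usage sketch (hypothetical helper): build a model by name on CPU
# without pretrained weights. "resnet18" is assumed to be a valid key in the
# model provider.
def _demo_prepare_model():
    net = prepare_model(
        model_name="resnet18",
        use_pretrained=False,
        pretrained_model_file_path="",
        use_cuda=False)
    print("params: {}".format(calc_net_weight_count(net)))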
def calc_net_weight_count(net):
"""
Calculate number of model trainable parameters.
Parameters:
----------
net : Module
Model.
Returns:
-------
int
Number of parameters.
"""
net.train()
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def validate(metric,
net,
val_data,
use_cuda):
"""
Core validation/testing routine.
Parameters:
----------
metric : EvalMetric
Metric object instance.
net : Module
Model.
val_data : DataLoader
Data loader.
use_cuda : bool
Whether to use CUDA.
Returns:
-------
EvalMetric
Metric object instance.
"""
net.eval()
metric.reset()
with torch.no_grad():
for data, target in val_data:
if use_cuda:
target = target.cuda(non_blocking=True)
output = net(data)
metric.update(target, output)
return metric
def report_accuracy(metric,
extended_log=False):
"""
Make report string for composite metric.
Parameters:
----------
metric : EvalMetric
Metric object instance.
extended_log : bool, default False
Whether to log more precise accuracy values.
Returns:
-------
str
Report string.
"""
def create_msg(name, value):
if type(value) in [list, tuple]:
if extended_log:
return "{}={} ({})".format("{}", "/".join(["{:.4f}"] * len(value)), "/".join(["{}"] * len(value))).\
format(name, *(value + value))
else:
return "{}={}".format("{}", "/".join(["{:.4f}"] * len(value))).format(name, *value)
else:
if extended_log:
return "{name}={value:.4f} ({value})".format(name=name, value=value)
else:
return "{name}={value:.4f}".format(name=name, value=value)
metric_info = metric.get()
if isinstance(metric, CompositeEvalMetric):
msg = ", ".join([create_msg(name=m[0], value=m[1]) for m in zip(*metric_info)])
elif isinstance(metric, EvalMetric):
msg = create_msg(name=metric_info[0], value=metric_info[1])
else:
raise Exception("Wrong metric type: {}".format(type(metric)))
return msg
def get_metric(metric_name, metric_extra_kwargs):
"""
Get metric by name.
Parameters:
----------
metric_name : str
Metric name.
metric_extra_kwargs : dict
Metric extra parameters.
    Returns:
-------
EvalMetric
Metric object instance.
"""
if metric_name == "Top1Error":
return Top1Error(**metric_extra_kwargs)
elif metric_name == "TopKError":
return TopKError(**metric_extra_kwargs)
elif metric_name == "PixelAccuracyMetric":
return PixelAccuracyMetric(**metric_extra_kwargs)
elif metric_name == "MeanIoUMetric":
return MeanIoUMetric(**metric_extra_kwargs)
elif metric_name == "CocoDetMApMetric":
return CocoDetMApMetric(**metric_extra_kwargs)
elif metric_name == "CocoHpeOksApMetric":
return CocoHpeOksApMetric(**metric_extra_kwargs)
elif metric_name == "WER":
return WER(**metric_extra_kwargs)
else:
raise Exception("Wrong metric name: {}".format(metric_name))
def get_composite_metric(metric_names, metric_extra_kwargs):
"""
Get composite metric by list of metric names.
Parameters:
----------
metric_names : list of str
Metric name list.
metric_extra_kwargs : list of dict
Metric extra parameters list.
Returns:
-------
CompositeEvalMetric
Metric object instance.
"""
if len(metric_names) == 1:
metric = get_metric(metric_names[0], metric_extra_kwargs[0])
else:
metric = CompositeEvalMetric()
for name, extra_kwargs in zip(metric_names, metric_extra_kwargs):
metric.add(get_metric(name, extra_kwargs))
return metric
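# Hedged usage sketch (hypothetical helper): assemble VOC-style validation
# metrics. The kwargs mirror VOCMetaInfo elsewhere in this repo and are an
# illustrative assumption, not a canonical configuration.
def _demo_get_composite_metric():
    metric = get_composite_metric(
        metric_names=["PixelAccuracyMetric", "MeanIoUMetric"],
        metric_extra_kwargs=[
            {"vague_idx": 255, "use_vague": True, "macro_average": False},
            {"num_classes": 21, "vague_idx": 255, "use_vague": True,
             "bg_idx": 0, "ignore_bg": True, "macro_average": False}])
    print(report_accuracy(metric))  # both metrics report nan before any update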
def get_metric_name(metric, index):
"""
Get metric name by index in the composite metric.
Parameters:
----------
metric : CompositeEvalMetric or EvalMetric
Metric object instance.
index : int
Index.
Returns:
-------
str
Metric name.
"""
if isinstance(metric, CompositeEvalMetric):
return metric.metrics[index].name
elif isinstance(metric, EvalMetric):
assert (index == 0)
return metric.name
else:
raise Exception("Wrong metric type: {}".format(type(metric)))
| 8,538
| 26.996721
| 116
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/__init__.py
| 0
| 0
| 0
|
py
|
|
imgclsmob
|
imgclsmob-master/pytorch/metrics/seg_metrics_np.py
|
"""
Routines for segmentation metrics on numpy.
"""
import numpy as np
__all__ = ['seg_pixel_accuracy_np', 'segm_mean_accuracy_hmasks', 'segm_mean_accuracy', 'seg_mean_iou_np',
'segm_mean_iou2', 'seg_mean_iou_imasks_np', 'segm_fw_iou_hmasks', 'segm_fw_iou']
def seg_pixel_accuracy_np(label_imask,
pred_imask,
vague_idx=-1,
use_vague=False,
macro_average=True,
empty_result=0.0):
"""
The segmentation pixel accuracy.
Parameters:
----------
label_imask : np.array
Ground truth index mask (maybe batch of).
pred_imask : np.array
Predicted index mask (maybe batch of).
vague_idx : int, default -1
Index of masked pixels.
use_vague : bool, default False
Whether to use pixel masking.
macro_average : bool, default True
Whether to use micro or macro averaging.
empty_result : float, default 0.0
Result value for an image without any classes.
Returns:
-------
float or tuple of two ints
PA metric value.
"""
assert (label_imask.shape == pred_imask.shape)
if use_vague:
sum_u_ij = np.sum(label_imask.flat != vague_idx)
if sum_u_ij == 0:
if macro_average:
return empty_result
else:
return 0, 0
sum_u_ii = np.sum(np.logical_and(pred_imask.flat == label_imask.flat, label_imask.flat != vague_idx))
else:
sum_u_ii = np.sum(pred_imask.flat == label_imask.flat)
sum_u_ij = pred_imask.size
if macro_average:
return float(sum_u_ii) / sum_u_ij
else:
return sum_u_ii, sum_u_ij
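# Illustrative sketch (hypothetical helper): pixel accuracy with vague-pixel
# masking. Of the three unmasked pixels two match, so the macro form yields
# 2/3 and the micro form the raw counts (2, 3).
def _demo_seg_pixel_accuracy_np():
    label = np.array([[0, 1], [2, 255]], dtype=np.int32)
    pred = np.array([[0, 1], [1, 1]], dtype=np.int32)
    acc = seg_pixel_accuracy_np(label, pred, vague_idx=255, use_vague=True)
    counts = seg_pixel_accuracy_np(label, pred, vague_idx=255, use_vague=True,
                                   macro_average=False)
    assert (abs(acc - 2.0 / 3.0) < 1e-6) and (counts == (2, 3))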
def segm_mean_accuracy_hmasks(label_hmask,
pred_hmask):
"""
The segmentation mean accuracy.
Parameters:
----------
label_hmask : np.array
Ground truth one-hot mask.
pred_hmask : np.array
Predicted one-hot mask.
Returns:
-------
float
MA metric value.
"""
assert (pred_hmask.shape == label_hmask.shape)
assert (len(pred_hmask.shape) == 3)
n = label_hmask.shape[0]
i_sum = 0
acc_sum = 0.0
for i in range(n):
class_i_pred_mask = pred_hmask[i, :, :]
class_i_label_mask = label_hmask[i, :, :]
u_i = np.sum(class_i_label_mask)
if u_i == 0:
continue
u_ii = np.sum(np.logical_and(class_i_pred_mask, class_i_label_mask))
class_acc = float(u_ii) / u_i
acc_sum += class_acc
i_sum += 1
if i_sum > 0:
mean_acc = acc_sum / i_sum
else:
mean_acc = 1.0
return mean_acc
def segm_mean_accuracy(label_hmask,
pred_imask):
"""
The segmentation mean accuracy.
Parameters:
----------
label_hmask : np.array
Ground truth one-hot mask.
pred_imask : np.array
Predicted index mask.
Returns:
-------
float
MA metric value.
"""
assert (len(label_hmask.shape) == 3)
assert (len(pred_imask.shape) == 2)
assert (pred_imask.shape == label_hmask.shape[1:])
n = label_hmask.shape[0]
i_sum = 0
acc_sum = 0.0
for i in range(n):
class_i_pred_mask = (pred_imask == i)
class_i_label_mask = label_hmask[i, :, :]
u_i = np.sum(class_i_label_mask)
if u_i == 0:
continue
u_ii = np.sum(np.logical_and(class_i_pred_mask, class_i_label_mask))
class_acc = float(u_ii) / u_i
acc_sum += class_acc
i_sum += 1
if i_sum > 0:
mean_acc = acc_sum / i_sum
else:
mean_acc = 1.0
return mean_acc
def segm_mean_iou_imasks(label_hmask,
                         pred_hmask):
    """
    The segmentation mean intersection over union (one-hot masks).
    Parameters:
    ----------
    label_hmask : np.array
        Ground truth one-hot mask.
    pred_hmask : np.array
        Predicted one-hot mask.
    Returns:
    -------
    float
        MIoU metric value.
    """
    assert (pred_hmask.shape == label_hmask.shape)
    assert (len(pred_hmask.shape) == 3)
    n = label_hmask.shape[0]
    i_sum = 0
    acc_iou = 0.0
    for i in range(n):
        class_i_pred_mask = pred_hmask[i, :, :]
        class_i_label_mask = label_hmask[i, :, :]
        u_i = np.sum(class_i_label_mask)
        u_ji_sj = np.sum(class_i_pred_mask)
        if (u_i + u_ji_sj) == 0:
            continue
        u_ii = np.sum(np.logical_and(class_i_pred_mask, class_i_label_mask))
        acc_iou += float(u_ii) / (u_i + u_ji_sj - u_ii)
        i_sum += 1
    if i_sum > 0:
        mean_iou = acc_iou / i_sum
    else:
        mean_iou = 1.0
    return mean_iou
def seg_mean_iou_np(label_hmask,
pred_imask):
"""
The segmentation mean intersection over union.
Parameters:
----------
label_hmask : np.array
Ground truth one-hot mask.
pred_imask : np.array
Predicted index mask.
Returns:
-------
float
MIoU metric value.
"""
assert (len(label_hmask.shape) == 3)
assert (len(pred_imask.shape) == 2)
assert (pred_imask.shape == label_hmask.shape[1:])
n = label_hmask.shape[0]
i_sum = 0
acc_iou = 0.0
for i in range(n):
class_i_pred_mask = (pred_imask == i)
class_i_label_mask = label_hmask[i, :, :]
u_i = np.sum(class_i_label_mask)
u_ji_sj = np.sum(class_i_pred_mask)
if (u_i + u_ji_sj) == 0:
continue
u_ii = np.sum(np.logical_and(class_i_pred_mask, class_i_label_mask))
acc_iou += float(u_ii) / (u_i + u_ji_sj - u_ii)
i_sum += 1
if i_sum > 0:
mean_iou = acc_iou / i_sum
else:
mean_iou = 1.0
return mean_iou
def segm_mean_iou2(label_hmask,
pred_hmask):
"""
The segmentation mean intersection over union.
Parameters:
----------
label_hmask : nd.array
Ground truth one-hot mask (batch of).
pred_hmask : nd.array
Predicted one-hot mask (batch of).
Returns:
-------
float
MIoU metric value.
"""
assert (len(label_hmask.shape) == 4)
assert (len(pred_hmask.shape) == 4)
assert (pred_hmask.shape == label_hmask.shape)
eps = np.finfo(np.float32).eps
class_axis = 1 # The axis that represents classes
inter_hmask = label_hmask * pred_hmask
u_i = label_hmask.sum(axis=[2, 3])
u_ji_sj = pred_hmask.sum(axis=[2, 3])
u_ii = inter_hmask.sum(axis=[2, 3])
class_count = (u_i + u_ji_sj > 0.0).sum(axis=class_axis) + eps
class_acc = u_ii / (u_i + u_ji_sj - u_ii + eps)
acc_iou = class_acc.sum(axis=class_axis) + eps
mean_iou = (acc_iou / class_count).mean().asscalar()
return mean_iou
def seg_mean_iou_imasks_np(label_imask,
pred_imask,
num_classes,
vague_idx=-1,
use_vague=False,
bg_idx=-1,
ignore_bg=False,
macro_average=True,
empty_result=0.0):
"""
The segmentation mean intersection over union.
Parameters:
----------
    label_imask : np.array
        Ground truth index mask.
    pred_imask : np.array
        Predicted index mask.
num_classes : int
Number of classes.
vague_idx : int, default -1
Index of masked pixels.
use_vague : bool, default False
Whether to use pixel masking.
bg_idx : int, default -1
Index of background class.
ignore_bg : bool, default False
Whether to ignore background class.
macro_average : bool, default True
Whether to use micro or macro averaging.
empty_result : float, default 0.0
Result value for an image without any classes.
Returns:
-------
float or tuple of two np.arrays of int
MIoU metric value.
"""
assert (len(label_imask.shape) == 2)
assert (len(pred_imask.shape) == 2)
assert (pred_imask.shape == label_imask.shape)
min_i = 1
max_i = num_classes
n_bins = num_classes
if ignore_bg:
n_bins -= 1
if bg_idx != 0:
assert (bg_idx == num_classes - 1)
max_i -= 1
if not (ignore_bg and (bg_idx == 0)):
label_imask += 1
pred_imask += 1
vague_idx += 1
if use_vague:
label_imask = label_imask * (label_imask != vague_idx)
pred_imask = pred_imask * (pred_imask != vague_idx)
intersection = pred_imask * (pred_imask == label_imask)
area_inter, _ = np.histogram(intersection, bins=n_bins, range=(min_i, max_i))
area_pred, _ = np.histogram(pred_imask, bins=n_bins, range=(min_i, max_i))
area_label, _ = np.histogram(label_imask, bins=n_bins, range=(min_i, max_i))
area_union = area_pred + area_label - area_inter
assert ((not ignore_bg) or (len(area_inter) == num_classes - 1))
assert (ignore_bg or (len(area_inter) == num_classes))
if macro_average:
class_count = (area_union > 0).sum()
if class_count == 0:
return empty_result
eps = np.finfo(np.float32).eps
area_union = area_union + eps
mean_iou = (area_inter / area_union).sum() / class_count
return mean_iou
else:
return area_inter.astype(np.uint64), area_union.astype(np.uint64)
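# Illustrative sketch (hypothetical helper): mean IoU on a 3-class problem
# with an ignored background at index 0. Class 1 overlaps on one of two
# pixels in its union (IoU 1/2), class 2 matches exactly (IoU 1), so the
# macro-averaged result is 0.75.
def _demo_seg_mean_iou_imasks_np():
    label = np.array([[1, 1], [2, 0]], dtype=np.int32)
    pred = np.array([[1, 0], [2, 0]], dtype=np.int32)
    miou = seg_mean_iou_imasks_np(label, pred, num_classes=3,
                                  bg_idx=0, ignore_bg=True)
    assert (abs(miou - 0.75) < 1e-6)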
def segm_fw_iou_hmasks(label_hmask,
pred_hmask):
"""
The segmentation frequency weighted intersection over union.
Parameters:
----------
label_hmask : np.array
Ground truth one-hot mask.
pred_hmask : np.array
Predicted one-hot mask.
Returns:
-------
float
FrIoU metric value.
"""
assert (pred_hmask.shape == label_hmask.shape)
assert (len(pred_hmask.shape) == 3)
n = label_hmask.shape[0]
acc_iou = 0.0
for i in range(n):
class_i_pred_mask = pred_hmask[i, :, :]
class_i_label_mask = label_hmask[i, :, :]
u_i = np.sum(class_i_label_mask)
u_ji_sj = np.sum(class_i_pred_mask)
if (u_i + u_ji_sj) == 0:
continue
u_ii = np.sum(np.logical_and(class_i_pred_mask, class_i_label_mask))
acc_iou += float(u_i * u_ii) / (u_i + u_ji_sj - u_ii)
fw_factor = pred_hmask[0].size
return acc_iou / fw_factor
def segm_fw_iou(label_hmask,
pred_imask):
"""
The segmentation frequency weighted intersection over union.
Parameters:
----------
label_hmask : np.array
Ground truth one-hot mask.
pred_imask : np.array
Predicted index mask.
Returns:
-------
float
FrIoU metric value.
"""
assert (len(label_hmask.shape) == 3)
assert (len(pred_imask.shape) == 2)
assert (pred_imask.shape == label_hmask.shape[1:])
n = label_hmask.shape[0]
acc_iou = 0.0
for i in range(n):
class_i_pred_mask = (pred_imask == i)
class_i_label_mask = label_hmask[i, :, :]
u_i = np.sum(class_i_label_mask)
u_ji_sj = np.sum(class_i_pred_mask)
if (u_i + u_ji_sj) == 0:
continue
u_ii = np.sum(np.logical_and(class_i_pred_mask, class_i_label_mask))
acc_iou += float(u_i * u_ii) / (u_i + u_ji_sj - u_ii)
fw_factor = pred_imask.size
return acc_iou / fw_factor
| 11,447
| 25.5
| 109
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/metrics/seg_metrics.py
|
"""
Evaluation Metrics for Semantic Segmentation.
"""
import numpy as np
import torch
from .metric import EvalMetric, check_label_shapes
from .seg_metrics_np import seg_pixel_accuracy_np, seg_mean_iou_imasks_np
__all__ = ['PixelAccuracyMetric', 'MeanIoUMetric']
class PixelAccuracyMetric(EvalMetric):
"""
Computes the pixel-wise accuracy.
Parameters:
----------
axis : int, default 1
The axis that represents classes.
name : str, default 'pix_acc'
Name of this metric instance for display.
output_names : list of str, or None, default None
Name of predictions that should be used when updating with update_dict.
By default include all predictions.
label_names : list of str, or None, default None
Name of labels that should be used when updating with update_dict.
By default include all labels.
on_cpu : bool, default True
Calculate on CPU.
sparse_label : bool, default True
Whether label is an integer array instead of probability distribution.
vague_idx : int, default -1
Index of masked pixels.
use_vague : bool, default False
Whether to use pixel masking.
macro_average : bool, default True
Whether to use micro or macro averaging.
"""
def __init__(self,
axis=1,
name="pix_acc",
output_names=None,
label_names=None,
on_cpu=True,
sparse_label=True,
vague_idx=-1,
use_vague=False,
macro_average=True):
self.macro_average = macro_average
super(PixelAccuracyMetric, self).__init__(
name,
axis=axis,
output_names=output_names,
label_names=label_names)
self.axis = axis
self.on_cpu = on_cpu
self.sparse_label = sparse_label
self.vague_idx = vague_idx
self.use_vague = use_vague
def update(self, labels, preds):
"""
Updates the internal evaluation result.
Parameters:
----------
labels : torch.Tensor
The labels of the data.
preds : torch.Tensor
Predicted values.
"""
with torch.no_grad():
check_label_shapes(labels, preds)
if self.on_cpu:
if self.sparse_label:
label_imask = labels.cpu().numpy().astype(np.int32)
else:
label_imask = torch.argmax(labels, dim=self.axis).cpu().numpy().astype(np.int32)
pred_imask = torch.argmax(preds, dim=self.axis).cpu().numpy().astype(np.int32)
acc = seg_pixel_accuracy_np(
label_imask=label_imask,
pred_imask=pred_imask,
vague_idx=self.vague_idx,
use_vague=self.use_vague,
macro_average=self.macro_average)
if self.macro_average:
self.sum_metric += acc
self.num_inst += 1
else:
self.sum_metric += acc[0]
self.num_inst += acc[1]
else:
assert False
def reset(self):
"""
Resets the internal evaluation result to initial state.
"""
if self.macro_average:
self.num_inst = 0
self.sum_metric = 0.0
else:
self.num_inst = 0
self.sum_metric = 0
def get(self):
"""
Gets the current evaluation result.
Returns:
-------
names : list of str
Name of the metrics.
values : list of float
Value of the evaluations.
"""
if self.macro_average:
if self.num_inst == 0:
return self.name, float("nan")
else:
return self.name, self.sum_metric / self.num_inst
else:
if self.num_inst == 0:
return self.name, float("nan")
else:
return self.name, float(self.sum_metric) / self.num_inst
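# Illustrative sketch (hypothetical helper): one update with (N, C, H, W)
# logits and an (N, H, W) integer mask. The 255-valued pixel is excluded via
# vague-index masking, so two of the three counted pixels match.
def _demo_pixel_accuracy_metric():
    metric = PixelAccuracyMetric(vague_idx=255, use_vague=True)
    preds = torch.zeros(1, 2, 2, 2)
    preds[:, 1] = 1.0  # every pixel predicted as class 1
    labels = torch.tensor([[[1, 1], [0, 255]]])
    metric.update(labels, preds)
    print(metric.get())  # ('pix_acc', 0.666...)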
class MeanIoUMetric(EvalMetric):
"""
Computes the mean intersection over union.
Parameters:
----------
axis : int, default 1
        The axis that represents classes.
name : str, default 'mean_iou'
Name of this metric instance for display.
output_names : list of str, or None, default None
Name of predictions that should be used when updating with update_dict.
By default include all predictions.
label_names : list of str, or None, default None
Name of labels that should be used when updating with update_dict.
By default include all labels.
on_cpu : bool, default True
Calculate on CPU.
sparse_label : bool, default True
Whether label is an integer array instead of probability distribution.
num_classes : int
        Number of classes.
vague_idx : int, default -1
Index of masked pixels.
use_vague : bool, default False
Whether to use pixel masking.
bg_idx : int, default -1
Index of background class.
ignore_bg : bool, default False
Whether to ignore background class.
macro_average : bool, default True
Whether to use micro or macro averaging.
"""
def __init__(self,
axis=1,
name="mean_iou",
output_names=None,
label_names=None,
on_cpu=True,
sparse_label=True,
num_classes=None,
vague_idx=-1,
use_vague=False,
bg_idx=-1,
ignore_bg=False,
macro_average=True):
self.macro_average = macro_average
self.num_classes = num_classes
self.ignore_bg = ignore_bg
super(MeanIoUMetric, self).__init__(
name,
axis=axis,
output_names=output_names,
label_names=label_names)
assert ((not ignore_bg) or (bg_idx in (0, num_classes - 1)))
self.axis = axis
self.on_cpu = on_cpu
self.sparse_label = sparse_label
self.vague_idx = vague_idx
self.use_vague = use_vague
self.bg_idx = bg_idx
assert (on_cpu and sparse_label)
def update(self, labels, preds):
"""
Updates the internal evaluation result.
Parameters:
----------
labels : torch.Tensor
The labels of the data.
preds : torch.Tensor
Predicted values.
"""
assert (len(labels) == len(preds))
with torch.no_grad():
if self.on_cpu:
if self.sparse_label:
label_imask = labels.cpu().numpy().astype(np.int32)
else:
assert False
pred_imask = torch.argmax(preds, dim=self.axis).cpu().numpy().astype(np.int32)
batch_size = labels.shape[0]
for k in range(batch_size):
if self.sparse_label:
acc = seg_mean_iou_imasks_np(
label_imask=label_imask[k, :, :],
pred_imask=pred_imask[k, :, :],
num_classes=self.num_classes,
vague_idx=self.vague_idx,
use_vague=self.use_vague,
bg_idx=self.bg_idx,
ignore_bg=self.ignore_bg,
macro_average=self.macro_average)
else:
assert False
if self.macro_average:
self.sum_metric += acc
self.num_inst += 1
else:
self.area_inter += acc[0]
self.area_union += acc[1]
else:
assert False
def reset(self):
"""
Resets the internal evaluation result to initial state.
"""
if self.macro_average:
self.num_inst = 0
self.sum_metric = 0.0
else:
class_count = self.num_classes - 1 if self.ignore_bg else self.num_classes
self.area_inter = np.zeros((class_count,), np.uint64)
self.area_union = np.zeros((class_count,), np.uint64)
def get(self):
"""
Gets the current evaluation result.
Returns:
-------
names : list of str
Name of the metrics.
values : list of float
Value of the evaluations.
"""
if self.macro_average:
if self.num_inst == 0:
return self.name, float("nan")
else:
return self.name, self.sum_metric / self.num_inst
else:
class_count = (self.area_union > 0).sum()
if class_count == 0:
return self.name, float("nan")
eps = np.finfo(np.float32).eps
area_union_eps = self.area_union + eps
mean_iou = (self.area_inter / area_union_eps).sum() / class_count
return self.name, mean_iou
| 9,276
| 33.106618
| 100
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/metrics/ret_metrics.py
|
"""
Evaluation Metrics for Image Retrieval.
"""
import numpy as np
import torch
from .metric import EvalMetric
__all__ = ['PointDetectionMatchRatio', 'PointDescriptionMatchRatio']
class PointDetectionMatchRatio(EvalMetric):
"""
Computes point detection match ratio (with mean residual).
Parameters:
----------
pts_max_count : int
Maximal count of points.
axis : int, default 1
        The axis that represents classes.
    name : str, default 'pt_det_ratio'
Name of this metric instance for display.
output_names : list of str, or None, default None
Name of predictions that should be used when updating with update_dict.
By default include all predictions.
label_names : list of str, or None, default None
Name of labels that should be used when updating with update_dict.
By default include all labels.
"""
def __init__(self,
pts_max_count,
axis=1,
name="pt_det_ratio",
output_names=None,
label_names=None):
super(PointDetectionMatchRatio, self).__init__(
name,
axis=axis,
output_names=output_names,
label_names=label_names,
has_global_stats=True)
self.axis = axis
self.pts_max_count = pts_max_count
self.resudual_sum = 0.0
self.resudual_count = 0
def update_alt(self,
homography,
src_pts,
dst_pts,
src_confs,
dst_confs,
src_img_size,
dst_img_size):
"""
Updates the internal evaluation result.
Parameters:
----------
homography : torch.Tensor
Homography (from source image to destination one).
src_pts : torch.Tensor
Detected points for the first (source) image.
dst_pts : torch.Tensor
Detected points for the second (destination) image.
src_confs : torch.Tensor
Confidences for detected points on the source image.
dst_confs : torch.Tensor
Confidences for detected points on the destination image.
src_img_size : tuple of 2 int
Size (H, W) of the source image.
dst_img_size : tuple of 2 int
Size (H, W) of the destination image.
"""
assert (src_confs.argsort(descending=True).cpu().detach().numpy() == np.arange(src_confs.shape[0])).all()
assert (dst_confs.argsort(descending=True).cpu().detach().numpy() == np.arange(dst_confs.shape[0])).all()
max_dist_sat_value = 1e5
eps = 1e-5
# print("src_img_size={}".format(src_img_size))
# print("dst_img_size={}".format(dst_img_size))
homography = homography.to(src_pts.device)
self.normalize_homography(homography)
homography_inv = self.calc_homography_inv(homography)
# print("homography={}".format(homography))
# print("homography_inv={}".format(homography_inv))
# print("src_pts={}".format(src_pts[:10, :].int()))
src_pts = src_pts.flip(dims=(1,))
dst_pts = dst_pts.flip(dims=(1,))
# print("src_pts={}".format(src_pts[:10, :].int()))
# print("src_pts.shape={}".format(src_pts.shape))
# print("dst_pts.shape={}".format(dst_pts.shape))
# print("src_pts={}".format(src_pts[:10, :].int()))
# print("dst_pts={}".format(dst_pts[:10, :].int()))
# with torch.no_grad():
src_hmg_pts = self.calc_homogeneous_coords(src_pts.float())
dst_hmg_pts = self.calc_homogeneous_coords(dst_pts.float())
# print("src_hmg_pts={}".format(src_hmg_pts[:10, :].int()))
# print("dst_hmg_pts={}".format(dst_hmg_pts[:10, :].int()))
src_hmg_pts, src_confs = self.filter_inside_points(
src_hmg_pts,
src_confs,
homography,
dst_img_size)
dst_hmg_pts, dst_confs = self.filter_inside_points(
dst_hmg_pts,
dst_confs,
homography_inv,
src_img_size)
# print("src_hmg_pts.shape={}".format(src_hmg_pts.shape))
# print("dst_hmg_pts.shape={}".format(dst_hmg_pts.shape))
#
# print("src_hmg_pts={}".format(src_hmg_pts[:10, :].int()))
# print("dst_hmg_pts={}".format(dst_hmg_pts[:10, :].int()))
src_pts_count = src_hmg_pts.shape[0]
dst_pts_count = dst_hmg_pts.shape[0]
src_pts_count2 = min(src_pts_count, self.pts_max_count)
src_hmg_pts, conf_thr = self.filter_best_points(
hmg_pts=src_hmg_pts,
confs=src_confs,
max_count=src_pts_count2,
min_conf=None)
dst_pts_count2 = min(dst_pts_count, self.pts_max_count)
dst_hmg_pts, _ = self.filter_best_points(
hmg_pts=dst_hmg_pts,
confs=dst_confs,
max_count=dst_pts_count2,
min_conf=conf_thr)
# print("src_hmg_pts.shape={}".format(src_hmg_pts.shape))
# print("dst_hmg_pts.shape={}".format(dst_hmg_pts.shape))
# print("src_hmg_pts={}".format(src_hmg_pts[:10, :].int()))
# print("dst_hmg_pts={}".format(dst_hmg_pts[:10, :].int()))
preds_dst_hmg_pts = self.transform_points(
src_hmg_pts,
homography)
# print("preds_dst_hmg_pts={}".format(preds_dst_hmg_pts[:10, :].int()))
cost = self.calc_pairwise_distances(x=preds_dst_hmg_pts, y=dst_hmg_pts).cpu().detach().numpy()
self.saturate_distance_matrix(
dist_mat=cost,
max_dist_thr=8.0,
max_dist_sat=max_dist_sat_value)
# print("cost.shape={}".format(cost.shape))
from scipy.optimize import linear_sum_assignment
row_ind, col_ind = linear_sum_assignment(cost)
# print("row_ind.shape={}".format(row_ind.shape))
# print("col_ind.shape={}".format(col_ind.shape))
        residuals = cost[row_ind, col_ind]
        residuals = residuals[residuals < (max_dist_sat_value - eps)]
        residual_count = len(residuals)
        self.sum_metric += residual_count
        self.global_sum_metric += residual_count
        self.num_inst += src_pts_count2
        self.global_num_inst += src_pts_count2
        print("ratio_residual={}".format(float(residual_count) / src_pts_count2))
        if residual_count != 0:
            self.residual_sum += residuals.sum()
            self.residual_count += residual_count
@staticmethod
def normalize_homography(homography):
homography /= homography[2, 2]
@staticmethod
def calc_homography_inv(homography):
homography_inv = homography.inverse()
PointDetectionMatchRatio.normalize_homography(homography_inv)
return homography_inv
@staticmethod
def calc_homogeneous_coords(pts):
hmg_pts = torch.cat((pts, torch.ones((pts.shape[0], 1), dtype=pts.dtype, device=pts.device)), dim=1)
return hmg_pts
@staticmethod
def calc_cartesian_coords(hmg_pts):
pts = hmg_pts[:, :2]
return pts
@staticmethod
def transform_points(src_hmg_pts,
homography):
# print("transform_points -> src_hmg_pts.shape={}".format(src_hmg_pts.shape))
# print("transform_points -> homography.shape={}".format(homography.shape))
# print("homography={}".format(homography))
# print("transform_points -> src_hmg_pts={}".format(src_hmg_pts[:10, :].int()))
dst_hmg_pts = torch.matmul(src_hmg_pts, homography.t())
# print("transform_points -> dst_hmg_pts={}".format(dst_hmg_pts[:10, :].int()))
# print("transform_points -> dst_hmg_pts.shape={}".format(dst_hmg_pts.shape))
dst_hmg_pts /= dst_hmg_pts[:, 2:]
return dst_hmg_pts
@staticmethod
def calc_inside_pts_mask(pts,
img_size):
eps = 1e-3
border_size = 1.0
border = border_size - eps
mask = (pts[:, 0] >= border) & (pts[:, 0] < img_size[0] - border) &\
(pts[:, 1] >= border) & (pts[:, 1] < img_size[1] - border)
return mask
@staticmethod
def filter_inside_points(src_hmg_pts,
src_confs,
homography,
dst_img_size):
# print("fip->src_hmg_pts.shape={}".format(src_hmg_pts.shape))
# print("fip->src_hmg_pts={}".format(src_hmg_pts[:10, :].int()))
# print("fip->src_confs.shape={}".format(src_confs.shape))
# print("fip->src_confs={}".format(src_confs[:10]))
# print("homography_inv={}".format(homography))
dst_hmg_pts = PointDetectionMatchRatio.transform_points(src_hmg_pts, homography)
# print("fip->dst_hmg_pts.shape={}".format(dst_hmg_pts.shape))
# print("fip->dst_hmg_pts={}".format(dst_hmg_pts[:10, :]))
mask = PointDetectionMatchRatio.calc_inside_pts_mask(dst_hmg_pts, dst_img_size)
# print("fip->mask={}".format(mask[:10]))
# print("fip->mask.sum()={}".format(mask.sum()))
return src_hmg_pts[mask], src_confs[mask]
@staticmethod
def filter_best_points(hmg_pts,
confs,
max_count,
min_conf=None):
        if min_conf is not None:
            # `confs` is assumed sorted in descending order (checked in
            # `update_alt`), so the first index below `min_conf` bounds the
            # number of points with confidence >= min_conf. Guard against the
            # case where no confidence falls below the threshold.
            below = (confs < min_conf).nonzero()
            if below.numel() > 0:
                max_count = max(max_count, below[0, 0].item())
            else:
                max_count = confs.shape[0]
inds = confs.argsort(descending=True)[:max_count]
return hmg_pts[inds], confs[inds][-1]
@staticmethod
def calc_pairwise_distances(x, y):
diff = x.unsqueeze(1) - y.unsqueeze(0)
return torch.sum(diff * diff, dim=-1).sqrt()
@staticmethod
def saturate_distance_matrix(dist_mat,
max_dist_thr,
max_dist_sat):
dist_mat[dist_mat > max_dist_thr] = max_dist_sat
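# A minimal sketch (illustrative helper, not part of the metric API) of the
# projective mapping used throughout this class: points are lifted to
# homogeneous coordinates, multiplied by the homography, and renormalized.
def _example_homography_transform():
    pts = torch.tensor([[10.0, 20.0], [30.0, 40.0]])
    # A pure translation by (+5, +7) as a toy homography.
    homography = torch.tensor([[1.0, 0.0, 5.0],
                               [0.0, 1.0, 7.0],
                               [0.0, 0.0, 1.0]])
    hmg_pts = PointDetectionMatchRatio.calc_homogeneous_coords(pts)
    dst_hmg_pts = PointDetectionMatchRatio.transform_points(hmg_pts, homography)
    return PointDetectionMatchRatio.calc_cartesian_coords(dst_hmg_pts)  # (15, 27), (35, 47)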
class PointDescriptionMatchRatio(EvalMetric):
"""
Computes point description match ratio.
Parameters:
----------
pts_max_count : int
Maximal count of points.
    dist_ratio_thr : float, default 0.95
        Distance ratio threshold for point filtering.
    axis : int, default 1
        The axis that represents classes.
    name : str, default 'pt_desc_ratio'
Name of this metric instance for display.
output_names : list of str, or None, default None
Name of predictions that should be used when updating with update_dict.
By default include all predictions.
label_names : list of str, or None, default None
Name of labels that should be used when updating with update_dict.
By default include all labels.
"""
def __init__(self,
pts_max_count,
dist_ratio_thr=0.95,
axis=1,
name="pt_desc_ratio",
output_names=None,
label_names=None):
super(PointDescriptionMatchRatio, self).__init__(
name,
axis=axis,
output_names=output_names,
label_names=label_names,
has_global_stats=True)
self.axis = axis
self.pts_max_count = pts_max_count
self.dist_ratio_thr = dist_ratio_thr
        self.residual_sum = 0.0
        self.residual_count = 0
def update_alt(self,
homography,
src_pts,
dst_pts,
src_descs,
dst_descs,
src_img_size,
dst_img_size):
"""
Updates the internal evaluation result.
Parameters:
----------
homography : torch.Tensor
Homography (from source image to destination one).
src_pts : torch.Tensor
Detected points for the first (source) image.
dst_pts : torch.Tensor
Detected points for the second (destination) image.
src_descs : torch.Tensor
Descriptors for detected points on the source image.
dst_descs : torch.Tensor
Descriptors for detected points on the destination image.
src_img_size : tuple of 2 int
Size (H, W) of the source image.
dst_img_size : tuple of 2 int
Size (H, W) of the destination image.
"""
homography = homography.to(src_pts.device)
self.normalize_homography(homography)
homography_inv = self.calc_homography_inv(homography)
src_pts = src_pts.flip(dims=(1,))
dst_pts = dst_pts.flip(dims=(1,))
src_hmg_pts = self.calc_homogeneous_coords(src_pts.float())
dst_hmg_pts = self.calc_homogeneous_coords(dst_pts.float())
src_hmg_pts = self.filter_inside_points(
src_hmg_pts,
homography,
dst_img_size)
dst_hmg_pts = self.filter_inside_points(
dst_hmg_pts,
homography_inv,
src_img_size)
src_pts_count = src_hmg_pts.shape[0]
dst_pts_count = dst_hmg_pts.shape[0]
src_pts_count2 = min(src_pts_count, self.pts_max_count * 10)
src_hmg_pts, src_descs = self.filter_best_points(
hmg_pts=src_hmg_pts,
descs=src_descs,
max_count=src_pts_count2)
dst_pts_count2 = min(dst_pts_count, self.pts_max_count * 10)
dst_hmg_pts, dst_descs = self.filter_best_points(
hmg_pts=dst_hmg_pts,
descs=dst_descs,
max_count=dst_pts_count2)
        dist_mat = self.calc_pairwise_distances(x=src_descs, y=dst_descs)
        # Lowe-style ratio test on the two smallest descriptor distances:
        # keep a source point only when its best match is clearly better
        # than the runner-up.
        vals, inds = dist_mat.topk(k=2, dim=1, largest=False, sorted=True)
        keep_mask = (vals[:, 0] / vals[:, 1]) < self.dist_ratio_thr
        matched_src_hmg_pts = src_hmg_pts[keep_mask]
        matched_dst_hmg_pts = dst_hmg_pts[inds[:, 0][keep_mask]]
        preds_dst_hmg_pts = self.transform_points(
            matched_src_hmg_pts,
            homography)
        # Count a match as correct when the homography-projected source point
        # lands near its matched destination point. The 8.0 px threshold
        # mirrors max_dist_thr in PointDetectionMatchRatio and is an
        # assumption of this completion.
        dists = (preds_dst_hmg_pts[:, :2] - matched_dst_hmg_pts[:, :2]).norm(dim=1)
        residual_count = int((dists < 8.0).sum().item())
        self.sum_metric += residual_count
        self.global_sum_metric += residual_count
        self.num_inst += src_pts_count2
        self.global_num_inst += src_pts_count2
        print("ratio_residual={}".format(float(residual_count) / src_pts_count2))
@staticmethod
def normalize_homography(homography):
homography /= homography[2, 2]
@staticmethod
def calc_homography_inv(homography):
homography_inv = homography.inverse()
PointDetectionMatchRatio.normalize_homography(homography_inv)
return homography_inv
@staticmethod
def calc_homogeneous_coords(pts):
hmg_pts = torch.cat((pts, torch.ones((pts.shape[0], 1), dtype=pts.dtype, device=pts.device)), dim=1)
return hmg_pts
@staticmethod
def calc_cartesian_coords(hmg_pts):
pts = hmg_pts[:, :2]
return pts
@staticmethod
def transform_points(src_hmg_pts,
homography):
# print("transform_points -> src_hmg_pts.shape={}".format(src_hmg_pts.shape))
# print("transform_points -> homography.shape={}".format(homography.shape))
# print("homography={}".format(homography))
# print("transform_points -> src_hmg_pts={}".format(src_hmg_pts[:10, :].int()))
dst_hmg_pts = torch.matmul(src_hmg_pts, homography.t())
# print("transform_points -> dst_hmg_pts={}".format(dst_hmg_pts[:10, :].int()))
# print("transform_points -> dst_hmg_pts.shape={}".format(dst_hmg_pts.shape))
dst_hmg_pts /= dst_hmg_pts[:, 2:]
return dst_hmg_pts
@staticmethod
def calc_inside_pts_mask(pts,
img_size):
eps = 1e-3
border_size = 1.0
border = border_size - eps
mask = (pts[:, 0] >= border) & (pts[:, 0] < img_size[0] - border) &\
(pts[:, 1] >= border) & (pts[:, 1] < img_size[1] - border)
return mask
@staticmethod
def filter_inside_points(src_hmg_pts,
homography,
dst_img_size):
dst_hmg_pts = PointDetectionMatchRatio.transform_points(src_hmg_pts, homography)
mask = PointDetectionMatchRatio.calc_inside_pts_mask(dst_hmg_pts, dst_img_size)
return src_hmg_pts[mask]
@staticmethod
def filter_best_points(hmg_pts,
descs,
max_count):
return hmg_pts[:max_count], descs[:max_count]
@staticmethod
def calc_pairwise_distances(x, y):
diff = x.unsqueeze(1) - y.unsqueeze(0)
return torch.sum(diff * diff, dim=-1).sqrt()
@staticmethod
def saturate_distance_matrix(dist_mat,
max_dist_thr,
max_dist_sat):
dist_mat[dist_mat > max_dist_thr] = max_dist_sat
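# A minimal sketch (illustrative helper and made-up descriptors) of the
# nearest/second-nearest distance ratio test used in `update_alt` above: a
# source descriptor is matched when its best destination match is clearly
# better than the runner-up.
def _example_descriptor_ratio_test(dist_ratio_thr=0.95):
    src_descs = torch.tensor([[0.0, 0.0], [1.0, 1.0]])
    dst_descs = torch.tensor([[0.0, 0.1], [5.0, 5.0], [1.1, 1.0]])
    dist_mat = PointDescriptionMatchRatio.calc_pairwise_distances(x=src_descs, y=dst_descs)
    vals, inds = dist_mat.topk(k=2, dim=1, largest=False, sorted=True)
    keep_mask = (vals[:, 0] / vals[:, 1]) < dist_ratio_thr
    return inds[:, 0][keep_mask]  # indices of confident matches in dst_descs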
| 17,535
| 34.56998
| 113
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/metrics/cls_metrics.py
|
"""
Evaluation Metrics for Image Classification.
"""
import numpy as np
import torch
from .metric import EvalMetric
__all__ = ['Top1Error', 'TopKError']
class Accuracy(EvalMetric):
"""
Computes accuracy classification score.
Parameters:
----------
axis : int, default 1
        The axis that represents classes.
name : str, default 'accuracy'
Name of this metric instance for display.
output_names : list of str, or None, default None
Name of predictions that should be used when updating with update_dict.
By default include all predictions.
label_names : list of str, or None, default None
Name of labels that should be used when updating with update_dict.
By default include all labels.
"""
def __init__(self,
axis=1,
name="accuracy",
output_names=None,
label_names=None):
super(Accuracy, self).__init__(
name,
axis=axis,
output_names=output_names,
label_names=label_names,
has_global_stats=True)
self.axis = axis
def update(self, labels, preds):
"""
Updates the internal evaluation result.
Parameters:
----------
labels : torch.Tensor
The labels of the data with class indices as values, one per sample.
preds : torch.Tensor
Prediction values for samples. Each prediction value can either be the class index,
or a vector of likelihoods for all classes.
"""
assert (len(labels) == len(preds))
with torch.no_grad():
if preds.shape != labels.shape:
pred_label = torch.argmax(preds, dim=self.axis)
else:
pred_label = preds
pred_label = pred_label.cpu().numpy().astype(np.int32)
label = labels.cpu().numpy().astype(np.int32)
label = label.flat
pred_label = pred_label.flat
num_correct = (pred_label == label).sum()
self.sum_metric += num_correct
self.global_sum_metric += num_correct
self.num_inst += len(pred_label)
self.global_num_inst += len(pred_label)
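# A minimal usage sketch for `Accuracy` (illustrative helper, made-up
# tensors): class scores of shape (batch, classes) are argmax-ed along
# `axis` and compared against the integer labels.
def _example_accuracy():
    metric = Accuracy()
    labels = torch.tensor([0, 1, 2])
    preds = torch.tensor([[0.9, 0.1, 0.0],
                          [0.2, 0.7, 0.1],
                          [0.3, 0.3, 0.4]])
    metric.update(labels, preds)
    return metric.get()  # ("accuracy", 1.0)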
class TopKAccuracy(EvalMetric):
"""
Computes top k predictions accuracy.
Parameters:
----------
top_k : int, default 1
Whether targets are in top k predictions.
name : str, default 'top_k_accuracy'
Name of this metric instance for display.
torch_like : bool, default True
Whether to use pytorch-like algorithm.
output_names : list of str, or None, default None
Name of predictions that should be used when updating with update_dict.
By default include all predictions.
label_names : list of str, or None, default None
Name of labels that should be used when updating with update_dict.
By default include all labels.
"""
def __init__(self,
top_k=1,
name="top_k_accuracy",
torch_like=True,
output_names=None,
label_names=None):
super(TopKAccuracy, self).__init__(
name,
top_k=top_k,
output_names=output_names,
label_names=label_names,
has_global_stats=True)
self.top_k = top_k
assert (self.top_k > 1), "Please use Accuracy if top_k is no more than 1"
self.name += "_{:d}".format(self.top_k)
self.torch_like = torch_like
def update(self, labels, preds):
"""
Updates the internal evaluation result.
Parameters:
----------
labels : torch.Tensor
The labels of the data.
preds : torch.Tensor
Predicted values.
"""
assert (len(labels) == len(preds))
with torch.no_grad():
if self.torch_like:
_, pred = preds.topk(k=self.top_k, dim=1, largest=True, sorted=True)
pred = pred.t()
correct = pred.eq(labels.view(1, -1).expand_as(pred))
num_correct = correct.flatten().float().sum(dim=0, keepdim=True).item()
num_samples = labels.size(0)
assert (num_correct <= num_samples)
self.sum_metric += num_correct
self.global_sum_metric += num_correct
self.num_inst += num_samples
self.global_num_inst += num_samples
else:
assert(len(preds.shape) <= 2), "Predictions should be no more than 2 dims"
                # argpartition needs the raw scores; casting them to int
                # would destroy the ordering.
                pred_label = preds.cpu().numpy().astype(np.float32)
                pred_label = np.argpartition(pred_label, -self.top_k)
label = labels.cpu().numpy().astype(np.int32)
assert (len(label) == len(pred_label))
num_samples = pred_label.shape[0]
num_dims = len(pred_label.shape)
if num_dims == 1:
num_correct = (pred_label.flat == label.flat).sum()
self.sum_metric += num_correct
self.global_sum_metric += num_correct
elif num_dims == 2:
num_classes = pred_label.shape[1]
top_k = min(num_classes, self.top_k)
for j in range(top_k):
num_correct = (pred_label[:, num_classes - 1 - j].flat == label.flat).sum()
self.sum_metric += num_correct
self.global_sum_metric += num_correct
self.num_inst += num_samples
self.global_num_inst += num_samples
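# A minimal usage sketch for the torch-like top-k path above (illustrative
# helper, made-up tensors): a sample counts as correct when its label
# appears among the k highest-scoring classes.
def _example_top_k_accuracy():
    metric = TopKAccuracy(top_k=2)
    labels = torch.tensor([2, 0])
    preds = torch.tensor([[0.1, 0.5, 0.4],   # top-2 classes {1, 2} -> hit
                          [0.3, 0.6, 0.1]])  # top-2 classes {1, 0} -> hit
    metric.update(labels, preds)
    return metric.get()  # ("top_k_accuracy_2", 1.0)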
class Top1Error(Accuracy):
"""
Computes top-1 error (inverted accuracy classification score).
Parameters:
----------
axis : int, default 1
The axis that represents classes.
name : str, default 'top_1_error'
Name of this metric instance for display.
output_names : list of str, or None, default None
Name of predictions that should be used when updating with update_dict.
By default include all predictions.
label_names : list of str, or None, default None
Name of labels that should be used when updating with update_dict.
By default include all labels.
"""
def __init__(self,
axis=1,
name="top_1_error",
output_names=None,
label_names=None):
super(Top1Error, self).__init__(
axis=axis,
name=name,
output_names=output_names,
label_names=label_names)
def get(self):
"""
Gets the current evaluation result.
Returns:
-------
names : list of str
Name of the metrics.
values : list of float
Value of the evaluations.
"""
if self.num_inst == 0:
return self.name, float("nan")
else:
return self.name, 1.0 - self.sum_metric / self.num_inst
class TopKError(TopKAccuracy):
"""
Computes top-k error (inverted top k predictions accuracy).
Parameters:
----------
    top_k : int, default 1
        Whether targets are out of top k predictions.
name : str, default 'top_k_error'
Name of this metric instance for display.
torch_like : bool, default True
Whether to use pytorch-like algorithm.
output_names : list of str, or None, default None
Name of predictions that should be used when updating with update_dict.
By default include all predictions.
label_names : list of str, or None, default None
Name of labels that should be used when updating with update_dict.
By default include all labels.
"""
def __init__(self,
top_k=1,
name="top_k_error",
torch_like=True,
output_names=None,
label_names=None):
name_ = name
super(TopKError, self).__init__(
top_k=top_k,
name=name,
torch_like=torch_like,
output_names=output_names,
label_names=label_names)
self.name = name_.replace("_k_", "_{}_".format(top_k))
def get(self):
"""
Gets the current evaluation result.
Returns:
-------
names : list of str
Name of the metrics.
values : list of float
Value of the evaluations.
"""
if self.num_inst == 0:
return self.name, float("nan")
else:
return self.name, 1.0 - self.sum_metric / self.num_inst
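# A minimal sketch (illustrative helper) of the name mangling in
# `TopKError.__init__` above: the placeholder "_k_" in the display name is
# replaced with the actual k, and `get` reports 1 - accuracy.
def _example_top_k_error_name():
    metric = TopKError(top_k=5)
    return metric.name  # "top_5_error"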
| 8,783
| 33.996016
| 99
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/metrics/__init__.py
| 0
| 0
| 0
|
py
|
|
imgclsmob
|
imgclsmob-master/pytorch/metrics/det_metrics.py
|
"""
Evaluation Metrics for Object Detection.
"""
import warnings
import numpy as np
import mxnet as mx
__all__ = ['CocoDetMApMetric']
class CocoDetMApMetric(mx.metric.EvalMetric):
"""
Detection metric for COCO bbox task.
Parameters:
----------
img_height : int
Processed image height.
coco_annotations_file_path : str
        COCO annotation file path.
    contiguous_id_to_json : list of int
        Processed IDs.
    validation_ids : list of int or None, default None
        COCO image IDs to evaluate on (all images are used if None).
    use_file : bool, default False
        Whether to use temporary file for estimation.
    score_thresh : float, default 0.05
        Detection results with confidence scores smaller than `score_thresh` will be discarded before saving to results.
data_shape : tuple of int, default is None
If `data_shape` is provided as (height, width), we will rescale bounding boxes when saving the predictions.
This is helpful when SSD/YOLO box predictions cannot be rescaled conveniently. Note that the data_shape must be
fixed for all validation images.
post_affine : a callable function with input signature (orig_w, orig_h, out_w, out_h)
If not None, the bounding boxes will be affine transformed rather than simply scaled.
name : str, default 'mAP'
Name of this metric instance for display.
"""
def __init__(self,
img_height,
coco_annotations_file_path,
contiguous_id_to_json,
validation_ids=None,
use_file=False,
score_thresh=0.05,
data_shape=None,
post_affine=None,
name="mAP"):
super(CocoDetMApMetric, self).__init__(name=name)
self.img_height = img_height
self.coco_annotations_file_path = coco_annotations_file_path
self.contiguous_id_to_json = contiguous_id_to_json
self.validation_ids = validation_ids
self.use_file = use_file
self.score_thresh = score_thresh
self.current_idx = 0
self.coco_result = []
if isinstance(data_shape, (tuple, list)):
assert len(data_shape) == 2, "Data shape must be (height, width)"
elif not data_shape:
data_shape = None
else:
raise ValueError("data_shape must be None or tuple of int as (height, width)")
self._data_shape = data_shape
if post_affine is not None:
assert self._data_shape is not None, "Using post affine transform requires data_shape"
self._post_affine = post_affine
else:
self._post_affine = None
from pycocotools.coco import COCO
self.gt = COCO(self.coco_annotations_file_path)
self._img_ids = sorted(self.gt.getImgIds())
def reset(self):
self.current_idx = 0
self.coco_result = []
def get(self):
"""
Get evaluation metrics.
"""
if self.current_idx != len(self._img_ids):
warnings.warn("Recorded {} out of {} validation images, incomplete results".format(
self.current_idx, len(self._img_ids)))
from pycocotools.coco import COCO
gt = COCO(self.coco_annotations_file_path)
import tempfile
import json
with tempfile.NamedTemporaryFile(mode="w", suffix=".json") as f:
json.dump(self.coco_result, f)
f.flush()
pred = gt.loadRes(f.name)
from pycocotools.cocoeval import COCOeval
coco_eval = COCOeval(gt, pred, "bbox")
if self.validation_ids is not None:
coco_eval.params.imgIds = self.validation_ids
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
return self.name, tuple(coco_eval.stats[:3])
def update2(self,
pred_bboxes,
pred_labels,
pred_scores):
"""
Update internal buffer with latest predictions. Note that the statistics are not available until you call
self.get() to return the metrics.
Parameters:
----------
pred_bboxes : mxnet.NDArray or numpy.ndarray
Prediction bounding boxes with shape `B, N, 4`.
Where B is the size of mini-batch, N is the number of bboxes.
pred_labels : mxnet.NDArray or numpy.ndarray
Prediction bounding boxes labels with shape `B, N`.
pred_scores : mxnet.NDArray or numpy.ndarray
Prediction bounding boxes scores with shape `B, N`.
"""
def as_numpy(a):
"""
Convert a (list of) mx.NDArray into numpy.ndarray
"""
if isinstance(a, (list, tuple)):
out = [x.asnumpy() if isinstance(x, mx.nd.NDArray) else x for x in a]
return np.concatenate(out, axis=0)
elif isinstance(a, mx.nd.NDArray):
a = a.asnumpy()
return a
for pred_bbox, pred_label, pred_score in zip(*[as_numpy(x) for x in [pred_bboxes, pred_labels, pred_scores]]):
valid_pred = np.where(pred_label.flat >= 0)[0]
            pred_bbox = pred_bbox[valid_pred, :].astype(np.float64)
            pred_label = pred_label.flat[valid_pred].astype(int)
            pred_score = pred_score.flat[valid_pred].astype(np.float64)
imgid = self._img_ids[self.current_idx]
self.current_idx += 1
affine_mat = None
if self._data_shape is not None:
entry = self.gt.loadImgs(imgid)[0]
orig_height = entry["height"]
orig_width = entry["width"]
height_scale = float(orig_height) / self._data_shape[0]
width_scale = float(orig_width) / self._data_shape[1]
if self._post_affine is not None:
affine_mat = self._post_affine(orig_width, orig_height, self._data_shape[1], self._data_shape[0])
else:
height_scale, width_scale = (1.0, 1.0)
# for each bbox detection in each image
for bbox, label, score in zip(pred_bbox, pred_label, pred_score):
if label not in self.contiguous_id_to_json:
# ignore non-exist class
continue
if score < self.score_thresh:
continue
category_id = self.contiguous_id_to_json[label]
# rescale bboxes/affine transform bboxes
if affine_mat is not None:
bbox[0:2] = self.affine_transform(bbox[0:2], affine_mat)
bbox[2:4] = self.affine_transform(bbox[2:4], affine_mat)
else:
bbox[[0, 2]] *= width_scale
bbox[[1, 3]] *= height_scale
# convert [xmin, ymin, xmax, ymax] to [xmin, ymin, w, h]
bbox[2:4] -= (bbox[:2] - 1)
self.coco_result.append({"image_id": imgid,
"category_id": category_id,
"bbox": bbox[:4].tolist(),
"score": score})
def update(self, labels, preds):
"""
Updates the internal evaluation result.
Parameters:
----------
labels : torch.Tensor
The labels of the data.
preds : torch.Tensor
Predicted values.
"""
assert (labels is not None)
# label = labels.cpu().detach().numpy()
pred = preds.cpu().detach().numpy()
det_bboxes = []
det_ids = []
det_scores = []
bboxes = pred[:, :, :4]
ids = pred[:, :, 4]
scores = pred[:, :, 5]
det_ids.append(ids)
det_scores.append(scores)
det_bboxes.append(bboxes.clip(0, self.img_height))
self.update2(det_bboxes, det_ids, det_scores)
@staticmethod
def affine_transform(pt, t):
"""
Apply affine transform to a bounding box given transform matrix t.
Parameters:
----------
pt : numpy.ndarray
Bounding box with shape (1, 2).
t : numpy.ndarray
Transformation matrix with shape (2, 3).
Returns:
-------
numpy.ndarray
New bounding box with shape (1, 2).
"""
new_pt = np.array([pt[0], pt[1], 1.], dtype=np.float32).T
new_pt = np.dot(t, new_pt)
return new_pt[:2]
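# A minimal sketch for `affine_transform` above (illustrative helper,
# made-up matrix): the 2x3 matrix maps a point in homogeneous form
# [x, y, 1] into the new frame.
def _example_affine_transform():
    t = np.array([[1.0, 0.0, 5.0],
                  [0.0, 1.0, 7.0]], dtype=np.float32)  # pure translation
    pt = np.array([10.0, 20.0], dtype=np.float32)
    return CocoDetMApMetric.affine_transform(pt, t)  # [15., 27.]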
| 8,548
| 36.495614
| 119
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/metrics/hpe_metrics.py
|
"""
Evaluation Metrics for Human Pose Estimation.
"""
from .metric import EvalMetric
__all__ = ['CocoHpeOksApMetric']
class CocoHpeOksApMetric(EvalMetric):
"""
Detection metric for COCO Keypoint task.
Parameters:
----------
coco_annotations_file_path : str
        COCO annotation file path.
    pose_postprocessing_fn : func
        A function for pose post-processing.
    use_file : bool, default False
        Whether to use temporary file for estimation.
    validation_ids : list of int or None, default None
        COCO image IDs to evaluate on (all images are used if None).
name : str, default 'CocoOksAp'
Name of this metric instance for display.
"""
def __init__(self,
coco_annotations_file_path,
pose_postprocessing_fn,
validation_ids=None,
use_file=False,
name="CocoOksAp"):
super(CocoHpeOksApMetric, self).__init__(name=name)
self.coco_annotations_file_path = coco_annotations_file_path
self.pose_postprocessing_fn = pose_postprocessing_fn
self.validation_ids = validation_ids
self.use_file = use_file
self.coco_result = []
def reset(self):
self.coco_result = []
def get(self):
"""
Get evaluation metrics.
"""
import copy
from pycocotools.coco import COCO
gt = COCO(self.coco_annotations_file_path)
if self.use_file:
import tempfile
import json
with tempfile.NamedTemporaryFile(mode="w", suffix=".json") as f:
json.dump(self.coco_result, f)
f.flush()
pred = gt.loadRes(f.name)
else:
def calc_pred(coco, anns):
import numpy as np
import copy
pred = COCO()
pred.dataset["images"] = [img for img in coco.dataset["images"]]
annsImgIds = [ann["image_id"] for ann in anns]
assert set(annsImgIds) == (set(annsImgIds) & set(coco.getImgIds()))
pred.dataset["categories"] = copy.deepcopy(coco.dataset["categories"])
for id, ann in enumerate(anns):
s = ann["keypoints"]
x = s[0::3]
y = s[1::3]
x0, x1, y0, y1 = np.min(x), np.max(x), np.min(y), np.max(y)
ann["area"] = (x1 - x0) * (y1 - y0)
ann["id"] = id + 1
ann["bbox"] = [x0, y0, x1 - x0, y1 - y0]
pred.dataset["annotations"] = anns
pred.createIndex()
return pred
pred = calc_pred(gt, copy.deepcopy(self.coco_result))
from pycocotools.cocoeval import COCOeval
coco_eval = COCOeval(gt, pred, "keypoints")
if self.validation_ids is not None:
coco_eval.params.imgIds = self.validation_ids
coco_eval.params.useSegm = None
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
return self.name, tuple(coco_eval.stats[:3])
def update(self, labels, preds):
"""
Updates the internal evaluation result.
Parameters:
----------
labels : torch.Tensor
The labels of the data.
preds : torch.Tensor
Predicted values.
"""
label = labels.cpu().detach().numpy()
pred = preds.cpu().detach().numpy()
pred_pts_score, pred_person_score, label_img_id = self.pose_postprocessing_fn(pred, label)
for idx in range(len(pred_pts_score)):
image_id = int(label_img_id[idx])
kpt = pred_pts_score[idx].flatten().tolist()
score = float(pred_person_score[idx])
self.coco_result.append({
"image_id": image_id,
"category_id": 1,
"keypoints": kpt,
"score": score})
| 3,966
| 32.058333
| 98
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/metrics/asr_metrics.py
|
"""
Evaluation Metrics for Automatic Speech Recognition (ASR).
"""
from .metric import EvalMetric
__all__ = ['WER']
class WER(EvalMetric):
"""
Computes Word Error Rate (WER) for Automatic Speech Recognition (ASR).
Parameters:
----------
vocabulary : list of str
Vocabulary of the dataset.
name : str, default 'wer'
Name of this metric instance for display.
output_names : list of str, or None, default None
Name of predictions that should be used when updating with update_dict.
By default include all predictions.
label_names : list of str, or None, default None
Name of labels that should be used when updating with update_dict.
By default include all labels.
"""
def __init__(self,
vocabulary,
name="wer",
output_names=None,
label_names=None):
super(WER, self).__init__(
name=name,
output_names=output_names,
label_names=label_names,
has_global_stats=True)
self.vocabulary = vocabulary
self.ctc_decoder = CtcDecoder(vocabulary=vocabulary)
def update(self, labels, preds):
"""
Updates the internal evaluation result.
Parameters:
----------
labels : torch.Tensor
The labels of the data with class indices as values, one per sample.
preds : torch.Tensor
Prediction values for samples. Each prediction value can either be the class index,
or a vector of likelihoods for all classes.
"""
import editdistance
labels_code = labels.cpu().numpy()
labels = []
for label_code in labels_code:
label_text = "".join([self.ctc_decoder.labels_map[c] for c in label_code])
labels.append(label_text)
preds = preds[0]
greedy_predictions = preds.transpose(1, 2).log_softmax(dim=-1).argmax(dim=-1, keepdim=False).cpu().numpy()
preds = self.ctc_decoder(greedy_predictions)
assert (len(labels) == len(preds))
for pred, label in zip(preds, labels):
pred = pred.split()
label = label.split()
word_error_count = editdistance.eval(label, pred)
word_count = max(len(label), len(pred))
assert (word_error_count <= word_count)
self.sum_metric += word_error_count
self.global_sum_metric += word_error_count
self.num_inst += word_count
self.global_num_inst += word_count
class CtcDecoder(object):
"""
CTC decoder (to decode a sequence of labels to words).
Parameters:
----------
vocabulary : list of str
Vocabulary of the dataset.
"""
def __init__(self,
vocabulary):
super().__init__()
self.blank_id = len(vocabulary)
self.labels_map = dict([(i, vocabulary[i]) for i in range(len(vocabulary))])
def __call__(self,
predictions):
"""
Decode a sequence of labels to words.
Parameters:
----------
predictions : np.array of int or list of list of int
Tensor with predicted labels.
Returns:
-------
list of str
Words.
"""
hypotheses = []
for prediction in predictions:
decoded_prediction = []
previous = self.blank_id
for p in prediction:
if (p != previous or previous == self.blank_id) and p != self.blank_id:
decoded_prediction.append(p)
previous = p
hypothesis = "".join([self.labels_map[c] for c in decoded_prediction])
hypotheses.append(hypothesis)
return hypotheses
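# A minimal sketch (illustrative helper, made-up vocabulary) of the greedy
# CTC decoding rule above and the WER formula used in `WER.update`:
# repeated labels are collapsed, blanks are removed, and WER is the word
# edit distance divided by the longer word count.
def _example_ctc_decode_and_wer():
    import editdistance
    decoder = CtcDecoder(vocabulary=["a", "b", " "])  # blank id is 3
    hyp = decoder([[0, 0, 3, 2, 1, 1]])[0]  # "aa", blank, " ", "bb" -> "a b"
    ref = "a a"
    word_error_count = editdistance.eval(ref.split(), hyp.split())  # 1 substitution
    return word_error_count / max(len(ref.split()), len(hyp.split()))  # 0.5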
| 3,814
| 30.528926
| 114
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/metrics/metric.py
|
"""
Several base metrics.
"""
__all__ = ['EvalMetric', 'CompositeEvalMetric', 'check_label_shapes']
from collections import OrderedDict
def check_label_shapes(labels, preds, shape=False):
"""
Helper function for checking shape of label and prediction.
Parameters:
----------
labels : list of torch.Tensor
The labels of the data.
preds : list of torch.Tensor
Predicted values.
shape : boolean
If True, check the shape of labels and preds, otherwise only check their length.
"""
if not shape:
label_shape, pred_shape = len(labels), len(preds)
else:
label_shape, pred_shape = labels.shape, preds.shape
if label_shape != pred_shape:
raise ValueError("Shape of labels {} does not match shape of predictions {}".format(label_shape, pred_shape))
class EvalMetric(object):
"""
Base class for all evaluation metrics.
Parameters:
----------
name : str
Name of this metric instance for display.
output_names : list of str, or None, default None
Name of predictions that should be used when updating with update_dict.
By default include all predictions.
label_names : list of str, or None, default None
Name of labels that should be used when updating with update_dict.
By default include all labels.
"""
def __init__(self,
name,
output_names=None,
label_names=None,
**kwargs):
super(EvalMetric, self).__init__()
self.name = str(name)
self.output_names = output_names
self.label_names = label_names
self._has_global_stats = kwargs.pop("has_global_stats", False)
self._kwargs = kwargs
self.reset()
def __str__(self):
return "EvalMetric: {}".format(dict(self.get_name_value()))
def get_config(self):
"""
Save configurations of metric. Can be recreated from configs with metric.create(**config).
"""
config = self._kwargs.copy()
config.update({
"metric": self.__class__.__name__,
"name": self.name,
"output_names": self.output_names,
"label_names": self.label_names})
return config
def update_dict(self, label, pred):
"""
Update the internal evaluation with named label and pred.
Parameters:
----------
        label : OrderedDict of str -> torch.Tensor
            Name to array mapping for labels.
        pred : OrderedDict of str -> torch.Tensor
            Name to array mapping of predicted outputs.
"""
if self.output_names is not None:
pred = [pred[name] for name in self.output_names]
else:
pred = list(pred.values())
if self.label_names is not None:
label = [label[name] for name in self.label_names]
else:
label = list(label.values())
self.update(label, pred)
def update(self, labels, preds):
"""
Updates the internal evaluation result.
Parameters:
----------
labels : torch.Tensor
The labels of the data.
preds : torch.Tensor
Predicted values.
"""
raise NotImplementedError()
def reset(self):
"""
Resets the internal evaluation result to initial state.
"""
self.num_inst = 0
self.sum_metric = 0.0
self.global_num_inst = 0
self.global_sum_metric = 0.0
def reset_local(self):
"""
Resets the local portion of the internal evaluation results to initial state.
"""
self.num_inst = 0
self.sum_metric = 0.0
def get(self):
"""
Gets the current evaluation result.
Returns:
-------
names : list of str
Name of the metrics.
values : list of float
Value of the evaluations.
"""
if self.num_inst == 0:
return self.name, float("nan")
else:
return self.name, self.sum_metric / self.num_inst
def get_global(self):
"""
Gets the current global evaluation result.
Returns:
-------
names : list of str
Name of the metrics.
values : list of float
Value of the evaluations.
"""
if self._has_global_stats:
if self.global_num_inst == 0:
return self.name, float("nan")
else:
return self.name, self.global_sum_metric / self.global_num_inst
else:
return self.get()
def get_name_value(self):
"""
Returns zipped name and value pairs.
Returns:
-------
list of tuples
A (name, value) tuple list.
"""
name, value = self.get()
if not isinstance(name, list):
name = [name]
if not isinstance(value, list):
value = [value]
return list(zip(name, value))
def get_global_name_value(self):
"""
Returns zipped name and value pairs for global results.
Returns:
-------
list of tuples
A (name, value) tuple list.
"""
if self._has_global_stats:
name, value = self.get_global()
if not isinstance(name, list):
name = [name]
if not isinstance(value, list):
value = [value]
return list(zip(name, value))
else:
return self.get_name_value()
class CompositeEvalMetric(EvalMetric):
"""
Manages multiple evaluation metrics.
Parameters:
----------
name : str, default 'composite'
Name of this metric instance for display.
output_names : list of str, or None, default None
Name of predictions that should be used when updating with update_dict.
By default include all predictions.
label_names : list of str, or None, default None
Name of labels that should be used when updating with update_dict.
By default include all labels.
"""
def __init__(self,
name="composite",
output_names=None,
label_names=None):
super(CompositeEvalMetric, self).__init__(
name,
output_names=output_names,
label_names=label_names,
has_global_stats=True)
self.metrics = []
def add(self, metric):
"""
Adds a child metric.
Parameters:
----------
metric
A metric instance.
"""
self.metrics.append(metric)
def update_dict(self, labels, preds):
if self.label_names is not None:
labels = OrderedDict([i for i in labels.items()
if i[0] in self.label_names])
if self.output_names is not None:
preds = OrderedDict([i for i in preds.items()
if i[0] in self.output_names])
for metric in self.metrics:
metric.update_dict(labels, preds)
def update(self, labels, preds):
"""
Updates the internal evaluation result.
Parameters:
----------
labels : torch.Tensor
The labels of the data.
preds : torch.Tensor
Predicted values.
"""
for metric in self.metrics:
metric.update(labels, preds)
def reset(self):
"""
Resets the internal evaluation result to initial state.
"""
try:
for metric in self.metrics:
metric.reset()
except AttributeError:
pass
def reset_local(self):
"""
Resets the local portion of the internal evaluation results to initial state.
"""
try:
for metric in self.metrics:
metric.reset_local()
except AttributeError:
pass
def get(self):
"""
Returns the current evaluation result.
Returns:
-------
names : list of str
Name of the metrics.
values : list of float
Value of the evaluations.
"""
names = []
values = []
for metric in self.metrics:
name, value = metric.get()
name = [name]
value = [value]
names.extend(name)
values.extend(value)
return names, values
def get_global(self):
"""
        Returns the current global evaluation result.
Returns:
-------
names : list of str
Name of the metrics.
values : list of float
Value of the evaluations.
"""
names = []
values = []
for metric in self.metrics:
name, value = metric.get_global()
name = [name]
value = [value]
names.extend(name)
values.extend(value)
return names, values
def get_config(self):
config = super(CompositeEvalMetric, self).get_config()
config.update({"metrics": [i.get_config() for i in self.metrics]})
return config
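# A minimal sketch of composing metrics (the child metric here is a trivial
# illustrative stand-in, not one of the real metric classes).
def _example_composite_metric():
    class _ConstMetric(EvalMetric):
        def update(self, labels, preds):
            self.sum_metric += 1.0
            self.num_inst += 1
    composite = CompositeEvalMetric()
    composite.add(_ConstMetric(name="const_a"))
    composite.add(_ConstMetric(name="const_b"))
    composite.update(labels=None, preds=None)
    return composite.get()  # (["const_a", "const_b"], [1.0, 1.0])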
| 9,289
| 27.323171
| 117
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/datasets/imagenet1k_cls_dataset.py
|
"""
ImageNet-1K classification dataset.
"""
import os
import math
import cv2
import numpy as np
from PIL import Image
from torchvision.datasets import ImageFolder
import torchvision.transforms as transforms
from .dataset_metainfo import DatasetMetaInfo
class ImageNet1K(ImageFolder):
"""
ImageNet-1K classification dataset.
Parameters:
----------
root : str, default '~/.torch/datasets/imagenet'
Path to the folder stored the dataset.
mode : str, default 'train'
'train', 'val', or 'test'.
transform : function, default None
        A function that transforms the input image.
"""
def __init__(self,
root=os.path.join("~", ".torch", "datasets", "imagenet"),
mode="train",
transform=None):
split = "train" if mode == "train" else "val"
root = os.path.join(root, split)
super(ImageNet1K, self).__init__(root=root, transform=transform)
class ImageNet1KMetaInfo(DatasetMetaInfo):
"""
Descriptor of ImageNet-1K dataset.
"""
def __init__(self):
super(ImageNet1KMetaInfo, self).__init__()
self.label = "ImageNet1K"
self.short_label = "imagenet"
self.root_dir_name = "imagenet"
self.dataset_class = ImageNet1K
self.num_training_samples = None
self.in_channels = 3
self.num_classes = 1000
self.input_image_size = (224, 224)
self.resize_inv_factor = 0.875
self.train_metric_capts = ["Train.Top1"]
self.train_metric_names = ["Top1Error"]
self.train_metric_extra_kwargs = [{"name": "err-top1"}]
self.val_metric_capts = ["Val.Top1", "Val.Top5"]
self.val_metric_names = ["Top1Error", "TopKError"]
self.val_metric_extra_kwargs = [{"name": "err-top1"}, {"name": "err-top5", "top_k": 5}]
self.saver_acc_ind = 1
self.train_transform = imagenet_train_transform
self.val_transform = imagenet_val_transform
self.test_transform = imagenet_val_transform
self.ml_type = "imgcls"
self.use_cv_resize = False
self.mean_rgb = (0.485, 0.456, 0.406)
self.std_rgb = (0.229, 0.224, 0.225)
self.interpolation = Image.BILINEAR
def add_dataset_parser_arguments(self,
parser,
work_dir_path):
"""
Create python script parameters (for ImageNet-1K dataset metainfo).
Parameters:
----------
parser : ArgumentParser
ArgumentParser instance.
work_dir_path : str
Path to working directory.
"""
super(ImageNet1KMetaInfo, self).add_dataset_parser_arguments(parser, work_dir_path)
parser.add_argument(
"--input-size",
type=int,
default=self.input_image_size[0],
help="size of the input for model")
parser.add_argument(
"--resize-inv-factor",
type=float,
default=self.resize_inv_factor,
help="inverted ratio for input image crop")
parser.add_argument(
"--use-cv-resize",
action="store_true",
help="use OpenCV resize preprocessing")
parser.add_argument(
"--mean-rgb",
nargs=3,
type=float,
default=self.mean_rgb,
help="Mean of RGB channels in the dataset")
parser.add_argument(
"--std-rgb",
nargs=3,
type=float,
default=self.std_rgb,
help="STD of RGB channels in the dataset")
parser.add_argument(
"--interpolation",
type=int,
default=self.interpolation,
help="Preprocessing interpolation")
def update(self,
args):
"""
Update ImageNet-1K dataset metainfo after user customizing.
Parameters:
----------
args : ArgumentParser
Main script arguments.
"""
super(ImageNet1KMetaInfo, self).update(args)
self.input_image_size = (args.input_size, args.input_size)
self.use_cv_resize = args.use_cv_resize
self.mean_rgb = args.mean_rgb
self.std_rgb = args.std_rgb
self.interpolation = args.interpolation
def imagenet_train_transform(ds_metainfo,
jitter_param=0.4):
"""
Create image transform sequence for training subset.
Parameters:
----------
ds_metainfo : DatasetMetaInfo
ImageNet-1K dataset metainfo.
jitter_param : float
How much to jitter values.
Returns:
-------
Compose
Image transform sequence.
"""
input_image_size = ds_metainfo.input_image_size
return transforms.Compose([
transforms.RandomResizedCrop(size=input_image_size, interpolation=ds_metainfo.interpolation),
transforms.RandomHorizontalFlip(),
transforms.ColorJitter(
brightness=jitter_param,
contrast=jitter_param,
saturation=jitter_param),
transforms.ToTensor(),
transforms.Normalize(
mean=ds_metainfo.mean_rgb,
std=ds_metainfo.std_rgb)
])
def imagenet_val_transform(ds_metainfo):
"""
Create image transform sequence for validation subset.
Parameters:
----------
ds_metainfo : DatasetMetaInfo
ImageNet-1K dataset metainfo.
Returns:
-------
Compose
Image transform sequence.
"""
input_image_size = ds_metainfo.input_image_size
resize_value = calc_val_resize_value(
input_image_size=ds_metainfo.input_image_size,
resize_inv_factor=ds_metainfo.resize_inv_factor)
return transforms.Compose([
CvResize(size=resize_value, interpolation=ds_metainfo.interpolation) if ds_metainfo.use_cv_resize else
transforms.Resize(size=resize_value, interpolation=ds_metainfo.interpolation),
transforms.CenterCrop(size=input_image_size),
transforms.ToTensor(),
transforms.Normalize(
mean=ds_metainfo.mean_rgb,
std=ds_metainfo.std_rgb)
])
class CvResize(object):
"""
Resize the input PIL Image to the given size via OpenCV.
Parameters:
----------
size : int or tuple of (W, H)
Size of output image.
interpolation : int, default PIL.Image.BILINEAR
Interpolation method for resizing. By default uses bilinear
interpolation.
"""
def __init__(self,
size,
interpolation=Image.BILINEAR):
self.size = size
self.interpolation = interpolation
def __call__(self, img):
"""
Resize image.
Parameters:
----------
img : PIL.Image
            Input image.
Returns:
-------
PIL.Image
            Resulting image.
"""
if self.interpolation == Image.NEAREST:
cv_interpolation = cv2.INTER_NEAREST
elif self.interpolation == Image.BILINEAR:
cv_interpolation = cv2.INTER_LINEAR
elif self.interpolation == Image.BICUBIC:
cv_interpolation = cv2.INTER_CUBIC
elif self.interpolation == Image.LANCZOS:
cv_interpolation = cv2.INTER_LANCZOS4
else:
            raise ValueError("Unsupported interpolation method: {}".format(self.interpolation))
cv_img = np.array(img)
if isinstance(self.size, int):
w, h = img.size
if (w <= h and w == self.size) or (h <= w and h == self.size):
return img
if w < h:
out_size = (self.size, int(self.size * h / w))
else:
out_size = (int(self.size * w / h), self.size)
cv_img = cv2.resize(cv_img, dsize=out_size, interpolation=cv_interpolation)
return Image.fromarray(cv_img)
else:
cv_img = cv2.resize(cv_img, dsize=self.size, interpolation=cv_interpolation)
return Image.fromarray(cv_img)
def calc_val_resize_value(input_image_size=(224, 224),
resize_inv_factor=0.875):
"""
Calculate image resize value for validation subset.
Parameters:
----------
input_image_size : tuple of 2 int
        Input image size.
resize_inv_factor : float
Resize inverted factor.
Returns:
-------
int
Resize value.
"""
if isinstance(input_image_size, int):
input_image_size = (input_image_size, input_image_size)
resize_value = int(math.ceil(float(input_image_size[0]) / resize_inv_factor))
return resize_value
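# A minimal numeric sketch for `calc_val_resize_value` above: with the
# default crop factor 0.875, a 224x224 network input is center-cropped from
# images whose shorter side is resized to ceil(224 / 0.875) = 256.
def _example_val_resize_value():
    assert calc_val_resize_value(input_image_size=(224, 224), resize_inv_factor=0.875) == 256
    assert calc_val_resize_value(input_image_size=(299, 299), resize_inv_factor=0.875) == 342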
| 8,645
| 30.44
| 110
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/datasets/hpe_dataset.py
|
"""
Keypoint detection (2D single human pose estimation) dataset.
"""
import copy
import logging
import random
import cv2
import numpy as np
import torch
import torch.utils.data as data
class HpeDataset(data.Dataset):
def __init__(self,
cfg,
root,
image_set,
is_train,
transform=None):
self.num_joints = 0
self.pixel_std = 200
self.flip_pairs = []
self.parent_ids = []
self.is_train = is_train
self.root = root
self.image_set = image_set
self.output_path = cfg.OUTPUT_DIR
self.data_format = cfg.DATASET.DATA_FORMAT
self.scale_factor = cfg.DATASET.SCALE_FACTOR
self.rotation_factor = cfg.DATASET.ROT_FACTOR
self.flip = cfg.DATASET.FLIP
self.image_size = cfg.MODEL.IMAGE_SIZE
self.target_type = 'gaussian'
self.heatmap_size = cfg.MODEL.EXTRA.HEATMAP_SIZE
self.sigma = cfg.MODEL.EXTRA.SIGMA
self.transform = transform
self.db = []
def _get_db(self):
raise NotImplementedError
def evaluate(self, cfg, preds, output_dir, *args, **kwargs):
raise NotImplementedError
def __len__(self,):
return len(self.db)
def __getitem__(self, idx):
db_rec = copy.deepcopy(self.db[idx])
image_file = db_rec['image']
filename = db_rec['filename'] if 'filename' in db_rec else ''
imgnum = db_rec['imgnum'] if 'imgnum' in db_rec else ''
if self.data_format == 'zip':
from utils import zipreader
data_numpy = zipreader.imread(
image_file, cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)
else:
data_numpy = cv2.imread(
image_file, cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)
if data_numpy is None:
logging.error('=> fail to read {}'.format(image_file))
raise ValueError('Fail to read {}'.format(image_file))
joints = db_rec['joints_3d']
joints_vis = db_rec['joints_3d_vis']
c = db_rec['center']
s = db_rec['scale']
score = db_rec['score'] if 'score' in db_rec else 1
r = 0
if self.is_train:
sf = self.scale_factor
rf = self.rotation_factor
s = s * np.clip(np.random.randn() * sf + 1, 1 - sf, 1 + sf)
r = np.clip(np.random.randn() * rf, -rf * 2, rf * 2) if random.random() <= 0.6 else 0
if self.flip and random.random() <= 0.5:
data_numpy = data_numpy[:, ::-1, :]
joints, joints_vis = fliplr_joints(joints, joints_vis, data_numpy.shape[1], self.flip_pairs)
c[0] = data_numpy.shape[1] - c[0] - 1
trans = get_affine_transform(c, s, r, self.image_size)
input = cv2.warpAffine(
data_numpy,
trans,
(int(self.image_size[0]), int(self.image_size[1])),
flags=cv2.INTER_LINEAR)
if self.transform:
input = self.transform(input)
for i in range(self.num_joints):
if joints_vis[i, 0] > 0.0:
joints[i, 0:2] = affine_transform(joints[i, 0:2], trans)
target, target_weight = self.generate_target(joints, joints_vis)
target = torch.from_numpy(target)
target_weight = torch.from_numpy(target_weight)
meta = {
'image': image_file,
'filename': filename,
'imgnum': imgnum,
'joints': joints,
'joints_vis': joints_vis,
'center': c,
'scale': s,
'rotation': r,
'score': score
}
return input, target, target_weight, meta
def select_data(self, db):
db_selected = []
for rec in db:
num_vis = 0
joints_x = 0.0
joints_y = 0.0
for joint, joint_vis in zip(
rec['joints_3d'], rec['joints_3d_vis']):
if joint_vis[0] <= 0:
continue
num_vis += 1
joints_x += joint[0]
joints_y += joint[1]
if num_vis == 0:
continue
joints_x, joints_y = joints_x / num_vis, joints_y / num_vis
area = rec['scale'][0] * rec['scale'][1] * (self.pixel_std**2)
joints_center = np.array([joints_x, joints_y])
bbox_center = np.array(rec['center'])
diff_norm2 = np.linalg.norm(joints_center - bbox_center, 2)
ks = np.exp(-1.0 * (diff_norm2 ** 2) / (0.2 ** 2 * 2.0 * area))
metric = (0.2 / 16) * num_vis + 0.45 - 0.2 / 16
if ks > metric:
db_selected.append(rec)
logging.info('=> num db: {}'.format(len(db)))
logging.info('=> num selected db: {}'.format(len(db_selected)))
return db_selected
def generate_target(self,
joints,
joints_vis):
'''
:param joints: [num_joints, 3]
:param joints_vis: [num_joints, 3]
:return: target, target_weight(1: visible, 0: invisible)
'''
target_weight = np.ones((self.num_joints, 1), dtype=np.float32)
target_weight[:, 0] = joints_vis[:, 0]
        assert self.target_type == 'gaussian', 'Only gaussian target maps are supported now!'
if self.target_type == 'gaussian':
target = np.zeros((self.num_joints,
self.heatmap_size[1],
self.heatmap_size[0]),
dtype=np.float32)
tmp_size = self.sigma * 3
for joint_id in range(self.num_joints):
feat_stride = self.image_size / self.heatmap_size
mu_x = int(joints[joint_id][0] / feat_stride[0] + 0.5)
mu_y = int(joints[joint_id][1] / feat_stride[1] + 0.5)
# Check that any part of the gaussian is in-bounds
ul = [int(mu_x - tmp_size), int(mu_y - tmp_size)]
br = [int(mu_x + tmp_size + 1), int(mu_y + tmp_size + 1)]
if ul[0] >= self.heatmap_size[0] or ul[1] >= self.heatmap_size[1] \
or br[0] < 0 or br[1] < 0:
                    # Gaussian window is entirely out of bounds; mark the joint as unlabeled.
                    target_weight[joint_id] = 0
                    continue
                # Generate gaussian
size = 2 * tmp_size + 1
x = np.arange(0, size, 1, np.float32)
y = x[:, np.newaxis]
x0 = y0 = size // 2
# The gaussian is not normalized, we want the center value to equal 1
g = np.exp(- ((x - x0) ** 2 + (y - y0) ** 2) / (2 * self.sigma ** 2))
# Usable gaussian range
g_x = max(0, -ul[0]), min(br[0], self.heatmap_size[0]) - ul[0]
g_y = max(0, -ul[1]), min(br[1], self.heatmap_size[1]) - ul[1]
# Image range
img_x = max(0, ul[0]), min(br[0], self.heatmap_size[0])
img_y = max(0, ul[1]), min(br[1], self.heatmap_size[1])
v = target_weight[joint_id]
if v > 0.5:
target[joint_id][img_y[0]:img_y[1], img_x[0]:img_x[1]] = \
g[g_y[0]:g_y[1], g_x[0]:g_x[1]]
return target, target_weight
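# A minimal numeric sketch (illustrative helper, made-up sigma) of the
# unnormalized gaussian window built in `generate_target` above: the value
# at the window center (x0, y0) is exactly 1.
def _example_gaussian_peak():
    sigma = 2
    size = 2 * (sigma * 3) + 1
    x = np.arange(0, size, 1, np.float32)
    y = x[:, np.newaxis]
    x0 = y0 = size // 2
    g = np.exp(-((x - x0) ** 2 + (y - y0) ** 2) / (2 * sigma ** 2))
    return g[y0, x0]  # 1.0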
def get_affine_transform(center,
scale,
rot,
output_size,
shift=np.array([0, 0], dtype=np.float32),
inv=0):
    if not isinstance(scale, np.ndarray) and not isinstance(scale, list):
        scale = np.array([scale, scale])
scale_tmp = scale * 200.0
src_w = scale_tmp[0]
dst_w = output_size[0]
dst_h = output_size[1]
rot_rad = np.pi * rot / 180
src_dir = get_dir([0, src_w * -0.5], rot_rad)
dst_dir = np.array([0, dst_w * -0.5], np.float32)
src = np.zeros((3, 2), dtype=np.float32)
dst = np.zeros((3, 2), dtype=np.float32)
src[0, :] = center + scale_tmp * shift
src[1, :] = center + src_dir + scale_tmp * shift
dst[0, :] = [dst_w * 0.5, dst_h * 0.5]
dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5]) + dst_dir
src[2:, :] = get_3rd_point(src[0, :], src[1, :])
dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :])
if inv:
trans = cv2.getAffineTransform(np.float32(dst), np.float32(src))
else:
trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))
return trans
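# A minimal usage sketch for `get_affine_transform` above (illustrative
# helper, made-up inputs): a unit-scale (200 px) box around `center` is
# mapped into a 192x256 output with no rotation; the result is a 2x3 affine
# matrix suitable for cv2.warpAffine.
def _example_get_affine_transform():
    center = np.array([100.0, 100.0], dtype=np.float32)
    trans = get_affine_transform(center, scale=1.0, rot=0, output_size=[192, 256])
    return trans.shape  # (2, 3)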
def get_3rd_point(a, b):
direct = a - b
return b + np.array([-direct[1], direct[0]], dtype=np.float32)
def get_dir(src_point, rot_rad):
sn, cs = np.sin(rot_rad), np.cos(rot_rad)
src_result = [0, 0]
src_result[0] = src_point[0] * cs - src_point[1] * sn
src_result[1] = src_point[0] * sn + src_point[1] * cs
return src_result
def affine_transform(pt, t):
new_pt = np.array([pt[0], pt[1], 1.]).T
new_pt = np.dot(t, new_pt)
return new_pt[:2]
def fliplr_joints(joints, joints_vis, width, matched_parts):
"""
    Flip joint coordinates horizontally and swap left/right joint pairs.
"""
# Flip horizontal
joints[:, 0] = width - joints[:, 0] - 1
# Change left-right parts
for pair in matched_parts:
joints[pair[0], :], joints[pair[1], :] = joints[pair[1], :], joints[pair[0], :].copy()
joints_vis[pair[0], :], joints_vis[pair[1], :] = joints_vis[pair[1], :], joints_vis[pair[0], :].copy()
return joints * joints_vis, joints_vis
| 9,597
| 32.559441
| 110
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/datasets/coco_hpe1_dataset.py
|
"""
COCO keypoint detection (2D single human pose estimation) dataset.
"""
import os
import copy
import cv2
import numpy as np
import torch
import torch.utils.data as data
from .dataset_metainfo import DatasetMetaInfo
class CocoHpe1Dataset(data.Dataset):
"""
COCO keypoint detection (2D single human pose estimation) dataset.
Parameters:
----------
root : string
Path to `annotations`, `train2017`, and `val2017` folders.
mode : string, default 'train'
'train', 'val', 'test', or 'demo'.
transform : callable, optional
A function that transforms the image.
splits : list of str, default ['person_keypoints_val2017']
Json annotations name.
Candidates can be: person_keypoints_val2017, person_keypoints_train2017.
check_centers : bool, default is False
If true, will force check centers of bbox and keypoints, respectively.
If centers are far away from each other, remove this label.
skip_empty : bool, default is False
Whether skip entire image if no valid label is found. Use `False` if this dataset is
for validation to avoid COCO metric error.
"""
CLASSES = ["person"]
KEYPOINTS = {
0: "nose",
1: "left_eye",
2: "right_eye",
3: "left_ear",
4: "right_ear",
5: "left_shoulder",
6: "right_shoulder",
7: "left_elbow",
8: "right_elbow",
9: "left_wrist",
10: "right_wrist",
11: "left_hip",
12: "right_hip",
13: "left_knee",
14: "right_knee",
15: "left_ankle",
16: "right_ankle"
}
SKELETON = [
[16, 14], [14, 12], [17, 15], [15, 13], [12, 13], [6, 12], [7, 13], [6, 7], [6, 8],
[7, 9], [8, 10], [9, 11], [2, 3], [1, 2], [1, 3], [2, 4], [3, 5], [4, 6], [5, 7]]
def __init__(self,
root,
mode="train",
transform=None,
splits=("person_keypoints_val2017",),
check_centers=False,
skip_empty=True):
super(CocoHpe1Dataset, self).__init__()
self._root = os.path.expanduser(root)
self.mode = mode
self.transform = transform
self.num_class = len(self.CLASSES)
if isinstance(splits, str):
splits = [splits]
self._splits = splits
self._coco = []
self._check_centers = check_centers
self._skip_empty = skip_empty
self.index_map = dict(zip(type(self).CLASSES, range(self.num_class)))
self.json_id_to_contiguous = None
self.contiguous_id_to_json = None
self._items, self._labels = self._load_jsons()
mode_name = "train" if mode == "train" else "val"
annotations_dir_path = os.path.join(root, "annotations")
annotations_file_path = os.path.join(annotations_dir_path, "person_keypoints_" + mode_name + "2017.json")
self.annotations_file_path = annotations_file_path
def __str__(self):
detail = ",".join([str(s) for s in self._splits])
return self.__class__.__name__ + "(" + detail + ")"
@property
def classes(self):
"""
Category names.
"""
return type(self).CLASSES
@property
def num_joints(self):
"""
Dataset defined: number of joints provided.
"""
return 17
@property
def joint_pairs(self):
"""
Joint pairs which defines the pairs of joint to be swapped
when the image is flipped horizontally.
"""
return [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14], [15, 16]]
@property
def coco(self):
"""
Return pycocotools object for evaluation purposes.
"""
if not self._coco:
raise ValueError("No coco objects found, dataset not initialized.")
if len(self._coco) > 1:
raise NotImplementedError(
"Currently we don't support evaluating {} JSON files".format(len(self._coco)))
return self._coco[0]
def __len__(self):
return len(self._items)
def __getitem__(self, idx):
img_path = self._items[idx]
img_id = int(os.path.splitext(os.path.basename(img_path))[0])
label = copy.deepcopy(self._labels[idx])
# img = mx.image.imread(img_path, 1)
# img = Image.open(img_path).convert("RGB")
img = cv2.imread(img_path, flags=cv2.IMREAD_COLOR)
img = cv2.cvtColor(img, code=cv2.COLOR_BGR2RGB)
if self.transform is not None:
img, scale, center, score = self.transform(img, label)
res_label = np.array([float(img_id)] + [float(score)] + list(center) + list(scale), np.float32)
img = torch.from_numpy(img)
res_label = torch.from_numpy(res_label)
return img, res_label
def _load_jsons(self):
"""
Load all image paths and labels from JSON annotation files into buffer.
"""
items = []
labels = []
from pycocotools.coco import COCO
for split in self._splits:
anno = os.path.join(self._root, "annotations", split) + ".json"
_coco = COCO(anno)
self._coco.append(_coco)
classes = [c["name"] for c in _coco.loadCats(_coco.getCatIds())]
            if classes != self.classes:
                raise ValueError("Incompatible category names with COCO: {}".format(classes))
json_id_to_contiguous = {
v: k for k, v in enumerate(_coco.getCatIds())}
if self.json_id_to_contiguous is None:
self.json_id_to_contiguous = json_id_to_contiguous
self.contiguous_id_to_json = {
v: k for k, v in self.json_id_to_contiguous.items()}
else:
assert self.json_id_to_contiguous == json_id_to_contiguous
# iterate through the annotations
image_ids = sorted(_coco.getImgIds())
for entry in _coco.loadImgs(image_ids):
dirname, filename = entry["coco_url"].split("/")[-2:]
abs_path = os.path.join(self._root, dirname, filename)
if not os.path.exists(abs_path):
                    raise IOError("Image {} does not exist.".format(abs_path))
label = self._check_load_keypoints(_coco, entry)
if not label:
continue
# num of items are relative to person, not image
for obj in label:
items.append(abs_path)
labels.append(obj)
return items, labels
def _check_load_keypoints(self, coco, entry):
"""
Check and load ground-truth keypoints.
"""
ann_ids = coco.getAnnIds(imgIds=entry["id"], iscrowd=False)
objs = coco.loadAnns(ann_ids)
# check valid bboxes
valid_objs = []
width = entry["width"]
height = entry["height"]
for obj in objs:
contiguous_cid = self.json_id_to_contiguous[obj["category_id"]]
if contiguous_cid >= self.num_class:
# not class of interest
continue
if max(obj["keypoints"]) == 0:
continue
# convert from (x, y, w, h) to (xmin, ymin, xmax, ymax) and clip bound
xmin, ymin, xmax, ymax = self.bbox_clip_xyxy(self.bbox_xywh_to_xyxy(obj["bbox"]), width, height)
# require non-zero box area
if obj['area'] <= 0 or xmax <= xmin or ymax <= ymin:
continue
# joints 3d: (num_joints, 3, 2); 3 is for x, y, z; 2 is for position, visibility
joints_3d = np.zeros((self.num_joints, 3, 2), dtype=np.float32)
for i in range(self.num_joints):
joints_3d[i, 0, 0] = obj["keypoints"][i * 3 + 0]
joints_3d[i, 1, 0] = obj["keypoints"][i * 3 + 1]
# joints_3d[i, 2, 0] = 0
visible = min(1, obj["keypoints"][i * 3 + 2])
joints_3d[i, :2, 1] = visible
# joints_3d[i, 2, 1] = 0
if np.sum(joints_3d[:, 0, 1]) < 1:
# no visible keypoint
continue
if self._check_centers:
bbox_center, bbox_area = self._get_box_center_area((xmin, ymin, xmax, ymax))
kp_center, num_vis = self._get_keypoints_center_count(joints_3d)
ks = np.exp(-2 * np.sum(np.square(bbox_center - kp_center)) / bbox_area)
if (num_vis / 80.0 + 47 / 80.0) > ks:
continue
valid_objs.append({
"bbox": (xmin, ymin, xmax, ymax),
"joints_3d": joints_3d
})
if not valid_objs:
if not self._skip_empty:
# dummy invalid labels if no valid objects are found
valid_objs.append({
"bbox": np.array([-1, -1, 0, 0]),
"joints_3d": np.zeros((self.num_joints, 3, 2), dtype=np.float32)
})
return valid_objs
@staticmethod
def _get_box_center_area(bbox):
"""
        Get bbox center and area.
"""
c = np.array([(bbox[0] + bbox[2]) / 2.0, (bbox[1] + bbox[3]) / 2.0])
area = (bbox[3] - bbox[1]) * (bbox[2] - bbox[0])
return c, area
@staticmethod
def _get_keypoints_center_count(keypoints):
"""
        Get the geometric center of all visible keypoints and their count.
"""
keypoint_x = np.sum(keypoints[:, 0, 0] * (keypoints[:, 0, 1] > 0))
keypoint_y = np.sum(keypoints[:, 1, 0] * (keypoints[:, 1, 1] > 0))
num = float(np.sum(keypoints[:, 0, 1]))
return np.array([keypoint_x / num, keypoint_y / num]), num
@staticmethod
def bbox_clip_xyxy(xyxy, width, height):
"""
Clip bounding box with format (xmin, ymin, xmax, ymax) to specified boundary.
All bounding boxes will be clipped to the new region `(0, 0, width, height)`.
Parameters:
----------
xyxy : list, tuple or numpy.ndarray
The bbox in format (xmin, ymin, xmax, ymax).
If numpy.ndarray is provided, we expect multiple bounding boxes with
shape `(N, 4)`.
width : int or float
Boundary width.
height : int or float
Boundary height.
Returns:
-------
        tuple or numpy.ndarray
            The clipped bboxes in format (xmin, ymin, xmax, ymax), matching the input type.
"""
if isinstance(xyxy, (tuple, list)):
if not len(xyxy) == 4:
raise IndexError("Bounding boxes must have 4 elements, given {}".format(len(xyxy)))
x1 = np.minimum(width - 1, np.maximum(0, xyxy[0]))
y1 = np.minimum(height - 1, np.maximum(0, xyxy[1]))
x2 = np.minimum(width - 1, np.maximum(0, xyxy[2]))
y2 = np.minimum(height - 1, np.maximum(0, xyxy[3]))
return x1, y1, x2, y2
elif isinstance(xyxy, np.ndarray):
if not xyxy.size % 4 == 0:
raise IndexError("Bounding boxes must have n * 4 elements, given {}".format(xyxy.shape))
x1 = np.minimum(width - 1, np.maximum(0, xyxy[:, 0]))
y1 = np.minimum(height - 1, np.maximum(0, xyxy[:, 1]))
x2 = np.minimum(width - 1, np.maximum(0, xyxy[:, 2]))
y2 = np.minimum(height - 1, np.maximum(0, xyxy[:, 3]))
return np.hstack((x1, y1, x2, y2))
else:
            raise TypeError("Expect input xyxy to be a list, tuple or numpy.ndarray, given {}".format(type(xyxy)))
@staticmethod
def bbox_xywh_to_xyxy(xywh):
"""
Convert bounding boxes from format (xmin, ymin, w, h) to (xmin, ymin, xmax, ymax)
Parameters:
----------
xywh : list, tuple or numpy.ndarray
The bbox in format (x, y, w, h).
If numpy.ndarray is provided, we expect multiple bounding boxes with
shape `(N, 4)`.
Returns:
-------
tuple or np.ndarray
The converted bboxes in format (xmin, ymin, xmax, ymax).
If input is numpy.ndarray, return is numpy.ndarray correspondingly.
"""
if isinstance(xywh, (tuple, list)):
if not len(xywh) == 4:
raise IndexError("Bounding boxes must have 4 elements, given {}".format(len(xywh)))
w, h = np.maximum(xywh[2] - 1, 0), np.maximum(xywh[3] - 1, 0)
return xywh[0], xywh[1], xywh[0] + w, xywh[1] + h
elif isinstance(xywh, np.ndarray):
if not xywh.size % 4 == 0:
raise IndexError("Bounding boxes must have n * 4 elements, given {}".format(xywh.shape))
xyxy = np.hstack((xywh[:, :2], xywh[:, :2] + np.maximum(0, xywh[:, 2:4] - 1)))
return xyxy
else:
            raise TypeError("Expect input xywh to be a list, tuple or numpy.ndarray, given {}".format(type(xywh)))
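# Illustrative sketch (added for clarity, not part of the original module; the
# `_sketch_*` name is hypothetical): a quick sanity check of the two static bbox
# helpers above on a hand-made (x, y, w, h) box.
def _sketch_bbox_helpers():
    xywh = (90.0, 70.0, 30.0, 40.0)
    # (x, y, w, h) -> (xmin, ymin, xmax, ymax); the max corner is inclusive,
    # hence the `- 1` inside bbox_xywh_to_xyxy
    xyxy = CocoHpe1Dataset.bbox_xywh_to_xyxy(xywh)
    assert xyxy == (90.0, 70.0, 119.0, 109.0)
    # clip to an image with width=100, height=80
    clipped = CocoHpe1Dataset.bbox_clip_xyxy(xyxy, width=100, height=80)
    assert clipped == (90.0, 70.0, 99.0, 79.0)
    return clipped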
# ---------------------------------------------------------------------------------------------------------------------
class CocoHpeValTransform1(object):
def __init__(self,
ds_metainfo):
self.ds_metainfo = ds_metainfo
self.image_size = self.ds_metainfo.input_image_size
height = self.image_size[0]
width = self.image_size[1]
self.aspect_ratio = float(width / height)
self.mean = ds_metainfo.mean_rgb
self.std = ds_metainfo.std_rgb
def __call__(self, src, label):
bbox = label["bbox"]
assert len(bbox) == 4
xmin, ymin, xmax, ymax = bbox
center, scale = _box_to_center_scale(xmin, ymin, xmax - xmin, ymax - ymin, self.aspect_ratio)
score = label.get("score", 1)
h, w = self.image_size
trans = get_affine_transform(center, scale, 0, [w, h])
# src_np = np.array(src)
img = cv2.warpAffine(src, trans, (int(w), int(h)), flags=cv2.INTER_LINEAR)
# img = mx.nd.image.to_tensor(mx.nd.array(img))
# img = mx.nd.image.normalize(img, mean=self.mean, std=self.std)
img = img.astype(np.float32)
img = img / 255.0
img = (img - np.array(self.mean, np.float32)) / np.array(self.std, np.float32)
img = img.transpose((2, 0, 1))
return img, scale, center, score
def _box_to_center_scale(x, y, w, h, aspect_ratio=1.0, scale_mult=1.25):
pixel_std = 1
center = np.zeros((2,), dtype=np.float32)
center[0] = x + w * 0.5
center[1] = y + h * 0.5
if w > aspect_ratio * h:
h = w / aspect_ratio
elif w < aspect_ratio * h:
w = h * aspect_ratio
scale = np.array(
[w * 1.0 / pixel_std, h * 1.0 / pixel_std], dtype=np.float32)
if center[0] != -1:
scale = scale * scale_mult
return center, scale
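# Illustrative sketch (added for clarity, not part of the original module): for a
# 50x200 box and the 192/256 target aspect ratio, _box_to_center_scale widens the
# box to match the target aspect ratio and then applies the default 1.25x margin.
def _sketch_box_to_center_scale():
    center, scale = _box_to_center_scale(x=10, y=20, w=50, h=200, aspect_ratio=192.0 / 256.0)
    assert center[0] == 35.0 and center[1] == 120.0  # box midpoint
    # w (50) < aspect_ratio * h (150), so w grows to 150; both sides then get the 1.25x margin
    assert np.allclose(scale, [150.0 * 1.25, 200.0 * 1.25])
    return center, scale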
def get_dir(src_point, rot_rad):
sn, cs = np.sin(rot_rad), np.cos(rot_rad)
src_result = [0, 0]
src_result[0] = src_point[0] * cs - src_point[1] * sn
src_result[1] = src_point[0] * sn + src_point[1] * cs
return src_result
def crop(img, center, scale, output_size, rot=0):
trans = get_affine_transform(center, scale, rot, output_size)
dst_img = cv2.warpAffine(
img,
trans,
(int(output_size[0]), int(output_size[1])),
flags=cv2.INTER_LINEAR)
return dst_img
def get_3rd_point(a, b):
direct = a - b
return b + np.array([-direct[1], direct[0]], dtype=np.float32)
def get_affine_transform(center,
scale,
rot,
output_size,
shift=np.array([0, 0], dtype=np.float32),
inv=0):
if not isinstance(scale, np.ndarray) and not isinstance(scale, list):
scale = np.array([scale, scale])
scale_tmp = scale
src_w = scale_tmp[0]
dst_w = output_size[0]
dst_h = output_size[1]
rot_rad = np.pi * rot / 180
src_dir = get_dir([0, src_w * -0.5], rot_rad)
dst_dir = np.array([0, dst_w * -0.5], np.float32)
src = np.zeros((3, 2), dtype=np.float32)
dst = np.zeros((3, 2), dtype=np.float32)
src[0, :] = center + scale_tmp * shift
src[1, :] = center + src_dir + scale_tmp * shift
dst[0, :] = [dst_w * 0.5, dst_h * 0.5]
dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5]) + dst_dir
src[2:, :] = get_3rd_point(src[0, :], src[1, :])
dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :])
if inv:
trans = cv2.getAffineTransform(np.float32(dst), np.float32(src))
else:
trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))
return trans
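# Illustrative sketch (added for clarity, not part of the original module):
# get_affine_transform maps the crop center onto the output center, and the
# inverse matrix (inv=1) undoes the mapping.
def _sketch_affine_transform():
    center = np.array([120.0, 80.0], dtype=np.float32)
    scale = np.array([192.0, 256.0], dtype=np.float32)  # crop size in pixels
    output_size = [192, 256]  # (w, h)
    trans = get_affine_transform(center, scale, 0, output_size)
    # apply the 2x3 matrix to the center point in homogeneous coordinates
    mapped = trans.dot(np.array([center[0], center[1], 1.0]))
    assert np.allclose(mapped, [output_size[0] * 0.5, output_size[1] * 0.5])
    inv_trans = get_affine_transform(center, scale, 0, output_size, inv=1)
    restored = inv_trans.dot(np.array([mapped[0], mapped[1], 1.0]))
    assert np.allclose(restored, center)
    return trans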
# ---------------------------------------------------------------------------------------------------------------------
class CocoHpeValTransform2(object):
def __init__(self,
ds_metainfo):
self.ds_metainfo = ds_metainfo
self.image_size = self.ds_metainfo.input_image_size
height = self.image_size[0]
width = self.image_size[1]
self.aspect_ratio = float(width / height)
self.mean = ds_metainfo.mean_rgb
self.std = ds_metainfo.std_rgb
def __call__(self, src, label):
# print(src.shape)
bbox = label["bbox"]
assert len(bbox) == 4
score = label.get('score', 1)
img, scale_box = detector_to_alpha_pose(
src,
class_ids=np.array([[0.]]),
scores=np.array([[1.]]),
            bounding_boxs=np.array([bbox]),
output_shape=self.image_size)
if scale_box.shape[0] == 1:
pt1 = np.array(scale_box[0, (0, 1)], dtype=np.float32)
pt2 = np.array(scale_box[0, (2, 3)], dtype=np.float32)
else:
assert scale_box.shape[0] == 4
pt1 = np.array(scale_box[(0, 1)], dtype=np.float32)
pt2 = np.array(scale_box[(2, 3)], dtype=np.float32)
return img[0].astype(np.float32), pt1, pt2, score
def detector_to_alpha_pose(img,
class_ids,
scores,
bounding_boxs,
output_shape=(256, 192),
thr=0.5):
boxes, scores = alpha_pose_detection_processor(
img=img,
boxes=bounding_boxs,
class_idxs=class_ids,
scores=scores,
thr=thr)
pose_input, upscale_bbox = alpha_pose_image_cropper(
source_img=img,
boxes=boxes,
output_shape=output_shape)
return pose_input, upscale_bbox
def alpha_pose_detection_processor(img,
boxes,
class_idxs,
scores,
thr=0.5):
if len(boxes.shape) == 3:
boxes = boxes.squeeze(axis=0)
if len(class_idxs.shape) == 3:
class_idxs = class_idxs.squeeze(axis=0)
if len(scores.shape) == 3:
scores = scores.squeeze(axis=0)
    # clip coordinates
boxes[:, [0, 2]] = np.clip(boxes[:, [0, 2]], 0., img.shape[1] - 1)
boxes[:, [1, 3]] = np.clip(boxes[:, [1, 3]], 0., img.shape[0] - 1)
# select boxes
mask1 = (class_idxs == 0).astype(np.int32)
mask2 = (scores > thr).astype(np.int32)
picked_idxs = np.where((mask1 + mask2) > 1)[0]
if picked_idxs.shape[0] == 0:
return None, None
else:
return boxes[picked_idxs], scores[picked_idxs]
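# Illustrative sketch (added for clarity, not part of the original module): the
# processor keeps only class-0 ("person") detections whose score clears the
# threshold, after clipping all boxes to the image bounds.
def _sketch_detection_processor():
    img = np.zeros((100, 200, 3), dtype=np.uint8)  # (h, w, c)
    boxes = np.array([[10.0, 10.0, 250.0, 90.0],   # person, xmax clipped to 199
                      [20.0, 20.0, 60.0, 80.0],    # person, low score -> dropped
                      [30.0, 30.0, 70.0, 90.0]])   # non-person class -> dropped
    class_idxs = np.array([[0.0], [0.0], [1.0]])
    scores = np.array([[0.9], [0.3], [0.9]])
    out_boxes, out_scores = alpha_pose_detection_processor(img, boxes, class_idxs, scores, thr=0.5)
    assert out_boxes.shape == (1, 4) and out_boxes[0, 2] == 199.0
    return out_boxes, out_scores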
def alpha_pose_image_cropper(source_img,
boxes,
output_shape=(256, 192)):
if boxes is None:
return None, boxes
# crop person poses
img_width, img_height = source_img.shape[1], source_img.shape[0]
tensors = np.zeros([boxes.shape[0], 3, output_shape[0], output_shape[1]])
out_boxes = np.zeros([boxes.shape[0], 4])
for i, box in enumerate(boxes):
img = source_img.copy()
box_width = box[2] - box[0]
box_height = box[3] - box[1]
if box_width > 100:
scale_rate = 0.2
else:
scale_rate = 0.3
# crop image
left = int(max(0, box[0] - box_width * scale_rate / 2))
up = int(max(0, box[1] - box_height * scale_rate / 2))
right = int(min(img_width - 1, max(left + 5, box[2] + box_width * scale_rate / 2)))
bottom = int(min(img_height - 1, max(up + 5, box[3] + box_height * scale_rate / 2)))
crop_width = right - left
if crop_width < 1:
continue
crop_height = bottom - up
if crop_height < 1:
continue
ul = np.array((left, up))
br = np.array((right, bottom))
img = cv_cropBox(img, ul, br, output_shape[0], output_shape[1])
img = img.astype(np.float32)
img = img / 255.0
img = img.transpose((2, 0, 1))
# img = mx.nd.image.to_tensor(np.array(img))
# img = img.transpose((2, 0, 1))
img[0] = img[0] - 0.406
img[1] = img[1] - 0.457
img[2] = img[2] - 0.480
assert (img.shape[0] == 3)
tensors[i] = img
out_boxes[i] = (left, up, right, bottom)
return tensors, out_boxes
def cv_cropBox(img, ul, br, resH, resW, pad_val=0):
    br = br - 1
    # br = br.int()
lenH = max((br[1] - ul[1]).item(), (br[0] - ul[0]).item() * resH / resW)
lenW = lenH * resW / resH
    if img.ndim == 2:
        img = img[:, :, np.newaxis]
box_shape = [br[1] - ul[1], br[0] - ul[0]]
pad_size = [(lenH - box_shape[0]) // 2, (lenW - box_shape[1]) // 2]
# Padding Zeros
img[:ul[1], :, :], img[:, :ul[0], :] = pad_val, pad_val
img[br[1] + 1:, :, :], img[:, br[0] + 1:, :] = pad_val, pad_val
src = np.zeros((3, 2), dtype=np.float32)
dst = np.zeros((3, 2), dtype=np.float32)
src[0, :] = np.array([ul[0] - pad_size[1], ul[1] - pad_size[0]], np.float32)
src[1, :] = np.array([br[0] + pad_size[1], br[1] + pad_size[0]], np.float32)
dst[0, :] = 0
dst[1, :] = np.array([resW - 1, resH - 1], np.float32)
src[2:, :] = get_3rd_point(src[0, :], src[1, :])
dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :])
trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))
dst_img = cv2.warpAffine(img, trans, (resW, resH), flags=cv2.INTER_LINEAR)
return dst_img
# ---------------------------------------------------------------------------------------------------------------------
def recalc_pose1(keypoints,
bbs,
image_size):
def transform_preds(coords, center, scale, output_size):
def affine_transform(pt, t):
new_pt = np.array([pt[0], pt[1], 1.]).T
new_pt = np.dot(t, new_pt)
return new_pt[:2]
target_coords = np.zeros(coords.shape)
trans = get_affine_transform(center, scale, 0, output_size, inv=1)
for p in range(coords.shape[0]):
target_coords[p, 0:2] = affine_transform(coords[p, 0:2], trans)
return target_coords
center = bbs[:, :2]
scale = bbs[:, 2:4]
heatmap_height = image_size[0] // 4
heatmap_width = image_size[1] // 4
output_size = [heatmap_width, heatmap_height]
preds = np.zeros_like(keypoints)
for i in range(keypoints.shape[0]):
preds[i] = transform_preds(keypoints[i], center[i], scale[i], output_size)
return preds
def recalc_pose1b(pred,
label,
image_size,
visible_conf_threshold=0.0):
label_img_id = label[:, 0].astype(np.int32)
label_score = label[:, 1]
label_bbs = label[:, 2:6]
pred_keypoints = pred[:, :, :2]
pred_score = pred[:, :, 2]
pred[:, :, :2] = recalc_pose1(pred_keypoints, label_bbs, image_size)
pred_person_score = []
batch = pred_keypoints.shape[0]
num_joints = pred_keypoints.shape[1]
for idx in range(batch):
kpt_score = 0
count = 0
for i in range(num_joints):
mval = float(pred_score[idx][i])
if mval > visible_conf_threshold:
kpt_score += mval
count += 1
if count > 0:
kpt_score /= count
kpt_score = kpt_score * float(label_score[idx])
pred_person_score.append(kpt_score)
return pred, pred_person_score, label_img_id
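# Illustrative sketch (added for clarity, not part of the original module): the
# person score computed inside recalc_pose1b is the mean confidence over joints
# above the visibility threshold, rescaled by the detection score from the label.
def _sketch_person_score():
    joint_conf = np.array([0.9, 0.8, 0.0, 0.7])  # one person, four joint confidences
    detection_score = 0.5
    visible = joint_conf[joint_conf > 0.0]
    person_score = float(visible.mean()) * detection_score
    assert np.isclose(person_score, (0.9 + 0.8 + 0.7) / 3.0 * 0.5)
    return person_score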
def recalc_pose2(keypoints,
bbs,
image_size):
def transformBoxInvert(pt, ul, br, resH, resW):
center = np.zeros(2)
center[0] = (br[0] - 1 - ul[0]) / 2
center[1] = (br[1] - 1 - ul[1]) / 2
lenH = max(br[1] - ul[1], (br[0] - ul[0]) * resH / resW)
lenW = lenH * resW / resH
_pt = (pt * lenH) / resH
if bool(((lenW - 1) / 2 - center[0]) > 0):
_pt[0] = _pt[0] - ((lenW - 1) / 2 - center[0])
if bool(((lenH - 1) / 2 - center[1]) > 0):
_pt[1] = _pt[1] - ((lenH - 1) / 2 - center[1])
new_point = np.zeros(2)
new_point[0] = _pt[0] + ul[0]
new_point[1] = _pt[1] + ul[1]
return new_point
pt2 = bbs[:, :2]
pt1 = bbs[:, 2:4]
heatmap_height = image_size[0] // 4
heatmap_width = image_size[1] // 4
preds = np.zeros_like(keypoints)
for i in range(keypoints.shape[0]):
for j in range(keypoints.shape[1]):
preds[i, j] = transformBoxInvert(keypoints[i, j], pt1[i], pt2[i], heatmap_height, heatmap_width)
return preds
def recalc_pose2b(pred,
label,
image_size,
visible_conf_threshold=0.0):
label_img_id = label[:, 0].astype(np.int32)
label_score = label[:, 1]
label_bbs = label[:, 2:6]
pred_keypoints = pred[:, :, :2]
pred_score = pred[:, :, 2]
pred[:, :, :2] = recalc_pose2(pred_keypoints, label_bbs, image_size)
pred_person_score = []
batch = pred_keypoints.shape[0]
num_joints = pred_keypoints.shape[1]
for idx in range(batch):
kpt_score = 0
count = 0
for i in range(num_joints):
mval = float(pred_score[idx][i])
if mval > visible_conf_threshold:
kpt_score += mval
count += 1
if count > 0:
kpt_score /= count
kpt_score = kpt_score * float(label_score[idx])
pred_person_score.append(kpt_score)
return pred, pred_person_score, label_img_id
# ---------------------------------------------------------------------------------------------------------------------
class CocoHpe1MetaInfo(DatasetMetaInfo):
def __init__(self):
super(CocoHpe1MetaInfo, self).__init__()
self.label = "COCO"
self.short_label = "coco"
self.root_dir_name = "coco"
self.dataset_class = CocoHpe1Dataset
self.num_training_samples = None
self.in_channels = 3
self.num_classes = CocoHpe1Dataset.classes
self.input_image_size = (256, 192)
self.train_metric_capts = None
self.train_metric_names = None
self.train_metric_extra_kwargs = None
self.val_metric_capts = None
self.val_metric_names = None
self.test_metric_capts = ["Val.CocoOksAp"]
self.test_metric_names = ["CocoHpeOksApMetric"]
self.test_metric_extra_kwargs = [
{"name": "OksAp",
"coco_annotations_file_path": None,
"use_file": False,
"pose_postprocessing_fn": lambda x, y: recalc_pose1b(x, y, self.input_image_size)}]
self.saver_acc_ind = 0
self.do_transform = True
self.val_transform = CocoHpeValTransform1
self.test_transform = CocoHpeValTransform1
self.ml_type = "hpe"
self.net_extra_kwargs = {}
self.mean_rgb = (0.485, 0.456, 0.406)
self.std_rgb = (0.229, 0.224, 0.225)
self.model_type = 1
def add_dataset_parser_arguments(self,
parser,
work_dir_path):
"""
        Create python script parameters (for COCO keypoints dataset metainfo).
Parameters:
----------
parser : ArgumentParser
ArgumentParser instance.
work_dir_path : str
Path to working directory.
"""
super(CocoHpe1MetaInfo, self).add_dataset_parser_arguments(parser, work_dir_path)
parser.add_argument(
"--input-size",
type=int,
nargs=2,
default=self.input_image_size,
help="size of the input for model")
parser.add_argument(
"--model-type",
type=int,
default=self.model_type,
help="model type (1=SimplePose, 2=AlphaPose)")
def update(self,
args):
"""
        Update COCO keypoints dataset metainfo after user customizing.
Parameters:
----------
args : ArgumentParser
Main script arguments.
"""
super(CocoHpe1MetaInfo, self).update(args)
self.input_image_size = args.input_size
self.model_type = args.model_type
if self.model_type == 1:
self.test_metric_extra_kwargs[0]["pose_postprocessing_fn"] =\
lambda x, y: recalc_pose1b(x, y, self.input_image_size)
self.val_transform = CocoHpeValTransform1
self.test_transform = CocoHpeValTransform1
else:
self.test_metric_extra_kwargs[0]["pose_postprocessing_fn"] =\
lambda x, y: recalc_pose2b(x, y, self.input_image_size)
self.val_transform = CocoHpeValTransform2
self.test_transform = CocoHpeValTransform2
def update_from_dataset(self,
dataset):
"""
Update dataset metainfo after a dataset class instance creation.
Parameters:
----------
        dataset : obj
A dataset class instance.
"""
self.test_metric_extra_kwargs[0]["coco_annotations_file_path"] = dataset.annotations_file_path
| 30,012 | 33.817865 | 119 | py | imgclsmob | imgclsmob-master/pytorch/datasets/coco_det_dataset.py |
"""
MS COCO object detection dataset.
"""
import os
import cv2
import logging
import mxnet as mx
import numpy as np
from PIL import Image
import torch.utils.data as data
from .dataset_metainfo import DatasetMetaInfo
__all__ = ['CocoDetMetaInfo']
class CocoDetDataset(data.Dataset):
"""
MS COCO detection dataset.
Parameters:
----------
root : str
Path to folder storing the dataset.
mode : string, default 'train'
'train', 'val', 'test', or 'demo'.
transform : callable, optional
A function that transforms the image.
splits : list of str, default ['instances_val2017']
Json annotations name.
Candidates can be: instances_val2017, instances_train2017.
min_object_area : float
        Minimum accepted ground-truth area; if an object's area is smaller than this value,
        it will be ignored.
    skip_empty : bool, default is True
        Whether to skip images with no valid objects. This should be `True` in training;
        otherwise it will cause undefined behavior.
    use_crowd : bool, default is True
        Whether to use boxes labeled as crowd instances.
"""
CLASSES = ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep',
'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella',
'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard',
'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard',
'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork',
'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange',
'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair',
'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv',
'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',
'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
'scissors', 'teddy bear', 'hair drier', 'toothbrush']
def __init__(self,
root,
mode="train",
transform=None,
splits=('instances_val2017',),
min_object_area=0,
skip_empty=True,
use_crowd=True):
super(CocoDetDataset, self).__init__()
self._root = os.path.expanduser(root)
self.mode = mode
self._transform = transform
self.num_class = len(self.CLASSES)
self._min_object_area = min_object_area
self._skip_empty = skip_empty
self._use_crowd = use_crowd
if isinstance(splits, mx.base.string_types):
splits = [splits]
self._splits = splits
self.index_map = dict(zip(type(self).CLASSES, range(self.num_class)))
self.json_id_to_contiguous = None
self.contiguous_id_to_json = None
self._coco = []
self._items, self._labels, self._im_aspect_ratios = self._load_jsons()
mode_name = "train" if mode == "train" else "val"
annotations_dir_path = os.path.join(root, "annotations")
annotations_file_path = os.path.join(annotations_dir_path, "instances_" + mode_name + "2017.json")
self.annotations_file_path = annotations_file_path
def __str__(self):
detail = ','.join([str(s) for s in self._splits])
return self.__class__.__name__ + '(' + detail + ')'
@property
def coco(self):
"""
Return pycocotools object for evaluation purposes.
"""
if not self._coco:
raise ValueError("No coco objects found, dataset not initialized.")
if len(self._coco) > 1:
            raise NotImplementedError(
                "Currently we don't support evaluating {} JSON files. "
                "Please use a single JSON dataset and evaluate them one by one.".format(len(self._coco)))
return self._coco[0]
@property
def classes(self):
"""
Category names.
"""
return type(self).CLASSES
@property
def annotation_dir(self):
"""
        The subdirectory for annotations. Default is 'annotations' (the COCO default).
        For example, a COCO-format JSON file will be searched for as
        'root/annotation_dir/xxx.json'.
        You can override this if a custom dataset does not follow the same pattern.
"""
return 'annotations'
def get_im_aspect_ratio(self):
"""Return the aspect ratio of each image in the order of the raw data."""
if self._im_aspect_ratios is not None:
return self._im_aspect_ratios
self._im_aspect_ratios = [None] * len(self._items)
for i, img_path in enumerate(self._items):
with Image.open(img_path) as im:
w, h = im.size
self._im_aspect_ratios[i] = 1.0 * w / h
return self._im_aspect_ratios
def _parse_image_path(self, entry):
"""How to parse image dir and path from entry.
Parameters:
----------
entry : dict
            COCO entry, e.g. including width, height, image path, etc.
Returns:
-------
abs_path : str
Absolute path for corresponding image.
"""
dirname, filename = entry["coco_url"].split("/")[-2:]
abs_path = os.path.join(self._root, dirname, filename)
return abs_path
def __len__(self):
return len(self._items)
def __getitem__(self, idx):
img_path = self._items[idx]
label = self._labels[idx]
# img = mx.image.imread(img_path, 1)
img = cv2.imread(img_path, flags=cv2.IMREAD_COLOR)
label = np.array(label).copy()
if self._transform is not None:
img, label = self._transform(img, label)
return img, label
def _load_jsons(self):
"""
Load all image paths and labels from JSON annotation files into buffer.
"""
items = []
labels = []
im_aspect_ratios = []
from pycocotools.coco import COCO
for split in self._splits:
anno = os.path.join(self._root, self.annotation_dir, split) + ".json"
_coco = COCO(anno)
self._coco.append(_coco)
classes = [c["name"] for c in _coco.loadCats(_coco.getCatIds())]
            if classes != self.classes:
                raise ValueError("Incompatible category names with COCO: {}".format(classes))
json_id_to_contiguous = {
v: k for k, v in enumerate(_coco.getCatIds())}
if self.json_id_to_contiguous is None:
self.json_id_to_contiguous = json_id_to_contiguous
self.contiguous_id_to_json = {
v: k for k, v in self.json_id_to_contiguous.items()}
else:
assert self.json_id_to_contiguous == json_id_to_contiguous
# iterate through the annotations
image_ids = sorted(_coco.getImgIds())
for entry in _coco.loadImgs(image_ids):
abs_path = self._parse_image_path(entry)
if not os.path.exists(abs_path):
                    raise IOError("Image: {} does not exist.".format(abs_path))
label = self._check_load_bbox(_coco, entry)
if not label:
continue
im_aspect_ratios.append(float(entry["width"]) / entry["height"])
items.append(abs_path)
labels.append(label)
return items, labels, im_aspect_ratios
def _check_load_bbox(self, coco, entry):
"""
Check and load ground-truth labels.
"""
entry_id = entry['id']
        # fix pycocotools _isArrayLike, which doesn't work for str in Python 3
entry_id = [entry_id] if not isinstance(entry_id, (list, tuple)) else entry_id
ann_ids = coco.getAnnIds(imgIds=entry_id, iscrowd=None)
objs = coco.loadAnns(ann_ids)
# check valid bboxes
valid_objs = []
width = entry["width"]
height = entry["height"]
for obj in objs:
if obj["area"] < self._min_object_area:
continue
if obj.get("ignore", 0) == 1:
continue
if not self._use_crowd and obj.get("iscrowd", 0):
continue
# convert from (x, y, w, h) to (xmin, ymin, xmax, ymax) and clip bound
xmin, ymin, xmax, ymax = self.bbox_clip_xyxy(self.bbox_xywh_to_xyxy(obj["bbox"]), width, height)
# require non-zero box area
if obj["area"] > 0 and xmax > xmin and ymax > ymin:
contiguous_cid = self.json_id_to_contiguous[obj["category_id"]]
valid_objs.append([xmin, ymin, xmax, ymax, contiguous_cid])
if not valid_objs:
if not self._skip_empty:
# dummy invalid labels if no valid objects are found
valid_objs.append([-1, -1, -1, -1, -1])
return valid_objs
@staticmethod
def bbox_clip_xyxy(xyxy, width, height):
"""
Clip bounding box with format (xmin, ymin, xmax, ymax) to specified boundary.
All bounding boxes will be clipped to the new region `(0, 0, width, height)`.
Parameters:
----------
xyxy : list, tuple or numpy.ndarray
The bbox in format (xmin, ymin, xmax, ymax).
If numpy.ndarray is provided, we expect multiple bounding boxes with
shape `(N, 4)`.
width : int or float
Boundary width.
height : int or float
Boundary height.
Returns:
-------
        tuple or numpy.ndarray
            The clipped bboxes in format (xmin, ymin, xmax, ymax), matching the input type.
"""
if isinstance(xyxy, (tuple, list)):
if not len(xyxy) == 4:
raise IndexError("Bounding boxes must have 4 elements, given {}".format(len(xyxy)))
x1 = np.minimum(width - 1, np.maximum(0, xyxy[0]))
y1 = np.minimum(height - 1, np.maximum(0, xyxy[1]))
x2 = np.minimum(width - 1, np.maximum(0, xyxy[2]))
y2 = np.minimum(height - 1, np.maximum(0, xyxy[3]))
return x1, y1, x2, y2
elif isinstance(xyxy, np.ndarray):
if not xyxy.size % 4 == 0:
raise IndexError("Bounding boxes must have n * 4 elements, given {}".format(xyxy.shape))
x1 = np.minimum(width - 1, np.maximum(0, xyxy[:, 0]))
y1 = np.minimum(height - 1, np.maximum(0, xyxy[:, 1]))
x2 = np.minimum(width - 1, np.maximum(0, xyxy[:, 2]))
y2 = np.minimum(height - 1, np.maximum(0, xyxy[:, 3]))
return np.hstack((x1, y1, x2, y2))
else:
            raise TypeError("Expect input xyxy to be a list, tuple or numpy.ndarray, given {}".format(type(xyxy)))
@staticmethod
def bbox_xywh_to_xyxy(xywh):
"""
Convert bounding boxes from format (xmin, ymin, w, h) to (xmin, ymin, xmax, ymax)
Parameters:
----------
xywh : list, tuple or numpy.ndarray
The bbox in format (x, y, w, h).
If numpy.ndarray is provided, we expect multiple bounding boxes with
shape `(N, 4)`.
Returns:
-------
tuple or np.ndarray
The converted bboxes in format (xmin, ymin, xmax, ymax).
If input is numpy.ndarray, return is numpy.ndarray correspondingly.
"""
if isinstance(xywh, (tuple, list)):
if not len(xywh) == 4:
raise IndexError("Bounding boxes must have 4 elements, given {}".format(len(xywh)))
w, h = np.maximum(xywh[2] - 1, 0), np.maximum(xywh[3] - 1, 0)
return xywh[0], xywh[1], xywh[0] + w, xywh[1] + h
elif isinstance(xywh, np.ndarray):
if not xywh.size % 4 == 0:
raise IndexError("Bounding boxes must have n * 4 elements, given {}".format(xywh.shape))
xyxy = np.hstack((xywh[:, :2], xywh[:, :2] + np.maximum(0, xywh[:, 2:4] - 1)))
return xyxy
else:
            raise TypeError("Expect input xywh to be a list, tuple or numpy.ndarray, given {}".format(type(xywh)))
# ---------------------------------------------------------------------------------------------------------------------
class CocoDetValTransform(object):
def __init__(self,
ds_metainfo):
self.ds_metainfo = ds_metainfo
self.image_size = self.ds_metainfo.input_image_size
self._height = self.image_size[0]
self._width = self.image_size[1]
self._mean = np.array(ds_metainfo.mean_rgb, dtype=np.float32).reshape(1, 1, 3)
self._std = np.array(ds_metainfo.std_rgb, dtype=np.float32).reshape(1, 1, 3)
def __call__(self, src, label):
# resize
img, bbox = src, label
input_h, input_w = self._height, self._width
h, w, _ = src.shape
s = max(h, w) * 1.0
c = np.array([w / 2., h / 2.], dtype=np.float32)
trans_input = self.get_affine_transform(c, s, 0, [input_w, input_h])
inp = cv2.warpAffine(img, trans_input, (input_w, input_h), flags=cv2.INTER_LINEAR)
output_w = input_w
output_h = input_h
trans_output = self.get_affine_transform(c, s, 0, [output_w, output_h])
for i in range(bbox.shape[0]):
bbox[i, :2] = self.affine_transform(bbox[i, :2], trans_output)
bbox[i, 2:4] = self.affine_transform(bbox[i, 2:4], trans_output)
bbox[:, :2] = np.clip(bbox[:, :2], 0, output_w - 1)
bbox[:, 2:4] = np.clip(bbox[:, 2:4], 0, output_h - 1)
img = inp
# to tensor
img = img.astype(np.float32) / 255.0
img = (img - self._mean) / self._std
img = img.transpose(2, 0, 1).astype(np.float32)
return img, bbox.astype(img.dtype)
@staticmethod
def get_affine_transform(center,
scale,
rot,
output_size,
shift=np.array([0, 0], dtype=np.float32),
inv=0):
"""
Get affine transform matrix given center, scale and rotation.
Parameters:
----------
center : tuple of float
Center point.
scale : float
Scaling factor.
rot : float
Rotation degree.
output_size : tuple of int
(width, height) of the output size.
        shift : numpy.ndarray
            Shift factor.
        inv : bool
            Whether to invert the computation.
Returns:
-------
numpy.ndarray
Affine matrix.
"""
if not isinstance(scale, np.ndarray) and not isinstance(scale, list):
scale = np.array([scale, scale], dtype=np.float32)
scale_tmp = scale
src_w = scale_tmp[0]
dst_w = output_size[0]
dst_h = output_size[1]
rot_rad = np.pi * rot / 180
src_dir = CocoDetValTransform.get_rot_dir([0, src_w * -0.5], rot_rad)
dst_dir = np.array([0, dst_w * -0.5], np.float32)
src = np.zeros((3, 2), dtype=np.float32)
dst = np.zeros((3, 2), dtype=np.float32)
src[0, :] = center + scale_tmp * shift
src[1, :] = center + src_dir + scale_tmp * shift
dst[0, :] = [dst_w * 0.5, dst_h * 0.5]
dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5], np.float32) + dst_dir
src[2:, :] = CocoDetValTransform.get_3rd_point(src[0, :], src[1, :])
dst[2:, :] = CocoDetValTransform.get_3rd_point(dst[0, :], dst[1, :])
if inv:
trans = cv2.getAffineTransform(np.float32(dst), np.float32(src))
else:
trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))
return trans
@staticmethod
def get_rot_dir(src_point, rot_rad):
"""
Get rotation direction.
Parameters:
----------
src_point : tuple of float
Original point.
rot_rad : float
Rotation radian.
Returns:
-------
tuple of float
Rotation.
"""
sn, cs = np.sin(rot_rad), np.cos(rot_rad)
src_result = [0, 0]
src_result[0] = src_point[0] * cs - src_point[1] * sn
src_result[1] = src_point[0] * sn + src_point[1] * cs
return src_result
@staticmethod
def get_3rd_point(a, b):
"""
Get the 3rd point position given first two points.
Parameters:
----------
a : tuple of float
First point.
b : tuple of float
Second point.
Returns:
-------
tuple of float
Third point.
"""
direct = a - b
return b + np.array([-direct[1], direct[0]], dtype=np.float32)
@staticmethod
def affine_transform(pt, t):
"""
        Apply an affine transform to a point given the transform matrix t.
        Parameters:
        ----------
        pt : numpy.ndarray
            Point with shape (2,).
        t : numpy.ndarray
            Transformation matrix with shape (2, 3).
        Returns:
        -------
        numpy.ndarray
            Transformed point with shape (2,).
"""
new_pt = np.array([pt[0], pt[1], 1.], dtype=np.float32).T
new_pt = np.dot(t, new_pt)
return new_pt[:2]
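# Illustrative sketch (added for clarity, not part of the original module): the
# static helpers above implement the letterbox mapping used in __call__, where
# s = max(h, w) and c is the image center; the image center lands on the input center.
def _sketch_letterbox_mapping():
    orig_w, orig_h = 400, 300
    c = np.array([orig_w / 2.0, orig_h / 2.0], dtype=np.float32)
    s = max(orig_w, orig_h) * 1.0
    trans = CocoDetValTransform.get_affine_transform(c, s, 0, [512, 512])
    mapped = CocoDetValTransform.affine_transform(c, trans)
    assert np.allclose(mapped, [256.0, 256.0])
    return mapped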
class Tuple(object):
"""
    Wrap multiple batchify functions to form a single function that applies each
    input function to the corresponding input field.
"""
def __init__(self, fn, *args):
if isinstance(fn, (list, tuple)):
self._fn = fn
else:
self._fn = (fn,) + args
def __call__(self, data):
"""
Batchify the input data.
Parameters:
----------
data : list
            The samples to batchify. Each sample should contain N attributes.
Returns:
-------
tuple
A tuple of length N. Contains the batchified result of each attribute in the input.
"""
ret = []
for i, ele_fn in enumerate(self._fn):
ret.append(ele_fn([ele[i] for ele in data]))
return tuple(ret)
class Stack(object):
"""
Stack the input data samples to construct the batch.
"""
def __call__(self, data):
"""
Batchify the input data.
Parameters:
----------
data : list
The input data samples
Returns:
-------
NDArray
Result.
"""
return self._stack_arrs(data, True)
@staticmethod
def _stack_arrs(arrs, use_shared_mem=False):
"""
        Internal implementation for stacking arrays.
"""
if isinstance(arrs[0], mx.nd.NDArray):
if use_shared_mem:
out = mx.nd.empty((len(arrs),) + arrs[0].shape, dtype=arrs[0].dtype,
ctx=mx.Context("cpu_shared", 0))
return mx.nd.stack(*arrs, out=out)
else:
return mx.nd.stack(*arrs)
else:
out = np.asarray(arrs)
if use_shared_mem:
return mx.nd.array(out, ctx=mx.Context("cpu_shared", 0))
else:
return mx.nd.array(out)
class Pad(object):
"""
Pad the input ndarrays along the specific padding axis and stack them to get the output.
"""
def __init__(self, axis=0, pad_val=0, num_shards=1, ret_length=False):
self._axis = axis
self._pad_val = pad_val
self._num_shards = num_shards
self._ret_length = ret_length
def __call__(self, data):
"""
Batchify the input data.
Parameters:
----------
data : list
A list of N samples. Each sample can be 1) ndarray or
2) a list/tuple of ndarrays
Returns:
-------
NDArray
Data in the minibatch. Shape is (N, ...)
NDArray, optional
            The sequences' original lengths at the padded axis. Shape is (N,). This will only be
            returned if `ret_length` is True.
"""
if isinstance(data[0], (mx.nd.NDArray, np.ndarray, list)):
padded_arr, original_length = self._pad_arrs_to_max_length(
data, self._axis, self._pad_val, self._num_shards, True)
if self._ret_length:
return padded_arr, original_length
else:
return padded_arr
else:
raise NotImplementedError
@staticmethod
def _pad_arrs_to_max_length(arrs, pad_axis, pad_val, num_shards=1, use_shared_mem=False):
"""
Inner Implementation of the Pad batchify.
"""
if not isinstance(arrs[0], (mx.nd.NDArray, np.ndarray)):
arrs = [np.asarray(ele) for ele in arrs]
if isinstance(pad_axis, tuple):
original_length = []
for axis in pad_axis:
original_length.append(np.array([ele.shape[axis] for ele in arrs]))
original_length = np.stack(original_length).T
else:
original_length = np.array([ele.shape[pad_axis] for ele in arrs])
pad_axis = [pad_axis]
if len(original_length) % num_shards != 0:
logging.warning(
'Batch size cannot be evenly split. Trying to shard %d items into %d shards',
len(original_length), num_shards)
original_length = np.array_split(original_length, num_shards)
max_lengths = [np.max(ll, axis=0, keepdims=len(pad_axis) == 1) for ll in original_length]
# add batch dimension
ret_shape = [[ll.shape[0], ] + list(arrs[0].shape) for ll in original_length]
for i, shape in enumerate(ret_shape):
for j, axis in enumerate(pad_axis):
shape[1 + axis] = max_lengths[i][j]
if use_shared_mem:
ret = [mx.nd.full(shape=tuple(shape), val=pad_val, ctx=mx.Context('cpu_shared', 0),
dtype=arrs[0].dtype) for shape in ret_shape]
original_length = [mx.nd.array(ll, ctx=mx.Context('cpu_shared', 0),
dtype=np.int32) for ll in original_length]
else:
ret = [mx.nd.full(shape=tuple(shape), val=pad_val, dtype=arrs[0].dtype) for shape in
ret_shape]
original_length = [mx.nd.array(ll, dtype=np.int32) for ll in original_length]
for i, arr in enumerate(arrs):
if ret[i // ret[0].shape[0]].shape[1:] == arr.shape:
ret[i // ret[0].shape[0]][i % ret[0].shape[0]] = arr
else:
slices = [slice(0, ll) for ll in arr.shape]
ret[i // ret[0].shape[0]][i % ret[0].shape[0]][tuple(slices)] = arr
if len(ret) == len(original_length) == 1:
return ret[0], original_length[0]
return ret, original_length
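# Illustrative sketch (added for clarity, not part of the original module; it
# requires mxnet at runtime, which this module already imports): the batchify
# pipeline used below, `Tuple(Stack(), Pad(pad_val=-1))`, stacks the images and
# pads per-image label arrays with -1 up to a common object count.
def _sketch_batchify():
    samples = [
        (np.zeros((3, 4, 4), dtype=np.float32), np.array([[0., 0., 2., 2., 1.]])),
        (np.ones((3, 4, 4), dtype=np.float32), np.array([[1., 1., 3., 3., 2.],
                                                         [0., 0., 1., 1., 3.]])),
    ]
    batchify_fn = Tuple(Stack(), Pad(axis=0, pad_val=-1))
    imgs, labels = batchify_fn(samples)
    assert imgs.shape == (2, 3, 4, 4)  # images are simply stacked
    assert labels.shape == (2, 2, 5)   # the first label is padded with a -1 row
    assert float(labels[0, 1, 0].asscalar()) == -1.0
    return imgs, labels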
def get_post_transform(orig_w, orig_h, out_w, out_h):
"""Get the post prediction affine transforms. This will be used to adjust the prediction results
according to original coco image resolutions.
Parameters:
----------
orig_w : int
Original width of the image.
orig_h : int
Original height of the image.
out_w : int
Width of the output image after prediction.
out_h : int
Height of the output image after prediction.
Returns:
-------
numpy.ndarray
        Affine transform matrix with shape (2, 3).
"""
s = max(orig_w, orig_h) * 1.0
c = np.array([orig_w / 2., orig_h / 2.], dtype=np.float32)
trans_output = CocoDetValTransform.get_affine_transform(c, s, 0, [out_w, out_h], inv=True)
return trans_output
class CocoDetMetaInfo(DatasetMetaInfo):
def __init__(self):
super(CocoDetMetaInfo, self).__init__()
self.label = "COCO"
self.short_label = "coco"
self.root_dir_name = "coco"
self.dataset_class = CocoDetDataset
self.num_training_samples = None
self.in_channels = 3
self.num_classes = CocoDetDataset.classes
self.input_image_size = (512, 512)
self.train_metric_capts = None
self.train_metric_names = None
self.train_metric_extra_kwargs = None
self.val_metric_capts = None
self.val_metric_names = None
self.test_metric_capts = ["Val.mAP"]
self.test_metric_names = ["CocoDetMApMetric"]
self.test_metric_extra_kwargs = [
{"name": "mAP",
"img_height": 512,
"coco_annotations_file_path": None,
"contiguous_id_to_json": None,
"data_shape": None,
"post_affine": get_post_transform}]
self.test_dataset_extra_kwargs =\
{"skip_empty": False}
self.saver_acc_ind = 0
self.do_transform = True
self.do_transform_first = False
self.last_batch = "keep"
self.batchify_fn = Tuple(Stack(), Pad(pad_val=-1))
self.val_transform = CocoDetValTransform
self.test_transform = CocoDetValTransform
        self.ml_type = "det"
self.allow_hybridize = False
self.net_extra_kwargs = {}
self.mean_rgb = (0.485, 0.456, 0.406)
self.std_rgb = (0.229, 0.224, 0.225)
def add_dataset_parser_arguments(self,
parser,
work_dir_path):
"""
        Create python script parameters (for COCO detection dataset metainfo).
Parameters:
----------
parser : ArgumentParser
ArgumentParser instance.
work_dir_path : str
Path to working directory.
"""
super(CocoDetMetaInfo, self).add_dataset_parser_arguments(parser, work_dir_path)
parser.add_argument(
"--input-size",
type=int,
nargs=2,
default=self.input_image_size,
help="size of the input for model")
def update(self,
args):
"""
        Update COCO detection dataset metainfo after user customizing.
Parameters:
----------
args : ArgumentParser
Main script arguments.
"""
super(CocoDetMetaInfo, self).update(args)
self.input_image_size = args.input_size
self.test_metric_extra_kwargs[0]["img_height"] = self.input_image_size[0]
self.test_metric_extra_kwargs[0]["data_shape"] = self.input_image_size
def update_from_dataset(self,
dataset):
"""
Update dataset metainfo after a dataset class instance creation.
Parameters:
----------
        dataset : obj
A dataset class instance.
"""
self.test_metric_extra_kwargs[0]["coco_annotations_file_path"] = dataset.annotations_file_path
self.test_metric_extra_kwargs[0]["contiguous_id_to_json"] = dataset.contiguous_id_to_json
| 27,185 | 35.688259 | 119 | py | imgclsmob | imgclsmob-master/pytorch/datasets/ade20k_seg_dataset.py |
import os
import numpy as np
from PIL import Image
from .seg_dataset import SegDataset
from .voc_seg_dataset import VOCMetaInfo
class ADE20KSegDataset(SegDataset):
"""
ADE20K semantic segmentation dataset.
Parameters:
----------
root : str
Path to a folder with `ADEChallengeData2016` subfolder.
mode : str, default 'train'
'train', 'val', 'test', or 'demo'.
transform : callable, optional
A function that transforms the image.
"""
def __init__(self,
root,
mode="train",
transform=None,
**kwargs):
super(ADE20KSegDataset, self).__init__(
root=root,
mode=mode,
transform=transform,
**kwargs)
base_dir_path = os.path.join(root, "ADEChallengeData2016")
        assert os.path.exists(base_dir_path), "Please prepare the dataset."
image_dir_path = os.path.join(base_dir_path, "images")
mask_dir_path = os.path.join(base_dir_path, "annotations")
mode_dir_name = "training" if mode == "train" else "validation"
image_dir_path = os.path.join(image_dir_path, mode_dir_name)
mask_dir_path = os.path.join(mask_dir_path, mode_dir_name)
self.images = []
self.masks = []
for image_file_name in os.listdir(image_dir_path):
image_file_stem, _ = os.path.splitext(image_file_name)
if image_file_name.endswith(".jpg"):
image_file_path = os.path.join(image_dir_path, image_file_name)
mask_file_name = image_file_stem + ".png"
mask_file_path = os.path.join(mask_dir_path, mask_file_name)
if os.path.isfile(mask_file_path):
self.images.append(image_file_path)
self.masks.append(mask_file_path)
else:
print("Cannot find the mask: {}".format(mask_file_path))
assert (len(self.images) == len(self.masks))
if len(self.images) == 0:
raise RuntimeError("Found 0 images in subfolders of: {}\n".format(base_dir_path))
def __getitem__(self, index):
image = Image.open(self.images[index]).convert("RGB")
if self.mode == "demo":
image = self._img_transform(image)
if self.transform is not None:
image = self.transform(image)
return image, os.path.basename(self.images[index])
mask = Image.open(self.masks[index])
if self.mode == "train":
image, mask = self._sync_transform(image, mask)
elif self.mode == "val":
image, mask = self._val_sync_transform(image, mask)
else:
assert self.mode == "test"
image, mask = self._img_transform(image), self._mask_transform(mask)
if self.transform is not None:
image = self.transform(image)
return image, mask
classes = 150
vague_idx = 150
use_vague = True
background_idx = -1
ignore_bg = False
@staticmethod
def _mask_transform(mask):
np_mask = np.array(mask).astype(np.int32)
np_mask[np_mask == 0] = ADE20KSegDataset.vague_idx + 1
np_mask -= 1
return np_mask
def __len__(self):
return len(self.images)
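# Illustrative sketch (added for clarity, not part of the original module): ADE20K
# stores 0 as "unlabeled" and 1..150 as classes; _mask_transform shifts everything
# down by one, so classes become 0..149 and unlabeled pixels get the vague index 150.
def _sketch_mask_transform():
    raw = Image.fromarray(np.array([[0, 1], [150, 75]], dtype=np.uint8))
    remapped = ADE20KSegDataset._mask_transform(raw)
    assert remapped[0, 0] == ADE20KSegDataset.vague_idx  # 0 -> 150
    assert remapped[0, 1] == 0 and remapped[1, 0] == 149 and remapped[1, 1] == 74
    return remapped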
class ADE20KMetaInfo(VOCMetaInfo):
def __init__(self):
super(ADE20KMetaInfo, self).__init__()
self.label = "ADE20K"
self.short_label = "voc"
self.root_dir_name = "ade20k"
self.dataset_class = ADE20KSegDataset
self.num_classes = ADE20KSegDataset.classes
self.test_metric_extra_kwargs = [
{"vague_idx": ADE20KSegDataset.vague_idx,
"use_vague": ADE20KSegDataset.use_vague,
"macro_average": False},
{"num_classes": ADE20KSegDataset.classes,
"vague_idx": ADE20KSegDataset.vague_idx,
"use_vague": ADE20KSegDataset.use_vague,
"bg_idx": ADE20KSegDataset.background_idx,
"ignore_bg": ADE20KSegDataset.ignore_bg,
"macro_average": False}]
| 4,121 | 34.230769 | 93 | py | imgclsmob | imgclsmob-master/pytorch/datasets/dataset_metainfo.py |
"""
Base dataset metainfo class.
"""
import os
class DatasetMetaInfo(object):
"""
Base descriptor of dataset.
"""
def __init__(self):
self.use_imgrec = False
self.label = None
self.root_dir_name = None
self.root_dir_path = None
self.dataset_class = None
self.dataset_class_extra_kwargs = None
self.num_training_samples = None
self.in_channels = None
self.num_classes = None
self.input_image_size = None
self.train_metric_capts = None
self.train_metric_names = None
self.train_metric_extra_kwargs = None
self.train_use_weighted_sampler = False
self.val_metric_capts = None
self.val_metric_names = None
self.val_metric_extra_kwargs = None
self.test_metric_capts = None
self.test_metric_names = None
self.test_metric_extra_kwargs = None
self.saver_acc_ind = None
self.ml_type = None
self.allow_hybridize = True
self.train_net_extra_kwargs = None
self.test_net_extra_kwargs = None
self.load_ignore_extra = False
def add_dataset_parser_arguments(self,
parser,
work_dir_path):
"""
        Create python script parameters (for dataset-specific metainfo).
Parameters:
----------
parser : ArgumentParser
ArgumentParser instance.
work_dir_path : str
Path to working directory.
"""
parser.add_argument(
"--data-dir",
type=str,
default=os.path.join(work_dir_path, self.root_dir_name),
help="path to directory with {} dataset".format(self.label))
parser.add_argument(
"--num-classes",
type=int,
default=self.num_classes,
help="number of classes")
parser.add_argument(
"--in-channels",
type=int,
default=self.in_channels,
help="number of input channels")
def update(self,
args):
"""
Update dataset metainfo after user customizing.
Parameters:
----------
args : ArgumentParser
Main script arguments.
"""
self.root_dir_path = args.data_dir
self.num_classes = args.num_classes
self.in_channels = args.in_channels
def update_from_dataset(self,
dataset):
"""
Update dataset metainfo after a dataset class instance creation.
Parameters:
----------
        dataset : obj
A dataset class instance.
"""
pass
| 2,733 | 27.778947 | 72 | py | imgclsmob | imgclsmob-master/pytorch/datasets/seg_dataset.py |
import random
import numpy as np
from PIL import Image, ImageOps, ImageFilter
import torch.utils.data as data
class SegDataset(data.Dataset):
"""
Segmentation base dataset.
Parameters:
----------
root : str
        Path to the folder storing the dataset.
mode : str
'train', 'val', 'test', or 'demo'.
transform : func
A function that takes data and transforms it.
"""
def __init__(self,
root,
mode,
transform,
base_size=520,
crop_size=480):
assert (mode in ("train", "val", "test", "demo"))
self.root = root
self.mode = mode
self.transform = transform
self.base_size = base_size
self.crop_size = crop_size
def _val_sync_transform(self, image, mask):
outsize = self.crop_size
short_size = outsize
w, h = image.size
if w > h:
oh = short_size
ow = int(1.0 * w * oh / h)
else:
ow = short_size
oh = int(1.0 * h * ow / w)
image = image.resize((ow, oh), Image.BILINEAR)
mask = mask.resize((ow, oh), Image.NEAREST)
# center crop
w, h = image.size
x1 = int(round(0.5 * (w - outsize)))
y1 = int(round(0.5 * (h - outsize)))
image = image.crop((x1, y1, x1 + outsize, y1 + outsize))
mask = mask.crop((x1, y1, x1 + outsize, y1 + outsize))
# final transform
image, mask = self._img_transform(image), self._mask_transform(mask)
return image, mask
def _sync_transform(self, image, mask):
# random mirror
if random.random() < 0.5:
image = image.transpose(Image.FLIP_LEFT_RIGHT)
mask = mask.transpose(Image.FLIP_LEFT_RIGHT)
crop_size = self.crop_size
# random scale (short edge)
short_size = random.randint(int(self.base_size * 0.5), int(self.base_size * 2.0))
w, h = image.size
if h > w:
ow = short_size
oh = int(1.0 * h * ow / w)
else:
oh = short_size
ow = int(1.0 * w * oh / h)
image = image.resize((ow, oh), Image.BILINEAR)
mask = mask.resize((ow, oh), Image.NEAREST)
# pad crop
if short_size < crop_size:
padh = crop_size - oh if oh < crop_size else 0
padw = crop_size - ow if ow < crop_size else 0
image = ImageOps.expand(image, border=(0, 0, padw, padh), fill=0)
mask = ImageOps.expand(mask, border=(0, 0, padw, padh), fill=0)
        # random crop to crop_size
w, h = image.size
x1 = random.randint(0, w - crop_size)
y1 = random.randint(0, h - crop_size)
image = image.crop((x1, y1, x1 + crop_size, y1 + crop_size))
mask = mask.crop((x1, y1, x1 + crop_size, y1 + crop_size))
# gaussian blur as in PSP
if random.random() < 0.5:
image = image.filter(ImageFilter.GaussianBlur(
radius=random.random()))
# final transform
image, mask = self._img_transform(image), self._mask_transform(mask)
return image, mask
@staticmethod
def _img_transform(image):
return np.array(image)
@staticmethod
def _mask_transform(mask):
return np.array(mask).astype(np.int32)
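# Illustrative sketch (added for clarity, not part of the original module): the
# geometry used by _val_sync_transform, i.e. resize the short side to crop_size,
# then center-crop. For a 1000x500 image and crop_size=480:
def _sketch_val_resize_geometry():
    w, h = 1000, 500
    outsize = 480
    if w > h:
        oh = outsize
        ow = int(1.0 * w * oh / h)
    else:
        ow = outsize
        oh = int(1.0 * h * ow / w)
    assert (ow, oh) == (960, 480)  # aspect ratio preserved
    x1 = int(round(0.5 * (ow - outsize)))
    y1 = int(round(0.5 * (oh - outsize)))
    assert (x1, y1) == (240, 0)  # center-crop offsets
    return ow, oh, x1, y1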
| 3,366 | 33.010101 | 89 | py | imgclsmob | imgclsmob-master/pytorch/datasets/coco_hpe2_dataset.py |
"""
COCO keypoint detection (2D multiple human pose estimation) dataset (for Lightweight OpenPose).
"""
import os
import json
import math
import cv2
from operator import itemgetter
import numpy as np
import torch
import torch.utils.data as data
from .dataset_metainfo import DatasetMetaInfo
class CocoHpe2Dataset(data.Dataset):
"""
COCO keypoint detection (2D multiple human pose estimation) dataset.
Parameters:
----------
root : string
Path to `annotations`, `train2017`, and `val2017` folders.
mode : string, default 'train'
'train', 'val', 'test', or 'demo'.
transform : callable, optional
A function that transforms the image.
"""
def __init__(self,
root,
mode="train",
transform=None):
super(CocoHpe2Dataset, self).__init__()
self._root = os.path.expanduser(root)
self.mode = mode
self.transform = transform
mode_name = "train" if mode == "train" else "val"
annotations_dir_path = os.path.join(root, "annotations")
annotations_file_path = os.path.join(annotations_dir_path, "person_keypoints_" + mode_name + "2017.json")
with open(annotations_file_path, "r") as f:
self.file_names = json.load(f)["images"]
self.image_dir_path = os.path.join(root, mode_name + "2017")
self.annotations_file_path = annotations_file_path
def __str__(self):
return self.__class__.__name__ + "(" + self._root + ")"
def __len__(self):
return len(self.file_names)
def __getitem__(self, idx):
file_name = self.file_names[idx]["file_name"]
image_file_path = os.path.join(self.image_dir_path, file_name)
image = cv2.imread(image_file_path, flags=cv2.IMREAD_COLOR)
# image = cv2.cvtColor(img, code=cv2.COLOR_BGR2RGB)
img_mean = (128, 128, 128)
img_scale = 1.0 / 256
base_height = 368
stride = 8
pad_value = (0, 0, 0)
height, width, _ = image.shape
image = self.normalize(image, img_mean, img_scale)
ratio = base_height / float(image.shape[0])
image = cv2.resize(image, (0, 0), fx=ratio, fy=ratio, interpolation=cv2.INTER_CUBIC)
min_dims = [base_height, max(image.shape[1], base_height)]
image, pad = self.pad_width(
image,
stride,
pad_value,
min_dims)
image = image.astype(np.float32)
image = image.transpose((2, 0, 1))
image = torch.from_numpy(image)
# if self.transform is not None:
# image = self.transform(image)
image_id = int(os.path.splitext(os.path.basename(file_name))[0])
label = np.array([image_id, 1.0] + pad + [height, width], np.float32)
label = torch.from_numpy(label)
return image, label
@staticmethod
def normalize(img,
img_mean,
img_scale):
img = np.array(img, dtype=np.float32)
img = (img - img_mean) * img_scale
return img
@staticmethod
def pad_width(img,
stride,
pad_value,
min_dims):
h, w, _ = img.shape
h = min(min_dims[0], h)
min_dims[0] = math.ceil(min_dims[0] / float(stride)) * stride
min_dims[1] = max(min_dims[1], w)
min_dims[1] = math.ceil(min_dims[1] / float(stride)) * stride
top = int(math.floor((min_dims[0] - h) / 2.0))
left = int(math.floor((min_dims[1] - w) / 2.0))
bottom = int(min_dims[0] - h - top)
right = int(min_dims[1] - w - left)
pad = [top, left, bottom, right]
padded_img = cv2.copyMakeBorder(
src=img,
top=top,
bottom=bottom,
left=left,
right=right,
borderType=cv2.BORDER_CONSTANT,
value=pad_value)
return padded_img, pad
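# Illustrative sketch (added for clarity, not part of the original module):
# pad_width grows the resized image so both sides are multiples of the network
# stride (8), splitting the padding evenly between opposite borders.
def _sketch_pad_width():
    img = np.zeros((368, 490, 3), dtype=np.float32)
    min_dims = [368, max(img.shape[1], 368)]  # as computed in __getitem__
    padded, pad = CocoHpe2Dataset.pad_width(img, stride=8, pad_value=(0, 0, 0), min_dims=min_dims)
    assert padded.shape == (368, 496, 3)  # 490 -> ceil(490 / 8) * 8 = 496
    assert pad == [0, 3, 0, 3]  # [top, left, bottom, right]
    return padded, pad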
# ---------------------------------------------------------------------------------------------------------------------
class CocoHpe2ValTransform(object):
def __init__(self,
ds_metainfo):
self.ds_metainfo = ds_metainfo
def __call__(self, src, label):
return src, label
def extract_keypoints(heatmap,
all_keypoints,
total_keypoint_num):
heatmap[heatmap < 0.1] = 0
heatmap_with_borders = np.pad(heatmap, [(2, 2), (2, 2)], mode="constant")
heatmap_center = heatmap_with_borders[1:heatmap_with_borders.shape[0] - 1, 1:heatmap_with_borders.shape[1] - 1]
heatmap_left = heatmap_with_borders[1:heatmap_with_borders.shape[0] - 1, 2:heatmap_with_borders.shape[1]]
heatmap_right = heatmap_with_borders[1:heatmap_with_borders.shape[0] - 1, 0:heatmap_with_borders.shape[1] - 2]
heatmap_up = heatmap_with_borders[2:heatmap_with_borders.shape[0], 1:heatmap_with_borders.shape[1] - 1]
heatmap_down = heatmap_with_borders[0:heatmap_with_borders.shape[0] - 2, 1:heatmap_with_borders.shape[1] - 1]
heatmap_peaks = (heatmap_center > heatmap_left) &\
(heatmap_center > heatmap_right) &\
(heatmap_center > heatmap_up) &\
(heatmap_center > heatmap_down)
heatmap_peaks = heatmap_peaks[1:heatmap_center.shape[0] - 1, 1:heatmap_center.shape[1] - 1]
keypoints = list(zip(np.nonzero(heatmap_peaks)[1], np.nonzero(heatmap_peaks)[0])) # (w, h)
keypoints = sorted(keypoints, key=itemgetter(0))
suppressed = np.zeros(len(keypoints), np.uint8)
keypoints_with_score_and_id = []
keypoint_num = 0
for i in range(len(keypoints)):
if suppressed[i]:
continue
for j in range(i + 1, len(keypoints)):
if math.sqrt((keypoints[i][0] - keypoints[j][0]) ** 2 + (keypoints[i][1] - keypoints[j][1]) ** 2) < 6:
suppressed[j] = 1
keypoint_with_score_and_id = (
keypoints[i][0],
keypoints[i][1],
heatmap[keypoints[i][1], keypoints[i][0]],
total_keypoint_num + keypoint_num)
keypoints_with_score_and_id.append(keypoint_with_score_and_id)
keypoint_num += 1
all_keypoints.append(keypoints_with_score_and_id)
return keypoint_num
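# Illustrative sketch (added for clarity, not part of the original module):
# extract_keypoints keeps local maxima above 0.1 and suppresses peaks that lie
# within 6 pixels of a stronger (earlier) peak.
def _sketch_extract_keypoints():
    heatmap = np.zeros((20, 20), dtype=np.float32)
    heatmap[5, 5] = 0.9    # strong peak
    heatmap[5, 7] = 0.8    # within 6 px of the first peak -> suppressed
    heatmap[15, 15] = 0.7  # far away -> kept
    all_keypoints = []
    num = extract_keypoints(heatmap, all_keypoints, total_keypoint_num=0)
    assert num == 2
    # entries are (x, y, score, global_id) tuples, sorted by x
    assert [kp[:2] for kp in all_keypoints[0]] == [(5, 5), (15, 15)]
    return all_keypoints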
def group_keypoints(all_keypoints_by_type,
pafs,
pose_entry_size=20,
min_paf_score=0.05):
def linspace2d(start, stop, n=10):
points = 1 / (n - 1) * (stop - start)
return points[:, None] * np.arange(n) + start[:, None]
BODY_PARTS_KPT_IDS = [[1, 2], [1, 5], [2, 3], [3, 4], [5, 6], [6, 7], [1, 8], [8, 9], [9, 10], [1, 11],
[11, 12], [12, 13], [1, 0], [0, 14], [14, 16], [0, 15], [15, 17], [2, 16], [5, 17]]
BODY_PARTS_PAF_IDS = ([12, 13], [20, 21], [14, 15], [16, 17], [22, 23], [24, 25], [0, 1], [2, 3], [4, 5],
[6, 7], [8, 9], [10, 11], [28, 29], [30, 31], [34, 35], [32, 33], [36, 37], [18, 19],
[26, 27])
pose_entries = []
all_keypoints = np.array([item for sublist in all_keypoints_by_type for item in sublist])
for part_id in range(len(BODY_PARTS_PAF_IDS)):
part_pafs = pafs[:, :, BODY_PARTS_PAF_IDS[part_id]]
kpts_a = all_keypoints_by_type[BODY_PARTS_KPT_IDS[part_id][0]]
kpts_b = all_keypoints_by_type[BODY_PARTS_KPT_IDS[part_id][1]]
num_kpts_a = len(kpts_a)
num_kpts_b = len(kpts_b)
kpt_a_id = BODY_PARTS_KPT_IDS[part_id][0]
kpt_b_id = BODY_PARTS_KPT_IDS[part_id][1]
        if num_kpts_a == 0 and num_kpts_b == 0:  # no keypoints for this body part
continue
elif num_kpts_a == 0: # body part has just 'b' keypoints
for i in range(num_kpts_b):
num = 0
for j in range(len(pose_entries)): # check if already in some pose, was added by another body part
if pose_entries[j][kpt_b_id] == kpts_b[i][3]:
num += 1
continue
if num == 0:
pose_entry = np.ones(pose_entry_size) * -1
pose_entry[kpt_b_id] = kpts_b[i][3] # keypoint idx
pose_entry[-1] = 1 # num keypoints in pose
pose_entry[-2] = kpts_b[i][2] # pose score
pose_entries.append(pose_entry)
continue
elif num_kpts_b == 0: # body part has just 'a' keypoints
for i in range(num_kpts_a):
num = 0
for j in range(len(pose_entries)):
if pose_entries[j][kpt_a_id] == kpts_a[i][3]:
num += 1
continue
if num == 0:
pose_entry = np.ones(pose_entry_size) * -1
pose_entry[kpt_a_id] = kpts_a[i][3]
pose_entry[-1] = 1
pose_entry[-2] = kpts_a[i][2]
pose_entries.append(pose_entry)
continue
connections = []
for i in range(num_kpts_a):
kpt_a = np.array(kpts_a[i][0:2])
for j in range(num_kpts_b):
kpt_b = np.array(kpts_b[j][0:2])
mid_point = [(), ()]
mid_point[0] = (int(round((kpt_a[0] + kpt_b[0]) * 0.5)),
int(round((kpt_a[1] + kpt_b[1]) * 0.5)))
mid_point[1] = mid_point[0]
vec = [kpt_b[0] - kpt_a[0], kpt_b[1] - kpt_a[1]]
vec_norm = math.sqrt(vec[0] ** 2 + vec[1] ** 2)
if vec_norm == 0:
continue
vec[0] /= vec_norm
vec[1] /= vec_norm
cur_point_score = (vec[0] * part_pafs[mid_point[0][1], mid_point[0][0], 0] +
vec[1] * part_pafs[mid_point[1][1], mid_point[1][0], 1])
height_n = pafs.shape[0] // 2
success_ratio = 0
                point_num = 10  # number of points to integrate over the paf
if cur_point_score > -100:
passed_point_score = 0
passed_point_num = 0
x, y = linspace2d(kpt_a, kpt_b)
for point_idx in range(point_num):
px = int(round(x[point_idx]))
py = int(round(y[point_idx]))
paf = part_pafs[py, px, 0:2]
cur_point_score = vec[0] * paf[0] + vec[1] * paf[1]
if cur_point_score > min_paf_score:
passed_point_score += cur_point_score
passed_point_num += 1
success_ratio = passed_point_num / point_num
ratio = 0
if passed_point_num > 0:
ratio = passed_point_score / passed_point_num
ratio += min(height_n / vec_norm - 1, 0)
if ratio > 0 and success_ratio > 0.8:
score_all = ratio + kpts_a[i][2] + kpts_b[j][2]
connections.append([i, j, ratio, score_all])
if len(connections) > 0:
connections = sorted(connections, key=itemgetter(2), reverse=True)
num_connections = min(num_kpts_a, num_kpts_b)
has_kpt_a = np.zeros(num_kpts_a, dtype=np.int32)
has_kpt_b = np.zeros(num_kpts_b, dtype=np.int32)
filtered_connections = []
for row in range(len(connections)):
if len(filtered_connections) == num_connections:
break
i, j, cur_point_score = connections[row][0:3]
if not has_kpt_a[i] and not has_kpt_b[j]:
filtered_connections.append([kpts_a[i][3], kpts_b[j][3], cur_point_score])
has_kpt_a[i] = 1
has_kpt_b[j] = 1
connections = filtered_connections
if len(connections) == 0:
continue
if part_id == 0:
pose_entries = [np.ones(pose_entry_size) * -1 for _ in range(len(connections))]
for i in range(len(connections)):
pose_entries[i][BODY_PARTS_KPT_IDS[0][0]] = connections[i][0]
pose_entries[i][BODY_PARTS_KPT_IDS[0][1]] = connections[i][1]
pose_entries[i][-1] = 2
pose_entries[i][-2] = np.sum(all_keypoints[connections[i][0:2], 2]) + connections[i][2]
elif part_id == 17 or part_id == 18:
kpt_a_id = BODY_PARTS_KPT_IDS[part_id][0]
kpt_b_id = BODY_PARTS_KPT_IDS[part_id][1]
for i in range(len(connections)):
for j in range(len(pose_entries)):
if pose_entries[j][kpt_a_id] == connections[i][0] and pose_entries[j][kpt_b_id] == -1:
pose_entries[j][kpt_b_id] = connections[i][1]
elif pose_entries[j][kpt_b_id] == connections[i][1] and pose_entries[j][kpt_a_id] == -1:
pose_entries[j][kpt_a_id] = connections[i][0]
continue
else:
kpt_a_id = BODY_PARTS_KPT_IDS[part_id][0]
kpt_b_id = BODY_PARTS_KPT_IDS[part_id][1]
for i in range(len(connections)):
num = 0
for j in range(len(pose_entries)):
if pose_entries[j][kpt_a_id] == connections[i][0]:
pose_entries[j][kpt_b_id] = connections[i][1]
num += 1
pose_entries[j][-1] += 1
pose_entries[j][-2] += all_keypoints[connections[i][1], 2] + connections[i][2]
if num == 0:
pose_entry = np.ones(pose_entry_size) * -1
pose_entry[kpt_a_id] = connections[i][0]
pose_entry[kpt_b_id] = connections[i][1]
pose_entry[-1] = 2
pose_entry[-2] = np.sum(all_keypoints[connections[i][0:2], 2]) + connections[i][2]
pose_entries.append(pose_entry)
filtered_entries = []
for i in range(len(pose_entries)):
if pose_entries[i][-1] < 3 or (pose_entries[i][-2] / pose_entries[i][-1] < 0.2):
continue
filtered_entries.append(pose_entries[i])
pose_entries = np.asarray(filtered_entries)
return pose_entries, all_keypoints
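# Hedged illustration (not part of the original pipeline): the layout of a single
# `pose_entry` row built by `group_keypoints` above, assuming the usual
# `pose_entry_size` of 20 (18 keypoint slots plus two bookkeeping slots).
def _demo_pose_entry_layout():
    pose_entry_size = 20
    pose_entry = np.ones(pose_entry_size) * -1
    pose_entry[0] = 7      # slots 0..17: row index of the keypoint in `all_keypoints`, -1 if unassigned
    pose_entry[-2] = 0.83  # accumulated pose score
    pose_entry[-1] = 1     # number of keypoints assigned to this pose
    return pose_entry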
def convert_to_coco_format(pose_entries, all_keypoints):
coco_keypoints = []
scores = []
for n in range(len(pose_entries)):
if len(pose_entries[n]) == 0:
continue
keypoints = [0] * 17 * 3
to_coco_map = [0, -1, 6, 8, 10, 5, 7, 9, 12, 14, 16, 11, 13, 15, 2, 1, 4, 3]
person_score = pose_entries[n][-2]
position_id = -1
for keypoint_id in pose_entries[n][:-2]:
position_id += 1
if position_id == 1: # no 'neck' in COCO
continue
cx, cy, score, visibility = 0, 0, 0, 0 # keypoint not found
if keypoint_id != -1:
cx, cy, score = all_keypoints[int(keypoint_id), 0:3]
cx = cx + 0.5
cy = cy + 0.5
visibility = 1
keypoints[to_coco_map[position_id] * 3 + 0] = cx
keypoints[to_coco_map[position_id] * 3 + 1] = cy
keypoints[to_coco_map[position_id] * 3 + 2] = visibility
coco_keypoints.append(keypoints)
scores.append(person_score * max(0, (pose_entries[n][-1] - 1))) # -1 for 'neck'
return coco_keypoints, scores
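# Hedged sketch: each element of `coco_keypoints` returned above is a flat list of
# 17 * 3 numbers, one (x, y, visibility) triple per joint in COCO joint order.
def _demo_print_coco_keypoints(keypoints):
    assert len(keypoints) == 17 * 3
    for joint_id in range(17):
        cx, cy, visibility = keypoints[3 * joint_id:3 * joint_id + 3]
        print("joint {}: x={}, y={}, visibility={}".format(joint_id, cx, cy, visibility))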
def recalc_pose(pred,
label):
label_img_id = label[:, 0].astype(np.int32)
# label_score = label[:, 1]
pads = label[:, 2:6].astype(np.int32)
heights = label[:, 6].astype(np.int32)
widths = label[:, 7].astype(np.int32)
keypoints = 19
stride = 8
heatmap2ds = pred[:, :keypoints]
paf2ds = pred[:, keypoints:(3 * keypoints)]
pred_pts_score = []
pred_person_score = []
label_img_id_ = []
batch = pred.shape[0]
for batch_i in range(batch):
label_img_id_i = label_img_id[batch_i]
pad = list(pads[batch_i])
height = int(heights[batch_i])
width = int(widths[batch_i])
heatmap2d = heatmap2ds[batch_i]
paf2d = paf2ds[batch_i]
heatmaps = np.transpose(heatmap2d, (1, 2, 0))
heatmaps = cv2.resize(heatmaps, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC)
        heatmaps = heatmaps[pad[0]:heatmaps.shape[0] - pad[2], pad[1]:heatmaps.shape[1] - pad[3], :]
heatmaps = cv2.resize(heatmaps, (width, height), interpolation=cv2.INTER_CUBIC)
pafs = np.transpose(paf2d, (1, 2, 0))
pafs = cv2.resize(pafs, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC)
pafs = pafs[pad[0]:pafs.shape[0] - pad[2], pad[1]:pafs.shape[1] - pad[3], :]
pafs = cv2.resize(pafs, (width, height), interpolation=cv2.INTER_CUBIC)
total_keypoints_num = 0
all_keypoints_by_type = []
for kpt_idx in range(18): # 19th for bg
total_keypoints_num += extract_keypoints(
heatmaps[:, :, kpt_idx],
all_keypoints_by_type,
total_keypoints_num)
pose_entries, all_keypoints = group_keypoints(
all_keypoints_by_type,
pafs)
coco_keypoints, scores = convert_to_coco_format(
pose_entries,
all_keypoints)
pred_pts_score.append(coco_keypoints)
pred_person_score.append(scores)
label_img_id_.append([label_img_id_i] * len(scores))
return np.array(pred_pts_score).reshape((-1, 17, 3)), np.array(pred_person_score)[0], np.array(label_img_id_[0])
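# Hedged note on the `label` contract assumed by `recalc_pose` above: each row packs
# [image_id, score, pad_0, pad_1, pad_2, pad_3, height, width], where pad_0/pad_2
# trim rows and pad_1/pad_3 trim columns of the upsampled heatmaps and PAFs.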
# ---------------------------------------------------------------------------------------------------------------------
class CocoHpe2MetaInfo(DatasetMetaInfo):
def __init__(self):
super(CocoHpe2MetaInfo, self).__init__()
self.label = "COCO"
self.short_label = "coco"
self.root_dir_name = "coco"
self.dataset_class = CocoHpe2Dataset
self.num_training_samples = None
self.in_channels = 3
self.num_classes = 17
self.input_image_size = (368, 368)
self.train_metric_capts = None
self.train_metric_names = None
self.train_metric_extra_kwargs = None
self.val_metric_capts = None
self.val_metric_names = None
self.test_metric_capts = ["Val.CocoOksAp"]
self.test_metric_names = ["CocoHpeOksApMetric"]
self.test_metric_extra_kwargs = [
{"name": "OksAp",
"coco_annotations_file_path": None,
"use_file": False,
"pose_postprocessing_fn": lambda x, y: recalc_pose(x, y)}]
self.saver_acc_ind = 0
self.do_transform = True
self.val_transform = CocoHpe2ValTransform
self.test_transform = CocoHpe2ValTransform
self.ml_type = "hpe"
self.net_extra_kwargs = {}
self.mean_rgb = (0.485, 0.456, 0.406)
self.std_rgb = (0.229, 0.224, 0.225)
self.load_ignore_extra = False
def add_dataset_parser_arguments(self,
parser,
work_dir_path):
"""
        Create python script parameters (for COCO keypoint dataset metainfo).
Parameters:
----------
parser : ArgumentParser
ArgumentParser instance.
work_dir_path : str
Path to working directory.
"""
super(CocoHpe2MetaInfo, self).add_dataset_parser_arguments(parser, work_dir_path)
parser.add_argument(
"--input-size",
type=int,
nargs=2,
default=self.input_image_size,
help="size of the input for model")
parser.add_argument(
"--load-ignore-extra",
action="store_true",
help="ignore extra layers in the source PyTroch model")
def update(self,
args):
"""
        Update COCO keypoint dataset metainfo after user customizing.
Parameters:
----------
args : ArgumentParser
Main script arguments.
"""
super(CocoHpe2MetaInfo, self).update(args)
self.input_image_size = args.input_size
self.load_ignore_extra = args.load_ignore_extra
def update_from_dataset(self,
dataset):
"""
Update dataset metainfo after a dataset class instance creation.
Parameters:
----------
        dataset : obj
A dataset class instance.
"""
self.test_metric_extra_kwargs[0]["coco_annotations_file_path"] = dataset.annotations_file_path
| 20,780
| 39.747059
| 119
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/datasets/svhn_cls_dataset.py
|
"""
SVHN classification dataset.
"""
import os
from torchvision.datasets import SVHN
from .cifar10_cls_dataset import CIFAR10MetaInfo
class SVHNFine(SVHN):
"""
SVHN image classification dataset from http://ufldl.stanford.edu/housenumbers/.
    Each sample is an image with shape (32, 32, 3).
Note: The SVHN dataset assigns the label `10` to the digit `0`. However, in this Dataset,
we assign the label `0` to the digit `0`.
Parameters:
----------
root : str, default '~/.torch/datasets/svhn'
Path to temp folder for storing data.
mode : str, default 'train'
'train', 'val', or 'test'.
transform : function, default None
A function that takes data and label and transforms them.
"""
def __init__(self,
root=os.path.join("~", ".torch", "datasets", "svhn"),
mode="train",
transform=None):
super(SVHNFine, self).__init__(
root=root,
split=("train" if mode == "train" else "test"),
transform=transform,
download=True)
class SVHNMetaInfo(CIFAR10MetaInfo):
def __init__(self):
super(SVHNMetaInfo, self).__init__()
self.label = "SVHN"
self.root_dir_name = "svhn"
self.dataset_class = SVHNFine
self.num_training_samples = 73257
| 1,364
| 30.022727
| 93
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/datasets/coco_hpe3_dataset.py
|
"""
COCO keypoint detection (2D multiple human pose estimation) dataset (for IBPPose).
"""
import os
# import json
import math
import cv2
import numpy as np
import torch
from torch.nn import functional as F
import torch.utils.data as data
from .dataset_metainfo import DatasetMetaInfo
class CocoHpe3Dataset(data.Dataset):
"""
COCO keypoint detection (2D multiple human pose estimation) dataset.
Parameters:
----------
root : string
Path to `annotations`, `train2017`, and `val2017` folders.
mode : string, default 'train'
'train', 'val', 'test', or 'demo'.
transform : callable, optional
A function that transforms the image.
"""
def __init__(self,
root,
mode="train",
transform=None):
super(CocoHpe3Dataset, self).__init__()
self._root = os.path.expanduser(root)
self.mode = mode
self.transform = transform
mode_name = "train" if mode == "train" else "val"
annotations_dir_path = os.path.join(root, "annotations")
annotations_file_path = os.path.join(annotations_dir_path, "person_keypoints_" + mode_name + "2017.json")
# with open(annotations_file_path, "r") as f:
# self.file_names = json.load(f)["images"]
self.image_dir_path = os.path.join(root, mode_name + "2017")
self.annotations_file_path = annotations_file_path
from pycocotools.coco import COCO
self.coco_gt = COCO(self.annotations_file_path)
self.validation_ids = self.coco_gt.getImgIds()[:]
def __str__(self):
return self.__class__.__name__ + "(" + self._root + ")"
def __len__(self):
return len(self.validation_ids)
def __getitem__(self, idx):
# file_name = self.file_names[idx]["file_name"]
image_id = self.validation_ids[idx]
file_name = self.coco_gt.imgs[image_id]["file_name"]
image_file_path = os.path.join(self.image_dir_path, file_name)
image = cv2.imread(image_file_path, flags=cv2.IMREAD_COLOR)
# image = cv2.cvtColor(img, code=cv2.COLOR_BGR2RGB)
image_src_shape = image.shape[:2]
boxsize = 512
max_downsample = 64
pad_value = 128
scale = boxsize / image.shape[0]
if scale * image.shape[0] > 2600 or scale * image.shape[1] > 3800:
scale = min(2600 / image.shape[0], 3800 / image.shape[1])
image = cv2.resize(image, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)
image, pad = self.pad_right_down_corner(image, max_downsample, pad_value)
image = np.float32(image / 255)
image = image.transpose((2, 0, 1))
image = torch.from_numpy(image)
# image_id = int(os.path.splitext(os.path.basename(file_name))[0])
label = np.array([image_id, 1.0] + pad + list(image_src_shape), np.float32)
label = torch.from_numpy(label)
return image, label
@staticmethod
def pad_right_down_corner(img,
stride,
pad_value):
h = img.shape[0]
w = img.shape[1]
pad = 4 * [None]
pad[0] = 0 # up
pad[1] = 0 # left
pad[2] = 0 if (h % stride == 0) else stride - (h % stride) # down
pad[3] = 0 if (w % stride == 0) else stride - (w % stride) # right
img_padded = img
pad_up = np.tile(img_padded[0:1, :, :] * 0 + pad_value, (pad[0], 1, 1))
img_padded = np.concatenate((pad_up, img_padded), axis=0)
pad_left = np.tile(img_padded[:, 0:1, :] * 0 + pad_value, (1, pad[1], 1))
img_padded = np.concatenate((pad_left, img_padded), axis=1)
pad_down = np.tile(img_padded[-2:-1, :, :] * 0 + pad_value, (pad[2], 1, 1))
img_padded = np.concatenate((img_padded, pad_down), axis=0)
pad_right = np.tile(img_padded[:, -2:-1, :] * 0 + pad_value, (1, pad[3], 1))
img_padded = np.concatenate((img_padded, pad_right), axis=1)
return img_padded, pad
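# Minimal usage sketch for the padding helper above, assuming an HxWx3 uint8 image:
# only the bottom/right borders grow, so both spatial sizes become multiples of `stride`.
def _demo_pad_right_down_corner():
    img = np.zeros((37, 50, 3), dtype=np.uint8)
    img_padded, pad = CocoHpe3Dataset.pad_right_down_corner(img, stride=8, pad_value=128)
    # pad == [0, 0, 3, 6] and img_padded.shape == (40, 56, 3) for this input
    return img_padded, pad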
# ---------------------------------------------------------------------------------------------------------------------
class CocoHpe2ValTransform(object):
def __init__(self,
ds_metainfo):
self.ds_metainfo = ds_metainfo
def __call__(self, src, label):
return src, label
def recalc_pose(pred,
label):
dt_gt_mapping = {0: 0, 1: None, 2: 6, 3: 8, 4: 10, 5: 5, 6: 7, 7: 9, 8: 12, 9: 14, 10: 16, 11: 11, 12: 13, 13: 15,
14: 2, 15: 1, 16: 4, 17: 3}
parts = ["nose", "neck", "Rsho", "Relb", "Rwri", "Lsho", "Lelb", "Lwri", "Rhip", "Rkne", "Rank", "Lhip", "Lkne",
"Lank", "Reye", "Leye", "Rear", "Lear"]
num_parts = len(parts)
parts_dict = dict(zip(parts, range(num_parts)))
limb_from = ['neck', 'neck', 'neck', 'neck', 'neck', 'nose', 'nose', 'Reye', 'Leye', 'neck', 'Rsho', 'Relb', 'neck',
'Lsho', 'Lelb', 'neck', 'Rhip', 'Rkne', 'neck', 'Lhip', 'Lkne', 'nose', 'nose', 'Rsho', 'Rhip', 'Lsho',
'Lhip', 'Rear', 'Lear', 'Rhip']
limb_to = ['nose', 'Reye', 'Leye', 'Rear', 'Lear', 'Reye', 'Leye', 'Rear', 'Lear', 'Rsho', 'Relb', 'Rwri', 'Lsho',
'Lelb', 'Lwri', 'Rhip', 'Rkne', 'Rank', 'Lhip', 'Lkne', 'Lank', 'Rsho', 'Lsho', 'Rhip', 'Lkne', 'Lhip',
'Rkne', 'Rsho', 'Lsho', 'Lhip']
limb_from = [parts_dict[n] for n in limb_from]
limb_to = [parts_dict[n] for n in limb_to]
    assert limb_from == [
        1, 1, 1, 1, 1, 0, 0, 14, 15, 1, 2, 3, 1, 5, 6, 1, 8, 9, 1, 11, 12, 0, 0, 2, 8, 5, 11, 16, 17, 8]
    assert limb_to == [
        0, 14, 15, 16, 17, 14, 15, 16, 17, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 2, 5, 8, 12, 11, 9, 2, 5, 11]
limbs_conn = list(zip(limb_from, limb_to))
limb_seq = limbs_conn
paf_layers = 30
num_layers = 50
stride = 4
label_img_id = label[:, 0].astype(np.int32)
# label_score = label[:, 1]
pads = label[:, 2:6].astype(np.int32)
image_src_shapes = label[:, 6:8].astype(np.int32)
pred_pts_score = []
pred_person_score = []
label_img_id_ = []
batch = pred.shape[0]
for batch_i in range(batch):
label_img_id_i = label_img_id[batch_i]
pad = list(pads[batch_i])
image_src_shape = list(image_src_shapes[batch_i])
output_blob = pred[batch_i].transpose((1, 2, 0))
output_paf = output_blob[:, :, :paf_layers]
output_heatmap = output_blob[:, :, paf_layers:num_layers]
heatmap = cv2.resize(output_heatmap, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC)
heatmap = heatmap[
pad[0]:(output_blob.shape[0] * stride - pad[2]),
pad[1]:(output_blob.shape[1] * stride - pad[3]),
:]
heatmap = cv2.resize(heatmap, (image_src_shape[1], image_src_shape[0]), interpolation=cv2.INTER_CUBIC)
paf = cv2.resize(output_paf, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC)
paf = paf[
pad[0]:(output_blob.shape[0] * stride - pad[2]),
pad[1]:(output_blob.shape[1] * stride - pad[3]),
:]
paf = cv2.resize(paf, (image_src_shape[1], image_src_shape[0]), interpolation=cv2.INTER_CUBIC)
all_peaks = find_peaks(heatmap)
connection_all, special_k = find_connections(all_peaks, paf, image_src_shape[0], limb_seq)
subset, candidate = find_people(connection_all, special_k, all_peaks, limb_seq)
for s in subset[..., 0]:
keypoint_indexes = s[:18]
person_keypoint_coordinates = []
for index in keypoint_indexes:
if index == -1:
X, Y, C = 0, 0, 0
else:
X, Y, C = list(candidate[index.astype(int)][:2]) + [1]
person_keypoint_coordinates.append([X, Y, C])
person_keypoint_coordinates_coco = [None] * 17
for dt_index, gt_index in dt_gt_mapping.items():
if gt_index is None:
continue
person_keypoint_coordinates_coco[gt_index] = person_keypoint_coordinates[dt_index]
pred_pts_score.append(person_keypoint_coordinates_coco)
pred_person_score.append(1 - 1.0 / s[18])
label_img_id_.append(label_img_id_i)
return np.array(pred_pts_score).reshape((-1, 17, 3)), np.array(pred_person_score), np.array(label_img_id_)
def find_peaks(heatmap_avg):
thre1 = 0.1
offset_radius = 2
all_peaks = []
peak_counter = 0
heatmap_avg = heatmap_avg.astype(np.float32)
filter_map = heatmap_avg[:, :, :18].copy().transpose((2, 0, 1))[None, ...]
filter_map = torch.from_numpy(filter_map).cuda()
filter_map = keypoint_heatmap_nms(filter_map, kernel=3, thre=thre1)
filter_map = filter_map.cpu().numpy().squeeze().transpose((1, 2, 0))
for part in range(18):
map_ori = heatmap_avg[:, :, part]
peaks_binary = filter_map[:, :, part]
peaks = list(zip(np.nonzero(peaks_binary)[1], np.nonzero(peaks_binary)[0])) # note reverse
refined_peaks_with_score = [refine_centroid(map_ori, anchor, offset_radius) for anchor in peaks]
        ids = range(peak_counter, peak_counter + len(refined_peaks_with_score))
        peaks_with_score_and_id = [refined_peaks_with_score[i] + (ids[i],) for i in range(len(ids))]
all_peaks.append(peaks_with_score_and_id)
peak_counter += len(peaks)
return all_peaks
def keypoint_heatmap_nms(heat, kernel=3, thre=0.1):
# keypoint NMS on heatmap (score map)
pad = (kernel - 1) // 2
pad_heat = F.pad(heat, (pad, pad, pad, pad), mode="reflect")
hmax = F.max_pool2d(pad_heat, (kernel, kernel), stride=1, padding=0)
keep = (hmax == heat).float() * (heat >= thre).float()
return heat * keep
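# Hedged toy example for the NMS above: only cells that equal their 3x3 pooled
# maximum and exceed the threshold survive; weaker neighbours are zeroed.
def _demo_keypoint_heatmap_nms():
    heat = torch.zeros(1, 1, 5, 5)
    heat[0, 0, 2, 2] = 0.9  # the peak, kept
    heat[0, 0, 2, 3] = 0.5  # suppressed: the pooled maximum at this cell is 0.9
    return keypoint_heatmap_nms(heat, kernel=3, thre=0.1)  # non-zero only at (2, 2)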
def refine_centroid(scorefmp, anchor, radius):
"""
    Refine the centroid coordinate. It does not affect the results after testing.
    :param scorefmp: 2-D numpy array, original regressed score map
    :param anchor: python tuple, (x, y) coordinates
:param radius: int, range of considered scores
:return: refined anchor, refined score
"""
x_c, y_c = anchor
x_min = x_c - radius
x_max = x_c + radius + 1
y_min = y_c - radius
y_max = y_c + radius + 1
if y_max > scorefmp.shape[0] or y_min < 0 or x_max > scorefmp.shape[1] or x_min < 0:
return anchor + (scorefmp[y_c, x_c], )
score_box = scorefmp[y_min:y_max, x_min:x_max]
    y_grid, x_grid = np.mgrid[-radius:radius + 1, -radius:radius + 1]  # rows of score_box index y, columns index x
offset_x = (score_box * x_grid).sum() / score_box.sum()
offset_y = (score_box * y_grid).sum() / score_box.sum()
x_refine = x_c + offset_x
y_refine = y_c + offset_y
refined_anchor = (x_refine, y_refine)
return refined_anchor + (score_box.mean(),)
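# Hedged worked example (assuming the y/x grid orientation above): equal mass at
# (x=4, y=4) and (x=5, y=4) pulls the refined x to 4.5 while y stays at 4.0.
def _demo_refine_centroid():
    scoremap = np.zeros((9, 9), dtype=np.float32)
    scoremap[4, 4] = 1.0  # rows index y, columns index x
    scoremap[4, 5] = 1.0
    return refine_centroid(scoremap, anchor=(4, 4), radius=2)  # ~(4.5, 4.0, 0.08)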
def find_connections(all_peaks, paf_avg, image_width, limb_seq):
mid_num_ = 20
thre2 = 0.1
connect_ration = 0.8
connection_all = []
special_k = []
for k in range(len(limb_seq)):
score_mid = paf_avg[:, :, k]
candA = all_peaks[limb_seq[k][0]]
candB = all_peaks[limb_seq[k][1]]
nA = len(candA)
nB = len(candB)
if nA != 0 and nB != 0:
connection_candidate = []
for i in range(nA):
for j in range(nB):
vec = np.subtract(candB[j][:2], candA[i][:2])
norm = math.sqrt(vec[0] * vec[0] + vec[1] * vec[1])
mid_num = min(int(round(norm + 1)), mid_num_)
if norm == 0:
continue
startend = list(zip(np.linspace(candA[i][0], candB[j][0], num=mid_num),
np.linspace(candA[i][1], candB[j][1], num=mid_num)))
limb_response = np.array([score_mid[int(round(startend[I][1])), int(round(startend[I][0]))] for
I in range(len(startend))])
score_midpts = limb_response
score_with_dist_prior = sum(score_midpts) / len(score_midpts) + min(0.5 * image_width / norm - 1, 0)
criterion1 = len(np.nonzero(score_midpts > thre2)[0]) >= connect_ration * len(score_midpts)
criterion2 = score_with_dist_prior > 0
if criterion1 and criterion2:
connection_candidate.append([
i,
j,
score_with_dist_prior,
norm,
0.5 * score_with_dist_prior + 0.25 * candA[i][2] + 0.25 * candB[j][2]])
connection_candidate = sorted(connection_candidate, key=lambda x: x[4], reverse=True)
connection = np.zeros((0, 6))
for c in range(len(connection_candidate)):
i, j, s, limb_len = connection_candidate[c][0:4]
if i not in connection[:, 3] and j not in connection[:, 4]:
connection = np.vstack([connection, [candA[i][3], candB[j][3], s, i, j, limb_len]])
if len(connection) >= min(nA, nB):
break
connection_all.append(connection)
else:
special_k.append(k)
connection_all.append([])
return connection_all, special_k
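# Hedged note: every row stacked into `connection` above is
# [global_id_A, global_id_B, paf_score, local_idx_A, local_idx_B, limb_length];
# `find_people` below reads columns 0, 1, 2, and -1 of these rows.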
def find_people(connection_all, special_k, all_peaks, limb_seq):
len_rate = 16.0
connection_tole = 0.7
remove_recon = 0
subset = -1 * np.ones((0, 20, 2))
candidate = np.array([item for sublist in all_peaks for item in sublist])
for k in range(len(limb_seq)):
if k not in special_k:
partAs = connection_all[k][:, 0]
partBs = connection_all[k][:, 1]
indexA, indexB = np.array(limb_seq[k])
for i in range(len(connection_all[k])):
found = 0
subset_idx = [-1, -1]
for j in range(len(subset)):
                    if subset[j][indexA][0].astype(int) == partAs[i].astype(int) or \
                            subset[j][indexB][0].astype(int) == partBs[i].astype(int):
if found >= 2:
continue
subset_idx[found] = j
found += 1
if found == 1:
j = subset_idx[0]
if subset[j][indexB][0].astype(int) == -1 and\
len_rate * subset[j][-1][1] > connection_all[k][i][-1]:
subset[j][indexB][0] = partBs[i]
subset[j][indexB][1] = connection_all[k][i][2]
subset[j][-1][0] += 1
subset[j][-2][0] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]
subset[j][-1][1] = max(connection_all[k][i][-1], subset[j][-1][1])
elif subset[j][indexB][0].astype(int) != partBs[i].astype(int):
if subset[j][indexB][1] >= connection_all[k][i][2]:
pass
else:
if len_rate * subset[j][-1][1] <= connection_all[k][i][-1]:
continue
subset[j][-2][0] -= candidate[subset[j][indexB][0].astype(int), 2] + subset[j][indexB][1]
subset[j][indexB][0] = partBs[i]
subset[j][indexB][1] = connection_all[k][i][2]
subset[j][-2][0] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]
subset[j][-1][1] = max(connection_all[k][i][-1], subset[j][-1][1])
elif subset[j][indexB][0].astype(int) == partBs[i].astype(int) and\
subset[j][indexB][1] <= connection_all[k][i][2]:
subset[j][-2][0] -= candidate[subset[j][indexB][0].astype(int), 2] + subset[j][indexB][1]
subset[j][indexB][0] = partBs[i]
subset[j][indexB][1] = connection_all[k][i][2]
subset[j][-2][0] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]
subset[j][-1][1] = max(connection_all[k][i][-1], subset[j][-1][1])
else:
pass
elif found == 2:
j1, j2 = subset_idx
membership1 = ((subset[j1][..., 0] >= 0).astype(int))[:-2]
membership2 = ((subset[j2][..., 0] >= 0).astype(int))[:-2]
membership = membership1 + membership2
if len(np.nonzero(membership == 2)[0]) == 0:
min_limb1 = np.min(subset[j1, :-2, 1][membership1 == 1])
min_limb2 = np.min(subset[j2, :-2, 1][membership2 == 1])
min_tolerance = min(min_limb1, min_limb2)
if connection_all[k][i][2] < connection_tole * min_tolerance or\
len_rate * subset[j1][-1][1] <= connection_all[k][i][-1]:
continue
subset[j1][:-2][...] += (subset[j2][:-2][...] + 1)
subset[j1][-2:][:, 0] += subset[j2][-2:][:, 0]
subset[j1][-2][0] += connection_all[k][i][2]
subset[j1][-1][1] = max(connection_all[k][i][-1], subset[j1][-1][1])
subset = np.delete(subset, j2, 0)
else:
if connection_all[k][i][0] in subset[j1, :-2, 0]:
c1 = np.where(subset[j1, :-2, 0] == connection_all[k][i][0])
c2 = np.where(subset[j2, :-2, 0] == connection_all[k][i][1])
else:
c1 = np.where(subset[j1, :-2, 0] == connection_all[k][i][1])
c2 = np.where(subset[j2, :-2, 0] == connection_all[k][i][0])
c1 = int(c1[0])
c2 = int(c2[0])
                        assert c1 != c2, "a candidate keypoint is used twice, shared by two people"
if connection_all[k][i][2] < subset[j1][c1][1] and connection_all[k][i][2] < subset[j2][c2][1]:
continue
small_j = j1
remove_c = c1
if subset[j1][c1][1] > subset[j2][c2][1]:
small_j = j2
remove_c = c2
if remove_recon > 0:
subset[small_j][-2][0] -= candidate[subset[small_j][remove_c][0].astype(int), 2] + \
subset[small_j][remove_c][1]
subset[small_j][remove_c][0] = -1
subset[small_j][remove_c][1] = -1
subset[small_j][-1][0] -= 1
elif not found and k < len(limb_seq):
row = -1 * np.ones((20, 2))
row[indexA][0] = partAs[i]
row[indexA][1] = connection_all[k][i][2]
row[indexB][0] = partBs[i]
row[indexB][1] = connection_all[k][i][2]
row[-1][0] = 2
row[-1][1] = connection_all[k][i][-1]
row[-2][0] = sum(candidate[connection_all[k][i, :2].astype(int), 2]) + connection_all[k][i][2]
row = row[np.newaxis, :, :]
subset = np.concatenate((subset, row), axis=0)
deleteIdx = []
for i in range(len(subset)):
if subset[i][-1][0] < 2 or subset[i][-2][0] / subset[i][-1][0] < 0.45:
deleteIdx.append(i)
subset = np.delete(subset, deleteIdx, axis=0)
return subset, candidate
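# Hedged note on the `subset` array returned above: shape (num_people, 20, 2);
# rows 0..17 hold [global keypoint id, connection score] per body part, row -2
# accumulates the total score, and row -1 holds [part count, max limb length].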
# ---------------------------------------------------------------------------------------------------------------------
class CocoHpe3MetaInfo(DatasetMetaInfo):
def __init__(self):
super(CocoHpe3MetaInfo, self).__init__()
self.label = "COCO"
self.short_label = "coco"
self.root_dir_name = "coco"
self.dataset_class = CocoHpe3Dataset
self.num_training_samples = None
self.in_channels = 3
self.num_classes = 17
self.input_image_size = (256, 256)
self.train_metric_capts = None
self.train_metric_names = None
self.train_metric_extra_kwargs = None
self.val_metric_capts = None
self.val_metric_names = None
self.test_metric_capts = ["Val.CocoOksAp"]
self.test_metric_names = ["CocoHpeOksApMetric"]
self.test_metric_extra_kwargs = [
{"name": "OksAp",
"coco_annotations_file_path": None,
"validation_ids": None,
"use_file": False,
"pose_postprocessing_fn": lambda x, y: recalc_pose(x, y)}]
self.saver_acc_ind = 0
self.do_transform = True
self.val_transform = CocoHpe2ValTransform
self.test_transform = CocoHpe2ValTransform
self.ml_type = "hpe"
self.net_extra_kwargs = {}
self.mean_rgb = (0.485, 0.456, 0.406)
self.std_rgb = (0.229, 0.224, 0.225)
self.load_ignore_extra = False
def add_dataset_parser_arguments(self,
parser,
work_dir_path):
"""
        Create python script parameters (for COCO keypoint dataset metainfo).
Parameters:
----------
parser : ArgumentParser
ArgumentParser instance.
work_dir_path : str
Path to working directory.
"""
super(CocoHpe3MetaInfo, self).add_dataset_parser_arguments(parser, work_dir_path)
parser.add_argument(
"--input-size",
type=int,
nargs=2,
default=self.input_image_size,
help="size of the input for model")
parser.add_argument(
"--load-ignore-extra",
action="store_true",
help="ignore extra layers in the source PyTroch model")
def update(self,
args):
"""
        Update COCO keypoint dataset metainfo after user customizing.
Parameters:
----------
args : ArgumentParser
Main script arguments.
"""
super(CocoHpe3MetaInfo, self).update(args)
self.input_image_size = args.input_size
self.load_ignore_extra = args.load_ignore_extra
def update_from_dataset(self,
dataset):
"""
Update dataset metainfo after a dataset class instance creation.
Parameters:
----------
        dataset : obj
A dataset class instance.
"""
self.test_metric_extra_kwargs[0]["coco_annotations_file_path"] = dataset.annotations_file_path
# self.test_metric_extra_kwargs[0]["validation_ids"] = dataset.validation_ids
| 23,180
| 40.101064
| 120
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/datasets/asr_dataset.py
|
"""
Automatic Speech Recognition (ASR) abstract dataset.
"""
__all__ = ['AsrDataset', 'asr_test_transform']
import torch.utils.data as data
import torchvision.transforms as transforms
from pytorch.pytorchcv.models.jasper import NemoAudioReader
class AsrDataset(data.Dataset):
"""
Automatic Speech Recognition (ASR) abstract dataset.
Parameters:
----------
root : str
        Path to the folder storing the dataset.
mode : str
'train', 'val', 'test', or 'demo'.
transform : func
A function that takes data and transforms it.
"""
def __init__(self,
root,
mode,
transform):
super(AsrDataset, self).__init__()
assert (mode in ("train", "val", "test", "demo"))
self.root = root
self.mode = mode
self.transform = transform
self.data = []
self.audio_reader = NemoAudioReader()
def __getitem__(self, index):
wav_file_path, label_text = self.data[index]
audio_data = self.audio_reader.read_from_file(wav_file_path)
audio_len = audio_data.shape[0]
return (audio_data, audio_len), label_text
def __len__(self):
return len(self.data)
def asr_test_transform(ds_metainfo):
assert (ds_metainfo is not None)
return transforms.Compose([
transforms.ToTensor(),
])
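# Hedged sketch of the subclassing contract implied above: a concrete ASR dataset
# only needs to fill `self.data` with (wav_file_path, label_text) pairs; the path
# below is a hypothetical placeholder.
class _DemoAsrDataset(AsrDataset):
    def __init__(self,
                 root,
                 mode="test",
                 transform=None):
        super(_DemoAsrDataset, self).__init__(
            root=root,
            mode=mode,
            transform=transform)
        self.data.append(("/path/to/sample.wav", "hello world"))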
| 1,385
| 25.653846
| 68
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/datasets/cifar10_cls_dataset.py
|
"""
CIFAR-10 classification dataset.
"""
import os
from torchvision.datasets import CIFAR10
import torchvision.transforms as transforms
from .dataset_metainfo import DatasetMetaInfo
class CIFAR10Fine(CIFAR10):
"""
CIFAR-10 image classification dataset.
Parameters:
----------
root : str, default '~/.torch/datasets/cifar10'
Path to temp folder for storing data.
mode : str, default 'train'
'train', 'val', or 'test'.
transform : function, default None
A function that takes data and label and transforms them.
"""
def __init__(self,
root=os.path.join("~", ".torch", "datasets", "cifar10"),
mode="train",
transform=None):
super(CIFAR10Fine, self).__init__(
root=root,
train=(mode == "train"),
transform=transform,
download=True)
class CIFAR10MetaInfo(DatasetMetaInfo):
def __init__(self):
super(CIFAR10MetaInfo, self).__init__()
self.label = "CIFAR10"
self.short_label = "cifar"
self.root_dir_name = "cifar10"
self.dataset_class = CIFAR10Fine
self.num_training_samples = 50000
self.in_channels = 3
self.num_classes = 10
self.input_image_size = (32, 32)
self.train_metric_capts = ["Train.Err"]
self.train_metric_names = ["Top1Error"]
self.train_metric_extra_kwargs = [{"name": "err"}]
self.val_metric_capts = ["Val.Err"]
self.val_metric_names = ["Top1Error"]
self.val_metric_extra_kwargs = [{"name": "err"}]
self.saver_acc_ind = 0
self.train_transform = cifar10_train_transform
self.val_transform = cifar10_val_transform
self.test_transform = cifar10_val_transform
self.ml_type = "imgcls"
def cifar10_train_transform(ds_metainfo,
mean_rgb=(0.4914, 0.4822, 0.4465),
std_rgb=(0.2023, 0.1994, 0.2010),
jitter_param=0.4):
assert (ds_metainfo is not None)
assert (ds_metainfo.input_image_size[0] == 32)
return transforms.Compose([
transforms.RandomCrop(
size=32,
padding=4),
transforms.RandomHorizontalFlip(),
transforms.ColorJitter(
brightness=jitter_param,
contrast=jitter_param,
saturation=jitter_param),
transforms.ToTensor(),
transforms.Normalize(
mean=mean_rgb,
std=std_rgb)
])
def cifar10_val_transform(ds_metainfo,
mean_rgb=(0.4914, 0.4822, 0.4465),
std_rgb=(0.2023, 0.1994, 0.2010)):
assert (ds_metainfo is not None)
return transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(
mean=mean_rgb,
std=std_rgb)
])
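# Hedged usage sketch: wire the metainfo, transform factory, and dataset class
# together the way the training scripts are assumed to do it.
def _demo_cifar10_pipeline():
    ds_metainfo = CIFAR10MetaInfo()
    transform = ds_metainfo.train_transform(ds_metainfo)
    return ds_metainfo.dataset_class(
        mode="train",
        transform=transform)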
| 2,897
| 30.5
| 73
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/datasets/__init__.py
| 0
| 0
| 0
|
py
|
|
imgclsmob
|
imgclsmob-master/pytorch/datasets/librispeech_asr_dataset.py
|
"""
LibriSpeech ASR dataset.
"""
__all__ = ['LibriSpeech', 'LibriSpeechMetaInfo']
import os
import numpy as np
from .dataset_metainfo import DatasetMetaInfo
from .asr_dataset import AsrDataset, asr_test_transform
class LibriSpeech(AsrDataset):
"""
LibriSpeech dataset for Automatic Speech Recognition (ASR).
Parameters:
----------
root : str, default '~/.torch/datasets/LibriSpeech'
        Path to the folder storing the dataset.
mode : str, default 'test'
'train', 'val', 'test', or 'demo'.
subset : str, default 'dev-clean'
Data subset.
transform : function, default None
A function that takes data and transforms it.
"""
def __init__(self,
root=os.path.join("~", ".torch", "datasets", "LibriSpeech"),
mode="test",
subset="dev-clean",
transform=None):
super(LibriSpeech, self).__init__(
root=root,
mode=mode,
transform=transform)
self.vocabulary = [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',
'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', "'"]
vocabulary_dict = {c: i for i, c in enumerate(self.vocabulary)}
import soundfile
root_dir_path = os.path.expanduser(root)
assert os.path.exists(root_dir_path)
data_dir_path = os.path.join(root_dir_path, subset)
assert os.path.exists(data_dir_path)
for speaker_id in os.listdir(data_dir_path):
speaker_dir_path = os.path.join(data_dir_path, speaker_id)
for chapter_id in os.listdir(speaker_dir_path):
chapter_dir_path = os.path.join(speaker_dir_path, chapter_id)
transcript_file_path = os.path.join(chapter_dir_path, "{}-{}.trans.txt".format(speaker_id, chapter_id))
with open(transcript_file_path, "r") as f:
transcripts = dict(x.split(" ", maxsplit=1) for x in f.readlines())
for flac_file_name in os.listdir(chapter_dir_path):
if flac_file_name.endswith(".flac"):
wav_file_name = flac_file_name.replace(".flac", ".wav")
wav_file_path = os.path.join(chapter_dir_path, wav_file_name)
if not os.path.exists(wav_file_path):
flac_file_path = os.path.join(chapter_dir_path, flac_file_name)
pcm, sample_rate = soundfile.read(flac_file_path)
soundfile.write(wav_file_path, pcm, sample_rate)
text = transcripts[wav_file_name.replace(".wav", "")]
text = text.strip("\n ").lower()
                        text = np.array([vocabulary_dict[c] for c in text], dtype=np.int64)
self.data.append((wav_file_path, text))
class LibriSpeechMetaInfo(DatasetMetaInfo):
def __init__(self):
super(LibriSpeechMetaInfo, self).__init__()
self.label = "LibriSpeech"
self.short_label = "ls"
self.root_dir_name = "LibriSpeech"
self.dataset_class = LibriSpeech
self.dataset_class_extra_kwargs = {"subset": "dev-clean"}
self.ml_type = "asr"
self.num_classes = 29
self.val_metric_extra_kwargs = [{"vocabulary": None}]
self.val_metric_capts = ["Val.WER"]
self.val_metric_names = ["WER"]
self.test_metric_extra_kwargs = [{"vocabulary": None}]
self.test_metric_capts = ["Test.WER"]
self.test_metric_names = ["WER"]
self.val_transform = asr_test_transform
self.test_transform = asr_test_transform
self.test_net_extra_kwargs = {"from_audio": True}
self.saver_acc_ind = 0
def add_dataset_parser_arguments(self,
parser,
work_dir_path):
"""
        Create python script parameters (for dataset-specific metainfo).
Parameters:
----------
parser : ArgumentParser
ArgumentParser instance.
work_dir_path : str
Path to working directory.
"""
super(LibriSpeechMetaInfo, self).add_dataset_parser_arguments(parser, work_dir_path)
parser.add_argument(
"--subset",
type=str,
default="dev-clean",
help="data subset")
def update(self,
args):
"""
Update dataset metainfo after user customizing.
Parameters:
----------
args : ArgumentParser
Main script arguments.
"""
super(LibriSpeechMetaInfo, self).update(args)
self.dataset_class_extra_kwargs["subset"] = args.subset
def update_from_dataset(self,
dataset):
"""
Update dataset metainfo after a dataset class instance creation.
Parameters:
----------
        dataset : obj
A dataset class instance.
"""
vocabulary = dataset.vocabulary
self.num_classes = len(vocabulary) + 1
self.val_metric_extra_kwargs[0]["vocabulary"] = vocabulary
self.test_metric_extra_kwargs[0]["vocabulary"] = vocabulary
| 5,294
| 37.369565
| 119
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/datasets/cub200_2011_cls_dataset.py
|
"""
CUB-200-2011 classification dataset.
"""
import os
import numpy as np
import pandas as pd
from PIL import Image
import torch.utils.data as data
from .imagenet1k_cls_dataset import ImageNet1KMetaInfo
class CUB200_2011(data.Dataset):
"""
CUB-200-2011 fine-grained classification dataset.
Parameters:
----------
root : str, default '~/.torch/datasets/CUB_200_2011'
        Path to the folder storing the dataset.
mode : str, default 'train'
'train', 'val', or 'test'.
transform : function, default None
A function that takes data and transforms it.
target_transform : function, default None
A function that takes label and transforms it.
"""
def __init__(self,
root=os.path.join("~", ".torch", "datasets", "CUB_200_2011"),
mode="train",
transform=None,
target_transform=None):
super(CUB200_2011, self).__init__()
root_dir_path = os.path.expanduser(root)
assert os.path.exists(root_dir_path)
images_file_name = "images.txt"
images_file_path = os.path.join(root_dir_path, images_file_name)
if not os.path.exists(images_file_path):
raise Exception("Images file doesn't exist: {}".format(images_file_name))
class_file_name = "image_class_labels.txt"
class_file_path = os.path.join(root_dir_path, class_file_name)
if not os.path.exists(class_file_path):
raise Exception("Image class file doesn't exist: {}".format(class_file_name))
split_file_name = "train_test_split.txt"
split_file_path = os.path.join(root_dir_path, split_file_name)
if not os.path.exists(split_file_path):
raise Exception("Split file doesn't exist: {}".format(split_file_name))
        images_df = pd.read_csv(
            images_file_path,
            sep=r"\s+",
            header=None,
            index_col=False,
            names=["image_id", "image_path"],
            dtype={"image_id": np.int32, "image_path": str})
class_df = pd.read_csv(
class_file_path,
sep="\s+",
header=None,
index_col=False,
names=["image_id", "class_id"],
dtype={"image_id": np.int32, "class_id": np.uint8})
split_df = pd.read_csv(
split_file_path,
sep="\s+",
header=None,
index_col=False,
names=["image_id", "split_flag"],
dtype={"image_id": np.int32, "split_flag": np.uint8})
df = images_df.join(class_df, rsuffix="_class_df").join(split_df, rsuffix="_split_df")
split_flag = 1 if mode == "train" else 0
subset_df = df[df.split_flag == split_flag]
self.image_ids = subset_df["image_id"].values.astype(np.int32)
self.class_ids = subset_df["class_id"].values.astype(np.int32) - 1
        self.image_file_names = subset_df["image_path"].values.astype(str)
images_dir_name = "images"
self.images_dir_path = os.path.join(root_dir_path, images_dir_name)
assert os.path.exists(self.images_dir_path)
self._transform = transform
self._target_transform = target_transform
def __getitem__(self, index):
image_file_name = self.image_file_names[index]
image_file_path = os.path.join(self.images_dir_path, image_file_name)
img = Image.open(image_file_path).convert("RGB")
label = int(self.class_ids[index])
if self._transform is not None:
img = self._transform(img)
if self._target_transform is not None:
label = self._target_transform(label)
return img, label
def __len__(self):
return len(self.image_ids)
class CUB200MetaInfo(ImageNet1KMetaInfo):
def __init__(self):
super(CUB200MetaInfo, self).__init__()
self.label = "CUB200_2011"
self.short_label = "cub"
self.root_dir_name = "CUB_200_2011"
self.dataset_class = CUB200_2011
self.num_training_samples = None
self.num_classes = 200
self.train_metric_capts = ["Train.Err"]
self.train_metric_names = ["Top1Error"]
self.train_metric_extra_kwargs = [{"name": "err"}]
self.val_metric_capts = ["Val.Err"]
self.val_metric_names = ["Top1Error"]
self.val_metric_extra_kwargs = [{"name": "err"}]
self.saver_acc_ind = 0
self.net_extra_kwargs = {"aux": False}
self.load_ignore_extra = True
def add_dataset_parser_arguments(self,
parser,
work_dir_path):
super(CUB200MetaInfo, self).add_dataset_parser_arguments(parser, work_dir_path)
parser.add_argument(
"--no-aux",
dest="no_aux",
action="store_true",
help="no `aux` mode in model")
def update(self,
args):
"""
Update CUB-200-2011 dataset metainfo after user customizing.
Parameters:
----------
args : ArgumentParser
Main script arguments.
"""
super(CUB200MetaInfo, self).update(args)
if args.no_aux:
self.net_extra_kwargs = None
self.load_ignore_extra = False
| 5,320
| 34.711409
| 94
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/datasets/mcv_asr_dataset.py
|
"""
Mozilla Common Voice ASR dataset.
"""
__all__ = ['McvDataset', 'McvMetaInfo']
import os
import re
import numpy as np
import pandas as pd
from .dataset_metainfo import DatasetMetaInfo
from .asr_dataset import AsrDataset, asr_test_transform
class McvDataset(AsrDataset):
"""
Mozilla Common Voice dataset for Automatic Speech Recognition (ASR).
Parameters:
----------
root : str, default '~/.torch/datasets/mcv'
        Path to the folder storing the dataset.
mode : str, default 'test'
'train', 'val', 'test', or 'demo'.
lang : str, default 'en'
Language.
subset : str, default 'dev'
Data subset.
transform : function, default None
A function that takes data and transforms it.
"""
def __init__(self,
root=os.path.join("~", ".torch", "datasets", "mcv"),
mode="test",
lang="en",
subset="dev",
transform=None):
super(McvDataset, self).__init__(
root=root,
mode=mode,
transform=transform)
assert (lang in ("en", "fr", "de", "it", "es", "ca", "pl", "ru", "ru34"))
self.vocabulary = self.get_vocabulary_for_lang(lang=lang)
desired_audio_sample_rate = 16000
vocabulary_dict = {c: i for i, c in enumerate(self.vocabulary)}
import soundfile
import librosa
from librosa.core import resample as lr_resample
import unicodedata
import unidecode
root_dir_path = os.path.expanduser(root)
assert os.path.exists(root_dir_path)
lang_ = lang if lang != "ru34" else "ru"
data_dir_path = os.path.join(root_dir_path, lang_)
assert os.path.exists(data_dir_path)
metainfo_file_path = os.path.join(data_dir_path, subset + ".tsv")
assert os.path.exists(metainfo_file_path)
metainfo_df = pd.read_csv(
metainfo_file_path,
sep="\t",
header=0,
index_col=False)
metainfo_df = metainfo_df[["path", "sentence"]]
self.data_paths = metainfo_df["path"].values
self.data_sentences = metainfo_df["sentence"].values
clips_dir_path = os.path.join(data_dir_path, "clips")
assert os.path.exists(clips_dir_path)
for clip_file_name, sentence in zip(self.data_paths, self.data_sentences):
mp3_file_path = os.path.join(clips_dir_path, clip_file_name)
assert os.path.exists(mp3_file_path)
wav_file_name = clip_file_name.replace(".mp3", ".wav")
wav_file_path = os.path.join(clips_dir_path, wav_file_name)
# print("==> {}".format(sentence))
text = sentence.lower()
if lang == "en":
text = re.sub("\.|-|–|—", " ", text)
text = re.sub("&", " and ", text)
text = re.sub("ō", "o", text)
text = re.sub("â|á", "a", text)
text = re.sub("é", "e", text)
text = re.sub(",|;|:|!|\?|\"|“|”|‘|’|\(|\)", "", text)
text = re.sub("\s+", " ", text)
text = re.sub(" '", " ", text)
text = re.sub("' ", " ", text)
elif lang == "fr":
text = "".join(c for c in text if unicodedata.combining(c) == 0)
text = re.sub("\.|-|–|—|=|×|\*|†|/|ቀ|_|…", " ", text)
text = re.sub(",|;|:|!|\?|ʻ|“|”|\"|„|«|»|\(|\)", "", text)
text = re.sub("먹|삼|생|고|기|집|\$|ʔ|の|ひ", "", text)
text = re.sub("’|´", "'", text)
text = re.sub("&", " and ", text)
text = re.sub("œ", "oe", text)
text = re.sub("æ", "ae", text)
text = re.sub("á|ā|ã|ä|ą|ă|å", "a", text)
text = re.sub("ö|ō|ó|ð|ổ|ø", "o", text)
text = re.sub("ē|ė|ę", "e", text)
text = re.sub("í|ī", "i", text)
text = re.sub("ú|ū", "u", text)
text = re.sub("ý", "y", text)
text = re.sub("š|ś|ș|ş", "s", text)
text = re.sub("ž|ź|ż", "z", text)
text = re.sub("ñ|ń|ṇ", "n", text)
text = re.sub("ł|ľ", "l", text)
text = re.sub("ć|č", "c", text)
text = re.sub("я", "ya", text)
text = re.sub("ř", "r", text)
text = re.sub("đ", "d", text)
text = re.sub("ț", "t", text)
text = re.sub("þ", "th", text)
text = re.sub("ğ", "g", text)
text = re.sub("ß", "ss", text)
text = re.sub("µ", "mu", text)
text = re.sub("\s+", " ", text)
elif lang == "de":
text = re.sub("\.|-|–|—|/|_|…", " ", text)
text = re.sub(",|;|:|!|\?|\"|'|‘|’|ʻ|ʿ|‚|“|”|\"|„|«|»|›|‹|\(|\)", "", text)
text = re.sub("°|幺|乡|辶", "", text)
text = re.sub("&", " and ", text)
text = re.sub("ə", "a", text)
text = re.sub("æ", "ae", text)
text = re.sub("å|ā|á|ã|ă|â|ą", "a", text)
text = re.sub("ó|ð|ø|ọ|ő|ō|ô", "o", text)
text = re.sub("é|ë|ê|ě|ę", "e", text)
text = re.sub("ū|ứ", "u", text)
text = re.sub("í|ï|ı", "i", text)
text = re.sub("š|ș|ś|ş", "s", text)
text = re.sub("č|ć", "c", text)
text = re.sub("đ", "d", text)
text = re.sub("ğ", "g", text)
text = re.sub("ł", "l", text)
text = re.sub("ř", "r", text)
text = re.sub("ñ", "n", text)
text = re.sub("ț", "t", text)
text = re.sub("ž|ź", "z", text)
text = re.sub("\s+", " ", text)
elif lang == "it":
text = re.sub("\.|-|–|—|/|_|…", " ", text)
text = re.sub(",|;|:|!|\?|\"|“|”|\"|„|«|»|›|‹|<|>|\(|\)", "", text)
text = re.sub("\$|#|禅", "", text)
text = re.sub("’|`", "'", text)
text = re.sub("ə", "a", text)
text = "".join((c if c in self.vocabulary else unidecode.unidecode(c)) for c in text)
text = re.sub("\s+", " ", text)
elif lang == "es":
text = re.sub("\.|-|–|—|/|=|_|{|…", " ", text)
text = re.sub(",|;|:|!|\?|\"|“|”|\"|„|«|»|›|‹|<|>|\(|\)|¿|¡", "", text)
text = re.sub("蝦|夷", "", text)
text = "".join((c if c in self.vocabulary else unidecode.unidecode(c)) for c in text)
text = re.sub("\s+", " ", text)
elif lang == "ca":
text = re.sub("\.|-|–|—|/|=|_|·|@|\+|…", " ", text)
text = re.sub(",|;|:|!|\?|\"|“|”|\"|„|«|»|›|‹|<|>|\(|\)|¿|¡", "", text)
text = re.sub("ঃ|ং", "", text)
text = "".join((c if c in self.vocabulary else unidecode.unidecode(c)) for c in text)
text = re.sub("\s+", " ", text)
elif lang == "pl":
text = re.sub("\.|-|–|—|/|=|_|·|@|\+|…", " ", text)
text = re.sub(",|;|:|!|\?|\"|“|”|\"|„|«|»|›|‹|<|>|\(|\)", "", text)
text = re.sub("q", "k", text)
text = re.sub("x", "ks", text)
text = re.sub("v", "w", text)
text = "".join((c if c in self.vocabulary else unidecode.unidecode(c)) for c in text)
text = re.sub("\s+", " ", text)
elif lang in ("ru", "ru34"):
text = re.sub("по-", "по", text)
text = re.sub("во-", "во", text)
text = re.sub("-то", "то", text)
text = re.sub("\.|−|-|–|—|…", " ", text)
text = re.sub(",|;|:|!|\?|‘|’|\"|“|”|«|»|'", "", text)
text = re.sub("m", "м", text)
text = re.sub("o", "о", text)
text = re.sub("z", "з", text)
text = re.sub("i", "и", text)
text = re.sub("l", "л", text)
text = re.sub("a", "а", text)
text = re.sub("f", "ф", text)
text = re.sub("r", "р", text)
text = re.sub("e", "е", text)
text = re.sub("x", "кс", text)
text = re.sub("h", "х", text)
text = re.sub("\s+", " ", text)
if lang == "ru34":
text = re.sub("ё", "е", text)
text = re.sub(" $", "", text)
# print("<== {}".format(text))
            text = np.array([vocabulary_dict[c] for c in text], dtype=np.int64)
self.data.append((wav_file_path, text))
# continue
if os.path.exists(wav_file_path):
continue
# pass
x, sr = librosa.load(path=mp3_file_path, sr=None)
if desired_audio_sample_rate != sr:
y = lr_resample(y=x, orig_sr=sr, target_sr=desired_audio_sample_rate)
soundfile.write(file=wav_file_path, data=y, samplerate=desired_audio_sample_rate)
@staticmethod
def get_vocabulary_for_lang(lang="en"):
"""
Get the vocabulary for a language.
Parameters:
----------
lang : str, default 'en'
Language.
Returns:
-------
list of str
Vocabulary set.
"""
assert (lang in ("en", "fr", "de", "it", "es", "ca", "pl", "ru", "ru34"))
if lang == "en":
return [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's',
't', 'u', 'v', 'w', 'x', 'y', 'z', "'"]
elif lang == "fr":
return [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's',
't', 'u', 'v', 'w', 'x', 'y', 'z', "'", 'ç', 'é', 'â', 'ê', 'î', 'ô', 'û', 'à', 'è', 'ù', 'ë', 'ï',
'ü', 'ÿ']
elif lang == "de":
return [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's',
't', 'u', 'v', 'w', 'x', 'y', 'z', 'ä', 'ö', 'ü', 'ß']
elif lang == "it":
return [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's',
't', 'u', 'v', 'w', 'x', 'y', 'z', "'", 'à', 'é', 'è', 'í', 'ì', 'î', 'ó', 'ò', 'ú', 'ù']
elif lang == "es":
return [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's',
't', 'u', 'v', 'w', 'x', 'y', 'z', "'", 'á', 'é', 'í', 'ó', 'ú', 'ñ', 'ü']
elif lang == "ca":
return [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's',
't', 'u', 'v', 'w', 'x', 'y', 'z', "'", 'à', 'é', 'è', 'í', 'ï', 'ó', 'ò', 'ú', 'ü', 'ŀ']
elif lang == "pl":
return [' ', 'a', 'ą', 'b', 'c', 'ć', 'd', 'e', 'ę', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'ł', 'm', 'n', 'ń',
'o', 'ó', 'p', 'r', 's', 'ś', 't', 'u', 'w', 'y', 'z', 'ź', 'ż']
elif lang == "ru":
return [' ', 'а', 'б', 'в', 'г', 'д', 'е', 'ё', 'ж', 'з', 'и', 'й', 'к', 'л', 'м', 'н', 'о', 'п', 'р', 'с',
'т', 'у', 'ф', 'х', 'ц', 'ч', 'ш', 'щ', 'ъ', 'ы', 'ь', 'э', 'ю', 'я']
elif lang == "ru34":
return [' ', 'а', 'б', 'в', 'г', 'д', 'е', 'ж', 'з', 'и', 'й', 'к', 'л', 'м', 'н', 'о', 'п', 'р', 'с', 'т',
'у', 'ф', 'х', 'ц', 'ч', 'ш', 'щ', 'ъ', 'ы', 'ь', 'э', 'ю', 'я']
else:
return None
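# Hedged sketch: build the character-to-index lookup used above when encoding a
# cleaned transcript for a given language.
def _demo_mcv_vocabulary():
    vocabulary = McvDataset.get_vocabulary_for_lang(lang="en")
    vocabulary_dict = {c: i for i, c in enumerate(vocabulary)}
    return [vocabulary_dict[c] for c in "hello world"]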
class McvMetaInfo(DatasetMetaInfo):
def __init__(self):
super(McvMetaInfo, self).__init__()
self.label = "MCV"
self.short_label = "mcv"
self.root_dir_name = "cv-corpus-6.1-2020-12-11"
self.dataset_class = McvDataset
self.lang = "en"
self.dataset_class_extra_kwargs = {
"lang": self.lang,
"subset": "dev"}
self.ml_type = "asr"
self.num_classes = None
self.val_metric_extra_kwargs = [{"vocabulary": None}]
self.val_metric_capts = ["Val.WER"]
self.val_metric_names = ["WER"]
self.test_metric_extra_kwargs = [{"vocabulary": None}]
self.test_metric_capts = ["Test.WER"]
self.test_metric_names = ["WER"]
self.val_transform = asr_test_transform
self.test_transform = asr_test_transform
self.saver_acc_ind = 0
def add_dataset_parser_arguments(self,
parser,
work_dir_path):
"""
        Create python script parameters (for dataset-specific metainfo).
Parameters:
----------
parser : ArgumentParser
ArgumentParser instance.
work_dir_path : str
Path to working directory.
"""
super(McvMetaInfo, self).add_dataset_parser_arguments(parser, work_dir_path)
parser.add_argument(
"--lang",
type=str,
default="en",
help="language")
parser.add_argument(
"--subset",
type=str,
default="dev",
help="data subset")
def update(self,
args):
"""
Update dataset metainfo after user customizing.
Parameters:
----------
args : ArgumentParser
Main script arguments.
"""
super(McvMetaInfo, self).update(args)
self.lang = args.lang
self.dataset_class_extra_kwargs["lang"] = args.lang
self.dataset_class_extra_kwargs["subset"] = args.subset
def update_from_dataset(self,
dataset):
"""
Update dataset metainfo after a dataset class instance creation.
Parameters:
----------
        dataset : obj
A dataset class instance.
"""
vocabulary = dataset.vocabulary
self.num_classes = len(vocabulary) + 1
self.val_metric_extra_kwargs[0]["vocabulary"] = vocabulary
self.test_metric_extra_kwargs[0]["vocabulary"] = vocabulary
| 14,287
| 41.906907
| 119
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/datasets/cityscapes_seg_dataset.py
|
import os
import numpy as np
from PIL import Image
from .seg_dataset import SegDataset
from .voc_seg_dataset import VOCMetaInfo
class CityscapesSegDataset(SegDataset):
"""
Cityscapes semantic segmentation dataset.
Parameters:
----------
root : str
Path to a folder with `leftImg8bit` and `gtFine` subfolders.
mode : str, default 'train'
'train', 'val', 'test', or 'demo'.
transform : callable, optional
A function that transforms the image.
"""
def __init__(self,
root,
mode="train",
transform=None,
**kwargs):
super(CityscapesSegDataset, self).__init__(
root=root,
mode=mode,
transform=transform,
**kwargs)
image_dir_path = os.path.join(root, "leftImg8bit")
mask_dir_path = os.path.join(root, "gtFine")
assert os.path.exists(image_dir_path) and os.path.exists(mask_dir_path), "Please prepare dataset"
mode_dir_name = "train" if mode == "train" else "val"
image_dir_path = os.path.join(image_dir_path, mode_dir_name)
# mask_dir_path = os.path.join(mask_dir_path, mode_dir_name)
self.images = []
self.masks = []
for image_subdir_path, _, image_file_names in os.walk(image_dir_path):
for image_file_name in image_file_names:
if image_file_name.endswith(".png"):
image_file_path = os.path.join(image_subdir_path, image_file_name)
mask_file_name = image_file_name.replace("leftImg8bit", "gtFine_labelIds")
mask_subdir_path = image_subdir_path.replace("leftImg8bit", "gtFine")
mask_file_path = os.path.join(mask_subdir_path, mask_file_name)
if os.path.isfile(mask_file_path):
self.images.append(image_file_path)
self.masks.append(mask_file_path)
else:
print("Cannot find the mask: {}".format(mask_file_path))
assert (len(self.images) == len(self.masks))
if len(self.images) == 0:
raise RuntimeError("Found 0 images in subfolders of: {}\n".format(image_dir_path))
def __getitem__(self, index):
image = Image.open(self.images[index]).convert("RGB")
if self.mode == "demo":
image = self._img_transform(image)
if self.transform is not None:
image = self.transform(image)
return image, os.path.basename(self.images[index])
mask = Image.open(self.masks[index])
if self.mode == "train":
image, mask = self._sync_transform(image, mask)
elif self.mode == "val":
image, mask = self._val_sync_transform(image, mask)
else:
assert (self.mode == "test")
image = self._img_transform(image)
mask = self._mask_transform(mask)
if self.transform is not None:
image = self.transform(image)
return image, mask
classes = 19
vague_idx = 19
use_vague = True
background_idx = -1
ignore_bg = False
_key = np.array([-1, -1, -1, -1, -1, -1,
-1, -1, 0, 1, -1, -1,
2, 3, 4, -1, -1, -1,
5, -1, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15,
-1, -1, 16, 17, 18])
_mapping = np.array(range(-1, len(_key) - 1)).astype(np.int32)
@staticmethod
def _class_to_index(mask):
values = np.unique(mask)
for value in values:
assert(value in CityscapesSegDataset._mapping)
index = np.digitize(mask.ravel(), CityscapesSegDataset._mapping, right=True)
return CityscapesSegDataset._key[index].reshape(mask.shape)
@staticmethod
def _mask_transform(mask):
np_mask = np.array(mask).astype(np.int32)
np_mask = CityscapesSegDataset._class_to_index(np_mask)
np_mask[np_mask == -1] = CityscapesSegDataset.vague_idx
return np_mask
def __len__(self):
return len(self.images)
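# Hedged worked example for the label conversion above: raw Cityscapes id 7 ("road")
# maps to train id 0, id 8 ("sidewalk") to 1, id 26 ("car") to 13, and unlabeled
# ids collapse to -1 (later replaced by `vague_idx` in `_mask_transform`).
def _demo_class_to_index():
    raw_mask = np.array([[7, 8], [0, 26]], dtype=np.int32)
    return CityscapesSegDataset._class_to_index(raw_mask)  # [[0, 1], [-1, 13]]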
class CityscapesMetaInfo(VOCMetaInfo):
def __init__(self):
super(CityscapesMetaInfo, self).__init__()
self.label = "Cityscapes"
self.short_label = "voc"
self.root_dir_name = "cityscapes"
self.dataset_class = CityscapesSegDataset
self.num_classes = CityscapesSegDataset.classes
self.test_metric_extra_kwargs = [
{"vague_idx": CityscapesSegDataset.vague_idx,
"use_vague": CityscapesSegDataset.use_vague,
"macro_average": False},
{"num_classes": CityscapesSegDataset.classes,
"vague_idx": CityscapesSegDataset.vague_idx,
"use_vague": CityscapesSegDataset.use_vague,
"bg_idx": CityscapesSegDataset.background_idx,
"ignore_bg": CityscapesSegDataset.ignore_bg,
"macro_average": False}]
self.test_net_extra_kwargs = self.net_extra_kwargs
| 5,066
| 37.097744
| 105
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/datasets/coco_seg_dataset.py
|
"""
COCO semantic segmentation dataset.
"""
import os
import logging
import numpy as np
from PIL import Image
from tqdm import trange
from .seg_dataset import SegDataset
from .voc_seg_dataset import VOCMetaInfo
class CocoSegDataset(SegDataset):
"""
COCO semantic segmentation dataset.
Parameters:
----------
root : str
Path to `annotations`, `train2017`, and `val2017` folders.
mode : str, default 'train'
'train', 'val', 'test', or 'demo'.
transform : callable, optional
A function that transforms the image.
"""
def __init__(self,
root,
mode="train",
transform=None,
**kwargs):
super(CocoSegDataset, self).__init__(
root=root,
mode=mode,
transform=transform,
**kwargs)
mode_name = "train" if mode == "train" else "val"
annotations_dir_path = os.path.join(root, "annotations")
annotations_file_path = os.path.join(annotations_dir_path, "instances_" + mode_name + "2017.json")
idx_file_path = os.path.join(annotations_dir_path, mode_name + "_idx.npy")
self.image_dir_path = os.path.join(root, mode_name + "2017")
from pycocotools.coco import COCO
from pycocotools import mask as coco_mask
self.coco = COCO(annotations_file_path)
self.coco_mask = coco_mask
if os.path.exists(idx_file_path):
self.idx = np.load(idx_file_path)
else:
idx_list = list(self.coco.imgs.keys())
self.idx = self._filter_idx(idx_list, idx_file_path)
self.transform = transform
def __getitem__(self, index):
image_idx = int(self.idx[index])
img_metadata = self.coco.loadImgs(image_idx)[0]
image_file_name = img_metadata["file_name"]
image_file_path = os.path.join(self.image_dir_path, image_file_name)
image = Image.open(image_file_path).convert("RGB")
if self.mode == "demo":
image = self._img_transform(image)
if self.transform is not None:
image = self.transform(image)
return image, os.path.basename(image_file_path)
coco_target = self.coco.loadAnns(self.coco.getAnnIds(imgIds=image_idx))
mask = Image.fromarray(self._gen_seg_mask(
coco_target,
img_metadata["height"],
img_metadata["width"]))
if self.mode == "train":
image, mask = self._sync_transform(image, mask)
elif self.mode == "val":
image, mask = self._val_sync_transform(image, mask)
else:
assert (self.mode == "test")
image, mask = self._img_transform(image), self._mask_transform(mask)
if self.transform is not None:
image = self.transform(image)
return image, mask
def _gen_seg_mask(self, target, h, w):
cat_list = [0, 5, 2, 16, 9, 44, 6, 3, 17, 62, 21, 67, 18, 19, 4, 1, 64, 20, 63, 7, 72]
mask = np.zeros((h, w), dtype=np.uint8)
for instance in target:
rle = self.coco_mask.frPyObjects(instance["segmentation"], h, w)
m = self.coco_mask.decode(rle)
cat = instance["category_id"]
if cat in cat_list:
c = cat_list.index(cat)
else:
continue
if len(m.shape) < 3:
mask[:, :] += (mask == 0) * (m * c)
else:
mask[:, :] += (mask == 0) * (((np.sum(m, axis=2)) > 0) * c).astype(np.uint8)
return mask
def _filter_idx(self,
idx,
idx_file,
pixels_thr=1000):
logging.info("Filtering mask index")
tbar = trange(len(idx))
filtered_idx = []
for i in tbar:
img_id = idx[i]
coco_target = self.coco.loadAnns(self.coco.getAnnIds(imgIds=img_id))
img_metadata = self.coco.loadImgs(img_id)[0]
mask = self._gen_seg_mask(
coco_target,
img_metadata["height"],
img_metadata["width"])
if (mask > 0).sum() > pixels_thr:
filtered_idx.append(img_id)
tbar.set_description("Doing: {}/{}, got {} qualified images".format(i, len(idx), len(filtered_idx)))
logging.info("Found number of qualified images: {}".format(len(filtered_idx)))
np.save(idx_file, np.array(filtered_idx, np.int32))
return filtered_idx
classes = 21
vague_idx = -1
use_vague = False
background_idx = 0
ignore_bg = True
@staticmethod
def _mask_transform(mask):
np_mask = np.array(mask).astype(np.int32)
return np_mask
def __len__(self):
return len(self.idx)
class CocoSegMetaInfo(VOCMetaInfo):
def __init__(self):
super(CocoSegMetaInfo, self).__init__()
self.label = "COCO"
self.short_label = "coco"
self.root_dir_name = "coco"
self.dataset_class = CocoSegDataset
self.num_classes = CocoSegDataset.classes
self.test_metric_extra_kwargs = [
{"vague_idx": CocoSegDataset.vague_idx,
"use_vague": CocoSegDataset.use_vague,
"macro_average": False},
{"num_classes": CocoSegDataset.classes,
"vague_idx": CocoSegDataset.vague_idx,
"use_vague": CocoSegDataset.use_vague,
"bg_idx": CocoSegDataset.background_idx,
"ignore_bg": CocoSegDataset.ignore_bg,
"macro_average": False}]
| 5,605
| 34.0375
| 112
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/datasets/mpii_hpe_dataset.py
|
"""
MPII keypoint detection (2D single human pose estimation) dataset.
"""
import os
import logging
import json
import numpy as np
from scipy.io import loadmat, savemat
from collections import OrderedDict
from .hpe_dataset import HpeDataset
class MpiiHpeDataset(HpeDataset):
    """
    MPII single human pose estimation dataset.
    Parameters:
    ----------
    cfg : object
        Dataset configuration (as used by HpeDataset).
    root : str
        Path to the folder with the 'annot' and 'images' subfolders.
    image_set : str
        Annotation subset name, e.g. 'train' or 'valid'.
    is_train : bool
        Whether the dataset is used for training.
    transform : callable, optional
        A function that transforms the sample.
    """
def __init__(self,
cfg,
root,
image_set,
is_train,
transform=None):
super(MpiiHpeDataset, self).__init__(cfg, root, image_set, is_train, transform)
self.num_joints = 16
self.flip_pairs = [[0, 5], [1, 4], [2, 3], [10, 15], [11, 14], [12, 13]]
self.parent_ids = [1, 2, 6, 6, 3, 4, 6, 6, 7, 8, 11, 12, 7, 7, 13, 14]
self.db = self._get_db()
if is_train and cfg.DATASET.SELECT_DATA:
self.db = self.select_data(self.db)
        logging.info('=> loaded {} samples'.format(len(self.db)))
def _get_db(self):
# create train/val split
file_name = os.path.join(self.root, 'annot', self.image_set + '.json')
with open(file_name) as anno_file:
anno = json.load(anno_file)
gt_db = []
for a in anno:
image_name = a['image']
            c = np.array(a['center'], dtype=float)
            s = np.array([a['scale'], a['scale']], dtype=float)
# Adjust center/scale slightly to avoid cropping limbs
if c[0] != -1:
c[1] = c[1] + 15 * s[1]
s = s * 1.25
            # MPII annotations use MATLAB's 1-based indexing,
            # so convert to 0-based indices first
c = c - 1
            joints_3d = np.zeros((self.num_joints, 3), dtype=float)
            joints_3d_vis = np.zeros((self.num_joints, 3), dtype=float)
if self.image_set != 'test':
joints = np.array(a['joints'])
joints[:, 0:2] = joints[:, 0:2] - 1
joints_vis = np.array(a['joints_vis'])
assert len(joints) == self.num_joints, 'joint num diff: {} vs {}'.format(len(joints), self.num_joints)
joints_3d[:, 0:2] = joints[:, 0:2]
joints_3d_vis[:, 0] = joints_vis[:]
joints_3d_vis[:, 1] = joints_vis[:]
image_dir = 'images.zip@' if self.data_format == 'zip' else 'images'
gt_db.append({
'image': os.path.join(self.root, image_dir, image_name),
'center': c,
'scale': s,
'joints_3d': joints_3d,
'joints_3d_vis': joints_3d_vis,
'filename': '',
'imgnum': 0,
})
return gt_db
def evaluate(self,
cfg,
preds,
output_dir,
*args,
**kwargs):
# convert 0-based index to 1-based index
preds = preds[:, :, 0:2] + 1.0
if output_dir:
pred_file = os.path.join(output_dir, 'pred.mat')
savemat(pred_file, mdict={'preds': preds})
if 'test' in cfg.DATASET.TEST_SET:
return {'Null': 0.0}, 0.0
SC_BIAS = 0.6
threshold = 0.5
gt_file = os.path.join(cfg.DATASET.ROOT, 'annot', 'gt_{}.mat'.format(cfg.DATASET.TEST_SET))
gt_dict = loadmat(gt_file)
dataset_joints = gt_dict['dataset_joints']
jnt_missing = gt_dict['jnt_missing']
pos_gt_src = gt_dict['pos_gt_src']
headboxes_src = gt_dict['headboxes_src']
pos_pred_src = np.transpose(preds, [1, 2, 0])
head = np.where(dataset_joints == 'head')[1][0]
lsho = np.where(dataset_joints == 'lsho')[1][0]
lelb = np.where(dataset_joints == 'lelb')[1][0]
lwri = np.where(dataset_joints == 'lwri')[1][0]
lhip = np.where(dataset_joints == 'lhip')[1][0]
lkne = np.where(dataset_joints == 'lkne')[1][0]
lank = np.where(dataset_joints == 'lank')[1][0]
rsho = np.where(dataset_joints == 'rsho')[1][0]
relb = np.where(dataset_joints == 'relb')[1][0]
rwri = np.where(dataset_joints == 'rwri')[1][0]
rkne = np.where(dataset_joints == 'rkne')[1][0]
rank = np.where(dataset_joints == 'rank')[1][0]
rhip = np.where(dataset_joints == 'rhip')[1][0]
jnt_visible = 1 - jnt_missing
uv_error = pos_pred_src - pos_gt_src
uv_err = np.linalg.norm(uv_error, axis=1)
headsizes = headboxes_src[1, :, :] - headboxes_src[0, :, :]
headsizes = np.linalg.norm(headsizes, axis=0)
headsizes *= SC_BIAS
scale = np.multiply(headsizes, np.ones((len(uv_err), 1)))
scaled_uv_err = np.divide(uv_err, scale)
scaled_uv_err = np.multiply(scaled_uv_err, jnt_visible)
jnt_count = np.sum(jnt_visible, axis=1)
less_than_threshold = np.multiply((scaled_uv_err <= threshold), jnt_visible)
PCKh = np.divide(100.0 * np.sum(less_than_threshold, axis=1), jnt_count)
        # PCKh over a range of thresholds
rng = np.arange(0, 0.5 + 0.01, 0.01)
pckAll = np.zeros((len(rng), 16))
for r in range(len(rng)):
threshold = rng[r]
less_than_threshold = np.multiply(scaled_uv_err <= threshold, jnt_visible)
pckAll[r, :] = np.divide(100.0 * np.sum(less_than_threshold, axis=1), jnt_count)
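        # exclude pelvis (6) and thorax (7) from the reported averages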
PCKh = np.ma.array(PCKh, mask=False)
PCKh.mask[6:8] = True
jnt_count = np.ma.array(jnt_count, mask=False)
jnt_count.mask[6:8] = True
jnt_ratio = jnt_count / np.sum(jnt_count).astype(np.float64)
name_value = [
('Head', PCKh[head]),
('Shoulder', 0.5 * (PCKh[lsho] + PCKh[rsho])),
('Elbow', 0.5 * (PCKh[lelb] + PCKh[relb])),
('Wrist', 0.5 * (PCKh[lwri] + PCKh[rwri])),
('Hip', 0.5 * (PCKh[lhip] + PCKh[rhip])),
('Knee', 0.5 * (PCKh[lkne] + PCKh[rkne])),
('Ankle', 0.5 * (PCKh[lank] + PCKh[rank])),
('Mean', np.sum(PCKh * jnt_ratio)),
            ('Mean@0.1', np.sum(pckAll[11, :] * jnt_ratio))  # rng[11] == 0.11
]
name_value = OrderedDict(name_value)
return name_value, name_value['Mean']
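# A self-contained sketch of the PCKh core used in `evaluate` above (array
# shapes here are illustrative): joint errors are normalized by 0.6 times the
# head-box diagonal and then thresholded.
def _pckh_sketch(pred, gt, headsizes, visible, threshold=0.5, sc_bias=0.6):
    # pred, gt: (num_joints, 2, num_samples); headsizes: (num_samples,)
    # head-box diagonals; visible: (num_joints, num_samples) in {0, 1}.
    err = np.linalg.norm(pred - gt, axis=1)
    scaled_err = err / (sc_bias * headsizes)
    hit = (scaled_err <= threshold) * visible
    return 100.0 * hit.sum(axis=1) / visible.sum(axis=1)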
| 6,159
| 35.886228
| 118
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/datasets/voc_seg_dataset.py
|
"""
Pascal VOC2012 semantic segmentation dataset.
"""
import os
import numpy as np
from PIL import Image
import torchvision.transforms as transforms
from .seg_dataset import SegDataset
from .dataset_metainfo import DatasetMetaInfo
class VOCSegDataset(SegDataset):
"""
Pascal VOC2012 semantic segmentation dataset.
Parameters:
----------
root : str
Path to VOCdevkit folder.
mode : str, default 'train'
'train', 'val', 'test', or 'demo'.
transform : callable, optional
A function that transforms the image.
"""
def __init__(self,
root,
mode="train",
transform=None,
**kwargs):
super(VOCSegDataset, self).__init__(
root=root,
mode=mode,
transform=transform,
**kwargs)
base_dir_path = os.path.join(root, "VOC2012")
image_dir_path = os.path.join(base_dir_path, "JPEGImages")
mask_dir_path = os.path.join(base_dir_path, "SegmentationClass")
splits_dir_path = os.path.join(base_dir_path, "ImageSets", "Segmentation")
if mode == "train":
split_file_path = os.path.join(splits_dir_path, "train.txt")
elif mode in ("val", "test", "demo"):
split_file_path = os.path.join(splits_dir_path, "val.txt")
else:
raise RuntimeError("Unknown dataset splitting mode")
self.images = []
self.masks = []
        with open(split_file_path, "r") as lines:
for line in lines:
image_file_path = os.path.join(image_dir_path, line.rstrip('\n') + ".jpg")
assert os.path.isfile(image_file_path)
self.images.append(image_file_path)
mask_file_path = os.path.join(mask_dir_path, line.rstrip('\n') + ".png")
assert os.path.isfile(mask_file_path)
self.masks.append(mask_file_path)
assert (len(self.images) == len(self.masks))
def __getitem__(self, index):
image = Image.open(self.images[index]).convert("RGB")
if self.mode == "demo":
image = self._img_transform(image)
if self.transform is not None:
image = self.transform(image)
return image, os.path.basename(self.images[index])
mask = Image.open(self.masks[index])
if self.mode == "train":
image, mask = self._sync_transform(image, mask)
elif self.mode == "val":
image, mask = self._val_sync_transform(image, mask)
else:
assert self.mode == "test"
image, mask = self._img_transform(image), self._mask_transform(mask)
if self.transform is not None:
image = self.transform(image)
return image, mask
classes = 21
vague_idx = 255
use_vague = True
background_idx = 0
ignore_bg = True
@staticmethod
def _mask_transform(mask):
np_mask = np.array(mask).astype(np.int32)
# np_mask[np_mask == 255] = VOCSegDataset.vague_idx
return np_mask
def __len__(self):
return len(self.images)
def voc_test_transform(ds_metainfo,
mean_rgb=(0.485, 0.456, 0.406),
std_rgb=(0.229, 0.224, 0.225)):
assert (ds_metainfo is not None)
return transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(
mean=mean_rgb,
std=std_rgb)
])
class VOCMetaInfo(DatasetMetaInfo):
def __init__(self):
super(VOCMetaInfo, self).__init__()
self.label = "VOC"
self.short_label = "voc"
self.root_dir_name = "voc"
self.dataset_class = VOCSegDataset
self.num_training_samples = None
self.in_channels = 3
self.num_classes = VOCSegDataset.classes
self.input_image_size = (480, 480)
self.train_metric_capts = None
self.train_metric_names = None
self.train_metric_extra_kwargs = None
self.val_metric_capts = None
self.val_metric_names = None
self.test_metric_capts = ["Val.PixAcc", "Val.IoU"]
self.test_metric_names = ["PixelAccuracyMetric", "MeanIoUMetric"]
self.test_metric_extra_kwargs = [
{"vague_idx": VOCSegDataset.vague_idx,
"use_vague": VOCSegDataset.use_vague,
"macro_average": False},
{"num_classes": VOCSegDataset.classes,
"vague_idx": VOCSegDataset.vague_idx,
"use_vague": VOCSegDataset.use_vague,
"bg_idx": VOCSegDataset.background_idx,
"ignore_bg": VOCSegDataset.ignore_bg,
"macro_average": False}]
self.saver_acc_ind = 1
self.train_transform = None
self.val_transform = voc_test_transform
self.test_transform = voc_test_transform
self.ml_type = "imgseg"
self.allow_hybridize = False
self.net_extra_kwargs = {"aux": False, "fixed_size": False}
self.load_ignore_extra = True
self.image_base_size = 520
self.image_crop_size = 480
def add_dataset_parser_arguments(self,
parser,
work_dir_path):
super(VOCMetaInfo, self).add_dataset_parser_arguments(parser, work_dir_path)
parser.add_argument(
"--image-base-size",
type=int,
default=520,
help="base image size")
parser.add_argument(
"--image-crop-size",
type=int,
default=480,
help="crop image size")
def update(self,
args):
super(VOCMetaInfo, self).update(args)
self.image_base_size = args.image_base_size
self.image_crop_size = args.image_crop_size
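# A minimal usage sketch, assuming VOCdevkit is unpacked under the path below
# (the location is illustrative):
def _test():
    dataset = VOCSegDataset(
        root=os.path.expanduser(os.path.join("~", ".torch", "datasets", "voc", "VOCdevkit")),
        mode="val",
        transform=voc_test_transform(ds_metainfo=VOCMetaInfo()))
    image, mask = dataset[0]
    print("samples: {}, image: {}, mask: {}".format(len(dataset), image.shape, mask.shape))
if __name__ == "__main__":
    _test()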
| 5,894
| 33.273256
| 90
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/datasets/cifar100_cls_dataset.py
|
"""
CIFAR-100 classification dataset.
"""
import os
from torchvision.datasets import CIFAR100
from .cifar10_cls_dataset import CIFAR10MetaInfo
class CIFAR100Fine(CIFAR100):
"""
CIFAR-100 image classification dataset.
Parameters:
----------
root : str, default '~/.torch/datasets/cifar100'
Path to temp folder for storing data.
mode : str, default 'train'
'train', 'val', or 'test'.
transform : function, default None
A function that takes data and label and transforms them.
"""
def __init__(self,
root=os.path.join("~", ".torch", "datasets", "cifar100"),
mode="train",
transform=None):
super(CIFAR100Fine, self).__init__(
root=root,
train=(mode == "train"),
transform=transform,
download=True)
class CIFAR100MetaInfo(CIFAR10MetaInfo):
def __init__(self):
super(CIFAR100MetaInfo, self).__init__()
self.label = "CIFAR100"
self.root_dir_name = "cifar100"
self.dataset_class = CIFAR100Fine
self.num_classes = 100
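# A minimal usage sketch (the dataset is downloaded automatically on first use):
def _test():
    dataset = CIFAR100Fine(mode="train")
    image, label = dataset[0]
    print("samples: {}, first label: {}".format(len(dataset), label))
if __name__ == "__main__":
    _test()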
| 1,132
| 25.97619
| 74
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/datasets/hpatches_mch_dataset.py
|
"""
HPatches image matching dataset.
"""
import os
import cv2
import numpy as np
import torch.utils.data as data
import torchvision.transforms as transforms
from .dataset_metainfo import DatasetMetaInfo
class HPatches(data.Dataset):
"""
HPatches (full image sequences) image matching dataset.
Info URL: https://github.com/hpatches/hpatches-dataset
Data URL: http://icvl.ee.ic.ac.uk/vbalnt/hpatches/hpatches-sequences-release.tar.gz
Parameters:
----------
root : str, default '~/.torch/datasets/hpatches'
        Path to the folder where the dataset is stored.
mode : str, default 'train'
'train', 'val', or 'test'.
alteration : str, default 'all'
'all', 'i' for illumination or 'v' for viewpoint.
transform : function, default None
A function that takes data and label and transforms them.
"""
def __init__(self,
root=os.path.join("~", ".torch", "datasets", "hpatches"),
mode="train",
alteration="all",
transform=None):
        super(HPatches, self).__init__()
        root = os.path.expanduser(root)
        assert os.path.exists(root)
num_images = 5
image_file_ext = ".ppm"
self.mode = mode
self.image_paths = []
self.warped_image_paths = []
self.homographies = []
subdir_names = [name for name in os.listdir(root) if os.path.isdir(os.path.join(root, name))]
# subdir_names.sort()
if alteration != "all":
subdir_names = [name for name in subdir_names if name[0] == alteration]
for subdir_name in subdir_names:
subdir_path = os.path.join(root, subdir_name)
for i in range(num_images):
k = i + 2
self.image_paths.append(os.path.join(subdir_path, "1" + image_file_ext))
self.warped_image_paths.append(os.path.join(subdir_path, str(k) + image_file_ext))
self.homographies.append(np.loadtxt(os.path.join(subdir_path, "H_1_" + str(k))))
self.transform = transform
def __getitem__(self, index):
# print("Image file name: {}, index: {}".format(self.image_paths[index], index))
image = cv2.imread(self.image_paths[index], flags=0)
# if image.shape[0] > 1500:
# image = cv2.resize(
# src=image,
# dsize=None,
# fx=0.5,
# fy=0.5,
# interpolation=cv2.INTER_AREA)
# print("Image shape: {}".format(image.shape))
warped_image = cv2.imread(self.warped_image_paths[index], flags=0)
# if warped_image.shape[0] > 1500:
# warped_image = cv2.resize(
# src=warped_image,
# dsize=None,
# fx=0.5,
# fy=0.5,
# interpolation=cv2.INTER_AREA)
# print("W-Image shape: {}".format(warped_image.shape))
homography = self.homographies[index].astype(np.float32)
if self.transform is not None:
image = self.transform(image)
warped_image = self.transform(warped_image)
return image, warped_image, homography
def __len__(self):
return len(self.image_paths)
class HPatchesMetaInfo(DatasetMetaInfo):
def __init__(self):
super(HPatchesMetaInfo, self).__init__()
self.label = "hpatches"
self.short_label = "hpatches"
self.root_dir_name = "hpatches"
self.dataset_class = HPatches
self.ml_type = "imgmch"
self.do_transform = True
self.val_transform = hpatches_val_transform
self.test_transform = hpatches_val_transform
self.allow_hybridize = False
self.net_extra_kwargs = {}
def add_dataset_parser_arguments(self,
parser,
work_dir_path):
super(HPatchesMetaInfo, self).add_dataset_parser_arguments(parser, work_dir_path)
parser.add_argument(
"--alteration",
type=str,
default="all",
help="dataset alternation. options are all, i, or v")
def update(self,
args):
super(HPatchesMetaInfo, self).update(args)
self.dataset_class_extra_kwargs = {"alteration": args.alteration}
def hpatches_val_transform(ds_metainfo):
assert (ds_metainfo is not None)
return transforms.Compose([
transforms.ToTensor()
])
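# A minimal usage sketch, assuming the HPatches sequences are unpacked under
# the default root; each sample pairs the reference image of a sequence with
# one of its five warped views plus the ground-truth homography:
def _test():
    dataset = HPatches(alteration="i", transform=hpatches_val_transform(HPatchesMetaInfo()))
    image, warped_image, homography = dataset[0]
    print("pairs: {}, H shape: {}".format(len(dataset), homography.shape))
if __name__ == "__main__":
    _test()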
| 4,450
| 33.773438
| 101
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/__init__.py
| 0
| 0
| 0
|
py
|
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/model_provider.py
|
from .models.alexnet import *
from .models.zfnet import *
from .models.vgg import *
from .models.bninception import *
from .models.resnet import *
from .models.preresnet import *
from .models.resnext import *
from .models.seresnet import *
from .models.sepreresnet import *
from .models.seresnext import *
from .models.senet import *
from .models.resnesta import *
from .models.ibnresnet import *
from .models.ibnbresnet import *
from .models.ibnresnext import *
from .models.ibndensenet import *
from .models.airnet import *
from .models.airnext import *
from .models.bamresnet import *
from .models.cbamresnet import *
from .models.resattnet import *
from .models.sknet import *
from .models.scnet import *
from .models.regnet import *
from .models.diaresnet import *
from .models.diapreresnet import *
from .models.pyramidnet import *
from .models.diracnetv2 import *
from .models.sharesnet import *
from .models.densenet import *
from .models.condensenet import *
from .models.sparsenet import *
from .models.peleenet import *
from .models.wrn import *
from .models.drn import *
from .models.dpn import *
from .models.darknet import *
from .models.darknet53 import *
from .models.channelnet import *
from .models.isqrtcovresnet import *
from .models.revnet import *
from .models.irevnet import *
from .models.bagnet import *
from .models.dla import *
from .models.msdnet import *
from .models.fishnet import *
from .models.espnetv2 import *
from .models.dicenet import *
from .models.hrnet import *
from .models.vovnet import *
from .models.selecsls import *
from .models.hardnet import *
from .models.xdensenet import *
from .models.squeezenet import *
from .models.squeezenext import *
from .models.shufflenet import *
from .models.shufflenetv2 import *
from .models.shufflenetv2b import *
from .models.menet import *
from .models.mobilenet import *
from .models.mobilenetb import *
from .models.fdmobilenet import *
from .models.mobilenetv2 import *
from .models.mobilenetv3 import *
from .models.igcv3 import *
from .models.ghostnet import *
from .models.mnasnet import *
from .models.darts import *
from .models.proxylessnas import *
from .models.fbnet import *
from .models.xception import *
from .models.inceptionv3 import *
from .models.inceptionv4 import *
from .models.inceptionresnetv1 import *
from .models.inceptionresnetv2 import *
from .models.polynet import *
from .models.nasnet import *
from .models.pnasnet import *
from .models.spnasnet import *
from .models.efficientnet import *
from .models.efficientnetedge import *
from .models.mixnet import *
from .models.nin_cifar import *
from .models.resnet_cifar import *
from .models.preresnet_cifar import *
from .models.resnext_cifar import *
from .models.seresnet_cifar import *
from .models.sepreresnet_cifar import *
from .models.pyramidnet_cifar import *
from .models.densenet_cifar import *
from .models.xdensenet_cifar import *
from .models.wrn_cifar import *
from .models.wrn1bit_cifar import *
from .models.ror_cifar import *
from .models.rir_cifar import *
from .models.msdnet_cifar10 import *
from .models.resdropresnet_cifar import *
from .models.shakeshakeresnet_cifar import *
from .models.shakedropresnet_cifar import *
from .models.fractalnet_cifar import *
from .models.diaresnet_cifar import *
from .models.diapreresnet_cifar import *
from .models.octresnet import *
from .models.resneta import *
from .models.resnetd import *
from .models.fastseresnet import *
from .models.resnet_cub import *
from .models.seresnet_cub import *
from .models.mobilenet_cub import *
from .models.proxylessnas_cub import *
from .models.ntsnet_cub import *
from .models.fcn8sd import *
from .models.pspnet import *
from .models.deeplabv3 import *
from .models.icnet import *
from .models.fastscnn import *
from .models.cgnet import *
from .models.dabnet import *
from .models.sinet import *
from .models.bisenet import *
from .models.danet import *
from .models.fpenet import *
from .models.contextnet import *
from .models.lednet import *
from .models.esnet import *
from .models.edanet import *
from .models.enet import *
from .models.erfnet import *
from .models.linknet import *
from .models.segnet import *
from .models.unet import *
from .models.sqnet import *
from .models.alphapose_coco import *
from .models.simplepose_coco import *
from .models.simpleposemobile_coco import *
from .models.lwopenpose_cmupan import *
from .models.ibppose_coco import *
from .models.prnet import *
from .models.centernet import *
from .models.lffd import *
from .models.pfpcnet import *
from .models.voca import *
from .models.nvpattexp import *
from .models.superpointnet import *
from .models.jasper import *
from .models.jasperdr import *
from .models.quartznet import *
# from .models.others.oth_quartznet import *
# from .models.others.oth_pose_resnet import *
# from .models.others.oth_lwopenpose2d import *
# from .models.others.oth_lwopenpose3d import *
# from .models.others.oth_prnet import *
# from .models.others.oth_sinet import *
# from .models.others.oth_ibppose import *
# from .models.others.oth_bisenet1 import *
# from .models.others.oth_regnet import *
# from .models.others.oth_tresnet import *
# from .models.tresnet import *
# from .models.others.oth_dabnet import *
__all__ = ['get_model']
_models = {
'alexnet': alexnet,
'alexnetb': alexnetb,
'zfnet': zfnet,
'zfnetb': zfnetb,
'vgg11': vgg11,
'vgg13': vgg13,
'vgg16': vgg16,
'vgg19': vgg19,
'bn_vgg11': bn_vgg11,
'bn_vgg13': bn_vgg13,
'bn_vgg16': bn_vgg16,
'bn_vgg19': bn_vgg19,
'bn_vgg11b': bn_vgg11b,
'bn_vgg13b': bn_vgg13b,
'bn_vgg16b': bn_vgg16b,
'bn_vgg19b': bn_vgg19b,
'bninception': bninception,
'resnet10': resnet10,
'resnet12': resnet12,
'resnet14': resnet14,
'resnetbc14b': resnetbc14b,
'resnet16': resnet16,
'resnet18_wd4': resnet18_wd4,
'resnet18_wd2': resnet18_wd2,
'resnet18_w3d4': resnet18_w3d4,
'resnet18': resnet18,
'resnet26': resnet26,
'resnetbc26b': resnetbc26b,
'resnet34': resnet34,
'resnetbc38b': resnetbc38b,
'resnet50': resnet50,
'resnet50b': resnet50b,
'resnet101': resnet101,
'resnet101b': resnet101b,
'resnet152': resnet152,
'resnet152b': resnet152b,
'resnet200': resnet200,
'resnet200b': resnet200b,
'preresnet10': preresnet10,
'preresnet12': preresnet12,
'preresnet14': preresnet14,
'preresnetbc14b': preresnetbc14b,
'preresnet16': preresnet16,
'preresnet18_wd4': preresnet18_wd4,
'preresnet18_wd2': preresnet18_wd2,
'preresnet18_w3d4': preresnet18_w3d4,
'preresnet18': preresnet18,
'preresnet26': preresnet26,
'preresnetbc26b': preresnetbc26b,
'preresnet34': preresnet34,
'preresnetbc38b': preresnetbc38b,
'preresnet50': preresnet50,
'preresnet50b': preresnet50b,
'preresnet101': preresnet101,
'preresnet101b': preresnet101b,
'preresnet152': preresnet152,
'preresnet152b': preresnet152b,
'preresnet200': preresnet200,
'preresnet200b': preresnet200b,
'preresnet269b': preresnet269b,
'resnext14_16x4d': resnext14_16x4d,
'resnext14_32x2d': resnext14_32x2d,
'resnext14_32x4d': resnext14_32x4d,
'resnext26_16x4d': resnext26_16x4d,
'resnext26_32x2d': resnext26_32x2d,
'resnext26_32x4d': resnext26_32x4d,
'resnext38_32x4d': resnext38_32x4d,
'resnext50_32x4d': resnext50_32x4d,
'resnext101_32x4d': resnext101_32x4d,
'resnext101_64x4d': resnext101_64x4d,
'seresnet10': seresnet10,
'seresnet12': seresnet12,
'seresnet14': seresnet14,
'seresnet16': seresnet16,
'seresnet18': seresnet18,
'seresnet26': seresnet26,
'seresnetbc26b': seresnetbc26b,
'seresnet34': seresnet34,
'seresnetbc38b': seresnetbc38b,
'seresnet50': seresnet50,
'seresnet50b': seresnet50b,
'seresnet101': seresnet101,
'seresnet101b': seresnet101b,
'seresnet152': seresnet152,
'seresnet152b': seresnet152b,
'seresnet200': seresnet200,
'seresnet200b': seresnet200b,
'sepreresnet10': sepreresnet10,
'sepreresnet12': sepreresnet12,
'sepreresnet14': sepreresnet14,
'sepreresnet16': sepreresnet16,
'sepreresnet18': sepreresnet18,
'sepreresnet26': sepreresnet26,
'sepreresnetbc26b': sepreresnetbc26b,
'sepreresnet34': sepreresnet34,
'sepreresnetbc38b': sepreresnetbc38b,
'sepreresnet50': sepreresnet50,
'sepreresnet50b': sepreresnet50b,
'sepreresnet101': sepreresnet101,
'sepreresnet101b': sepreresnet101b,
'sepreresnet152': sepreresnet152,
'sepreresnet152b': sepreresnet152b,
'sepreresnet200': sepreresnet200,
'sepreresnet200b': sepreresnet200b,
'seresnext50_32x4d': seresnext50_32x4d,
'seresnext101_32x4d': seresnext101_32x4d,
'seresnext101_64x4d': seresnext101_64x4d,
'senet16': senet16,
'senet28': senet28,
'senet40': senet40,
'senet52': senet52,
'senet103': senet103,
'senet154': senet154,
'resnestabc14': resnestabc14,
'resnesta18': resnesta18,
'resnestabc26': resnestabc26,
'resnesta50': resnesta50,
'resnesta101': resnesta101,
'resnesta152': resnesta152,
'resnesta200': resnesta200,
'resnesta269': resnesta269,
'ibn_resnet50': ibn_resnet50,
'ibn_resnet101': ibn_resnet101,
'ibn_resnet152': ibn_resnet152,
'ibnb_resnet50': ibnb_resnet50,
'ibnb_resnet101': ibnb_resnet101,
'ibnb_resnet152': ibnb_resnet152,
'ibn_resnext50_32x4d': ibn_resnext50_32x4d,
'ibn_resnext101_32x4d': ibn_resnext101_32x4d,
'ibn_resnext101_64x4d': ibn_resnext101_64x4d,
'ibn_densenet121': ibn_densenet121,
'ibn_densenet161': ibn_densenet161,
'ibn_densenet169': ibn_densenet169,
'ibn_densenet201': ibn_densenet201,
'airnet50_1x64d_r2': airnet50_1x64d_r2,
'airnet50_1x64d_r16': airnet50_1x64d_r16,
'airnet101_1x64d_r2': airnet101_1x64d_r2,
'airnext50_32x4d_r2': airnext50_32x4d_r2,
'airnext101_32x4d_r2': airnext101_32x4d_r2,
'airnext101_32x4d_r16': airnext101_32x4d_r16,
'bam_resnet18': bam_resnet18,
'bam_resnet34': bam_resnet34,
'bam_resnet50': bam_resnet50,
'bam_resnet101': bam_resnet101,
'bam_resnet152': bam_resnet152,
'cbam_resnet18': cbam_resnet18,
'cbam_resnet34': cbam_resnet34,
'cbam_resnet50': cbam_resnet50,
'cbam_resnet101': cbam_resnet101,
'cbam_resnet152': cbam_resnet152,
'resattnet56': resattnet56,
'resattnet92': resattnet92,
'resattnet128': resattnet128,
'resattnet164': resattnet164,
'resattnet200': resattnet200,
'resattnet236': resattnet236,
'resattnet452': resattnet452,
'sknet50': sknet50,
'sknet101': sknet101,
'sknet152': sknet152,
'scnet50': scnet50,
'scnet101': scnet101,
'scneta50': scneta50,
'scneta101': scneta101,
'regnetx002': regnetx002,
'regnetx004': regnetx004,
'regnetx006': regnetx006,
'regnetx008': regnetx008,
'regnetx016': regnetx016,
'regnetx032': regnetx032,
'regnetx040': regnetx040,
'regnetx064': regnetx064,
'regnetx080': regnetx080,
'regnetx120': regnetx120,
'regnetx160': regnetx160,
'regnetx320': regnetx320,
'regnety002': regnety002,
'regnety004': regnety004,
'regnety006': regnety006,
'regnety008': regnety008,
'regnety016': regnety016,
'regnety032': regnety032,
'regnety040': regnety040,
'regnety064': regnety064,
'regnety080': regnety080,
'regnety120': regnety120,
'regnety160': regnety160,
'regnety320': regnety320,
'diaresnet10': diaresnet10,
'diaresnet12': diaresnet12,
'diaresnet14': diaresnet14,
'diaresnetbc14b': diaresnetbc14b,
'diaresnet16': diaresnet16,
'diaresnet18': diaresnet18,
'diaresnet26': diaresnet26,
'diaresnetbc26b': diaresnetbc26b,
'diaresnet34': diaresnet34,
'diaresnetbc38b': diaresnetbc38b,
'diaresnet50': diaresnet50,
'diaresnet50b': diaresnet50b,
'diaresnet101': diaresnet101,
'diaresnet101b': diaresnet101b,
'diaresnet152': diaresnet152,
'diaresnet152b': diaresnet152b,
'diaresnet200': diaresnet200,
'diaresnet200b': diaresnet200b,
'diapreresnet10': diapreresnet10,
'diapreresnet12': diapreresnet12,
'diapreresnet14': diapreresnet14,
'diapreresnetbc14b': diapreresnetbc14b,
'diapreresnet16': diapreresnet16,
'diapreresnet18': diapreresnet18,
'diapreresnet26': diapreresnet26,
'diapreresnetbc26b': diapreresnetbc26b,
'diapreresnet34': diapreresnet34,
'diapreresnetbc38b': diapreresnetbc38b,
'diapreresnet50': diapreresnet50,
'diapreresnet50b': diapreresnet50b,
'diapreresnet101': diapreresnet101,
'diapreresnet101b': diapreresnet101b,
'diapreresnet152': diapreresnet152,
'diapreresnet152b': diapreresnet152b,
'diapreresnet200': diapreresnet200,
'diapreresnet200b': diapreresnet200b,
'diapreresnet269b': diapreresnet269b,
'pyramidnet101_a360': pyramidnet101_a360,
'diracnet18v2': diracnet18v2,
'diracnet34v2': diracnet34v2,
'sharesnet18': sharesnet18,
'sharesnet34': sharesnet34,
'sharesnet50': sharesnet50,
'sharesnet50b': sharesnet50b,
'sharesnet101': sharesnet101,
'sharesnet101b': sharesnet101b,
'sharesnet152': sharesnet152,
'sharesnet152b': sharesnet152b,
'densenet121': densenet121,
'densenet161': densenet161,
'densenet169': densenet169,
'densenet201': densenet201,
'condensenet74_c4_g4': condensenet74_c4_g4,
'condensenet74_c8_g8': condensenet74_c8_g8,
'sparsenet121': sparsenet121,
'sparsenet161': sparsenet161,
'sparsenet169': sparsenet169,
'sparsenet201': sparsenet201,
'sparsenet264': sparsenet264,
'peleenet': peleenet,
'wrn50_2': wrn50_2,
'drnc26': drnc26,
'drnc42': drnc42,
'drnc58': drnc58,
'drnd22': drnd22,
'drnd38': drnd38,
'drnd54': drnd54,
'drnd105': drnd105,
'dpn68': dpn68,
'dpn68b': dpn68b,
'dpn98': dpn98,
'dpn107': dpn107,
'dpn131': dpn131,
'darknet_ref': darknet_ref,
'darknet_tiny': darknet_tiny,
'darknet19': darknet19,
'darknet53': darknet53,
'channelnet': channelnet,
'revnet38': revnet38,
'revnet110': revnet110,
'revnet164': revnet164,
'irevnet301': irevnet301,
'bagnet9': bagnet9,
'bagnet17': bagnet17,
'bagnet33': bagnet33,
'dla34': dla34,
'dla46c': dla46c,
'dla46xc': dla46xc,
'dla60': dla60,
'dla60x': dla60x,
'dla60xc': dla60xc,
'dla102': dla102,
'dla102x': dla102x,
'dla102x2': dla102x2,
'dla169': dla169,
'msdnet22': msdnet22,
'fishnet99': fishnet99,
'fishnet150': fishnet150,
'espnetv2_wd2': espnetv2_wd2,
'espnetv2_w1': espnetv2_w1,
'espnetv2_w5d4': espnetv2_w5d4,
'espnetv2_w3d2': espnetv2_w3d2,
'espnetv2_w2': espnetv2_w2,
'dicenet_wd5': dicenet_wd5,
'dicenet_wd2': dicenet_wd2,
'dicenet_w3d4': dicenet_w3d4,
'dicenet_w1': dicenet_w1,
'dicenet_w5d4': dicenet_w5d4,
'dicenet_w3d2': dicenet_w3d2,
'dicenet_w7d8': dicenet_w7d8,
'dicenet_w2': dicenet_w2,
'hrnet_w18_small_v1': hrnet_w18_small_v1,
'hrnet_w18_small_v2': hrnet_w18_small_v2,
'hrnetv2_w18': hrnetv2_w18,
'hrnetv2_w30': hrnetv2_w30,
'hrnetv2_w32': hrnetv2_w32,
'hrnetv2_w40': hrnetv2_w40,
'hrnetv2_w44': hrnetv2_w44,
'hrnetv2_w48': hrnetv2_w48,
'hrnetv2_w64': hrnetv2_w64,
'vovnet27s': vovnet27s,
'vovnet39': vovnet39,
'vovnet57': vovnet57,
'selecsls42': selecsls42,
'selecsls42b': selecsls42b,
'selecsls60': selecsls60,
'selecsls60b': selecsls60b,
'selecsls84': selecsls84,
'hardnet39ds': hardnet39ds,
'hardnet68ds': hardnet68ds,
'hardnet68': hardnet68,
'hardnet85': hardnet85,
'xdensenet121_2': xdensenet121_2,
'xdensenet161_2': xdensenet161_2,
'xdensenet169_2': xdensenet169_2,
'xdensenet201_2': xdensenet201_2,
'squeezenet_v1_0': squeezenet_v1_0,
'squeezenet_v1_1': squeezenet_v1_1,
'squeezeresnet_v1_0': squeezeresnet_v1_0,
'squeezeresnet_v1_1': squeezeresnet_v1_1,
'sqnxt23_w1': sqnxt23_w1,
'sqnxt23_w3d2': sqnxt23_w3d2,
'sqnxt23_w2': sqnxt23_w2,
'sqnxt23v5_w1': sqnxt23v5_w1,
'sqnxt23v5_w3d2': sqnxt23v5_w3d2,
'sqnxt23v5_w2': sqnxt23v5_w2,
'shufflenet_g1_w1': shufflenet_g1_w1,
'shufflenet_g2_w1': shufflenet_g2_w1,
'shufflenet_g3_w1': shufflenet_g3_w1,
'shufflenet_g4_w1': shufflenet_g4_w1,
'shufflenet_g8_w1': shufflenet_g8_w1,
'shufflenet_g1_w3d4': shufflenet_g1_w3d4,
'shufflenet_g3_w3d4': shufflenet_g3_w3d4,
'shufflenet_g1_wd2': shufflenet_g1_wd2,
'shufflenet_g3_wd2': shufflenet_g3_wd2,
'shufflenet_g1_wd4': shufflenet_g1_wd4,
'shufflenet_g3_wd4': shufflenet_g3_wd4,
'shufflenetv2_wd2': shufflenetv2_wd2,
'shufflenetv2_w1': shufflenetv2_w1,
'shufflenetv2_w3d2': shufflenetv2_w3d2,
'shufflenetv2_w2': shufflenetv2_w2,
'shufflenetv2b_wd2': shufflenetv2b_wd2,
'shufflenetv2b_w1': shufflenetv2b_w1,
'shufflenetv2b_w3d2': shufflenetv2b_w3d2,
'shufflenetv2b_w2': shufflenetv2b_w2,
'menet108_8x1_g3': menet108_8x1_g3,
'menet128_8x1_g4': menet128_8x1_g4,
'menet160_8x1_g8': menet160_8x1_g8,
'menet228_12x1_g3': menet228_12x1_g3,
'menet256_12x1_g4': menet256_12x1_g4,
'menet348_12x1_g3': menet348_12x1_g3,
'menet352_12x1_g8': menet352_12x1_g8,
'menet456_24x1_g3': menet456_24x1_g3,
'mobilenet_w1': mobilenet_w1,
'mobilenet_w3d4': mobilenet_w3d4,
'mobilenet_wd2': mobilenet_wd2,
'mobilenet_wd4': mobilenet_wd4,
'mobilenetb_w1': mobilenetb_w1,
'mobilenetb_w3d4': mobilenetb_w3d4,
'mobilenetb_wd2': mobilenetb_wd2,
'mobilenetb_wd4': mobilenetb_wd4,
'fdmobilenet_w1': fdmobilenet_w1,
'fdmobilenet_w3d4': fdmobilenet_w3d4,
'fdmobilenet_wd2': fdmobilenet_wd2,
'fdmobilenet_wd4': fdmobilenet_wd4,
'mobilenetv2_w1': mobilenetv2_w1,
'mobilenetv2_w3d4': mobilenetv2_w3d4,
'mobilenetv2_wd2': mobilenetv2_wd2,
'mobilenetv2_wd4': mobilenetv2_wd4,
'mobilenetv2b_w1': mobilenetv2b_w1,
'mobilenetv2b_w3d4': mobilenetv2b_w3d4,
'mobilenetv2b_wd2': mobilenetv2b_wd2,
'mobilenetv2b_wd4': mobilenetv2b_wd4,
'mobilenetv3_small_w7d20': mobilenetv3_small_w7d20,
'mobilenetv3_small_wd2': mobilenetv3_small_wd2,
'mobilenetv3_small_w3d4': mobilenetv3_small_w3d4,
'mobilenetv3_small_w1': mobilenetv3_small_w1,
'mobilenetv3_small_w5d4': mobilenetv3_small_w5d4,
'mobilenetv3_large_w7d20': mobilenetv3_large_w7d20,
'mobilenetv3_large_wd2': mobilenetv3_large_wd2,
'mobilenetv3_large_w3d4': mobilenetv3_large_w3d4,
'mobilenetv3_large_w1': mobilenetv3_large_w1,
'mobilenetv3_large_w5d4': mobilenetv3_large_w5d4,
'igcv3_w1': igcv3_w1,
'igcv3_w3d4': igcv3_w3d4,
'igcv3_wd2': igcv3_wd2,
'igcv3_wd4': igcv3_wd4,
'ghostnet': ghostnet,
'mnasnet_b1': mnasnet_b1,
'mnasnet_a1': mnasnet_a1,
'mnasnet_small': mnasnet_small,
'darts': darts,
'proxylessnas_cpu': proxylessnas_cpu,
'proxylessnas_gpu': proxylessnas_gpu,
'proxylessnas_mobile': proxylessnas_mobile,
'proxylessnas_mobile14': proxylessnas_mobile14,
'fbnet_cb': fbnet_cb,
'xception': xception,
'inceptionv3': inceptionv3,
'inceptionv4': inceptionv4,
'inceptionresnetv1': inceptionresnetv1,
'inceptionresnetv2': inceptionresnetv2,
'polynet': polynet,
'nasnet_4a1056': nasnet_4a1056,
'nasnet_6a4032': nasnet_6a4032,
'pnasnet5large': pnasnet5large,
'spnasnet': spnasnet,
'efficientnet_b0': efficientnet_b0,
'efficientnet_b1': efficientnet_b1,
'efficientnet_b2': efficientnet_b2,
'efficientnet_b3': efficientnet_b3,
'efficientnet_b4': efficientnet_b4,
'efficientnet_b5': efficientnet_b5,
'efficientnet_b6': efficientnet_b6,
'efficientnet_b7': efficientnet_b7,
'efficientnet_b8': efficientnet_b8,
'efficientnet_b0b': efficientnet_b0b,
'efficientnet_b1b': efficientnet_b1b,
'efficientnet_b2b': efficientnet_b2b,
'efficientnet_b3b': efficientnet_b3b,
'efficientnet_b4b': efficientnet_b4b,
'efficientnet_b5b': efficientnet_b5b,
'efficientnet_b6b': efficientnet_b6b,
'efficientnet_b7b': efficientnet_b7b,
'efficientnet_b0c': efficientnet_b0c,
'efficientnet_b1c': efficientnet_b1c,
'efficientnet_b2c': efficientnet_b2c,
'efficientnet_b3c': efficientnet_b3c,
'efficientnet_b4c': efficientnet_b4c,
'efficientnet_b5c': efficientnet_b5c,
'efficientnet_b6c': efficientnet_b6c,
'efficientnet_b7c': efficientnet_b7c,
'efficientnet_b8c': efficientnet_b8c,
'efficientnet_edge_small_b': efficientnet_edge_small_b,
'efficientnet_edge_medium_b': efficientnet_edge_medium_b,
'efficientnet_edge_large_b': efficientnet_edge_large_b,
'mixnet_s': mixnet_s,
'mixnet_m': mixnet_m,
'mixnet_l': mixnet_l,
'nin_cifar10': nin_cifar10,
'nin_cifar100': nin_cifar100,
'nin_svhn': nin_svhn,
'resnet20_cifar10': resnet20_cifar10,
'resnet20_cifar100': resnet20_cifar100,
'resnet20_svhn': resnet20_svhn,
'resnet56_cifar10': resnet56_cifar10,
'resnet56_cifar100': resnet56_cifar100,
'resnet56_svhn': resnet56_svhn,
'resnet110_cifar10': resnet110_cifar10,
'resnet110_cifar100': resnet110_cifar100,
'resnet110_svhn': resnet110_svhn,
'resnet164bn_cifar10': resnet164bn_cifar10,
'resnet164bn_cifar100': resnet164bn_cifar100,
'resnet164bn_svhn': resnet164bn_svhn,
'resnet272bn_cifar10': resnet272bn_cifar10,
'resnet272bn_cifar100': resnet272bn_cifar100,
'resnet272bn_svhn': resnet272bn_svhn,
'resnet542bn_cifar10': resnet542bn_cifar10,
'resnet542bn_cifar100': resnet542bn_cifar100,
'resnet542bn_svhn': resnet542bn_svhn,
'resnet1001_cifar10': resnet1001_cifar10,
'resnet1001_cifar100': resnet1001_cifar100,
'resnet1001_svhn': resnet1001_svhn,
'resnet1202_cifar10': resnet1202_cifar10,
'resnet1202_cifar100': resnet1202_cifar100,
'resnet1202_svhn': resnet1202_svhn,
'preresnet20_cifar10': preresnet20_cifar10,
'preresnet20_cifar100': preresnet20_cifar100,
'preresnet20_svhn': preresnet20_svhn,
'preresnet56_cifar10': preresnet56_cifar10,
'preresnet56_cifar100': preresnet56_cifar100,
'preresnet56_svhn': preresnet56_svhn,
'preresnet110_cifar10': preresnet110_cifar10,
'preresnet110_cifar100': preresnet110_cifar100,
'preresnet110_svhn': preresnet110_svhn,
'preresnet164bn_cifar10': preresnet164bn_cifar10,
'preresnet164bn_cifar100': preresnet164bn_cifar100,
'preresnet164bn_svhn': preresnet164bn_svhn,
'preresnet272bn_cifar10': preresnet272bn_cifar10,
'preresnet272bn_cifar100': preresnet272bn_cifar100,
'preresnet272bn_svhn': preresnet272bn_svhn,
'preresnet542bn_cifar10': preresnet542bn_cifar10,
'preresnet542bn_cifar100': preresnet542bn_cifar100,
'preresnet542bn_svhn': preresnet542bn_svhn,
'preresnet1001_cifar10': preresnet1001_cifar10,
'preresnet1001_cifar100': preresnet1001_cifar100,
'preresnet1001_svhn': preresnet1001_svhn,
'preresnet1202_cifar10': preresnet1202_cifar10,
'preresnet1202_cifar100': preresnet1202_cifar100,
'preresnet1202_svhn': preresnet1202_svhn,
'resnext20_16x4d_cifar10': resnext20_16x4d_cifar10,
'resnext20_16x4d_cifar100': resnext20_16x4d_cifar100,
'resnext20_16x4d_svhn': resnext20_16x4d_svhn,
'resnext20_32x2d_cifar10': resnext20_32x2d_cifar10,
'resnext20_32x2d_cifar100': resnext20_32x2d_cifar100,
'resnext20_32x2d_svhn': resnext20_32x2d_svhn,
'resnext20_32x4d_cifar10': resnext20_32x4d_cifar10,
'resnext20_32x4d_cifar100': resnext20_32x4d_cifar100,
'resnext20_32x4d_svhn': resnext20_32x4d_svhn,
'resnext29_32x4d_cifar10': resnext29_32x4d_cifar10,
'resnext29_32x4d_cifar100': resnext29_32x4d_cifar100,
'resnext29_32x4d_svhn': resnext29_32x4d_svhn,
'resnext29_16x64d_cifar10': resnext29_16x64d_cifar10,
'resnext29_16x64d_cifar100': resnext29_16x64d_cifar100,
'resnext29_16x64d_svhn': resnext29_16x64d_svhn,
'resnext272_1x64d_cifar10': resnext272_1x64d_cifar10,
'resnext272_1x64d_cifar100': resnext272_1x64d_cifar100,
'resnext272_1x64d_svhn': resnext272_1x64d_svhn,
'resnext272_2x32d_cifar10': resnext272_2x32d_cifar10,
'resnext272_2x32d_cifar100': resnext272_2x32d_cifar100,
'resnext272_2x32d_svhn': resnext272_2x32d_svhn,
'seresnet20_cifar10': seresnet20_cifar10,
'seresnet20_cifar100': seresnet20_cifar100,
'seresnet20_svhn': seresnet20_svhn,
'seresnet56_cifar10': seresnet56_cifar10,
'seresnet56_cifar100': seresnet56_cifar100,
'seresnet56_svhn': seresnet56_svhn,
'seresnet110_cifar10': seresnet110_cifar10,
'seresnet110_cifar100': seresnet110_cifar100,
'seresnet110_svhn': seresnet110_svhn,
'seresnet164bn_cifar10': seresnet164bn_cifar10,
'seresnet164bn_cifar100': seresnet164bn_cifar100,
'seresnet164bn_svhn': seresnet164bn_svhn,
'seresnet272bn_cifar10': seresnet272bn_cifar10,
'seresnet272bn_cifar100': seresnet272bn_cifar100,
'seresnet272bn_svhn': seresnet272bn_svhn,
'seresnet542bn_cifar10': seresnet542bn_cifar10,
'seresnet542bn_cifar100': seresnet542bn_cifar100,
'seresnet542bn_svhn': seresnet542bn_svhn,
'seresnet1001_cifar10': seresnet1001_cifar10,
'seresnet1001_cifar100': seresnet1001_cifar100,
'seresnet1001_svhn': seresnet1001_svhn,
'seresnet1202_cifar10': seresnet1202_cifar10,
'seresnet1202_cifar100': seresnet1202_cifar100,
'seresnet1202_svhn': seresnet1202_svhn,
'sepreresnet20_cifar10': sepreresnet20_cifar10,
'sepreresnet20_cifar100': sepreresnet20_cifar100,
'sepreresnet20_svhn': sepreresnet20_svhn,
'sepreresnet56_cifar10': sepreresnet56_cifar10,
'sepreresnet56_cifar100': sepreresnet56_cifar100,
'sepreresnet56_svhn': sepreresnet56_svhn,
'sepreresnet110_cifar10': sepreresnet110_cifar10,
'sepreresnet110_cifar100': sepreresnet110_cifar100,
'sepreresnet110_svhn': sepreresnet110_svhn,
'sepreresnet164bn_cifar10': sepreresnet164bn_cifar10,
'sepreresnet164bn_cifar100': sepreresnet164bn_cifar100,
'sepreresnet164bn_svhn': sepreresnet164bn_svhn,
'sepreresnet272bn_cifar10': sepreresnet272bn_cifar10,
'sepreresnet272bn_cifar100': sepreresnet272bn_cifar100,
'sepreresnet272bn_svhn': sepreresnet272bn_svhn,
'sepreresnet542bn_cifar10': sepreresnet542bn_cifar10,
'sepreresnet542bn_cifar100': sepreresnet542bn_cifar100,
'sepreresnet542bn_svhn': sepreresnet542bn_svhn,
'sepreresnet1001_cifar10': sepreresnet1001_cifar10,
'sepreresnet1001_cifar100': sepreresnet1001_cifar100,
'sepreresnet1001_svhn': sepreresnet1001_svhn,
'sepreresnet1202_cifar10': sepreresnet1202_cifar10,
'sepreresnet1202_cifar100': sepreresnet1202_cifar100,
'sepreresnet1202_svhn': sepreresnet1202_svhn,
'pyramidnet110_a48_cifar10': pyramidnet110_a48_cifar10,
'pyramidnet110_a48_cifar100': pyramidnet110_a48_cifar100,
'pyramidnet110_a48_svhn': pyramidnet110_a48_svhn,
'pyramidnet110_a84_cifar10': pyramidnet110_a84_cifar10,
'pyramidnet110_a84_cifar100': pyramidnet110_a84_cifar100,
'pyramidnet110_a84_svhn': pyramidnet110_a84_svhn,
'pyramidnet110_a270_cifar10': pyramidnet110_a270_cifar10,
'pyramidnet110_a270_cifar100': pyramidnet110_a270_cifar100,
'pyramidnet110_a270_svhn': pyramidnet110_a270_svhn,
'pyramidnet164_a270_bn_cifar10': pyramidnet164_a270_bn_cifar10,
'pyramidnet164_a270_bn_cifar100': pyramidnet164_a270_bn_cifar100,
'pyramidnet164_a270_bn_svhn': pyramidnet164_a270_bn_svhn,
'pyramidnet200_a240_bn_cifar10': pyramidnet200_a240_bn_cifar10,
'pyramidnet200_a240_bn_cifar100': pyramidnet200_a240_bn_cifar100,
'pyramidnet200_a240_bn_svhn': pyramidnet200_a240_bn_svhn,
'pyramidnet236_a220_bn_cifar10': pyramidnet236_a220_bn_cifar10,
'pyramidnet236_a220_bn_cifar100': pyramidnet236_a220_bn_cifar100,
'pyramidnet236_a220_bn_svhn': pyramidnet236_a220_bn_svhn,
'pyramidnet272_a200_bn_cifar10': pyramidnet272_a200_bn_cifar10,
'pyramidnet272_a200_bn_cifar100': pyramidnet272_a200_bn_cifar100,
'pyramidnet272_a200_bn_svhn': pyramidnet272_a200_bn_svhn,
'densenet40_k12_cifar10': densenet40_k12_cifar10,
'densenet40_k12_cifar100': densenet40_k12_cifar100,
'densenet40_k12_svhn': densenet40_k12_svhn,
'densenet40_k12_bc_cifar10': densenet40_k12_bc_cifar10,
'densenet40_k12_bc_cifar100': densenet40_k12_bc_cifar100,
'densenet40_k12_bc_svhn': densenet40_k12_bc_svhn,
'densenet40_k24_bc_cifar10': densenet40_k24_bc_cifar10,
'densenet40_k24_bc_cifar100': densenet40_k24_bc_cifar100,
'densenet40_k24_bc_svhn': densenet40_k24_bc_svhn,
'densenet40_k36_bc_cifar10': densenet40_k36_bc_cifar10,
'densenet40_k36_bc_cifar100': densenet40_k36_bc_cifar100,
'densenet40_k36_bc_svhn': densenet40_k36_bc_svhn,
'densenet100_k12_cifar10': densenet100_k12_cifar10,
'densenet100_k12_cifar100': densenet100_k12_cifar100,
'densenet100_k12_svhn': densenet100_k12_svhn,
'densenet100_k24_cifar10': densenet100_k24_cifar10,
'densenet100_k24_cifar100': densenet100_k24_cifar100,
'densenet100_k24_svhn': densenet100_k24_svhn,
'densenet100_k12_bc_cifar10': densenet100_k12_bc_cifar10,
'densenet100_k12_bc_cifar100': densenet100_k12_bc_cifar100,
'densenet100_k12_bc_svhn': densenet100_k12_bc_svhn,
'densenet190_k40_bc_cifar10': densenet190_k40_bc_cifar10,
'densenet190_k40_bc_cifar100': densenet190_k40_bc_cifar100,
'densenet190_k40_bc_svhn': densenet190_k40_bc_svhn,
'densenet250_k24_bc_cifar10': densenet250_k24_bc_cifar10,
'densenet250_k24_bc_cifar100': densenet250_k24_bc_cifar100,
'densenet250_k24_bc_svhn': densenet250_k24_bc_svhn,
'xdensenet40_2_k24_bc_cifar10': xdensenet40_2_k24_bc_cifar10,
'xdensenet40_2_k24_bc_cifar100': xdensenet40_2_k24_bc_cifar100,
'xdensenet40_2_k24_bc_svhn': xdensenet40_2_k24_bc_svhn,
'xdensenet40_2_k36_bc_cifar10': xdensenet40_2_k36_bc_cifar10,
'xdensenet40_2_k36_bc_cifar100': xdensenet40_2_k36_bc_cifar100,
'xdensenet40_2_k36_bc_svhn': xdensenet40_2_k36_bc_svhn,
'wrn16_10_cifar10': wrn16_10_cifar10,
'wrn16_10_cifar100': wrn16_10_cifar100,
'wrn16_10_svhn': wrn16_10_svhn,
'wrn28_10_cifar10': wrn28_10_cifar10,
'wrn28_10_cifar100': wrn28_10_cifar100,
'wrn28_10_svhn': wrn28_10_svhn,
'wrn40_8_cifar10': wrn40_8_cifar10,
'wrn40_8_cifar100': wrn40_8_cifar100,
'wrn40_8_svhn': wrn40_8_svhn,
'wrn20_10_1bit_cifar10': wrn20_10_1bit_cifar10,
'wrn20_10_1bit_cifar100': wrn20_10_1bit_cifar100,
'wrn20_10_1bit_svhn': wrn20_10_1bit_svhn,
'wrn20_10_32bit_cifar10': wrn20_10_32bit_cifar10,
'wrn20_10_32bit_cifar100': wrn20_10_32bit_cifar100,
'wrn20_10_32bit_svhn': wrn20_10_32bit_svhn,
'ror3_56_cifar10': ror3_56_cifar10,
'ror3_56_cifar100': ror3_56_cifar100,
'ror3_56_svhn': ror3_56_svhn,
'ror3_110_cifar10': ror3_110_cifar10,
'ror3_110_cifar100': ror3_110_cifar100,
'ror3_110_svhn': ror3_110_svhn,
'ror3_164_cifar10': ror3_164_cifar10,
'ror3_164_cifar100': ror3_164_cifar100,
'ror3_164_svhn': ror3_164_svhn,
'rir_cifar10': rir_cifar10,
'rir_cifar100': rir_cifar100,
'rir_svhn': rir_svhn,
'msdnet22_cifar10': msdnet22_cifar10,
'resdropresnet20_cifar10': resdropresnet20_cifar10,
'resdropresnet20_cifar100': resdropresnet20_cifar100,
'resdropresnet20_svhn': resdropresnet20_svhn,
'shakeshakeresnet20_2x16d_cifar10': shakeshakeresnet20_2x16d_cifar10,
'shakeshakeresnet20_2x16d_cifar100': shakeshakeresnet20_2x16d_cifar100,
'shakeshakeresnet20_2x16d_svhn': shakeshakeresnet20_2x16d_svhn,
'shakeshakeresnet26_2x32d_cifar10': shakeshakeresnet26_2x32d_cifar10,
'shakeshakeresnet26_2x32d_cifar100': shakeshakeresnet26_2x32d_cifar100,
'shakeshakeresnet26_2x32d_svhn': shakeshakeresnet26_2x32d_svhn,
'shakedropresnet20_cifar10': shakedropresnet20_cifar10,
'shakedropresnet20_cifar100': shakedropresnet20_cifar100,
'shakedropresnet20_svhn': shakedropresnet20_svhn,
'fractalnet_cifar10': fractalnet_cifar10,
'fractalnet_cifar100': fractalnet_cifar100,
'diaresnet20_cifar10': diaresnet20_cifar10,
'diaresnet20_cifar100': diaresnet20_cifar100,
'diaresnet20_svhn': diaresnet20_svhn,
'diaresnet56_cifar10': diaresnet56_cifar10,
'diaresnet56_cifar100': diaresnet56_cifar100,
'diaresnet56_svhn': diaresnet56_svhn,
'diaresnet110_cifar10': diaresnet110_cifar10,
'diaresnet110_cifar100': diaresnet110_cifar100,
'diaresnet110_svhn': diaresnet110_svhn,
'diaresnet164bn_cifar10': diaresnet164bn_cifar10,
'diaresnet164bn_cifar100': diaresnet164bn_cifar100,
'diaresnet164bn_svhn': diaresnet164bn_svhn,
'diaresnet1001_cifar10': diaresnet1001_cifar10,
'diaresnet1001_cifar100': diaresnet1001_cifar100,
'diaresnet1001_svhn': diaresnet1001_svhn,
'diaresnet1202_cifar10': diaresnet1202_cifar10,
'diaresnet1202_cifar100': diaresnet1202_cifar100,
'diaresnet1202_svhn': diaresnet1202_svhn,
'diapreresnet20_cifar10': diapreresnet20_cifar10,
'diapreresnet20_cifar100': diapreresnet20_cifar100,
'diapreresnet20_svhn': diapreresnet20_svhn,
'diapreresnet56_cifar10': diapreresnet56_cifar10,
'diapreresnet56_cifar100': diapreresnet56_cifar100,
'diapreresnet56_svhn': diapreresnet56_svhn,
'diapreresnet110_cifar10': diapreresnet110_cifar10,
'diapreresnet110_cifar100': diapreresnet110_cifar100,
'diapreresnet110_svhn': diapreresnet110_svhn,
'diapreresnet164bn_cifar10': diapreresnet164bn_cifar10,
'diapreresnet164bn_cifar100': diapreresnet164bn_cifar100,
'diapreresnet164bn_svhn': diapreresnet164bn_svhn,
'diapreresnet1001_cifar10': diapreresnet1001_cifar10,
'diapreresnet1001_cifar100': diapreresnet1001_cifar100,
'diapreresnet1001_svhn': diapreresnet1001_svhn,
'diapreresnet1202_cifar10': diapreresnet1202_cifar10,
'diapreresnet1202_cifar100': diapreresnet1202_cifar100,
'diapreresnet1202_svhn': diapreresnet1202_svhn,
'isqrtcovresnet18': isqrtcovresnet18,
'isqrtcovresnet34': isqrtcovresnet34,
'isqrtcovresnet50': isqrtcovresnet50,
'isqrtcovresnet50b': isqrtcovresnet50b,
'isqrtcovresnet101': isqrtcovresnet101,
'isqrtcovresnet101b': isqrtcovresnet101b,
'resneta10': resneta10,
'resnetabc14b': resnetabc14b,
'resneta18': resneta18,
'resneta50b': resneta50b,
'resneta101b': resneta101b,
'resneta152b': resneta152b,
'resnetd50b': resnetd50b,
'resnetd101b': resnetd101b,
'resnetd152b': resnetd152b,
'fastseresnet101b': fastseresnet101b,
'octresnet10_ad2': octresnet10_ad2,
'octresnet50b_ad2': octresnet50b_ad2,
'resnet10_cub': resnet10_cub,
'resnet12_cub': resnet12_cub,
'resnet14_cub': resnet14_cub,
'resnetbc14b_cub': resnetbc14b_cub,
'resnet16_cub': resnet16_cub,
'resnet18_cub': resnet18_cub,
'resnet26_cub': resnet26_cub,
'resnetbc26b_cub': resnetbc26b_cub,
'resnet34_cub': resnet34_cub,
'resnetbc38b_cub': resnetbc38b_cub,
'resnet50_cub': resnet50_cub,
'resnet50b_cub': resnet50b_cub,
'resnet101_cub': resnet101_cub,
'resnet101b_cub': resnet101b_cub,
'resnet152_cub': resnet152_cub,
'resnet152b_cub': resnet152b_cub,
'resnet200_cub': resnet200_cub,
'resnet200b_cub': resnet200b_cub,
'seresnet10_cub': seresnet10_cub,
'seresnet12_cub': seresnet12_cub,
'seresnet14_cub': seresnet14_cub,
'seresnetbc14b_cub': seresnetbc14b_cub,
'seresnet16_cub': seresnet16_cub,
'seresnet18_cub': seresnet18_cub,
'seresnet26_cub': seresnet26_cub,
'seresnetbc26b_cub': seresnetbc26b_cub,
'seresnet34_cub': seresnet34_cub,
'seresnetbc38b_cub': seresnetbc38b_cub,
'seresnet50_cub': seresnet50_cub,
'seresnet50b_cub': seresnet50b_cub,
'seresnet101_cub': seresnet101_cub,
'seresnet101b_cub': seresnet101b_cub,
'seresnet152_cub': seresnet152_cub,
'seresnet152b_cub': seresnet152b_cub,
'seresnet200_cub': seresnet200_cub,
'seresnet200b_cub': seresnet200b_cub,
'mobilenet_w1_cub': mobilenet_w1_cub,
'mobilenet_w3d4_cub': mobilenet_w3d4_cub,
'mobilenet_wd2_cub': mobilenet_wd2_cub,
'mobilenet_wd4_cub': mobilenet_wd4_cub,
'fdmobilenet_w1_cub': fdmobilenet_w1_cub,
'fdmobilenet_w3d4_cub': fdmobilenet_w3d4_cub,
'fdmobilenet_wd2_cub': fdmobilenet_wd2_cub,
'fdmobilenet_wd4_cub': fdmobilenet_wd4_cub,
'proxylessnas_cpu_cub': proxylessnas_cpu_cub,
'proxylessnas_gpu_cub': proxylessnas_gpu_cub,
'proxylessnas_mobile_cub': proxylessnas_mobile_cub,
'proxylessnas_mobile14_cub': proxylessnas_mobile14_cub,
'ntsnet_cub': ntsnet_cub,
'fcn8sd_resnetd50b_voc': fcn8sd_resnetd50b_voc,
'fcn8sd_resnetd101b_voc': fcn8sd_resnetd101b_voc,
'fcn8sd_resnetd50b_coco': fcn8sd_resnetd50b_coco,
'fcn8sd_resnetd101b_coco': fcn8sd_resnetd101b_coco,
'fcn8sd_resnetd50b_ade20k': fcn8sd_resnetd50b_ade20k,
'fcn8sd_resnetd101b_ade20k': fcn8sd_resnetd101b_ade20k,
'fcn8sd_resnetd50b_cityscapes': fcn8sd_resnetd50b_cityscapes,
'fcn8sd_resnetd101b_cityscapes': fcn8sd_resnetd101b_cityscapes,
'pspnet_resnetd50b_voc': pspnet_resnetd50b_voc,
'pspnet_resnetd101b_voc': pspnet_resnetd101b_voc,
'pspnet_resnetd50b_coco': pspnet_resnetd50b_coco,
'pspnet_resnetd101b_coco': pspnet_resnetd101b_coco,
'pspnet_resnetd50b_ade20k': pspnet_resnetd50b_ade20k,
'pspnet_resnetd101b_ade20k': pspnet_resnetd101b_ade20k,
'pspnet_resnetd50b_cityscapes': pspnet_resnetd50b_cityscapes,
'pspnet_resnetd101b_cityscapes': pspnet_resnetd101b_cityscapes,
'deeplabv3_resnetd50b_voc': deeplabv3_resnetd50b_voc,
'deeplabv3_resnetd101b_voc': deeplabv3_resnetd101b_voc,
'deeplabv3_resnetd152b_voc': deeplabv3_resnetd152b_voc,
'deeplabv3_resnetd50b_coco': deeplabv3_resnetd50b_coco,
'deeplabv3_resnetd101b_coco': deeplabv3_resnetd101b_coco,
'deeplabv3_resnetd152b_coco': deeplabv3_resnetd152b_coco,
'deeplabv3_resnetd50b_ade20k': deeplabv3_resnetd50b_ade20k,
'deeplabv3_resnetd101b_ade20k': deeplabv3_resnetd101b_ade20k,
'deeplabv3_resnetd50b_cityscapes': deeplabv3_resnetd50b_cityscapes,
'deeplabv3_resnetd101b_cityscapes': deeplabv3_resnetd101b_cityscapes,
'icnet_resnetd50b_cityscapes': icnet_resnetd50b_cityscapes,
'fastscnn_cityscapes': fastscnn_cityscapes,
'cgnet_cityscapes': cgnet_cityscapes,
'dabnet_cityscapes': dabnet_cityscapes,
'sinet_cityscapes': sinet_cityscapes,
'bisenet_resnet18_celebamaskhq': bisenet_resnet18_celebamaskhq,
'danet_resnetd50b_cityscapes': danet_resnetd50b_cityscapes,
'danet_resnetd101b_cityscapes': danet_resnetd101b_cityscapes,
'fpenet_cityscapes': fpenet_cityscapes,
'ctxnet_cityscapes': ctxnet_cityscapes,
'lednet_cityscapes': lednet_cityscapes,
'esnet_cityscapes': esnet_cityscapes,
'edanet_cityscapes': edanet_cityscapes,
'enet_cityscapes': enet_cityscapes,
'erfnet_cityscapes': erfnet_cityscapes,
'linknet_cityscapes': linknet_cityscapes,
'segnet_cityscapes': segnet_cityscapes,
'unet_cityscapes': unet_cityscapes,
'sqnet_cityscapes': sqnet_cityscapes,
'alphapose_fastseresnet101b_coco': alphapose_fastseresnet101b_coco,
'simplepose_resnet18_coco': simplepose_resnet18_coco,
'simplepose_resnet50b_coco': simplepose_resnet50b_coco,
'simplepose_resnet101b_coco': simplepose_resnet101b_coco,
'simplepose_resnet152b_coco': simplepose_resnet152b_coco,
'simplepose_resneta50b_coco': simplepose_resneta50b_coco,
'simplepose_resneta101b_coco': simplepose_resneta101b_coco,
'simplepose_resneta152b_coco': simplepose_resneta152b_coco,
'simplepose_mobile_resnet18_coco': simplepose_mobile_resnet18_coco,
'simplepose_mobile_resnet50b_coco': simplepose_mobile_resnet50b_coco,
'simplepose_mobile_mobilenet_w1_coco': simplepose_mobile_mobilenet_w1_coco,
'simplepose_mobile_mobilenetv2b_w1_coco': simplepose_mobile_mobilenetv2b_w1_coco,
'simplepose_mobile_mobilenetv3_small_w1_coco': simplepose_mobile_mobilenetv3_small_w1_coco,
'simplepose_mobile_mobilenetv3_large_w1_coco': simplepose_mobile_mobilenetv3_large_w1_coco,
'lwopenpose2d_mobilenet_cmupan_coco': lwopenpose2d_mobilenet_cmupan_coco,
'lwopenpose3d_mobilenet_cmupan_coco': lwopenpose3d_mobilenet_cmupan_coco,
'ibppose_coco': ibppose_coco,
'prnet': prnet,
'centernet_resnet18_voc': centernet_resnet18_voc,
'centernet_resnet18_coco': centernet_resnet18_coco,
'centernet_resnet50b_voc': centernet_resnet50b_voc,
'centernet_resnet50b_coco': centernet_resnet50b_coco,
'centernet_resnet101b_voc': centernet_resnet101b_voc,
'centernet_resnet101b_coco': centernet_resnet101b_coco,
'lffd20x5s320v2_widerface': lffd20x5s320v2_widerface,
'lffd25x8s560v1_widerface': lffd25x8s560v1_widerface,
'pfpcnet': pfpcnet,
'voca8flame': voca8flame,
'nvpattexp116bazel76': nvpattexp116bazel76,
'superpointnet': superpointnet,
'jasper5x3': jasper5x3,
'jasper10x4': jasper10x4,
'jasper10x5': jasper10x5,
'jasperdr10x5_en': jasperdr10x5_en,
'jasperdr10x5_en_nr': jasperdr10x5_en_nr,
'quartznet5x5_en_ls': quartznet5x5_en_ls,
'quartznet15x5_en': quartznet15x5_en,
'quartznet15x5_en_nr': quartznet15x5_en_nr,
'quartznet15x5_fr': quartznet15x5_fr,
'quartznet15x5_de': quartznet15x5_de,
'quartznet15x5_it': quartznet15x5_it,
'quartznet15x5_es': quartznet15x5_es,
'quartznet15x5_ca': quartznet15x5_ca,
'quartznet15x5_pl': quartznet15x5_pl,
'quartznet15x5_ru': quartznet15x5_ru,
'quartznet15x5_ru34': quartznet15x5_ru34,
# 'oth_quartznet5x5_en_ls': oth_quartznet5x5_en_ls,
# 'oth_quartznet15x5_en': oth_quartznet15x5_en,
# 'oth_quartznet15x5_en_nr': oth_quartznet15x5_en_nr,
# 'oth_quartznet15x5_fr': oth_quartznet15x5_fr,
# 'oth_quartznet15x5_de': oth_quartznet15x5_de,
# 'oth_quartznet15x5_it': oth_quartznet15x5_it,
# 'oth_quartznet15x5_es': oth_quartznet15x5_es,
# 'oth_quartznet15x5_ca': oth_quartznet15x5_ca,
# 'oth_quartznet15x5_pl': oth_quartznet15x5_pl,
# 'oth_quartznet15x5_ru': oth_quartznet15x5_ru,
# 'oth_jasperdr10x5_en': oth_jasperdr10x5_en,
# 'oth_jasperdr10x5_en_nr': oth_jasperdr10x5_en_nr,
# 'oth_quartznet15x5_ru34': oth_quartznet15x5_ru34,
# 'oth_pose_coco_resnet_50_256x192': oth_pose_coco_resnet_50_256x192,
# 'oth_pose_coco_resnet_50_384x288': oth_pose_coco_resnet_50_384x288,
# 'oth_pose_coco_resnet_101_256x192': oth_pose_coco_resnet_101_256x192,
# 'oth_pose_coco_resnet_101_384x288': oth_pose_coco_resnet_101_384x288,
# 'oth_pose_coco_resnet_152_256x192': oth_pose_coco_resnet_152_256x192,
# 'oth_pose_coco_resnet_152_384x288': oth_pose_coco_resnet_152_384x288,
# 'oth_lwopenpose2d': oth_lwopenpose2d,
# 'oth_lwopenpose3d': oth_lwopenpose3d,
# 'oth_prnet': oth_prnet,
# 'oth_sinet_cityscapes': oth_sinet_cityscapes,
# 'oth_ibppose': oth_ibppose,
# 'oth_bisenet': oth_bisenet,
# 'oth_tresnet_m': oth_tresnet_m,
# 'tresnet_m': tresnet_m,
# 'oth_dabnet_cityscapes': oth_dabnet_cityscapes,
}
def get_model(name, **kwargs):
"""
Get supported model.
Parameters:
----------
name : str
Name of model.
Returns:
-------
Module
        Resulting model.
"""
name = name.lower()
if name not in _models:
raise ValueError("Unsupported model: {}".format(name))
net = _models[name](**kwargs)
return net
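# A minimal usage sketch (ImageNet-1K classifiers output 1000 logits):
#     import torch
#     net = get_model("resnet18", pretrained=False)
#     y = net(torch.zeros(1, 3, 224, 224))
#     assert y.shape == (1, 1000)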
| 43,673
| 34.363563
| 95
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/airnext.py
|
"""
AirNeXt for ImageNet-1K, implemented in PyTorch.
Original paper: 'Attention Inspiring Receptive-Fields Network for Learning Invariant Representations,'
https://ieeexplore.ieee.org/document/8510896.
"""
__all__ = ['AirNeXt', 'airnext50_32x4d_r2', 'airnext101_32x4d_r2', 'airnext101_32x4d_r16']
import os
import math
import torch.nn as nn
import torch.nn.init as init
from .common import conv1x1_block, conv3x3_block
from .airnet import AirBlock, AirInitBlock
class AirNeXtBottleneck(nn.Module):
"""
    AirNeXt bottleneck block for the residual path in an AirNeXt unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int
Strides of the convolution.
    cardinality : int
        Number of groups.
    bottleneck_width : int
        Width of bottleneck block.
    ratio : int
        Air compression ratio.
"""
def __init__(self,
in_channels,
out_channels,
stride,
cardinality,
bottleneck_width,
ratio):
super(AirNeXtBottleneck, self).__init__()
mid_channels = out_channels // 4
D = int(math.floor(mid_channels * (bottleneck_width / 64.0)))
group_width = cardinality * D
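        # apply the AIR attention branch only for stride-1 units in the
        # narrower (early) stages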
self.use_air_block = (stride == 1 and mid_channels < 512)
self.conv1 = conv1x1_block(
in_channels=in_channels,
out_channels=group_width)
self.conv2 = conv3x3_block(
in_channels=group_width,
out_channels=group_width,
stride=stride,
groups=cardinality)
self.conv3 = conv1x1_block(
in_channels=group_width,
out_channels=out_channels,
activation=None)
if self.use_air_block:
self.air = AirBlock(
in_channels=in_channels,
out_channels=group_width,
groups=(cardinality // ratio),
ratio=ratio)
def forward(self, x):
if self.use_air_block:
att = self.air(x)
x = self.conv1(x)
x = self.conv2(x)
if self.use_air_block:
x = x * att
x = self.conv3(x)
return x
class AirNeXtUnit(nn.Module):
"""
    AirNeXt unit with residual connection.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int
Strides of the convolution.
    cardinality : int
        Number of groups.
    bottleneck_width : int
        Width of bottleneck block.
    ratio : int
        Air compression ratio.
"""
def __init__(self,
in_channels,
out_channels,
stride,
cardinality,
bottleneck_width,
ratio):
super(AirNeXtUnit, self).__init__()
self.resize_identity = (in_channels != out_channels) or (stride != 1)
self.body = AirNeXtBottleneck(
in_channels=in_channels,
out_channels=out_channels,
stride=stride,
cardinality=cardinality,
bottleneck_width=bottleneck_width,
ratio=ratio)
if self.resize_identity:
self.identity_conv = conv1x1_block(
in_channels=in_channels,
out_channels=out_channels,
stride=stride,
activation=None)
self.activ = nn.ReLU(inplace=True)
def forward(self, x):
if self.resize_identity:
identity = self.identity_conv(x)
else:
identity = x
x = self.body(x)
x = x + identity
x = self.activ(x)
return x
class AirNeXt(nn.Module):
"""
    AirNeXt model from 'Attention Inspiring Receptive-Fields Network for Learning Invariant Representations,'
https://ieeexplore.ieee.org/document/8510896.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
cardinality: int
Number of groups.
bottleneck_width: int
Width of bottleneck block.
ratio: int
Air compression ratio.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
num_classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
cardinality,
bottleneck_width,
ratio,
in_channels=3,
in_size=(224, 224),
num_classes=1000):
super(AirNeXt, self).__init__()
self.in_size = in_size
self.num_classes = num_classes
self.features = nn.Sequential()
self.features.add_module("init_block", AirInitBlock(
in_channels=in_channels,
out_channels=init_block_channels))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = nn.Sequential()
for j, out_channels in enumerate(channels_per_stage):
stride = 2 if (j == 0) and (i != 0) else 1
stage.add_module("unit{}".format(j + 1), AirNeXtUnit(
in_channels=in_channels,
out_channels=out_channels,
stride=stride,
cardinality=cardinality,
bottleneck_width=bottleneck_width,
ratio=ratio))
in_channels = out_channels
self.features.add_module("stage{}".format(i + 1), stage)
self.features.add_module("final_pool", nn.AvgPool2d(
kernel_size=7,
stride=1))
self.output = nn.Linear(
in_features=in_channels,
out_features=num_classes)
self._init_params()
def _init_params(self):
for name, module in self.named_modules():
if isinstance(module, nn.Conv2d):
init.kaiming_uniform_(module.weight)
if module.bias is not None:
init.constant_(module.bias, 0)
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.output(x)
return x
def get_airnext(blocks,
cardinality,
bottleneck_width,
base_channels,
ratio,
model_name=None,
pretrained=False,
root=os.path.join("~", ".torch", "models"),
**kwargs):
"""
    Create AirNeXt model with specific parameters.
Parameters:
----------
blocks : int
Number of blocks.
cardinality: int
Number of groups.
bottleneck_width: int
Width of bottleneck block.
base_channels: int
Base number of channels.
ratio: int
Air compression ratio.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
if blocks == 50:
layers = [3, 4, 6, 3]
elif blocks == 101:
layers = [3, 4, 23, 3]
else:
raise ValueError("Unsupported AirNeXt with number of blocks: {}".format(blocks))
bottleneck_expansion = 4
init_block_channels = base_channels
channels_per_layers = [base_channels * (2 ** i) * bottleneck_expansion for i in range(len(layers))]
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
net = AirNeXt(
channels=channels,
init_block_channels=init_block_channels,
cardinality=cardinality,
bottleneck_width=bottleneck_width,
ratio=ratio,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import download_model
download_model(
net=net,
model_name=model_name,
local_model_store_dir_path=root)
return net
def airnext50_32x4d_r2(**kwargs):
"""
AirNeXt50-32x4d (r=2) model from 'Attention Inspiring Receptive-Fields Network for Learning Invariant
Representations,' https://ieeexplore.ieee.org/document/8510896.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_airnext(
blocks=50,
cardinality=32,
bottleneck_width=4,
base_channels=64,
ratio=2,
model_name="airnext50_32x4d_r2",
**kwargs)
def airnext101_32x4d_r2(**kwargs):
"""
AirNeXt101-32x4d (r=2) model from 'Attention Inspiring Receptive-Fields Network for Learning Invariant
Representations,' https://ieeexplore.ieee.org/document/8510896.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_airnext(
blocks=101,
cardinality=32,
bottleneck_width=4,
base_channels=64,
ratio=2,
model_name="airnext101_32x4d_r2",
**kwargs)
def airnext101_32x4d_r16(**kwargs):
"""
AirNeXt101-32x4d (r=16) model from 'Attention Inspiring Receptive-Fields Network for Learning Invariant
Representations,' https://ieeexplore.ieee.org/document/8510896.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_airnext(
blocks=101,
cardinality=32,
bottleneck_width=4,
base_channels=64,
ratio=16,
model_name="airnext101_32x4d_r16",
**kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
import torch
pretrained = False
models = [
airnext50_32x4d_r2,
airnext101_32x4d_r2,
airnext101_32x4d_r16,
]
for model in models:
net = model(pretrained=pretrained)
# net.train()
net.eval()
weight_count = _calc_width(net)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != airnext50_32x4d_r2 or weight_count == 27604296)
assert (model != airnext101_32x4d_r2 or weight_count == 54099272)
assert (model != airnext101_32x4d_r16 or weight_count == 45456456)
x = torch.randn(1, 3, 224, 224)
y = net(x)
y.sum().backward()
assert (tuple(y.size()) == (1, 1000))
if __name__ == "__main__":
_test()
| 11,535
| 29.041667
| 115
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/pspnet.py
|
"""
PSPNet for image segmentation, implemented in PyTorch.
Original paper: 'Pyramid Scene Parsing Network,' https://arxiv.org/abs/1612.01105.
"""
__all__ = ['PSPNet', 'pspnet_resnetd50b_voc', 'pspnet_resnetd101b_voc', 'pspnet_resnetd50b_coco',
'pspnet_resnetd101b_coco', 'pspnet_resnetd50b_ade20k', 'pspnet_resnetd101b_ade20k',
'pspnet_resnetd50b_cityscapes', 'pspnet_resnetd101b_cityscapes', 'PyramidPooling']
import os
import torch.nn as nn
import torch.nn.functional as F
from .common import conv1x1, conv1x1_block, conv3x3_block, Concurrent, Identity
from .resnetd import resnetd50b, resnetd101b
class PSPFinalBlock(nn.Module):
"""
PSPNet final block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
bottleneck_factor : int, default 4
Bottleneck factor.
"""
def __init__(self,
in_channels,
out_channels,
bottleneck_factor=4):
super(PSPFinalBlock, self).__init__()
assert (in_channels % bottleneck_factor == 0)
mid_channels = in_channels // bottleneck_factor
self.conv1 = conv3x3_block(
in_channels=in_channels,
out_channels=mid_channels)
self.dropout = nn.Dropout(p=0.1, inplace=False)
self.conv2 = conv1x1(
in_channels=mid_channels,
out_channels=out_channels,
bias=True)
def forward(self, x, out_size):
x = self.conv1(x)
x = self.dropout(x)
x = self.conv2(x)
x = F.interpolate(x, size=out_size, mode="bilinear", align_corners=True)
return x
class PyramidPoolingBranch(nn.Module):
"""
Pyramid Pooling branch.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
pool_out_size : int
Target output size of the image.
upscale_out_size : tuple of 2 int
Spatial size of output image for the bilinear upsampling operation.
"""
def __init__(self,
in_channels,
out_channels,
pool_out_size,
upscale_out_size):
super(PyramidPoolingBranch, self).__init__()
self.upscale_out_size = upscale_out_size
self.pool = nn.AdaptiveAvgPool2d(pool_out_size)
self.conv = conv1x1_block(
in_channels=in_channels,
out_channels=out_channels)
def forward(self, x):
in_size = self.upscale_out_size if self.upscale_out_size is not None else x.shape[2:]
x = self.pool(x)
x = self.conv(x)
x = F.interpolate(x, size=in_size, mode="bilinear", align_corners=True)
return x
class PyramidPooling(nn.Module):
"""
Pyramid Pooling module.
Parameters:
----------
in_channels : int
Number of input channels.
upscale_out_size : tuple of 2 int
Spatial size of the input tensor for the bilinear upsampling operation.
"""
def __init__(self,
in_channels,
upscale_out_size):
super(PyramidPooling, self).__init__()
pool_out_sizes = [1, 2, 3, 6]
assert (len(pool_out_sizes) == 4)
assert (in_channels % 4 == 0)
mid_channels = in_channels // 4
self.branches = Concurrent()
self.branches.add_module("branch1", Identity())
for i, pool_out_size in enumerate(pool_out_sizes):
self.branches.add_module("branch{}".format(i + 2), PyramidPoolingBranch(
in_channels=in_channels,
out_channels=mid_channels,
pool_out_size=pool_out_size,
upscale_out_size=upscale_out_size))
def forward(self, x):
x = self.branches(x)
return x
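# Illustrative helper, not part of the library API. Hedged shape sketch: the identity
# branch keeps all `in_channels` and each of the four pooled branches adds
# `in_channels // 4`, so the concatenated output has 2 * in_channels channels (hence
# `pool_out_channels = 2 * backbone_out_channels` in PSPNet below).
def _example_pyramid_pooling():
    import torch
    ppm = PyramidPooling(in_channels=2048, upscale_out_size=(60, 60))
    ppm.eval()  # eval mode: the 1x1-pooled branch cannot be batch-normalized in training with batch size 1
    x = torch.randn(1, 2048, 60, 60)
    y = ppm(x)
    assert tuple(y.shape) == (1, 4096, 60, 60)
    return y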
class PSPNet(nn.Module):
"""
PSPNet model from 'Pyramid Scene Parsing Network,' https://arxiv.org/abs/1612.01105.
Parameters:
----------
backbone : nn.Sequential
Feature extractor.
backbone_out_channels : int, default 2048
        Number of output channels from the feature extractor.
aux : bool, default False
Whether to output an auxiliary result.
fixed_size : bool, default True
Whether to expect fixed spatial size of input image.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (480, 480)
Spatial size of the expected input image.
num_classes : int, default 21
Number of segmentation classes.
"""
def __init__(self,
backbone,
backbone_out_channels=2048,
aux=False,
fixed_size=True,
in_channels=3,
in_size=(480, 480),
num_classes=21):
super(PSPNet, self).__init__()
assert (in_channels > 0)
assert ((in_size[0] % 8 == 0) and (in_size[1] % 8 == 0))
self.in_size = in_size
self.num_classes = num_classes
self.aux = aux
self.fixed_size = fixed_size
self.backbone = backbone
pool_out_size = (self.in_size[0] // 8, self.in_size[1] // 8) if fixed_size else None
self.pool = PyramidPooling(
in_channels=backbone_out_channels,
upscale_out_size=pool_out_size)
pool_out_channels = 2 * backbone_out_channels
self.final_block = PSPFinalBlock(
in_channels=pool_out_channels,
out_channels=num_classes,
bottleneck_factor=8)
if self.aux:
aux_out_channels = backbone_out_channels // 2
self.aux_block = PSPFinalBlock(
in_channels=aux_out_channels,
out_channels=num_classes,
bottleneck_factor=4)
self._init_params()
def _init_params(self):
for name, module in self.named_modules():
if isinstance(module, nn.Conv2d):
nn.init.kaiming_uniform_(module.weight)
if module.bias is not None:
nn.init.constant_(module.bias, 0)
def forward(self, x):
in_size = self.in_size if self.fixed_size else x.shape[2:]
x, y = self.backbone(x)
x = self.pool(x)
x = self.final_block(x, in_size)
if self.aux:
y = self.aux_block(y, in_size)
return x, y
else:
return x
def get_pspnet(backbone,
num_classes,
aux=False,
model_name=None,
pretrained=False,
root=os.path.join("~", ".torch", "models"),
**kwargs):
"""
Create PSPNet model with specific parameters.
Parameters:
----------
backbone : nn.Sequential
Feature extractor.
num_classes : int
Number of segmentation classes.
aux : bool, default False
Whether to output an auxiliary result.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
net = PSPNet(
backbone=backbone,
num_classes=num_classes,
aux=aux,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import download_model
download_model(
net=net,
model_name=model_name,
local_model_store_dir_path=root)
return net
def pspnet_resnetd50b_voc(pretrained_backbone=False, num_classes=21, aux=True, **kwargs):
"""
PSPNet model on the base of ResNet(D)-50b for Pascal VOC from 'Pyramid Scene Parsing Network,'
https://arxiv.org/abs/1612.01105.
Parameters:
----------
pretrained_backbone : bool, default False
Whether to load the pretrained weights for feature extractor.
num_classes : int, default 21
Number of segmentation classes.
aux : bool, default True
Whether to output an auxiliary result.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
backbone = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features
del backbone[-1]
return get_pspnet(backbone=backbone, num_classes=num_classes, aux=aux, model_name="pspnet_resnetd50b_voc", **kwargs)
def pspnet_resnetd101b_voc(pretrained_backbone=False, num_classes=21, aux=True, **kwargs):
"""
PSPNet model on the base of ResNet(D)-101b for Pascal VOC from 'Pyramid Scene Parsing Network,'
https://arxiv.org/abs/1612.01105.
Parameters:
----------
pretrained_backbone : bool, default False
Whether to load the pretrained weights for feature extractor.
num_classes : int, default 21
Number of segmentation classes.
aux : bool, default True
Whether to output an auxiliary result.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
backbone = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features
del backbone[-1]
return get_pspnet(backbone=backbone, num_classes=num_classes, aux=aux, model_name="pspnet_resnetd101b_voc",
**kwargs)
def pspnet_resnetd50b_coco(pretrained_backbone=False, num_classes=21, aux=True, **kwargs):
"""
PSPNet model on the base of ResNet(D)-50b for COCO from 'Pyramid Scene Parsing Network,'
https://arxiv.org/abs/1612.01105.
Parameters:
----------
pretrained_backbone : bool, default False
Whether to load the pretrained weights for feature extractor.
num_classes : int, default 21
Number of segmentation classes.
aux : bool, default True
Whether to output an auxiliary result.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
backbone = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features
del backbone[-1]
return get_pspnet(backbone=backbone, num_classes=num_classes, aux=aux, model_name="pspnet_resnetd50b_coco",
**kwargs)
def pspnet_resnetd101b_coco(pretrained_backbone=False, num_classes=21, aux=True, **kwargs):
"""
PSPNet model on the base of ResNet(D)-101b for COCO from 'Pyramid Scene Parsing Network,'
https://arxiv.org/abs/1612.01105.
Parameters:
----------
pretrained_backbone : bool, default False
Whether to load the pretrained weights for feature extractor.
num_classes : int, default 21
Number of segmentation classes.
aux : bool, default True
Whether to output an auxiliary result.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
backbone = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features
del backbone[-1]
return get_pspnet(backbone=backbone, num_classes=num_classes, aux=aux, model_name="pspnet_resnetd101b_coco",
**kwargs)
def pspnet_resnetd50b_ade20k(pretrained_backbone=False, num_classes=150, aux=True, **kwargs):
"""
PSPNet model on the base of ResNet(D)-50b for ADE20K from 'Pyramid Scene Parsing Network,'
https://arxiv.org/abs/1612.01105.
Parameters:
----------
pretrained_backbone : bool, default False
Whether to load the pretrained weights for feature extractor.
num_classes : int, default 150
Number of segmentation classes.
aux : bool, default True
Whether to output an auxiliary result.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
backbone = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features
del backbone[-1]
return get_pspnet(backbone=backbone, num_classes=num_classes, aux=aux, model_name="pspnet_resnetd50b_ade20k",
**kwargs)
def pspnet_resnetd101b_ade20k(pretrained_backbone=False, num_classes=150, aux=True, **kwargs):
"""
PSPNet model on the base of ResNet(D)-101b for ADE20K from 'Pyramid Scene Parsing Network,'
https://arxiv.org/abs/1612.01105.
Parameters:
----------
pretrained_backbone : bool, default False
Whether to load the pretrained weights for feature extractor.
num_classes : int, default 150
Number of segmentation classes.
aux : bool, default True
Whether to output an auxiliary result.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
backbone = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features
del backbone[-1]
return get_pspnet(backbone=backbone, num_classes=num_classes, aux=aux, model_name="pspnet_resnetd101b_ade20k",
**kwargs)
def pspnet_resnetd50b_cityscapes(pretrained_backbone=False, num_classes=19, aux=True, **kwargs):
"""
PSPNet model on the base of ResNet(D)-50b for Cityscapes from 'Pyramid Scene Parsing Network,'
https://arxiv.org/abs/1612.01105.
Parameters:
----------
pretrained_backbone : bool, default False
Whether to load the pretrained weights for feature extractor.
num_classes : int, default 19
Number of segmentation classes.
aux : bool, default True
Whether to output an auxiliary result.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
backbone = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features
del backbone[-1]
return get_pspnet(backbone=backbone, num_classes=num_classes, aux=aux, model_name="pspnet_resnetd50b_cityscapes",
**kwargs)
def pspnet_resnetd101b_cityscapes(pretrained_backbone=False, num_classes=19, aux=True, **kwargs):
"""
PSPNet model on the base of ResNet(D)-101b for Cityscapes from 'Pyramid Scene Parsing Network,'
https://arxiv.org/abs/1612.01105.
Parameters:
----------
pretrained_backbone : bool, default False
Whether to load the pretrained weights for feature extractor.
num_classes : int, default 19
Number of segmentation classes.
aux : bool, default True
Whether to output an auxiliary result.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
backbone = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features
del backbone[-1]
return get_pspnet(backbone=backbone, num_classes=num_classes, aux=aux, model_name="pspnet_resnetd101b_cityscapes",
**kwargs)
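# Illustrative helper, not part of the library API. Hedged sketch: a randomly
# initialized PSPNet on the ResNet(D)-50b backbone, single forward pass; the input
# sides must be divisible by 8 (asserted in PSPNet.__init__).
def _example_pspnet_forward():
    import torch
    net = pspnet_resnetd50b_voc(pretrained=False, aux=False)
    net.eval()
    x = torch.randn(1, 3, 480, 480)
    with torch.no_grad():
        y = net(x)
    assert tuple(y.shape) == (1, 21, 480, 480)
    return y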
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
import torch
in_size = (480, 480)
aux = False
pretrained = False
models = [
(pspnet_resnetd50b_voc, 21),
(pspnet_resnetd101b_voc, 21),
(pspnet_resnetd50b_coco, 21),
(pspnet_resnetd101b_coco, 21),
(pspnet_resnetd50b_ade20k, 150),
(pspnet_resnetd101b_ade20k, 150),
(pspnet_resnetd50b_cityscapes, 19),
(pspnet_resnetd101b_cityscapes, 19),
]
for model, num_classes in models:
net = model(pretrained=pretrained, in_size=in_size, aux=aux)
# net.train()
net.eval()
weight_count = _calc_width(net)
print("m={}, {}".format(model.__name__, weight_count))
if aux:
assert (model != pspnet_resnetd50b_voc or weight_count == 49081578)
assert (model != pspnet_resnetd101b_voc or weight_count == 68073706)
assert (model != pspnet_resnetd50b_coco or weight_count == 49081578)
assert (model != pspnet_resnetd101b_coco or weight_count == 68073706)
assert (model != pspnet_resnetd50b_ade20k or weight_count == 49180908)
assert (model != pspnet_resnetd101b_ade20k or weight_count == 68173036)
assert (model != pspnet_resnetd50b_cityscapes or weight_count == 49080038)
assert (model != pspnet_resnetd101b_cityscapes or weight_count == 68072166)
else:
assert (model != pspnet_resnetd50b_voc or weight_count == 46716373)
assert (model != pspnet_resnetd101b_voc or weight_count == 65708501)
assert (model != pspnet_resnetd50b_coco or weight_count == 46716373)
assert (model != pspnet_resnetd101b_coco or weight_count == 65708501)
assert (model != pspnet_resnetd50b_ade20k or weight_count == 46782550)
assert (model != pspnet_resnetd101b_ade20k or weight_count == 65774678)
assert (model != pspnet_resnetd50b_cityscapes or weight_count == 46715347)
assert (model != pspnet_resnetd101b_cityscapes or weight_count == 65707475)
x = torch.randn(1, 3, in_size[0], in_size[1])
ys = net(x)
y = ys[0] if aux else ys
y.sum().backward()
assert ((y.size(0) == x.size(0)) and (y.size(1) == num_classes) and (y.size(2) == x.size(2)) and
(y.size(3) == x.size(3)))
if __name__ == "__main__":
_test()
| 18,380
| 35.909639
| 120
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/dla.py
|
"""
DLA for ImageNet-1K, implemented in PyTorch.
Original paper: 'Deep Layer Aggregation,' https://arxiv.org/abs/1707.06484.
"""
__all__ = ['DLA', 'dla34', 'dla46c', 'dla46xc', 'dla60', 'dla60x', 'dla60xc', 'dla102', 'dla102x', 'dla102x2', 'dla169']
import os
import torch
import torch.nn as nn
import torch.nn.init as init
from .common import conv1x1, conv1x1_block, conv3x3_block, conv7x7_block
from .resnet import ResBlock, ResBottleneck
from .resnext import ResNeXtBottleneck
class DLABottleneck(ResBottleneck):
"""
DLA bottleneck block for residual path in residual block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int
Strides of the convolution.
bottleneck_factor : int, default 2
Bottleneck factor.
"""
def __init__(self,
in_channels,
out_channels,
stride,
bottleneck_factor=2):
super(DLABottleneck, self).__init__(
in_channels=in_channels,
out_channels=out_channels,
stride=stride,
bottleneck_factor=bottleneck_factor)
class DLABottleneckX(ResNeXtBottleneck):
"""
DLA ResNeXt-like bottleneck block for residual path in residual block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int
Strides of the convolution.
cardinality: int, default 32
Number of groups.
bottleneck_width: int, default 8
Width of bottleneck block.
"""
def __init__(self,
in_channels,
out_channels,
stride,
cardinality=32,
bottleneck_width=8):
super(DLABottleneckX, self).__init__(
in_channels=in_channels,
out_channels=out_channels,
stride=stride,
cardinality=cardinality,
bottleneck_width=bottleneck_width)
class DLAResBlock(nn.Module):
"""
DLA residual block with residual connection.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int
Strides of the convolution.
body_class : nn.Module, default ResBlock
Residual block body class.
return_down : bool, default False
        Whether to return the downsample result.
"""
def __init__(self,
in_channels,
out_channels,
stride,
body_class=ResBlock,
return_down=False):
super(DLAResBlock, self).__init__()
self.return_down = return_down
self.downsample = (stride > 1)
self.project = (in_channels != out_channels)
self.body = body_class(
in_channels=in_channels,
out_channels=out_channels,
stride=stride)
self.activ = nn.ReLU(inplace=True)
if self.downsample:
self.downsample_pool = nn.MaxPool2d(
kernel_size=stride,
stride=stride)
if self.project:
self.project_conv = conv1x1_block(
in_channels=in_channels,
out_channels=out_channels,
activation=None)
def forward(self, x):
down = self.downsample_pool(x) if self.downsample else x
identity = self.project_conv(down) if self.project else down
if identity is None:
identity = x
x = self.body(x)
x += identity
x = self.activ(x)
if self.return_down:
return x, down
else:
return x
class DLARoot(nn.Module):
"""
DLA root block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
residual : bool
        Whether to use a residual connection.
"""
def __init__(self,
in_channels,
out_channels,
residual):
super(DLARoot, self).__init__()
self.residual = residual
self.conv = conv1x1_block(
in_channels=in_channels,
out_channels=out_channels,
activation=None)
self.activ = nn.ReLU(inplace=True)
def forward(self, x2, x1, extra):
last_branch = x2
x = torch.cat((x2, x1) + tuple(extra), dim=1)
x = self.conv(x)
if self.residual:
x += last_branch
x = self.activ(x)
return x
class DLATree(nn.Module):
"""
    DLA tree unit. It acts as an iterative stage.
Parameters:
----------
levels : int
Number of levels in the stage.
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
res_body_class : nn.Module
Residual block body class.
stride : int or tuple/list of 2 int
Strides of the convolution in a residual block.
root_residual : bool
        Whether to use a residual connection in the root.
root_dim : int
Number of input channels in the root block.
first_tree : bool, default False
        Whether this tree stage is the first stage in the net.
input_level : bool, default True
        Whether this tree unit is the first unit in the stage.
return_down : bool, default False
        Whether to return the downsample result.
"""
def __init__(self,
levels,
in_channels,
out_channels,
res_body_class,
stride,
root_residual,
root_dim=0,
first_tree=False,
input_level=True,
return_down=False):
super(DLATree, self).__init__()
self.return_down = return_down
self.add_down = (input_level and not first_tree)
self.root_level = (levels == 1)
if root_dim == 0:
root_dim = 2 * out_channels
if self.add_down:
root_dim += in_channels
if self.root_level:
self.tree1 = DLAResBlock(
in_channels=in_channels,
out_channels=out_channels,
stride=stride,
body_class=res_body_class,
return_down=True)
self.tree2 = DLAResBlock(
in_channels=out_channels,
out_channels=out_channels,
stride=1,
body_class=res_body_class,
return_down=False)
else:
self.tree1 = DLATree(
levels=levels - 1,
in_channels=in_channels,
out_channels=out_channels,
res_body_class=res_body_class,
stride=stride,
root_residual=root_residual,
root_dim=0,
input_level=False,
return_down=True)
self.tree2 = DLATree(
levels=levels - 1,
in_channels=out_channels,
out_channels=out_channels,
res_body_class=res_body_class,
stride=1,
root_residual=root_residual,
root_dim=root_dim + out_channels,
input_level=False,
return_down=False)
if self.root_level:
self.root = DLARoot(
in_channels=root_dim,
out_channels=out_channels,
residual=root_residual)
def forward(self, x, extra=None):
extra = [] if extra is None else extra
x1, down = self.tree1(x)
if self.add_down:
extra.append(down)
if self.root_level:
x2 = self.tree2(x1)
x = self.root(x2, x1, extra)
else:
extra.append(x1)
x = self.tree2(x1, extra)
if self.return_down:
return x, down
else:
return x
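# Illustrative helper, not part of the library API. Hedged shape sketch: with stride=2
# only the deepest-left residual block downsamples, so a two-level tree maps
# (C_in, H, W) to (C_out, H / 2, W / 2).
def _example_dla_tree():
    tree = DLATree(
        levels=2,
        in_channels=64,
        out_channels=128,
        res_body_class=ResBlock,
        stride=2,
        root_residual=False,
        first_tree=True)
    tree.eval()
    x = torch.randn(1, 64, 56, 56)
    y = tree(x)
    assert tuple(y.shape) == (1, 128, 28, 28)
    return y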
class DLAInitBlock(nn.Module):
"""
DLA specific initial block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
"""
def __init__(self,
in_channels,
out_channels):
super(DLAInitBlock, self).__init__()
mid_channels = out_channels // 2
self.conv1 = conv7x7_block(
in_channels=in_channels,
out_channels=mid_channels)
self.conv2 = conv3x3_block(
in_channels=mid_channels,
out_channels=mid_channels)
self.conv3 = conv3x3_block(
in_channels=mid_channels,
out_channels=out_channels,
stride=2)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
return x
class DLA(nn.Module):
"""
DLA model from 'Deep Layer Aggregation,' https://arxiv.org/abs/1707.06484.
Parameters:
----------
    levels : list of int
        Number of levels in each stage.
channels : list of int
Number of output channels for each stage.
init_block_channels : int
Number of output channels for the initial unit.
res_body_class : nn.Module
Residual block body class.
residual_root : bool
        Whether to use residual connections in the root blocks.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
num_classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
levels,
channels,
init_block_channels,
res_body_class,
residual_root,
in_channels=3,
in_size=(224, 224),
num_classes=1000):
super(DLA, self).__init__()
self.in_size = in_size
self.num_classes = num_classes
self.features = nn.Sequential()
self.features.add_module("init_block", DLAInitBlock(
in_channels=in_channels,
out_channels=init_block_channels))
in_channels = init_block_channels
for i in range(len(levels)):
levels_i = levels[i]
out_channels = channels[i]
first_tree = (i == 0)
self.features.add_module("stage{}".format(i + 1), DLATree(
levels=levels_i,
in_channels=in_channels,
out_channels=out_channels,
res_body_class=res_body_class,
stride=2,
root_residual=residual_root,
first_tree=first_tree))
in_channels = out_channels
self.features.add_module("final_pool", nn.AvgPool2d(
kernel_size=7,
stride=1))
self.output = conv1x1(
in_channels=in_channels,
out_channels=num_classes,
bias=True)
self._init_params()
def _init_params(self):
for name, module in self.named_modules():
if isinstance(module, nn.Conv2d):
init.kaiming_uniform_(module.weight)
if module.bias is not None:
init.constant_(module.bias, 0)
def forward(self, x):
x = self.features(x)
x = self.output(x)
x = x.view(x.size(0), -1)
return x
def get_dla(levels,
channels,
res_body_class,
residual_root=False,
model_name=None,
pretrained=False,
root=os.path.join("~", ".torch", "models"),
**kwargs):
"""
Create DLA model with specific parameters.
Parameters:
----------
    levels : list of int
        Number of levels in each stage.
channels : list of int
Number of output channels for each stage.
res_body_class : nn.Module
Residual block body class.
residual_root : bool, default False
        Whether to use residual connections in the root blocks.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
init_block_channels = 32
net = DLA(
levels=levels,
channels=channels,
init_block_channels=init_block_channels,
res_body_class=res_body_class,
residual_root=residual_root,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import download_model
download_model(
net=net,
model_name=model_name,
local_model_store_dir_path=root)
return net
def dla34(**kwargs):
"""
DLA-34 model from 'Deep Layer Aggregation,' https://arxiv.org/abs/1707.06484.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_dla(levels=[1, 2, 2, 1], channels=[64, 128, 256, 512], res_body_class=ResBlock, model_name="dla34",
**kwargs)
def dla46c(**kwargs):
"""
DLA-46-C model from 'Deep Layer Aggregation,' https://arxiv.org/abs/1707.06484.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_dla(levels=[1, 2, 2, 1], channels=[64, 64, 128, 256], res_body_class=DLABottleneck, model_name="dla46c",
**kwargs)
def dla46xc(**kwargs):
"""
DLA-X-46-C model from 'Deep Layer Aggregation,' https://arxiv.org/abs/1707.06484.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_dla(levels=[1, 2, 2, 1], channels=[64, 64, 128, 256], res_body_class=DLABottleneckX,
model_name="dla46xc", **kwargs)
def dla60(**kwargs):
"""
DLA-60 model from 'Deep Layer Aggregation,' https://arxiv.org/abs/1707.06484.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_dla(levels=[1, 2, 3, 1], channels=[128, 256, 512, 1024], res_body_class=DLABottleneck,
model_name="dla60", **kwargs)
def dla60x(**kwargs):
"""
DLA-X-60 model from 'Deep Layer Aggregation,' https://arxiv.org/abs/1707.06484.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_dla(levels=[1, 2, 3, 1], channels=[128, 256, 512, 1024], res_body_class=DLABottleneckX,
model_name="dla60x", **kwargs)
def dla60xc(**kwargs):
"""
DLA-X-60-C model from 'Deep Layer Aggregation,' https://arxiv.org/abs/1707.06484.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_dla(levels=[1, 2, 3, 1], channels=[64, 64, 128, 256], res_body_class=DLABottleneckX,
model_name="dla60xc", **kwargs)
def dla102(**kwargs):
"""
DLA-102 model from 'Deep Layer Aggregation,' https://arxiv.org/abs/1707.06484.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_dla(levels=[1, 3, 4, 1], channels=[128, 256, 512, 1024], res_body_class=DLABottleneck,
residual_root=True, model_name="dla102", **kwargs)
def dla102x(**kwargs):
"""
DLA-X-102 model from 'Deep Layer Aggregation,' https://arxiv.org/abs/1707.06484.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_dla(levels=[1, 3, 4, 1], channels=[128, 256, 512, 1024], res_body_class=DLABottleneckX,
residual_root=True, model_name="dla102x", **kwargs)
def dla102x2(**kwargs):
"""
DLA-X2-102 model from 'Deep Layer Aggregation,' https://arxiv.org/abs/1707.06484.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
class DLABottleneckX64(DLABottleneckX):
def __init__(self, in_channels, out_channels, stride):
super(DLABottleneckX64, self).__init__(in_channels, out_channels, stride, cardinality=64)
return get_dla(levels=[1, 3, 4, 1], channels=[128, 256, 512, 1024], res_body_class=DLABottleneckX64,
residual_root=True, model_name="dla102x2", **kwargs)
def dla169(**kwargs):
"""
DLA-169 model from 'Deep Layer Aggregation,' https://arxiv.org/abs/1707.06484.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_dla(levels=[2, 3, 5, 1], channels=[128, 256, 512, 1024], res_body_class=DLABottleneck,
residual_root=True, model_name="dla169", **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
import torch
pretrained = False
models = [
dla34,
dla46c,
dla46xc,
dla60,
dla60x,
dla60xc,
dla102,
dla102x,
dla102x2,
dla169,
]
for model in models:
net = model(pretrained=pretrained)
# net.train()
net.eval()
weight_count = _calc_width(net)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != dla34 or weight_count == 15742104)
assert (model != dla46c or weight_count == 1301400)
assert (model != dla46xc or weight_count == 1068440)
assert (model != dla60 or weight_count == 22036632)
assert (model != dla60x or weight_count == 17352344)
assert (model != dla60xc or weight_count == 1319832)
assert (model != dla102 or weight_count == 33268888)
assert (model != dla102x or weight_count == 26309272)
assert (model != dla102x2 or weight_count == 41282200)
assert (model != dla169 or weight_count == 53389720)
x = torch.randn(1, 3, 224, 224)
y = net(x)
y.sum().backward()
assert (tuple(y.size()) == (1, 1000))
if __name__ == "__main__":
_test()
| 19,884
| 29.734158
| 120
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/proxylessnas.py
|
"""
ProxylessNAS for ImageNet-1K, implemented in PyTorch.
Original paper: 'ProxylessNAS: Direct Neural Architecture Search on Target Task and Hardware,'
https://arxiv.org/abs/1812.00332.
"""
__all__ = ['ProxylessNAS', 'proxylessnas_cpu', 'proxylessnas_gpu', 'proxylessnas_mobile', 'proxylessnas_mobile14',
'ProxylessUnit', 'get_proxylessnas']
import os
import torch.nn as nn
import torch.nn.init as init
from .common import ConvBlock, conv1x1_block, conv3x3_block
class ProxylessBlock(nn.Module):
"""
ProxylessNAS block for residual path in ProxylessNAS unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int
Convolution window size.
stride : int
Strides of the convolution.
bn_eps : float
Small float added to variance in Batch norm.
expansion : int
Expansion ratio.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride,
bn_eps,
expansion):
super(ProxylessBlock, self).__init__()
self.use_bc = (expansion > 1)
mid_channels = in_channels * expansion
if self.use_bc:
self.bc_conv = conv1x1_block(
in_channels=in_channels,
out_channels=mid_channels,
bn_eps=bn_eps,
activation="relu6")
padding = (kernel_size - 1) // 2
self.dw_conv = ConvBlock(
in_channels=mid_channels,
out_channels=mid_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
groups=mid_channels,
bn_eps=bn_eps,
activation="relu6")
self.pw_conv = conv1x1_block(
in_channels=mid_channels,
out_channels=out_channels,
bn_eps=bn_eps,
activation=None)
def forward(self, x):
if self.use_bc:
x = self.bc_conv(x)
x = self.dw_conv(x)
x = self.pw_conv(x)
return x
class ProxylessUnit(nn.Module):
"""
ProxylessNAS unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int
Convolution window size for body block.
stride : int
Strides of the convolution.
bn_eps : float
Small float added to variance in Batch norm.
expansion : int
Expansion ratio for body block.
residual : bool
Whether to use residual branch.
shortcut : bool
Whether to use identity branch.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride,
bn_eps,
expansion,
residual,
shortcut):
super(ProxylessUnit, self).__init__()
assert (residual or shortcut)
self.residual = residual
self.shortcut = shortcut
if self.residual:
self.body = ProxylessBlock(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
bn_eps=bn_eps,
expansion=expansion)
def forward(self, x):
if not self.residual:
return x
if not self.shortcut:
return self.body(x)
identity = x
x = self.body(x)
x = identity + x
return x
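# Illustrative helper, not part of the library API. Hedged shape sketch: with both
# flags set the unit acts as a standard inverted-residual block, so input and output
# shapes must match for the elementwise sum.
def _example_proxyless_unit():
    import torch
    unit = ProxylessUnit(
        in_channels=32,
        out_channels=32,
        kernel_size=3,
        stride=1,
        bn_eps=1e-3,
        expansion=3,
        residual=True,
        shortcut=True)
    unit.eval()
    x = torch.randn(1, 32, 56, 56)
    y = unit(x)
    assert y.shape == x.shape
    return y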
class ProxylessNAS(nn.Module):
"""
ProxylessNAS model from 'ProxylessNAS: Direct Neural Architecture Search on Target Task and Hardware,'
https://arxiv.org/abs/1812.00332.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
final_block_channels : int
Number of output channels for the final unit.
residuals : list of list of int
Whether to use residual branch in units.
shortcuts : list of list of int
Whether to use identity branch in units.
kernel_sizes : list of list of int
        Convolution window size for each unit.
expansions : list of list of int
        Expansion ratio for each unit.
bn_eps : float, default 1e-3
Small float added to variance in Batch norm.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
num_classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
final_block_channels,
residuals,
shortcuts,
kernel_sizes,
expansions,
bn_eps=1e-3,
in_channels=3,
in_size=(224, 224),
num_classes=1000):
super(ProxylessNAS, self).__init__()
self.in_size = in_size
self.num_classes = num_classes
self.features = nn.Sequential()
self.features.add_module("init_block", conv3x3_block(
in_channels=in_channels,
out_channels=init_block_channels,
stride=2,
bn_eps=bn_eps,
activation="relu6"))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = nn.Sequential()
residuals_per_stage = residuals[i]
shortcuts_per_stage = shortcuts[i]
kernel_sizes_per_stage = kernel_sizes[i]
expansions_per_stage = expansions[i]
for j, out_channels in enumerate(channels_per_stage):
residual = (residuals_per_stage[j] == 1)
shortcut = (shortcuts_per_stage[j] == 1)
kernel_size = kernel_sizes_per_stage[j]
expansion = expansions_per_stage[j]
stride = 2 if (j == 0) and (i != 0) else 1
stage.add_module("unit{}".format(j + 1), ProxylessUnit(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
bn_eps=bn_eps,
expansion=expansion,
residual=residual,
shortcut=shortcut))
in_channels = out_channels
self.features.add_module("stage{}".format(i + 1), stage)
self.features.add_module("final_block", conv1x1_block(
in_channels=in_channels,
out_channels=final_block_channels,
bn_eps=bn_eps,
activation="relu6"))
in_channels = final_block_channels
self.features.add_module("final_pool", nn.AvgPool2d(
kernel_size=7,
stride=1))
self.output = nn.Linear(
in_features=in_channels,
out_features=num_classes)
self._init_params()
def _init_params(self):
for name, module in self.named_modules():
if isinstance(module, nn.Conv2d):
init.kaiming_uniform_(module.weight)
if module.bias is not None:
init.constant_(module.bias, 0)
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.output(x)
return x
def get_proxylessnas(version,
model_name=None,
pretrained=False,
root=os.path.join("~", ".torch", "models"),
**kwargs):
"""
Create ProxylessNAS model with specific parameters.
Parameters:
----------
version : str
Version of ProxylessNAS ('cpu', 'gpu', 'mobile' or 'mobile14').
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
if version == "cpu":
residuals = [[1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 0, 0, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1]]
channels = [[24], [32, 32, 32, 32], [48, 48, 48, 48], [88, 88, 88, 88, 104, 104, 104, 104],
[216, 216, 216, 216, 360]]
kernel_sizes = [[3], [3, 3, 3, 3], [3, 3, 3, 5], [3, 3, 3, 3, 5, 3, 3, 3], [5, 5, 5, 3, 5]]
expansions = [[1], [6, 3, 3, 3], [6, 3, 3, 3], [6, 3, 3, 3, 6, 3, 3, 3], [6, 3, 3, 3, 6]]
init_block_channels = 40
final_block_channels = 1432
elif version == "gpu":
residuals = [[1], [1, 0, 0, 0], [1, 0, 0, 1], [1, 0, 0, 1, 1, 0, 1, 1], [1, 1, 1, 1, 1]]
channels = [[24], [32, 32, 32, 32], [56, 56, 56, 56], [112, 112, 112, 112, 128, 128, 128, 128],
[256, 256, 256, 256, 432]]
kernel_sizes = [[3], [5, 3, 3, 3], [7, 3, 3, 3], [7, 5, 5, 5, 5, 3, 3, 5], [7, 7, 7, 5, 7]]
expansions = [[1], [3, 3, 3, 3], [3, 3, 3, 3], [6, 3, 3, 3, 6, 3, 3, 3], [6, 6, 6, 6, 6]]
init_block_channels = 40
final_block_channels = 1728
elif version == "mobile":
residuals = [[1], [1, 1, 0, 0], [1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1]]
channels = [[16], [32, 32, 32, 32], [40, 40, 40, 40], [80, 80, 80, 80, 96, 96, 96, 96],
[192, 192, 192, 192, 320]]
kernel_sizes = [[3], [5, 3, 3, 3], [7, 3, 5, 5], [7, 5, 5, 5, 5, 5, 5, 5], [7, 7, 7, 7, 7]]
expansions = [[1], [3, 3, 3, 3], [3, 3, 3, 3], [6, 3, 3, 3, 6, 3, 3, 3], [6, 6, 3, 3, 6]]
init_block_channels = 32
final_block_channels = 1280
elif version == "mobile14":
residuals = [[1], [1, 1, 0, 0], [1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1]]
channels = [[24], [40, 40, 40, 40], [56, 56, 56, 56], [112, 112, 112, 112, 136, 136, 136, 136],
[256, 256, 256, 256, 448]]
kernel_sizes = [[3], [5, 3, 3, 3], [7, 3, 5, 5], [7, 5, 5, 5, 5, 5, 5, 5], [7, 7, 7, 7, 7]]
expansions = [[1], [3, 3, 3, 3], [3, 3, 3, 3], [6, 3, 3, 3, 6, 3, 3, 3], [6, 6, 3, 3, 6]]
init_block_channels = 48
final_block_channels = 1792
else:
raise ValueError("Unsupported ProxylessNAS version: {}".format(version))
shortcuts = [[0], [0, 1, 1, 1], [0, 1, 1, 1], [0, 1, 1, 1, 0, 1, 1, 1], [0, 1, 1, 1, 0]]
net = ProxylessNAS(
channels=channels,
init_block_channels=init_block_channels,
final_block_channels=final_block_channels,
residuals=residuals,
shortcuts=shortcuts,
kernel_sizes=kernel_sizes,
expansions=expansions,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import download_model
download_model(
net=net,
model_name=model_name,
local_model_store_dir_path=root)
return net
def proxylessnas_cpu(**kwargs):
"""
ProxylessNAS (CPU) model from 'ProxylessNAS: Direct Neural Architecture Search on Target Task and Hardware,'
https://arxiv.org/abs/1812.00332.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_proxylessnas(version="cpu", model_name="proxylessnas_cpu", **kwargs)
def proxylessnas_gpu(**kwargs):
"""
ProxylessNAS (GPU) model from 'ProxylessNAS: Direct Neural Architecture Search on Target Task and Hardware,'
https://arxiv.org/abs/1812.00332.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_proxylessnas(version="gpu", model_name="proxylessnas_gpu", **kwargs)
def proxylessnas_mobile(**kwargs):
"""
ProxylessNAS (Mobile) model from 'ProxylessNAS: Direct Neural Architecture Search on Target Task and Hardware,'
https://arxiv.org/abs/1812.00332.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_proxylessnas(version="mobile", model_name="proxylessnas_mobile", **kwargs)
def proxylessnas_mobile14(**kwargs):
"""
ProxylessNAS (Mobile-14) model from 'ProxylessNAS: Direct Neural Architecture Search on Target Task and Hardware,'
https://arxiv.org/abs/1812.00332.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_proxylessnas(version="mobile14", model_name="proxylessnas_mobile14", **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
import torch
pretrained = False
models = [
proxylessnas_cpu,
proxylessnas_gpu,
proxylessnas_mobile,
proxylessnas_mobile14,
]
for model in models:
net = model(pretrained=pretrained)
# net.train()
net.eval()
weight_count = _calc_width(net)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != proxylessnas_cpu or weight_count == 4361648)
assert (model != proxylessnas_gpu or weight_count == 7119848)
assert (model != proxylessnas_mobile or weight_count == 4080512)
assert (model != proxylessnas_mobile14 or weight_count == 6857568)
x = torch.randn(14, 3, 224, 224)
y = net(x)
y.sum().backward()
assert (tuple(y.size()) == (14, 1000))
if __name__ == "__main__":
_test()
| 14,555
| 33.492891
| 118
|
py
|
imgclsmob
|
imgclsmob-master/pytorch/pytorchcv/models/isqrtcovresnet.py
|
"""
iSQRT-COV-ResNet for ImageNet-1K, implemented in PyTorch.
Original paper: 'Towards Faster Training of Global Covariance Pooling Networks by Iterative Matrix Square Root
Normalization,' https://arxiv.org/abs/1712.01034.
"""
__all__ = ['iSQRTCOVResNet', 'isqrtcovresnet18', 'isqrtcovresnet34', 'isqrtcovresnet50', 'isqrtcovresnet50b',
'isqrtcovresnet101', 'isqrtcovresnet101b']
import os
import torch
import torch.nn as nn
import torch.nn.init as init
from .common import conv1x1_block
from .resnet import ResUnit, ResInitBlock
class CovPool(torch.autograd.Function):
"""
Covariance pooling function.
"""
@staticmethod
def forward(ctx, x):
batch, channels, height, width = x.size()
n = height * width
xn = x.reshape(batch, channels, n)
identity_bar = ((1.0 / n) * torch.eye(n, dtype=xn.dtype, device=xn.device)).unsqueeze(dim=0).repeat(batch, 1, 1)
ones_bar = torch.full((batch, n, n), fill_value=(-1.0 / n / n), dtype=xn.dtype, device=xn.device)
i_bar = identity_bar + ones_bar
sigma = xn.bmm(i_bar).bmm(xn.transpose(1, 2))
ctx.save_for_backward(x, i_bar)
return sigma
@staticmethod
def backward(ctx, grad_sigma):
x, i_bar = ctx.saved_tensors
batch, channels, height, width = x.size()
n = height * width
xn = x.reshape(batch, channels, n)
grad_x = grad_sigma + grad_sigma.transpose(1, 2)
grad_x = grad_x.bmm(xn).bmm(i_bar)
grad_x = grad_x.reshape(batch, channels, height, width)
return grad_x
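# Illustrative helper, not part of the library API. Hedged numerical sketch: CovPool
# computes sigma = X_n @ I_bar @ X_n^T with I_bar = (1/n) * (I - (1/n) * J), which
# equals the biased covariance of the n = H * W spatial samples (centre the columns,
# then average the outer products).
def _check_cov_pool():
    x = torch.randn(2, 8, 5, 5, dtype=torch.float64)
    sigma = CovPool.apply(x)
    xn = x.reshape(2, 8, 25)
    xc = xn - xn.mean(dim=2, keepdim=True)
    ref = xc.bmm(xc.transpose(1, 2)) / 25.0
    assert torch.allclose(sigma, ref)
    return sigma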
class NewtonSchulzSqrt(torch.autograd.Function):
"""
Newton-Schulz iterative matrix square root function.
Parameters:
----------
x : Tensor
Input tensor (batch * cols * rows).
n : int
Number of iterations (n > 1).
"""
@staticmethod
def forward(ctx, x, n):
assert (n > 1)
batch, cols, rows = x.size()
assert (cols == rows)
m = cols
identity = torch.eye(m, dtype=x.dtype, device=x.device).unsqueeze(dim=0).repeat(batch, 1, 1)
x_trace = (x * identity).sum(dim=(1, 2), keepdim=True)
a = x / x_trace
i3 = 3.0 * identity
yi = torch.zeros(batch, n - 1, m, m, dtype=x.dtype, device=x.device)
zi = torch.zeros(batch, n - 1, m, m, dtype=x.dtype, device=x.device)
b2 = 0.5 * (i3 - a)
yi[:, 0, :, :] = a.bmm(b2)
zi[:, 0, :, :] = b2
for i in range(1, n - 1):
b2 = 0.5 * (i3 - zi[:, i - 1, :, :].bmm(yi[:, i - 1, :, :]))
yi[:, i, :, :] = yi[:, i - 1, :, :].bmm(b2)
zi[:, i, :, :] = b2.bmm(zi[:, i - 1, :, :])
b2 = 0.5 * (i3 - zi[:, n - 2, :, :].bmm(yi[:, n - 2, :, :]))
yn = yi[:, n - 2, :, :].bmm(b2)
x_trace_sqrt = torch.sqrt(x_trace)
c = yn * x_trace_sqrt
ctx.save_for_backward(x, x_trace, a, yi, zi, yn, x_trace_sqrt)
ctx.n = n
return c
@staticmethod
def backward(ctx, grad_c):
x, x_trace, a, yi, zi, yn, x_trace_sqrt = ctx.saved_tensors
n = ctx.n
batch, m, _ = x.size()
identity0 = torch.eye(m, dtype=x.dtype, device=x.device)
identity = identity0.unsqueeze(dim=0).repeat(batch, 1, 1)
i3 = 3.0 * identity
grad_yn = grad_c * x_trace_sqrt
b = i3 - yi[:, n - 2, :, :].bmm(zi[:, n - 2, :, :])
grad_yi = 0.5 * (grad_yn.bmm(b) - zi[:, n - 2, :, :].bmm(yi[:, n - 2, :, :]).bmm(grad_yn))
grad_zi = -0.5 * yi[:, n - 2, :, :].bmm(grad_yn).bmm(yi[:, n - 2, :, :])
for i in range(n - 3, -1, -1):
b = i3 - yi[:, i, :, :].bmm(zi[:, i, :, :])
ziyi = zi[:, i, :, :].bmm(yi[:, i, :, :])
grad_yi_m1 = 0.5 * (grad_yi.bmm(b) - zi[:, i, :, :].bmm(grad_zi).bmm(zi[:, i, :, :]) - ziyi.bmm(grad_yi))
grad_zi_m1 = 0.5 * (b.bmm(grad_zi) - yi[:, i, :, :].bmm(grad_yi).bmm(yi[:, i, :, :]) - grad_zi.bmm(ziyi))
grad_yi = grad_yi_m1
grad_zi = grad_zi_m1
grad_a = 0.5 * (grad_yi.bmm(i3 - a) - grad_zi - a.bmm(grad_yi))
x_trace_sqr = x_trace * x_trace
grad_atx_trace = (grad_a.transpose(1, 2).bmm(x) * identity).sum(dim=(1, 2), keepdim=True)
grad_cty_trace = (grad_c.transpose(1, 2).bmm(yn) * identity).sum(dim=(1, 2), keepdim=True)
grad_x_extra = (0.5 * grad_cty_trace / x_trace_sqrt - grad_atx_trace / x_trace_sqr).repeat(1, m, m) * identity
grad_x = grad_a / x_trace + grad_x_extra
return grad_x, None
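# Illustrative helper, not part of the library API. Hedged numerical sketch: for a
# well-conditioned SPD input, the output C of the coupled Newton-Schulz iteration
# should satisfy C @ C ~= X; more iterations tighten the approximation.
def _check_newton_schulz_sqrt():
    m = torch.randn(1, 16, 16, dtype=torch.float64)
    x = m.bmm(m.transpose(1, 2)) + 16.0 * torch.eye(16, dtype=torch.float64).unsqueeze(dim=0)
    c = NewtonSchulzSqrt.apply(x, 20)
    assert torch.allclose(c.bmm(c), x, rtol=1e-3, atol=1e-6)
    return c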
class Triuvec(torch.autograd.Function):
"""
Extract upper triangular part of matrix into vector form.
"""
@staticmethod
def forward(ctx, x):
batch, cols, rows = x.size()
assert (cols == rows)
n = cols
triuvec_inds = torch.ones(n, n).triu().view(n * n).nonzero()
# assert (len(triuvec_inds) == n * (n + 1) // 2)
x_vec = x.reshape(batch, -1)
y = x_vec[:, triuvec_inds]
ctx.save_for_backward(x, triuvec_inds)
return y
@staticmethod
def backward(ctx, grad_y):
x, triuvec_inds = ctx.saved_tensors
batch, n, _ = x.size()
grad_x = torch.zeros_like(x).view(batch, -1)
grad_x[:, triuvec_inds] = grad_y
grad_x = grad_x.view(batch, n, n)
return grad_x
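# Illustrative helper, not part of the library API. Hedged shape sketch: the upper
# triangle (including the diagonal) of an n x n matrix has n * (n + 1) / 2 entries.
def _check_triuvec():
    x = torch.randn(2, 6, 6)
    y = Triuvec.apply(x)
    assert y.reshape(2, -1).size(1) == 6 * 7 // 2
    return y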
class iSQRTCOVPool(nn.Module):
"""
iSQRT-COV pooling layer.
Parameters:
----------
num_iter : int, default 5
Number of iterations (num_iter > 1).
"""
def __init__(self,
num_iter=5):
super(iSQRTCOVPool, self).__init__()
self.num_iter = num_iter
self.cov_pool = CovPool.apply
self.sqrt = NewtonSchulzSqrt.apply
self.triuvec = Triuvec.apply
def forward(self, x):
x = self.cov_pool(x)
x = self.sqrt(x, self.num_iter)
x = self.triuvec(x)
return x
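# Illustrative helper, not part of the library API. Hedged shape sketch: pooling a
# (batch, C, H, W) tensor yields C * (C + 1) / 2 features per sample, which is why
# iSQRTCOVResNet below sets in_features = in_channels * (in_channels + 1) // 2.
def _example_isqrtcov_pool():
    pool = iSQRTCOVPool()
    x = torch.randn(1, 64, 7, 7)
    y = pool(x)
    assert y.reshape(1, -1).size(1) == 64 * 65 // 2
    return y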
class iSQRTCOVResNet(nn.Module):
"""
iSQRT-COV-ResNet model from 'Towards Faster Training of Global Covariance Pooling Networks by Iterative Matrix
Square Root Normalization,' https://arxiv.org/abs/1712.01034.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
final_block_channels : int
Number of output channels for the final unit.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
conv1_stride : bool
Whether to use stride in the first or the second convolution layer in units.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
num_classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
final_block_channels,
bottleneck,
conv1_stride,
in_channels=3,
in_size=(224, 224),
num_classes=1000):
super(iSQRTCOVResNet, self).__init__()
self.in_size = in_size
self.num_classes = num_classes
self.features = nn.Sequential()
self.features.add_module("init_block", ResInitBlock(
in_channels=in_channels,
out_channels=init_block_channels))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = nn.Sequential()
for j, out_channels in enumerate(channels_per_stage):
stride = 2 if (j == 0) and (i not in [0, len(channels) - 1]) else 1
stage.add_module("unit{}".format(j + 1), ResUnit(
in_channels=in_channels,
out_channels=out_channels,
stride=stride,
bottleneck=bottleneck,
conv1_stride=conv1_stride))
in_channels = out_channels
self.features.add_module("stage{}".format(i + 1), stage)
self.features.add_module("final_block", conv1x1_block(
in_channels=in_channels,
out_channels=final_block_channels))
in_channels = final_block_channels
self.features.add_module("final_pool", iSQRTCOVPool())
in_features = in_channels * (in_channels + 1) // 2
self.output = nn.Linear(
in_features=in_features,
out_features=num_classes)
self._init_params()
def _init_params(self):
for name, module in self.named_modules():
if isinstance(module, nn.Conv2d):
init.kaiming_uniform_(module.weight)
if module.bias is not None:
init.constant_(module.bias, 0)
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.output(x)
return x
def get_isqrtcovresnet(blocks,
conv1_stride=True,
model_name=None,
pretrained=False,
root=os.path.join("~", ".torch", "models"),
**kwargs):
"""
Create iSQRT-COV-ResNet model with specific parameters.
Parameters:
----------
blocks : int
Number of blocks.
conv1_stride : bool, default True
Whether to use stride in the first or the second convolution layer in units.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
if blocks == 18:
layers = [2, 2, 2, 2]
elif blocks == 34:
layers = [3, 4, 6, 3]
elif blocks == 50:
layers = [3, 4, 6, 3]
elif blocks == 101:
layers = [3, 4, 23, 3]
elif blocks == 152:
layers = [3, 8, 36, 3]
elif blocks == 200:
layers = [3, 24, 36, 3]
else:
raise ValueError("Unsupported iSQRT-COV-ResNet with number of blocks: {}".format(blocks))
init_block_channels = 64
final_block_channels = 256
if blocks < 50:
channels_per_layers = [64, 128, 256, 512]
bottleneck = False
else:
channels_per_layers = [256, 512, 1024, 2048]
bottleneck = True
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
net = iSQRTCOVResNet(
channels=channels,
init_block_channels=init_block_channels,
final_block_channels=final_block_channels,
bottleneck=bottleneck,
conv1_stride=conv1_stride,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import download_model
download_model(
net=net,
model_name=model_name,
local_model_store_dir_path=root)
return net


def isqrtcovresnet18(**kwargs):
"""
iSQRT-COV-ResNet-18 model from 'Towards Faster Training of Global Covariance Pooling Networks by Iterative Matrix
Square Root Normalization,' https://arxiv.org/abs/1712.01034.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_isqrtcovresnet(blocks=18, model_name="isqrtcovresnet18", **kwargs)


def isqrtcovresnet34(**kwargs):
"""
iSQRT-COV-ResNet-34 model from 'Towards Faster Training of Global Covariance Pooling Networks by Iterative Matrix
Square Root Normalization,' https://arxiv.org/abs/1712.01034.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_isqrtcovresnet(blocks=34, model_name="isqrtcovresnet34", **kwargs)


def isqrtcovresnet50(**kwargs):
"""
iSQRT-COV-ResNet-50 model from 'Towards Faster Training of Global Covariance Pooling Networks by Iterative Matrix
Square Root Normalization,' https://arxiv.org/abs/1712.01034.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_isqrtcovresnet(blocks=50, model_name="isqrtcovresnet50", **kwargs)


def isqrtcovresnet50b(**kwargs):
"""
iSQRT-COV-ResNet-50 model with stride at the second convolution in bottleneck block from 'Towards Faster Training
of Global Covariance Pooling Networks by Iterative Matrix Square Root Normalization,'
https://arxiv.org/abs/1712.01034.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_isqrtcovresnet(blocks=50, conv1_stride=False, model_name="isqrtcovresnet50b", **kwargs)


def isqrtcovresnet101(**kwargs):
"""
iSQRT-COV-ResNet-101 model from 'Towards Faster Training of Global Covariance Pooling Networks by Iterative Matrix
Square Root Normalization,' https://arxiv.org/abs/1712.01034.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_isqrtcovresnet(blocks=101, model_name="isqrtcovresnet101", **kwargs)


def isqrtcovresnet101b(**kwargs):
"""
iSQRT-COV-ResNet-101 model with stride at the second convolution in bottleneck block from 'Towards Faster Training
of Global Covariance Pooling Networks by Iterative Matrix Square Root Normalization,'
https://arxiv.org/abs/1712.01034.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_isqrtcovresnet(blocks=101, conv1_stride=False, model_name="isqrtcovresnet101b", **kwargs)
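

# Minimal usage sketch (an illustration, not part of the original file): build a
# non-pretrained iSQRT-COV-ResNet-50 and run a single forward pass; the final
# classifier consumes 256 * 257 // 2 = 32896 pooled covariance features.
def _example_isqrtcovresnet_usage():
    import torch
    net = isqrtcovresnet50(pretrained=False)
    net.eval()
    x = torch.randn(1, 3, 224, 224)
    with torch.no_grad():
        y = net(x)
    print(y.shape)  # expected: (1, 1000)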


def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count


def _test():
import torch
pretrained = False
models = [
isqrtcovresnet18,
isqrtcovresnet34,
isqrtcovresnet50,
isqrtcovresnet50b,
isqrtcovresnet101,
isqrtcovresnet101b,
]
for model in models:
net = model(pretrained=pretrained)
# net.train()
net.eval()
weight_count = _calc_width(net)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != isqrtcovresnet18 or weight_count == 44205096)
assert (model != isqrtcovresnet34 or weight_count == 54313256)
assert (model != isqrtcovresnet50 or weight_count == 56929832)
assert (model != isqrtcovresnet50b or weight_count == 56929832)
assert (model != isqrtcovresnet101 or weight_count == 75921960)
assert (model != isqrtcovresnet101b or weight_count == 75921960)
x = torch.randn(14, 3, 224, 224)
y = net(x)
y.sum().backward()
assert (tuple(y.size()) == (14, 1000))


if __name__ == "__main__":
_test()
file_length: 15872 | avg_line_length: 33.885714 | max_line_length: 120 | extension_type: py

repo: imgclsmob | file: imgclsmob-master/pytorch/pytorchcv/models/shufflenetv2.py
"""
ShuffleNet V2 for ImageNet-1K, implemented in PyTorch.
Original paper: 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,'
https://arxiv.org/abs/1807.11164.
"""

__all__ = ['ShuffleNetV2', 'shufflenetv2_wd2', 'shufflenetv2_w1', 'shufflenetv2_w3d2', 'shufflenetv2_w2']

import os
import torch
import torch.nn as nn
import torch.nn.init as init
from .common import conv1x1, depthwise_conv3x3, conv1x1_block, conv3x3_block, ChannelShuffle, SEBlock


class ShuffleUnit(nn.Module):
"""
ShuffleNetV2 unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
downsample : bool
        Whether to do downsampling.
use_se : bool
Whether to use SE block.
use_residual : bool
        Whether to use a residual connection.
"""
def __init__(self,
in_channels,
out_channels,
downsample,
use_se,
use_residual):
super(ShuffleUnit, self).__init__()
self.downsample = downsample
self.use_se = use_se
self.use_residual = use_residual
mid_channels = out_channels // 2
self.compress_conv1 = conv1x1(
in_channels=(in_channels if self.downsample else mid_channels),
out_channels=mid_channels)
self.compress_bn1 = nn.BatchNorm2d(num_features=mid_channels)
self.dw_conv2 = depthwise_conv3x3(
channels=mid_channels,
stride=(2 if self.downsample else 1))
self.dw_bn2 = nn.BatchNorm2d(num_features=mid_channels)
self.expand_conv3 = conv1x1(
in_channels=mid_channels,
out_channels=mid_channels)
self.expand_bn3 = nn.BatchNorm2d(num_features=mid_channels)
if self.use_se:
self.se = SEBlock(channels=mid_channels)
if downsample:
self.dw_conv4 = depthwise_conv3x3(
channels=in_channels,
stride=2)
self.dw_bn4 = nn.BatchNorm2d(num_features=in_channels)
self.expand_conv5 = conv1x1(
in_channels=in_channels,
out_channels=mid_channels)
self.expand_bn5 = nn.BatchNorm2d(num_features=mid_channels)
self.activ = nn.ReLU(inplace=True)
self.c_shuffle = ChannelShuffle(
channels=out_channels,
groups=2)

    def forward(self, x):
if self.downsample:
y1 = self.dw_conv4(x)
y1 = self.dw_bn4(y1)
y1 = self.expand_conv5(y1)
y1 = self.expand_bn5(y1)
y1 = self.activ(y1)
x2 = x
else:
y1, x2 = torch.chunk(x, chunks=2, dim=1)
y2 = self.compress_conv1(x2)
y2 = self.compress_bn1(y2)
y2 = self.activ(y2)
y2 = self.dw_conv2(y2)
y2 = self.dw_bn2(y2)
y2 = self.expand_conv3(y2)
y2 = self.expand_bn3(y2)
y2 = self.activ(y2)
if self.use_se:
y2 = self.se(y2)
if self.use_residual and not self.downsample:
y2 = y2 + x2
x = torch.cat((y1, y2), dim=1)
x = self.c_shuffle(x)
return x
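

# Illustrative sketch (not part of the original file): with downsample=True the
# unit halves the spatial resolution and maps in_channels to out_channels via
# two branches of out_channels // 2 channels each; with downsample=False the
# input is split in half along channels, which implies in_channels must equal
# out_channels.
def _example_shuffle_unit_shapes():
    import torch
    unit = ShuffleUnit(in_channels=24, out_channels=116, downsample=True,
                       use_se=False, use_residual=False)
    x = torch.randn(1, 24, 56, 56)
    y = unit(x)
    print(y.shape)  # expected: (1, 116, 28, 28)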


class ShuffleInitBlock(nn.Module):
"""
ShuffleNetV2 specific initial block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
"""
def __init__(self,
in_channels,
out_channels):
super(ShuffleInitBlock, self).__init__()
self.conv = conv3x3_block(
in_channels=in_channels,
out_channels=out_channels,
stride=2)
self.pool = nn.MaxPool2d(
kernel_size=3,
stride=2,
padding=0,
ceil_mode=True)

    def forward(self, x):
x = self.conv(x)
x = self.pool(x)
return x


class ShuffleNetV2(nn.Module):
"""
ShuffleNetV2 model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,'
https://arxiv.org/abs/1807.11164.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
final_block_channels : int
Number of output channels for the final block of the feature extractor.
use_se : bool, default False
Whether to use SE block.
use_residual : bool, default False
Whether to use residual connections.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
num_classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
final_block_channels,
use_se=False,
use_residual=False,
in_channels=3,
in_size=(224, 224),
num_classes=1000):
super(ShuffleNetV2, self).__init__()
self.in_size = in_size
self.num_classes = num_classes
self.features = nn.Sequential()
self.features.add_module("init_block", ShuffleInitBlock(
in_channels=in_channels,
out_channels=init_block_channels))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = nn.Sequential()
for j, out_channels in enumerate(channels_per_stage):
downsample = (j == 0)
stage.add_module("unit{}".format(j + 1), ShuffleUnit(
in_channels=in_channels,
out_channels=out_channels,
downsample=downsample,
use_se=use_se,
use_residual=use_residual))
in_channels = out_channels
self.features.add_module("stage{}".format(i + 1), stage)
self.features.add_module("final_block", conv1x1_block(
in_channels=in_channels,
out_channels=final_block_channels))
in_channels = final_block_channels
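        # A 224x224 input is reduced 32x overall (4x in the init block, then 2x in
        # each of the three stages), leaving a 7x7 map for the 7x7 average pooling.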
self.features.add_module("final_pool", nn.AvgPool2d(
kernel_size=7,
stride=1))
self.output = nn.Linear(
in_features=in_channels,
out_features=num_classes)
self._init_params()

    def _init_params(self):
for name, module in self.named_modules():
if isinstance(module, nn.Conv2d):
init.kaiming_uniform_(module.weight)
if module.bias is not None:
init.constant_(module.bias, 0)

    def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.output(x)
return x


def get_shufflenetv2(width_scale,
model_name=None,
pretrained=False,
root=os.path.join("~", ".torch", "models"),
**kwargs):
"""
Create ShuffleNetV2 model with specific parameters.
Parameters:
----------
width_scale : float
Scale factor for width of layers.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
init_block_channels = 24
final_block_channels = 1024
layers = [4, 8, 4]
channels_per_layers = [116, 232, 464]
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
if width_scale != 1.0:
channels = [[int(cij * width_scale) for cij in ci] for ci in channels]
if width_scale > 1.5:
final_block_channels = int(final_block_channels * width_scale)
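    # Worked example: shufflenetv2_wd2 passes width_scale = 12/29, so the stage
    # widths [116, 232, 464] scale exactly to [48, 96, 192]; shufflenetv2_w2
    # passes 61/29 (> 1.5), so the final block also widens to
    # int(1024 * 61 / 29) = 2153.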
net = ShuffleNetV2(
channels=channels,
init_block_channels=init_block_channels,
final_block_channels=final_block_channels,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import download_model
download_model(
net=net,
model_name=model_name,
local_model_store_dir_path=root)
return net
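

# Minimal usage sketch (an illustration, not part of the original file): build
# the 1x model without pretrained weights and run a single forward pass.
def _example_shufflenetv2_usage():
    import torch
    net = shufflenetv2_w1(pretrained=False)
    net.eval()
    x = torch.randn(1, 3, 224, 224)
    with torch.no_grad():
        y = net(x)
    print(y.shape)  # expected: (1, 1000)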


def shufflenetv2_wd2(**kwargs):
"""
ShuffleNetV2 0.5x model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,'
https://arxiv.org/abs/1807.11164.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_shufflenetv2(width_scale=(12.0 / 29.0), model_name="shufflenetv2_wd2", **kwargs)


def shufflenetv2_w1(**kwargs):
"""
ShuffleNetV2 1x model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,'
https://arxiv.org/abs/1807.11164.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_shufflenetv2(width_scale=1.0, model_name="shufflenetv2_w1", **kwargs)


def shufflenetv2_w3d2(**kwargs):
"""
ShuffleNetV2 1.5x model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,'
https://arxiv.org/abs/1807.11164.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_shufflenetv2(width_scale=(44.0 / 29.0), model_name="shufflenetv2_w3d2", **kwargs)


def shufflenetv2_w2(**kwargs):
"""
ShuffleNetV2 2x model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,'
https://arxiv.org/abs/1807.11164.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_shufflenetv2(width_scale=(61.0 / 29.0), model_name="shufflenetv2_w2", **kwargs)


def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count


def _test():
import torch
pretrained = False
models = [
shufflenetv2_wd2,
shufflenetv2_w1,
shufflenetv2_w3d2,
shufflenetv2_w2,
]
for model in models:
net = model(pretrained=pretrained)
# net.train()
net.eval()
weight_count = _calc_width(net)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != shufflenetv2_wd2 or weight_count == 1366792)
assert (model != shufflenetv2_w1 or weight_count == 2278604)
assert (model != shufflenetv2_w3d2 or weight_count == 4406098)
assert (model != shufflenetv2_w2 or weight_count == 7601686)
x = torch.randn(1, 3, 224, 224)
y = net(x)
y.sum().backward()
assert (tuple(y.size()) == (1, 1000))


if __name__ == "__main__":
_test()
file_length: 11722 | avg_line_length: 30.942779 | max_line_length: 115 | extension_type: py