# MXNet building blocks: custom operators, EfficientNet / Inception-BN /
# ResNet / VGG / WideResNet backbones, and misc dataset and image utilities.
# NOTE: the imports below are reconstructed for this excerpt. The layer
# wrappers (Conv, BN, BNRelu, Relu, Swish, Pool, FC, FCRelu, ConvRelu, Drop,
# Flatten), the tables DEFAULT_EFFICIENT_PARAMS / DEFAULT_EFFICIENT_BLOCK_ARGS,
# and the helpers as_list, image2C3, resize_height and resize_width are assumed
# to be defined elsewhere in this codebase.
import os
import pickle
import re
import xml.etree.ElementTree as ET
from collections import namedtuple

import cv2
import numpy as np

import mxnet as mx
class BilinearScale(mx.operator.CustomOp):
def __init__(self, scale):
self.scale = scale
def forward(self, is_train, req, in_data, out_data, aux):
x = in_data[0]
(h, w) = x.shape[2:]
new_h = (int(((h - 1) * self.scale)) + 1)
new_w = (int(((w - 1) * self.scale)) + 1)
x.attach_grad()
with mx.autograd.record():
new_x = mx.nd.contrib.BilinearResize2D(x, height=new_h, width=new_w)
self.new_x = new_x
self.x = x
self.assign(out_data[0], req[0], new_x)
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.new_x.backward(out_grad[0])
self.assign(in_grad[0], req[0], self.x.grad)
@mx.operator.register('BilinearScale')
class BilinearScaleProp(mx.operator.CustomOpProp):
def __init__(self, scale):
super(BilinearScaleProp, self).__init__(need_top_grad=True)
self.scale = float(scale)
def infer_shape(self, in_shape):
(n, c, h, w) = in_shape[0]
new_h = (int(((h - 1) * self.scale)) + 1)
new_w = (int(((w - 1) * self.scale)) + 1)
return (in_shape, [(n, c, new_h, new_w)], [])
def create_operator(self, ctx, shapes, dtypes):
return BilinearScale(self.scale)
class BilinearScaleLike(mx.operator.CustomOp):
def forward(self, is_train, req, in_data, out_data, aux):
(x, x_ref) = in_data
(new_h, new_w) = x_ref.shape[2:]
x.attach_grad()
with mx.autograd.record():
new_x = mx.nd.contrib.BilinearResize2D(x, height=new_h, width=new_w)
self.new_x = new_x
self.x = x
self.assign(out_data[0], req[0], new_x)
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.new_x.backward(out_grad[0])
in_grad[1][:] = 0
self.assign(in_grad[0], req[0], self.x.grad)
@mx.operator.register('BilinearScaleLike')
class BilinearScaleLikeProp(mx.operator.CustomOpProp):
def __init__(self):
super(BilinearScaleLikeProp, self).__init__(need_top_grad=True)
def list_arguments(self):
return ['d1', 'd2']
def infer_shape(self, in_shape):
out_shape = list(in_shape[1])
out_shape[1] = in_shape[0][1]
return (in_shape, [out_shape], [])
def create_operator(self, ctx, shapes, dtypes):
return BilinearScaleLike()
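
# A minimal usage sketch of the two resize ops above, assuming only that this
# mxnet build ships the contrib BilinearResize2D operator they wrap.
def _demo_bilinear_ops():
    x = mx.nd.random.uniform(shape=(1, 3, 8, 8))
    y = mx.nd.Custom(x, scale=2.0, op_type='BilinearScale')
    assert y.shape == (1, 3, 15, 15)  # new_h = int((8 - 1) * 2.0) + 1 = 15
    ref = mx.nd.zeros((1, 5, 16, 16))
    z = mx.nd.Custom(x, ref, op_type='BilinearScaleLike')
    assert z.shape == (1, 3, 16, 16)  # ref's spatial size, x's channel count
    return (y, z)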
class SegmentLoss(mx.operator.CustomOp):
def __init__(self, has_grad_scale):
self.has_grad_scale = has_grad_scale
def forward(self, is_train, req, in_data, out_data, aux):
prediction = mx.nd.softmax(in_data[0], axis=1)
self.assign(out_data[0], req[0], prediction)
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
prediction = out_data[0]
label = mx.nd.one_hot(in_data[1], depth=prediction.shape[1]).transpose((0, 3, 1, 2))
if (prediction.shape[2] != label.shape[2]):
label = mx.nd.contrib.BilinearResize2D(label, height=prediction.shape[2], width=prediction.shape[3])
label = (mx.nd.one_hot(mx.nd.argmax(label, axis=1), depth=prediction.shape[1]).transpose((0, 3, 1, 2)) * (mx.nd.max(label, axis=1, keepdims=True) > 0.5))
mask = label.sum(axis=1, keepdims=True)
num_pixel = mx.nd.maximum((mask.sum() / mask.shape[0]), 1e-05)
grad = (((prediction - label) * mask) / num_pixel)
if self.has_grad_scale:
grad_scale = in_data[2].reshape((- 1), 1, 1, 1)
grad = (grad * grad_scale)
in_grad[1][:] = 0
self.assign(in_grad[0], req[0], grad)
@mx.operator.register('SegmentLoss')
class SegmentLossProp(mx.operator.CustomOpProp):
def __init__(self, has_grad_scale=0):
super(SegmentLossProp, self).__init__(need_top_grad=False)
self.has_grad_scale = (int(has_grad_scale) > 0)
def list_arguments(self):
if self.has_grad_scale:
return ['data', 'label', 'scale']
else:
return ['data', 'label']
def infer_shape(self, in_shape):
return (in_shape, [in_shape[0]], [])
def create_operator(self, ctx, shapes, dtypes):
return SegmentLoss(self.has_grad_scale)
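
# A minimal sketch of SegmentLoss, assuming 21 classes (PASCAL VOC plus
# background). Forward only emits softmax probabilities; backward hard-codes
# the cross-entropy gradient (prediction - label, masked and normalised by the
# pixel count), which is why the prop sets need_top_grad=False. Pixels whose
# label falls outside [0, depth), e.g. the usual 255 'ignore' index, one-hot
# to all zeros and are masked out of the gradient.
def _demo_segment_loss():
    logit = mx.nd.random.normal(shape=(2, 21, 4, 4))
    label = mx.nd.random.randint(0, 21, shape=(2, 4, 4)).astype('float32')
    prob = mx.nd.Custom(logit, label, op_type='SegmentLoss')
    assert prob.shape == logit.shape
    return prob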
class CompletionLoss(mx.operator.CustomOp):
def __init__(self, has_grad_scale):
self.has_grad_scale = has_grad_scale
def forward(self, is_train, req, in_data, out_data, aux):
prediction = mx.nd.softmax(in_data[0], axis=1)
self.assign(out_data[0], req[0], prediction)
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
(logit, target, label) = in_data[:3]
prediction = out_data[0]
onehot = target.argmax(axis=1)
onehot = mx.nd.one_hot(onehot, depth=logit.shape[1]).transpose((0, 3, 1, 2))
label = mx.nd.one_hot(label, depth=logit.shape[1]).transpose((0, 3, 1, 2))
mask = label.max(axis=(2, 3), keepdims=True)
onehot = (onehot * mask)
mask = onehot.sum(axis=1, keepdims=True)
num_pixel = mx.nd.maximum((mask.sum() / mask.shape[0]), 1e-05)
grad = (((prediction - onehot) * mask) / num_pixel)
if self.has_grad_scale:
grad_scale = in_data[3].reshape((- 1), 1, 1, 1)
grad = (grad * grad_scale)
in_grad[1][:] = 0
in_grad[2][:] = 0
self.assign(in_grad[0], req[0], grad)
@mx.operator.register('CompletionLoss')
class CompletionLossProp(mx.operator.CustomOpProp):
def __init__(self, has_grad_scale=0):
super(CompletionLossProp, self).__init__(need_top_grad=False)
self.has_grad_scale = (int(has_grad_scale) > 0)
def list_arguments(self):
if self.has_grad_scale:
return ['data', 'target', 'label', 'scale']
else:
return ['data', 'target', 'label']
def infer_shape(self, in_shape):
return (in_shape, [in_shape[0]], [])
def create_operator(self, ctx, shapes, dtypes):
return CompletionLoss(self.has_grad_scale)
class MultiSigmoidLoss(mx.operator.CustomOp):
def forward(self, is_train, req, in_data, out_data, aux):
(logit, label) = in_data
prediction = mx.nd.sigmoid(logit)
self.assign(out_data[0], req[0], prediction)
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
prediction = out_data[0]
label = in_data[1]
grad = (prediction - label)
in_grad[1][:] = 0
self.assign(in_grad[0], req[0], grad)
|
@mx.operator.register('MultiSigmoidLoss')
class MultiSigmoidLossProp(mx.operator.CustomOpProp):
def __init__(self):
super(MultiSigmoidLossProp, self).__init__(need_top_grad=False)
def list_arguments(self):
return ['data', 'label']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return (in_shape, [in_shape[0]], [])
def create_operator(self, ctx, shapes, dtypes):
return MultiSigmoidLoss()
def config_efficientnet(model_name):
assert re.match('^efficientnet-b[0-7]$', model_name), model_name
efficientnet_params = DEFAULT_EFFICIENT_PARAMS[model_name]
block_args = DEFAULT_EFFICIENT_BLOCK_ARGS
(width_coefficient, depth_coefficient, resolution, dropout_rate) = efficientnet_params
global_params = {'block_args': block_args, 'batch_norm_momentum': 0.99, 'batch_norm_epsilon': 0.001, 'dropout_rate': dropout_rate, 'survival_prob': 0.8, 'num_classes': 1000, 'width_coefficient': width_coefficient, 'depth_coefficient': depth_coefficient, 'depth_divisor': 8, 'min_depth': None, 'use_se': True, 'clip_projection_output': False}
global_params = namedtuple('global_params', sorted(global_params))(**global_params)
kv_list = [dict([re.split('([\\d\\.]+)', op)[:2] for op in _block_args.split('_')]) for _block_args in block_args]
block_args_list = [{'kernel_size': int(kv['k']), 'num_repeat': int(kv['r']), 'input_filters': int(kv['i']), 'output_filters': int(kv['o']), 'expand_ratio': int(kv['e']), 'id_skip': ('noskip' not in block_string), 'se_ratio': (float(kv['se']) if ('se' in kv) else None), 'strides': (int(kv['s'][0]), int(kv['s'][1])), 'conv_type': int(kv.get('c', '0')), 'fused_conv': int(kv.get('f', '0')), 'super_pixel': int(kv.get('p', '0')), 'dilate': int(kv.get('d', '1')), 'condconv': ('cc' in block_string), 'survival_prob': 1.0} for (kv, block_string) in zip(kv_list, block_args)]
block_args_list = [namedtuple('block_args', sorted(x))(**x) for x in block_args_list]
return (block_args_list, global_params)
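
# Parsing sketch: each block string is a '_'-separated run of key/number
# tokens. For the canonical first EfficientNet stage,
# 'r1_k3_s11_e1_i32_o16_se0.25', the comprehension above yields
# {'r': '1', 'k': '3', 's': '11', 'e': '1', 'i': '32', 'o': '16', 'se': '0.25'},
# i.e. one repeat of a 3x3, stride-(1, 1), expand-ratio-1 block mapping
# 32 channels to 16 with a squeeze-excite ratio of 0.25.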
def MBConvBlock(data, block_args, global_params, use_global_stats, block_id, name, lr_mult, reuse, input_size=None):
if block_args.super_pixel:
raise NotImplementedError
if block_args.condconv:
raise NotImplementedError
kernel = ((block_args.kernel_size,) * 2)
dilate = (((1 if (kernel[0] == 1) else block_args.dilate),) * 2)
pad = ((((((kernel[0] - 1) * dilate[0]) + 1) // 2),) * 2)
momentum = global_params.batch_norm_momentum
eps = global_params.batch_norm_epsilon
num_filters = (block_args.input_filters * block_args.expand_ratio)
(conv_id, bn_id) = (0, 0)
if block_args.fused_conv:
x = Conv(data, num_filters, kernel, block_args.strides, pad=pad, dilate=dilate, no_bias=True, name=(name + ('block%d_conv' % block_id)), lr_mult=lr_mult, reuse=reuse)
else:
if (block_args.expand_ratio != 1):
x = Conv(data, num_filters, (1, 1), no_bias=True, name=(name + ('block%d_conv%d' % (block_id, conv_id))), lr_mult=lr_mult, reuse=reuse)
x = BN(x, momentum=momentum, eps=eps, use_global_stats=use_global_stats, name=(name + ('block%d_bn%d' % (block_id, bn_id))), lr_mult=lr_mult, reuse=reuse)
x = Swish(x)
(conv_id, bn_id) = ((conv_id + 1), (bn_id + 1))
else:
x = data
x = Conv(x, num_filters, kernel, block_args.strides, pad=pad, dilate=dilate, num_group=num_filters, no_bias=True, name=(name + ('block%d_depthwise_conv0' % block_id)), lr_mult=lr_mult, reuse=reuse)
x = BN(x, momentum=momentum, eps=eps, use_global_stats=use_global_stats, name=(name + ('block%d_bn%d' % (block_id, bn_id))), lr_mult=lr_mult, reuse=reuse)
x = Swish(x)
bn_id += 1
has_se = (global_params.use_se and (block_args.se_ratio is not None) and (0 < block_args.se_ratio <= 1))
if has_se:
num_filters_rd = max(1, int((block_args.input_filters * block_args.se_ratio)))
x_se = mx.sym.mean(x, axis=(2, 3), keepdims=True)
x_se = Conv(x_se, num_filters_rd, (1, 1), name=(name + ('block%d_se_conv0' % block_id)), lr_mult=lr_mult, reuse=reuse)
x_se = Swish(x_se)
x_se = Conv(x_se, num_filters, (1, 1), name=(name + ('block%d_se_conv1' % block_id)), lr_mult=lr_mult, reuse=reuse)
x = mx.sym.broadcast_mul(mx.sym.sigmoid(x_se), x)
x = Conv(x, block_args.output_filters, (1, 1), no_bias=True, name=(name + ('block%d_conv%d' % (block_id, conv_id))), lr_mult=lr_mult, reuse=reuse)
x = BN(x, momentum=momentum, eps=eps, use_global_stats=use_global_stats, name=(name + ('block%d_bn%d' % (block_id, bn_id))), lr_mult=lr_mult, reuse=reuse)
(conv_id, bn_id) = ((conv_id + 1), (bn_id + 1))
if global_params.clip_projection_output:
x = mx.sym.clip(x, a_min=(- 6), a_max=6)
if (block_args.id_skip and all([(s == 1) for s in block_args.strides]) and (block_args.input_filters == block_args.output_filters)):
if (block_args.survival_prob > 0):
x = mx.sym.Custom(x, p=(1 - block_args.survival_prob), op_type='DropConnect')
x = (x + data)
return x
def MBConvBlockWithoutDepthwise(data, block_args, global_params, use_global_stats, block_id, name, lr_mult, reuse, input_size=None):
raise NotImplementedError
def meta_efficientnet(model_name, get_internals=False, input_size=None):
(block_args_list, global_params) = config_efficientnet(model_name)
def round_filters(num_filters):
multiplier = global_params.width_coefficient
if (not multiplier):
return num_filters
divisor = global_params.depth_divisor
min_depth = global_params.min_depth
num_filters = (num_filters * multiplier)
new_num = max((min_depth or divisor), ((int((num_filters + (divisor / 2))) // divisor) * divisor))
if (new_num < (0.9 * num_filters)):
new_num += divisor
return int(new_num)
def round_repeats(num_repeat):
multiplier = global_params.depth_coefficient
if (not multiplier):
return num_repeat
return int(np.ceil((multiplier * num_repeat)))
def efficient_model(data, use_global_stats=False, bn_data=False, name=None, lr_mult=1, reuse=None):
name = ('' if (name is None) else name)
endpoints = {}
momentum = global_params.batch_norm_momentum
eps = global_params.batch_norm_epsilon
endpoints['input'] = data
if bn_data:
data = BN(data, fix_gamma=True, momentum=momentum, eps=eps, name=(name + 'bn_data'), lr_mult=lr_mult, reuse=reuse)
x = Conv(data, round_filters(32), (3, 3), (2, 2), pad=(1, 1), no_bias=True, name=(name + 'stem_conv0'), lr_mult=lr_mult, reuse=reuse)
x = BN(x, momentum=momentum, eps=eps, use_global_stats=use_global_stats, name=(name + 'stem_bn0'), lr_mult=lr_mult, reuse=reuse)
x = Swish(x)
endpoints['stem'] = x
block_id = 0
total_blocks = sum([block_args.num_repeat for block_args in block_args_list])
survival_prob = global_params.survival_prob
for (i, block_args) in enumerate(block_args_list):
assert (block_args.num_repeat > 0)
assert (block_args.super_pixel in [0, 1, 2])
block_args = block_args._replace(input_filters=round_filters(block_args.input_filters), output_filters=round_filters(block_args.output_filters), num_repeat=round_repeats(block_args.num_repeat), survival_prob=(1.0 - (((1.0 - global_params.survival_prob) * float(block_id)) / total_blocks)))
ConvBlock = {0: MBConvBlock, 1: MBConvBlockWithoutDepthwise}[block_args.conv_type]
x = ConvBlock(x, block_args, global_params, use_global_stats=use_global_stats, block_id=block_id, name=name, lr_mult=lr_mult, reuse=reuse)
endpoints[('block%d' % block_id)] = x
block_id += 1
for j in range((block_args.num_repeat - 1)):
block_args = block_args._replace(input_filters=block_args.output_filters, strides=(1, 1), survival_prob=(1.0 - (((1.0 - global_params.survival_prob) * float(block_id)) / total_blocks)))
x = ConvBlock(x, block_args, global_params, use_global_stats=use_global_stats, block_id=block_id, name=name, lr_mult=lr_mult, reuse=reuse, input_size=input_size)
endpoints[('block%d' % block_id)] = x
block_id += 1
x = Conv(x, round_filters(1280), (1, 1), no_bias=True, name=(name + 'head_conv0'), lr_mult=lr_mult, reuse=reuse)
x = BN(x, momentum=momentum, eps=eps, use_global_stats=use_global_stats, name=(name + 'head_bn0'), lr_mult=lr_mult, reuse=reuse)
x = Swish(x)
if (global_params.dropout_rate > 0):
x = Drop(x, p=global_params.dropout_rate)
endpoints['head'] = x
x = Pool(x, kernel=(1, 1), pool_type='avg', global_pool=True)
x = mx.sym.flatten(x)
x = FC(x, global_params.num_classes, name=(name + 'head_fc0'), lr_mult=lr_mult, reuse=reuse)
endpoints['logit'] = x
x = mx.sym.softmax(x, axis=1)
endpoints['prob'] = x
if get_internals:
return (x, endpoints)
return x
return efficient_model
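
# A minimal usage sketch, assuming the layer helpers and the
# DEFAULT_EFFICIENT_* tables are importable:
#
#   model_fn = meta_efficientnet('efficientnet-b0', get_internals=True)
#   prob, endpoints = model_fn(mx.sym.Variable('data'))
#   # endpoints maps 'input', 'stem', 'block0'..., 'head', 'logit', 'prob'
#   # to the corresponding symbols.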
def tf2mx_params(ckpt_file, dst_file=None, name='', use_ema=True):
convert_w = (lambda x: mx.nd.array((x.transpose(3, 2, 0, 1) if (x.ndim == 4) else x.T)))
convert_b = (lambda x: mx.nd.array(x))
convert_dp_w = (lambda x: mx.nd.array(x.transpose(2, 3, 0, 1)))
lookup_ptype = {'kernel': ('arg', 'weight', convert_w), 'bias': ('arg', 'bias', convert_b), 'depthwise_kernel': ('arg', 'weight', convert_dp_w), 'gamma': ('arg', 'gamma', convert_b), 'beta': ('arg', 'beta', convert_b), 'moving_mean': ('aux', 'moving_mean', convert_b), 'moving_variance': ('aux', 'moving_var', convert_b)}
lookup_op = {'conv2d': 'conv', 'depthwise_conv2d': 'depthwise_conv', 'tpu_batch_normalization': 'bn', 'dense': 'fc'}
def mapKey(tf_key):
names = tf_key.split('/')
if (not re.match('^efficientnet-b[0-7]$', names[0])):
return (None, None)
(block, op, ptype) = names[1:4]
if block.startswith('blocks'):
block = ('block' + block.split('_')[(- 1)])
block_name = ((name + block) + '_')
if (op == 'se'):
(op, ptype) = names[3:5]
block_name = (block_name + 'se_')
r = re.match('^\\w*_(\\d+)$', op)
op_id = (r.group(1) if r else '0')
_op = (re.match('^(\\w+)_\\d+$', op).group(1) if r else op)
try:
(prefix, suffix, converter) = lookup_ptype[ptype]
except KeyError:
raise KeyError('[{}], ({}, {}, {}), {}'.format(ptype, block, op, ptype, tf_key))
op_name = lookup_op[_op]
return (((((((prefix + ':') + block_name) + op_name) + op_id) + '_') + suffix), converter)
try:
import tensorflow.compat.v1 as tf
except ImportError:
import tensorflow as tf
reader = tf.train.load_checkpoint(ckpt_file)
shape_map = reader.get_variable_to_shape_map()
keys = sorted(shape_map.keys())
ema_keys = [k for k in keys if k.endswith('ExponentialMovingAverage')]
keys = list(set((list((set(keys) - set(ema_keys))) + [k.rsplit('/', 1)[0] for k in ema_keys])))
keys_ = [(k + '/ExponentialMovingAverage') for k in keys]
kk = {k: (k_ if (use_ema and (k_ in ema_keys)) else k) for (k, k_) in zip(keys, keys_)}
mx_params = {}
for k in kk.keys():
tf_key = kk[k]
(mx_key, converter) = mapKey(k)
if (mx_key is None):
if (tf_key != 'global_step'):
print(('Cannot parse tf_key: %s' % tf_key))
continue
if (mx_key in mx_params):
raise KeyError(('Duplicate key: %s, %s, %s' % (k, tf_key, mx_key)))
mx_params[mx_key] = converter(reader.get_tensor(tf_key))
if (dst_file is not None):
mx.nd.save(dst_file, mx_params)
arg_params = {k[4:]: v for (k, v) in mx_params.items() if k.startswith('arg:')}
aux_params = {k[4:]: v for (k, v) in mx_params.items() if k.startswith('aux:')}
return (arg_params, aux_params)
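
# Mapping sketch: examples of how mapKey renames TF checkpoint variables to
# the MXNet parameter names used by the models above (name='' assumed):
#   'efficientnet-b0/blocks_0/conv2d/kernel'
#       -> 'arg:block0_conv0_weight'  (HWIO kernel transposed to OIHW)
#   'efficientnet-b0/blocks_2/tpu_batch_normalization_1/gamma'
#       -> 'arg:block2_bn1_gamma'
#   'efficientnet-b0/stem/conv2d/kernel'
#       -> 'arg:stem_conv0_weight'
# moving_mean / moving_variance land in the 'aux:*' namespace, matching the
# arg/aux split performed at the end of tf2mx_params.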
def incepConv(data, num_filter, kernel, stride=None, dilate=None, pad=None, momentum=0.9, eps=1e-05, use_global_stats=False, name=None, lr_mult=1, reuse=None):
assert (name is not None)
x = Conv(data, num_filter, kernel, stride, dilate, pad, name=('conv_%s' % name), lr_mult=lr_mult, reuse=reuse)
x = BN(x, fix_gamma=False, momentum=momentum, eps=eps, use_global_stats=use_global_stats, name=('bn_%s' % name), lr_mult=lr_mult, reuse=reuse)
x = Relu(x)
return x
def incepBlockA(data, num_filter_1, num_filter_3r, num_filter_3, num_filter_d3r, num_filter_d3, num_filter_p, pool_type, dilate=1, momentum=0.9, eps=1e-05, use_global_stats=False, name=None, lr_mult=1, reuse=None):
assert (name is not None)
x1 = incepConv(data, num_filter_1, (1, 1), momentum=momentum, eps=eps, use_global_stats=use_global_stats, name=('%s_1x1' % name), lr_mult=lr_mult, reuse=reuse)
x3 = incepConv(data, num_filter_3r, (1, 1), momentum=momentum, eps=eps, use_global_stats=use_global_stats, name=('%s_3x3_reduce' % name), lr_mult=lr_mult, reuse=reuse)
x3 = incepConv(x3, num_filter_3, (3, 3), pad=((dilate,) * 2), dilate=((dilate,) * 2), momentum=momentum, eps=eps, use_global_stats=use_global_stats, name=('%s_3x3' % name), lr_mult=lr_mult, reuse=reuse)
xd3 = incepConv(data, num_filter_d3r, (1, 1), momentum=momentum, eps=eps, use_global_stats=use_global_stats, name=('%s_double_3x3_reduce' % name), lr_mult=lr_mult, reuse=reuse)
xd3 = incepConv(xd3, num_filter_d3, (3, 3), pad=((dilate,) * 2), dilate=((dilate,) * 2), momentum=momentum, eps=eps, use_global_stats=use_global_stats, name=('%s_double_3x3_0' % name), lr_mult=lr_mult, reuse=reuse)
xd3 = incepConv(xd3, num_filter_d3, (3, 3), pad=((dilate,) * 2), dilate=((dilate,) * 2), momentum=momentum, eps=eps, use_global_stats=use_global_stats, name=('%s_double_3x3_1' % name), lr_mult=lr_mult, reuse=reuse)
xp = Pool(data, kernel=(3, 3), stride=(1, 1), pad=(1, 1), pool_type=pool_type)
xp = incepConv(xp, num_filter_p, (1, 1), momentum=momentum, eps=eps, use_global_stats=use_global_stats, name=('%s_proj' % name), lr_mult=lr_mult, reuse=reuse)
x = mx.sym.Concat(x1, x3, xd3, xp, dim=1, name=('ch_concat_%s_chconcat' % name))
return x
def incepBlockB(data, num_filter_3r, num_filter_3, num_filter_d3r, num_filter_d3, stride=2, dilate=1, momentum=0.9, eps=1e-05, use_global_stats=False, name=None, lr_mult=1, reuse=None):
assert (name is not None)
x3 = incepConv(data, num_filter_3r, (1, 1), momentum=momentum, eps=eps, use_global_stats=use_global_stats, name=('%s_3x3_reduce' % name), lr_mult=lr_mult, reuse=reuse)
x3 = incepConv(x3, num_filter_3, (3, 3), stride=((stride,) * 2), pad=((dilate,) * 2), dilate=((dilate,) * 2), momentum=momentum, eps=eps, use_global_stats=use_global_stats, name=('%s_3x3' % name), lr_mult=lr_mult, reuse=reuse)
xd3 = incepConv(data, num_filter_d3r, (1, 1), momentum=momentum, eps=eps, use_global_stats=use_global_stats, name=('%s_double_3x3_reduce' % name), lr_mult=lr_mult, reuse=reuse)
xd3 = incepConv(xd3, num_filter_d3, (3, 3), stride=(1, 1), pad=((dilate,) * 2), dilate=((dilate,) * 2), momentum=momentum, eps=eps, use_global_stats=use_global_stats, name=('%s_double_3x3_0' % name), lr_mult=lr_mult, reuse=reuse)
xd3 = incepConv(xd3, num_filter_d3, (3, 3), stride=((stride,) * 2), pad=((dilate,) * 2), dilate=((dilate,) * 2), momentum=momentum, eps=eps, use_global_stats=use_global_stats, name=('%s_double_3x3_1' % name), lr_mult=lr_mult, reuse=reuse)
xp = Pool(data, kernel=(3, 3), stride=((stride,) * 2), pad=(1, 1), pool_type='max')
x = mx.sym.Concat(x3, xd3, xp, dim=1, name=('ch_concat_%s_chconcat' % name))
return x
def inceptionBN(x, momentum=0.9, eps=1e-05, use_global_stats=False, bn_data=True, name=None, lr_mult=1, reuse=None):
name = ('' if (name is None) else name)
if bn_data:
x = BN(x, fix_gamma=True, momentum=momentum, eps=eps, use_global_stats=use_global_stats, name=(name + 'bn_data'), reuse=reuse)
x = incepConv(x, 64, (7, 7), stride=(2, 2), pad=(3, 3), name=(name + '1'), lr_mult=lr_mult, reuse=reuse)
x = Pool(x, kernel=(3, 3), stride=(2, 2), pad=(1, 1), name=(name + 'pool_1'), pool_type='max')
x = incepConv(x, 64, (1, 1), stride=(1, 1), pad=(0, 0), name=(name + '2_red'), lr_mult=lr_mult, reuse=reuse)
x = incepConv(x, 192, (3, 3), stride=(1, 1), pad=(1, 1), name=(name + '2'), lr_mult=lr_mult, reuse=reuse)
x = Pool(x, kernel=(3, 3), stride=(2, 2), pad=(1, 1), name=(name + 'pool_2'), pool_type='max')
x = incepBlockA(x, 64, 64, 64, 64, 96, 32, 'avg', 1, momentum, eps, use_global_stats, '3a', lr_mult, reuse)
x = incepBlockA(x, 64, 64, 96, 64, 96, 64, 'avg', 1, momentum, eps, use_global_stats, '3b', lr_mult, reuse)
x = incepBlockB(x, 128, 160, 64, 96, 1, 2, momentum, eps, use_global_stats, '3c', lr_mult, reuse)
x = incepBlockA(x, 224, 64, 96, 96, 128, 128, 'avg', 2, momentum, eps, use_global_stats, '4a', lr_mult, reuse)
x = incepBlockA(x, 192, 96, 128, 96, 128, 128, 'avg', 2, momentum, eps, use_global_stats, '4b', lr_mult, reuse)
x = incepBlockA(x, 160, 128, 160, 128, 160, 128, 'avg', 2, momentum, eps, use_global_stats, '4c', lr_mult, reuse)
x = incepBlockA(x, 96, 128, 192, 160, 192, 128, 'avg', 2, momentum, eps, use_global_stats, '4d', lr_mult, reuse)
x = incepBlockB(x, 128, 192, 192, 256, 1, 4, momentum, eps, use_global_stats, '4e', lr_mult, reuse)
x = incepBlockA(x, 352, 192, 320, 160, 224, 128, 'avg', 4, momentum, eps, use_global_stats, '5a', lr_mult, reuse)
x = incepBlockA(x, 352, 192, 320, 192, 224, 128, 'max', 4, momentum, eps, use_global_stats, '5b', lr_mult, reuse)
return x
class OpConstant(mx.operator.CustomOp):
def __init__(self, val):
self.val = val
def forward(self, is_train, req, in_data, out_data, aux):
self.assign(out_data[0], req[0], self.val)
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
pass
@mx.operator.register('Constant')
class OpConstantProp(mx.operator.CustomOpProp):
def __init__(self, val_str, shape_str, type_str='float32'):
super(OpConstantProp, self).__init__(need_top_grad=False)
val = [float(x) for x in val_str.split(',')]
shape = [int(x) for x in shape_str.split(',')]
self.val = mx.nd.array(val, dtype=type_str).reshape(shape)
def list_arguments(self):
return []
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return (in_shape, [self.val.shape], [])
def infer_type(self, in_type):
return (in_type, [self.val.dtype], [])
def create_operator(self, ctx, shapes, dtypes):
return OpConstant(self.val.as_in_context(ctx))
def CustomConstantEncoder(value, dtype='float32'):
if (not isinstance(value, np.ndarray)):
if (not isinstance(value, (list, tuple))):
value = [value]
value = np.array(value, dtype=dtype)
return (','.join([str(x) for x in value.ravel()]), ','.join([str(x) for x in value.shape]))
def Constant(value, dtype='float32'):
assert isinstance(dtype, str), dtype
(val, shape) = CustomConstantEncoder(value, dtype)
return mx.sym.Custom(val_str=val, shape_str=shape, type_str=dtype, op_type='Constant')
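
# A minimal sketch of the Constant op, assuming a CPU context. The op has no
# inputs, so binding with an empty argument dict is enough to evaluate it.
def _demo_constant():
    (val_str, shape_str) = CustomConstantEncoder([[1.0, 2.0], [3.0, 4.0]])
    # val_str == '1.0,2.0,3.0,4.0', shape_str == '2,2'
    c = Constant([[1.0, 2.0], [3.0, 4.0]])
    out = c.bind(mx.cpu(), {}).forward()[0]
    return out  # NDArray [[1, 2], [3, 4]]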
class DropConnect(mx.operator.CustomOp):
def __init__(self, p):
self.drop_rate = p
self.mask = None
def forward(self, is_train, req, in_data, out_data, aux):
data = in_data[0]
if (is_train and (self.drop_rate > 0)):
mask_shape = ([data.shape[0]] + ([1] * (len(data.shape) - 1)))
mask = mx.nd.random.uniform(0, 1, mask_shape, ctx=data.context)
mask = ((mask > self.drop_rate) / (1 - self.drop_rate))
out = (data * mask)
self.mask = mask
else:
out = data
self.mask = None
self.assign(out_data[0], req[0], out)
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
if (self.mask is None):
grad = out_grad[0].copy()
else:
grad = (out_grad[0] * self.mask)
self.assign(in_grad[0], req[0], grad)
@mx.operator.register('DropConnect')
class DropConnectProp(mx.operator.CustomOpProp):
def __init__(self, p):
super(DropConnectProp, self).__init__(need_top_grad=True)
self.drop_rate = float(p)
assert ((self.drop_rate >= 0) and (self.drop_rate < 1))
def list_arguments(self):
return ['data']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return (in_shape, in_shape, [])
def create_operator(self, ctx, shapes, dtypes):
return DropConnect(self.drop_rate)
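
# A minimal sketch of DropConnect (stochastic depth): whole samples are
# zeroed with probability p and survivors are scaled by 1 / (1 - p). The
# imperative is_train flag follows the autograd recording scope.
def _demo_drop_connect():
    x = mx.nd.ones((4, 3, 2, 2))
    with mx.autograd.record():
        y = mx.nd.Custom(x, p=0.5, op_type='DropConnect')
    # each sample of y is either all zeros or all 2.0
    return y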
class MultiSoftmaxLoss(mx.operator.CustomOp):
def forward(self, is_train, req, in_data, out_data, aux):
(logit, label) = in_data
prediction = mx.nd.softmax(logit, axis=1)
self.assign(out_data[0], req[0], prediction)
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
prediction = out_data[0]
label = in_data[1]
grad = (prediction - label)
in_grad[1][:] = 0
self.assign(in_grad[0], req[0], grad)
|
@mx.operator.register('MultiSoftmaxLoss')
class MultiSoftmaxLossProp(mx.operator.CustomOpProp):
def __init__(self):
super(MultiSoftmaxLossProp, self).__init__(need_top_grad=False)
def list_arguments(self):
return ['data', 'label']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return (in_shape, [in_shape[0]], [])
def create_operator(self, ctx, shapes, dtypes):
return MultiSoftmaxLoss()
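
# Gradient note: both Multi*Loss ops above follow the same pattern as
# SegmentLoss: forward emits the activation and backward hard-codes the
# analytic gradient prediction - label. For sigmoid + binary cross-entropy
# and softmax + cross-entropy this is exactly d(loss)/d(logit), so no top
# gradient is needed (need_top_grad=False) and the label input gets zero
# gradient.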
def ResStem(data, num_filter, momentum=0.9, eps=1e-05, use_global_stats=False, bn_data=True, name=None, lr_mult=1, reuse=None):
name = ('' if (name is None) else name)
if bn_data:
x = BN(data, fix_gamma=True, momentum=momentum, eps=eps, use_global_stats=use_global_stats, name=(name + 'bn_data'), reuse=reuse)
else:
x = data
x = Conv(x, num_filter=num_filter, kernel=(7, 7), stride=(2, 2), pad=(3, 3), no_bias=True, name=(name + 'conv0'), lr_mult=lr_mult, reuse=reuse)
x = BN(x, fix_gamma=False, momentum=momentum, eps=eps, use_global_stats=use_global_stats, name=(name + 'bn0'), lr_mult=lr_mult, reuse=reuse)
x = Relu(x, name=(name + 'relu0'))
x = Pool(x, kernel=(3, 3), stride=(2, 2), pad=(1, 1), pool_type='max', name=(name + 'pool0'))
return x
def ResUnit(data, num_filter, stride, dilate, projection, bottle_neck, momentum=0.9, eps=1e-05, use_global_stats=False, name=None, lr_mult=1, reuse=None):
assert (name is not None)
x = BNRelu(data, fix_gamma=False, momentum=momentum, eps=eps, use_global_stats=use_global_stats, name=(name + '_bn1'), lr_mult=lr_mult, reuse=reuse)
if projection:
shortcut = Conv(x, num_filter=num_filter, kernel=(1, 1), stride=((stride,) * 2), pad=(0, 0), no_bias=True, name=(name + '_sc'), lr_mult=lr_mult, reuse=reuse)
else:
shortcut = data
if bottle_neck:
x = Conv(x, num_filter=int((num_filter / 4.0)), kernel=(1, 1), stride=(1, 1), pad=(0, 0), no_bias=True, name=(name + '_conv1'), lr_mult=lr_mult, reuse=reuse)
x = BNRelu(x, fix_gamma=False, momentum=momentum, eps=eps, use_global_stats=use_global_stats, name=(name + '_bn2'), lr_mult=lr_mult, reuse=reuse)
x = Conv(x, num_filter=int((num_filter / 4.0)), kernel=(3, 3), stride=((stride,) * 2), pad=((dilate,) * 2), dilate=((dilate,) * 2), no_bias=True, name=(name + '_conv2'), lr_mult=lr_mult, reuse=reuse)
x = BNRelu(x, fix_gamma=False, momentum=momentum, eps=eps, use_global_stats=use_global_stats, name=(name + '_bn3'), lr_mult=lr_mult, reuse=reuse)
x = Conv(x, num_filter=num_filter, kernel=(1, 1), stride=(1, 1), pad=(0, 0), no_bias=True, name=(name + '_conv3'), lr_mult=lr_mult, reuse=reuse)
else:
x = Conv(x, num_filter=num_filter, kernel=(3, 3), stride=((stride,) * 2), pad=((dilate,) * 2), dilate=((dilate,) * 2), no_bias=True, name=(name + '_conv1'), lr_mult=lr_mult, reuse=reuse)
x = BNRelu(x, fix_gamma=False, momentum=momentum, eps=eps, use_global_stats=use_global_stats, name=(name + '_bn2'), lr_mult=lr_mult, reuse=reuse)
x = Conv(x, num_filter=num_filter, kernel=(3, 3), stride=(1, 1), pad=(1, 1), no_bias=True, name=(name + '_conv2'), lr_mult=lr_mult, reuse=reuse)
x = (x + shortcut)
return x
def ResBlock(data, num_unit, num_filter, stride, dilate, bottle_neck, momentum=0.9, eps=1e-05, use_global_stats=False, name=None, lr_mult=1, reuse=None):
assert (name is not None)
x = ResUnit(data, num_filter, stride, dilate, True, bottle_neck, momentum, eps, use_global_stats, (name + '_unit1'), lr_mult, reuse)
for i in range(1, num_unit):
x = ResUnit(x, num_filter, 1, dilate, False, bottle_neck, momentum, eps, use_global_stats, (name + ('_unit%d' % (i + 1))), lr_mult, reuse)
return x
def _Resnet(x, num_units, num_filters, bottle_neck, momentum=0.9, eps=1e-05, use_global_stats=False, bn_data=True, strides=(1, 2, 2, 2), dilates=(1, 1, 1, 1), name=None, lr_mult=1, reuse=None):
name = ('' if (name is None) else name)
x = ResStem(x, num_filters[0], momentum, eps, use_global_stats, bn_data, name, lr_mult, reuse)
for i in range(4):
x = ResBlock(x, num_units[i], num_filters[(i + 1)], strides[i], dilates[i], bottle_neck, momentum, eps, use_global_stats, (name + ('stage%d' % (i + 1))), lr_mult, reuse)
x = BNRelu(x, fix_gamma=False, momentum=momentum, eps=eps, use_global_stats=use_global_stats, name=(name + 'bn1'), lr_mult=lr_mult, reuse=reuse)
return x
def resnet18(x, momentum=0.9, eps=1e-05, use_global_stats=False, name=None, lr_mult=1, reuse=None):
name = ('' if (name is None) else name)
x = _Resnet(x, (2, 2, 2, 2), (64, 64, 128, 256, 512), False, momentum, eps, use_global_stats, name=name, lr_mult=lr_mult, reuse=reuse)
x = Pool(x, (1, 1), pool_type='avg', global_pool=True)
x = Flatten(x)
return x
def resnet34(x, momentum=0.9, eps=1e-05, use_global_stats=False, name=None, lr_mult=1, reuse=None):
name = ('' if (name is None) else name)
x = _Resnet(x, (3, 4, 6, 3), (64, 64, 128, 256, 512), False, momentum, eps, use_global_stats, name=name, lr_mult=lr_mult, reuse=reuse)
x = Pool(x, (1, 1), pool_type='avg', global_pool=True)
x = Flatten(x)
return x
def resnet50(x, momentum=0.9, eps=1e-05, use_global_stats=False, name=None, lr_mult=1, reuse=None):
name = ('' if (name is None) else name)
x = _Resnet(x, (3, 4, 6, 3), (64, 256, 512, 1024, 2048), True, momentum, eps, use_global_stats, name=name, lr_mult=lr_mult, reuse=reuse)
x = Pool(x, (1, 1), pool_type='avg', global_pool=True)
x = Flatten(x)
return x
def resnet101(x, momentum=0.9, eps=1e-05, use_global_stats=False, name=None, lr_mult=1, reuse=None):
name = ('' if (name is None) else name)
x = _Resnet(x, (3, 4, 23, 3), (64, 256, 512, 1024, 2048), True, momentum, eps, use_global_stats, name=name, lr_mult=lr_mult, reuse=reuse)
x = Pool(x, (1, 1), pool_type='avg', global_pool=True)
x = Flatten(x)
return x
def resnet101_largefov(x, num_cls, momentum=0.9, eps=1e-05, use_global_stats=False, name=None, lr_mult=10, reuse=None):
name = ('' if (name is None) else name)
x = _Resnet(x, (3, 4, 23, 3), (64, 256, 512, 1024, 2048), True, momentum, eps, use_global_stats, strides=(1, 2, 1, 1), dilates=(1, 1, 2, 4), name=name, lr_mult=1, reuse=reuse)
x = Conv(x, num_cls, kernel=(3, 3), dilate=(12, 12), pad=(12, 12), name=(name + 'fc1'), lr_mult=lr_mult, reuse=reuse)
return x
def resnet101_aspp(x, num_cls, momentum=0.9, eps=1e-05, use_global_stats=False, name=None, lr_mult=10, reuse=None):
name = ('' if (name is None) else name)
x = _Resnet(x, (3, 4, 23, 3), (64, 256, 512, 1024, 2048), True, momentum, eps, use_global_stats, strides=(1, 2, 1, 1), dilates=(1, 1, 2, 4), name=name, lr_mult=1, reuse=reuse)
x_aspp = []
for d in (6, 12, 18, 24):
x_aspp.append(Conv(x, num_cls, kernel=(3, 3), dilate=(d, d), pad=(d, d), name=(name + ('fc1_aspp%d' % d)), lr_mult=lr_mult, reuse=reuse))
x = sum(x_aspp)
return x
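
# Shape sketch: with strides (1, 2, 1, 1) and dilations (1, 1, 2, 4) the
# backbone keeps an output stride of 8, so for the classic DeepLab crop of
# 321x321 the score map should come out at 41x41:
#
#   sym = resnet101_largefov(mx.sym.Variable('data'), 21)
#   (_, out_shapes, _) = sym.infer_shape(data=(1, 3, 321, 321))
#   # expected: out_shapes[0] == (1, 21, 41, 41)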
def ResStemV1(data, num_filter, momentum=0.9, eps=1e-05, use_global_stats=False, bn_data=True, name=None, lr_mult=1, reuse=None):
name = ('' if (name is None) else name)
if bn_data:
x = BN(data, fix_gamma=True, momentum=momentum, eps=eps, use_global_stats=use_global_stats, name=(name + 'bn_data'), reuse=reuse)
else:
x = data
x = Conv(x, num_filter=num_filter, kernel=(7, 7), stride=(2, 2), pad=(3, 3), no_bias=True, name=(name + 'conv0'), lr_mult=lr_mult, reuse=reuse)
x = BN(x, fix_gamma=False, momentum=momentum, eps=eps, use_global_stats=use_global_stats, name=(name + 'bn0'), lr_mult=lr_mult, reuse=reuse)
x = Relu(x, name=(name + 'relu0'))
x = Pool(x, kernel=(3, 3), stride=(2, 2), pad=(1, 1), pool_type='max', name=(name + 'pool0'))
return x
def ResUnitV1(data, num_filter, stride, dilate, projection, bottle_neck, momentum=0.9, eps=1e-05, use_global_stats=False, name=None, lr_mult=1, reuse=None):
assert (name is not None)
if projection:
shortcut = Conv(data, num_filter=num_filter, kernel=(1, 1), stride=((stride,) * 2), pad=(0, 0), no_bias=True, name=(name + '_conv0'), lr_mult=lr_mult, reuse=reuse)
shortcut = BN(shortcut, fix_gamma=False, momentum=momentum, eps=eps, use_global_stats=use_global_stats, name=(name + '_bn0'), lr_mult=lr_mult, reuse=reuse)
else:
shortcut = data
if bottle_neck:
x = Conv(data, num_filter=int((num_filter / 4.0)), kernel=(1, 1), stride=((stride,) * 2), pad=(0, 0), no_bias=True, name=(name + '_conv1'), lr_mult=lr_mult, reuse=reuse)
x = BNRelu(x, fix_gamma=False, momentum=momentum, eps=eps, use_global_stats=use_global_stats, name=(name + '_bn1'), lr_mult=lr_mult, reuse=reuse)
x = Conv(x, num_filter=int((num_filter / 4.0)), kernel=(3, 3), stride=(1, 1), pad=((dilate,) * 2), dilate=((dilate,) * 2), no_bias=True, name=(name + '_conv2'), lr_mult=lr_mult, reuse=reuse)
x = BNRelu(x, fix_gamma=False, momentum=momentum, eps=eps, use_global_stats=use_global_stats, name=(name + '_bn2'), lr_mult=lr_mult, reuse=reuse)
x = Conv(x, num_filter=num_filter, kernel=(1, 1), stride=(1, 1), pad=(0, 0), no_bias=True, name=(name + '_conv3'), lr_mult=lr_mult, reuse=reuse)
x = BN(x, fix_gamma=False, momentum=momentum, eps=eps, use_global_stats=use_global_stats, name=(name + '_bn3'), lr_mult=lr_mult, reuse=reuse)
else:
raise NotImplementedError
x = (x + shortcut)
x = Relu(x)
return x
def ResBlockV1(data, num_unit, num_filter, stride, dilate, bottle_neck, momentum=0.9, eps=1e-05, use_global_stats=False, name=None, lr_mult=1, reuse=None):
assert (name is not None)
x = ResUnitV1(data, num_filter, stride, dilate, True, bottle_neck, momentum, eps, use_global_stats, (name + '_unit1'), lr_mult, reuse)
for i in range(1, num_unit):
x = ResUnitV1(x, num_filter, 1, dilate, False, bottle_neck, momentum, eps, use_global_stats, (name + ('_unit%d' % (i + 1))), lr_mult, reuse)
return x
def _Resnet(x, num_units, num_filters, bottle_neck, momentum=0.9, eps=1e-05, use_global_stats=False, bn_data=True, strides=(1, 2, 2, 2), dilates=(1, 1, 1, 1), name=None, lr_mult=1, reuse=None):
name = ('' if (name is None) else name)
x = ResStemV1(x, num_filters[0], momentum, eps, use_global_stats, bn_data, name, lr_mult, reuse)
for i in range(4):
x = ResBlockV1(x, num_units[i], num_filters[(i + 1)], strides[i], dilates[i], bottle_neck, momentum, eps, use_global_stats, (name + ('stage%d' % (i + 1))), lr_mult, reuse)
return x
def vgg16(x, name=None, lr_mult=1, reuse=None):
name = ('' if (name is None) else name)
x = ConvRelu(x, 64, (3, 3), pad=(1, 1), name=(name + 'conv1_1'), lr_mult=lr_mult, reuse=reuse)
x = ConvRelu(x, 64, (3, 3), pad=(1, 1), name=(name + 'conv1_2'), lr_mult=lr_mult, reuse=reuse)
x = Pool(x, (2, 2), name=(name + 'pool1'))
x = ConvRelu(x, 128, (3, 3), pad=(1, 1), name=(name + 'conv2_1'), lr_mult=lr_mult, reuse=reuse)
x = ConvRelu(x, 128, (3, 3), pad=(1, 1), name=(name + 'conv2_2'), lr_mult=lr_mult, reuse=reuse)
x = Pool(x, (2, 2), name=(name + 'pool2'))
x = ConvRelu(x, 256, (3, 3), pad=(1, 1), name=(name + 'conv3_1'), lr_mult=lr_mult, reuse=reuse)
x = ConvRelu(x, 256, (3, 3), pad=(1, 1), name=(name + 'conv3_2'), lr_mult=lr_mult, reuse=reuse)
x = ConvRelu(x, 256, (3, 3), pad=(1, 1), name=(name + 'conv3_3'), lr_mult=lr_mult, reuse=reuse)
x = Pool(x, (2, 2), name=(name + 'pool3'))
x = ConvRelu(x, 512, (3, 3), pad=(1, 1), name=(name + 'conv4_1'), lr_mult=lr_mult, reuse=reuse)
x = ConvRelu(x, 512, (3, 3), pad=(1, 1), name=(name + 'conv4_2'), lr_mult=lr_mult, reuse=reuse)
x = ConvRelu(x, 512, (3, 3), pad=(1, 1), name=(name + 'conv4_3'), lr_mult=lr_mult, reuse=reuse)
x = Pool(x, (2, 2), name=(name + 'pool4'))
x = ConvRelu(x, 512, (3, 3), pad=(1, 1), name=(name + 'conv5_1'), lr_mult=lr_mult, reuse=reuse)
x = ConvRelu(x, 512, (3, 3), pad=(1, 1), name=(name + 'conv5_2'), lr_mult=lr_mult, reuse=reuse)
x = ConvRelu(x, 512, (3, 3), pad=(1, 1), name=(name + 'conv5_3'), lr_mult=lr_mult, reuse=reuse)
x = Pool(x, (2, 2), name=(name + 'pool5'))
x = Flatten(x, name=(name + 'flatten'))
x = FCRelu(x, num_hidden=4096, name=(name + 'fc6'), lr_mult=lr_mult, reuse=reuse)
x = Drop(x, p=0.5, name=(name + 'drop6'))
x = FCRelu(x, num_hidden=4096, name=(name + 'fc7'), lr_mult=lr_mult, reuse=reuse)
x = Drop(x, p=0.5, name=(name + 'drop7'))
return x
def vgg16_deeplab(x, name=None, lr_mult=1, reuse=None):
name = ('' if (name is None) else name)
x = ConvRelu(x, 64, (3, 3), pad=(1, 1), name=(name + 'conv1_1'), lr_mult=lr_mult, reuse=reuse)
x = ConvRelu(x, 64, (3, 3), pad=(1, 1), name=(name + 'conv1_2'), lr_mult=lr_mult, reuse=reuse)
x = Pool(x, kernel=(3, 3), stride=(2, 2), pad=(1, 1), name=(name + 'pool1'))
x = ConvRelu(x, 128, (3, 3), pad=(1, 1), name=(name + 'conv2_1'), lr_mult=lr_mult, reuse=reuse)
x = ConvRelu(x, 128, (3, 3), pad=(1, 1), name=(name + 'conv2_2'), lr_mult=lr_mult, reuse=reuse)
x = Pool(x, kernel=(3, 3), stride=(2, 2), pad=(1, 1), name=(name + 'pool2'))
x = ConvRelu(x, 256, (3, 3), pad=(1, 1), name=(name + 'conv3_1'), lr_mult=lr_mult, reuse=reuse)
x = ConvRelu(x, 256, (3, 3), pad=(1, 1), name=(name + 'conv3_2'), lr_mult=lr_mult, reuse=reuse)
x = ConvRelu(x, 256, (3, 3), pad=(1, 1), name=(name + 'conv3_3'), lr_mult=lr_mult, reuse=reuse)
x = Pool(x, kernel=(3, 3), stride=(2, 2), pad=(1, 1), name=(name + 'pool3'))
x = ConvRelu(x, 512, (3, 3), pad=(1, 1), name=(name + 'conv4_1'), lr_mult=lr_mult, reuse=reuse)
x = ConvRelu(x, 512, (3, 3), pad=(1, 1), name=(name + 'conv4_2'), lr_mult=lr_mult, reuse=reuse)
x = ConvRelu(x, 512, (3, 3), pad=(1, 1), name=(name + 'conv4_3'), lr_mult=lr_mult, reuse=reuse)
x = Pool(x, kernel=(3, 3), stride=(1, 1), pad=(1, 1), name=(name + 'pool4'))
x = ConvRelu(x, 512, (3, 3), dilate=(2, 2), pad=(2, 2), name=(name + 'conv5_1'), lr_mult=lr_mult, reuse=reuse)
x = ConvRelu(x, 512, (3, 3), dilate=(2, 2), pad=(2, 2), name=(name + 'conv5_2'), lr_mult=lr_mult, reuse=reuse)
x = ConvRelu(x, 512, (3, 3), dilate=(2, 2), pad=(2, 2), name=(name + 'conv5_3'), lr_mult=lr_mult, reuse=reuse)
x = Pool(x, kernel=(3, 3), stride=(1, 1), pad=(1, 1), name=(name + 'pool5'))
x = Pool(x, kernel=(3, 3), stride=(1, 1), pad=(1, 1), name=(name + 'pool5a'), pool_type='avg')
return x
def vgg16_largefov(x, num_cls, name=None, lr_mult=10, reuse=None):
name = ('' if (name is None) else name)
x = vgg16_deeplab(x, name, lr_mult=1, reuse=reuse)
x = ConvRelu(x, 1024, (3, 3), dilate=(12, 12), pad=(12, 12), name=(name + 'fc6'), reuse=reuse)
x = Drop(x, 0.5, name=(name + 'drop6'))
x = ConvRelu(x, 1024, (1, 1), name=(name + 'fc7'), reuse=reuse)
x = Drop(x, 0.5, name=(name + 'drop7'))
x = Conv(x, num_cls, (1, 1), name=(name + 'fc8'), lr_mult=lr_mult, reuse=reuse)
return x
def vgg16_aspp(x, num_cls, name=None, lr_mult=10, reuse=None):
name = ('' if (name is None) else name)
x_backbone = vgg16_deeplab(x, name, lr_mult=1, reuse=reuse)
x_aspp = []
for d in (6, 12, 18, 24):
x = ConvRelu(x_backbone, 1024, (3, 3), dilate=(d, d), pad=(d, d), name=(name + ('fc6_aspp%d' % d)), reuse=reuse)
x = Drop(x, 0.5)
x = ConvRelu(x, 1024, (1, 1), name=(name + ('fc7_aspp%d' % d)), reuse=reuse)
x = Drop(x, 0.5)
x = Conv(x, num_cls, (1, 1), name=(name + ('fc8_aspp%d' % d)), lr_mult=lr_mult, reuse=reuse)
x_aspp.append(x)
x = sum(x_aspp)
return x
def wResStem(data, num_filter, momentum=0.9, eps=1e-05, use_global_stats=False, bn_data=True, name=None, lr_mult=1, reuse=None):
name = ('' if (name is None) else name)
if bn_data:
x = BN(data, fix_gamma=True, momentum=momentum, eps=eps, use_global_stats=use_global_stats, name=(name + 'bn_data'), reuse=reuse)
else:
x = data
x = Conv(x, num_filter=num_filter, kernel=(3, 3), stride=(1, 1), pad=(1, 1), no_bias=True, name=(name + 'conv1a'), lr_mult=lr_mult, reuse=reuse)
return x
def wResUnit(data, num_filter, stride, dilate, projection, bottle_neck, dropout=0, momentum=0.9, eps=1e-05, use_global_stats=False, name=None, lr_mult=1, reuse=None, **kwargs):
assert (name is not None)
x = BNRelu(data, fix_gamma=False, momentum=momentum, eps=eps, use_global_stats=use_global_stats, name=(('bn' + name) + '_branch2a'), lr_mult=lr_mult, reuse=reuse)
if projection:
shortcut = Conv(x, num_filter=num_filter, kernel=(1, 1), stride=((stride,) * 2), pad=(0, 0), no_bias=True, name=(('res' + name) + '_branch1'), lr_mult=lr_mult, reuse=reuse)
else:
shortcut = data
if bottle_neck:
x = Conv(x, num_filter=int((num_filter / 4.0)), kernel=(1, 1), stride=(1, 1), pad=(0, 0), no_bias=True, name=(('res' + name) + '_branch2a'), lr_mult=lr_mult, reuse=reuse)
x = BNRelu(x, fix_gamma=False, momentum=momentum, eps=eps, use_global_stats=use_global_stats, name=(('bn' + name) + '_branch2b1'), lr_mult=lr_mult, reuse=reuse)
if (dropout > 0):
x = Drop(x, p=dropout)
x = Conv(x, num_filter=int((num_filter / 2.0)), kernel=(3, 3), stride=((stride,) * 2), pad=((dilate,) * 2), dilate=((dilate,) * 2), no_bias=True, name=(('res' + name) + '_branch2b1'), lr_mult=lr_mult, reuse=reuse)
x = BNRelu(x, fix_gamma=False, momentum=momentum, eps=eps, use_global_stats=use_global_stats, name=(('bn' + name) + '_branch2b2'), lr_mult=lr_mult, reuse=reuse)
if (dropout > 0):
x = Drop(x, p=dropout)
x = Conv(x, num_filter=num_filter, kernel=(1, 1), stride=(1, 1), pad=(0, 0), no_bias=True, name=(('res' + name) + '_branch2b2'), lr_mult=lr_mult, reuse=reuse)
else:
mid_filter = kwargs.get('mid_filter', num_filter)
fst_dilate = kwargs.get('fst_dilate', dilate)
x = Conv(x, num_filter=mid_filter, kernel=(3, 3), stride=((stride,) * 2), pad=((fst_dilate,) * 2), dilate=((fst_dilate,) * 2), no_bias=True, name=(('res' + name) + '_branch2a'), lr_mult=lr_mult, reuse=reuse)
x = BNRelu(x, fix_gamma=False, momentum=momentum, eps=eps, use_global_stats=use_global_stats, name=(('bn' + name) + '_branch2b1'), lr_mult=lr_mult, reuse=reuse)
x = Conv(x, num_filter=num_filter, kernel=(3, 3), stride=(1, 1), pad=((dilate,) * 2), dilate=((dilate,) * 2), no_bias=True, name=(('res' + name) + '_branch2b1'), lr_mult=lr_mult, reuse=reuse)
x = (x + shortcut)
return x
def wResBlock(data, num_unit, num_filter, stride, dilate, bottle_neck, dropout=0, momentum=0.9, eps=1e-05, use_global_stats=False, name=None, lr_mult=1, reuse=None, **kwargs):
assert (name is not None)
x = wResUnit(data, num_filter, stride, dilate, True, bottle_neck, dropout, momentum, eps, use_global_stats, name=(name + 'a'), lr_mult=lr_mult, reuse=reuse, **kwargs)
for i in range(1, num_unit):
x = wResUnit(x, num_filter, 1, dilate, False, bottle_neck, dropout, momentum, eps, use_global_stats, name=(name + ('b%d' % i)), lr_mult=lr_mult, reuse=reuse, **kwargs)
return x
def wresnet38(x, momentum=0.9, eps=1e-05, use_global_stats=False, name=None, out_internals=False, lr_mult=1, reuse=None):
name = ('' if (name is None) else name)
internals = []
x = wResStem(x, 64, momentum, eps, use_global_stats, bn_data=True, name=name, lr_mult=lr_mult, reuse=reuse)
x = wResBlock(x, 3, 128, 2, 1, False, 0, momentum, eps, use_global_stats, (name + '2'), lr_mult, reuse)
x = wResBlock(x, 3, 256, 2, 1, False, 0, momentum, eps, use_global_stats, (name + '3'), lr_mult, reuse)
x = wResBlock(x, 6, 512, 2, 1, False, 0, momentum, eps, use_global_stats, (name + '4'), lr_mult, reuse)
x = wResBlock(x, 3, 1024, 1, 2, False, 0, momentum, eps, use_global_stats, (name + '5'), lr_mult, reuse, mid_filter=512, fst_dilate=1)
internals.append(x)
x = wResBlock(x, 1, 2048, 1, 4, True, 0.3, momentum, eps, use_global_stats, (name + '6'), lr_mult, reuse)
internals.append(x)
x = wResBlock(x, 1, 4096, 1, 4, True, 0.5, momentum, eps, use_global_stats, (name + '7'), lr_mult, reuse)
internals.append(x)
x = BNRelu(x, fix_gamma=False, momentum=momentum, eps=eps, use_global_stats=use_global_stats, name=(name + 'bn7'), lr_mult=lr_mult, reuse=reuse)
if out_internals:
return (x, internals)
else:
return x
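
# Shape note: the stem of wresnet38 does not stride, blocks 2-4 each stride
# by 2 (output stride 8), and blocks 5-7 trade further striding for dilation
# (2, then 4), matching its common use as a dilated segmentation backbone.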
def MultiScale(scales):
scales = [s for s in scales if (s != 1)]
def func_wrapper(model_func):
def model_func_ms(*args, **kwargs):
assert (len(args) > 0), 'Cannot find input variable'
input_var = args[0]
args = args[1:]
out_0 = model_func(*((input_var,) + args), **kwargs)
assert (len(out_0) == 1), 'Only single output implemented'
reuse = kwargs.get('reuse', None)
if (reuse is None):
reuse = out_0
if ('reuse' in kwargs):
del kwargs['reuse']
is_tensor4d = (len(out_0.infer_shape(data=(1, 3, 100, 100))[1][0]) == 4)
out_ms = [out_0]
for scale in scales:
input_var_s = mx.sym.Custom(input_var, scale=scale, op_type='BilinearScale')
out_s = model_func(*((input_var_s,) + args), reuse=reuse, **kwargs)
if is_tensor4d:
out_s = mx.sym.Custom(out_s, out_0, op_type='BilinearScaleLike')
out_ms.append(out_s)
out_max = out_ms[0]
for out_s in out_ms[1:]:
out_max = mx.sym.maximum(out_max, out_s)
out_ms.append(out_max)
return mx.sym.Group(out_ms)
return model_func_ms
return func_wrapper
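
# A minimal usage sketch of the MultiScale decorator, assuming a model
# function (here hypothetical) whose single input variable is named 'data'
# and that accepts a reuse keyword:
#
#   @MultiScale((0.5, 0.75, 1.0))
#   def my_head(data, num_cls, reuse=None):
#       return vgg16_largefov(data, num_cls, reuse=reuse)
#
#   sym = my_head(mx.sym.Variable('data'), 21)
#   # scale 1 runs first and its weights are shared with the rescaled
#   # branches; sym groups [out@1, out@0.5, out@0.75, elementwise max].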
def resnet50_largefov(x, num_cls, momentum=0.9, eps=1e-05, use_global_stats=False, name=None, lr_mult=10, reuse=None):
name = ('' if (name is None) else name)
x = _Resnet(x, (3, 4, 6, 3), (64, 256, 512, 1024, 2048), True, momentum, eps, use_global_stats, strides=(1, 2, 1, 1), dilates=(1, 1, 2, 4), name=name, lr_mult=1, reuse=reuse)
x = Conv(x, num_cls, kernel=(3, 3), dilate=(12, 12), pad=(12, 12), name=(name + 'fc1'), lr_mult=lr_mult, reuse=reuse)
return x
|
def resnet101_largefov(x, num_cls, momentum=0.9, eps=1e-05, use_global_stats=False, name=None, lr_mult=10, reuse=None):
name = ('' if (name is None) else name)
x = _Resnet(x, (3, 4, 23, 3), (64, 256, 512, 1024, 2048), True, momentum, eps, use_global_stats, strides=(1, 2, 1, 1), dilates=(1, 1, 2, 4), name=name, lr_mult=1, reuse=reuse)
x = Conv(x, num_cls, kernel=(3, 3), dilate=(12, 12), pad=(12, 12), name=(name + 'fc1'), lr_mult=lr_mult, reuse=reuse)
return x
|
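# Usage sketch (illustrative, not from the original source): build the
# resnet50 feature symbol and infer its output shape, assuming the
# Conv/Pool/BNRelu wrappers above return mx.sym symbols.
data = mx.sym.Variable('data')
feat = resnet50(data, use_global_stats=True)
(arg_shapes, out_shapes, aux_shapes) = feat.infer_shape(data=(1, 3, 224, 224))
print(out_shapes)
|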
class _VOC_proto(object):
    @staticmethod
    def _get_palette():
        # Standard PASCAL VOC colormap: bit j of each channel comes from the
        # label index, consumed 3 bits at a time, MSB first. Rows are stored
        # as (b, g, r) so the palette can be used directly with cv2 (BGR).
        def bitget(val, idx):
            return ((val & (1 << idx)) > 0)
        cmap = []
        for i in range(256):
            (r, g, b) = (0, 0, 0)
            idx = i
            for j in range(8):
                r = (r | (bitget(idx, 0) << (7 - j)))
                g = (g | (bitget(idx, 1) << (7 - j)))
                b = (b | (bitget(idx, 2) << (7 - j)))
                idx = (idx >> 3)
            cmap.append((b, g, r))
        return np.array(cmap).astype(np.uint8)
def __init__(self):
self.categories = ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor']
self.palette = self._get_palette()
def name2index(self, name):
return self.categories.index(name)
def index2name(self, index):
return self.categories[index]
def get_annotation(self, filename, use_diff=False):
tree = ET.parse(filename)
root = tree.getroot()
annotation = []
tmp_annotation = []
for obj in root.findall('object'):
cat = obj.find('name').text
non_diff = (1 - int(obj.find('difficult').text))
if (use_diff or non_diff):
annotation.append(self.name2index(cat))
else:
tmp_annotation.append(self.name2index(cat))
annotation = list(set(annotation))
if (len(annotation) == 0):
annotation += list(set(tmp_annotation))
annotation.sort()
return annotation
|
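# Usage sketch (illustrative): colorize an index label map with the VOC
# palette. Palette rows are stored as (b, g, r), so the result is BGR and
# can be written directly with cv2; `label` is a hypothetical HxW array.
voc = _VOC_proto()
label = np.zeros((10, 10), np.uint8)
label[2:8, 2:8] = (voc.name2index('cat') + 1)
cv2.imwrite('label_color.png', voc.palette[label])
|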
def imwrite(filename, image):
    dirname = os.path.dirname(filename)
    if dirname:
        # exist_ok avoids the race between the existence check and makedirs
        # without silently swallowing real errors like the old bare except.
        os.makedirs(dirname, exist_ok=True)
    cv2.imwrite(filename, image)
|
def npsave(filename, data):
    dirname = os.path.dirname(filename)
    if dirname:
        os.makedirs(dirname, exist_ok=True)
    np.save(filename, data)
|
def pkldump(filename, data):
    dirname = os.path.dirname(filename)
    if dirname:
        os.makedirs(dirname, exist_ok=True)
    with open(filename, 'wb') as f:
        pickle.dump(data, f)
|
def imhstack(images, height=None):
images = as_list(images)
images = list(map(image2C3, images))
if (height is None):
height = np.array([img.shape[0] for img in images]).max()
images = [resize_height(img, height) for img in images]
if (len(images) == 1):
return images[0]
    # 3px white divider between images; drop the trailing one
    images = [[img, np.full((height, 3, 3), 255, np.uint8)] for img in images]
    images = np.hstack(sum(images, [])[:(- 1)])
return images
|
def imvstack(images, width=None):
images = as_list(images)
images = list(map(image2C3, images))
if (width is None):
width = np.array([img.shape[1] for img in images]).max()
images = [resize_width(img, width) for img in images]
if (len(images) == 1):
return images[0]
    # 3px white divider between images; drop the trailing one
    images = [[img, np.full((3, width, 3), 255, np.uint8)] for img in images]
    images = np.vstack(sum(images, [])[:(- 1)])
return images
|
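# Usage sketch (illustrative): compose four dummy tiles into a 2x2 grid
# with the white dividers inserted by the stacking helpers above.
tiles = [np.full((80, 120, 3), c, np.uint8) for c in (0, 64, 128, 192)]
grid = imvstack([imhstack(tiles[:2]), imhstack(tiles[2:])])
cv2.imwrite('grid.png', grid)
|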
def as_list(data):
if (not isinstance(data, (list, tuple))):
return [data]
return list(data)
|
def image2C3(image):
if (image.ndim == 3):
return image
if (image.ndim == 2):
return np.repeat(image[(..., np.newaxis)], 3, axis=2)
raise ValueError('image.ndim = {}, invalid image.'.format(image.ndim))
|
def resize_height(image, height):
if (image.shape[0] == height):
return image
(h, w) = image.shape[:2]
width = ((height * w) // h)
image = cv2.resize(image, (width, height))
return image
|
def resize_width(image, width):
if (image.shape[1] == width):
return image
(h, w) = image.shape[:2]
height = ((width * h) // w)
image = cv2.resize(image, (width, height))
return image
|
def imtext(image, text, space=(3, 3), color=(0, 0, 0), thickness=1, fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1.0):
    assert isinstance(text, str), type(text)
    # getTextSize returns ((width, height), baseline); use the text height,
    # not the whole tuple, to place the baseline below the top margin.
    (size, baseline) = cv2.getTextSize(text, fontFace, fontScale, thickness)
    image = cv2.putText(image, text, (space[0], (size[1] + space[1])), fontFace, fontScale, color, thickness)
    return image
|
def setGPU(gpus):
len_gpus = len(gpus.split(','))
os.environ['CUDA_VISIBLE_DEVICES'] = gpus
gpus = ','.join(map(str, range(len_gpus)))
return gpus
|
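# Usage sketch (illustrative): restrict the process to physical GPUs 2 and
# 3; setGPU returns the remapped ids ('0,1') to build contexts from.
gpus = setGPU('2,3')
ctx = [mx.gpu(int(i)) for i in gpus.split(',')]
|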
def getTime():
return datetime.now().strftime('%m-%d %H:%M:%S')
|
class Timer(object):
curr_record = None
prev_record = None
@classmethod
def record(cls):
cls.prev_record = cls.curr_record
cls.curr_record = time.time()
@classmethod
def interval(cls):
if (cls.prev_record is None):
return 0
return (cls.curr_record - cls.prev_record)
|
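# Usage sketch (illustrative): Timer keeps class-level timestamps, so one
# record() per iteration makes interval() return the previous iteration's
# wall time.
Timer.record()
time.sleep(0.1)
Timer.record()
print('interval: {:.3f}s'.format(Timer.interval()))
|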
def wrapColor(string, color):
try:
header = {'red': '\x1b[91m', 'green': '\x1b[92m', 'yellow': '\x1b[93m', 'blue': '\x1b[94m', 'purple': '\x1b[95m', 'cyan': '\x1b[96m', 'darkcyan': '\x1b[36m', 'bold': '\x1b[1m', 'underline': '\x1b[4m'}[color.lower()]
except KeyError:
raise ValueError('Unknown color: {}'.format(color))
return ((header + string) + '\x1b[0m')
|
def info(logger, msg, color=None):
msg = ('[{}]'.format(getTime()) + msg)
if (logger is not None):
logger.info(msg)
if (color is not None):
msg = wrapColor(msg, color)
print(msg)
|
def summaryArgs(logger, args, color=None):
if isinstance(args, ModuleType):
args = vars(args)
keys = [key for key in args.keys() if (key[:2] != '__')]
keys.sort()
length = max([len(x) for x in keys])
msg = [(('{:<' + str(length)) + '}: {}').format(k, args[k]) for k in keys]
msg = ('\n' + '\n'.join(msg))
info(logger, msg, color)
|
def loadParams(filename):
data = mx.nd.load(filename)
(arg_params, aux_params) = ({}, {})
for (name, value) in data.items():
if (name[:3] == 'arg'):
arg_params[name[4:]] = value
elif (name[:3] == 'aux'):
aux_params[name[4:]] = value
if (len(arg_params) == 0):
arg_params = None
if (len(aux_params) == 0):
aux_params = None
return (arg_params, aux_params)
|
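# Usage sketch (illustrative, hypothetical filename): load a checkpoint
# saved with the 'arg:'/'aux:' key convention used by mx.mod.Module.
(arg_params, aux_params) = loadParams('model-0010.params')
|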
class SaveParams(object):
def __init__(self, model, snapshot, model_name, num_save=5):
self.model = model
self.snapshot = snapshot
self.model_name = model_name
self.num_save = num_save
self.save_params = []
def save(self, n_epoch):
self.save_params += [os.path.join(self.snapshot, '{}-{:04d}.params'.format(self.model_name, n_epoch)), os.path.join(self.snapshot, '{}-{:04d}.states'.format(self.model_name, n_epoch))]
self.model.save_params(self.save_params[(- 2)])
self.model.save_optimizer_states(self.save_params[(- 1)])
if (len(self.save_params) > (2 * self.num_save)):
call(['rm', self.save_params[0], self.save_params[1]])
self.save_params = self.save_params[2:]
return self.save_params[(- 2):]
def __call__(self, n_epoch):
return self.save(n_epoch)
|
def getLogger(snapshot, model_name):
if (not os.path.exists(snapshot)):
os.makedirs(snapshot)
logging.basicConfig(filename=os.path.join(snapshot, (model_name + '.log')), level=logging.INFO)
logger = logging.getLogger()
return logger
|
class LrScheduler(object):
def __init__(self, method, init_lr, kwargs):
self.method = method
self.init_lr = init_lr
if (method == 'step'):
self.step_list = kwargs['step_list']
self.factor = kwargs['factor']
self.get = self._step
elif (method == 'poly'):
self.num_epoch = kwargs['num_epoch']
self.power = kwargs['power']
self.get = self._poly
elif (method == 'ramp'):
self.ramp_up = kwargs['ramp_up']
self.ramp_down = kwargs['ramp_down']
self.num_epoch = kwargs['num_epoch']
self.scale = kwargs['scale']
self.get = self._ramp
else:
raise ValueError(method)
def _step(self, current_epoch):
lr = self.init_lr
step_list = [x for x in self.step_list]
while ((len(step_list) > 0) and (current_epoch >= step_list[0])):
lr *= self.factor
del step_list[0]
return lr
def _poly(self, current_epoch):
lr = (self.init_lr * ((1.0 - (float(current_epoch) / self.num_epoch)) ** self.power))
return lr
def _ramp(self, current_epoch):
if (current_epoch < self.ramp_up):
decay = np.exp(((- ((1 - (float(current_epoch) / self.ramp_up)) ** 2)) * self.scale))
elif (current_epoch > (self.num_epoch - self.ramp_down)):
decay = np.exp(((- ((float(((current_epoch + self.ramp_down) - self.num_epoch)) / self.ramp_down) ** 2)) * self.scale))
else:
decay = 1.0
lr = (self.init_lr * decay)
return lr
|
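# Usage sketch (illustrative): a polynomial decay schedule over 30 epochs;
# get(epoch) returns init_lr * (1 - epoch / num_epoch) ** power.
scheduler = LrScheduler('poly', 0.01, {'num_epoch': 30, 'power': 0.9})
for epoch in (0, 10, 20, 29):
    print(epoch, scheduler.get(epoch))
|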
class GradBuffer(object):
def __init__(self, model):
self.model = model
self.cache = None
    def write(self):
        # Snapshot the module's current gradient arrays into the buffer.
if (self.cache is None):
self.cache = [[(None if (g is None) else g.copyto(g.context)) for g in g_list] for g_list in self.model._exec_group.grad_arrays]
else:
for (gs_src, gs_dst) in zip(self.model._exec_group.grad_arrays, self.cache):
for (g_src, g_dst) in zip(gs_src, gs_dst):
if (g_src is None):
continue
g_src.copyto(g_dst)
    def read_add(self):
        # Accumulate the buffered gradients back into the module's gradients.
assert (self.cache is not None)
for (gs_src, gs_dst) in zip(self.model._exec_group.grad_arrays, self.cache):
for (g_src, g_dst) in zip(gs_src, gs_dst):
if (g_src is None):
continue
g_src += g_dst
|
def initNormal(mean, std, name, shape):
if name.endswith('_weight'):
return mx.nd.normal(mean, std, shape)
if name.endswith('_bias'):
return mx.nd.zeros(shape)
if name.endswith('_gamma'):
return mx.nd.ones(shape)
if name.endswith('_beta'):
return mx.nd.zeros(shape)
if name.endswith('_moving_mean'):
return mx.nd.zeros(shape)
if name.endswith('_moving_var'):
return mx.nd.ones(shape)
raise ValueError('Unknown name type for `{}`'.format(name))
|
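# Usage sketch (illustrative): initNormal dispatches on the parameter name
# suffix, e.g. Gaussian weights and zero biases for a hypothetical conv.
weight = initNormal(0, 0.01, 'conv1_weight', (64, 3, 7, 7))
bias = initNormal(0, 0.01, 'conv1_bias', (64,))
|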
def checkParams(mod, arg_params, aux_params, auto_fix=True, initializer=mx.init.Normal(0.01), logger=None):
arg_params = ({} if (arg_params is None) else arg_params)
aux_params = ({} if (aux_params is None) else aux_params)
arg_shapes = {name: array[0].shape for (name, array) in zip(mod._exec_group.param_names, mod._exec_group.param_arrays)}
aux_shapes = {name: array[0].shape for (name, array) in zip(mod._exec_group.aux_names, mod._exec_group.aux_arrays)}
(extra_arg_params, extra_aux_params) = ([], [])
for name in arg_params.keys():
if (name not in arg_shapes):
extra_arg_params.append(name)
for name in aux_params.keys():
if (name not in aux_shapes):
extra_aux_params.append(name)
(miss_arg_params, miss_aux_params) = ([], [])
for name in arg_shapes.keys():
if (name not in arg_params):
miss_arg_params.append(name)
for name in aux_shapes.keys():
if (name not in aux_params):
miss_aux_params.append(name)
(mismatch_arg_params, mismatch_aux_params) = ([], [])
for name in arg_params.keys():
if ((name in arg_shapes) and (arg_shapes[name] != arg_params[name].shape)):
mismatch_arg_params.append(name)
for name in aux_params.keys():
if ((name in aux_shapes) and (aux_shapes[name] != aux_params[name].shape)):
mismatch_aux_params.append(name)
for name in extra_arg_params:
info(logger, 'Find extra arg_params: {}: given {}'.format(name, arg_params[name].shape), 'red')
for name in extra_aux_params:
info(logger, 'Find extra aux_params: {}: given {}'.format(name, aux_params[name].shape), 'red')
for name in miss_arg_params:
info(logger, 'Find missing arg_params: {}: target {}'.format(name, arg_shapes[name]), 'red')
for name in miss_aux_params:
info(logger, 'Find missing aux_params: {}: target {}'.format(name, aux_shapes[name]), 'red')
for name in mismatch_arg_params:
info(logger, 'Find mismatch arg_params: {}: given {}, target {}'.format(name, arg_params[name].shape, arg_shapes[name]), 'red')
for name in mismatch_aux_params:
info(logger, 'Find mismatch aux_params: {}: given {}, target {}'.format(name, aux_params[name].shape, aux_shapes[name]), 'red')
if (len((((((extra_arg_params + extra_aux_params) + miss_arg_params) + miss_aux_params) + mismatch_arg_params) + mismatch_aux_params)) == 0):
return (arg_params, aux_params)
if (not auto_fix):
info(logger, 'Bad params not fixed.', 'red')
return (arg_params, aux_params)
for name in (extra_arg_params + mismatch_arg_params):
del arg_params[name]
for name in (extra_aux_params + mismatch_aux_params):
del aux_params[name]
attrs = mod._symbol.attr_dict()
for name in (miss_arg_params + mismatch_arg_params):
arg_params[name] = mx.nd.zeros(arg_shapes[name])
try:
initializer(mx.init.InitDesc(name, attrs.get(name, None)), arg_params[name])
except ValueError:
initializer(name, arg_params[name])
for name in (miss_aux_params + mismatch_aux_params):
aux_params[name] = mx.nd.zeros(aux_shapes[name])
try:
initializer(mx.init.InitDesc(name, attrs.get(name, None)), aux_params[name])
except ValueError:
initializer(name, aux_params[name])
info(logger, 'Bad params auto fixed successfully.', 'red')
return (arg_params, aux_params)
|
def run_eval(data_list, pred_root, gt_root, num_cls):
def compute_confusion_matrix(names, label_root, pred_root, num_cls, num_threads=16, arr_=None):
if (num_threads == 1):
mat = np.zeros((num_cls, num_cls), np.float32)
for name in names:
gt = cv2.imread(os.path.join(label_root, (name + '.png')), 0).astype(np.int32)
pred = cv2.imread(os.path.join(pred_root, (name + '.png')), 0).astype(np.int32)
if (gt.shape != pred.shape):
info(None, 'NAME {}, gt.shape != pred.shape: [{} vs. {}]'.format(name, gt.shape, pred.shape), 'red')
continue
                valid = ((gt < num_cls) & (pred < num_cls))
                mat += np.bincount(((gt[valid] * num_cls) + pred[valid]), minlength=(num_cls ** 2)).reshape(num_cls, (- 1))
if (arr_ is not None):
arr_mat = np.frombuffer(arr_.get_obj(), np.float32)
arr_mat += mat.ravel()
return mat
else:
workload = np.full((num_threads,), (len(names) // num_threads), np.int32)
if (workload.sum() < len(names)):
workload[:(len(names) - workload.sum())] += 1
workload = np.cumsum(np.hstack([0, workload]))
names_split = [names[i:j] for (i, j) in zip(workload[:(- 1)], workload[1:])]
arr_ = mp.Array('f', np.zeros(((num_cls * num_cls),), np.float32))
mat = np.frombuffer(arr_.get_obj(), np.float32).reshape(num_cls, (- 1))
jobs = [mp.Process(target=compute_confusion_matrix, args=(_names, label_root, pred_root, num_cls, 1, arr_)) for _names in names_split]
            for job in jobs:
                job.start()
            for job in jobs:
                job.join()
return mat.copy()
def compute_eval_results(confmat):
iou = (np.diag(confmat) / np.maximum(((confmat.sum(axis=0) + confmat.sum(axis=1)) - np.diag(confmat)), 1e-10))
return iou
with open(data_list) as f:
names = [x.strip() for x in f.readlines()]
confmat = compute_confusion_matrix(names, gt_root, pred_root, num_cls)
iou = compute_eval_results(confmat)
msg = 'mIOU: {}\n{}\n\n'.format(iou.mean(), iou)
print(msg)
|
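# Usage sketch (illustrative, hypothetical paths): evaluate predicted VOC
# masks against ground truth over the 21 classes (background + 20).
run_eval('data/val.txt', 'snapshot/results', 'VOC2012/SegmentationClass', 21)
|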
def compute_iou(names, num_cls, target_root, gt_root, num_threads=16, arr_=None):
_compute_iou = (lambda x: (np.diag(x) / (((x.sum(axis=0) + x.sum(axis=1)) - np.diag(x)) + 1e-10)))
if isinstance(names, str):
with open(names) as f:
names = [name.strip() for name in f.readlines()]
if (num_threads == 1):
mat = np.zeros((num_cls, num_cls), np.float32)
for name in names:
gt = cv2.imread(os.path.join(gt_root, (name + '.png')), 0).astype(np.int32)
pred = cv2.imread(os.path.join(target_root, (name + '.png')), 0).astype(np.int32)
if (gt.shape != pred.shape):
info(None, 'Name {}, gt.shape != pred.shape: [{} vs. {}]'.format(name, gt.shape, pred.shape))
continue
valid = ((gt < num_cls) & (pred < num_cls))
mat += np.bincount(((gt[valid] * num_cls) + pred[valid]), minlength=(num_cls ** 2)).reshape(num_cls, (- 1))
if (arr_ is not None):
arr_mat = np.frombuffer(arr_.get_obj(), np.float32)
arr_mat += mat.ravel()
else:
return _compute_iou(mat.copy())
else:
workload = np.full((num_threads,), (len(names) // num_threads), np.int32)
if (workload.sum() < len(names)):
workload[:(len(names) - workload.sum())] += 1
workload = np.cumsum(np.hstack([0, workload]))
names_split = [names[i:j] for (i, j) in zip(workload[:(- 1)], workload[1:])]
arr_ = mp.Array('f', np.zeros(((num_cls * num_cls),), np.float32))
mat = np.frombuffer(arr_.get_obj(), np.float32).reshape(num_cls, (- 1))
jobs = [mp.Process(target=compute_iou, args=(_names, num_cls, target_root, gt_root, 1, arr_)) for _names in names_split]
        for job in jobs:
            job.start()
        for job in jobs:
            job.join()
return _compute_iou(mat.copy())
|
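# Worked example (illustrative): per-class IoU from a confusion matrix
# mat[gt, pred] is diag / (row_sum + col_sum - diag); for the 2-class
# matrix below the foreground IoU is 8 / (9 + 10 - 8) = 8 / 11 ~ 0.727.
confmat = np.array([[90.0, 2.0], [1.0, 8.0]])
iou = (np.diag(confmat) / (((confmat.sum(axis=0) + confmat.sum(axis=1)) - np.diag(confmat)) + 1e-10))
print(iou)
|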