code stringlengths 17 6.64M |
|---|
class BoxAnnotatorOHEMOperator(mx.operator.CustomOp):
    """Online Hard Example Mining (OHEM) for RoI training targets.

    Ranks RoIs by combined classification + bbox-regression loss, keeps the
    ``roi_per_img`` hardest ones, and suppresses the rest by setting their
    label to -1 and zeroing their bbox weights.

    Inputs (in_data): cls_score, bbox_pred, labels, bbox_targets, bbox_weights.
    Outputs (out_data): labels_ohem, bbox_weights_ohem.
    """

    def __init__(self, num_classes, num_reg_classes, rm_last, roi_per_img):
        super(BoxAnnotatorOHEMOperator, self).__init__()
        # CustomOpProp forwards constructor arguments as strings; coerce the
        # flag here so the `== 1` test in forward() cannot silently fail
        # ('1' == 1 is False in Python).
        self._num_classes = num_classes
        self._num_reg_classes = num_reg_classes
        self._roi_per_img = roi_per_img
        self._rm_last = int(rm_last)

    def forward(self, is_train, req, in_data, out_data, aux):
        cls_score = in_data[0]
        bbox_pred = in_data[1]
        labels = in_data[2].asnumpy()
        bbox_targets = in_data[3]
        bbox_weights = in_data[4]

        # Per-RoI classification loss: -log p(true label).
        # 1e-14 guards against log(0). cls_score may already be softmax'd
        # depending on config.network.SOFTMAX_ENABLED.
        if config.network.SOFTMAX_ENABLED:
            per_roi_loss_cls = mx.nd.SoftmaxActivation(cls_score) + 1e-14
        else:
            per_roi_loss_cls = cls_score + 1e-14
        per_roi_loss_cls = per_roi_loss_cls.asnumpy()
        per_roi_loss_cls = per_roi_loss_cls[
            np.arange(per_roi_loss_cls.shape[0], dtype='int'),
            labels.astype('int')]
        per_roi_loss_cls = -1 * np.log(per_roi_loss_cls)
        per_roi_loss_cls = np.reshape(per_roi_loss_cls, newshape=(-1,))

        # Per-RoI bbox regression loss: smooth L1 masked by bbox_weights,
        # summed over the coordinate axis.
        per_roi_loss_bbox = bbox_weights * mx.nd.smooth_l1(
            bbox_pred - bbox_targets, scalar=1.0)
        per_roi_loss_bbox = mx.nd.sum(per_roi_loss_bbox, axis=1).asnumpy()

        # Rank RoIs by total loss (ascending argsort, reversed below so that
        # indices past the first roi_per_img entries are the easy examples).
        total_loss = per_roi_loss_cls + per_roi_loss_bbox
        top_k_per_roi_loss = np.argsort(total_loss)

        # Copy so we do not mutate the `labels` array in place.
        labels_ohem = labels.copy()
        if self._rm_last == 1:
            # Optionally drop the last class entirely (mark as ignored).
            labels_ohem[np.where(labels_ohem == (self._num_classes - 1))] = -1
        labels_ohem[top_k_per_roi_loss[::-1][self._roi_per_img:]] = -1
        bbox_weights_ohem = bbox_weights.asnumpy()
        bbox_weights_ohem[top_k_per_roi_loss[::-1][self._roi_per_img:]] = 0

        labels_ohem = mx.nd.array(labels_ohem)
        bbox_weights_ohem = mx.nd.array(bbox_weights_ohem)
        for ind, val in enumerate([labels_ohem, bbox_weights_ohem]):
            self.assign(out_data[ind], req[ind], val)

    def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
        # This op only produces training targets; no gradient flows back.
        for i in range(len(in_grad)):
            self.assign(in_grad[i], req[i], 0)
|
@mx.operator.register('BoxAnnotatorOHEM')
class BoxAnnotatorOHEMProp(mx.operator.CustomOpProp):
    """Symbol-side registration for the BoxAnnotatorOHEM custom op."""

    def __init__(self, num_classes, num_reg_classes, rm_last, roi_per_img):
        super(BoxAnnotatorOHEMProp, self).__init__(need_top_grad=False)
        # MXNet passes every custom-op argument as a string; convert them all.
        # (Previously rm_last was left unconverted, so the operator's
        # `rm_last == 1` check could never match.)
        self._num_classes = int(num_classes)
        self._num_reg_classes = int(num_reg_classes)
        self._roi_per_img = int(roi_per_img)
        self._rm_last = int(rm_last)

    def list_arguments(self):
        return ['cls_score', 'bbox_pred', 'labels', 'bbox_targets', 'bbox_weights']

    def list_outputs(self):
        return ['labels_ohem', 'bbox_weights_ohem']

    def infer_shape(self, in_shape):
        # Outputs mirror the label and bbox-weight input shapes.
        labels_shape = in_shape[2]
        bbox_weights_shape = in_shape[4]
        return in_shape, [labels_shape, bbox_weights_shape]

    def create_operator(self, ctx, shapes, dtypes):
        return BoxAnnotatorOHEMOperator(self._num_classes, self._num_reg_classes,
                                        self._rm_last, self._roi_per_img)

    def declare_backward_dependency(self, out_grad, in_data, out_data):
        # Backward needs nothing from forward; lets MXNet free memory early.
        return []
|
class resnet_v1_101_fpn_dcn_rcnn(Symbol):
def __init__(self):
    """Declare the symbol parameters (weights/biases) shared across FPN levels."""
    self.shared_param_list = ['offset_p2', 'offset_p3', 'offset_p4', 'offset_p5',
                              'rpn_conv', 'rpn_cls_score', 'rpn_bbox_pred']
    # One '<name>_weight' and '<name>_bias' Variable per shared parameter,
    # in the same name-major insertion order as before.
    self.shared_param_dict = {
        name + suffix: mx.sym.Variable(name + suffix)
        for name in self.shared_param_list
        for suffix in ('_weight', '_bias')
    }
def get_resnet_backbone(self, data, with_dilated=False, with_dconv=False, with_dpyramid=False, eps=1e-05):
conv1 = mx.symbol.Convolution(name='conv1', data=data, num_filter=64, pad=(3, 3), kernel=(7, 7), stride=(2, 2), no_bias=True)
bn_conv1 = mx.symbol.BatchNorm(name='bn_conv1', data=conv1, use_global_stats=True, fix_gamma=False, eps=eps)
scale_conv1 = bn_conv1
conv1_relu = mx.symbol.Activation(name='conv1_relu', data=scale_conv1, act_type='relu')
pool1 = mx.symbol.Pooling(name='pool1', data=conv1_relu, pooling_convention='full', pad=(0, 0), kernel=(3, 3), stride=(2, 2), pool_type='max')
res2a_branch1 = mx.symbol.Convolution(name='res2a_branch1', data=pool1, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2a_branch1 = mx.symbol.BatchNorm(name='bn2a_branch1', data=res2a_branch1, use_global_stats=True, fix_gamma=False, eps=eps)
scale2a_branch1 = bn2a_branch1
res2a_branch2a = mx.symbol.Convolution(name='res2a_branch2a', data=pool1, num_filter=64, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2a_branch2a = mx.symbol.BatchNorm(name='bn2a_branch2a', data=res2a_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale2a_branch2a = bn2a_branch2a
res2a_branch2a_relu = mx.symbol.Activation(name='res2a_branch2a_relu', data=scale2a_branch2a, act_type='relu')
res2a_branch2b = mx.symbol.Convolution(name='res2a_branch2b', data=res2a_branch2a_relu, num_filter=64, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn2a_branch2b = mx.symbol.BatchNorm(name='bn2a_branch2b', data=res2a_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale2a_branch2b = bn2a_branch2b
res2a_branch2b_relu = mx.symbol.Activation(name='res2a_branch2b_relu', data=scale2a_branch2b, act_type='relu')
res2a_branch2c = mx.symbol.Convolution(name='res2a_branch2c', data=res2a_branch2b_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2a_branch2c = mx.symbol.BatchNorm(name='bn2a_branch2c', data=res2a_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale2a_branch2c = bn2a_branch2c
res2a = mx.symbol.broadcast_add(*[scale2a_branch1, scale2a_branch2c], name='res2a')
res2a_relu = mx.symbol.Activation(name='res2a_relu', data=res2a, act_type='relu')
res2b_branch2a = mx.symbol.Convolution(name='res2b_branch2a', data=res2a_relu, num_filter=64, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2b_branch2a = mx.symbol.BatchNorm(name='bn2b_branch2a', data=res2b_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale2b_branch2a = bn2b_branch2a
res2b_branch2a_relu = mx.symbol.Activation(name='res2b_branch2a_relu', data=scale2b_branch2a, act_type='relu')
res2b_branch2b = mx.symbol.Convolution(name='res2b_branch2b', data=res2b_branch2a_relu, num_filter=64, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn2b_branch2b = mx.symbol.BatchNorm(name='bn2b_branch2b', data=res2b_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale2b_branch2b = bn2b_branch2b
res2b_branch2b_relu = mx.symbol.Activation(name='res2b_branch2b_relu', data=scale2b_branch2b, act_type='relu')
res2b_branch2c = mx.symbol.Convolution(name='res2b_branch2c', data=res2b_branch2b_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2b_branch2c = mx.symbol.BatchNorm(name='bn2b_branch2c', data=res2b_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale2b_branch2c = bn2b_branch2c
res2b = mx.symbol.broadcast_add(*[res2a_relu, scale2b_branch2c], name='res2b')
res2b_relu = mx.symbol.Activation(name='res2b_relu', data=res2b, act_type='relu')
res2c_branch2a = mx.symbol.Convolution(name='res2c_branch2a', data=res2b_relu, num_filter=64, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2c_branch2a = mx.symbol.BatchNorm(name='bn2c_branch2a', data=res2c_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale2c_branch2a = bn2c_branch2a
res2c_branch2a_relu = mx.symbol.Activation(name='res2c_branch2a_relu', data=scale2c_branch2a, act_type='relu')
res2c_branch2b = mx.symbol.Convolution(name='res2c_branch2b', data=res2c_branch2a_relu, num_filter=64, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn2c_branch2b = mx.symbol.BatchNorm(name='bn2c_branch2b', data=res2c_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale2c_branch2b = bn2c_branch2b
res2c_branch2b_relu = mx.symbol.Activation(name='res2c_branch2b_relu', data=scale2c_branch2b, act_type='relu')
res2c_branch2c = mx.symbol.Convolution(name='res2c_branch2c', data=res2c_branch2b_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2c_branch2c = mx.symbol.BatchNorm(name='bn2c_branch2c', data=res2c_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale2c_branch2c = bn2c_branch2c
res2c = mx.symbol.broadcast_add(*[res2b_relu, scale2c_branch2c], name='res2c')
res2c_relu = mx.symbol.Activation(name='res2c_relu', data=res2c, act_type='relu')
res3a_branch1 = mx.symbol.Convolution(name='res3a_branch1', data=res2c_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(2, 2), no_bias=True)
bn3a_branch1 = mx.symbol.BatchNorm(name='bn3a_branch1', data=res3a_branch1, use_global_stats=True, fix_gamma=False, eps=eps)
scale3a_branch1 = bn3a_branch1
res3a_branch2a = mx.symbol.Convolution(name='res3a_branch2a', data=res2c_relu, num_filter=128, pad=(0, 0), kernel=(1, 1), stride=(2, 2), no_bias=True)
bn3a_branch2a = mx.symbol.BatchNorm(name='bn3a_branch2a', data=res3a_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale3a_branch2a = bn3a_branch2a
res3a_branch2a_relu = mx.symbol.Activation(name='res3a_branch2a_relu', data=scale3a_branch2a, act_type='relu')
res3a_branch2b = mx.symbol.Convolution(name='res3a_branch2b', data=res3a_branch2a_relu, num_filter=128, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3a_branch2b = mx.symbol.BatchNorm(name='bn3a_branch2b', data=res3a_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale3a_branch2b = bn3a_branch2b
res3a_branch2b_relu = mx.symbol.Activation(name='res3a_branch2b_relu', data=scale3a_branch2b, act_type='relu')
res3a_branch2c = mx.symbol.Convolution(name='res3a_branch2c', data=res3a_branch2b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3a_branch2c = mx.symbol.BatchNorm(name='bn3a_branch2c', data=res3a_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale3a_branch2c = bn3a_branch2c
res3a = mx.symbol.broadcast_add(*[scale3a_branch1, scale3a_branch2c], name='res3a')
res3a_relu = mx.symbol.Activation(name='res3a_relu', data=res3a, act_type='relu')
res3b1_branch2a = mx.symbol.Convolution(name='res3b1_branch2a', data=res3a_relu, num_filter=128, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b1_branch2a = mx.symbol.BatchNorm(name='bn3b1_branch2a', data=res3b1_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale3b1_branch2a = bn3b1_branch2a
res3b1_branch2a_relu = mx.symbol.Activation(name='res3b1_branch2a_relu', data=scale3b1_branch2a, act_type='relu')
res3b1_branch2b = mx.symbol.Convolution(name='res3b1_branch2b', data=res3b1_branch2a_relu, num_filter=128, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3b1_branch2b = mx.symbol.BatchNorm(name='bn3b1_branch2b', data=res3b1_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale3b1_branch2b = bn3b1_branch2b
res3b1_branch2b_relu = mx.symbol.Activation(name='res3b1_branch2b_relu', data=scale3b1_branch2b, act_type='relu')
res3b1_branch2c = mx.symbol.Convolution(name='res3b1_branch2c', data=res3b1_branch2b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b1_branch2c = mx.symbol.BatchNorm(name='bn3b1_branch2c', data=res3b1_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale3b1_branch2c = bn3b1_branch2c
res3b1 = mx.symbol.broadcast_add(*[res3a_relu, scale3b1_branch2c], name='res3b1')
res3b1_relu = mx.symbol.Activation(name='res3b1_relu', data=res3b1, act_type='relu')
res3b2_branch2a = mx.symbol.Convolution(name='res3b2_branch2a', data=res3b1_relu, num_filter=128, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b2_branch2a = mx.symbol.BatchNorm(name='bn3b2_branch2a', data=res3b2_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale3b2_branch2a = bn3b2_branch2a
res3b2_branch2a_relu = mx.symbol.Activation(name='res3b2_branch2a_relu', data=scale3b2_branch2a, act_type='relu')
res3b2_branch2b = mx.symbol.Convolution(name='res3b2_branch2b', data=res3b2_branch2a_relu, num_filter=128, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3b2_branch2b = mx.symbol.BatchNorm(name='bn3b2_branch2b', data=res3b2_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale3b2_branch2b = bn3b2_branch2b
res3b2_branch2b_relu = mx.symbol.Activation(name='res3b2_branch2b_relu', data=scale3b2_branch2b, act_type='relu')
res3b2_branch2c = mx.symbol.Convolution(name='res3b2_branch2c', data=res3b2_branch2b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b2_branch2c = mx.symbol.BatchNorm(name='bn3b2_branch2c', data=res3b2_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale3b2_branch2c = bn3b2_branch2c
res3b2 = mx.symbol.broadcast_add(*[res3b1_relu, scale3b2_branch2c], name='res3b2')
res3b2_relu = mx.symbol.Activation(name='res3b2_relu', data=res3b2, act_type='relu')
res3b3_branch2a = mx.symbol.Convolution(name='res3b3_branch2a', data=res3b2_relu, num_filter=128, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b3_branch2a = mx.symbol.BatchNorm(name='bn3b3_branch2a', data=res3b3_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale3b3_branch2a = bn3b3_branch2a
res3b3_branch2a_relu = mx.symbol.Activation(name='res3b3_branch2a_relu', data=scale3b3_branch2a, act_type='relu')
if with_dpyramid:
res3b3_branch2b_offset = mx.symbol.Convolution(name='res3b3_branch2b_offset', data=res3b3_branch2a_relu, num_filter=72, pad=(1, 1), kernel=(3, 3), stride=(1, 1))
res3b3_branch2b = mx.contrib.symbol.DeformableConvolution(name='res3b3_branch2b', data=res3b3_branch2a_relu, offset=res3b3_branch2b_offset, num_filter=128, pad=(1, 1), kernel=(3, 3), num_deformable_group=4, stride=(1, 1), no_bias=True)
else:
res3b3_branch2b = mx.symbol.Convolution(name='res3b3_branch2b', data=res3b3_branch2a_relu, num_filter=128, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3b3_branch2b = mx.symbol.BatchNorm(name='bn3b3_branch2b', data=res3b3_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale3b3_branch2b = bn3b3_branch2b
res3b3_branch2b_relu = mx.symbol.Activation(name='res3b3_branch2b_relu', data=scale3b3_branch2b, act_type='relu')
res3b3_branch2c = mx.symbol.Convolution(name='res3b3_branch2c', data=res3b3_branch2b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b3_branch2c = mx.symbol.BatchNorm(name='bn3b3_branch2c', data=res3b3_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale3b3_branch2c = bn3b3_branch2c
res3b3 = mx.symbol.broadcast_add(*[res3b2_relu, scale3b3_branch2c], name='res3b3')
res3b3_relu = mx.symbol.Activation(name='res3b3_relu', data=res3b3, act_type='relu')
res4a_branch1 = mx.symbol.Convolution(name='res4a_branch1', data=res3b3_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(2, 2), no_bias=True)
bn4a_branch1 = mx.symbol.BatchNorm(name='bn4a_branch1', data=res4a_branch1, use_global_stats=True, fix_gamma=False, eps=eps)
scale4a_branch1 = bn4a_branch1
res4a_branch2a = mx.symbol.Convolution(name='res4a_branch2a', data=res3b3_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(2, 2), no_bias=True)
bn4a_branch2a = mx.symbol.BatchNorm(name='bn4a_branch2a', data=res4a_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4a_branch2a = bn4a_branch2a
res4a_branch2a_relu = mx.symbol.Activation(name='res4a_branch2a_relu', data=scale4a_branch2a, act_type='relu')
res4a_branch2b = mx.symbol.Convolution(name='res4a_branch2b', data=res4a_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4a_branch2b = mx.symbol.BatchNorm(name='bn4a_branch2b', data=res4a_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4a_branch2b = bn4a_branch2b
res4a_branch2b_relu = mx.symbol.Activation(name='res4a_branch2b_relu', data=scale4a_branch2b, act_type='relu')
res4a_branch2c = mx.symbol.Convolution(name='res4a_branch2c', data=res4a_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4a_branch2c = mx.symbol.BatchNorm(name='bn4a_branch2c', data=res4a_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4a_branch2c = bn4a_branch2c
res4a = mx.symbol.broadcast_add(*[scale4a_branch1, scale4a_branch2c], name='res4a')
res4a_relu = mx.symbol.Activation(name='res4a_relu', data=res4a, act_type='relu')
res4b1_branch2a = mx.symbol.Convolution(name='res4b1_branch2a', data=res4a_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b1_branch2a = mx.symbol.BatchNorm(name='bn4b1_branch2a', data=res4b1_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b1_branch2a = bn4b1_branch2a
res4b1_branch2a_relu = mx.symbol.Activation(name='res4b1_branch2a_relu', data=scale4b1_branch2a, act_type='relu')
res4b1_branch2b = mx.symbol.Convolution(name='res4b1_branch2b', data=res4b1_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b1_branch2b = mx.symbol.BatchNorm(name='bn4b1_branch2b', data=res4b1_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b1_branch2b = bn4b1_branch2b
res4b1_branch2b_relu = mx.symbol.Activation(name='res4b1_branch2b_relu', data=scale4b1_branch2b, act_type='relu')
res4b1_branch2c = mx.symbol.Convolution(name='res4b1_branch2c', data=res4b1_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b1_branch2c = mx.symbol.BatchNorm(name='bn4b1_branch2c', data=res4b1_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b1_branch2c = bn4b1_branch2c
res4b1 = mx.symbol.broadcast_add(*[res4a_relu, scale4b1_branch2c], name='res4b1')
res4b1_relu = mx.symbol.Activation(name='res4b1_relu', data=res4b1, act_type='relu')
res4b2_branch2a = mx.symbol.Convolution(name='res4b2_branch2a', data=res4b1_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b2_branch2a = mx.symbol.BatchNorm(name='bn4b2_branch2a', data=res4b2_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b2_branch2a = bn4b2_branch2a
res4b2_branch2a_relu = mx.symbol.Activation(name='res4b2_branch2a_relu', data=scale4b2_branch2a, act_type='relu')
res4b2_branch2b = mx.symbol.Convolution(name='res4b2_branch2b', data=res4b2_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b2_branch2b = mx.symbol.BatchNorm(name='bn4b2_branch2b', data=res4b2_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b2_branch2b = bn4b2_branch2b
res4b2_branch2b_relu = mx.symbol.Activation(name='res4b2_branch2b_relu', data=scale4b2_branch2b, act_type='relu')
res4b2_branch2c = mx.symbol.Convolution(name='res4b2_branch2c', data=res4b2_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b2_branch2c = mx.symbol.BatchNorm(name='bn4b2_branch2c', data=res4b2_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b2_branch2c = bn4b2_branch2c
res4b2 = mx.symbol.broadcast_add(*[res4b1_relu, scale4b2_branch2c], name='res4b2')
res4b2_relu = mx.symbol.Activation(name='res4b2_relu', data=res4b2, act_type='relu')
res4b3_branch2a = mx.symbol.Convolution(name='res4b3_branch2a', data=res4b2_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b3_branch2a = mx.symbol.BatchNorm(name='bn4b3_branch2a', data=res4b3_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b3_branch2a = bn4b3_branch2a
res4b3_branch2a_relu = mx.symbol.Activation(name='res4b3_branch2a_relu', data=scale4b3_branch2a, act_type='relu')
res4b3_branch2b = mx.symbol.Convolution(name='res4b3_branch2b', data=res4b3_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b3_branch2b = mx.symbol.BatchNorm(name='bn4b3_branch2b', data=res4b3_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b3_branch2b = bn4b3_branch2b
res4b3_branch2b_relu = mx.symbol.Activation(name='res4b3_branch2b_relu', data=scale4b3_branch2b, act_type='relu')
res4b3_branch2c = mx.symbol.Convolution(name='res4b3_branch2c', data=res4b3_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b3_branch2c = mx.symbol.BatchNorm(name='bn4b3_branch2c', data=res4b3_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b3_branch2c = bn4b3_branch2c
res4b3 = mx.symbol.broadcast_add(*[res4b2_relu, scale4b3_branch2c], name='res4b3')
res4b3_relu = mx.symbol.Activation(name='res4b3_relu', data=res4b3, act_type='relu')
res4b4_branch2a = mx.symbol.Convolution(name='res4b4_branch2a', data=res4b3_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b4_branch2a = mx.symbol.BatchNorm(name='bn4b4_branch2a', data=res4b4_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b4_branch2a = bn4b4_branch2a
res4b4_branch2a_relu = mx.symbol.Activation(name='res4b4_branch2a_relu', data=scale4b4_branch2a, act_type='relu')
res4b4_branch2b = mx.symbol.Convolution(name='res4b4_branch2b', data=res4b4_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b4_branch2b = mx.symbol.BatchNorm(name='bn4b4_branch2b', data=res4b4_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b4_branch2b = bn4b4_branch2b
res4b4_branch2b_relu = mx.symbol.Activation(name='res4b4_branch2b_relu', data=scale4b4_branch2b, act_type='relu')
res4b4_branch2c = mx.symbol.Convolution(name='res4b4_branch2c', data=res4b4_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b4_branch2c = mx.symbol.BatchNorm(name='bn4b4_branch2c', data=res4b4_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b4_branch2c = bn4b4_branch2c
res4b4 = mx.symbol.broadcast_add(*[res4b3_relu, scale4b4_branch2c], name='res4b4')
res4b4_relu = mx.symbol.Activation(name='res4b4_relu', data=res4b4, act_type='relu')
res4b5_branch2a = mx.symbol.Convolution(name='res4b5_branch2a', data=res4b4_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b5_branch2a = mx.symbol.BatchNorm(name='bn4b5_branch2a', data=res4b5_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b5_branch2a = bn4b5_branch2a
res4b5_branch2a_relu = mx.symbol.Activation(name='res4b5_branch2a_relu', data=scale4b5_branch2a, act_type='relu')
res4b5_branch2b = mx.symbol.Convolution(name='res4b5_branch2b', data=res4b5_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b5_branch2b = mx.symbol.BatchNorm(name='bn4b5_branch2b', data=res4b5_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b5_branch2b = bn4b5_branch2b
res4b5_branch2b_relu = mx.symbol.Activation(name='res4b5_branch2b_relu', data=scale4b5_branch2b, act_type='relu')
res4b5_branch2c = mx.symbol.Convolution(name='res4b5_branch2c', data=res4b5_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b5_branch2c = mx.symbol.BatchNorm(name='bn4b5_branch2c', data=res4b5_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b5_branch2c = bn4b5_branch2c
res4b5 = mx.symbol.broadcast_add(*[res4b4_relu, scale4b5_branch2c], name='res4b5')
res4b5_relu = mx.symbol.Activation(name='res4b5_relu', data=res4b5, act_type='relu')
res4b6_branch2a = mx.symbol.Convolution(name='res4b6_branch2a', data=res4b5_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b6_branch2a = mx.symbol.BatchNorm(name='bn4b6_branch2a', data=res4b6_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b6_branch2a = bn4b6_branch2a
res4b6_branch2a_relu = mx.symbol.Activation(name='res4b6_branch2a_relu', data=scale4b6_branch2a, act_type='relu')
res4b6_branch2b = mx.symbol.Convolution(name='res4b6_branch2b', data=res4b6_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b6_branch2b = mx.symbol.BatchNorm(name='bn4b6_branch2b', data=res4b6_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b6_branch2b = bn4b6_branch2b
res4b6_branch2b_relu = mx.symbol.Activation(name='res4b6_branch2b_relu', data=scale4b6_branch2b, act_type='relu')
res4b6_branch2c = mx.symbol.Convolution(name='res4b6_branch2c', data=res4b6_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b6_branch2c = mx.symbol.BatchNorm(name='bn4b6_branch2c', data=res4b6_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b6_branch2c = bn4b6_branch2c
res4b6 = mx.symbol.broadcast_add(*[res4b5_relu, scale4b6_branch2c], name='res4b6')
res4b6_relu = mx.symbol.Activation(name='res4b6_relu', data=res4b6, act_type='relu')
res4b7_branch2a = mx.symbol.Convolution(name='res4b7_branch2a', data=res4b6_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b7_branch2a = mx.symbol.BatchNorm(name='bn4b7_branch2a', data=res4b7_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b7_branch2a = bn4b7_branch2a
res4b7_branch2a_relu = mx.symbol.Activation(name='res4b7_branch2a_relu', data=scale4b7_branch2a, act_type='relu')
res4b7_branch2b = mx.symbol.Convolution(name='res4b7_branch2b', data=res4b7_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b7_branch2b = mx.symbol.BatchNorm(name='bn4b7_branch2b', data=res4b7_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b7_branch2b = bn4b7_branch2b
res4b7_branch2b_relu = mx.symbol.Activation(name='res4b7_branch2b_relu', data=scale4b7_branch2b, act_type='relu')
res4b7_branch2c = mx.symbol.Convolution(name='res4b7_branch2c', data=res4b7_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b7_branch2c = mx.symbol.BatchNorm(name='bn4b7_branch2c', data=res4b7_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b7_branch2c = bn4b7_branch2c
res4b7 = mx.symbol.broadcast_add(*[res4b6_relu, scale4b7_branch2c], name='res4b7')
res4b7_relu = mx.symbol.Activation(name='res4b7_relu', data=res4b7, act_type='relu')
res4b8_branch2a = mx.symbol.Convolution(name='res4b8_branch2a', data=res4b7_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b8_branch2a = mx.symbol.BatchNorm(name='bn4b8_branch2a', data=res4b8_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b8_branch2a = bn4b8_branch2a
res4b8_branch2a_relu = mx.symbol.Activation(name='res4b8_branch2a_relu', data=scale4b8_branch2a, act_type='relu')
res4b8_branch2b = mx.symbol.Convolution(name='res4b8_branch2b', data=res4b8_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b8_branch2b = mx.symbol.BatchNorm(name='bn4b8_branch2b', data=res4b8_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b8_branch2b = bn4b8_branch2b
res4b8_branch2b_relu = mx.symbol.Activation(name='res4b8_branch2b_relu', data=scale4b8_branch2b, act_type='relu')
res4b8_branch2c = mx.symbol.Convolution(name='res4b8_branch2c', data=res4b8_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b8_branch2c = mx.symbol.BatchNorm(name='bn4b8_branch2c', data=res4b8_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b8_branch2c = bn4b8_branch2c
res4b8 = mx.symbol.broadcast_add(*[res4b7_relu, scale4b8_branch2c], name='res4b8')
res4b8_relu = mx.symbol.Activation(name='res4b8_relu', data=res4b8, act_type='relu')
res4b9_branch2a = mx.symbol.Convolution(name='res4b9_branch2a', data=res4b8_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b9_branch2a = mx.symbol.BatchNorm(name='bn4b9_branch2a', data=res4b9_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b9_branch2a = bn4b9_branch2a
res4b9_branch2a_relu = mx.symbol.Activation(name='res4b9_branch2a_relu', data=scale4b9_branch2a, act_type='relu')
res4b9_branch2b = mx.symbol.Convolution(name='res4b9_branch2b', data=res4b9_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b9_branch2b = mx.symbol.BatchNorm(name='bn4b9_branch2b', data=res4b9_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b9_branch2b = bn4b9_branch2b
res4b9_branch2b_relu = mx.symbol.Activation(name='res4b9_branch2b_relu', data=scale4b9_branch2b, act_type='relu')
res4b9_branch2c = mx.symbol.Convolution(name='res4b9_branch2c', data=res4b9_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b9_branch2c = mx.symbol.BatchNorm(name='bn4b9_branch2c', data=res4b9_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b9_branch2c = bn4b9_branch2c
res4b9 = mx.symbol.broadcast_add(*[res4b8_relu, scale4b9_branch2c], name='res4b9')
res4b9_relu = mx.symbol.Activation(name='res4b9_relu', data=res4b9, act_type='relu')
res4b10_branch2a = mx.symbol.Convolution(name='res4b10_branch2a', data=res4b9_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b10_branch2a = mx.symbol.BatchNorm(name='bn4b10_branch2a', data=res4b10_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b10_branch2a = bn4b10_branch2a
res4b10_branch2a_relu = mx.symbol.Activation(name='res4b10_branch2a_relu', data=scale4b10_branch2a, act_type='relu')
res4b10_branch2b = mx.symbol.Convolution(name='res4b10_branch2b', data=res4b10_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b10_branch2b = mx.symbol.BatchNorm(name='bn4b10_branch2b', data=res4b10_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b10_branch2b = bn4b10_branch2b
res4b10_branch2b_relu = mx.symbol.Activation(name='res4b10_branch2b_relu', data=scale4b10_branch2b, act_type='relu')
res4b10_branch2c = mx.symbol.Convolution(name='res4b10_branch2c', data=res4b10_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b10_branch2c = mx.symbol.BatchNorm(name='bn4b10_branch2c', data=res4b10_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b10_branch2c = bn4b10_branch2c
res4b10 = mx.symbol.broadcast_add(*[res4b9_relu, scale4b10_branch2c], name='res4b10')
res4b10_relu = mx.symbol.Activation(name='res4b10_relu', data=res4b10, act_type='relu')
res4b11_branch2a = mx.symbol.Convolution(name='res4b11_branch2a', data=res4b10_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b11_branch2a = mx.symbol.BatchNorm(name='bn4b11_branch2a', data=res4b11_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b11_branch2a = bn4b11_branch2a
res4b11_branch2a_relu = mx.symbol.Activation(name='res4b11_branch2a_relu', data=scale4b11_branch2a, act_type='relu')
res4b11_branch2b = mx.symbol.Convolution(name='res4b11_branch2b', data=res4b11_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b11_branch2b = mx.symbol.BatchNorm(name='bn4b11_branch2b', data=res4b11_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b11_branch2b = bn4b11_branch2b
res4b11_branch2b_relu = mx.symbol.Activation(name='res4b11_branch2b_relu', data=scale4b11_branch2b, act_type='relu')
res4b11_branch2c = mx.symbol.Convolution(name='res4b11_branch2c', data=res4b11_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b11_branch2c = mx.symbol.BatchNorm(name='bn4b11_branch2c', data=res4b11_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b11_branch2c = bn4b11_branch2c
res4b11 = mx.symbol.broadcast_add(*[res4b10_relu, scale4b11_branch2c], name='res4b11')
res4b11_relu = mx.symbol.Activation(name='res4b11_relu', data=res4b11, act_type='relu')
res4b12_branch2a = mx.symbol.Convolution(name='res4b12_branch2a', data=res4b11_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b12_branch2a = mx.symbol.BatchNorm(name='bn4b12_branch2a', data=res4b12_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b12_branch2a = bn4b12_branch2a
res4b12_branch2a_relu = mx.symbol.Activation(name='res4b12_branch2a_relu', data=scale4b12_branch2a, act_type='relu')
res4b12_branch2b = mx.symbol.Convolution(name='res4b12_branch2b', data=res4b12_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b12_branch2b = mx.symbol.BatchNorm(name='bn4b12_branch2b', data=res4b12_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b12_branch2b = bn4b12_branch2b
res4b12_branch2b_relu = mx.symbol.Activation(name='res4b12_branch2b_relu', data=scale4b12_branch2b, act_type='relu')
res4b12_branch2c = mx.symbol.Convolution(name='res4b12_branch2c', data=res4b12_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b12_branch2c = mx.symbol.BatchNorm(name='bn4b12_branch2c', data=res4b12_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b12_branch2c = bn4b12_branch2c
res4b12 = mx.symbol.broadcast_add(*[res4b11_relu, scale4b12_branch2c], name='res4b12')
res4b12_relu = mx.symbol.Activation(name='res4b12_relu', data=res4b12, act_type='relu')
res4b13_branch2a = mx.symbol.Convolution(name='res4b13_branch2a', data=res4b12_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b13_branch2a = mx.symbol.BatchNorm(name='bn4b13_branch2a', data=res4b13_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b13_branch2a = bn4b13_branch2a
res4b13_branch2a_relu = mx.symbol.Activation(name='res4b13_branch2a_relu', data=scale4b13_branch2a, act_type='relu')
res4b13_branch2b = mx.symbol.Convolution(name='res4b13_branch2b', data=res4b13_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b13_branch2b = mx.symbol.BatchNorm(name='bn4b13_branch2b', data=res4b13_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b13_branch2b = bn4b13_branch2b
res4b13_branch2b_relu = mx.symbol.Activation(name='res4b13_branch2b_relu', data=scale4b13_branch2b, act_type='relu')
res4b13_branch2c = mx.symbol.Convolution(name='res4b13_branch2c', data=res4b13_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b13_branch2c = mx.symbol.BatchNorm(name='bn4b13_branch2c', data=res4b13_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b13_branch2c = bn4b13_branch2c
res4b13 = mx.symbol.broadcast_add(*[res4b12_relu, scale4b13_branch2c], name='res4b13')
res4b13_relu = mx.symbol.Activation(name='res4b13_relu', data=res4b13, act_type='relu')
res4b14_branch2a = mx.symbol.Convolution(name='res4b14_branch2a', data=res4b13_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b14_branch2a = mx.symbol.BatchNorm(name='bn4b14_branch2a', data=res4b14_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b14_branch2a = bn4b14_branch2a
res4b14_branch2a_relu = mx.symbol.Activation(name='res4b14_branch2a_relu', data=scale4b14_branch2a, act_type='relu')
res4b14_branch2b = mx.symbol.Convolution(name='res4b14_branch2b', data=res4b14_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b14_branch2b = mx.symbol.BatchNorm(name='bn4b14_branch2b', data=res4b14_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b14_branch2b = bn4b14_branch2b
res4b14_branch2b_relu = mx.symbol.Activation(name='res4b14_branch2b_relu', data=scale4b14_branch2b, act_type='relu')
res4b14_branch2c = mx.symbol.Convolution(name='res4b14_branch2c', data=res4b14_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b14_branch2c = mx.symbol.BatchNorm(name='bn4b14_branch2c', data=res4b14_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b14_branch2c = bn4b14_branch2c
res4b14 = mx.symbol.broadcast_add(*[res4b13_relu, scale4b14_branch2c], name='res4b14')
res4b14_relu = mx.symbol.Activation(name='res4b14_relu', data=res4b14, act_type='relu')
res4b15_branch2a = mx.symbol.Convolution(name='res4b15_branch2a', data=res4b14_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b15_branch2a = mx.symbol.BatchNorm(name='bn4b15_branch2a', data=res4b15_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b15_branch2a = bn4b15_branch2a
res4b15_branch2a_relu = mx.symbol.Activation(name='res4b15_branch2a_relu', data=scale4b15_branch2a, act_type='relu')
res4b15_branch2b = mx.symbol.Convolution(name='res4b15_branch2b', data=res4b15_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b15_branch2b = mx.symbol.BatchNorm(name='bn4b15_branch2b', data=res4b15_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b15_branch2b = bn4b15_branch2b
res4b15_branch2b_relu = mx.symbol.Activation(name='res4b15_branch2b_relu', data=scale4b15_branch2b, act_type='relu')
res4b15_branch2c = mx.symbol.Convolution(name='res4b15_branch2c', data=res4b15_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b15_branch2c = mx.symbol.BatchNorm(name='bn4b15_branch2c', data=res4b15_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b15_branch2c = bn4b15_branch2c
res4b15 = mx.symbol.broadcast_add(*[res4b14_relu, scale4b15_branch2c], name='res4b15')
res4b15_relu = mx.symbol.Activation(name='res4b15_relu', data=res4b15, act_type='relu')
res4b16_branch2a = mx.symbol.Convolution(name='res4b16_branch2a', data=res4b15_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b16_branch2a = mx.symbol.BatchNorm(name='bn4b16_branch2a', data=res4b16_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b16_branch2a = bn4b16_branch2a
res4b16_branch2a_relu = mx.symbol.Activation(name='res4b16_branch2a_relu', data=scale4b16_branch2a, act_type='relu')
res4b16_branch2b = mx.symbol.Convolution(name='res4b16_branch2b', data=res4b16_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b16_branch2b = mx.symbol.BatchNorm(name='bn4b16_branch2b', data=res4b16_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b16_branch2b = bn4b16_branch2b
res4b16_branch2b_relu = mx.symbol.Activation(name='res4b16_branch2b_relu', data=scale4b16_branch2b, act_type='relu')
res4b16_branch2c = mx.symbol.Convolution(name='res4b16_branch2c', data=res4b16_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b16_branch2c = mx.symbol.BatchNorm(name='bn4b16_branch2c', data=res4b16_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b16_branch2c = bn4b16_branch2c
res4b16 = mx.symbol.broadcast_add(*[res4b15_relu, scale4b16_branch2c], name='res4b16')
res4b16_relu = mx.symbol.Activation(name='res4b16_relu', data=res4b16, act_type='relu')
res4b17_branch2a = mx.symbol.Convolution(name='res4b17_branch2a', data=res4b16_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b17_branch2a = mx.symbol.BatchNorm(name='bn4b17_branch2a', data=res4b17_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b17_branch2a = bn4b17_branch2a
res4b17_branch2a_relu = mx.symbol.Activation(name='res4b17_branch2a_relu', data=scale4b17_branch2a, act_type='relu')
res4b17_branch2b = mx.symbol.Convolution(name='res4b17_branch2b', data=res4b17_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b17_branch2b = mx.symbol.BatchNorm(name='bn4b17_branch2b', data=res4b17_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b17_branch2b = bn4b17_branch2b
res4b17_branch2b_relu = mx.symbol.Activation(name='res4b17_branch2b_relu', data=scale4b17_branch2b, act_type='relu')
res4b17_branch2c = mx.symbol.Convolution(name='res4b17_branch2c', data=res4b17_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b17_branch2c = mx.symbol.BatchNorm(name='bn4b17_branch2c', data=res4b17_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b17_branch2c = bn4b17_branch2c
res4b17 = mx.symbol.broadcast_add(*[res4b16_relu, scale4b17_branch2c], name='res4b17')
res4b17_relu = mx.symbol.Activation(name='res4b17_relu', data=res4b17, act_type='relu')
res4b18_branch2a = mx.symbol.Convolution(name='res4b18_branch2a', data=res4b17_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b18_branch2a = mx.symbol.BatchNorm(name='bn4b18_branch2a', data=res4b18_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b18_branch2a = bn4b18_branch2a
res4b18_branch2a_relu = mx.symbol.Activation(name='res4b18_branch2a_relu', data=scale4b18_branch2a, act_type='relu')
res4b18_branch2b = mx.symbol.Convolution(name='res4b18_branch2b', data=res4b18_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b18_branch2b = mx.symbol.BatchNorm(name='bn4b18_branch2b', data=res4b18_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b18_branch2b = bn4b18_branch2b
res4b18_branch2b_relu = mx.symbol.Activation(name='res4b18_branch2b_relu', data=scale4b18_branch2b, act_type='relu')
res4b18_branch2c = mx.symbol.Convolution(name='res4b18_branch2c', data=res4b18_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b18_branch2c = mx.symbol.BatchNorm(name='bn4b18_branch2c', data=res4b18_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b18_branch2c = bn4b18_branch2c
res4b18 = mx.symbol.broadcast_add(*[res4b17_relu, scale4b18_branch2c], name='res4b18')
res4b18_relu = mx.symbol.Activation(name='res4b18_relu', data=res4b18, act_type='relu')
res4b19_branch2a = mx.symbol.Convolution(name='res4b19_branch2a', data=res4b18_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b19_branch2a = mx.symbol.BatchNorm(name='bn4b19_branch2a', data=res4b19_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b19_branch2a = bn4b19_branch2a
res4b19_branch2a_relu = mx.symbol.Activation(name='res4b19_branch2a_relu', data=scale4b19_branch2a, act_type='relu')
res4b19_branch2b = mx.symbol.Convolution(name='res4b19_branch2b', data=res4b19_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b19_branch2b = mx.symbol.BatchNorm(name='bn4b19_branch2b', data=res4b19_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b19_branch2b = bn4b19_branch2b
res4b19_branch2b_relu = mx.symbol.Activation(name='res4b19_branch2b_relu', data=scale4b19_branch2b, act_type='relu')
res4b19_branch2c = mx.symbol.Convolution(name='res4b19_branch2c', data=res4b19_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b19_branch2c = mx.symbol.BatchNorm(name='bn4b19_branch2c', data=res4b19_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b19_branch2c = bn4b19_branch2c
res4b19 = mx.symbol.broadcast_add(*[res4b18_relu, scale4b19_branch2c], name='res4b19')
res4b19_relu = mx.symbol.Activation(name='res4b19_relu', data=res4b19, act_type='relu')
res4b20_branch2a = mx.symbol.Convolution(name='res4b20_branch2a', data=res4b19_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b20_branch2a = mx.symbol.BatchNorm(name='bn4b20_branch2a', data=res4b20_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b20_branch2a = bn4b20_branch2a
res4b20_branch2a_relu = mx.symbol.Activation(name='res4b20_branch2a_relu', data=scale4b20_branch2a, act_type='relu')
res4b20_branch2b = mx.symbol.Convolution(name='res4b20_branch2b', data=res4b20_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b20_branch2b = mx.symbol.BatchNorm(name='bn4b20_branch2b', data=res4b20_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b20_branch2b = bn4b20_branch2b
res4b20_branch2b_relu = mx.symbol.Activation(name='res4b20_branch2b_relu', data=scale4b20_branch2b, act_type='relu')
res4b20_branch2c = mx.symbol.Convolution(name='res4b20_branch2c', data=res4b20_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b20_branch2c = mx.symbol.BatchNorm(name='bn4b20_branch2c', data=res4b20_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b20_branch2c = bn4b20_branch2c
res4b20 = mx.symbol.broadcast_add(*[res4b19_relu, scale4b20_branch2c], name='res4b20')
res4b20_relu = mx.symbol.Activation(name='res4b20_relu', data=res4b20, act_type='relu')
res4b21_branch2a = mx.symbol.Convolution(name='res4b21_branch2a', data=res4b20_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b21_branch2a = mx.symbol.BatchNorm(name='bn4b21_branch2a', data=res4b21_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b21_branch2a = bn4b21_branch2a
res4b21_branch2a_relu = mx.symbol.Activation(name='res4b21_branch2a_relu', data=scale4b21_branch2a, act_type='relu')
res4b21_branch2b = mx.symbol.Convolution(name='res4b21_branch2b', data=res4b21_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b21_branch2b = mx.symbol.BatchNorm(name='bn4b21_branch2b', data=res4b21_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b21_branch2b = bn4b21_branch2b
res4b21_branch2b_relu = mx.symbol.Activation(name='res4b21_branch2b_relu', data=scale4b21_branch2b, act_type='relu')
res4b21_branch2c = mx.symbol.Convolution(name='res4b21_branch2c', data=res4b21_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b21_branch2c = mx.symbol.BatchNorm(name='bn4b21_branch2c', data=res4b21_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b21_branch2c = bn4b21_branch2c
res4b21 = mx.symbol.broadcast_add(*[res4b20_relu, scale4b21_branch2c], name='res4b21')
res4b21_relu = mx.symbol.Activation(name='res4b21_relu', data=res4b21, act_type='relu')
res4b22_branch2a = mx.symbol.Convolution(name='res4b22_branch2a', data=res4b21_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b22_branch2a = mx.symbol.BatchNorm(name='bn4b22_branch2a', data=res4b22_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b22_branch2a = bn4b22_branch2a
res4b22_branch2a_relu = mx.symbol.Activation(name='res4b22_branch2a_relu', data=scale4b22_branch2a, act_type='relu')
if with_dpyramid:
res4b22_branch2b_offset = mx.symbol.Convolution(name='res4b22_branch2b_offset', data=res4b22_branch2a_relu, num_filter=72, pad=(1, 1), kernel=(3, 3), stride=(1, 1))
res4b22_branch2b = mx.contrib.symbol.DeformableConvolution(name='res4b22_branch2b', data=res4b22_branch2a_relu, offset=res4b22_branch2b_offset, num_filter=256, pad=(1, 1), kernel=(3, 3), num_deformable_group=4, stride=(1, 1), no_bias=True)
else:
res4b22_branch2b = mx.symbol.Convolution(name='res4b22_branch2b', data=res4b22_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b22_branch2b = mx.symbol.BatchNorm(name='bn4b22_branch2b', data=res4b22_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b22_branch2b = bn4b22_branch2b
res4b22_branch2b_relu = mx.symbol.Activation(name='res4b22_branch2b_relu', data=scale4b22_branch2b, act_type='relu')
res4b22_branch2c = mx.symbol.Convolution(name='res4b22_branch2c', data=res4b22_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b22_branch2c = mx.symbol.BatchNorm(name='bn4b22_branch2c', data=res4b22_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b22_branch2c = bn4b22_branch2c
res4b22 = mx.symbol.broadcast_add(*[res4b21_relu, scale4b22_branch2c], name='res4b22')
res4b22_relu = mx.symbol.Activation(name='res4b22_relu', data=res4b22, act_type='relu')
if with_dilated:
res5_stride = (1, 1)
res5_dilate = (2, 2)
else:
res5_stride = (2, 2)
res5_dilate = (1, 1)
res5a_branch2a = mx.symbol.Convolution(name='res5a_branch2a', data=res4b22_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=res5_stride, no_bias=True)
bn5a_branch2a = mx.symbol.BatchNorm(name='bn5a_branch2a', data=res5a_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale5a_branch2a = bn5a_branch2a
res5a_branch2a_relu = mx.symbol.Activation(name='res5a_branch2a_relu', data=scale5a_branch2a, act_type='relu')
if with_dconv:
res5a_branch2b_offset = mx.symbol.Convolution(name='res5a_branch2b_offset', data=res5a_branch2a_relu, num_filter=72, pad=res5_dilate, kernel=(3, 3), dilate=res5_dilate)
res5a_branch2b = mx.contrib.symbol.DeformableConvolution(name='res5a_branch2b', data=res5a_branch2a_relu, offset=res5a_branch2b_offset, num_filter=512, pad=res5_dilate, kernel=(3, 3), num_deformable_group=4, stride=(1, 1), dilate=res5_dilate, no_bias=True)
else:
res5a_branch2b = mx.symbol.Convolution(name='res5a_branch2b', data=res5a_branch2a_relu, num_filter=512, pad=res5_dilate, kernel=(3, 3), stride=(1, 1), dilate=res5_dilate, no_bias=True)
bn5a_branch2b = mx.symbol.BatchNorm(name='bn5a_branch2b', data=res5a_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale5a_branch2b = bn5a_branch2b
res5a_branch2b_relu = mx.symbol.Activation(name='res5a_branch2b_relu', data=scale5a_branch2b, act_type='relu')
res5a_branch2c = mx.symbol.Convolution(name='res5a_branch2c', data=res5a_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5a_branch2c = mx.symbol.BatchNorm(name='bn5a_branch2c', data=res5a_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale5a_branch2c = bn5a_branch2c
res5a_branch1 = mx.symbol.Convolution(name='res5a_branch1', data=res4b22_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=res5_stride, no_bias=True)
bn5a_branch1 = mx.symbol.BatchNorm(name='bn5a_branch1', data=res5a_branch1, use_global_stats=True, fix_gamma=False, eps=eps)
scale5a_branch1 = bn5a_branch1
res5a = mx.symbol.broadcast_add(*[scale5a_branch1, scale5a_branch2c], name='res5a')
res5a_relu = mx.symbol.Activation(name='res5a_relu', data=res5a, act_type='relu')
res5b_branch2a = mx.symbol.Convolution(name='res5b_branch2a', data=res5a_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5b_branch2a = mx.symbol.BatchNorm(name='bn5b_branch2a', data=res5b_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale5b_branch2a = bn5b_branch2a
res5b_branch2a_relu = mx.symbol.Activation(name='res5b_branch2a_relu', data=scale5b_branch2a, act_type='relu')
if with_dconv:
res5b_branch2b_offset = mx.symbol.Convolution(name='res5b_branch2b_offset', data=res5b_branch2a_relu, num_filter=72, pad=res5_dilate, kernel=(3, 3), dilate=res5_dilate)
res5b_branch2b = mx.contrib.symbol.DeformableConvolution(name='res5b_branch2b', data=res5b_branch2a_relu, offset=res5b_branch2b_offset, num_filter=512, pad=res5_dilate, kernel=(3, 3), num_deformable_group=4, dilate=res5_dilate, no_bias=True)
else:
res5b_branch2b = mx.symbol.Convolution(name='res5b_branch2b', data=res5b_branch2a_relu, num_filter=512, pad=res5_dilate, kernel=(3, 3), stride=(1, 1), dilate=res5_dilate, no_bias=True)
bn5b_branch2b = mx.symbol.BatchNorm(name='bn5b_branch2b', data=res5b_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale5b_branch2b = bn5b_branch2b
res5b_branch2b_relu = mx.symbol.Activation(name='res5b_branch2b_relu', data=scale5b_branch2b, act_type='relu')
res5b_branch2c = mx.symbol.Convolution(name='res5b_branch2c', data=res5b_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5b_branch2c = mx.symbol.BatchNorm(name='bn5b_branch2c', data=res5b_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale5b_branch2c = bn5b_branch2c
res5b = mx.symbol.broadcast_add(*[res5a_relu, scale5b_branch2c], name='res5b')
res5b_relu = mx.symbol.Activation(name='res5b_relu', data=res5b, act_type='relu')
res5c_branch2a = mx.symbol.Convolution(name='res5c_branch2a', data=res5b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5c_branch2a = mx.symbol.BatchNorm(name='bn5c_branch2a', data=res5c_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale5c_branch2a = bn5c_branch2a
res5c_branch2a_relu = mx.symbol.Activation(name='res5c_branch2a_relu', data=scale5c_branch2a, act_type='relu')
if with_dconv:
res5c_branch2b_offset = mx.symbol.Convolution(name='res5c_branch2b_offset', data=res5c_branch2a_relu, num_filter=72, pad=res5_dilate, kernel=(3, 3), dilate=res5_dilate)
res5c_branch2b = mx.contrib.symbol.DeformableConvolution(name='res5c_branch2b', data=res5c_branch2a_relu, offset=res5c_branch2b_offset, num_filter=512, pad=res5_dilate, kernel=(3, 3), num_deformable_group=4, dilate=res5_dilate, no_bias=True)
else:
res5c_branch2b = mx.symbol.Convolution(name='res5c_branch2b', data=res5c_branch2a_relu, num_filter=512, pad=res5_dilate, kernel=(3, 3), stride=(1, 1), dilate=res5_dilate, no_bias=True)
bn5c_branch2b = mx.symbol.BatchNorm(name='bn5c_branch2b', data=res5c_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale5c_branch2b = bn5c_branch2b
res5c_branch2b_relu = mx.symbol.Activation(name='res5c_branch2b_relu', data=scale5c_branch2b, act_type='relu')
res5c_branch2c = mx.symbol.Convolution(name='res5c_branch2c', data=res5c_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5c_branch2c = mx.symbol.BatchNorm(name='bn5c_branch2c', data=res5c_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale5c_branch2c = bn5c_branch2c
res5c = mx.symbol.broadcast_add(*[res5b_relu, scale5c_branch2c], name='res5c')
res5c_relu = mx.symbol.Activation(name='res5c_relu', data=res5c, act_type='relu')
return (res2c_relu, res3b3_relu, res4b22_relu, res5c_relu)
def get_fpn_feature(self, c2, c3, c4, c5, feature_dim=256):
    """Build the FPN pyramid P2-P6 from backbone stages C2-C5.

    Lateral 1x1 convolutions project each stage to ``feature_dim``
    channels, a top-down pathway merges coarser levels into finer ones,
    and 3x3 convolutions produce the final per-level feature maps.
    P6 is a stride-2 convolution applied directly to C5.

    Returns a tuple (fpn_p2, fpn_p3, fpn_p4, fpn_p5, fpn_p6).
    """
    # 1x1 lateral connections: reduce every backbone stage to feature_dim channels.
    lateral = {}
    for level, stage in zip((2, 3, 4, 5), (c2, c3, c4, c5)):
        lateral[level] = mx.symbol.Convolution(
            data=stage, kernel=(1, 1), pad=(0, 0), stride=(1, 1),
            num_filter=feature_dim, name='fpn_p%d_1x1' % level)
    # Top-down pathway: upsample the coarser merged map and add the lateral.
    merged = {5: lateral[5]}
    for level in (4, 3, 2):
        upsampled = mx.symbol.UpSampling(
            merged[level + 1], scale=2, sample_type='nearest',
            name='fpn_p%d_upsample' % (level + 1))
        merged[level] = mx.sym.ElementWiseSum(
            *[upsampled, lateral[level]], name='fpn_p%d_sum' % level)
    # P6 (coarsest level) comes from a stride-2 conv on C5.
    fpn_p6 = mx.sym.Convolution(
        data=c5, kernel=(3, 3), pad=(1, 1), stride=(2, 2),
        num_filter=feature_dim, name='fpn_p6')
    # 3x3 convs smooth aliasing introduced by nearest-neighbour upsampling.
    smoothed = {}
    for level in (5, 4, 3, 2):
        smoothed[level] = mx.symbol.Convolution(
            data=merged[level], kernel=(3, 3), pad=(1, 1), stride=(1, 1),
            num_filter=feature_dim, name='fpn_p%d' % level)
    return (smoothed[2], smoothed[3], smoothed[4], smoothed[5], fpn_p6)
def get_rpn_subnet(self, data, num_anchors, suffix):
    """Build one RPN head for a single pyramid level.

    Weights and biases are taken from ``self.shared_param_dict`` so every
    level shares the same RPN parameters; ``suffix`` only differentiates
    the symbol names (e.g. 'p2' ... 'p6').

    Returns (cls_score_flat, cls_prob, bbox_pred_flat, bbox_pred_raw).
    """
    shared = self.shared_param_dict
    conv = mx.sym.Convolution(
        data=data, kernel=(3, 3), pad=(1, 1), num_filter=512,
        name='rpn_conv_' + suffix,
        weight=shared['rpn_conv_weight'], bias=shared['rpn_conv_bias'])
    relu = mx.sym.Activation(data=conv, act_type='relu', name='rpn_relu_' + suffix)
    cls_score = mx.sym.Convolution(
        data=relu, kernel=(1, 1), pad=(0, 0), num_filter=2 * num_anchors,
        name='rpn_cls_score_' + suffix,
        weight=shared['rpn_cls_score_weight'], bias=shared['rpn_cls_score_bias'])
    bbox_pred = mx.sym.Convolution(
        data=relu, kernel=(1, 1), pad=(0, 0), num_filter=4 * num_anchors,
        name='rpn_bbox_pred_' + suffix,
        weight=shared['rpn_bbox_pred_weight'], bias=shared['rpn_bbox_pred_bias'])
    # (N, 2A, H, W) -> (N, 2, A*H, W): isolate the fg/bg axis for channel softmax.
    score_2ch = mx.sym.Reshape(
        data=cls_score, shape=(0, 2, (- 1), 0), name='rpn_cls_score_t1_' + suffix)
    # Flatten spatial dims so scores from all levels can be concatenated.
    score_flat = mx.sym.Reshape(
        data=score_2ch, shape=(0, 2, (- 1)), name='rpn_cls_score_t2_' + suffix)
    prob = mx.sym.SoftmaxActivation(
        data=score_2ch, mode='channel', name='rpn_cls_prob_' + suffix)
    prob_t = mx.sym.Reshape(
        data=prob, shape=(0, (2 * num_anchors), (- 1), 0), name='rpn_cls_prob_t_' + suffix)
    bbox_pred_flat = mx.sym.Reshape(
        data=bbox_pred, shape=(0, 0, (- 1)), name='rpn_bbox_pred_t_' + suffix)
    return (score_flat, prob_t, bbox_pred_flat, bbox_pred)
def get_deformable_roipooling(self, name, data, rois, output_dim, spatial_scale, param_name, group_size=1, pooled_size=7, sample_per_part=4, part_size=7):
    """Deformable PSRoI pooling with learned per-part offsets.

    A first, offset-free pooling pass extracts features that a fully
    connected layer maps to a (dx, dy) offset per pooling part; a second
    pooling pass then samples with those offsets applied.  The offset FC
    parameters are shared across levels via ``self.shared_param_dict``
    (keyed by ``param_name``), while ``name`` differentiates symbol names.
    """
    # Pass 1: plain pooling (no_trans=True) to feed the offset predictor.
    pooled_plain = mx.contrib.sym.DeformablePSROIPooling(
        name='offset_' + name + '_t', data=data, rois=rois,
        group_size=group_size, pooled_size=pooled_size,
        sample_per_part=sample_per_part, no_trans=True, part_size=part_size,
        output_dim=output_dim, spatial_scale=spatial_scale)
    # Two offset values (dx, dy) per part; small lr_mult keeps offsets stable early.
    offset_fc = mx.sym.FullyConnected(
        name='offset_' + name, data=pooled_plain,
        num_hidden=part_size * part_size * 2, lr_mult=0.01,
        weight=self.shared_param_dict['offset_' + param_name + '_weight'],
        bias=self.shared_param_dict['offset_' + param_name + '_bias'])
    offsets = mx.sym.Reshape(
        data=offset_fc, shape=((- 1), 2, part_size, part_size),
        name='offset_reshape_' + name)
    # Pass 2: pool again, displacing each bin by the predicted offsets.
    return mx.contrib.sym.DeformablePSROIPooling(
        name='deformable_roi_pool_' + name, data=data, rois=rois,
        trans=offsets, group_size=group_size, pooled_size=pooled_size,
        sample_per_part=sample_per_part, no_trans=False, part_size=part_size,
        output_dim=output_dim, spatial_scale=spatial_scale, trans_std=0.1)
def get_symbol(self, cfg, is_train=True):
    """Assemble the full detection network: backbone, FPN, RPN and RCNN head.

    Args:
        cfg: experiment configuration (dataset/network/TRAIN/TEST sections).
        is_train: when True, build the training graph with RPN/RCNN losses
            (optionally OHEM); otherwise build the inference graph that
            outputs (rois, cls_prob, bbox_pred).

    Returns:
        An ``mx.sym.Group`` holding either the training losses or the
        inference outputs.  The symbol is also stored on ``self.sym``.
    """
    num_classes = cfg.dataset.NUM_CLASSES
    # Class-agnostic regression predicts 2 box sets (bg/fg); else one per class.
    num_reg_classes = (2 if cfg.CLASS_AGNOSTIC else num_classes)
    data = mx.sym.Variable(name='data')
    im_info = mx.sym.Variable(name='im_info')
    # Backbone stages C2-C5, then FPN levels P2-P6.
    (res2, res3, res4, res5) = self.get_resnet_backbone(data, with_dpyramid=True, with_dconv=True)
    (fpn_p2, fpn_p3, fpn_p4, fpn_p5, fpn_p6) = self.get_fpn_feature(res2, res3, res4, res5)
    # One shared-weight RPN head per pyramid level.
    (rpn_cls_score_p2, rpn_prob_p2, rpn_bbox_loss_p2, rpn_bbox_pred_p2) = self.get_rpn_subnet(fpn_p2, cfg.network.NUM_ANCHORS, 'p2')
    (rpn_cls_score_p3, rpn_prob_p3, rpn_bbox_loss_p3, rpn_bbox_pred_p3) = self.get_rpn_subnet(fpn_p3, cfg.network.NUM_ANCHORS, 'p3')
    (rpn_cls_score_p4, rpn_prob_p4, rpn_bbox_loss_p4, rpn_bbox_pred_p4) = self.get_rpn_subnet(fpn_p4, cfg.network.NUM_ANCHORS, 'p4')
    (rpn_cls_score_p5, rpn_prob_p5, rpn_bbox_loss_p5, rpn_bbox_pred_p5) = self.get_rpn_subnet(fpn_p5, cfg.network.NUM_ANCHORS, 'p5')
    (rpn_cls_score_p6, rpn_prob_p6, rpn_bbox_loss_p6, rpn_bbox_pred_p6) = self.get_rpn_subnet(fpn_p6, cfg.network.NUM_ANCHORS, 'p6')
    rpn_cls_prob_dict = {'rpn_cls_prob_stride64': rpn_prob_p6, 'rpn_cls_prob_stride32': rpn_prob_p5, 'rpn_cls_prob_stride16': rpn_prob_p4, 'rpn_cls_prob_stride8': rpn_prob_p3, 'rpn_cls_prob_stride4': rpn_prob_p2}
    rpn_bbox_pred_dict = {'rpn_bbox_pred_stride64': rpn_bbox_pred_p6, 'rpn_bbox_pred_stride32': rpn_bbox_pred_p5, 'rpn_bbox_pred_stride16': rpn_bbox_pred_p4, 'rpn_bbox_pred_stride8': rpn_bbox_pred_p3, 'rpn_bbox_pred_stride4': rpn_bbox_pred_p2}
    # dict(a, **b) merges portably: Python 3's dict_items objects cannot be
    # concatenated with '+', which the old dict(a.items() + b.items()) relied on.
    arg_dict = dict(rpn_cls_prob_dict, **rpn_bbox_pred_dict)
    if is_train:
        rpn_label = mx.sym.Variable(name='label')
        rpn_bbox_target = mx.sym.Variable(name='bbox_target')
        rpn_bbox_weight = mx.sym.Variable(name='bbox_weight')
        gt_boxes = mx.sym.Variable(name='gt_boxes')
        # Concatenate per-level RPN outputs along the flattened spatial axis.
        rpn_cls_score = mx.sym.Concat(rpn_cls_score_p2, rpn_cls_score_p3, rpn_cls_score_p4, rpn_cls_score_p5, rpn_cls_score_p6, dim=2)
        rpn_bbox_loss = mx.sym.Concat(rpn_bbox_loss_p2, rpn_bbox_loss_p3, rpn_bbox_loss_p4, rpn_bbox_loss_p5, rpn_bbox_loss_p6, dim=2)
        # RPN classification loss; label -1 marks anchors excluded from the loss.
        rpn_cls_output = mx.sym.SoftmaxOutput(data=rpn_cls_score, label=rpn_label, multi_output=True, normalization='valid', use_ignore=True, ignore_label=(- 1), name='rpn_cls_prob')
        # RPN box regression: smooth-L1 masked by bbox_weight, normalized by batch size.
        rpn_bbox_loss = (rpn_bbox_weight * mx.sym.smooth_l1(name='rpn_bbox_loss_l1', scalar=3.0, data=(rpn_bbox_loss - rpn_bbox_target)))
        rpn_bbox_loss = mx.sym.MakeLoss(name='rpn_bbox_loss', data=rpn_bbox_loss, grad_scale=(1.0 / cfg.TRAIN.RPN_BATCH_SIZE))
        aux_dict = {'op_type': 'pyramid_proposal', 'name': 'rois', 'im_info': im_info, 'feat_stride': tuple(cfg.network.RPN_FEAT_STRIDE), 'scales': tuple(cfg.network.ANCHOR_SCALES), 'ratios': tuple(cfg.network.ANCHOR_RATIOS), 'rpn_pre_nms_top_n': cfg.TRAIN.RPN_PRE_NMS_TOP_N, 'rpn_post_nms_top_n': cfg.TRAIN.RPN_POST_NMS_TOP_N, 'threshold': cfg.TRAIN.RPN_NMS_THRESH, 'rpn_min_size': cfg.TRAIN.RPN_MIN_SIZE}
        rois = mx.sym.Custom(**dict(arg_dict, **aux_dict))
        gt_boxes_reshape = mx.sym.Reshape(data=gt_boxes, shape=((- 1), 5), name='gt_boxes_reshape')
        # Sample proposals against ground truth to get RCNN training targets.
        (rois, label, bbox_target, bbox_weight) = mx.sym.Custom(rois=rois, gt_boxes=gt_boxes_reshape, op_type='proposal_target', num_classes=num_reg_classes, batch_images=cfg.TRAIN.BATCH_IMAGES, batch_rois=cfg.TRAIN.BATCH_ROIS, cfg=cPickle.dumps(cfg), fg_fraction=cfg.TRAIN.FG_FRACTION)
    else:
        aux_dict = {'op_type': 'pyramid_proposal', 'name': 'rois', 'im_info': im_info, 'feat_stride': tuple(cfg.network.RPN_FEAT_STRIDE), 'scales': tuple(cfg.network.ANCHOR_SCALES), 'ratios': tuple(cfg.network.ANCHOR_RATIOS), 'rpn_pre_nms_top_n': cfg.TEST.RPN_PRE_NMS_TOP_N, 'rpn_post_nms_top_n': cfg.TEST.RPN_POST_NMS_TOP_N, 'threshold': cfg.TEST.RPN_NMS_THRESH, 'rpn_min_size': cfg.TEST.RPN_MIN_SIZE}
        rois = mx.sym.Custom(**dict(arg_dict, **aux_dict))
    # Per-level deformable-pooling offset parameters (low lr_mult on offsets).
    offset_p2_weight = mx.sym.Variable(name='offset_p2_weight', dtype=np.float32, lr_mult=0.01)
    offset_p3_weight = mx.sym.Variable(name='offset_p3_weight', dtype=np.float32, lr_mult=0.01)
    offset_p4_weight = mx.sym.Variable(name='offset_p4_weight', dtype=np.float32, lr_mult=0.01)
    offset_p5_weight = mx.sym.Variable(name='offset_p5_weight', dtype=np.float32, lr_mult=0.01)
    offset_p2_bias = mx.sym.Variable(name='offset_p2_bias', dtype=np.float32, lr_mult=0.01)
    offset_p3_bias = mx.sym.Variable(name='offset_p3_bias', dtype=np.float32, lr_mult=0.01)
    offset_p4_bias = mx.sym.Variable(name='offset_p4_bias', dtype=np.float32, lr_mult=0.01)
    offset_p5_bias = mx.sym.Variable(name='offset_p5_bias', dtype=np.float32, lr_mult=0.01)
    # RoI features pooled from the appropriate pyramid level per RoI.
    roi_pool = mx.symbol.Custom(data_p2=fpn_p2, data_p3=fpn_p3, data_p4=fpn_p4, data_p5=fpn_p5, offset_weight_p2=offset_p2_weight, offset_bias_p2=offset_p2_bias, offset_weight_p3=offset_p3_weight, offset_bias_p3=offset_p3_bias, offset_weight_p4=offset_p4_weight, offset_bias_p4=offset_p4_bias, offset_weight_p5=offset_p5_weight, offset_bias_p5=offset_p5_bias, rois=rois, op_type='fpn_roi_pooling', name='fpn_roi_pooling', with_deformable=True)
    # RCNN head: two FC layers, then classification and box-regression outputs.
    fc_new_1 = mx.symbol.FullyConnected(name='fc_new_1', data=roi_pool, num_hidden=1024)
    fc_new_1_relu = mx.sym.Activation(data=fc_new_1, act_type='relu', name='fc_new_1_relu')
    fc_new_2 = mx.symbol.FullyConnected(name='fc_new_2', data=fc_new_1_relu, num_hidden=1024)
    fc_new_2_relu = mx.sym.Activation(data=fc_new_2, act_type='relu', name='fc_new_2_relu')
    cls_score = mx.symbol.FullyConnected(name='cls_score', data=fc_new_2_relu, num_hidden=num_classes)
    bbox_pred = mx.symbol.FullyConnected(name='bbox_pred', data=fc_new_2_relu, num_hidden=(num_reg_classes * 4))
    if is_train:
        if cfg.TRAIN.ENABLE_OHEM:
            # NOTE(review): BoxAnnotatorOHEMProp.__init__ in this file also takes
            # an 'rm_last' argument that is not passed here — confirm its default.
            (labels_ohem, bbox_weights_ohem) = mx.sym.Custom(op_type='BoxAnnotatorOHEM', num_classes=num_classes, num_reg_classes=num_reg_classes, roi_per_img=cfg.TRAIN.BATCH_ROIS_OHEM, cls_score=cls_score, bbox_pred=bbox_pred, labels=label, bbox_targets=bbox_target, bbox_weights=bbox_weight)
            cls_prob = mx.sym.SoftmaxOutput(name='cls_prob', data=cls_score, label=labels_ohem, normalization='valid', use_ignore=True, ignore_label=(- 1))
            bbox_loss_ = (bbox_weights_ohem * mx.sym.smooth_l1(name='bbox_loss_', scalar=1.0, data=(bbox_pred - bbox_target)))
            bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=(1.0 / cfg.TRAIN.BATCH_ROIS_OHEM))
            rcnn_label = labels_ohem
        else:
            cls_prob = mx.sym.SoftmaxOutput(name='cls_prob', data=cls_score, label=label, normalization='valid')
            bbox_loss_ = (bbox_weight * mx.sym.smooth_l1(name='bbox_loss_', scalar=1.0, data=(bbox_pred - bbox_target)))
            bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=(1.0 / cfg.TRAIN.BATCH_ROIS))
            rcnn_label = label
        # Reshape outputs to (batch, rois, ...) so metrics can index per image.
        rcnn_label = mx.sym.Reshape(data=rcnn_label, shape=(cfg.TRAIN.BATCH_IMAGES, (- 1)), name='label_reshape')
        cls_prob = mx.sym.Reshape(data=cls_prob, shape=(cfg.TRAIN.BATCH_IMAGES, (- 1), num_classes), name='cls_prob_reshape')
        bbox_loss = mx.sym.Reshape(data=bbox_loss, shape=(cfg.TRAIN.BATCH_IMAGES, (- 1), (4 * num_reg_classes)), name='bbox_loss_reshape')
        group = mx.sym.Group([rpn_cls_output, rpn_bbox_loss, cls_prob, bbox_loss, mx.sym.BlockGrad(rcnn_label)])
    else:
        cls_prob = mx.sym.SoftmaxActivation(name='cls_prob', data=cls_score)
        cls_prob = mx.sym.Reshape(data=cls_prob, shape=(cfg.TEST.BATCH_IMAGES, (- 1), num_classes), name='cls_prob_reshape')
        bbox_pred = mx.sym.Reshape(data=bbox_pred, shape=(cfg.TEST.BATCH_IMAGES, (- 1), (4 * num_reg_classes)), name='bbox_pred_reshape')
        group = mx.sym.Group([rois, cls_prob, bbox_pred])
    self.sym = group
    return group
def init_weight_rcnn(self, cfg, arg_params, aux_params):
arg_params['fc_new_1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fc_new_1_weight'])
arg_params['fc_new_1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fc_new_1_bias'])
arg_params['fc_new_2_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fc_new_2_weight'])
arg_params['fc_new_2_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fc_new_2_bias'])
arg_params['cls_score_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['cls_score_weight'])
arg_params['cls_score_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['cls_score_bias'])
arg_params['bbox_pred_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['bbox_pred_weight'])
arg_params['bbox_pred_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['bbox_pred_bias'])
def init_deformable_convnet(self, cfg, arg_params, aux_params):
arg_params['res5a_branch2b_offset_weight'] = mx.nd.zeros(shape=self.arg_shape_dict['res5a_branch2b_offset_weight'])
arg_params['res5a_branch2b_offset_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['res5a_branch2b_offset_bias'])
arg_params['res5b_branch2b_offset_weight'] = mx.nd.zeros(shape=self.arg_shape_dict['res5b_branch2b_offset_weight'])
arg_params['res5b_branch2b_offset_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['res5b_branch2b_offset_bias'])
arg_params['res5c_branch2b_offset_weight'] = mx.nd.zeros(shape=self.arg_shape_dict['res5c_branch2b_offset_weight'])
arg_params['res5c_branch2b_offset_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['res5c_branch2b_offset_bias'])
arg_params['res3b3_branch2b_offset_weight'] = mx.nd.zeros(shape=self.arg_shape_dict['res3b3_branch2b_offset_weight'])
arg_params['res3b3_branch2b_offset_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['res3b3_branch2b_offset_bias'])
arg_params['res4b22_branch2b_offset_weight'] = mx.nd.zeros(shape=self.arg_shape_dict['res4b22_branch2b_offset_weight'])
arg_params['res4b22_branch2b_offset_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['res4b22_branch2b_offset_bias'])
def init_weight_fpn(self, cfg, arg_params, aux_params):
arg_params['fpn_p6_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p6_weight'])
arg_params['fpn_p6_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p6_bias'])
arg_params['fpn_p5_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p5_weight'])
arg_params['fpn_p5_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p5_bias'])
arg_params['fpn_p4_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p4_weight'])
arg_params['fpn_p4_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p4_bias'])
arg_params['fpn_p3_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p3_weight'])
arg_params['fpn_p3_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p3_bias'])
arg_params['fpn_p2_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p2_weight'])
arg_params['fpn_p2_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p2_bias'])
arg_params['fpn_p5_1x1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p5_1x1_weight'])
arg_params['fpn_p5_1x1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p5_1x1_bias'])
arg_params['fpn_p4_1x1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p4_1x1_weight'])
arg_params['fpn_p4_1x1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p4_1x1_bias'])
arg_params['fpn_p3_1x1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p3_1x1_weight'])
arg_params['fpn_p3_1x1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p3_1x1_bias'])
arg_params['fpn_p2_1x1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p2_1x1_weight'])
arg_params['fpn_p2_1x1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p2_1x1_bias'])
def init_weight(self, cfg, arg_params, aux_params):
(arg_params2, aux_params2) = ({}, {})
for name in self.shared_param_list:
if ('offset' in name):
arg_params2[(name + '_weight')] = mx.nd.zeros(shape=self.arg_shape_dict[(name + '_weight')])
else:
arg_params2[(name + '_weight')] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict[(name + '_weight')])
arg_params2[(name + '_bias')] = mx.nd.zeros(shape=self.arg_shape_dict[(name + '_bias')])
self.init_deformable_convnet(cfg, arg_params2, aux_params2)
self.init_weight_rcnn(cfg, arg_params2, aux_params2)
self.init_weight_fpn(cfg, arg_params2, aux_params2)
for k in arg_params2:
if ((k not in arg_params) or (arg_params[k].shape != arg_params2[k].shape)):
arg_params[k] = arg_params2[k]
for k in aux_params2:
if (k not in aux_params):
aux_params[k] = aux_params2[k]
|
class resnet_v1_101_fpn_dcn_rcnn_oneshot_v3(Symbol):
def __init__(self):
'\n Use __init__ to define parameter network needs\n '
self.shared_param_list = ['offset_p2', 'offset_p3', 'offset_p4', 'offset_p5', 'rpn_conv', 'rpn_cls_score', 'rpn_bbox_pred']
self.shared_param_dict = {}
for name in self.shared_param_list:
self.shared_param_dict[(name + '_weight')] = mx.sym.Variable((name + '_weight'))
self.shared_param_dict[(name + '_bias')] = mx.sym.Variable((name + '_bias'))
self.constants_dict = {}
def get_resnet_backbone(self, data, with_dilated=False, with_dconv=False, with_dpyramid=False, eps=1e-05):
conv1 = mx.symbol.Convolution(name='conv1', data=data, num_filter=64, pad=(3, 3), kernel=(7, 7), stride=(2, 2), no_bias=True)
bn_conv1 = mx.symbol.BatchNorm(name='bn_conv1', data=conv1, use_global_stats=True, fix_gamma=False, eps=eps)
scale_conv1 = bn_conv1
conv1_relu = mx.symbol.Activation(name='conv1_relu', data=scale_conv1, act_type='relu')
pool1 = mx.symbol.Pooling(name='pool1', data=conv1_relu, pooling_convention='full', pad=(0, 0), kernel=(3, 3), stride=(2, 2), pool_type='max')
res2a_branch1 = mx.symbol.Convolution(name='res2a_branch1', data=pool1, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2a_branch1 = mx.symbol.BatchNorm(name='bn2a_branch1', data=res2a_branch1, use_global_stats=True, fix_gamma=False, eps=eps)
scale2a_branch1 = bn2a_branch1
res2a_branch2a = mx.symbol.Convolution(name='res2a_branch2a', data=pool1, num_filter=64, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2a_branch2a = mx.symbol.BatchNorm(name='bn2a_branch2a', data=res2a_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale2a_branch2a = bn2a_branch2a
res2a_branch2a_relu = mx.symbol.Activation(name='res2a_branch2a_relu', data=scale2a_branch2a, act_type='relu')
res2a_branch2b = mx.symbol.Convolution(name='res2a_branch2b', data=res2a_branch2a_relu, num_filter=64, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn2a_branch2b = mx.symbol.BatchNorm(name='bn2a_branch2b', data=res2a_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale2a_branch2b = bn2a_branch2b
res2a_branch2b_relu = mx.symbol.Activation(name='res2a_branch2b_relu', data=scale2a_branch2b, act_type='relu')
res2a_branch2c = mx.symbol.Convolution(name='res2a_branch2c', data=res2a_branch2b_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2a_branch2c = mx.symbol.BatchNorm(name='bn2a_branch2c', data=res2a_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale2a_branch2c = bn2a_branch2c
res2a = mx.symbol.broadcast_add(*[scale2a_branch1, scale2a_branch2c], name='res2a')
res2a_relu = mx.symbol.Activation(name='res2a_relu', data=res2a, act_type='relu')
res2b_branch2a = mx.symbol.Convolution(name='res2b_branch2a', data=res2a_relu, num_filter=64, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2b_branch2a = mx.symbol.BatchNorm(name='bn2b_branch2a', data=res2b_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale2b_branch2a = bn2b_branch2a
res2b_branch2a_relu = mx.symbol.Activation(name='res2b_branch2a_relu', data=scale2b_branch2a, act_type='relu')
res2b_branch2b = mx.symbol.Convolution(name='res2b_branch2b', data=res2b_branch2a_relu, num_filter=64, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn2b_branch2b = mx.symbol.BatchNorm(name='bn2b_branch2b', data=res2b_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale2b_branch2b = bn2b_branch2b
res2b_branch2b_relu = mx.symbol.Activation(name='res2b_branch2b_relu', data=scale2b_branch2b, act_type='relu')
res2b_branch2c = mx.symbol.Convolution(name='res2b_branch2c', data=res2b_branch2b_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2b_branch2c = mx.symbol.BatchNorm(name='bn2b_branch2c', data=res2b_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale2b_branch2c = bn2b_branch2c
res2b = mx.symbol.broadcast_add(*[res2a_relu, scale2b_branch2c], name='res2b')
res2b_relu = mx.symbol.Activation(name='res2b_relu', data=res2b, act_type='relu')
res2c_branch2a = mx.symbol.Convolution(name='res2c_branch2a', data=res2b_relu, num_filter=64, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2c_branch2a = mx.symbol.BatchNorm(name='bn2c_branch2a', data=res2c_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale2c_branch2a = bn2c_branch2a
res2c_branch2a_relu = mx.symbol.Activation(name='res2c_branch2a_relu', data=scale2c_branch2a, act_type='relu')
res2c_branch2b = mx.symbol.Convolution(name='res2c_branch2b', data=res2c_branch2a_relu, num_filter=64, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn2c_branch2b = mx.symbol.BatchNorm(name='bn2c_branch2b', data=res2c_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale2c_branch2b = bn2c_branch2b
res2c_branch2b_relu = mx.symbol.Activation(name='res2c_branch2b_relu', data=scale2c_branch2b, act_type='relu')
res2c_branch2c = mx.symbol.Convolution(name='res2c_branch2c', data=res2c_branch2b_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2c_branch2c = mx.symbol.BatchNorm(name='bn2c_branch2c', data=res2c_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale2c_branch2c = bn2c_branch2c
res2c = mx.symbol.broadcast_add(*[res2b_relu, scale2c_branch2c], name='res2c')
res2c_relu = mx.symbol.Activation(name='res2c_relu', data=res2c, act_type='relu')
res3a_branch1 = mx.symbol.Convolution(name='res3a_branch1', data=res2c_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(2, 2), no_bias=True)
bn3a_branch1 = mx.symbol.BatchNorm(name='bn3a_branch1', data=res3a_branch1, use_global_stats=True, fix_gamma=False, eps=eps)
scale3a_branch1 = bn3a_branch1
res3a_branch2a = mx.symbol.Convolution(name='res3a_branch2a', data=res2c_relu, num_filter=128, pad=(0, 0), kernel=(1, 1), stride=(2, 2), no_bias=True)
bn3a_branch2a = mx.symbol.BatchNorm(name='bn3a_branch2a', data=res3a_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale3a_branch2a = bn3a_branch2a
res3a_branch2a_relu = mx.symbol.Activation(name='res3a_branch2a_relu', data=scale3a_branch2a, act_type='relu')
res3a_branch2b = mx.symbol.Convolution(name='res3a_branch2b', data=res3a_branch2a_relu, num_filter=128, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3a_branch2b = mx.symbol.BatchNorm(name='bn3a_branch2b', data=res3a_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale3a_branch2b = bn3a_branch2b
res3a_branch2b_relu = mx.symbol.Activation(name='res3a_branch2b_relu', data=scale3a_branch2b, act_type='relu')
res3a_branch2c = mx.symbol.Convolution(name='res3a_branch2c', data=res3a_branch2b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3a_branch2c = mx.symbol.BatchNorm(name='bn3a_branch2c', data=res3a_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale3a_branch2c = bn3a_branch2c
res3a = mx.symbol.broadcast_add(*[scale3a_branch1, scale3a_branch2c], name='res3a')
res3a_relu = mx.symbol.Activation(name='res3a_relu', data=res3a, act_type='relu')
res3b1_branch2a = mx.symbol.Convolution(name='res3b1_branch2a', data=res3a_relu, num_filter=128, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b1_branch2a = mx.symbol.BatchNorm(name='bn3b1_branch2a', data=res3b1_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale3b1_branch2a = bn3b1_branch2a
res3b1_branch2a_relu = mx.symbol.Activation(name='res3b1_branch2a_relu', data=scale3b1_branch2a, act_type='relu')
res3b1_branch2b = mx.symbol.Convolution(name='res3b1_branch2b', data=res3b1_branch2a_relu, num_filter=128, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3b1_branch2b = mx.symbol.BatchNorm(name='bn3b1_branch2b', data=res3b1_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale3b1_branch2b = bn3b1_branch2b
res3b1_branch2b_relu = mx.symbol.Activation(name='res3b1_branch2b_relu', data=scale3b1_branch2b, act_type='relu')
res3b1_branch2c = mx.symbol.Convolution(name='res3b1_branch2c', data=res3b1_branch2b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b1_branch2c = mx.symbol.BatchNorm(name='bn3b1_branch2c', data=res3b1_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale3b1_branch2c = bn3b1_branch2c
res3b1 = mx.symbol.broadcast_add(*[res3a_relu, scale3b1_branch2c], name='res3b1')
res3b1_relu = mx.symbol.Activation(name='res3b1_relu', data=res3b1, act_type='relu')
res3b2_branch2a = mx.symbol.Convolution(name='res3b2_branch2a', data=res3b1_relu, num_filter=128, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b2_branch2a = mx.symbol.BatchNorm(name='bn3b2_branch2a', data=res3b2_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale3b2_branch2a = bn3b2_branch2a
res3b2_branch2a_relu = mx.symbol.Activation(name='res3b2_branch2a_relu', data=scale3b2_branch2a, act_type='relu')
res3b2_branch2b = mx.symbol.Convolution(name='res3b2_branch2b', data=res3b2_branch2a_relu, num_filter=128, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3b2_branch2b = mx.symbol.BatchNorm(name='bn3b2_branch2b', data=res3b2_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale3b2_branch2b = bn3b2_branch2b
res3b2_branch2b_relu = mx.symbol.Activation(name='res3b2_branch2b_relu', data=scale3b2_branch2b, act_type='relu')
res3b2_branch2c = mx.symbol.Convolution(name='res3b2_branch2c', data=res3b2_branch2b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b2_branch2c = mx.symbol.BatchNorm(name='bn3b2_branch2c', data=res3b2_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale3b2_branch2c = bn3b2_branch2c
res3b2 = mx.symbol.broadcast_add(*[res3b1_relu, scale3b2_branch2c], name='res3b2')
res3b2_relu = mx.symbol.Activation(name='res3b2_relu', data=res3b2, act_type='relu')
res3b3_branch2a = mx.symbol.Convolution(name='res3b3_branch2a', data=res3b2_relu, num_filter=128, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b3_branch2a = mx.symbol.BatchNorm(name='bn3b3_branch2a', data=res3b3_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale3b3_branch2a = bn3b3_branch2a
res3b3_branch2a_relu = mx.symbol.Activation(name='res3b3_branch2a_relu', data=scale3b3_branch2a, act_type='relu')
if with_dpyramid:
res3b3_branch2b_offset = mx.symbol.Convolution(name='res3b3_branch2b_offset', data=res3b3_branch2a_relu, num_filter=72, pad=(1, 1), kernel=(3, 3), stride=(1, 1))
res3b3_branch2b = mx.contrib.symbol.DeformableConvolution(name='res3b3_branch2b', data=res3b3_branch2a_relu, offset=res3b3_branch2b_offset, num_filter=128, pad=(1, 1), kernel=(3, 3), num_deformable_group=4, stride=(1, 1), no_bias=True)
else:
res3b3_branch2b = mx.symbol.Convolution(name='res3b3_branch2b', data=res3b3_branch2a_relu, num_filter=128, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3b3_branch2b = mx.symbol.BatchNorm(name='bn3b3_branch2b', data=res3b3_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale3b3_branch2b = bn3b3_branch2b
res3b3_branch2b_relu = mx.symbol.Activation(name='res3b3_branch2b_relu', data=scale3b3_branch2b, act_type='relu')
res3b3_branch2c = mx.symbol.Convolution(name='res3b3_branch2c', data=res3b3_branch2b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b3_branch2c = mx.symbol.BatchNorm(name='bn3b3_branch2c', data=res3b3_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale3b3_branch2c = bn3b3_branch2c
res3b3 = mx.symbol.broadcast_add(*[res3b2_relu, scale3b3_branch2c], name='res3b3')
res3b3_relu = mx.symbol.Activation(name='res3b3_relu', data=res3b3, act_type='relu')
res4a_branch1 = mx.symbol.Convolution(name='res4a_branch1', data=res3b3_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(2, 2), no_bias=True)
bn4a_branch1 = mx.symbol.BatchNorm(name='bn4a_branch1', data=res4a_branch1, use_global_stats=True, fix_gamma=False, eps=eps)
scale4a_branch1 = bn4a_branch1
res4a_branch2a = mx.symbol.Convolution(name='res4a_branch2a', data=res3b3_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(2, 2), no_bias=True)
bn4a_branch2a = mx.symbol.BatchNorm(name='bn4a_branch2a', data=res4a_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4a_branch2a = bn4a_branch2a
res4a_branch2a_relu = mx.symbol.Activation(name='res4a_branch2a_relu', data=scale4a_branch2a, act_type='relu')
res4a_branch2b = mx.symbol.Convolution(name='res4a_branch2b', data=res4a_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4a_branch2b = mx.symbol.BatchNorm(name='bn4a_branch2b', data=res4a_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4a_branch2b = bn4a_branch2b
res4a_branch2b_relu = mx.symbol.Activation(name='res4a_branch2b_relu', data=scale4a_branch2b, act_type='relu')
res4a_branch2c = mx.symbol.Convolution(name='res4a_branch2c', data=res4a_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4a_branch2c = mx.symbol.BatchNorm(name='bn4a_branch2c', data=res4a_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4a_branch2c = bn4a_branch2c
res4a = mx.symbol.broadcast_add(*[scale4a_branch1, scale4a_branch2c], name='res4a')
res4a_relu = mx.symbol.Activation(name='res4a_relu', data=res4a, act_type='relu')
res4b1_branch2a = mx.symbol.Convolution(name='res4b1_branch2a', data=res4a_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b1_branch2a = mx.symbol.BatchNorm(name='bn4b1_branch2a', data=res4b1_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b1_branch2a = bn4b1_branch2a
res4b1_branch2a_relu = mx.symbol.Activation(name='res4b1_branch2a_relu', data=scale4b1_branch2a, act_type='relu')
res4b1_branch2b = mx.symbol.Convolution(name='res4b1_branch2b', data=res4b1_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b1_branch2b = mx.symbol.BatchNorm(name='bn4b1_branch2b', data=res4b1_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b1_branch2b = bn4b1_branch2b
res4b1_branch2b_relu = mx.symbol.Activation(name='res4b1_branch2b_relu', data=scale4b1_branch2b, act_type='relu')
res4b1_branch2c = mx.symbol.Convolution(name='res4b1_branch2c', data=res4b1_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b1_branch2c = mx.symbol.BatchNorm(name='bn4b1_branch2c', data=res4b1_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b1_branch2c = bn4b1_branch2c
res4b1 = mx.symbol.broadcast_add(*[res4a_relu, scale4b1_branch2c], name='res4b1')
res4b1_relu = mx.symbol.Activation(name='res4b1_relu', data=res4b1, act_type='relu')
res4b2_branch2a = mx.symbol.Convolution(name='res4b2_branch2a', data=res4b1_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b2_branch2a = mx.symbol.BatchNorm(name='bn4b2_branch2a', data=res4b2_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b2_branch2a = bn4b2_branch2a
res4b2_branch2a_relu = mx.symbol.Activation(name='res4b2_branch2a_relu', data=scale4b2_branch2a, act_type='relu')
res4b2_branch2b = mx.symbol.Convolution(name='res4b2_branch2b', data=res4b2_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b2_branch2b = mx.symbol.BatchNorm(name='bn4b2_branch2b', data=res4b2_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b2_branch2b = bn4b2_branch2b
res4b2_branch2b_relu = mx.symbol.Activation(name='res4b2_branch2b_relu', data=scale4b2_branch2b, act_type='relu')
res4b2_branch2c = mx.symbol.Convolution(name='res4b2_branch2c', data=res4b2_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b2_branch2c = mx.symbol.BatchNorm(name='bn4b2_branch2c', data=res4b2_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b2_branch2c = bn4b2_branch2c
res4b2 = mx.symbol.broadcast_add(*[res4b1_relu, scale4b2_branch2c], name='res4b2')
res4b2_relu = mx.symbol.Activation(name='res4b2_relu', data=res4b2, act_type='relu')
res4b3_branch2a = mx.symbol.Convolution(name='res4b3_branch2a', data=res4b2_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b3_branch2a = mx.symbol.BatchNorm(name='bn4b3_branch2a', data=res4b3_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b3_branch2a = bn4b3_branch2a
res4b3_branch2a_relu = mx.symbol.Activation(name='res4b3_branch2a_relu', data=scale4b3_branch2a, act_type='relu')
res4b3_branch2b = mx.symbol.Convolution(name='res4b3_branch2b', data=res4b3_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b3_branch2b = mx.symbol.BatchNorm(name='bn4b3_branch2b', data=res4b3_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b3_branch2b = bn4b3_branch2b
res4b3_branch2b_relu = mx.symbol.Activation(name='res4b3_branch2b_relu', data=scale4b3_branch2b, act_type='relu')
res4b3_branch2c = mx.symbol.Convolution(name='res4b3_branch2c', data=res4b3_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b3_branch2c = mx.symbol.BatchNorm(name='bn4b3_branch2c', data=res4b3_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b3_branch2c = bn4b3_branch2c
res4b3 = mx.symbol.broadcast_add(*[res4b2_relu, scale4b3_branch2c], name='res4b3')
res4b3_relu = mx.symbol.Activation(name='res4b3_relu', data=res4b3, act_type='relu')
res4b4_branch2a = mx.symbol.Convolution(name='res4b4_branch2a', data=res4b3_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b4_branch2a = mx.symbol.BatchNorm(name='bn4b4_branch2a', data=res4b4_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b4_branch2a = bn4b4_branch2a
res4b4_branch2a_relu = mx.symbol.Activation(name='res4b4_branch2a_relu', data=scale4b4_branch2a, act_type='relu')
res4b4_branch2b = mx.symbol.Convolution(name='res4b4_branch2b', data=res4b4_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b4_branch2b = mx.symbol.BatchNorm(name='bn4b4_branch2b', data=res4b4_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b4_branch2b = bn4b4_branch2b
res4b4_branch2b_relu = mx.symbol.Activation(name='res4b4_branch2b_relu', data=scale4b4_branch2b, act_type='relu')
res4b4_branch2c = mx.symbol.Convolution(name='res4b4_branch2c', data=res4b4_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b4_branch2c = mx.symbol.BatchNorm(name='bn4b4_branch2c', data=res4b4_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b4_branch2c = bn4b4_branch2c
res4b4 = mx.symbol.broadcast_add(*[res4b3_relu, scale4b4_branch2c], name='res4b4')
res4b4_relu = mx.symbol.Activation(name='res4b4_relu', data=res4b4, act_type='relu')
res4b5_branch2a = mx.symbol.Convolution(name='res4b5_branch2a', data=res4b4_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b5_branch2a = mx.symbol.BatchNorm(name='bn4b5_branch2a', data=res4b5_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b5_branch2a = bn4b5_branch2a
res4b5_branch2a_relu = mx.symbol.Activation(name='res4b5_branch2a_relu', data=scale4b5_branch2a, act_type='relu')
res4b5_branch2b = mx.symbol.Convolution(name='res4b5_branch2b', data=res4b5_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b5_branch2b = mx.symbol.BatchNorm(name='bn4b5_branch2b', data=res4b5_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b5_branch2b = bn4b5_branch2b
res4b5_branch2b_relu = mx.symbol.Activation(name='res4b5_branch2b_relu', data=scale4b5_branch2b, act_type='relu')
res4b5_branch2c = mx.symbol.Convolution(name='res4b5_branch2c', data=res4b5_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b5_branch2c = mx.symbol.BatchNorm(name='bn4b5_branch2c', data=res4b5_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b5_branch2c = bn4b5_branch2c
res4b5 = mx.symbol.broadcast_add(*[res4b4_relu, scale4b5_branch2c], name='res4b5')
res4b5_relu = mx.symbol.Activation(name='res4b5_relu', data=res4b5, act_type='relu')
res4b6_branch2a = mx.symbol.Convolution(name='res4b6_branch2a', data=res4b5_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b6_branch2a = mx.symbol.BatchNorm(name='bn4b6_branch2a', data=res4b6_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b6_branch2a = bn4b6_branch2a
res4b6_branch2a_relu = mx.symbol.Activation(name='res4b6_branch2a_relu', data=scale4b6_branch2a, act_type='relu')
res4b6_branch2b = mx.symbol.Convolution(name='res4b6_branch2b', data=res4b6_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b6_branch2b = mx.symbol.BatchNorm(name='bn4b6_branch2b', data=res4b6_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b6_branch2b = bn4b6_branch2b
res4b6_branch2b_relu = mx.symbol.Activation(name='res4b6_branch2b_relu', data=scale4b6_branch2b, act_type='relu')
res4b6_branch2c = mx.symbol.Convolution(name='res4b6_branch2c', data=res4b6_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b6_branch2c = mx.symbol.BatchNorm(name='bn4b6_branch2c', data=res4b6_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b6_branch2c = bn4b6_branch2c
res4b6 = mx.symbol.broadcast_add(*[res4b5_relu, scale4b6_branch2c], name='res4b6')
res4b6_relu = mx.symbol.Activation(name='res4b6_relu', data=res4b6, act_type='relu')
res4b7_branch2a = mx.symbol.Convolution(name='res4b7_branch2a', data=res4b6_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b7_branch2a = mx.symbol.BatchNorm(name='bn4b7_branch2a', data=res4b7_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b7_branch2a = bn4b7_branch2a
res4b7_branch2a_relu = mx.symbol.Activation(name='res4b7_branch2a_relu', data=scale4b7_branch2a, act_type='relu')
res4b7_branch2b = mx.symbol.Convolution(name='res4b7_branch2b', data=res4b7_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b7_branch2b = mx.symbol.BatchNorm(name='bn4b7_branch2b', data=res4b7_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b7_branch2b = bn4b7_branch2b
res4b7_branch2b_relu = mx.symbol.Activation(name='res4b7_branch2b_relu', data=scale4b7_branch2b, act_type='relu')
res4b7_branch2c = mx.symbol.Convolution(name='res4b7_branch2c', data=res4b7_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b7_branch2c = mx.symbol.BatchNorm(name='bn4b7_branch2c', data=res4b7_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b7_branch2c = bn4b7_branch2c
res4b7 = mx.symbol.broadcast_add(*[res4b6_relu, scale4b7_branch2c], name='res4b7')
res4b7_relu = mx.symbol.Activation(name='res4b7_relu', data=res4b7, act_type='relu')
res4b8_branch2a = mx.symbol.Convolution(name='res4b8_branch2a', data=res4b7_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b8_branch2a = mx.symbol.BatchNorm(name='bn4b8_branch2a', data=res4b8_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b8_branch2a = bn4b8_branch2a
res4b8_branch2a_relu = mx.symbol.Activation(name='res4b8_branch2a_relu', data=scale4b8_branch2a, act_type='relu')
res4b8_branch2b = mx.symbol.Convolution(name='res4b8_branch2b', data=res4b8_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b8_branch2b = mx.symbol.BatchNorm(name='bn4b8_branch2b', data=res4b8_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b8_branch2b = bn4b8_branch2b
res4b8_branch2b_relu = mx.symbol.Activation(name='res4b8_branch2b_relu', data=scale4b8_branch2b, act_type='relu')
res4b8_branch2c = mx.symbol.Convolution(name='res4b8_branch2c', data=res4b8_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b8_branch2c = mx.symbol.BatchNorm(name='bn4b8_branch2c', data=res4b8_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b8_branch2c = bn4b8_branch2c
res4b8 = mx.symbol.broadcast_add(*[res4b7_relu, scale4b8_branch2c], name='res4b8')
res4b8_relu = mx.symbol.Activation(name='res4b8_relu', data=res4b8, act_type='relu')
res4b9_branch2a = mx.symbol.Convolution(name='res4b9_branch2a', data=res4b8_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b9_branch2a = mx.symbol.BatchNorm(name='bn4b9_branch2a', data=res4b9_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b9_branch2a = bn4b9_branch2a
res4b9_branch2a_relu = mx.symbol.Activation(name='res4b9_branch2a_relu', data=scale4b9_branch2a, act_type='relu')
res4b9_branch2b = mx.symbol.Convolution(name='res4b9_branch2b', data=res4b9_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b9_branch2b = mx.symbol.BatchNorm(name='bn4b9_branch2b', data=res4b9_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b9_branch2b = bn4b9_branch2b
res4b9_branch2b_relu = mx.symbol.Activation(name='res4b9_branch2b_relu', data=scale4b9_branch2b, act_type='relu')
res4b9_branch2c = mx.symbol.Convolution(name='res4b9_branch2c', data=res4b9_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b9_branch2c = mx.symbol.BatchNorm(name='bn4b9_branch2c', data=res4b9_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b9_branch2c = bn4b9_branch2c
res4b9 = mx.symbol.broadcast_add(*[res4b8_relu, scale4b9_branch2c], name='res4b9')
res4b9_relu = mx.symbol.Activation(name='res4b9_relu', data=res4b9, act_type='relu')
res4b10_branch2a = mx.symbol.Convolution(name='res4b10_branch2a', data=res4b9_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b10_branch2a = mx.symbol.BatchNorm(name='bn4b10_branch2a', data=res4b10_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b10_branch2a = bn4b10_branch2a
res4b10_branch2a_relu = mx.symbol.Activation(name='res4b10_branch2a_relu', data=scale4b10_branch2a, act_type='relu')
res4b10_branch2b = mx.symbol.Convolution(name='res4b10_branch2b', data=res4b10_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b10_branch2b = mx.symbol.BatchNorm(name='bn4b10_branch2b', data=res4b10_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b10_branch2b = bn4b10_branch2b
res4b10_branch2b_relu = mx.symbol.Activation(name='res4b10_branch2b_relu', data=scale4b10_branch2b, act_type='relu')
res4b10_branch2c = mx.symbol.Convolution(name='res4b10_branch2c', data=res4b10_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b10_branch2c = mx.symbol.BatchNorm(name='bn4b10_branch2c', data=res4b10_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b10_branch2c = bn4b10_branch2c
res4b10 = mx.symbol.broadcast_add(*[res4b9_relu, scale4b10_branch2c], name='res4b10')
res4b10_relu = mx.symbol.Activation(name='res4b10_relu', data=res4b10, act_type='relu')
res4b11_branch2a = mx.symbol.Convolution(name='res4b11_branch2a', data=res4b10_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b11_branch2a = mx.symbol.BatchNorm(name='bn4b11_branch2a', data=res4b11_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b11_branch2a = bn4b11_branch2a
res4b11_branch2a_relu = mx.symbol.Activation(name='res4b11_branch2a_relu', data=scale4b11_branch2a, act_type='relu')
res4b11_branch2b = mx.symbol.Convolution(name='res4b11_branch2b', data=res4b11_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b11_branch2b = mx.symbol.BatchNorm(name='bn4b11_branch2b', data=res4b11_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b11_branch2b = bn4b11_branch2b
res4b11_branch2b_relu = mx.symbol.Activation(name='res4b11_branch2b_relu', data=scale4b11_branch2b, act_type='relu')
res4b11_branch2c = mx.symbol.Convolution(name='res4b11_branch2c', data=res4b11_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b11_branch2c = mx.symbol.BatchNorm(name='bn4b11_branch2c', data=res4b11_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b11_branch2c = bn4b11_branch2c
res4b11 = mx.symbol.broadcast_add(*[res4b10_relu, scale4b11_branch2c], name='res4b11')
res4b11_relu = mx.symbol.Activation(name='res4b11_relu', data=res4b11, act_type='relu')
res4b12_branch2a = mx.symbol.Convolution(name='res4b12_branch2a', data=res4b11_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b12_branch2a = mx.symbol.BatchNorm(name='bn4b12_branch2a', data=res4b12_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b12_branch2a = bn4b12_branch2a
res4b12_branch2a_relu = mx.symbol.Activation(name='res4b12_branch2a_relu', data=scale4b12_branch2a, act_type='relu')
res4b12_branch2b = mx.symbol.Convolution(name='res4b12_branch2b', data=res4b12_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b12_branch2b = mx.symbol.BatchNorm(name='bn4b12_branch2b', data=res4b12_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b12_branch2b = bn4b12_branch2b
res4b12_branch2b_relu = mx.symbol.Activation(name='res4b12_branch2b_relu', data=scale4b12_branch2b, act_type='relu')
res4b12_branch2c = mx.symbol.Convolution(name='res4b12_branch2c', data=res4b12_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b12_branch2c = mx.symbol.BatchNorm(name='bn4b12_branch2c', data=res4b12_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b12_branch2c = bn4b12_branch2c
res4b12 = mx.symbol.broadcast_add(*[res4b11_relu, scale4b12_branch2c], name='res4b12')
res4b12_relu = mx.symbol.Activation(name='res4b12_relu', data=res4b12, act_type='relu')
res4b13_branch2a = mx.symbol.Convolution(name='res4b13_branch2a', data=res4b12_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b13_branch2a = mx.symbol.BatchNorm(name='bn4b13_branch2a', data=res4b13_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b13_branch2a = bn4b13_branch2a
res4b13_branch2a_relu = mx.symbol.Activation(name='res4b13_branch2a_relu', data=scale4b13_branch2a, act_type='relu')
res4b13_branch2b = mx.symbol.Convolution(name='res4b13_branch2b', data=res4b13_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b13_branch2b = mx.symbol.BatchNorm(name='bn4b13_branch2b', data=res4b13_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b13_branch2b = bn4b13_branch2b
res4b13_branch2b_relu = mx.symbol.Activation(name='res4b13_branch2b_relu', data=scale4b13_branch2b, act_type='relu')
res4b13_branch2c = mx.symbol.Convolution(name='res4b13_branch2c', data=res4b13_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b13_branch2c = mx.symbol.BatchNorm(name='bn4b13_branch2c', data=res4b13_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b13_branch2c = bn4b13_branch2c
res4b13 = mx.symbol.broadcast_add(*[res4b12_relu, scale4b13_branch2c], name='res4b13')
res4b13_relu = mx.symbol.Activation(name='res4b13_relu', data=res4b13, act_type='relu')
res4b14_branch2a = mx.symbol.Convolution(name='res4b14_branch2a', data=res4b13_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b14_branch2a = mx.symbol.BatchNorm(name='bn4b14_branch2a', data=res4b14_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b14_branch2a = bn4b14_branch2a
res4b14_branch2a_relu = mx.symbol.Activation(name='res4b14_branch2a_relu', data=scale4b14_branch2a, act_type='relu')
res4b14_branch2b = mx.symbol.Convolution(name='res4b14_branch2b', data=res4b14_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b14_branch2b = mx.symbol.BatchNorm(name='bn4b14_branch2b', data=res4b14_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b14_branch2b = bn4b14_branch2b
res4b14_branch2b_relu = mx.symbol.Activation(name='res4b14_branch2b_relu', data=scale4b14_branch2b, act_type='relu')
res4b14_branch2c = mx.symbol.Convolution(name='res4b14_branch2c', data=res4b14_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b14_branch2c = mx.symbol.BatchNorm(name='bn4b14_branch2c', data=res4b14_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b14_branch2c = bn4b14_branch2c
res4b14 = mx.symbol.broadcast_add(*[res4b13_relu, scale4b14_branch2c], name='res4b14')
res4b14_relu = mx.symbol.Activation(name='res4b14_relu', data=res4b14, act_type='relu')
res4b15_branch2a = mx.symbol.Convolution(name='res4b15_branch2a', data=res4b14_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b15_branch2a = mx.symbol.BatchNorm(name='bn4b15_branch2a', data=res4b15_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b15_branch2a = bn4b15_branch2a
res4b15_branch2a_relu = mx.symbol.Activation(name='res4b15_branch2a_relu', data=scale4b15_branch2a, act_type='relu')
res4b15_branch2b = mx.symbol.Convolution(name='res4b15_branch2b', data=res4b15_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b15_branch2b = mx.symbol.BatchNorm(name='bn4b15_branch2b', data=res4b15_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b15_branch2b = bn4b15_branch2b
res4b15_branch2b_relu = mx.symbol.Activation(name='res4b15_branch2b_relu', data=scale4b15_branch2b, act_type='relu')
res4b15_branch2c = mx.symbol.Convolution(name='res4b15_branch2c', data=res4b15_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b15_branch2c = mx.symbol.BatchNorm(name='bn4b15_branch2c', data=res4b15_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b15_branch2c = bn4b15_branch2c
res4b15 = mx.symbol.broadcast_add(*[res4b14_relu, scale4b15_branch2c], name='res4b15')
res4b15_relu = mx.symbol.Activation(name='res4b15_relu', data=res4b15, act_type='relu')
res4b16_branch2a = mx.symbol.Convolution(name='res4b16_branch2a', data=res4b15_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b16_branch2a = mx.symbol.BatchNorm(name='bn4b16_branch2a', data=res4b16_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b16_branch2a = bn4b16_branch2a
res4b16_branch2a_relu = mx.symbol.Activation(name='res4b16_branch2a_relu', data=scale4b16_branch2a, act_type='relu')
res4b16_branch2b = mx.symbol.Convolution(name='res4b16_branch2b', data=res4b16_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b16_branch2b = mx.symbol.BatchNorm(name='bn4b16_branch2b', data=res4b16_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b16_branch2b = bn4b16_branch2b
res4b16_branch2b_relu = mx.symbol.Activation(name='res4b16_branch2b_relu', data=scale4b16_branch2b, act_type='relu')
res4b16_branch2c = mx.symbol.Convolution(name='res4b16_branch2c', data=res4b16_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b16_branch2c = mx.symbol.BatchNorm(name='bn4b16_branch2c', data=res4b16_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b16_branch2c = bn4b16_branch2c
res4b16 = mx.symbol.broadcast_add(*[res4b15_relu, scale4b16_branch2c], name='res4b16')
res4b16_relu = mx.symbol.Activation(name='res4b16_relu', data=res4b16, act_type='relu')
res4b17_branch2a = mx.symbol.Convolution(name='res4b17_branch2a', data=res4b16_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b17_branch2a = mx.symbol.BatchNorm(name='bn4b17_branch2a', data=res4b17_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b17_branch2a = bn4b17_branch2a
res4b17_branch2a_relu = mx.symbol.Activation(name='res4b17_branch2a_relu', data=scale4b17_branch2a, act_type='relu')
res4b17_branch2b = mx.symbol.Convolution(name='res4b17_branch2b', data=res4b17_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b17_branch2b = mx.symbol.BatchNorm(name='bn4b17_branch2b', data=res4b17_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b17_branch2b = bn4b17_branch2b
res4b17_branch2b_relu = mx.symbol.Activation(name='res4b17_branch2b_relu', data=scale4b17_branch2b, act_type='relu')
res4b17_branch2c = mx.symbol.Convolution(name='res4b17_branch2c', data=res4b17_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b17_branch2c = mx.symbol.BatchNorm(name='bn4b17_branch2c', data=res4b17_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b17_branch2c = bn4b17_branch2c
res4b17 = mx.symbol.broadcast_add(*[res4b16_relu, scale4b17_branch2c], name='res4b17')
res4b17_relu = mx.symbol.Activation(name='res4b17_relu', data=res4b17, act_type='relu')
res4b18_branch2a = mx.symbol.Convolution(name='res4b18_branch2a', data=res4b17_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b18_branch2a = mx.symbol.BatchNorm(name='bn4b18_branch2a', data=res4b18_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b18_branch2a = bn4b18_branch2a
res4b18_branch2a_relu = mx.symbol.Activation(name='res4b18_branch2a_relu', data=scale4b18_branch2a, act_type='relu')
res4b18_branch2b = mx.symbol.Convolution(name='res4b18_branch2b', data=res4b18_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b18_branch2b = mx.symbol.BatchNorm(name='bn4b18_branch2b', data=res4b18_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b18_branch2b = bn4b18_branch2b
res4b18_branch2b_relu = mx.symbol.Activation(name='res4b18_branch2b_relu', data=scale4b18_branch2b, act_type='relu')
res4b18_branch2c = mx.symbol.Convolution(name='res4b18_branch2c', data=res4b18_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b18_branch2c = mx.symbol.BatchNorm(name='bn4b18_branch2c', data=res4b18_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b18_branch2c = bn4b18_branch2c
res4b18 = mx.symbol.broadcast_add(*[res4b17_relu, scale4b18_branch2c], name='res4b18')
res4b18_relu = mx.symbol.Activation(name='res4b18_relu', data=res4b18, act_type='relu')
res4b19_branch2a = mx.symbol.Convolution(name='res4b19_branch2a', data=res4b18_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b19_branch2a = mx.symbol.BatchNorm(name='bn4b19_branch2a', data=res4b19_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b19_branch2a = bn4b19_branch2a
res4b19_branch2a_relu = mx.symbol.Activation(name='res4b19_branch2a_relu', data=scale4b19_branch2a, act_type='relu')
res4b19_branch2b = mx.symbol.Convolution(name='res4b19_branch2b', data=res4b19_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b19_branch2b = mx.symbol.BatchNorm(name='bn4b19_branch2b', data=res4b19_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b19_branch2b = bn4b19_branch2b
res4b19_branch2b_relu = mx.symbol.Activation(name='res4b19_branch2b_relu', data=scale4b19_branch2b, act_type='relu')
res4b19_branch2c = mx.symbol.Convolution(name='res4b19_branch2c', data=res4b19_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b19_branch2c = mx.symbol.BatchNorm(name='bn4b19_branch2c', data=res4b19_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b19_branch2c = bn4b19_branch2c
res4b19 = mx.symbol.broadcast_add(*[res4b18_relu, scale4b19_branch2c], name='res4b19')
res4b19_relu = mx.symbol.Activation(name='res4b19_relu', data=res4b19, act_type='relu')
res4b20_branch2a = mx.symbol.Convolution(name='res4b20_branch2a', data=res4b19_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b20_branch2a = mx.symbol.BatchNorm(name='bn4b20_branch2a', data=res4b20_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b20_branch2a = bn4b20_branch2a
res4b20_branch2a_relu = mx.symbol.Activation(name='res4b20_branch2a_relu', data=scale4b20_branch2a, act_type='relu')
res4b20_branch2b = mx.symbol.Convolution(name='res4b20_branch2b', data=res4b20_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b20_branch2b = mx.symbol.BatchNorm(name='bn4b20_branch2b', data=res4b20_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b20_branch2b = bn4b20_branch2b
res4b20_branch2b_relu = mx.symbol.Activation(name='res4b20_branch2b_relu', data=scale4b20_branch2b, act_type='relu')
res4b20_branch2c = mx.symbol.Convolution(name='res4b20_branch2c', data=res4b20_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b20_branch2c = mx.symbol.BatchNorm(name='bn4b20_branch2c', data=res4b20_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b20_branch2c = bn4b20_branch2c
res4b20 = mx.symbol.broadcast_add(*[res4b19_relu, scale4b20_branch2c], name='res4b20')
res4b20_relu = mx.symbol.Activation(name='res4b20_relu', data=res4b20, act_type='relu')
res4b21_branch2a = mx.symbol.Convolution(name='res4b21_branch2a', data=res4b20_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b21_branch2a = mx.symbol.BatchNorm(name='bn4b21_branch2a', data=res4b21_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b21_branch2a = bn4b21_branch2a
res4b21_branch2a_relu = mx.symbol.Activation(name='res4b21_branch2a_relu', data=scale4b21_branch2a, act_type='relu')
res4b21_branch2b = mx.symbol.Convolution(name='res4b21_branch2b', data=res4b21_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b21_branch2b = mx.symbol.BatchNorm(name='bn4b21_branch2b', data=res4b21_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b21_branch2b = bn4b21_branch2b
res4b21_branch2b_relu = mx.symbol.Activation(name='res4b21_branch2b_relu', data=scale4b21_branch2b, act_type='relu')
res4b21_branch2c = mx.symbol.Convolution(name='res4b21_branch2c', data=res4b21_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b21_branch2c = mx.symbol.BatchNorm(name='bn4b21_branch2c', data=res4b21_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b21_branch2c = bn4b21_branch2c
res4b21 = mx.symbol.broadcast_add(*[res4b20_relu, scale4b21_branch2c], name='res4b21')
res4b21_relu = mx.symbol.Activation(name='res4b21_relu', data=res4b21, act_type='relu')
res4b22_branch2a = mx.symbol.Convolution(name='res4b22_branch2a', data=res4b21_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b22_branch2a = mx.symbol.BatchNorm(name='bn4b22_branch2a', data=res4b22_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b22_branch2a = bn4b22_branch2a
res4b22_branch2a_relu = mx.symbol.Activation(name='res4b22_branch2a_relu', data=scale4b22_branch2a, act_type='relu')
if with_dpyramid:
res4b22_branch2b_offset = mx.symbol.Convolution(name='res4b22_branch2b_offset', data=res4b22_branch2a_relu, num_filter=72, pad=(1, 1), kernel=(3, 3), stride=(1, 1))
res4b22_branch2b = mx.contrib.symbol.DeformableConvolution(name='res4b22_branch2b', data=res4b22_branch2a_relu, offset=res4b22_branch2b_offset, num_filter=256, pad=(1, 1), kernel=(3, 3), num_deformable_group=4, stride=(1, 1), no_bias=True)
else:
res4b22_branch2b = mx.symbol.Convolution(name='res4b22_branch2b', data=res4b22_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b22_branch2b = mx.symbol.BatchNorm(name='bn4b22_branch2b', data=res4b22_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b22_branch2b = bn4b22_branch2b
res4b22_branch2b_relu = mx.symbol.Activation(name='res4b22_branch2b_relu', data=scale4b22_branch2b, act_type='relu')
res4b22_branch2c = mx.symbol.Convolution(name='res4b22_branch2c', data=res4b22_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b22_branch2c = mx.symbol.BatchNorm(name='bn4b22_branch2c', data=res4b22_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b22_branch2c = bn4b22_branch2c
res4b22 = mx.symbol.broadcast_add(*[res4b21_relu, scale4b22_branch2c], name='res4b22')
res4b22_relu = mx.symbol.Activation(name='res4b22_relu', data=res4b22, act_type='relu')
if with_dilated:
res5_stride = (1, 1)
res5_dilate = (2, 2)
else:
res5_stride = (2, 2)
res5_dilate = (1, 1)
res5a_branch2a = mx.symbol.Convolution(name='res5a_branch2a', data=res4b22_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=res5_stride, no_bias=True)
bn5a_branch2a = mx.symbol.BatchNorm(name='bn5a_branch2a', data=res5a_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale5a_branch2a = bn5a_branch2a
res5a_branch2a_relu = mx.symbol.Activation(name='res5a_branch2a_relu', data=scale5a_branch2a, act_type='relu')
if with_dconv:
res5a_branch2b_offset = mx.symbol.Convolution(name='res5a_branch2b_offset', data=res5a_branch2a_relu, num_filter=72, pad=res5_dilate, kernel=(3, 3), dilate=res5_dilate)
res5a_branch2b = mx.contrib.symbol.DeformableConvolution(name='res5a_branch2b', data=res5a_branch2a_relu, offset=res5a_branch2b_offset, num_filter=512, pad=res5_dilate, kernel=(3, 3), num_deformable_group=4, stride=(1, 1), dilate=res5_dilate, no_bias=True)
else:
res5a_branch2b = mx.symbol.Convolution(name='res5a_branch2b', data=res5a_branch2a_relu, num_filter=512, pad=res5_dilate, kernel=(3, 3), stride=(1, 1), dilate=res5_dilate, no_bias=True)
bn5a_branch2b = mx.symbol.BatchNorm(name='bn5a_branch2b', data=res5a_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale5a_branch2b = bn5a_branch2b
res5a_branch2b_relu = mx.symbol.Activation(name='res5a_branch2b_relu', data=scale5a_branch2b, act_type='relu')
res5a_branch2c = mx.symbol.Convolution(name='res5a_branch2c', data=res5a_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5a_branch2c = mx.symbol.BatchNorm(name='bn5a_branch2c', data=res5a_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale5a_branch2c = bn5a_branch2c
res5a_branch1 = mx.symbol.Convolution(name='res5a_branch1', data=res4b22_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=res5_stride, no_bias=True)
bn5a_branch1 = mx.symbol.BatchNorm(name='bn5a_branch1', data=res5a_branch1, use_global_stats=True, fix_gamma=False, eps=eps)
scale5a_branch1 = bn5a_branch1
res5a = mx.symbol.broadcast_add(*[scale5a_branch1, scale5a_branch2c], name='res5a')
res5a_relu = mx.symbol.Activation(name='res5a_relu', data=res5a, act_type='relu')
res5b_branch2a = mx.symbol.Convolution(name='res5b_branch2a', data=res5a_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5b_branch2a = mx.symbol.BatchNorm(name='bn5b_branch2a', data=res5b_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale5b_branch2a = bn5b_branch2a
res5b_branch2a_relu = mx.symbol.Activation(name='res5b_branch2a_relu', data=scale5b_branch2a, act_type='relu')
if with_dconv:
res5b_branch2b_offset = mx.symbol.Convolution(name='res5b_branch2b_offset', data=res5b_branch2a_relu, num_filter=72, pad=res5_dilate, kernel=(3, 3), dilate=res5_dilate)
res5b_branch2b = mx.contrib.symbol.DeformableConvolution(name='res5b_branch2b', data=res5b_branch2a_relu, offset=res5b_branch2b_offset, num_filter=512, pad=res5_dilate, kernel=(3, 3), num_deformable_group=4, dilate=res5_dilate, no_bias=True)
else:
res5b_branch2b = mx.symbol.Convolution(name='res5b_branch2b', data=res5b_branch2a_relu, num_filter=512, pad=res5_dilate, kernel=(3, 3), stride=(1, 1), dilate=res5_dilate, no_bias=True)
bn5b_branch2b = mx.symbol.BatchNorm(name='bn5b_branch2b', data=res5b_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale5b_branch2b = bn5b_branch2b
res5b_branch2b_relu = mx.symbol.Activation(name='res5b_branch2b_relu', data=scale5b_branch2b, act_type='relu')
res5b_branch2c = mx.symbol.Convolution(name='res5b_branch2c', data=res5b_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5b_branch2c = mx.symbol.BatchNorm(name='bn5b_branch2c', data=res5b_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale5b_branch2c = bn5b_branch2c
res5b = mx.symbol.broadcast_add(*[res5a_relu, scale5b_branch2c], name='res5b')
res5b_relu = mx.symbol.Activation(name='res5b_relu', data=res5b, act_type='relu')
res5c_branch2a = mx.symbol.Convolution(name='res5c_branch2a', data=res5b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5c_branch2a = mx.symbol.BatchNorm(name='bn5c_branch2a', data=res5c_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale5c_branch2a = bn5c_branch2a
res5c_branch2a_relu = mx.symbol.Activation(name='res5c_branch2a_relu', data=scale5c_branch2a, act_type='relu')
if with_dconv:
res5c_branch2b_offset = mx.symbol.Convolution(name='res5c_branch2b_offset', data=res5c_branch2a_relu, num_filter=72, pad=res5_dilate, kernel=(3, 3), dilate=res5_dilate)
res5c_branch2b = mx.contrib.symbol.DeformableConvolution(name='res5c_branch2b', data=res5c_branch2a_relu, offset=res5c_branch2b_offset, num_filter=512, pad=res5_dilate, kernel=(3, 3), num_deformable_group=4, dilate=res5_dilate, no_bias=True)
else:
res5c_branch2b = mx.symbol.Convolution(name='res5c_branch2b', data=res5c_branch2a_relu, num_filter=512, pad=res5_dilate, kernel=(3, 3), stride=(1, 1), dilate=res5_dilate, no_bias=True)
bn5c_branch2b = mx.symbol.BatchNorm(name='bn5c_branch2b', data=res5c_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale5c_branch2b = bn5c_branch2b
res5c_branch2b_relu = mx.symbol.Activation(name='res5c_branch2b_relu', data=scale5c_branch2b, act_type='relu')
res5c_branch2c = mx.symbol.Convolution(name='res5c_branch2c', data=res5c_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5c_branch2c = mx.symbol.BatchNorm(name='bn5c_branch2c', data=res5c_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale5c_branch2c = bn5c_branch2c
res5c = mx.symbol.broadcast_add(*[res5b_relu, scale5c_branch2c], name='res5c')
res5c_relu = mx.symbol.Activation(name='res5c_relu', data=res5c, act_type='relu')
return (res2c_relu, res3b3_relu, res4b22_relu, res5c_relu)
def get_fpn_feature(self, c2, c3, c4, c5, feature_dim=256):
    """Build the FPN pyramid (P2..P6) from backbone stages C2..C5.

    Each stage is first projected to *feature_dim* channels with a 1x1
    convolution (lateral connection); the coarser levels are then upsampled
    (nearest-neighbor, x2) and summed into the finer ones top-down.  A 3x3
    convolution smooths every merged map, and P6 is a stride-2 3x3
    convolution applied directly to C5.

    Returns (fpn_p2, fpn_p3, fpn_p4, fpn_p5, fpn_p6).
    """
    stages = {2: c2, 3: c3, 4: c4, 5: c5}
    # Lateral 1x1 projections, one per pyramid level.
    laterals = {}
    for level in (2, 3, 4, 5):
        laterals[level] = mx.symbol.Convolution(
            data=stages[level], kernel=(1, 1), pad=(0, 0), stride=(1, 1),
            num_filter=feature_dim, name='fpn_p%d_1x1' % level)
    # Top-down pathway: upsample the coarser merged map and add the lateral.
    merged = {5: laterals[5]}
    for level in (4, 3, 2):
        upsampled = mx.symbol.UpSampling(
            merged[level + 1], scale=2, sample_type='nearest',
            name='fpn_p%d_upsample' % (level + 1))
        merged[level] = mx.sym.ElementWiseSum(
            *[upsampled, laterals[level]], name='fpn_p%d_sum' % level)
    # P6 comes straight from C5 with stride 2 (extra coarse level for RPN).
    fpn_p6 = mx.sym.Convolution(
        data=c5, kernel=(3, 3), pad=(1, 1), stride=(2, 2),
        num_filter=feature_dim, name='fpn_p6')
    # 3x3 smoothing convolutions on the merged maps.
    outputs = []
    for level in (2, 3, 4, 5):
        outputs.append(mx.symbol.Convolution(
            data=merged[level], kernel=(3, 3), pad=(1, 1), stride=(1, 1),
            num_filter=feature_dim, name='fpn_p%d' % level))
    return (outputs[0], outputs[1], outputs[2], outputs[3], fpn_p6)
def get_rpn_subnet(self, data, num_anchors, suffix):
    """RPN head for one FPN level, with weights shared across levels.

    All convolutions pull their weight/bias from self.shared_param_dict, so
    every pyramid level runs the identical head; *suffix* only
    disambiguates the symbol names (e.g. 'p2'..'p6').

    Returns a 4-tuple:
      - cls score reshaped to (N, 2, -1) for the softmax loss,
      - cls prob reshaped back to (N, 2*A, -1, W) for the proposal op,
      - bbox pred reshaped to (N, 4*A, -1) for the bbox loss,
      - raw bbox pred map.
    """
    conv_feat = mx.sym.Convolution(
        data=data, kernel=(3, 3), pad=(1, 1), num_filter=512,
        name='rpn_conv_' + suffix,
        weight=self.shared_param_dict['rpn_conv_weight'],
        bias=self.shared_param_dict['rpn_conv_bias'])
    conv_relu = mx.sym.Activation(data=conv_feat, act_type='relu',
                                  name='rpn_relu_' + suffix)
    cls_score = mx.sym.Convolution(
        data=conv_relu, kernel=(1, 1), pad=(0, 0),
        num_filter=2 * num_anchors, name='rpn_cls_score_' + suffix,
        weight=self.shared_param_dict['rpn_cls_score_weight'],
        bias=self.shared_param_dict['rpn_cls_score_bias'])
    bbox_delta = mx.sym.Convolution(
        data=conv_relu, kernel=(1, 1), pad=(0, 0),
        num_filter=4 * num_anchors, name='rpn_bbox_pred_' + suffix,
        weight=self.shared_param_dict['rpn_bbox_pred_weight'],
        bias=self.shared_param_dict['rpn_bbox_pred_bias'])
    # (N, 2*A, H, W) -> (N, 2, A*H, W) so the softmax runs over the 2-way
    # fg/bg channel axis.
    cls_score_t1 = mx.sym.Reshape(data=cls_score, shape=(0, 2, -1, 0),
                                  name='rpn_cls_score_t1_' + suffix)
    cls_score_t2 = mx.sym.Reshape(data=cls_score_t1, shape=(0, 2, -1),
                                  name='rpn_cls_score_t2_' + suffix)
    cls_prob = mx.sym.SoftmaxActivation(data=cls_score_t1, mode='channel',
                                        name='rpn_cls_prob_' + suffix)
    cls_prob_t = mx.sym.Reshape(data=cls_prob, shape=(0, 2 * num_anchors, -1, 0),
                                name='rpn_cls_prob_t_' + suffix)
    bbox_delta_t = mx.sym.Reshape(data=bbox_delta, shape=(0, 0, -1),
                                  name='rpn_bbox_pred_t_' + suffix)
    return (cls_score_t2, cls_prob_t, bbox_delta_t, bbox_delta)
def get_deformable_roipooling(self, name, data, rois, output_dim, spatial_scale, param_name, group_size=1, pooled_size=7, sample_per_part=4, part_size=7):
    """Deformable PSROI pooling with learned, ROI-conditioned offsets.

    A first, offset-free pooling pass (no_trans=True) summarizes each ROI;
    a fully-connected layer (weights shared via self.shared_param_dict under
    *param_name*) predicts a 2-channel (part_size x part_size) offset map,
    which then drives the second, deformable pooling pass.
    """
    # Pass 1: plain position-sensitive ROI pooling, no offsets.
    pooled = mx.contrib.sym.DeformablePSROIPooling(
        name='offset_' + name + '_t', data=data, rois=rois,
        group_size=group_size, pooled_size=pooled_size,
        sample_per_part=sample_per_part, no_trans=True, part_size=part_size,
        output_dim=output_dim, spatial_scale=spatial_scale)
    # Predict per-part (x, y) offsets; lr_mult=0.01 keeps them from
    # dominating early training.
    offset_fc = mx.sym.FullyConnected(
        name='offset_' + name, data=pooled,
        num_hidden=part_size * part_size * 2, lr_mult=0.01,
        weight=self.shared_param_dict['offset_' + param_name + '_weight'],
        bias=self.shared_param_dict['offset_' + param_name + '_bias'])
    offset_maps = mx.sym.Reshape(
        data=offset_fc, shape=(-1, 2, part_size, part_size),
        name='offset_reshape_' + name)
    # Pass 2: deformable pooling driven by the predicted offsets.
    return mx.contrib.sym.DeformablePSROIPooling(
        name='deformable_roi_pool_' + name, data=data, rois=rois,
        trans=offset_maps, group_size=group_size, pooled_size=pooled_size,
        sample_per_part=sample_per_part, no_trans=False, part_size=part_size,
        output_dim=output_dim, spatial_scale=spatial_scale, trans_std=0.1)
def get_constant_symbol(self, const_val):
    """Return a gradient-blocked constant symbol holding *const_val*.

    Symbols are memoized in self.constants_dict so each distinct value is
    materialized in the graph exactly once.
    """
    try:
        return self.constants_dict[const_val]
    except KeyError:
        pass
    sym = mx.sym.BlockGrad(
        mx.sym.Variable('const_eq_{0}'.format(const_val), shape=(1,),
                        init=MyConstant(value=[const_val])))
    self.constants_dict[const_val] = sym
    return sym
def cos_sim_2_dist_generic(self, cos_sim, x=None, y=None, x_is_norm=True, y_is_norm=True):
    """Turn a similarity matrix into squared-distance form:
    ||x||^2 - 2*cos_sim + ||y||^2.

    When an input is L2-normalized its squared norm is the constant 1 and
    the raw vectors are not needed; otherwise the raw symbol must be
    supplied so the norm can be computed.
    NOTE(review): the axis=0 reductions suggest x/y are laid out
    (dim, n) column-wise — confirm against callers.
    """
    if not x_is_norm:
        assert (x is not None), 'if x is not L2 normalized then x must be provided'
        x_norm = mx.sym.transpose(
            mx.sym.sum_axis(mx.sym.square(x), axis=0, keepdims=True),
            axes=(1, 0))
    else:
        x_norm = self.get_constant_symbol(1)
    if not y_is_norm:
        assert (y is not None), 'if y is not L2 normalized then y must be provided'
        y_norm = mx.sym.sum_axis(mx.sym.square(y), axis=0, keepdims=True)
    else:
        y_norm = self.get_constant_symbol(1)
    neg_term = mx.sym.broadcast_mul(self.get_constant_symbol(2), cos_sim)
    return mx.sym.broadcast_add(
        mx.sym.broadcast_sub(x_norm, neg_term), y_norm)
def cos_sim_2_dist(self, cos_sim, cfg=None, embd=None, reps=None):
    """Embedding-vs-representatives variant of cos_sim_2_dist_generic:
    returns ||embd||^2 - 2*cos_sim + ||reps||^2.

    The relevant norm collapses to the constant 1 when the corresponding
    cfg flag (EMBED_L2_NORM / REP_L2_NORM) says the input is already
    L2-normalized; otherwise the raw symbol must be passed in.
    """
    if cfg.network.EMBED_L2_NORM:
        embd_norm = self.get_constant_symbol(1)
    else:
        assert (embd is not None), 'if embedding is not L2 normalized then embd must be provided'
        # Reshape to (N, 1, 1) so the norm broadcasts against cos_sim.
        embd_norm = mx.sym.reshape(
            mx.sym.sum_axis(mx.sym.square(embd), axis=1, keepdims=True),
            shape=(0, 1, 1))
    if cfg.network.REP_L2_NORM:
        reps_norm = self.get_constant_symbol(1)
    else:
        assert (reps is not None), 'if representatives are not L2 normalized then reps must be provided'
        reps_norm = mx.sym.sum_axis(mx.sym.square(reps), axis=0, keepdims=True)
    neg_term = mx.sym.broadcast_mul(self.get_constant_symbol(2), cos_sim)
    return mx.sym.broadcast_add(
        mx.sym.broadcast_sub(embd_norm, neg_term), reps_norm)
    def get_symbol(self, cfg, is_train=True):
        """Build the full detection symbol graph: ResNet-101 backbone + FPN,
        per-level RPN subnets, FPN ROI pooling, a representative-based
        (embedding-distance) classification head, and bbox regression.

        Returns a grouped symbol: training outputs (losses + blocked labels)
        when is_train, else (rois, cls_prob, bbox_pred, embeddings, scores).
        """
        num_classes = cfg.dataset.NUM_CLASSES
        num_reg_classes = (2 if cfg.CLASS_AGNOSTIC else num_classes)
        data = mx.sym.Variable(name='data')
        im_info = mx.sym.Variable(name='im_info')
        # Backbone + FPN feature pyramid (p2..p6).
        (res2, res3, res4, res5) = self.get_resnet_backbone(data, with_dpyramid=True, with_dconv=True)
        (fpn_p2, fpn_p3, fpn_p4, fpn_p5, fpn_p6) = self.get_fpn_feature(res2, res3, res4, res5)
        # One RPN subnet per pyramid level (parameters shared via shared_param_dict).
        (rpn_cls_score_p2, rpn_prob_p2, rpn_bbox_loss_p2, rpn_bbox_pred_p2) = self.get_rpn_subnet(fpn_p2, cfg.network.NUM_ANCHORS, 'p2')
        (rpn_cls_score_p3, rpn_prob_p3, rpn_bbox_loss_p3, rpn_bbox_pred_p3) = self.get_rpn_subnet(fpn_p3, cfg.network.NUM_ANCHORS, 'p3')
        (rpn_cls_score_p4, rpn_prob_p4, rpn_bbox_loss_p4, rpn_bbox_pred_p4) = self.get_rpn_subnet(fpn_p4, cfg.network.NUM_ANCHORS, 'p4')
        (rpn_cls_score_p5, rpn_prob_p5, rpn_bbox_loss_p5, rpn_bbox_pred_p5) = self.get_rpn_subnet(fpn_p5, cfg.network.NUM_ANCHORS, 'p5')
        (rpn_cls_score_p6, rpn_prob_p6, rpn_bbox_loss_p6, rpn_bbox_pred_p6) = self.get_rpn_subnet(fpn_p6, cfg.network.NUM_ANCHORS, 'p6')
        rpn_cls_prob_dict = {'rpn_cls_prob_stride64': rpn_prob_p6, 'rpn_cls_prob_stride32': rpn_prob_p5, 'rpn_cls_prob_stride16': rpn_prob_p4, 'rpn_cls_prob_stride8': rpn_prob_p3, 'rpn_cls_prob_stride4': rpn_prob_p2}
        rpn_bbox_pred_dict = {'rpn_bbox_pred_stride64': rpn_bbox_pred_p6, 'rpn_bbox_pred_stride32': rpn_bbox_pred_p5, 'rpn_bbox_pred_stride16': rpn_bbox_pred_p4, 'rpn_bbox_pred_stride8': rpn_bbox_pred_p3, 'rpn_bbox_pred_stride4': rpn_bbox_pred_p2}
        arg_dict = dict((rpn_cls_prob_dict.items() + rpn_bbox_pred_dict.items()))  # Python 2: .items() returns lists
        if is_train:
            # RPN training losses are only built when the base net is trainable.
            if (not cfg.network.base_net_lock):
                rpn_label = mx.sym.Variable(name='label')
                rpn_bbox_target = mx.sym.Variable(name='bbox_target')
                rpn_bbox_weight = mx.sym.Variable(name='bbox_weight')
            gt_boxes = mx.sym.Variable(name='gt_boxes')
            if (not cfg.network.base_net_lock):
                rpn_cls_score = mx.sym.Concat(rpn_cls_score_p2, rpn_cls_score_p3, rpn_cls_score_p4, rpn_cls_score_p5, rpn_cls_score_p6, dim=2)
                rpn_bbox_loss = mx.sym.Concat(rpn_bbox_loss_p2, rpn_bbox_loss_p3, rpn_bbox_loss_p4, rpn_bbox_loss_p5, rpn_bbox_loss_p6, dim=2)
                rpn_cls_output = mx.sym.SoftmaxOutput(data=rpn_cls_score, label=rpn_label, multi_output=True, normalization='valid', use_ignore=True, ignore_label=(- 1), name='rpn_cls_prob')
                rpn_bbox_loss = (rpn_bbox_weight * mx.sym.smooth_l1(name='rpn_bbox_loss_l1', scalar=3.0, data=(rpn_bbox_loss - rpn_bbox_target)))
                rpn_bbox_loss = mx.sym.MakeLoss(name='rpn_bbox_loss', data=rpn_bbox_loss, grad_scale=(1.0 / cfg.TRAIN.RPN_BATCH_SIZE))
            # Multi-level proposal generation, then proposal-target sampling against GT.
            aux_dict = {'op_type': 'pyramid_proposal', 'name': 'rois', 'im_info': im_info, 'feat_stride': tuple(cfg.network.RPN_FEAT_STRIDE), 'scales': tuple(cfg.network.ANCHOR_SCALES), 'ratios': tuple(cfg.network.ANCHOR_RATIOS), 'rpn_pre_nms_top_n': cfg.TRAIN.RPN_PRE_NMS_TOP_N, 'rpn_post_nms_top_n': cfg.TRAIN.RPN_POST_NMS_TOP_N, 'threshold': cfg.TRAIN.RPN_NMS_THRESH, 'rpn_min_size': cfg.TRAIN.RPN_MIN_SIZE}
            rois = mx.sym.Custom(**dict((arg_dict.items() + aux_dict.items())))
            gt_boxes_reshape = mx.sym.Reshape(data=gt_boxes, shape=((- 1), 5), name='gt_boxes_reshape')
            (rois, label, bbox_target, bbox_weight) = mx.sym.Custom(rois=rois, gt_boxes=gt_boxes_reshape, op_type='proposal_target', num_classes=num_reg_classes, batch_images=cfg.TRAIN.BATCH_IMAGES, batch_rois=cfg.TRAIN.BATCH_ROIS, cfg=cPickle.dumps(cfg), fg_fraction=cfg.TRAIN.FG_FRACTION)
        else:
            aux_dict = {'op_type': 'pyramid_proposal', 'name': 'rois', 'im_info': im_info, 'feat_stride': tuple(cfg.network.RPN_FEAT_STRIDE), 'scales': tuple(cfg.network.ANCHOR_SCALES), 'ratios': tuple(cfg.network.ANCHOR_RATIOS), 'rpn_pre_nms_top_n': cfg.TEST.RPN_PRE_NMS_TOP_N, 'rpn_post_nms_top_n': cfg.TEST.RPN_POST_NMS_TOP_N, 'threshold': cfg.TEST.RPN_NMS_THRESH, 'rpn_min_size': cfg.TEST.RPN_MIN_SIZE}
            rois = mx.sym.Custom(**dict((arg_dict.items() + aux_dict.items())))
        # Deformable FPN ROI pooling; offset branches get a reduced LR (0.01).
        offset_p2_weight = mx.sym.Variable(name='offset_p2_weight', dtype=np.float32, lr_mult=0.01)
        offset_p3_weight = mx.sym.Variable(name='offset_p3_weight', dtype=np.float32, lr_mult=0.01)
        offset_p4_weight = mx.sym.Variable(name='offset_p4_weight', dtype=np.float32, lr_mult=0.01)
        offset_p5_weight = mx.sym.Variable(name='offset_p5_weight', dtype=np.float32, lr_mult=0.01)
        offset_p2_bias = mx.sym.Variable(name='offset_p2_bias', dtype=np.float32, lr_mult=0.01)
        offset_p3_bias = mx.sym.Variable(name='offset_p3_bias', dtype=np.float32, lr_mult=0.01)
        offset_p4_bias = mx.sym.Variable(name='offset_p4_bias', dtype=np.float32, lr_mult=0.01)
        offset_p5_bias = mx.sym.Variable(name='offset_p5_bias', dtype=np.float32, lr_mult=0.01)
        roi_pool = mx.symbol.Custom(data_p2=fpn_p2, data_p3=fpn_p3, data_p4=fpn_p4, data_p5=fpn_p5, offset_weight_p2=offset_p2_weight, offset_bias_p2=offset_p2_bias, offset_weight_p3=offset_p3_weight, offset_bias_p3=offset_p3_bias, offset_weight_p4=offset_p4_weight, offset_bias_p4=offset_p4_bias, offset_weight_p5=offset_p5_weight, offset_bias_p5=offset_p5_bias, rois=rois, op_type='fpn_roi_pooling', name='fpn_roi_pooling', with_deformable=True)
        # Shared 2-fc head on pooled features.
        fc_new_1 = mx.symbol.FullyConnected(name='fc_new_1', data=roi_pool, num_hidden=1024)
        fc_new_1_relu = mx.sym.Activation(data=fc_new_1, act_type='relu', name='fc_new_1_relu')
        fc_new_2 = mx.symbol.FullyConnected(name='fc_new_2', data=fc_new_1_relu, num_hidden=1024)
        fc_new_2_relu = mx.sym.Activation(data=fc_new_2, act_type='relu', name='fc_new_2_relu')
        if (is_train and cfg.network.base_net_lock):
            # Freeze everything below the embedding head when the base net is locked.
            fc_new_2_relu = mx.sym.BlockGrad(fc_new_2_relu)
            rois = mx.sym.BlockGrad(rois)
            label = mx.sym.BlockGrad(label)
            bbox_target = mx.sym.BlockGrad(bbox_target)
            bbox_weight = mx.sym.BlockGrad(bbox_weight)
        # Class representatives: EMBEDDING_DIM x REPS_PER_CLASS x (num_classes-1),
        # realized as FC weights over a constant-1 input so they are learnable.
        lr_mult = cfg.TRAIN.REPS_LR_MULT
        if cfg.network.SEPARABLE_REPS:
            # per-class base vector + per-representative offsets
            base = mx.sym.FullyConnected(data=self.get_constant_symbol(1), name='fc_representatives_base', num_hidden=(cfg.network.EMBEDDING_DIM * (num_classes - 1)), no_bias=True, lr_mult=lr_mult)
            offset = mx.sym.FullyConnected(data=self.get_constant_symbol(1), name='fc_representatives_offset', num_hidden=(cfg.network.EMBEDDING_DIM * cfg.network.REPS_PER_CLASS), no_bias=True, lr_mult=lr_mult)
            base = mx.sym.reshape(base, shape=(cfg.network.EMBEDDING_DIM, 1, (num_classes - 1)))
            offset = mx.sym.reshape(offset, shape=(cfg.network.EMBEDDING_DIM, cfg.network.REPS_PER_CLASS, 1))
            representatives = mx.sym.broadcast_add(base, offset, name='fc_representatives')
        else:
            representatives = mx.sym.FullyConnected(data=self.get_constant_symbol(1), name='fc_representatives', num_hidden=((cfg.network.EMBEDDING_DIM * cfg.network.REPS_PER_CLASS) * (num_classes - 1)), no_bias=True, lr_mult=lr_mult)
            representatives = mx.sym.reshape(representatives, shape=(cfg.network.EMBEDDING_DIM, cfg.network.REPS_PER_CLASS, (num_classes - 1)))
        if cfg.network.REP_L2_NORM:
            representatives = mx.sym.transpose(mx.sym.L2Normalization(mx.sym.transpose(representatives, axes=(1, 0, 2)), mode='channel'), axes=(1, 0, 2))
        extra_outputs = [mx.sym.BlockGrad(representatives)]
        # Embed each ROI and compute distances to all class representatives.
        batch_embed = mx.symbol.FullyConnected(name='batch_embed', data=fc_new_2_relu, num_hidden=cfg.network.EMBEDDING_DIM)
        if cfg.network.EMBED_L2_NORM:
            batch_embed = mx.sym.L2Normalization(data=batch_embed, name='batch_embed_nrm', mode='instance')
        cos_sim = mx.sym.dot(batch_embed, representatives, transpose_b=False)
        all_cls_rep_dist = self.cos_sim_2_dist(cos_sim, cfg, embd=batch_embed, reps=representatives)
        if (is_train and cfg.network.EMBED_LOSS_ENABLED):
            # Margin loss: distance to own class rep must undercut the
            # nearest other-class rep by EMBED_LOSS_MARGIN.
            if cfg.network.SMOOTH_MIN:
                # log-sum-exp soft-min over each class' representatives
                sum_exp_dist = mx.sym.sum_axis(mx.sym.exp(mx.sym.broadcast_mul(all_cls_rep_dist, self.get_constant_symbol(cfg.network.SMOOTH_CONST))), axis=1, keepdims=True)
                all_cls_min_dist = mx.sym.broadcast_div(mx.sym.log(sum_exp_dist), self.get_constant_symbol(cfg.network.SMOOTH_CONST))
            else:
                all_cls_min_dist = mx.sym.min_axis(all_cls_rep_dist, axis=1, keepdims=True)
            all_cls_min_dist = mx.sym.reshape(all_cls_min_dist, shape=(0, (num_classes - 1)))
            # one-hot masks over foreground classes (background column sliced off)
            mod_true_class = mx.sym.slice_axis(mx.sym.one_hot(label, depth=num_classes, on_value=1, off_value=0), axis=1, begin=1, end=None)
            mod_false_class = mx.sym.slice_axis(mx.sym.one_hot(label, depth=num_classes, on_value=1000, off_value=0), axis=1, begin=1, end=None)
            min_dist_true = mx.sym.sum_axis(mx.sym.broadcast_mul(all_cls_min_dist, mod_true_class), axis=1)
            min_dist_false = mx.sym.min_axis(mx.sym.broadcast_add(all_cls_min_dist, mod_false_class), axis=1)
            embed_loss_val = mx.sym.broadcast_sub(min_dist_true, min_dist_false)
            embed_loss_val = mx.sym.broadcast_add(embed_loss_val, self.get_constant_symbol(cfg.network.EMBED_LOSS_MARGIN))
            embed_loss_val = mx.sym.relu(embed_loss_val)
            embed_loss_val = mx.sym.reshape(embed_loss_val, shape=(0, 1))
        if (is_train and cfg.network.REPS_CLS_LOSS):
            # Rep-vs-rep margin loss: same-class reps should be closer to
            # each other than to any other class' rep.  The block-diagonal
            # mask separates same-class (C) from cross-class (NC) pairs.
            mask_block_ones = mx.sym.ones(shape=(cfg.network.REPS_PER_CLASS, cfg.network.REPS_PER_CLASS))
            mask_block_zeros = mx.sym.zeros(shape=(cfg.network.REPS_PER_CLASS, cfg.network.REPS_PER_CLASS))
            mask = None
            for iC1 in range((num_classes - 1)):
                mask_row = None
                for iC2 in range((num_classes - 1)):
                    if (iC1 == iC2):
                        cblock = mask_block_ones
                    else:
                        cblock = mask_block_zeros
                    if (mask_row is None):
                        mask_row = cblock
                    else:
                        mask_row = mx.sym.concat(mask_row, cblock, dim=1)
                if (mask is None):
                    mask = mask_row
                else:
                    mask = mx.sym.concat(mask, mask_row, dim=0)
            mask_NC = mx.sym.broadcast_mul(self.get_constant_symbol(1000), mask)
            mask_C = mx.sym.broadcast_sub(self.get_constant_symbol(1000), mask_NC)
            mask_C = mx.sym.BlockGrad(mask_C)
            mask_NC = mx.sym.BlockGrad(mask_NC)
            R = mx.sym.reshape(mx.sym.transpose(representatives, axes=(0, 2, 1)), shape=(0, (- 1)))
            R2R_cos_sim = mx.sym.dot(R, R, transpose_a=True)
            R2R = self.cos_sim_2_dist_generic(R2R_cos_sim, x=R, y=R, x_is_norm=cfg.network.REP_L2_NORM, y_is_norm=cfg.network.REP_L2_NORM)
            C2C = mx.sym.broadcast_add(R2R, mask_C)
            C2NC = mx.sym.broadcast_add(R2R, mask_NC)
            # topk k=2 then slice index 1: skips the zero self-distance
            min_dist_C = mx.sym.topk(C2C, axis=1, k=2, ret_typ='value', is_ascend=True)
            min_dist_C = mx.sym.slice_axis(min_dist_C, axis=1, begin=1, end=2)
            min_dist_NC = mx.sym.min_axis(C2NC, axis=1, keepdims=True)
            reps_cls_loss_val = mx.sym.broadcast_sub(min_dist_C, min_dist_NC)
            reps_cls_loss_val = mx.sym.broadcast_add(reps_cls_loss_val, self.get_constant_symbol(cfg.network.EMBED_LOSS_MARGIN))
            reps_cls_loss_val = mx.sym.relu(reps_cls_loss_val)
        # Gaussian-kernel scores from distances; background score is the
        # complement of the best foreground score.
        probs = mx.sym.exp(mx.sym.broadcast_mul(all_cls_rep_dist, self.get_constant_symbol(((- 0.5) / float((cfg.network.SIGMA ** 2))))))
        comb_cls_scores = mx.sym.max_axis(probs, axis=1, keepdims=False)
        comb_cls_scores = mx.sym.broadcast_add(comb_cls_scores, self.get_constant_symbol(1e-07))
        bg_scores = mx.sym.broadcast_sub(self.get_constant_symbol((1 + 1e-07)), mx.sym.max_axis(comb_cls_scores, axis=1, keepdims=True))
        cls_score = mx.sym.concat(bg_scores, comb_cls_scores, dim=1, name='bg_concat')
        cls_score = mx.sym.reshape(cls_score, shape=(0, (- 1)))
        if cfg.network.SOFTMAX_ENABLED:
            # temperature scaling before softmax
            cls_score = mx.sym.broadcast_mul(self.get_constant_symbol(cfg.network.SOFTMAX_MUL), cls_score)
        if cfg.network.ADDITIONAL_LINEAR_CLS_LOSS:
            cls_score_lin = mx.symbol.FullyConnected(name='cls_score_lin', data=fc_new_2_relu, num_hidden=num_classes)
        bbox_pred = mx.symbol.FullyConnected(name='bbox_pred', data=fc_new_2_relu, num_hidden=(num_reg_classes * 4))
        if is_train:
            if cfg.TRAIN.ENABLE_OHEM:
                rm_last = int(cfg.TRAIN.RM_LAST)
                (labels_ohem, bbox_weights_ohem) = mx.sym.Custom(op_type='BoxAnnotatorOHEM', num_classes=num_classes, num_reg_classes=num_reg_classes, roi_per_img=cfg.TRAIN.BATCH_ROIS_OHEM, cls_score=cls_score, bbox_pred=bbox_pred, labels=label, bbox_targets=bbox_target, bbox_weights=bbox_weight, rm_last=rm_last)
                if cfg.network.SOFTMAX_ENABLED:
                    cls_prob = mx.sym.SoftmaxOutput(name='cls_prob', data=cls_score, label=labels_ohem, normalization='valid', use_ignore=True, ignore_label=(- 1))
                else:
                    # Hand-rolled cross-entropy on the (already probability-like)
                    # scores; OHEM-ignored rows (label == -1) contribute zero.
                    zz = mx.sym.zeros_like(label)
                    cls_prob = mx.sym.BlockGrad(cls_score)
                    invalid = mx.sym.broadcast_equal(labels_ohem, self.get_constant_symbol((- 1)))
                    # shift ignored labels (-1) to 0 so one_hot stays in range
                    minoh_labels = mx.sym.one_hot(mx.sym.broadcast_add(mx.sym.cast(invalid, dtype='float32'), labels_ohem), depth=num_classes, on_value=(- 1), off_value=0)
                    ce_loss = mx.sym.where(invalid, x=zz, y=mx.sym.sum(mx.sym.broadcast_mul(minoh_labels, mx.sym.log(mx.sym.broadcast_add(cls_score, self.get_constant_symbol(1e-07)))), axis=1))
                    ce_loss = mx.sym.MakeLoss(ce_loss, normalization='valid')
                bbox_loss_ = (bbox_weights_ohem * mx.sym.smooth_l1(name='bbox_loss_', scalar=1.0, data=(bbox_pred - bbox_target)))
                bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=(1.0 / cfg.TRAIN.BATCH_ROIS_OHEM))
                rcnn_label = labels_ohem
                if cfg.network.ADDITIONAL_LINEAR_CLS_LOSS:
                    cls_prob_lin = mx.sym.SoftmaxOutput(name='cls_prob_lin', data=cls_score_lin, label=labels_ohem, normalization='valid', use_ignore=True, ignore_label=(- 1))
                if cfg.network.EMBED_LOSS_ENABLED:
                    # gate embed loss by OHEM-selected foreground weights
                    embed_loss_ = (mx.sym.slice_axis(bbox_weights_ohem, axis=1, begin=(- 1), end=None) * embed_loss_val)
                    embed_loss = mx.sym.MakeLoss(name='embed_loss', data=embed_loss_, grad_scale=(1.0 / cfg.TRAIN.BATCH_ROIS_OHEM))
            else:
                cls_prob = mx.sym.SoftmaxOutput(name='cls_prob', data=cls_score, label=label, normalization='valid')
                bbox_loss_ = (bbox_weight * mx.sym.smooth_l1(name='bbox_loss_', scalar=1.0, data=(bbox_pred - bbox_target)))
                bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=(1.0 / cfg.TRAIN.BATCH_ROIS))
                rcnn_label = label
                if cfg.network.ADDITIONAL_LINEAR_CLS_LOSS:
                    cls_prob_lin = mx.sym.SoftmaxOutput(name='cls_prob_lin', data=cls_score_lin, label=label, normalization='valid')
                if cfg.network.EMBED_LOSS_ENABLED:
                    # NOTE(review): slices column 0 here vs the last column in
                    # the OHEM branch above — presumably equivalent weight
                    # columns; verify against proposal_target's weight layout.
                    embed_loss_ = (mx.sym.slice_axis(bbox_weight, axis=1, begin=0, end=1) * embed_loss_val)
                    embed_loss = mx.sym.MakeLoss(name='embed_loss', data=embed_loss_, grad_scale=(1.0 / cfg.TRAIN.BATCH_ROIS))
            if cfg.network.EMBED_LOSS_ENABLED:
                extra_outputs += [embed_loss]
            if cfg.network.REPS_CLS_LOSS:
                extra_outputs += [mx.sym.MakeLoss(name='reps_cls_loss', data=reps_cls_loss_val, grad_scale=(1.0 / (cfg.network.REPS_PER_CLASS * (num_classes - 1))))]
            if cfg.network.ADDITIONAL_LINEAR_CLS_LOSS:
                extra_outputs += [cls_prob_lin]
            if (not cfg.network.SOFTMAX_ENABLED):
                extra_outputs += [ce_loss]
            extra_outputs += [mx.sym.BlockGrad(rois), mx.sym.identity(mx.sym.BlockGrad(batch_embed), name='psp_final_embed')]
            rcnn_label = mx.sym.Reshape(data=rcnn_label, shape=(cfg.TRAIN.BATCH_IMAGES, (- 1)), name='label_reshape')
            cls_prob = mx.sym.Reshape(data=cls_prob, shape=(cfg.TRAIN.BATCH_IMAGES, (- 1), num_classes), name='cls_prob_reshape')
            bbox_loss = mx.sym.Reshape(data=bbox_loss, shape=(cfg.TRAIN.BATCH_IMAGES, (- 1), (4 * num_reg_classes)), name='bbox_loss_reshape')
            if cfg.network.base_net_lock:
                group = mx.sym.Group(([cls_prob, bbox_loss, mx.sym.BlockGrad(rcnn_label)] + extra_outputs))
            else:
                group = mx.sym.Group(([rpn_cls_output, rpn_bbox_loss, cls_prob, bbox_loss, mx.sym.BlockGrad(rcnn_label)] + extra_outputs))
        else:
            # Inference outputs; undo the softmax temperature on raw scores.
            cls_prob = mx.sym.SoftmaxActivation(name='cls_prob', data=cls_score)
            cls_prob = mx.sym.Reshape(data=cls_prob, shape=(cfg.TEST.BATCH_IMAGES, (- 1), num_classes), name='cls_prob_reshape')
            bbox_pred = mx.sym.Reshape(data=bbox_pred, shape=(cfg.TEST.BATCH_IMAGES, (- 1), (4 * num_reg_classes)), name='bbox_pred_reshape')
            cls_score_orig = cls_score
            if cfg.network.SOFTMAX_ENABLED:
                cls_score_orig = mx.sym.broadcast_div(cls_score_orig, self.get_constant_symbol(cfg.network.SOFTMAX_MUL))
            cls_score_orig = mx.sym.Reshape(data=cls_score_orig, shape=(cfg.TEST.BATCH_IMAGES, (- 1), num_classes))
            group = mx.sym.Group([rois, cls_prob, bbox_pred, mx.sym.identity(batch_embed, name='psp_final_embed'), mx.sym.identity(cls_score_orig, name='cls_score')])
        self.sym = group
        return group
def init_weight_rcnn(self, cfg, arg_params, aux_params):
arg_params['fc_new_1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fc_new_1_weight'])
arg_params['fc_new_1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fc_new_1_bias'])
arg_params['fc_new_2_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fc_new_2_weight'])
arg_params['fc_new_2_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fc_new_2_bias'])
arg_params['bbox_pred_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['bbox_pred_weight'])
arg_params['bbox_pred_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['bbox_pred_bias'])
arg_params['batch_embed_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['batch_embed_weight'])
arg_params['batch_embed_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['batch_embed_bias'])
name = 'fc_representatives'
if cfg.network.SEPARABLE_REPS:
arg_params[(name + '_base_weight')] = mx.random.normal(0, 0.1, shape=self.arg_shape_dict[(name + '_base_weight')])
arg_params[(name + '_offset_weight')] = mx.random.normal(0, 0.1, shape=self.arg_shape_dict[(name + '_offset_weight')])
elif cfg.network.SEPARABLE_REPS_INIT:
C = mx.random.normal(0, 0.1, shape=(cfg.network.EMBEDDING_DIM, 1, (cfg.dataset.NUM_CLASSES - 1)))
R = mx.random.normal(0, 0.05, shape=(cfg.network.EMBEDDING_DIM, cfg.network.REPS_PER_CLASS, 1))
CR = (C + R)
arg_params[(name + '_weight')] = mx.nd.reshape(CR, shape=((- 1), 1))
else:
arg_params[(name + '_weight')] = mx.random.normal(0, 0.1, shape=self.arg_shape_dict[(name + '_weight')])
if cfg.network.ADDITIONAL_LINEAR_CLS_LOSS:
arg_params['cls_score_lin_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['cls_score_lin_weight'])
arg_params['cls_score_lin_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['cls_score_lin_bias'])
def init_deformable_convnet(self, cfg, arg_params, aux_params):
arg_params['res5a_branch2b_offset_weight'] = mx.nd.zeros(shape=self.arg_shape_dict['res5a_branch2b_offset_weight'])
arg_params['res5a_branch2b_offset_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['res5a_branch2b_offset_bias'])
arg_params['res5b_branch2b_offset_weight'] = mx.nd.zeros(shape=self.arg_shape_dict['res5b_branch2b_offset_weight'])
arg_params['res5b_branch2b_offset_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['res5b_branch2b_offset_bias'])
arg_params['res5c_branch2b_offset_weight'] = mx.nd.zeros(shape=self.arg_shape_dict['res5c_branch2b_offset_weight'])
arg_params['res5c_branch2b_offset_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['res5c_branch2b_offset_bias'])
arg_params['res3b3_branch2b_offset_weight'] = mx.nd.zeros(shape=self.arg_shape_dict['res3b3_branch2b_offset_weight'])
arg_params['res3b3_branch2b_offset_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['res3b3_branch2b_offset_bias'])
arg_params['res4b22_branch2b_offset_weight'] = mx.nd.zeros(shape=self.arg_shape_dict['res4b22_branch2b_offset_weight'])
arg_params['res4b22_branch2b_offset_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['res4b22_branch2b_offset_bias'])
def init_weight_fpn(self, cfg, arg_params, aux_params):
arg_params['fpn_p6_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p6_weight'])
arg_params['fpn_p6_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p6_bias'])
arg_params['fpn_p5_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p5_weight'])
arg_params['fpn_p5_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p5_bias'])
arg_params['fpn_p4_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p4_weight'])
arg_params['fpn_p4_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p4_bias'])
arg_params['fpn_p3_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p3_weight'])
arg_params['fpn_p3_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p3_bias'])
arg_params['fpn_p2_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p2_weight'])
arg_params['fpn_p2_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p2_bias'])
arg_params['fpn_p5_1x1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p5_1x1_weight'])
arg_params['fpn_p5_1x1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p5_1x1_bias'])
arg_params['fpn_p4_1x1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p4_1x1_weight'])
arg_params['fpn_p4_1x1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p4_1x1_bias'])
arg_params['fpn_p3_1x1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p3_1x1_weight'])
arg_params['fpn_p3_1x1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p3_1x1_bias'])
arg_params['fpn_p2_1x1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p2_1x1_weight'])
arg_params['fpn_p2_1x1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p2_1x1_bias'])
def init_weight(self, cfg, arg_params, aux_params):
(arg_params2, aux_params2) = ({}, {})
for name in self.shared_param_list:
if ('offset' in name):
arg_params2[(name + '_weight')] = mx.nd.zeros(shape=self.arg_shape_dict[(name + '_weight')])
else:
arg_params2[(name + '_weight')] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict[(name + '_weight')])
arg_params2[(name + '_bias')] = mx.nd.zeros(shape=self.arg_shape_dict[(name + '_bias')])
self.init_deformable_convnet(cfg, arg_params2, aux_params2)
self.init_weight_rcnn(cfg, arg_params2, aux_params2)
self.init_weight_fpn(cfg, arg_params2, aux_params2)
for k in arg_params2:
if cfg.network.pretrained_weights_are_priority:
if ((k not in arg_params) or (arg_params[k].shape != arg_params2[k].shape)):
arg_params[k] = arg_params2[k]
else:
arg_params[k] = arg_params2[k]
for k in aux_params2:
if cfg.network.pretrained_weights_are_priority:
if (k not in aux_params):
aux_params[k] = aux_params2[k]
else:
aux_params[k] = aux_params2[k]
|
class resnet_v1_101_fpn_dcn_rcnn_rep_noemb(Symbol):
def __init__(self):
'\n Use __init__ to define parameter network needs\n '
self.shared_param_list = ['offset_p2', 'offset_p3', 'offset_p4', 'offset_p5', 'rpn_conv', 'rpn_cls_score', 'rpn_bbox_pred']
self.shared_param_dict = {}
for name in self.shared_param_list:
self.shared_param_dict[(name + '_weight')] = mx.sym.Variable((name + '_weight'))
self.shared_param_dict[(name + '_bias')] = mx.sym.Variable((name + '_bias'))
self.constants_dict = {}
def get_resnet_backbone(self, data, with_dilated=False, with_dconv=False, with_dpyramid=False, eps=1e-05):
conv1 = mx.symbol.Convolution(name='conv1', data=data, num_filter=64, pad=(3, 3), kernel=(7, 7), stride=(2, 2), no_bias=True)
bn_conv1 = mx.symbol.BatchNorm(name='bn_conv1', data=conv1, use_global_stats=True, fix_gamma=False, eps=eps)
scale_conv1 = bn_conv1
conv1_relu = mx.symbol.Activation(name='conv1_relu', data=scale_conv1, act_type='relu')
pool1 = mx.symbol.Pooling(name='pool1', data=conv1_relu, pooling_convention='full', pad=(0, 0), kernel=(3, 3), stride=(2, 2), pool_type='max')
res2a_branch1 = mx.symbol.Convolution(name='res2a_branch1', data=pool1, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2a_branch1 = mx.symbol.BatchNorm(name='bn2a_branch1', data=res2a_branch1, use_global_stats=True, fix_gamma=False, eps=eps)
scale2a_branch1 = bn2a_branch1
res2a_branch2a = mx.symbol.Convolution(name='res2a_branch2a', data=pool1, num_filter=64, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2a_branch2a = mx.symbol.BatchNorm(name='bn2a_branch2a', data=res2a_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale2a_branch2a = bn2a_branch2a
res2a_branch2a_relu = mx.symbol.Activation(name='res2a_branch2a_relu', data=scale2a_branch2a, act_type='relu')
res2a_branch2b = mx.symbol.Convolution(name='res2a_branch2b', data=res2a_branch2a_relu, num_filter=64, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn2a_branch2b = mx.symbol.BatchNorm(name='bn2a_branch2b', data=res2a_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale2a_branch2b = bn2a_branch2b
res2a_branch2b_relu = mx.symbol.Activation(name='res2a_branch2b_relu', data=scale2a_branch2b, act_type='relu')
res2a_branch2c = mx.symbol.Convolution(name='res2a_branch2c', data=res2a_branch2b_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2a_branch2c = mx.symbol.BatchNorm(name='bn2a_branch2c', data=res2a_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale2a_branch2c = bn2a_branch2c
res2a = mx.symbol.broadcast_add(*[scale2a_branch1, scale2a_branch2c], name='res2a')
res2a_relu = mx.symbol.Activation(name='res2a_relu', data=res2a, act_type='relu')
res2b_branch2a = mx.symbol.Convolution(name='res2b_branch2a', data=res2a_relu, num_filter=64, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2b_branch2a = mx.symbol.BatchNorm(name='bn2b_branch2a', data=res2b_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale2b_branch2a = bn2b_branch2a
res2b_branch2a_relu = mx.symbol.Activation(name='res2b_branch2a_relu', data=scale2b_branch2a, act_type='relu')
res2b_branch2b = mx.symbol.Convolution(name='res2b_branch2b', data=res2b_branch2a_relu, num_filter=64, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn2b_branch2b = mx.symbol.BatchNorm(name='bn2b_branch2b', data=res2b_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale2b_branch2b = bn2b_branch2b
res2b_branch2b_relu = mx.symbol.Activation(name='res2b_branch2b_relu', data=scale2b_branch2b, act_type='relu')
res2b_branch2c = mx.symbol.Convolution(name='res2b_branch2c', data=res2b_branch2b_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2b_branch2c = mx.symbol.BatchNorm(name='bn2b_branch2c', data=res2b_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale2b_branch2c = bn2b_branch2c
res2b = mx.symbol.broadcast_add(*[res2a_relu, scale2b_branch2c], name='res2b')
res2b_relu = mx.symbol.Activation(name='res2b_relu', data=res2b, act_type='relu')
res2c_branch2a = mx.symbol.Convolution(name='res2c_branch2a', data=res2b_relu, num_filter=64, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2c_branch2a = mx.symbol.BatchNorm(name='bn2c_branch2a', data=res2c_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale2c_branch2a = bn2c_branch2a
res2c_branch2a_relu = mx.symbol.Activation(name='res2c_branch2a_relu', data=scale2c_branch2a, act_type='relu')
res2c_branch2b = mx.symbol.Convolution(name='res2c_branch2b', data=res2c_branch2a_relu, num_filter=64, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn2c_branch2b = mx.symbol.BatchNorm(name='bn2c_branch2b', data=res2c_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale2c_branch2b = bn2c_branch2b
res2c_branch2b_relu = mx.symbol.Activation(name='res2c_branch2b_relu', data=scale2c_branch2b, act_type='relu')
res2c_branch2c = mx.symbol.Convolution(name='res2c_branch2c', data=res2c_branch2b_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2c_branch2c = mx.symbol.BatchNorm(name='bn2c_branch2c', data=res2c_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale2c_branch2c = bn2c_branch2c
res2c = mx.symbol.broadcast_add(*[res2b_relu, scale2c_branch2c], name='res2c')
res2c_relu = mx.symbol.Activation(name='res2c_relu', data=res2c, act_type='relu')
res3a_branch1 = mx.symbol.Convolution(name='res3a_branch1', data=res2c_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(2, 2), no_bias=True)
bn3a_branch1 = mx.symbol.BatchNorm(name='bn3a_branch1', data=res3a_branch1, use_global_stats=True, fix_gamma=False, eps=eps)
scale3a_branch1 = bn3a_branch1
res3a_branch2a = mx.symbol.Convolution(name='res3a_branch2a', data=res2c_relu, num_filter=128, pad=(0, 0), kernel=(1, 1), stride=(2, 2), no_bias=True)
bn3a_branch2a = mx.symbol.BatchNorm(name='bn3a_branch2a', data=res3a_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale3a_branch2a = bn3a_branch2a
res3a_branch2a_relu = mx.symbol.Activation(name='res3a_branch2a_relu', data=scale3a_branch2a, act_type='relu')
res3a_branch2b = mx.symbol.Convolution(name='res3a_branch2b', data=res3a_branch2a_relu, num_filter=128, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3a_branch2b = mx.symbol.BatchNorm(name='bn3a_branch2b', data=res3a_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale3a_branch2b = bn3a_branch2b
res3a_branch2b_relu = mx.symbol.Activation(name='res3a_branch2b_relu', data=scale3a_branch2b, act_type='relu')
res3a_branch2c = mx.symbol.Convolution(name='res3a_branch2c', data=res3a_branch2b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3a_branch2c = mx.symbol.BatchNorm(name='bn3a_branch2c', data=res3a_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale3a_branch2c = bn3a_branch2c
res3a = mx.symbol.broadcast_add(*[scale3a_branch1, scale3a_branch2c], name='res3a')
res3a_relu = mx.symbol.Activation(name='res3a_relu', data=res3a, act_type='relu')
res3b1_branch2a = mx.symbol.Convolution(name='res3b1_branch2a', data=res3a_relu, num_filter=128, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b1_branch2a = mx.symbol.BatchNorm(name='bn3b1_branch2a', data=res3b1_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale3b1_branch2a = bn3b1_branch2a
res3b1_branch2a_relu = mx.symbol.Activation(name='res3b1_branch2a_relu', data=scale3b1_branch2a, act_type='relu')
res3b1_branch2b = mx.symbol.Convolution(name='res3b1_branch2b', data=res3b1_branch2a_relu, num_filter=128, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3b1_branch2b = mx.symbol.BatchNorm(name='bn3b1_branch2b', data=res3b1_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale3b1_branch2b = bn3b1_branch2b
res3b1_branch2b_relu = mx.symbol.Activation(name='res3b1_branch2b_relu', data=scale3b1_branch2b, act_type='relu')
res3b1_branch2c = mx.symbol.Convolution(name='res3b1_branch2c', data=res3b1_branch2b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b1_branch2c = mx.symbol.BatchNorm(name='bn3b1_branch2c', data=res3b1_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale3b1_branch2c = bn3b1_branch2c
res3b1 = mx.symbol.broadcast_add(*[res3a_relu, scale3b1_branch2c], name='res3b1')
res3b1_relu = mx.symbol.Activation(name='res3b1_relu', data=res3b1, act_type='relu')
res3b2_branch2a = mx.symbol.Convolution(name='res3b2_branch2a', data=res3b1_relu, num_filter=128, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b2_branch2a = mx.symbol.BatchNorm(name='bn3b2_branch2a', data=res3b2_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale3b2_branch2a = bn3b2_branch2a
res3b2_branch2a_relu = mx.symbol.Activation(name='res3b2_branch2a_relu', data=scale3b2_branch2a, act_type='relu')
res3b2_branch2b = mx.symbol.Convolution(name='res3b2_branch2b', data=res3b2_branch2a_relu, num_filter=128, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3b2_branch2b = mx.symbol.BatchNorm(name='bn3b2_branch2b', data=res3b2_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale3b2_branch2b = bn3b2_branch2b
res3b2_branch2b_relu = mx.symbol.Activation(name='res3b2_branch2b_relu', data=scale3b2_branch2b, act_type='relu')
res3b2_branch2c = mx.symbol.Convolution(name='res3b2_branch2c', data=res3b2_branch2b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b2_branch2c = mx.symbol.BatchNorm(name='bn3b2_branch2c', data=res3b2_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale3b2_branch2c = bn3b2_branch2c
res3b2 = mx.symbol.broadcast_add(*[res3b1_relu, scale3b2_branch2c], name='res3b2')
res3b2_relu = mx.symbol.Activation(name='res3b2_relu', data=res3b2, act_type='relu')
# --- res3b3: last stage-3 bottleneck unit; its 3x3 conv is optionally deformable ---
res3b3_branch2a = mx.symbol.Convolution(name='res3b3_branch2a', data=res3b2_relu, num_filter=128, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b3_branch2a = mx.symbol.BatchNorm(name='bn3b3_branch2a', data=res3b3_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale3b3_branch2a = bn3b3_branch2a
res3b3_branch2a_relu = mx.symbol.Activation(name='res3b3_branch2a_relu', data=scale3b3_branch2a, act_type='relu')
if with_dpyramid:
    # Deformable 3x3 conv: a sibling conv predicts per-location sampling offsets.
    # 72 offset channels = 2 (x,y) * 3*3 (kernel taps) * 4 (num_deformable_group).
    res3b3_branch2b_offset = mx.symbol.Convolution(name='res3b3_branch2b_offset', data=res3b3_branch2a_relu, num_filter=72, pad=(1, 1), kernel=(3, 3), stride=(1, 1))
    res3b3_branch2b = mx.contrib.symbol.DeformableConvolution(name='res3b3_branch2b', data=res3b3_branch2a_relu, offset=res3b3_branch2b_offset, num_filter=128, pad=(1, 1), kernel=(3, 3), num_deformable_group=4, stride=(1, 1), no_bias=True)
else:
    # Regular 3x3 conv; same name/shape as the deformable variant so downstream
    # layers and pretrained-weight loading are unaffected by the switch.
    res3b3_branch2b = mx.symbol.Convolution(name='res3b3_branch2b', data=res3b3_branch2a_relu, num_filter=128, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3b3_branch2b = mx.symbol.BatchNorm(name='bn3b3_branch2b', data=res3b3_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale3b3_branch2b = bn3b3_branch2b
res3b3_branch2b_relu = mx.symbol.Activation(name='res3b3_branch2b_relu', data=scale3b3_branch2b, act_type='relu')
res3b3_branch2c = mx.symbol.Convolution(name='res3b3_branch2c', data=res3b3_branch2b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b3_branch2c = mx.symbol.BatchNorm(name='bn3b3_branch2c', data=res3b3_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale3b3_branch2c = bn3b3_branch2c
res3b3 = mx.symbol.broadcast_add(*[res3b2_relu, scale3b3_branch2c], name='res3b3')
res3b3_relu = mx.symbol.Activation(name='res3b3_relu', data=res3b3, act_type='relu')
# --- res4a: stage-3 -> stage-4 transition unit ---
# Unlike the identity units, the shortcut here is a learned 1x1 projection
# (branch1) because both spatial size (stride 2) and channel count
# (512 -> 1024) change across this unit.
res4a_branch1 = mx.symbol.Convolution(name='res4a_branch1', data=res3b3_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(2, 2), no_bias=True)
bn4a_branch1 = mx.symbol.BatchNorm(name='bn4a_branch1', data=res4a_branch1, use_global_stats=True, fix_gamma=False, eps=eps)
scale4a_branch1 = bn4a_branch1
# Main branch: the stride-2 downsampling is done in the first 1x1 conv
# (Caffe-style ResNet); bottleneck width doubles to 256 for stage 4.
res4a_branch2a = mx.symbol.Convolution(name='res4a_branch2a', data=res3b3_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(2, 2), no_bias=True)
bn4a_branch2a = mx.symbol.BatchNorm(name='bn4a_branch2a', data=res4a_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4a_branch2a = bn4a_branch2a
res4a_branch2a_relu = mx.symbol.Activation(name='res4a_branch2a_relu', data=scale4a_branch2a, act_type='relu')
res4a_branch2b = mx.symbol.Convolution(name='res4a_branch2b', data=res4a_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4a_branch2b = mx.symbol.BatchNorm(name='bn4a_branch2b', data=res4a_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4a_branch2b = bn4a_branch2b
res4a_branch2b_relu = mx.symbol.Activation(name='res4a_branch2b_relu', data=scale4a_branch2b, act_type='relu')
res4a_branch2c = mx.symbol.Convolution(name='res4a_branch2c', data=res4a_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4a_branch2c = mx.symbol.BatchNorm(name='bn4a_branch2c', data=res4a_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4a_branch2c = bn4a_branch2c
# Sum projection shortcut + main branch, then ReLU.
res4a = mx.symbol.broadcast_add(*[scale4a_branch1, scale4a_branch2c], name='res4a')
res4a_relu = mx.symbol.Activation(name='res4a_relu', data=res4a, act_type='relu')
res4b1_branch2a = mx.symbol.Convolution(name='res4b1_branch2a', data=res4a_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b1_branch2a = mx.symbol.BatchNorm(name='bn4b1_branch2a', data=res4b1_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b1_branch2a = bn4b1_branch2a
res4b1_branch2a_relu = mx.symbol.Activation(name='res4b1_branch2a_relu', data=scale4b1_branch2a, act_type='relu')
res4b1_branch2b = mx.symbol.Convolution(name='res4b1_branch2b', data=res4b1_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b1_branch2b = mx.symbol.BatchNorm(name='bn4b1_branch2b', data=res4b1_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b1_branch2b = bn4b1_branch2b
res4b1_branch2b_relu = mx.symbol.Activation(name='res4b1_branch2b_relu', data=scale4b1_branch2b, act_type='relu')
res4b1_branch2c = mx.symbol.Convolution(name='res4b1_branch2c', data=res4b1_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b1_branch2c = mx.symbol.BatchNorm(name='bn4b1_branch2c', data=res4b1_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b1_branch2c = bn4b1_branch2c
res4b1 = mx.symbol.broadcast_add(*[res4a_relu, scale4b1_branch2c], name='res4b1')
res4b1_relu = mx.symbol.Activation(name='res4b1_relu', data=res4b1, act_type='relu')
res4b2_branch2a = mx.symbol.Convolution(name='res4b2_branch2a', data=res4b1_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b2_branch2a = mx.symbol.BatchNorm(name='bn4b2_branch2a', data=res4b2_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b2_branch2a = bn4b2_branch2a
res4b2_branch2a_relu = mx.symbol.Activation(name='res4b2_branch2a_relu', data=scale4b2_branch2a, act_type='relu')
res4b2_branch2b = mx.symbol.Convolution(name='res4b2_branch2b', data=res4b2_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b2_branch2b = mx.symbol.BatchNorm(name='bn4b2_branch2b', data=res4b2_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b2_branch2b = bn4b2_branch2b
res4b2_branch2b_relu = mx.symbol.Activation(name='res4b2_branch2b_relu', data=scale4b2_branch2b, act_type='relu')
res4b2_branch2c = mx.symbol.Convolution(name='res4b2_branch2c', data=res4b2_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b2_branch2c = mx.symbol.BatchNorm(name='bn4b2_branch2c', data=res4b2_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b2_branch2c = bn4b2_branch2c
res4b2 = mx.symbol.broadcast_add(*[res4b1_relu, scale4b2_branch2c], name='res4b2')
res4b2_relu = mx.symbol.Activation(name='res4b2_relu', data=res4b2, act_type='relu')
res4b3_branch2a = mx.symbol.Convolution(name='res4b3_branch2a', data=res4b2_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b3_branch2a = mx.symbol.BatchNorm(name='bn4b3_branch2a', data=res4b3_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b3_branch2a = bn4b3_branch2a
res4b3_branch2a_relu = mx.symbol.Activation(name='res4b3_branch2a_relu', data=scale4b3_branch2a, act_type='relu')
res4b3_branch2b = mx.symbol.Convolution(name='res4b3_branch2b', data=res4b3_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b3_branch2b = mx.symbol.BatchNorm(name='bn4b3_branch2b', data=res4b3_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b3_branch2b = bn4b3_branch2b
res4b3_branch2b_relu = mx.symbol.Activation(name='res4b3_branch2b_relu', data=scale4b3_branch2b, act_type='relu')
res4b3_branch2c = mx.symbol.Convolution(name='res4b3_branch2c', data=res4b3_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b3_branch2c = mx.symbol.BatchNorm(name='bn4b3_branch2c', data=res4b3_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b3_branch2c = bn4b3_branch2c
res4b3 = mx.symbol.broadcast_add(*[res4b2_relu, scale4b3_branch2c], name='res4b3')
res4b3_relu = mx.symbol.Activation(name='res4b3_relu', data=res4b3, act_type='relu')
res4b4_branch2a = mx.symbol.Convolution(name='res4b4_branch2a', data=res4b3_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b4_branch2a = mx.symbol.BatchNorm(name='bn4b4_branch2a', data=res4b4_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b4_branch2a = bn4b4_branch2a
res4b4_branch2a_relu = mx.symbol.Activation(name='res4b4_branch2a_relu', data=scale4b4_branch2a, act_type='relu')
res4b4_branch2b = mx.symbol.Convolution(name='res4b4_branch2b', data=res4b4_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b4_branch2b = mx.symbol.BatchNorm(name='bn4b4_branch2b', data=res4b4_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b4_branch2b = bn4b4_branch2b
res4b4_branch2b_relu = mx.symbol.Activation(name='res4b4_branch2b_relu', data=scale4b4_branch2b, act_type='relu')
res4b4_branch2c = mx.symbol.Convolution(name='res4b4_branch2c', data=res4b4_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b4_branch2c = mx.symbol.BatchNorm(name='bn4b4_branch2c', data=res4b4_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b4_branch2c = bn4b4_branch2c
res4b4 = mx.symbol.broadcast_add(*[res4b3_relu, scale4b4_branch2c], name='res4b4')
res4b4_relu = mx.symbol.Activation(name='res4b4_relu', data=res4b4, act_type='relu')
res4b5_branch2a = mx.symbol.Convolution(name='res4b5_branch2a', data=res4b4_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b5_branch2a = mx.symbol.BatchNorm(name='bn4b5_branch2a', data=res4b5_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b5_branch2a = bn4b5_branch2a
res4b5_branch2a_relu = mx.symbol.Activation(name='res4b5_branch2a_relu', data=scale4b5_branch2a, act_type='relu')
res4b5_branch2b = mx.symbol.Convolution(name='res4b5_branch2b', data=res4b5_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b5_branch2b = mx.symbol.BatchNorm(name='bn4b5_branch2b', data=res4b5_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b5_branch2b = bn4b5_branch2b
res4b5_branch2b_relu = mx.symbol.Activation(name='res4b5_branch2b_relu', data=scale4b5_branch2b, act_type='relu')
res4b5_branch2c = mx.symbol.Convolution(name='res4b5_branch2c', data=res4b5_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b5_branch2c = mx.symbol.BatchNorm(name='bn4b5_branch2c', data=res4b5_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b5_branch2c = bn4b5_branch2c
res4b5 = mx.symbol.broadcast_add(*[res4b4_relu, scale4b5_branch2c], name='res4b5')
res4b5_relu = mx.symbol.Activation(name='res4b5_relu', data=res4b5, act_type='relu')
res4b6_branch2a = mx.symbol.Convolution(name='res4b6_branch2a', data=res4b5_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b6_branch2a = mx.symbol.BatchNorm(name='bn4b6_branch2a', data=res4b6_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b6_branch2a = bn4b6_branch2a
res4b6_branch2a_relu = mx.symbol.Activation(name='res4b6_branch2a_relu', data=scale4b6_branch2a, act_type='relu')
res4b6_branch2b = mx.symbol.Convolution(name='res4b6_branch2b', data=res4b6_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b6_branch2b = mx.symbol.BatchNorm(name='bn4b6_branch2b', data=res4b6_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b6_branch2b = bn4b6_branch2b
res4b6_branch2b_relu = mx.symbol.Activation(name='res4b6_branch2b_relu', data=scale4b6_branch2b, act_type='relu')
res4b6_branch2c = mx.symbol.Convolution(name='res4b6_branch2c', data=res4b6_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b6_branch2c = mx.symbol.BatchNorm(name='bn4b6_branch2c', data=res4b6_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b6_branch2c = bn4b6_branch2c
res4b6 = mx.symbol.broadcast_add(*[res4b5_relu, scale4b6_branch2c], name='res4b6')
res4b6_relu = mx.symbol.Activation(name='res4b6_relu', data=res4b6, act_type='relu')
res4b7_branch2a = mx.symbol.Convolution(name='res4b7_branch2a', data=res4b6_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b7_branch2a = mx.symbol.BatchNorm(name='bn4b7_branch2a', data=res4b7_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b7_branch2a = bn4b7_branch2a
res4b7_branch2a_relu = mx.symbol.Activation(name='res4b7_branch2a_relu', data=scale4b7_branch2a, act_type='relu')
res4b7_branch2b = mx.symbol.Convolution(name='res4b7_branch2b', data=res4b7_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b7_branch2b = mx.symbol.BatchNorm(name='bn4b7_branch2b', data=res4b7_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b7_branch2b = bn4b7_branch2b
res4b7_branch2b_relu = mx.symbol.Activation(name='res4b7_branch2b_relu', data=scale4b7_branch2b, act_type='relu')
res4b7_branch2c = mx.symbol.Convolution(name='res4b7_branch2c', data=res4b7_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b7_branch2c = mx.symbol.BatchNorm(name='bn4b7_branch2c', data=res4b7_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b7_branch2c = bn4b7_branch2c
res4b7 = mx.symbol.broadcast_add(*[res4b6_relu, scale4b7_branch2c], name='res4b7')
res4b7_relu = mx.symbol.Activation(name='res4b7_relu', data=res4b7, act_type='relu')
res4b8_branch2a = mx.symbol.Convolution(name='res4b8_branch2a', data=res4b7_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b8_branch2a = mx.symbol.BatchNorm(name='bn4b8_branch2a', data=res4b8_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b8_branch2a = bn4b8_branch2a
res4b8_branch2a_relu = mx.symbol.Activation(name='res4b8_branch2a_relu', data=scale4b8_branch2a, act_type='relu')
res4b8_branch2b = mx.symbol.Convolution(name='res4b8_branch2b', data=res4b8_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b8_branch2b = mx.symbol.BatchNorm(name='bn4b8_branch2b', data=res4b8_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b8_branch2b = bn4b8_branch2b
res4b8_branch2b_relu = mx.symbol.Activation(name='res4b8_branch2b_relu', data=scale4b8_branch2b, act_type='relu')
res4b8_branch2c = mx.symbol.Convolution(name='res4b8_branch2c', data=res4b8_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b8_branch2c = mx.symbol.BatchNorm(name='bn4b8_branch2c', data=res4b8_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b8_branch2c = bn4b8_branch2c
res4b8 = mx.symbol.broadcast_add(*[res4b7_relu, scale4b8_branch2c], name='res4b8')
res4b8_relu = mx.symbol.Activation(name='res4b8_relu', data=res4b8, act_type='relu')
res4b9_branch2a = mx.symbol.Convolution(name='res4b9_branch2a', data=res4b8_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b9_branch2a = mx.symbol.BatchNorm(name='bn4b9_branch2a', data=res4b9_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b9_branch2a = bn4b9_branch2a
res4b9_branch2a_relu = mx.symbol.Activation(name='res4b9_branch2a_relu', data=scale4b9_branch2a, act_type='relu')
res4b9_branch2b = mx.symbol.Convolution(name='res4b9_branch2b', data=res4b9_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b9_branch2b = mx.symbol.BatchNorm(name='bn4b9_branch2b', data=res4b9_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b9_branch2b = bn4b9_branch2b
res4b9_branch2b_relu = mx.symbol.Activation(name='res4b9_branch2b_relu', data=scale4b9_branch2b, act_type='relu')
res4b9_branch2c = mx.symbol.Convolution(name='res4b9_branch2c', data=res4b9_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b9_branch2c = mx.symbol.BatchNorm(name='bn4b9_branch2c', data=res4b9_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b9_branch2c = bn4b9_branch2c
res4b9 = mx.symbol.broadcast_add(*[res4b8_relu, scale4b9_branch2c], name='res4b9')
res4b9_relu = mx.symbol.Activation(name='res4b9_relu', data=res4b9, act_type='relu')
res4b10_branch2a = mx.symbol.Convolution(name='res4b10_branch2a', data=res4b9_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b10_branch2a = mx.symbol.BatchNorm(name='bn4b10_branch2a', data=res4b10_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b10_branch2a = bn4b10_branch2a
res4b10_branch2a_relu = mx.symbol.Activation(name='res4b10_branch2a_relu', data=scale4b10_branch2a, act_type='relu')
res4b10_branch2b = mx.symbol.Convolution(name='res4b10_branch2b', data=res4b10_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b10_branch2b = mx.symbol.BatchNorm(name='bn4b10_branch2b', data=res4b10_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b10_branch2b = bn4b10_branch2b
res4b10_branch2b_relu = mx.symbol.Activation(name='res4b10_branch2b_relu', data=scale4b10_branch2b, act_type='relu')
res4b10_branch2c = mx.symbol.Convolution(name='res4b10_branch2c', data=res4b10_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b10_branch2c = mx.symbol.BatchNorm(name='bn4b10_branch2c', data=res4b10_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b10_branch2c = bn4b10_branch2c
res4b10 = mx.symbol.broadcast_add(*[res4b9_relu, scale4b10_branch2c], name='res4b10')
res4b10_relu = mx.symbol.Activation(name='res4b10_relu', data=res4b10, act_type='relu')
res4b11_branch2a = mx.symbol.Convolution(name='res4b11_branch2a', data=res4b10_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b11_branch2a = mx.symbol.BatchNorm(name='bn4b11_branch2a', data=res4b11_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b11_branch2a = bn4b11_branch2a
res4b11_branch2a_relu = mx.symbol.Activation(name='res4b11_branch2a_relu', data=scale4b11_branch2a, act_type='relu')
res4b11_branch2b = mx.symbol.Convolution(name='res4b11_branch2b', data=res4b11_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b11_branch2b = mx.symbol.BatchNorm(name='bn4b11_branch2b', data=res4b11_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b11_branch2b = bn4b11_branch2b
res4b11_branch2b_relu = mx.symbol.Activation(name='res4b11_branch2b_relu', data=scale4b11_branch2b, act_type='relu')
res4b11_branch2c = mx.symbol.Convolution(name='res4b11_branch2c', data=res4b11_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b11_branch2c = mx.symbol.BatchNorm(name='bn4b11_branch2c', data=res4b11_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b11_branch2c = bn4b11_branch2c
res4b11 = mx.symbol.broadcast_add(*[res4b10_relu, scale4b11_branch2c], name='res4b11')
res4b11_relu = mx.symbol.Activation(name='res4b11_relu', data=res4b11, act_type='relu')
res4b12_branch2a = mx.symbol.Convolution(name='res4b12_branch2a', data=res4b11_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b12_branch2a = mx.symbol.BatchNorm(name='bn4b12_branch2a', data=res4b12_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b12_branch2a = bn4b12_branch2a
res4b12_branch2a_relu = mx.symbol.Activation(name='res4b12_branch2a_relu', data=scale4b12_branch2a, act_type='relu')
res4b12_branch2b = mx.symbol.Convolution(name='res4b12_branch2b', data=res4b12_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b12_branch2b = mx.symbol.BatchNorm(name='bn4b12_branch2b', data=res4b12_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b12_branch2b = bn4b12_branch2b
res4b12_branch2b_relu = mx.symbol.Activation(name='res4b12_branch2b_relu', data=scale4b12_branch2b, act_type='relu')
res4b12_branch2c = mx.symbol.Convolution(name='res4b12_branch2c', data=res4b12_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b12_branch2c = mx.symbol.BatchNorm(name='bn4b12_branch2c', data=res4b12_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b12_branch2c = bn4b12_branch2c
res4b12 = mx.symbol.broadcast_add(*[res4b11_relu, scale4b12_branch2c], name='res4b12')
res4b12_relu = mx.symbol.Activation(name='res4b12_relu', data=res4b12, act_type='relu')
res4b13_branch2a = mx.symbol.Convolution(name='res4b13_branch2a', data=res4b12_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b13_branch2a = mx.symbol.BatchNorm(name='bn4b13_branch2a', data=res4b13_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b13_branch2a = bn4b13_branch2a
res4b13_branch2a_relu = mx.symbol.Activation(name='res4b13_branch2a_relu', data=scale4b13_branch2a, act_type='relu')
res4b13_branch2b = mx.symbol.Convolution(name='res4b13_branch2b', data=res4b13_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b13_branch2b = mx.symbol.BatchNorm(name='bn4b13_branch2b', data=res4b13_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b13_branch2b = bn4b13_branch2b
res4b13_branch2b_relu = mx.symbol.Activation(name='res4b13_branch2b_relu', data=scale4b13_branch2b, act_type='relu')
res4b13_branch2c = mx.symbol.Convolution(name='res4b13_branch2c', data=res4b13_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b13_branch2c = mx.symbol.BatchNorm(name='bn4b13_branch2c', data=res4b13_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b13_branch2c = bn4b13_branch2c
res4b13 = mx.symbol.broadcast_add(*[res4b12_relu, scale4b13_branch2c], name='res4b13')
res4b13_relu = mx.symbol.Activation(name='res4b13_relu', data=res4b13, act_type='relu')
res4b14_branch2a = mx.symbol.Convolution(name='res4b14_branch2a', data=res4b13_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b14_branch2a = mx.symbol.BatchNorm(name='bn4b14_branch2a', data=res4b14_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b14_branch2a = bn4b14_branch2a
res4b14_branch2a_relu = mx.symbol.Activation(name='res4b14_branch2a_relu', data=scale4b14_branch2a, act_type='relu')
res4b14_branch2b = mx.symbol.Convolution(name='res4b14_branch2b', data=res4b14_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b14_branch2b = mx.symbol.BatchNorm(name='bn4b14_branch2b', data=res4b14_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b14_branch2b = bn4b14_branch2b
res4b14_branch2b_relu = mx.symbol.Activation(name='res4b14_branch2b_relu', data=scale4b14_branch2b, act_type='relu')
res4b14_branch2c = mx.symbol.Convolution(name='res4b14_branch2c', data=res4b14_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b14_branch2c = mx.symbol.BatchNorm(name='bn4b14_branch2c', data=res4b14_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b14_branch2c = bn4b14_branch2c
res4b14 = mx.symbol.broadcast_add(*[res4b13_relu, scale4b14_branch2c], name='res4b14')
res4b14_relu = mx.symbol.Activation(name='res4b14_relu', data=res4b14, act_type='relu')
res4b15_branch2a = mx.symbol.Convolution(name='res4b15_branch2a', data=res4b14_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b15_branch2a = mx.symbol.BatchNorm(name='bn4b15_branch2a', data=res4b15_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b15_branch2a = bn4b15_branch2a
res4b15_branch2a_relu = mx.symbol.Activation(name='res4b15_branch2a_relu', data=scale4b15_branch2a, act_type='relu')
res4b15_branch2b = mx.symbol.Convolution(name='res4b15_branch2b', data=res4b15_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b15_branch2b = mx.symbol.BatchNorm(name='bn4b15_branch2b', data=res4b15_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b15_branch2b = bn4b15_branch2b
res4b15_branch2b_relu = mx.symbol.Activation(name='res4b15_branch2b_relu', data=scale4b15_branch2b, act_type='relu')
res4b15_branch2c = mx.symbol.Convolution(name='res4b15_branch2c', data=res4b15_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b15_branch2c = mx.symbol.BatchNorm(name='bn4b15_branch2c', data=res4b15_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b15_branch2c = bn4b15_branch2c
res4b15 = mx.symbol.broadcast_add(*[res4b14_relu, scale4b15_branch2c], name='res4b15')
res4b15_relu = mx.symbol.Activation(name='res4b15_relu', data=res4b15, act_type='relu')
res4b16_branch2a = mx.symbol.Convolution(name='res4b16_branch2a', data=res4b15_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b16_branch2a = mx.symbol.BatchNorm(name='bn4b16_branch2a', data=res4b16_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b16_branch2a = bn4b16_branch2a
res4b16_branch2a_relu = mx.symbol.Activation(name='res4b16_branch2a_relu', data=scale4b16_branch2a, act_type='relu')
res4b16_branch2b = mx.symbol.Convolution(name='res4b16_branch2b', data=res4b16_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b16_branch2b = mx.symbol.BatchNorm(name='bn4b16_branch2b', data=res4b16_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b16_branch2b = bn4b16_branch2b
res4b16_branch2b_relu = mx.symbol.Activation(name='res4b16_branch2b_relu', data=scale4b16_branch2b, act_type='relu')
res4b16_branch2c = mx.symbol.Convolution(name='res4b16_branch2c', data=res4b16_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b16_branch2c = mx.symbol.BatchNorm(name='bn4b16_branch2c', data=res4b16_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b16_branch2c = bn4b16_branch2c
res4b16 = mx.symbol.broadcast_add(*[res4b15_relu, scale4b16_branch2c], name='res4b16')
res4b16_relu = mx.symbol.Activation(name='res4b16_relu', data=res4b16, act_type='relu')
res4b17_branch2a = mx.symbol.Convolution(name='res4b17_branch2a', data=res4b16_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b17_branch2a = mx.symbol.BatchNorm(name='bn4b17_branch2a', data=res4b17_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b17_branch2a = bn4b17_branch2a
res4b17_branch2a_relu = mx.symbol.Activation(name='res4b17_branch2a_relu', data=scale4b17_branch2a, act_type='relu')
res4b17_branch2b = mx.symbol.Convolution(name='res4b17_branch2b', data=res4b17_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b17_branch2b = mx.symbol.BatchNorm(name='bn4b17_branch2b', data=res4b17_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b17_branch2b = bn4b17_branch2b
res4b17_branch2b_relu = mx.symbol.Activation(name='res4b17_branch2b_relu', data=scale4b17_branch2b, act_type='relu')
res4b17_branch2c = mx.symbol.Convolution(name='res4b17_branch2c', data=res4b17_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b17_branch2c = mx.symbol.BatchNorm(name='bn4b17_branch2c', data=res4b17_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b17_branch2c = bn4b17_branch2c
res4b17 = mx.symbol.broadcast_add(*[res4b16_relu, scale4b17_branch2c], name='res4b17')
res4b17_relu = mx.symbol.Activation(name='res4b17_relu', data=res4b17, act_type='relu')
res4b18_branch2a = mx.symbol.Convolution(name='res4b18_branch2a', data=res4b17_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b18_branch2a = mx.symbol.BatchNorm(name='bn4b18_branch2a', data=res4b18_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b18_branch2a = bn4b18_branch2a
res4b18_branch2a_relu = mx.symbol.Activation(name='res4b18_branch2a_relu', data=scale4b18_branch2a, act_type='relu')
res4b18_branch2b = mx.symbol.Convolution(name='res4b18_branch2b', data=res4b18_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b18_branch2b = mx.symbol.BatchNorm(name='bn4b18_branch2b', data=res4b18_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b18_branch2b = bn4b18_branch2b
res4b18_branch2b_relu = mx.symbol.Activation(name='res4b18_branch2b_relu', data=scale4b18_branch2b, act_type='relu')
res4b18_branch2c = mx.symbol.Convolution(name='res4b18_branch2c', data=res4b18_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b18_branch2c = mx.symbol.BatchNorm(name='bn4b18_branch2c', data=res4b18_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b18_branch2c = bn4b18_branch2c
res4b18 = mx.symbol.broadcast_add(*[res4b17_relu, scale4b18_branch2c], name='res4b18')
res4b18_relu = mx.symbol.Activation(name='res4b18_relu', data=res4b18, act_type='relu')
res4b19_branch2a = mx.symbol.Convolution(name='res4b19_branch2a', data=res4b18_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b19_branch2a = mx.symbol.BatchNorm(name='bn4b19_branch2a', data=res4b19_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b19_branch2a = bn4b19_branch2a
res4b19_branch2a_relu = mx.symbol.Activation(name='res4b19_branch2a_relu', data=scale4b19_branch2a, act_type='relu')
res4b19_branch2b = mx.symbol.Convolution(name='res4b19_branch2b', data=res4b19_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b19_branch2b = mx.symbol.BatchNorm(name='bn4b19_branch2b', data=res4b19_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b19_branch2b = bn4b19_branch2b
res4b19_branch2b_relu = mx.symbol.Activation(name='res4b19_branch2b_relu', data=scale4b19_branch2b, act_type='relu')
res4b19_branch2c = mx.symbol.Convolution(name='res4b19_branch2c', data=res4b19_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b19_branch2c = mx.symbol.BatchNorm(name='bn4b19_branch2c', data=res4b19_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b19_branch2c = bn4b19_branch2c
res4b19 = mx.symbol.broadcast_add(*[res4b18_relu, scale4b19_branch2c], name='res4b19')
res4b19_relu = mx.symbol.Activation(name='res4b19_relu', data=res4b19, act_type='relu')
res4b20_branch2a = mx.symbol.Convolution(name='res4b20_branch2a', data=res4b19_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b20_branch2a = mx.symbol.BatchNorm(name='bn4b20_branch2a', data=res4b20_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b20_branch2a = bn4b20_branch2a
res4b20_branch2a_relu = mx.symbol.Activation(name='res4b20_branch2a_relu', data=scale4b20_branch2a, act_type='relu')
res4b20_branch2b = mx.symbol.Convolution(name='res4b20_branch2b', data=res4b20_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b20_branch2b = mx.symbol.BatchNorm(name='bn4b20_branch2b', data=res4b20_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b20_branch2b = bn4b20_branch2b
res4b20_branch2b_relu = mx.symbol.Activation(name='res4b20_branch2b_relu', data=scale4b20_branch2b, act_type='relu')
res4b20_branch2c = mx.symbol.Convolution(name='res4b20_branch2c', data=res4b20_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b20_branch2c = mx.symbol.BatchNorm(name='bn4b20_branch2c', data=res4b20_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b20_branch2c = bn4b20_branch2c
res4b20 = mx.symbol.broadcast_add(*[res4b19_relu, scale4b20_branch2c], name='res4b20')
res4b20_relu = mx.symbol.Activation(name='res4b20_relu', data=res4b20, act_type='relu')
res4b21_branch2a = mx.symbol.Convolution(name='res4b21_branch2a', data=res4b20_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b21_branch2a = mx.symbol.BatchNorm(name='bn4b21_branch2a', data=res4b21_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b21_branch2a = bn4b21_branch2a
res4b21_branch2a_relu = mx.symbol.Activation(name='res4b21_branch2a_relu', data=scale4b21_branch2a, act_type='relu')
res4b21_branch2b = mx.symbol.Convolution(name='res4b21_branch2b', data=res4b21_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b21_branch2b = mx.symbol.BatchNorm(name='bn4b21_branch2b', data=res4b21_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b21_branch2b = bn4b21_branch2b
res4b21_branch2b_relu = mx.symbol.Activation(name='res4b21_branch2b_relu', data=scale4b21_branch2b, act_type='relu')
res4b21_branch2c = mx.symbol.Convolution(name='res4b21_branch2c', data=res4b21_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b21_branch2c = mx.symbol.BatchNorm(name='bn4b21_branch2c', data=res4b21_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b21_branch2c = bn4b21_branch2c
res4b21 = mx.symbol.broadcast_add(*[res4b20_relu, scale4b21_branch2c], name='res4b21')
res4b21_relu = mx.symbol.Activation(name='res4b21_relu', data=res4b21, act_type='relu')
# --- res4b22: final stage-4 bottleneck unit; like res3b3, its 3x3 conv is
# optionally replaced by a DeformableConvolution when with_dpyramid is set ---
res4b22_branch2a = mx.symbol.Convolution(name='res4b22_branch2a', data=res4b21_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b22_branch2a = mx.symbol.BatchNorm(name='bn4b22_branch2a', data=res4b22_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b22_branch2a = bn4b22_branch2a
res4b22_branch2a_relu = mx.symbol.Activation(name='res4b22_branch2a_relu', data=scale4b22_branch2a, act_type='relu')
if with_dpyramid:
    # Offset predictor: 72 channels = 2 coords * 3*3 taps * 4 deformable groups.
    res4b22_branch2b_offset = mx.symbol.Convolution(name='res4b22_branch2b_offset', data=res4b22_branch2a_relu, num_filter=72, pad=(1, 1), kernel=(3, 3), stride=(1, 1))
    res4b22_branch2b = mx.contrib.symbol.DeformableConvolution(name='res4b22_branch2b', data=res4b22_branch2a_relu, offset=res4b22_branch2b_offset, num_filter=256, pad=(1, 1), kernel=(3, 3), num_deformable_group=4, stride=(1, 1), no_bias=True)
else:
    # Plain 3x3 conv with the same layer name, keeping checkpoints interchangeable.
    res4b22_branch2b = mx.symbol.Convolution(name='res4b22_branch2b', data=res4b22_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b22_branch2b = mx.symbol.BatchNorm(name='bn4b22_branch2b', data=res4b22_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b22_branch2b = bn4b22_branch2b
res4b22_branch2b_relu = mx.symbol.Activation(name='res4b22_branch2b_relu', data=scale4b22_branch2b, act_type='relu')
res4b22_branch2c = mx.symbol.Convolution(name='res4b22_branch2c', data=res4b22_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b22_branch2c = mx.symbol.BatchNorm(name='bn4b22_branch2c', data=res4b22_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b22_branch2c = bn4b22_branch2c
res4b22 = mx.symbol.broadcast_add(*[res4b21_relu, scale4b22_branch2c], name='res4b22')
# res4b22_relu is the stage-4 output consumed by stage 5 below (and, in
# detection heads, typically also tapped as a feature map).
res4b22_relu = mx.symbol.Activation(name='res4b22_relu', data=res4b22, act_type='relu')
if with_dilated:
res5_stride = (1, 1)
res5_dilate = (2, 2)
else:
res5_stride = (2, 2)
res5_dilate = (1, 1)
res5a_branch2a = mx.symbol.Convolution(name='res5a_branch2a', data=res4b22_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=res5_stride, no_bias=True)
bn5a_branch2a = mx.symbol.BatchNorm(name='bn5a_branch2a', data=res5a_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale5a_branch2a = bn5a_branch2a
res5a_branch2a_relu = mx.symbol.Activation(name='res5a_branch2a_relu', data=scale5a_branch2a, act_type='relu')
if with_dconv:
res5a_branch2b_offset = mx.symbol.Convolution(name='res5a_branch2b_offset', data=res5a_branch2a_relu, num_filter=72, pad=res5_dilate, kernel=(3, 3), dilate=res5_dilate)
res5a_branch2b = mx.contrib.symbol.DeformableConvolution(name='res5a_branch2b', data=res5a_branch2a_relu, offset=res5a_branch2b_offset, num_filter=512, pad=res5_dilate, kernel=(3, 3), num_deformable_group=4, stride=(1, 1), dilate=res5_dilate, no_bias=True)
else:
res5a_branch2b = mx.symbol.Convolution(name='res5a_branch2b', data=res5a_branch2a_relu, num_filter=512, pad=res5_dilate, kernel=(3, 3), stride=(1, 1), dilate=res5_dilate, no_bias=True)
bn5a_branch2b = mx.symbol.BatchNorm(name='bn5a_branch2b', data=res5a_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale5a_branch2b = bn5a_branch2b
res5a_branch2b_relu = mx.symbol.Activation(name='res5a_branch2b_relu', data=scale5a_branch2b, act_type='relu')
res5a_branch2c = mx.symbol.Convolution(name='res5a_branch2c', data=res5a_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5a_branch2c = mx.symbol.BatchNorm(name='bn5a_branch2c', data=res5a_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale5a_branch2c = bn5a_branch2c
res5a_branch1 = mx.symbol.Convolution(name='res5a_branch1', data=res4b22_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=res5_stride, no_bias=True)
bn5a_branch1 = mx.symbol.BatchNorm(name='bn5a_branch1', data=res5a_branch1, use_global_stats=True, fix_gamma=False, eps=eps)
scale5a_branch1 = bn5a_branch1
res5a = mx.symbol.broadcast_add(*[scale5a_branch1, scale5a_branch2c], name='res5a')
res5a_relu = mx.symbol.Activation(name='res5a_relu', data=res5a, act_type='relu')
res5b_branch2a = mx.symbol.Convolution(name='res5b_branch2a', data=res5a_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5b_branch2a = mx.symbol.BatchNorm(name='bn5b_branch2a', data=res5b_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale5b_branch2a = bn5b_branch2a
res5b_branch2a_relu = mx.symbol.Activation(name='res5b_branch2a_relu', data=scale5b_branch2a, act_type='relu')
if with_dconv:
res5b_branch2b_offset = mx.symbol.Convolution(name='res5b_branch2b_offset', data=res5b_branch2a_relu, num_filter=72, pad=res5_dilate, kernel=(3, 3), dilate=res5_dilate)
res5b_branch2b = mx.contrib.symbol.DeformableConvolution(name='res5b_branch2b', data=res5b_branch2a_relu, offset=res5b_branch2b_offset, num_filter=512, pad=res5_dilate, kernel=(3, 3), num_deformable_group=4, dilate=res5_dilate, no_bias=True)
else:
res5b_branch2b = mx.symbol.Convolution(name='res5b_branch2b', data=res5b_branch2a_relu, num_filter=512, pad=res5_dilate, kernel=(3, 3), stride=(1, 1), dilate=res5_dilate, no_bias=True)
bn5b_branch2b = mx.symbol.BatchNorm(name='bn5b_branch2b', data=res5b_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale5b_branch2b = bn5b_branch2b
res5b_branch2b_relu = mx.symbol.Activation(name='res5b_branch2b_relu', data=scale5b_branch2b, act_type='relu')
res5b_branch2c = mx.symbol.Convolution(name='res5b_branch2c', data=res5b_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5b_branch2c = mx.symbol.BatchNorm(name='bn5b_branch2c', data=res5b_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale5b_branch2c = bn5b_branch2c
res5b = mx.symbol.broadcast_add(*[res5a_relu, scale5b_branch2c], name='res5b')
res5b_relu = mx.symbol.Activation(name='res5b_relu', data=res5b, act_type='relu')
res5c_branch2a = mx.symbol.Convolution(name='res5c_branch2a', data=res5b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5c_branch2a = mx.symbol.BatchNorm(name='bn5c_branch2a', data=res5c_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale5c_branch2a = bn5c_branch2a
res5c_branch2a_relu = mx.symbol.Activation(name='res5c_branch2a_relu', data=scale5c_branch2a, act_type='relu')
if with_dconv:
res5c_branch2b_offset = mx.symbol.Convolution(name='res5c_branch2b_offset', data=res5c_branch2a_relu, num_filter=72, pad=res5_dilate, kernel=(3, 3), dilate=res5_dilate)
res5c_branch2b = mx.contrib.symbol.DeformableConvolution(name='res5c_branch2b', data=res5c_branch2a_relu, offset=res5c_branch2b_offset, num_filter=512, pad=res5_dilate, kernel=(3, 3), num_deformable_group=4, dilate=res5_dilate, no_bias=True)
else:
res5c_branch2b = mx.symbol.Convolution(name='res5c_branch2b', data=res5c_branch2a_relu, num_filter=512, pad=res5_dilate, kernel=(3, 3), stride=(1, 1), dilate=res5_dilate, no_bias=True)
bn5c_branch2b = mx.symbol.BatchNorm(name='bn5c_branch2b', data=res5c_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale5c_branch2b = bn5c_branch2b
res5c_branch2b_relu = mx.symbol.Activation(name='res5c_branch2b_relu', data=scale5c_branch2b, act_type='relu')
res5c_branch2c = mx.symbol.Convolution(name='res5c_branch2c', data=res5c_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5c_branch2c = mx.symbol.BatchNorm(name='bn5c_branch2c', data=res5c_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale5c_branch2c = bn5c_branch2c
res5c = mx.symbol.broadcast_add(*[res5b_relu, scale5c_branch2c], name='res5c')
res5c_relu = mx.symbol.Activation(name='res5c_relu', data=res5c, act_type='relu')
return (res2c_relu, res3b3_relu, res4b22_relu, res5c_relu)
def get_fpn_feature(self, c2, c3, c4, c5, feature_dim=256):
    """Build the FPN pyramid (P2-P6) from backbone stages C2-C5.

    Each stage is projected to ``feature_dim`` channels with a 1x1 conv,
    merged top-down via nearest-neighbor upsampling, and smoothed with a
    3x3 conv.  P6 is a stride-2 3x3 conv applied directly to C5.
    Returns (fpn_p2, fpn_p3, fpn_p4, fpn_p5, fpn_p6).
    """
    stages = {2: c2, 3: c3, 4: c4, 5: c5}
    # Lateral 1x1 projections onto the common feature dimension.
    lateral = {
        lvl: mx.symbol.Convolution(data=feat, kernel=(1, 1), pad=(0, 0), stride=(1, 1),
                                   num_filter=feature_dim, name='fpn_p%d_1x1' % lvl)
        for (lvl, feat) in stages.items()
    }
    # Top-down pathway: upsample the coarser merged map and add the lateral.
    merged = {5: lateral[5]}
    for lvl in (4, 3, 2):
        up = mx.symbol.UpSampling(merged[lvl + 1], scale=2, sample_type='nearest',
                                  name='fpn_p%d_upsample' % (lvl + 1))
        merged[lvl] = mx.sym.ElementWiseSum(*[up, lateral[lvl]], name='fpn_p%d_sum' % lvl)
    # 3x3 smoothing convs produce the final pyramid outputs.
    smoothed = {
        lvl: mx.symbol.Convolution(data=merged[lvl], kernel=(3, 3), pad=(1, 1), stride=(1, 1),
                                   num_filter=feature_dim, name='fpn_p%d' % lvl)
        for lvl in (2, 3, 4, 5)
    }
    # Extra coarse level taken straight from C5.
    fpn_p6 = mx.sym.Convolution(data=c5, kernel=(3, 3), pad=(1, 1), stride=(2, 2),
                                num_filter=feature_dim, name='fpn_p6')
    return (smoothed[2], smoothed[3], smoothed[4], smoothed[5], fpn_p6)
def get_rpn_subnet(self, data, num_anchors, suffix):
    """Attach the RPN head to one pyramid level.

    All levels share one set of head parameters through
    ``self.shared_param_dict``; ``suffix`` only disambiguates symbol names.
    Returns (reshaped cls scores, reshaped cls probs, reshaped bbox preds,
    raw bbox preds).
    """
    conv = mx.sym.Convolution(data=data, kernel=(3, 3), pad=(1, 1), num_filter=512,
                              name=('rpn_conv_' + suffix),
                              weight=self.shared_param_dict['rpn_conv_weight'],
                              bias=self.shared_param_dict['rpn_conv_bias'])
    relu = mx.sym.Activation(data=conv, act_type='relu', name=('rpn_relu_' + suffix))
    cls_score = mx.sym.Convolution(data=relu, kernel=(1, 1), pad=(0, 0),
                                   num_filter=(2 * num_anchors),
                                   name=('rpn_cls_score_' + suffix),
                                   weight=self.shared_param_dict['rpn_cls_score_weight'],
                                   bias=self.shared_param_dict['rpn_cls_score_bias'])
    bbox_pred = mx.sym.Convolution(data=relu, kernel=(1, 1), pad=(0, 0),
                                   num_filter=(4 * num_anchors),
                                   name=('rpn_bbox_pred_' + suffix),
                                   weight=self.shared_param_dict['rpn_bbox_pred_weight'],
                                   bias=self.shared_param_dict['rpn_bbox_pred_bias'])
    # Fold anchors into the spatial axis so softmax runs channel-wise over fg/bg.
    cls_score_t1 = mx.sym.Reshape(data=cls_score, shape=(0, 2, -1, 0),
                                  name=('rpn_cls_score_t1_' + suffix))
    cls_score_t2 = mx.sym.Reshape(data=cls_score_t1, shape=(0, 2, -1),
                                  name=('rpn_cls_score_t2_' + suffix))
    cls_prob = mx.sym.SoftmaxActivation(data=cls_score_t1, mode='channel',
                                        name=('rpn_cls_prob_' + suffix))
    cls_prob_t = mx.sym.Reshape(data=cls_prob, shape=(0, (2 * num_anchors), -1, 0),
                                name=('rpn_cls_prob_t_' + suffix))
    bbox_pred_t = mx.sym.Reshape(data=bbox_pred, shape=(0, 0, -1),
                                 name=('rpn_bbox_pred_t_' + suffix))
    return (cls_score_t2, cls_prob_t, bbox_pred_t, bbox_pred)
def get_deformable_roipooling(self, name, data, rois, output_dim, spatial_scale, param_name, group_size=1, pooled_size=7, sample_per_part=4, part_size=7):
    """Deformable PS-ROI pooling for one feature map.

    A first, offset-free pooling pass feeds an FC layer that predicts
    (x, y) offsets per part; pooling is then repeated with those offsets.
    The offset FC parameters are shared across levels via ``param_name``.
    """
    plain_pool = mx.contrib.sym.DeformablePSROIPooling(
        name=(('offset_' + name) + '_t'), data=data, rois=rois,
        group_size=group_size, pooled_size=pooled_size,
        sample_per_part=sample_per_part, no_trans=True, part_size=part_size,
        output_dim=output_dim, spatial_scale=spatial_scale)
    # Low lr_mult keeps the offset branch from dominating early training.
    pred_offset = mx.sym.FullyConnected(
        name=('offset_' + name), data=plain_pool,
        num_hidden=((part_size * part_size) * 2), lr_mult=0.01,
        weight=self.shared_param_dict[(('offset_' + param_name) + '_weight')],
        bias=self.shared_param_dict[(('offset_' + param_name) + '_bias')])
    pred_offset = mx.sym.Reshape(data=pred_offset, shape=(-1, 2, part_size, part_size),
                                 name=('offset_reshape_' + name))
    return mx.contrib.sym.DeformablePSROIPooling(
        name=('deformable_roi_pool_' + name), data=data, rois=rois,
        trans=pred_offset, group_size=group_size, pooled_size=pooled_size,
        sample_per_part=sample_per_part, no_trans=False, part_size=part_size,
        output_dim=output_dim, spatial_scale=spatial_scale, trans_std=0.1)
def get_constant_symbol(self, const_val):
    """Return a gradient-blocked constant symbol for ``const_val``.

    Symbols are cached in ``self.constants_dict`` so the same constant is
    only ever materialized once per graph.
    """
    cached = self.constants_dict.get(const_val)
    if cached is not None:
        return cached
    sym = mx.sym.Variable('const_eq_{0}'.format(const_val), shape=(1,),
                          init=MyConstant(value=[const_val]))
    sym = mx.sym.BlockGrad(sym)
    self.constants_dict[const_val] = sym
    return sym
def cos_sim_2_dist_generic(self, cos_sim, x=None, y=None, x_is_norm=True, y_is_norm=True):
    """Turn a cosine-similarity matrix into squared Euclidean distances.

    Uses ||x||^2 - 2*x.y + ||y||^2; when an input is already L2-normalized
    its squared-norm term collapses to the constant 1.
    """
    if not x_is_norm:
        assert (x is not None), 'if x is not L2 normalized then x must be provided'
        sq = mx.sym.sum_axis(mx.sym.square(x), axis=0, keepdims=True)
        x_norm = mx.sym.transpose(sq, axes=(1, 0))
    else:
        x_norm = self.get_constant_symbol(1)
    if not y_is_norm:
        assert (y is not None), 'if y is not L2 normalized then y must be provided'
        y_norm = mx.sym.sum_axis(mx.sym.square(y), axis=0, keepdims=True)
    else:
        y_norm = self.get_constant_symbol(1)
    cross_term = mx.sym.broadcast_mul(self.get_constant_symbol(2), cos_sim)
    return mx.sym.broadcast_add(mx.sym.broadcast_sub(x_norm, cross_term), y_norm)
def cos_sim_2_dist(self, cos_sim, cfg=None, embd=None, reps=None):
    """Squared distances between ROI embeddings and class representatives.

    Specialization of ``cos_sim_2_dist_generic`` driven by the config's
    normalization flags: a norm term collapses to 1 when the corresponding
    tensor is already L2-normalized.
    """
    if not cfg.network.EMBED_L2_NORM:
        assert (embd is not None), 'if embedding is not L2 normalized then embd must be provided'
        embd_norm = mx.sym.sum_axis(mx.sym.square(embd), axis=1, keepdims=True)
        embd_norm = mx.sym.reshape(embd_norm, shape=(0, 1, 1))
    else:
        embd_norm = self.get_constant_symbol(1)
    if not cfg.network.REP_L2_NORM:
        assert (reps is not None), 'if representatives are not L2 normalized then reps must be provided'
        reps_norm = mx.sym.sum_axis(mx.sym.square(reps), axis=0, keepdims=True)
    else:
        reps_norm = self.get_constant_symbol(1)
    cross_term = mx.sym.broadcast_mul(self.get_constant_symbol(2), cos_sim)
    return mx.sym.broadcast_add(mx.sym.broadcast_sub(embd_norm, cross_term), reps_norm)
def get_symbol(self, cfg, is_train=True):
    """Assemble the full detection symbol graph.

    Pipeline: deformable ResNet backbone -> FPN -> shared RPN head per
    pyramid level -> pyramid proposals -> deformable FPN ROI pooling ->
    two shared FC layers -> representatives-based classification (with
    optional auxiliary linear classifier and embedding/representative
    losses) plus bbox regression.  Returns a training symbol with losses
    when ``is_train``, otherwise an inference symbol.
    """
    num_classes = cfg.dataset.NUM_CLASSES
    # Class-agnostic regression uses only 2 "classes" (bg/fg) of bbox deltas.
    num_reg_classes = (2 if cfg.CLASS_AGNOSTIC else num_classes)
    data = mx.sym.Variable(name='data')
    im_info = mx.sym.Variable(name='im_info')
    # Backbone (deformable pyramid + deformable res5) and FPN features.
    (res2, res3, res4, res5) = self.get_resnet_backbone(data, with_dpyramid=True, with_dconv=True)
    (fpn_p2, fpn_p3, fpn_p4, fpn_p5, fpn_p6) = self.get_fpn_feature(res2, res3, res4, res5)
    # One shared-weight RPN head per pyramid level.
    (rpn_cls_score_p2, rpn_prob_p2, rpn_bbox_loss_p2, rpn_bbox_pred_p2) = self.get_rpn_subnet(fpn_p2, cfg.network.NUM_ANCHORS, 'p2')
    (rpn_cls_score_p3, rpn_prob_p3, rpn_bbox_loss_p3, rpn_bbox_pred_p3) = self.get_rpn_subnet(fpn_p3, cfg.network.NUM_ANCHORS, 'p3')
    (rpn_cls_score_p4, rpn_prob_p4, rpn_bbox_loss_p4, rpn_bbox_pred_p4) = self.get_rpn_subnet(fpn_p4, cfg.network.NUM_ANCHORS, 'p4')
    (rpn_cls_score_p5, rpn_prob_p5, rpn_bbox_loss_p5, rpn_bbox_pred_p5) = self.get_rpn_subnet(fpn_p5, cfg.network.NUM_ANCHORS, 'p5')
    (rpn_cls_score_p6, rpn_prob_p6, rpn_bbox_loss_p6, rpn_bbox_pred_p6) = self.get_rpn_subnet(fpn_p6, cfg.network.NUM_ANCHORS, 'p6')
    rpn_cls_prob_dict = {'rpn_cls_prob_stride64': rpn_prob_p6, 'rpn_cls_prob_stride32': rpn_prob_p5, 'rpn_cls_prob_stride16': rpn_prob_p4, 'rpn_cls_prob_stride8': rpn_prob_p3, 'rpn_cls_prob_stride4': rpn_prob_p2}
    rpn_bbox_pred_dict = {'rpn_bbox_pred_stride64': rpn_bbox_pred_p6, 'rpn_bbox_pred_stride32': rpn_bbox_pred_p5, 'rpn_bbox_pred_stride16': rpn_bbox_pred_p4, 'rpn_bbox_pred_stride8': rpn_bbox_pred_p3, 'rpn_bbox_pred_stride4': rpn_bbox_pred_p2}
    # NOTE: dict.items() + dict.items() is Python 2 only (file also uses cPickle).
    arg_dict = dict((rpn_cls_prob_dict.items() + rpn_bbox_pred_dict.items()))
    if is_train:
        if (not cfg.network.base_net_lock):
            # RPN supervision inputs (unused when the base net is frozen).
            rpn_label = mx.sym.Variable(name='label')
            rpn_bbox_target = mx.sym.Variable(name='bbox_target')
            rpn_bbox_weight = mx.sym.Variable(name='bbox_weight')
        gt_boxes = mx.sym.Variable(name='gt_boxes')
        if (not cfg.network.base_net_lock):
            # Concatenate per-level RPN outputs along the folded spatial axis
            # and attach the RPN classification / smooth-L1 bbox losses.
            rpn_cls_score = mx.sym.Concat(rpn_cls_score_p2, rpn_cls_score_p3, rpn_cls_score_p4, rpn_cls_score_p5, rpn_cls_score_p6, dim=2)
            rpn_bbox_loss = mx.sym.Concat(rpn_bbox_loss_p2, rpn_bbox_loss_p3, rpn_bbox_loss_p4, rpn_bbox_loss_p5, rpn_bbox_loss_p6, dim=2)
            rpn_cls_output = mx.sym.SoftmaxOutput(data=rpn_cls_score, label=rpn_label, multi_output=True, normalization='valid', use_ignore=True, ignore_label=(- 1), name='rpn_cls_prob')
            rpn_bbox_loss = (rpn_bbox_weight * mx.sym.smooth_l1(name='rpn_bbox_loss_l1', scalar=3.0, data=(rpn_bbox_loss - rpn_bbox_target)))
            rpn_bbox_loss = mx.sym.MakeLoss(name='rpn_bbox_loss', data=rpn_bbox_loss, grad_scale=(1.0 / cfg.TRAIN.RPN_BATCH_SIZE))
        # Multi-level proposal generation, then assignment of RCNN targets.
        aux_dict = {'op_type': 'pyramid_proposal', 'name': 'rois', 'im_info': im_info, 'feat_stride': tuple(cfg.network.RPN_FEAT_STRIDE), 'scales': tuple(cfg.network.ANCHOR_SCALES), 'ratios': tuple(cfg.network.ANCHOR_RATIOS), 'rpn_pre_nms_top_n': cfg.TRAIN.RPN_PRE_NMS_TOP_N, 'rpn_post_nms_top_n': cfg.TRAIN.RPN_POST_NMS_TOP_N, 'threshold': cfg.TRAIN.RPN_NMS_THRESH, 'rpn_min_size': cfg.TRAIN.RPN_MIN_SIZE}
        rois = mx.sym.Custom(**dict((arg_dict.items() + aux_dict.items())))
        gt_boxes_reshape = mx.sym.Reshape(data=gt_boxes, shape=((- 1), 5), name='gt_boxes_reshape')
        (rois, label, bbox_target, bbox_weight) = mx.sym.Custom(rois=rois, gt_boxes=gt_boxes_reshape, op_type='proposal_target', num_classes=num_reg_classes, batch_images=cfg.TRAIN.BATCH_IMAGES, batch_rois=cfg.TRAIN.BATCH_ROIS, cfg=cPickle.dumps(cfg), fg_fraction=cfg.TRAIN.FG_FRACTION)
    else:
        # Inference: proposals only, with the TEST NMS/top-N settings.
        aux_dict = {'op_type': 'pyramid_proposal', 'name': 'rois', 'im_info': im_info, 'feat_stride': tuple(cfg.network.RPN_FEAT_STRIDE), 'scales': tuple(cfg.network.ANCHOR_SCALES), 'ratios': tuple(cfg.network.ANCHOR_RATIOS), 'rpn_pre_nms_top_n': cfg.TEST.RPN_PRE_NMS_TOP_N, 'rpn_post_nms_top_n': cfg.TEST.RPN_POST_NMS_TOP_N, 'threshold': cfg.TEST.RPN_NMS_THRESH, 'rpn_min_size': cfg.TEST.RPN_MIN_SIZE}
        rois = mx.sym.Custom(**dict((arg_dict.items() + aux_dict.items())))
    # Offset FC parameters for deformable ROI pooling, shared across levels.
    offset_p2_weight = mx.sym.Variable(name='offset_p2_weight', dtype=np.float32, lr_mult=0.01)
    offset_p3_weight = mx.sym.Variable(name='offset_p3_weight', dtype=np.float32, lr_mult=0.01)
    offset_p4_weight = mx.sym.Variable(name='offset_p4_weight', dtype=np.float32, lr_mult=0.01)
    offset_p5_weight = mx.sym.Variable(name='offset_p5_weight', dtype=np.float32, lr_mult=0.01)
    offset_p2_bias = mx.sym.Variable(name='offset_p2_bias', dtype=np.float32, lr_mult=0.01)
    offset_p3_bias = mx.sym.Variable(name='offset_p3_bias', dtype=np.float32, lr_mult=0.01)
    offset_p4_bias = mx.sym.Variable(name='offset_p4_bias', dtype=np.float32, lr_mult=0.01)
    offset_p5_bias = mx.sym.Variable(name='offset_p5_bias', dtype=np.float32, lr_mult=0.01)
    # Deformable ROI pooling over the FPN levels (custom op picks the level per ROI).
    roi_pool = mx.symbol.Custom(data_p2=fpn_p2, data_p3=fpn_p3, data_p4=fpn_p4, data_p5=fpn_p5, offset_weight_p2=offset_p2_weight, offset_bias_p2=offset_p2_bias, offset_weight_p3=offset_p3_weight, offset_bias_p3=offset_p3_bias, offset_weight_p4=offset_p4_weight, offset_bias_p4=offset_p4_bias, offset_weight_p5=offset_p5_weight, offset_bias_p5=offset_p5_bias, rois=rois, op_type='fpn_roi_pooling', name='fpn_roi_pooling', with_deformable=True)
    # Two shared fully-connected layers on top of the pooled features.
    fc_new_1 = mx.symbol.FullyConnected(name='fc_new_1', data=roi_pool, num_hidden=1024)
    fc_new_1_relu = mx.sym.Activation(data=fc_new_1, act_type='relu', name='fc_new_1_relu')
    fc_new_2 = mx.symbol.FullyConnected(name='fc_new_2', data=fc_new_1_relu, num_hidden=1024)
    fc_new_2_relu = mx.sym.Activation(data=fc_new_2, act_type='relu', name='fc_new_2_relu')
    if (is_train and cfg.network.base_net_lock):
        # Freeze everything up to and including the shared FC trunk.
        fc_new_2_relu = mx.sym.BlockGrad(fc_new_2_relu)
        rois = mx.sym.BlockGrad(rois)
        label = mx.sym.BlockGrad(label)
        bbox_target = mx.sym.BlockGrad(bbox_target)
        bbox_weight = mx.sym.BlockGrad(bbox_weight)
    # Class "representatives": learned per-class embedding anchors stored as
    # the weights of an FC layer fed by a constant-1 input.
    lr_mult = cfg.TRAIN.REPS_LR_MULT
    if cfg.network.SEPARABLE_REPS:
        # Representatives = per-class base vector + per-slot offset (broadcast add).
        base = mx.sym.FullyConnected(data=self.get_constant_symbol(1), name='fc_representatives_base', num_hidden=(cfg.network.EMBEDDING_DIM * (num_classes - 1)), no_bias=True, lr_mult=lr_mult)
        offset = mx.sym.FullyConnected(data=self.get_constant_symbol(1), name='fc_representatives_offset', num_hidden=(cfg.network.EMBEDDING_DIM * cfg.network.REPS_PER_CLASS), no_bias=True, lr_mult=lr_mult)
        base = mx.sym.reshape(base, shape=(cfg.network.EMBEDDING_DIM, 1, (num_classes - 1)))
        offset = mx.sym.reshape(offset, shape=(cfg.network.EMBEDDING_DIM, cfg.network.REPS_PER_CLASS, 1))
        representatives = mx.sym.broadcast_add(base, offset, name='fc_representatives')
    else:
        representatives = mx.sym.FullyConnected(data=self.get_constant_symbol(1), name='fc_representatives', num_hidden=((cfg.network.EMBEDDING_DIM * cfg.network.REPS_PER_CLASS) * (num_classes - 1)), no_bias=True, lr_mult=lr_mult)
        representatives = mx.sym.reshape(representatives, shape=(cfg.network.EMBEDDING_DIM, cfg.network.REPS_PER_CLASS, (num_classes - 1)))
    if cfg.network.REP_L2_NORM:
        # L2-normalize each representative along the embedding axis.
        representatives = mx.sym.transpose(mx.sym.L2Normalization(mx.sym.transpose(representatives, axes=(1, 0, 2)), mode='channel'), axes=(1, 0, 2))
    extra_outputs = [mx.sym.BlockGrad(representatives)]
    eps = 1e-05
    # NOTE(review): this 3-layer embedding tower ('x') is never consumed —
    # batch_embed below is taken directly from fc_new_2_relu.  Confirm
    # whether bypassing the tower is intentional (its params still exist).
    x = mx.sym.FullyConnected(name='embed_dense_1', data=fc_new_2_relu, num_hidden=2048)
    x = mx.sym.BatchNorm(name='embed_batchNorm_1', data=x, use_global_stats=True, fix_gamma=False, eps=eps)
    x = mx.sym.Activation(name='embed_relu_1', data=x, act_type='relu')
    x = mx.sym.FullyConnected(name='embed_dense_2', data=x, num_hidden=1024)
    x = mx.sym.BatchNorm(name='embed_batchNorm_2', data=x, use_global_stats=True, fix_gamma=False, eps=eps)
    x = mx.sym.Activation(name='embed_relu_2', data=x, act_type='relu')
    x = mx.sym.FullyConnected(name='embed_dense_3', data=x, num_hidden=1024)
    batch_embed = mx.sym.identity(name='batch_embed', data=fc_new_2_relu)
    if cfg.network.EMBED_L2_NORM:
        batch_embed = mx.sym.L2Normalization(data=batch_embed, name='batch_embed_nrm', mode='instance')
    # Cosine similarity of each ROI embedding to every representative, then
    # converted to squared distances.
    cos_sim = mx.sym.dot(batch_embed, representatives, transpose_b=False)
    all_cls_rep_dist = self.cos_sim_2_dist(cos_sim, cfg, embd=batch_embed, reps=representatives)
    if (is_train and cfg.network.EMBED_LOSS_ENABLED):
        # Hinge-style margin loss: distance to the true class's closest
        # representative vs. the closest wrong-class representative.
        all_cls_min_dist = mx.sym.min_axis(all_cls_rep_dist, axis=1, keepdims=True)
        all_cls_min_dist = mx.sym.reshape(all_cls_min_dist, shape=(0, (num_classes - 1)))
        # One-hot masks over foreground classes (background column sliced off);
        # the 1000 off-value pushes the true class out of the "false" min.
        mod_true_class = mx.sym.slice_axis(mx.sym.one_hot(label, depth=num_classes, on_value=1, off_value=0), axis=1, begin=1, end=None)
        mod_false_class = mx.sym.slice_axis(mx.sym.one_hot(label, depth=num_classes, on_value=1000, off_value=0), axis=1, begin=1, end=None)
        min_dist_true = mx.sym.sum_axis(mx.sym.broadcast_mul(all_cls_min_dist, mod_true_class), axis=1)
        min_dist_false = mx.sym.min_axis(mx.sym.broadcast_add(all_cls_min_dist, mod_false_class), axis=1)
        embed_loss_val = mx.sym.broadcast_sub(min_dist_true, min_dist_false)
        embed_loss_val = mx.sym.broadcast_add(embed_loss_val, self.get_constant_symbol(cfg.network.EMBED_LOSS_MARGIN))
        embed_loss_val = mx.sym.relu(embed_loss_val)
        embed_loss_val = mx.sym.reshape(embed_loss_val, shape=(0, 1))
    if (is_train and cfg.network.REPS_CLS_LOSS):
        # Margin loss between representatives themselves: each representative
        # should be closer to a same-class peer than to any other class.
        mask_block_ones = mx.sym.ones(shape=(cfg.network.REPS_PER_CLASS, cfg.network.REPS_PER_CLASS))
        mask_block_zeros = mx.sym.zeros(shape=(cfg.network.REPS_PER_CLASS, cfg.network.REPS_PER_CLASS))
        # Block-diagonal 0/1 mask marking same-class representative pairs.
        mask = None
        for iC1 in range((num_classes - 1)):
            mask_row = None
            for iC2 in range((num_classes - 1)):
                if (iC1 == iC2):
                    cblock = mask_block_ones
                else:
                    cblock = mask_block_zeros
                if (mask_row is None):
                    mask_row = cblock
                else:
                    mask_row = mx.sym.concat(mask_row, cblock, dim=1)
            if (mask is None):
                mask = mask_row
            else:
                mask = mx.sym.concat(mask, mask_row, dim=0)
        # Large additive penalties exclude unwanted pairs from each min.
        mask_NC = mx.sym.broadcast_mul(self.get_constant_symbol(1000), mask)
        mask_C = mx.sym.broadcast_sub(self.get_constant_symbol(1000), mask_NC)
        mask_C = mx.sym.BlockGrad(mask_C)
        mask_NC = mx.sym.BlockGrad(mask_NC)
        # Pairwise distances among all representatives.
        R = mx.sym.reshape(mx.sym.transpose(representatives, axes=(0, 2, 1)), shape=(0, (- 1)))
        R2R_cos_sim = mx.sym.dot(R, R, transpose_a=True)
        R2R = self.cos_sim_2_dist_generic(R2R_cos_sim, x=R, y=R, x_is_norm=cfg.network.REP_L2_NORM, y_is_norm=cfg.network.REP_L2_NORM)
        C2C = mx.sym.broadcast_add(R2R, mask_C)
        C2NC = mx.sym.broadcast_add(R2R, mask_NC)
        # k=2 ascending then slice index 1: the smallest value is the
        # zero self-distance, so take the second smallest.
        min_dist_C = mx.sym.topk(C2C, axis=1, k=2, ret_typ='value', is_ascend=True)
        min_dist_C = mx.sym.slice_axis(min_dist_C, axis=1, begin=1, end=2)
        min_dist_NC = mx.sym.min_axis(C2NC, axis=1, keepdims=True)
        reps_cls_loss_val = mx.sym.broadcast_sub(min_dist_C, min_dist_NC)
        reps_cls_loss_val = mx.sym.broadcast_add(reps_cls_loss_val, self.get_constant_symbol(cfg.network.EMBED_LOSS_MARGIN))
        reps_cls_loss_val = mx.sym.relu(reps_cls_loss_val)
    # Gaussian kernel turns distances into (0, 1] scores; per-class score is
    # the best over that class's representatives.
    probs = mx.sym.exp(mx.sym.broadcast_mul(all_cls_rep_dist, self.get_constant_symbol(((- 0.5) / float((cfg.network.SIGMA ** 2))))))
    comb_cls_scores = mx.sym.max_axis(probs, axis=1, keepdims=False)
    comb_cls_scores = mx.sym.broadcast_add(comb_cls_scores, self.get_constant_symbol(1e-07))
    # Background score complements the best foreground score.
    bg_scores = mx.sym.broadcast_sub(self.get_constant_symbol((1 + 1e-07)), mx.sym.max_axis(comb_cls_scores, axis=1, keepdims=True))
    cls_score = mx.sym.concat(bg_scores, comb_cls_scores, dim=1, name='bg_concat')
    cls_score = mx.sym.reshape(cls_score, shape=(0, (- 1)))
    if cfg.network.SOFTMAX_ENABLED:
        # Temperature-like scaling before the softmax loss/activation.
        cls_score = mx.sym.broadcast_mul(self.get_constant_symbol(cfg.network.SOFTMAX_MUL), cls_score)
    if cfg.network.ADDITIONAL_LINEAR_CLS_LOSS:
        # Auxiliary plain linear classifier on the shared FC features.
        cls_score_lin = mx.symbol.FullyConnected(name='cls_score_lin', data=fc_new_2_relu, num_hidden=num_classes)
    bbox_pred = mx.symbol.FullyConnected(name='bbox_pred', data=fc_new_2_relu, num_hidden=(num_reg_classes * 4))
    if is_train:
        if cfg.TRAIN.ENABLE_OHEM:
            # Online hard example mining: keep only the hardest ROIs.
            # NOTE(review): the BoxAnnotatorOHEM prop declares an 'rm_last'
            # parameter that is not passed here — confirm it has a default.
            (labels_ohem, bbox_weights_ohem) = mx.sym.Custom(op_type='BoxAnnotatorOHEM', num_classes=num_classes, num_reg_classes=num_reg_classes, roi_per_img=cfg.TRAIN.BATCH_ROIS_OHEM, cls_score=cls_score, bbox_pred=bbox_pred, labels=label, bbox_targets=bbox_target, bbox_weights=bbox_weight)
            if cfg.network.SOFTMAX_ENABLED:
                cls_prob = mx.sym.SoftmaxOutput(name='cls_prob', data=cls_score, label=labels_ohem, normalization='valid', use_ignore=True, ignore_label=(- 1))
            else:
                # Manual cross-entropy on the (already probability-like)
                # scores, masking out ROIs OHEM marked with label -1.
                zz = mx.sym.zeros_like(label)
                cls_prob = mx.sym.BlockGrad(cls_score)
                invalid = mx.sym.broadcast_equal(labels_ohem, self.get_constant_symbol((- 1)))
                # Shift -1 labels to 0 so one_hot is well-defined; 'invalid'
                # rows are zeroed by the where() below anyway.
                minoh_labels = mx.sym.one_hot(mx.sym.broadcast_add(mx.sym.cast(invalid, dtype='float32'), labels_ohem), depth=num_classes, on_value=(- 1), off_value=0)
                ce_loss = mx.sym.where(invalid, x=zz, y=mx.sym.sum(mx.sym.broadcast_mul(minoh_labels, mx.sym.log(mx.sym.broadcast_add(cls_score, self.get_constant_symbol(1e-07)))), axis=1))
                ce_loss = mx.sym.MakeLoss(ce_loss, normalization='valid')
            bbox_loss_ = (bbox_weights_ohem * mx.sym.smooth_l1(name='bbox_loss_', scalar=1.0, data=(bbox_pred - bbox_target)))
            bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=(1.0 / cfg.TRAIN.BATCH_ROIS_OHEM))
            rcnn_label = labels_ohem
            if cfg.network.ADDITIONAL_LINEAR_CLS_LOSS:
                cls_prob_lin = mx.sym.SoftmaxOutput(name='cls_prob_lin', data=cls_score_lin, label=labels_ohem, normalization='valid', use_ignore=True, ignore_label=(- 1))
            if cfg.network.EMBED_LOSS_ENABLED:
                # NOTE(review): gates the embed loss with the LAST bbox-weight
                # column here but the FIRST column in the non-OHEM branch
                # below — confirm the asymmetry is intentional.
                embed_loss_ = (mx.sym.slice_axis(bbox_weights_ohem, axis=1, begin=(- 1), end=None) * embed_loss_val)
                embed_loss = mx.sym.MakeLoss(name='embed_loss', data=embed_loss_, grad_scale=(1.0 / cfg.TRAIN.BATCH_ROIS_OHEM))
        else:
            cls_prob = mx.sym.SoftmaxOutput(name='cls_prob', data=cls_score, label=label, normalization='valid')
            bbox_loss_ = (bbox_weight * mx.sym.smooth_l1(name='bbox_loss_', scalar=1.0, data=(bbox_pred - bbox_target)))
            bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=(1.0 / cfg.TRAIN.BATCH_ROIS))
            rcnn_label = label
            if cfg.network.ADDITIONAL_LINEAR_CLS_LOSS:
                cls_prob_lin = mx.sym.SoftmaxOutput(name='cls_prob_lin', data=cls_score_lin, label=label, normalization='valid')
            if cfg.network.EMBED_LOSS_ENABLED:
                embed_loss_ = (mx.sym.slice_axis(bbox_weight, axis=1, begin=0, end=1) * embed_loss_val)
                embed_loss = mx.sym.MakeLoss(name='embed_loss', data=embed_loss_, grad_scale=(1.0 / cfg.TRAIN.BATCH_ROIS))
        # Collect optional auxiliary outputs/losses.
        if cfg.network.EMBED_LOSS_ENABLED:
            extra_outputs += [embed_loss]
        if cfg.network.REPS_CLS_LOSS:
            extra_outputs += [mx.sym.MakeLoss(name='reps_cls_loss', data=reps_cls_loss_val, grad_scale=(1.0 / (cfg.network.REPS_PER_CLASS * (num_classes - 1))))]
        if cfg.network.ADDITIONAL_LINEAR_CLS_LOSS:
            extra_outputs += [cls_prob_lin]
        if (not cfg.network.SOFTMAX_ENABLED):
            # NOTE(review): ce_loss is only defined in the OHEM branch;
            # SOFTMAX_ENABLED=False without OHEM would raise NameError here.
            extra_outputs += [ce_loss]
        extra_outputs += [mx.sym.BlockGrad(rois), mx.sym.identity(mx.sym.BlockGrad(batch_embed), name='psp_final_embed')]
        # Reshape to (batch, rois, ...) for the metric modules.
        rcnn_label = mx.sym.Reshape(data=rcnn_label, shape=(cfg.TRAIN.BATCH_IMAGES, (- 1)), name='label_reshape')
        cls_prob = mx.sym.Reshape(data=cls_prob, shape=(cfg.TRAIN.BATCH_IMAGES, (- 1), num_classes), name='cls_prob_reshape')
        bbox_loss = mx.sym.Reshape(data=bbox_loss, shape=(cfg.TRAIN.BATCH_IMAGES, (- 1), (4 * num_reg_classes)), name='bbox_loss_reshape')
        if cfg.network.base_net_lock:
            # RPN losses are omitted when the base net is frozen.
            group = mx.sym.Group(([cls_prob, bbox_loss, mx.sym.BlockGrad(rcnn_label)] + extra_outputs))
        else:
            group = mx.sym.Group(([rpn_cls_output, rpn_bbox_loss, cls_prob, bbox_loss, mx.sym.BlockGrad(rcnn_label)] + extra_outputs))
    else:
        # Inference outputs: rois, class probabilities, bbox deltas, plus the
        # raw embeddings and (unscaled) class scores for downstream use.
        cls_prob = mx.sym.SoftmaxActivation(name='cls_prob', data=cls_score)
        cls_prob = mx.sym.Reshape(data=cls_prob, shape=(cfg.TEST.BATCH_IMAGES, (- 1), num_classes), name='cls_prob_reshape')
        bbox_pred = mx.sym.Reshape(data=bbox_pred, shape=(cfg.TEST.BATCH_IMAGES, (- 1), (4 * num_reg_classes)), name='bbox_pred_reshape')
        cls_score_orig = cls_score
        if cfg.network.SOFTMAX_ENABLED:
            # Undo the softmax temperature scaling applied above.
            cls_score_orig = mx.sym.broadcast_div(cls_score_orig, self.get_constant_symbol(cfg.network.SOFTMAX_MUL))
        cls_score_orig = mx.sym.Reshape(data=cls_score_orig, shape=(cfg.TEST.BATCH_IMAGES, (- 1), num_classes))
        group = mx.sym.Group([rois, cls_prob, bbox_pred, mx.sym.identity(batch_embed, name='psp_final_embed'), mx.sym.identity(cls_score_orig, name='cls_score')])
    self.sym = group
    return group
def init_weight_rcnn(self, cfg, arg_params, aux_params):
    """Initialize the RCNN-head parameters.

    Gaussian weights / zero biases for the FC trunk, bbox regressor and
    (optionally) the auxiliary linear classifier; representatives get a
    wider gaussian, optionally in separable (base + offset) form.
    """
    def _gauss(key, sigma=0.01):
        # Draw a fresh gaussian tensor shaped like the named parameter.
        return mx.random.normal(0, sigma, shape=self.arg_shape_dict[key])

    for layer in ('fc_new_1', 'fc_new_2', 'bbox_pred'):
        arg_params[(layer + '_weight')] = _gauss(layer + '_weight')
        arg_params[(layer + '_bias')] = mx.nd.zeros(shape=self.arg_shape_dict[(layer + '_bias')])
    name = 'fc_representatives'
    if cfg.network.SEPARABLE_REPS:
        arg_params[(name + '_base_weight')] = _gauss(name + '_base_weight', 0.1)
        arg_params[(name + '_offset_weight')] = _gauss(name + '_offset_weight', 0.1)
    elif cfg.network.SEPARABLE_REPS_INIT:
        # Non-separable layout, but initialized as class-center + per-slot delta.
        centers = mx.random.normal(0, 0.1, shape=(cfg.network.EMBEDDING_DIM, 1, (cfg.dataset.NUM_CLASSES - 1)))
        deltas = mx.random.normal(0, 0.05, shape=(cfg.network.EMBEDDING_DIM, cfg.network.REPS_PER_CLASS, 1))
        arg_params[(name + '_weight')] = mx.nd.reshape((centers + deltas), shape=(-1, 1))
    else:
        arg_params[(name + '_weight')] = _gauss(name + '_weight', 0.1)
    if cfg.network.ADDITIONAL_LINEAR_CLS_LOSS:
        arg_params['cls_score_lin_weight'] = _gauss('cls_score_lin_weight')
        arg_params['cls_score_lin_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['cls_score_lin_bias'])
def init_deformable_convnet(self, cfg, arg_params, aux_params):
    """Zero-initialize every deformable-conv offset branch.

    Zero offsets make each deformable conv start out as a regular conv.
    """
    deformable_layers = ('res5a_branch2b', 'res5b_branch2b', 'res5c_branch2b',
                         'res3b3_branch2b', 'res4b22_branch2b')
    for layer in deformable_layers:
        for suffix in ('_offset_weight', '_offset_bias'):
            key = layer + suffix
            arg_params[key] = mx.nd.zeros(shape=self.arg_shape_dict[key])
def init_weight_fpn(self, cfg, arg_params, aux_params):
    """Initialize the FPN convolution layers.

    Every FPN conv (the per-level output convs ``fpn_p2``..``fpn_p6`` and
    the lateral 1x1 convs ``fpn_p2_1x1``..``fpn_p5_1x1``) gets a weight
    drawn from N(0, 0.01) and a zero bias.  Updates ``arg_params`` in place.

    Args:
        cfg: experiment configuration (unused here; kept so all ``init_*``
            methods share the same signature).
        arg_params: dict mapping parameter name -> NDArray, mutated in place.
        aux_params: dict of auxiliary states (unused here).
    """
    # Order matches the original hand-written initialization: output convs
    # top-down first, then the lateral 1x1 convs.
    fpn_layers = [
        'fpn_p6', 'fpn_p5', 'fpn_p4', 'fpn_p3', 'fpn_p2',
        'fpn_p5_1x1', 'fpn_p4_1x1', 'fpn_p3_1x1', 'fpn_p2_1x1',
    ]
    for layer in fpn_layers:
        weight_key = layer + '_weight'
        bias_key = layer + '_bias'
        arg_params[weight_key] = mx.random.normal(
            0, 0.01, shape=self.arg_shape_dict[weight_key])
        arg_params[bias_key] = mx.nd.zeros(shape=self.arg_shape_dict[bias_key])
def init_weight(self, cfg, arg_params, aux_params):
    """Create fresh initializations for the new layers and merge them into
    the (possibly pretrained) ``arg_params`` / ``aux_params``.

    When ``cfg.network.pretrained_weights_are_priority`` is set, a freshly
    initialized tensor only replaces a pretrained entry that is missing or
    whose shape does not match; otherwise the fresh values always win.
    Both parameter dicts are mutated in place.
    """
    fresh_args = {}
    fresh_aux = {}
    # Shared RPN head parameters: offset convs start at zero, the rest
    # get small Gaussian weights; biases are always zero.
    for base in self.shared_param_list:
        weight_key = base + '_weight'
        bias_key = base + '_bias'
        if 'offset' in base:
            fresh_args[weight_key] = mx.nd.zeros(shape=self.arg_shape_dict[weight_key])
        else:
            fresh_args[weight_key] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict[weight_key])
        fresh_args[bias_key] = mx.nd.zeros(shape=self.arg_shape_dict[bias_key])
    self.init_deformable_convnet(cfg, fresh_args, fresh_aux)
    self.init_weight_rcnn(cfg, fresh_args, fresh_aux)
    self.init_weight_fpn(cfg, fresh_args, fresh_aux)
    prefer_pretrained = cfg.network.pretrained_weights_are_priority
    for key in fresh_args:
        if not prefer_pretrained:
            arg_params[key] = fresh_args[key]
        elif (key not in arg_params) or (arg_params[key].shape != fresh_args[key].shape):
            # Pretrained entry is absent or incompatible — fall back to fresh.
            arg_params[key] = fresh_args[key]
    for key in fresh_aux:
        if not prefer_pretrained:
            aux_params[key] = fresh_aux[key]
        elif key not in aux_params:
            aux_params[key] = fresh_aux[key]
|
class resnet_v1_101_fpn_rcnn(Symbol):
def __init__(self):
    """Declare the RPN parameters that are shared across all FPN levels.

    Builds ``shared_param_dict``, mapping each shared parameter name to an
    ``mx.sym.Variable`` so every pyramid level's RPN head reuses the same
    weights and biases.
    """
    self.shared_param_list = ['rpn_conv', 'rpn_cls_score', 'rpn_bbox_pred']
    self.shared_param_dict = {}
    for base in self.shared_param_list:
        for suffix in ('_weight', '_bias'):
            key = base + suffix
            self.shared_param_dict[key] = mx.sym.Variable(key)
def get_resnet_backbone(self, data, with_dilated=False, with_dconv=False, with_dpyramid=False, eps=1e-05):
conv1 = mx.symbol.Convolution(name='conv1', data=data, num_filter=64, pad=(3, 3), kernel=(7, 7), stride=(2, 2), no_bias=True)
bn_conv1 = mx.symbol.BatchNorm(name='bn_conv1', data=conv1, use_global_stats=True, fix_gamma=False, eps=eps)
scale_conv1 = bn_conv1
conv1_relu = mx.symbol.Activation(name='conv1_relu', data=scale_conv1, act_type='relu')
pool1 = mx.symbol.Pooling(name='pool1', data=conv1_relu, pooling_convention='full', pad=(0, 0), kernel=(3, 3), stride=(2, 2), pool_type='max')
res2a_branch1 = mx.symbol.Convolution(name='res2a_branch1', data=pool1, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2a_branch1 = mx.symbol.BatchNorm(name='bn2a_branch1', data=res2a_branch1, use_global_stats=True, fix_gamma=False, eps=eps)
scale2a_branch1 = bn2a_branch1
res2a_branch2a = mx.symbol.Convolution(name='res2a_branch2a', data=pool1, num_filter=64, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2a_branch2a = mx.symbol.BatchNorm(name='bn2a_branch2a', data=res2a_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale2a_branch2a = bn2a_branch2a
res2a_branch2a_relu = mx.symbol.Activation(name='res2a_branch2a_relu', data=scale2a_branch2a, act_type='relu')
res2a_branch2b = mx.symbol.Convolution(name='res2a_branch2b', data=res2a_branch2a_relu, num_filter=64, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn2a_branch2b = mx.symbol.BatchNorm(name='bn2a_branch2b', data=res2a_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale2a_branch2b = bn2a_branch2b
res2a_branch2b_relu = mx.symbol.Activation(name='res2a_branch2b_relu', data=scale2a_branch2b, act_type='relu')
res2a_branch2c = mx.symbol.Convolution(name='res2a_branch2c', data=res2a_branch2b_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2a_branch2c = mx.symbol.BatchNorm(name='bn2a_branch2c', data=res2a_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale2a_branch2c = bn2a_branch2c
res2a = mx.symbol.broadcast_add(*[scale2a_branch1, scale2a_branch2c], name='res2a')
res2a_relu = mx.symbol.Activation(name='res2a_relu', data=res2a, act_type='relu')
res2b_branch2a = mx.symbol.Convolution(name='res2b_branch2a', data=res2a_relu, num_filter=64, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2b_branch2a = mx.symbol.BatchNorm(name='bn2b_branch2a', data=res2b_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale2b_branch2a = bn2b_branch2a
res2b_branch2a_relu = mx.symbol.Activation(name='res2b_branch2a_relu', data=scale2b_branch2a, act_type='relu')
res2b_branch2b = mx.symbol.Convolution(name='res2b_branch2b', data=res2b_branch2a_relu, num_filter=64, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn2b_branch2b = mx.symbol.BatchNorm(name='bn2b_branch2b', data=res2b_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale2b_branch2b = bn2b_branch2b
res2b_branch2b_relu = mx.symbol.Activation(name='res2b_branch2b_relu', data=scale2b_branch2b, act_type='relu')
res2b_branch2c = mx.symbol.Convolution(name='res2b_branch2c', data=res2b_branch2b_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2b_branch2c = mx.symbol.BatchNorm(name='bn2b_branch2c', data=res2b_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale2b_branch2c = bn2b_branch2c
res2b = mx.symbol.broadcast_add(*[res2a_relu, scale2b_branch2c], name='res2b')
res2b_relu = mx.symbol.Activation(name='res2b_relu', data=res2b, act_type='relu')
res2c_branch2a = mx.symbol.Convolution(name='res2c_branch2a', data=res2b_relu, num_filter=64, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2c_branch2a = mx.symbol.BatchNorm(name='bn2c_branch2a', data=res2c_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale2c_branch2a = bn2c_branch2a
res2c_branch2a_relu = mx.symbol.Activation(name='res2c_branch2a_relu', data=scale2c_branch2a, act_type='relu')
res2c_branch2b = mx.symbol.Convolution(name='res2c_branch2b', data=res2c_branch2a_relu, num_filter=64, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn2c_branch2b = mx.symbol.BatchNorm(name='bn2c_branch2b', data=res2c_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale2c_branch2b = bn2c_branch2b
res2c_branch2b_relu = mx.symbol.Activation(name='res2c_branch2b_relu', data=scale2c_branch2b, act_type='relu')
res2c_branch2c = mx.symbol.Convolution(name='res2c_branch2c', data=res2c_branch2b_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2c_branch2c = mx.symbol.BatchNorm(name='bn2c_branch2c', data=res2c_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale2c_branch2c = bn2c_branch2c
res2c = mx.symbol.broadcast_add(*[res2b_relu, scale2c_branch2c], name='res2c')
res2c_relu = mx.symbol.Activation(name='res2c_relu', data=res2c, act_type='relu')
res3a_branch1 = mx.symbol.Convolution(name='res3a_branch1', data=res2c_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(2, 2), no_bias=True)
bn3a_branch1 = mx.symbol.BatchNorm(name='bn3a_branch1', data=res3a_branch1, use_global_stats=True, fix_gamma=False, eps=eps)
scale3a_branch1 = bn3a_branch1
res3a_branch2a = mx.symbol.Convolution(name='res3a_branch2a', data=res2c_relu, num_filter=128, pad=(0, 0), kernel=(1, 1), stride=(2, 2), no_bias=True)
bn3a_branch2a = mx.symbol.BatchNorm(name='bn3a_branch2a', data=res3a_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale3a_branch2a = bn3a_branch2a
res3a_branch2a_relu = mx.symbol.Activation(name='res3a_branch2a_relu', data=scale3a_branch2a, act_type='relu')
res3a_branch2b = mx.symbol.Convolution(name='res3a_branch2b', data=res3a_branch2a_relu, num_filter=128, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3a_branch2b = mx.symbol.BatchNorm(name='bn3a_branch2b', data=res3a_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale3a_branch2b = bn3a_branch2b
res3a_branch2b_relu = mx.symbol.Activation(name='res3a_branch2b_relu', data=scale3a_branch2b, act_type='relu')
res3a_branch2c = mx.symbol.Convolution(name='res3a_branch2c', data=res3a_branch2b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3a_branch2c = mx.symbol.BatchNorm(name='bn3a_branch2c', data=res3a_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale3a_branch2c = bn3a_branch2c
res3a = mx.symbol.broadcast_add(*[scale3a_branch1, scale3a_branch2c], name='res3a')
res3a_relu = mx.symbol.Activation(name='res3a_relu', data=res3a, act_type='relu')
res3b1_branch2a = mx.symbol.Convolution(name='res3b1_branch2a', data=res3a_relu, num_filter=128, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b1_branch2a = mx.symbol.BatchNorm(name='bn3b1_branch2a', data=res3b1_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale3b1_branch2a = bn3b1_branch2a
res3b1_branch2a_relu = mx.symbol.Activation(name='res3b1_branch2a_relu', data=scale3b1_branch2a, act_type='relu')
res3b1_branch2b = mx.symbol.Convolution(name='res3b1_branch2b', data=res3b1_branch2a_relu, num_filter=128, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3b1_branch2b = mx.symbol.BatchNorm(name='bn3b1_branch2b', data=res3b1_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale3b1_branch2b = bn3b1_branch2b
res3b1_branch2b_relu = mx.symbol.Activation(name='res3b1_branch2b_relu', data=scale3b1_branch2b, act_type='relu')
res3b1_branch2c = mx.symbol.Convolution(name='res3b1_branch2c', data=res3b1_branch2b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b1_branch2c = mx.symbol.BatchNorm(name='bn3b1_branch2c', data=res3b1_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale3b1_branch2c = bn3b1_branch2c
res3b1 = mx.symbol.broadcast_add(*[res3a_relu, scale3b1_branch2c], name='res3b1')
res3b1_relu = mx.symbol.Activation(name='res3b1_relu', data=res3b1, act_type='relu')
res3b2_branch2a = mx.symbol.Convolution(name='res3b2_branch2a', data=res3b1_relu, num_filter=128, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b2_branch2a = mx.symbol.BatchNorm(name='bn3b2_branch2a', data=res3b2_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale3b2_branch2a = bn3b2_branch2a
res3b2_branch2a_relu = mx.symbol.Activation(name='res3b2_branch2a_relu', data=scale3b2_branch2a, act_type='relu')
res3b2_branch2b = mx.symbol.Convolution(name='res3b2_branch2b', data=res3b2_branch2a_relu, num_filter=128, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3b2_branch2b = mx.symbol.BatchNorm(name='bn3b2_branch2b', data=res3b2_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale3b2_branch2b = bn3b2_branch2b
res3b2_branch2b_relu = mx.symbol.Activation(name='res3b2_branch2b_relu', data=scale3b2_branch2b, act_type='relu')
res3b2_branch2c = mx.symbol.Convolution(name='res3b2_branch2c', data=res3b2_branch2b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b2_branch2c = mx.symbol.BatchNorm(name='bn3b2_branch2c', data=res3b2_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale3b2_branch2c = bn3b2_branch2c
res3b2 = mx.symbol.broadcast_add(*[res3b1_relu, scale3b2_branch2c], name='res3b2')
res3b2_relu = mx.symbol.Activation(name='res3b2_relu', data=res3b2, act_type='relu')
res3b3_branch2a = mx.symbol.Convolution(name='res3b3_branch2a', data=res3b2_relu, num_filter=128, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b3_branch2a = mx.symbol.BatchNorm(name='bn3b3_branch2a', data=res3b3_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale3b3_branch2a = bn3b3_branch2a
res3b3_branch2a_relu = mx.symbol.Activation(name='res3b3_branch2a_relu', data=scale3b3_branch2a, act_type='relu')
if with_dpyramid:
res3b3_branch2b_offset = mx.symbol.Convolution(name='res3b3_branch2b_offset', data=res3b3_branch2a_relu, num_filter=72, pad=(1, 1), kernel=(3, 3), stride=(1, 1))
res3b3_branch2b = mx.contrib.symbol.DeformableConvolution(name='res3b3_branch2b', data=res3b3_branch2a_relu, offset=res3b3_branch2b_offset, num_filter=128, pad=(1, 1), kernel=(3, 3), num_deformable_group=4, stride=(1, 1), no_bias=True)
else:
res3b3_branch2b = mx.symbol.Convolution(name='res3b3_branch2b', data=res3b3_branch2a_relu, num_filter=128, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3b3_branch2b = mx.symbol.BatchNorm(name='bn3b3_branch2b', data=res3b3_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale3b3_branch2b = bn3b3_branch2b
res3b3_branch2b_relu = mx.symbol.Activation(name='res3b3_branch2b_relu', data=scale3b3_branch2b, act_type='relu')
res3b3_branch2c = mx.symbol.Convolution(name='res3b3_branch2c', data=res3b3_branch2b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b3_branch2c = mx.symbol.BatchNorm(name='bn3b3_branch2c', data=res3b3_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale3b3_branch2c = bn3b3_branch2c
res3b3 = mx.symbol.broadcast_add(*[res3b2_relu, scale3b3_branch2c], name='res3b3')
res3b3_relu = mx.symbol.Activation(name='res3b3_relu', data=res3b3, act_type='relu')
res4a_branch1 = mx.symbol.Convolution(name='res4a_branch1', data=res3b3_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(2, 2), no_bias=True)
bn4a_branch1 = mx.symbol.BatchNorm(name='bn4a_branch1', data=res4a_branch1, use_global_stats=True, fix_gamma=False, eps=eps)
scale4a_branch1 = bn4a_branch1
res4a_branch2a = mx.symbol.Convolution(name='res4a_branch2a', data=res3b3_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(2, 2), no_bias=True)
bn4a_branch2a = mx.symbol.BatchNorm(name='bn4a_branch2a', data=res4a_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4a_branch2a = bn4a_branch2a
res4a_branch2a_relu = mx.symbol.Activation(name='res4a_branch2a_relu', data=scale4a_branch2a, act_type='relu')
res4a_branch2b = mx.symbol.Convolution(name='res4a_branch2b', data=res4a_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4a_branch2b = mx.symbol.BatchNorm(name='bn4a_branch2b', data=res4a_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4a_branch2b = bn4a_branch2b
res4a_branch2b_relu = mx.symbol.Activation(name='res4a_branch2b_relu', data=scale4a_branch2b, act_type='relu')
res4a_branch2c = mx.symbol.Convolution(name='res4a_branch2c', data=res4a_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4a_branch2c = mx.symbol.BatchNorm(name='bn4a_branch2c', data=res4a_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4a_branch2c = bn4a_branch2c
res4a = mx.symbol.broadcast_add(*[scale4a_branch1, scale4a_branch2c], name='res4a')
res4a_relu = mx.symbol.Activation(name='res4a_relu', data=res4a, act_type='relu')
res4b1_branch2a = mx.symbol.Convolution(name='res4b1_branch2a', data=res4a_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b1_branch2a = mx.symbol.BatchNorm(name='bn4b1_branch2a', data=res4b1_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b1_branch2a = bn4b1_branch2a
res4b1_branch2a_relu = mx.symbol.Activation(name='res4b1_branch2a_relu', data=scale4b1_branch2a, act_type='relu')
res4b1_branch2b = mx.symbol.Convolution(name='res4b1_branch2b', data=res4b1_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b1_branch2b = mx.symbol.BatchNorm(name='bn4b1_branch2b', data=res4b1_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b1_branch2b = bn4b1_branch2b
res4b1_branch2b_relu = mx.symbol.Activation(name='res4b1_branch2b_relu', data=scale4b1_branch2b, act_type='relu')
res4b1_branch2c = mx.symbol.Convolution(name='res4b1_branch2c', data=res4b1_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b1_branch2c = mx.symbol.BatchNorm(name='bn4b1_branch2c', data=res4b1_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b1_branch2c = bn4b1_branch2c
res4b1 = mx.symbol.broadcast_add(*[res4a_relu, scale4b1_branch2c], name='res4b1')
res4b1_relu = mx.symbol.Activation(name='res4b1_relu', data=res4b1, act_type='relu')
res4b2_branch2a = mx.symbol.Convolution(name='res4b2_branch2a', data=res4b1_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b2_branch2a = mx.symbol.BatchNorm(name='bn4b2_branch2a', data=res4b2_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b2_branch2a = bn4b2_branch2a
res4b2_branch2a_relu = mx.symbol.Activation(name='res4b2_branch2a_relu', data=scale4b2_branch2a, act_type='relu')
res4b2_branch2b = mx.symbol.Convolution(name='res4b2_branch2b', data=res4b2_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b2_branch2b = mx.symbol.BatchNorm(name='bn4b2_branch2b', data=res4b2_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b2_branch2b = bn4b2_branch2b
res4b2_branch2b_relu = mx.symbol.Activation(name='res4b2_branch2b_relu', data=scale4b2_branch2b, act_type='relu')
res4b2_branch2c = mx.symbol.Convolution(name='res4b2_branch2c', data=res4b2_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b2_branch2c = mx.symbol.BatchNorm(name='bn4b2_branch2c', data=res4b2_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b2_branch2c = bn4b2_branch2c
res4b2 = mx.symbol.broadcast_add(*[res4b1_relu, scale4b2_branch2c], name='res4b2')
res4b2_relu = mx.symbol.Activation(name='res4b2_relu', data=res4b2, act_type='relu')
res4b3_branch2a = mx.symbol.Convolution(name='res4b3_branch2a', data=res4b2_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b3_branch2a = mx.symbol.BatchNorm(name='bn4b3_branch2a', data=res4b3_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b3_branch2a = bn4b3_branch2a
res4b3_branch2a_relu = mx.symbol.Activation(name='res4b3_branch2a_relu', data=scale4b3_branch2a, act_type='relu')
res4b3_branch2b = mx.symbol.Convolution(name='res4b3_branch2b', data=res4b3_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b3_branch2b = mx.symbol.BatchNorm(name='bn4b3_branch2b', data=res4b3_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b3_branch2b = bn4b3_branch2b
res4b3_branch2b_relu = mx.symbol.Activation(name='res4b3_branch2b_relu', data=scale4b3_branch2b, act_type='relu')
res4b3_branch2c = mx.symbol.Convolution(name='res4b3_branch2c', data=res4b3_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b3_branch2c = mx.symbol.BatchNorm(name='bn4b3_branch2c', data=res4b3_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b3_branch2c = bn4b3_branch2c
res4b3 = mx.symbol.broadcast_add(*[res4b2_relu, scale4b3_branch2c], name='res4b3')
res4b3_relu = mx.symbol.Activation(name='res4b3_relu', data=res4b3, act_type='relu')
res4b4_branch2a = mx.symbol.Convolution(name='res4b4_branch2a', data=res4b3_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b4_branch2a = mx.symbol.BatchNorm(name='bn4b4_branch2a', data=res4b4_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b4_branch2a = bn4b4_branch2a
res4b4_branch2a_relu = mx.symbol.Activation(name='res4b4_branch2a_relu', data=scale4b4_branch2a, act_type='relu')
res4b4_branch2b = mx.symbol.Convolution(name='res4b4_branch2b', data=res4b4_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b4_branch2b = mx.symbol.BatchNorm(name='bn4b4_branch2b', data=res4b4_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b4_branch2b = bn4b4_branch2b
res4b4_branch2b_relu = mx.symbol.Activation(name='res4b4_branch2b_relu', data=scale4b4_branch2b, act_type='relu')
res4b4_branch2c = mx.symbol.Convolution(name='res4b4_branch2c', data=res4b4_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b4_branch2c = mx.symbol.BatchNorm(name='bn4b4_branch2c', data=res4b4_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b4_branch2c = bn4b4_branch2c
res4b4 = mx.symbol.broadcast_add(*[res4b3_relu, scale4b4_branch2c], name='res4b4')
res4b4_relu = mx.symbol.Activation(name='res4b4_relu', data=res4b4, act_type='relu')
res4b5_branch2a = mx.symbol.Convolution(name='res4b5_branch2a', data=res4b4_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b5_branch2a = mx.symbol.BatchNorm(name='bn4b5_branch2a', data=res4b5_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b5_branch2a = bn4b5_branch2a
res4b5_branch2a_relu = mx.symbol.Activation(name='res4b5_branch2a_relu', data=scale4b5_branch2a, act_type='relu')
res4b5_branch2b = mx.symbol.Convolution(name='res4b5_branch2b', data=res4b5_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b5_branch2b = mx.symbol.BatchNorm(name='bn4b5_branch2b', data=res4b5_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b5_branch2b = bn4b5_branch2b
res4b5_branch2b_relu = mx.symbol.Activation(name='res4b5_branch2b_relu', data=scale4b5_branch2b, act_type='relu')
res4b5_branch2c = mx.symbol.Convolution(name='res4b5_branch2c', data=res4b5_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b5_branch2c = mx.symbol.BatchNorm(name='bn4b5_branch2c', data=res4b5_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b5_branch2c = bn4b5_branch2c
res4b5 = mx.symbol.broadcast_add(*[res4b4_relu, scale4b5_branch2c], name='res4b5')
res4b5_relu = mx.symbol.Activation(name='res4b5_relu', data=res4b5, act_type='relu')
res4b6_branch2a = mx.symbol.Convolution(name='res4b6_branch2a', data=res4b5_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b6_branch2a = mx.symbol.BatchNorm(name='bn4b6_branch2a', data=res4b6_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b6_branch2a = bn4b6_branch2a
res4b6_branch2a_relu = mx.symbol.Activation(name='res4b6_branch2a_relu', data=scale4b6_branch2a, act_type='relu')
res4b6_branch2b = mx.symbol.Convolution(name='res4b6_branch2b', data=res4b6_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b6_branch2b = mx.symbol.BatchNorm(name='bn4b6_branch2b', data=res4b6_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b6_branch2b = bn4b6_branch2b
res4b6_branch2b_relu = mx.symbol.Activation(name='res4b6_branch2b_relu', data=scale4b6_branch2b, act_type='relu')
res4b6_branch2c = mx.symbol.Convolution(name='res4b6_branch2c', data=res4b6_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b6_branch2c = mx.symbol.BatchNorm(name='bn4b6_branch2c', data=res4b6_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b6_branch2c = bn4b6_branch2c
res4b6 = mx.symbol.broadcast_add(*[res4b5_relu, scale4b6_branch2c], name='res4b6')
res4b6_relu = mx.symbol.Activation(name='res4b6_relu', data=res4b6, act_type='relu')
res4b7_branch2a = mx.symbol.Convolution(name='res4b7_branch2a', data=res4b6_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b7_branch2a = mx.symbol.BatchNorm(name='bn4b7_branch2a', data=res4b7_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b7_branch2a = bn4b7_branch2a
res4b7_branch2a_relu = mx.symbol.Activation(name='res4b7_branch2a_relu', data=scale4b7_branch2a, act_type='relu')
res4b7_branch2b = mx.symbol.Convolution(name='res4b7_branch2b', data=res4b7_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b7_branch2b = mx.symbol.BatchNorm(name='bn4b7_branch2b', data=res4b7_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b7_branch2b = bn4b7_branch2b
res4b7_branch2b_relu = mx.symbol.Activation(name='res4b7_branch2b_relu', data=scale4b7_branch2b, act_type='relu')
res4b7_branch2c = mx.symbol.Convolution(name='res4b7_branch2c', data=res4b7_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b7_branch2c = mx.symbol.BatchNorm(name='bn4b7_branch2c', data=res4b7_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b7_branch2c = bn4b7_branch2c
res4b7 = mx.symbol.broadcast_add(*[res4b6_relu, scale4b7_branch2c], name='res4b7')
res4b7_relu = mx.symbol.Activation(name='res4b7_relu', data=res4b7, act_type='relu')
res4b8_branch2a = mx.symbol.Convolution(name='res4b8_branch2a', data=res4b7_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b8_branch2a = mx.symbol.BatchNorm(name='bn4b8_branch2a', data=res4b8_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b8_branch2a = bn4b8_branch2a
res4b8_branch2a_relu = mx.symbol.Activation(name='res4b8_branch2a_relu', data=scale4b8_branch2a, act_type='relu')
res4b8_branch2b = mx.symbol.Convolution(name='res4b8_branch2b', data=res4b8_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b8_branch2b = mx.symbol.BatchNorm(name='bn4b8_branch2b', data=res4b8_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b8_branch2b = bn4b8_branch2b
res4b8_branch2b_relu = mx.symbol.Activation(name='res4b8_branch2b_relu', data=scale4b8_branch2b, act_type='relu')
res4b8_branch2c = mx.symbol.Convolution(name='res4b8_branch2c', data=res4b8_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b8_branch2c = mx.symbol.BatchNorm(name='bn4b8_branch2c', data=res4b8_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b8_branch2c = bn4b8_branch2c
res4b8 = mx.symbol.broadcast_add(*[res4b7_relu, scale4b8_branch2c], name='res4b8')
res4b8_relu = mx.symbol.Activation(name='res4b8_relu', data=res4b8, act_type='relu')
res4b9_branch2a = mx.symbol.Convolution(name='res4b9_branch2a', data=res4b8_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b9_branch2a = mx.symbol.BatchNorm(name='bn4b9_branch2a', data=res4b9_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b9_branch2a = bn4b9_branch2a
res4b9_branch2a_relu = mx.symbol.Activation(name='res4b9_branch2a_relu', data=scale4b9_branch2a, act_type='relu')
res4b9_branch2b = mx.symbol.Convolution(name='res4b9_branch2b', data=res4b9_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b9_branch2b = mx.symbol.BatchNorm(name='bn4b9_branch2b', data=res4b9_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b9_branch2b = bn4b9_branch2b
res4b9_branch2b_relu = mx.symbol.Activation(name='res4b9_branch2b_relu', data=scale4b9_branch2b, act_type='relu')
res4b9_branch2c = mx.symbol.Convolution(name='res4b9_branch2c', data=res4b9_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b9_branch2c = mx.symbol.BatchNorm(name='bn4b9_branch2c', data=res4b9_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b9_branch2c = bn4b9_branch2c
res4b9 = mx.symbol.broadcast_add(*[res4b8_relu, scale4b9_branch2c], name='res4b9')
res4b9_relu = mx.symbol.Activation(name='res4b9_relu', data=res4b9, act_type='relu')
res4b10_branch2a = mx.symbol.Convolution(name='res4b10_branch2a', data=res4b9_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b10_branch2a = mx.symbol.BatchNorm(name='bn4b10_branch2a', data=res4b10_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b10_branch2a = bn4b10_branch2a
res4b10_branch2a_relu = mx.symbol.Activation(name='res4b10_branch2a_relu', data=scale4b10_branch2a, act_type='relu')
res4b10_branch2b = mx.symbol.Convolution(name='res4b10_branch2b', data=res4b10_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b10_branch2b = mx.symbol.BatchNorm(name='bn4b10_branch2b', data=res4b10_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b10_branch2b = bn4b10_branch2b
res4b10_branch2b_relu = mx.symbol.Activation(name='res4b10_branch2b_relu', data=scale4b10_branch2b, act_type='relu')
res4b10_branch2c = mx.symbol.Convolution(name='res4b10_branch2c', data=res4b10_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b10_branch2c = mx.symbol.BatchNorm(name='bn4b10_branch2c', data=res4b10_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b10_branch2c = bn4b10_branch2c
res4b10 = mx.symbol.broadcast_add(*[res4b9_relu, scale4b10_branch2c], name='res4b10')
res4b10_relu = mx.symbol.Activation(name='res4b10_relu', data=res4b10, act_type='relu')
res4b11_branch2a = mx.symbol.Convolution(name='res4b11_branch2a', data=res4b10_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b11_branch2a = mx.symbol.BatchNorm(name='bn4b11_branch2a', data=res4b11_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b11_branch2a = bn4b11_branch2a
res4b11_branch2a_relu = mx.symbol.Activation(name='res4b11_branch2a_relu', data=scale4b11_branch2a, act_type='relu')
res4b11_branch2b = mx.symbol.Convolution(name='res4b11_branch2b', data=res4b11_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b11_branch2b = mx.symbol.BatchNorm(name='bn4b11_branch2b', data=res4b11_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b11_branch2b = bn4b11_branch2b
res4b11_branch2b_relu = mx.symbol.Activation(name='res4b11_branch2b_relu', data=scale4b11_branch2b, act_type='relu')
res4b11_branch2c = mx.symbol.Convolution(name='res4b11_branch2c', data=res4b11_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b11_branch2c = mx.symbol.BatchNorm(name='bn4b11_branch2c', data=res4b11_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b11_branch2c = bn4b11_branch2c
res4b11 = mx.symbol.broadcast_add(*[res4b10_relu, scale4b11_branch2c], name='res4b11')
res4b11_relu = mx.symbol.Activation(name='res4b11_relu', data=res4b11, act_type='relu')
res4b12_branch2a = mx.symbol.Convolution(name='res4b12_branch2a', data=res4b11_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b12_branch2a = mx.symbol.BatchNorm(name='bn4b12_branch2a', data=res4b12_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b12_branch2a = bn4b12_branch2a
res4b12_branch2a_relu = mx.symbol.Activation(name='res4b12_branch2a_relu', data=scale4b12_branch2a, act_type='relu')
res4b12_branch2b = mx.symbol.Convolution(name='res4b12_branch2b', data=res4b12_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b12_branch2b = mx.symbol.BatchNorm(name='bn4b12_branch2b', data=res4b12_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b12_branch2b = bn4b12_branch2b
res4b12_branch2b_relu = mx.symbol.Activation(name='res4b12_branch2b_relu', data=scale4b12_branch2b, act_type='relu')
res4b12_branch2c = mx.symbol.Convolution(name='res4b12_branch2c', data=res4b12_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b12_branch2c = mx.symbol.BatchNorm(name='bn4b12_branch2c', data=res4b12_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b12_branch2c = bn4b12_branch2c
res4b12 = mx.symbol.broadcast_add(*[res4b11_relu, scale4b12_branch2c], name='res4b12')
res4b12_relu = mx.symbol.Activation(name='res4b12_relu', data=res4b12, act_type='relu')
res4b13_branch2a = mx.symbol.Convolution(name='res4b13_branch2a', data=res4b12_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b13_branch2a = mx.symbol.BatchNorm(name='bn4b13_branch2a', data=res4b13_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b13_branch2a = bn4b13_branch2a
res4b13_branch2a_relu = mx.symbol.Activation(name='res4b13_branch2a_relu', data=scale4b13_branch2a, act_type='relu')
res4b13_branch2b = mx.symbol.Convolution(name='res4b13_branch2b', data=res4b13_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b13_branch2b = mx.symbol.BatchNorm(name='bn4b13_branch2b', data=res4b13_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b13_branch2b = bn4b13_branch2b
res4b13_branch2b_relu = mx.symbol.Activation(name='res4b13_branch2b_relu', data=scale4b13_branch2b, act_type='relu')
res4b13_branch2c = mx.symbol.Convolution(name='res4b13_branch2c', data=res4b13_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b13_branch2c = mx.symbol.BatchNorm(name='bn4b13_branch2c', data=res4b13_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b13_branch2c = bn4b13_branch2c
res4b13 = mx.symbol.broadcast_add(*[res4b12_relu, scale4b13_branch2c], name='res4b13')
res4b13_relu = mx.symbol.Activation(name='res4b13_relu', data=res4b13, act_type='relu')
res4b14_branch2a = mx.symbol.Convolution(name='res4b14_branch2a', data=res4b13_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b14_branch2a = mx.symbol.BatchNorm(name='bn4b14_branch2a', data=res4b14_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b14_branch2a = bn4b14_branch2a
res4b14_branch2a_relu = mx.symbol.Activation(name='res4b14_branch2a_relu', data=scale4b14_branch2a, act_type='relu')
res4b14_branch2b = mx.symbol.Convolution(name='res4b14_branch2b', data=res4b14_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b14_branch2b = mx.symbol.BatchNorm(name='bn4b14_branch2b', data=res4b14_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b14_branch2b = bn4b14_branch2b
res4b14_branch2b_relu = mx.symbol.Activation(name='res4b14_branch2b_relu', data=scale4b14_branch2b, act_type='relu')
res4b14_branch2c = mx.symbol.Convolution(name='res4b14_branch2c', data=res4b14_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b14_branch2c = mx.symbol.BatchNorm(name='bn4b14_branch2c', data=res4b14_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b14_branch2c = bn4b14_branch2c
res4b14 = mx.symbol.broadcast_add(*[res4b13_relu, scale4b14_branch2c], name='res4b14')
res4b14_relu = mx.symbol.Activation(name='res4b14_relu', data=res4b14, act_type='relu')
res4b15_branch2a = mx.symbol.Convolution(name='res4b15_branch2a', data=res4b14_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b15_branch2a = mx.symbol.BatchNorm(name='bn4b15_branch2a', data=res4b15_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b15_branch2a = bn4b15_branch2a
res4b15_branch2a_relu = mx.symbol.Activation(name='res4b15_branch2a_relu', data=scale4b15_branch2a, act_type='relu')
res4b15_branch2b = mx.symbol.Convolution(name='res4b15_branch2b', data=res4b15_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b15_branch2b = mx.symbol.BatchNorm(name='bn4b15_branch2b', data=res4b15_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b15_branch2b = bn4b15_branch2b
res4b15_branch2b_relu = mx.symbol.Activation(name='res4b15_branch2b_relu', data=scale4b15_branch2b, act_type='relu')
res4b15_branch2c = mx.symbol.Convolution(name='res4b15_branch2c', data=res4b15_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b15_branch2c = mx.symbol.BatchNorm(name='bn4b15_branch2c', data=res4b15_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b15_branch2c = bn4b15_branch2c
res4b15 = mx.symbol.broadcast_add(*[res4b14_relu, scale4b15_branch2c], name='res4b15')
res4b15_relu = mx.symbol.Activation(name='res4b15_relu', data=res4b15, act_type='relu')
res4b16_branch2a = mx.symbol.Convolution(name='res4b16_branch2a', data=res4b15_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b16_branch2a = mx.symbol.BatchNorm(name='bn4b16_branch2a', data=res4b16_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b16_branch2a = bn4b16_branch2a
res4b16_branch2a_relu = mx.symbol.Activation(name='res4b16_branch2a_relu', data=scale4b16_branch2a, act_type='relu')
res4b16_branch2b = mx.symbol.Convolution(name='res4b16_branch2b', data=res4b16_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b16_branch2b = mx.symbol.BatchNorm(name='bn4b16_branch2b', data=res4b16_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b16_branch2b = bn4b16_branch2b
res4b16_branch2b_relu = mx.symbol.Activation(name='res4b16_branch2b_relu', data=scale4b16_branch2b, act_type='relu')
res4b16_branch2c = mx.symbol.Convolution(name='res4b16_branch2c', data=res4b16_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b16_branch2c = mx.symbol.BatchNorm(name='bn4b16_branch2c', data=res4b16_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b16_branch2c = bn4b16_branch2c
res4b16 = mx.symbol.broadcast_add(*[res4b15_relu, scale4b16_branch2c], name='res4b16')
res4b16_relu = mx.symbol.Activation(name='res4b16_relu', data=res4b16, act_type='relu')
res4b17_branch2a = mx.symbol.Convolution(name='res4b17_branch2a', data=res4b16_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b17_branch2a = mx.symbol.BatchNorm(name='bn4b17_branch2a', data=res4b17_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b17_branch2a = bn4b17_branch2a
res4b17_branch2a_relu = mx.symbol.Activation(name='res4b17_branch2a_relu', data=scale4b17_branch2a, act_type='relu')
res4b17_branch2b = mx.symbol.Convolution(name='res4b17_branch2b', data=res4b17_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b17_branch2b = mx.symbol.BatchNorm(name='bn4b17_branch2b', data=res4b17_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b17_branch2b = bn4b17_branch2b
res4b17_branch2b_relu = mx.symbol.Activation(name='res4b17_branch2b_relu', data=scale4b17_branch2b, act_type='relu')
res4b17_branch2c = mx.symbol.Convolution(name='res4b17_branch2c', data=res4b17_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b17_branch2c = mx.symbol.BatchNorm(name='bn4b17_branch2c', data=res4b17_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b17_branch2c = bn4b17_branch2c
res4b17 = mx.symbol.broadcast_add(*[res4b16_relu, scale4b17_branch2c], name='res4b17')
res4b17_relu = mx.symbol.Activation(name='res4b17_relu', data=res4b17, act_type='relu')
res4b18_branch2a = mx.symbol.Convolution(name='res4b18_branch2a', data=res4b17_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b18_branch2a = mx.symbol.BatchNorm(name='bn4b18_branch2a', data=res4b18_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b18_branch2a = bn4b18_branch2a
res4b18_branch2a_relu = mx.symbol.Activation(name='res4b18_branch2a_relu', data=scale4b18_branch2a, act_type='relu')
res4b18_branch2b = mx.symbol.Convolution(name='res4b18_branch2b', data=res4b18_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b18_branch2b = mx.symbol.BatchNorm(name='bn4b18_branch2b', data=res4b18_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b18_branch2b = bn4b18_branch2b
res4b18_branch2b_relu = mx.symbol.Activation(name='res4b18_branch2b_relu', data=scale4b18_branch2b, act_type='relu')
res4b18_branch2c = mx.symbol.Convolution(name='res4b18_branch2c', data=res4b18_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b18_branch2c = mx.symbol.BatchNorm(name='bn4b18_branch2c', data=res4b18_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b18_branch2c = bn4b18_branch2c
res4b18 = mx.symbol.broadcast_add(*[res4b17_relu, scale4b18_branch2c], name='res4b18')
res4b18_relu = mx.symbol.Activation(name='res4b18_relu', data=res4b18, act_type='relu')
res4b19_branch2a = mx.symbol.Convolution(name='res4b19_branch2a', data=res4b18_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b19_branch2a = mx.symbol.BatchNorm(name='bn4b19_branch2a', data=res4b19_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b19_branch2a = bn4b19_branch2a
res4b19_branch2a_relu = mx.symbol.Activation(name='res4b19_branch2a_relu', data=scale4b19_branch2a, act_type='relu')
res4b19_branch2b = mx.symbol.Convolution(name='res4b19_branch2b', data=res4b19_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b19_branch2b = mx.symbol.BatchNorm(name='bn4b19_branch2b', data=res4b19_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b19_branch2b = bn4b19_branch2b
res4b19_branch2b_relu = mx.symbol.Activation(name='res4b19_branch2b_relu', data=scale4b19_branch2b, act_type='relu')
res4b19_branch2c = mx.symbol.Convolution(name='res4b19_branch2c', data=res4b19_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b19_branch2c = mx.symbol.BatchNorm(name='bn4b19_branch2c', data=res4b19_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b19_branch2c = bn4b19_branch2c
res4b19 = mx.symbol.broadcast_add(*[res4b18_relu, scale4b19_branch2c], name='res4b19')
res4b19_relu = mx.symbol.Activation(name='res4b19_relu', data=res4b19, act_type='relu')
res4b20_branch2a = mx.symbol.Convolution(name='res4b20_branch2a', data=res4b19_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b20_branch2a = mx.symbol.BatchNorm(name='bn4b20_branch2a', data=res4b20_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b20_branch2a = bn4b20_branch2a
res4b20_branch2a_relu = mx.symbol.Activation(name='res4b20_branch2a_relu', data=scale4b20_branch2a, act_type='relu')
res4b20_branch2b = mx.symbol.Convolution(name='res4b20_branch2b', data=res4b20_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b20_branch2b = mx.symbol.BatchNorm(name='bn4b20_branch2b', data=res4b20_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b20_branch2b = bn4b20_branch2b
res4b20_branch2b_relu = mx.symbol.Activation(name='res4b20_branch2b_relu', data=scale4b20_branch2b, act_type='relu')
res4b20_branch2c = mx.symbol.Convolution(name='res4b20_branch2c', data=res4b20_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b20_branch2c = mx.symbol.BatchNorm(name='bn4b20_branch2c', data=res4b20_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b20_branch2c = bn4b20_branch2c
res4b20 = mx.symbol.broadcast_add(*[res4b19_relu, scale4b20_branch2c], name='res4b20')
res4b20_relu = mx.symbol.Activation(name='res4b20_relu', data=res4b20, act_type='relu')
res4b21_branch2a = mx.symbol.Convolution(name='res4b21_branch2a', data=res4b20_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b21_branch2a = mx.symbol.BatchNorm(name='bn4b21_branch2a', data=res4b21_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b21_branch2a = bn4b21_branch2a
res4b21_branch2a_relu = mx.symbol.Activation(name='res4b21_branch2a_relu', data=scale4b21_branch2a, act_type='relu')
res4b21_branch2b = mx.symbol.Convolution(name='res4b21_branch2b', data=res4b21_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b21_branch2b = mx.symbol.BatchNorm(name='bn4b21_branch2b', data=res4b21_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b21_branch2b = bn4b21_branch2b
res4b21_branch2b_relu = mx.symbol.Activation(name='res4b21_branch2b_relu', data=scale4b21_branch2b, act_type='relu')
res4b21_branch2c = mx.symbol.Convolution(name='res4b21_branch2c', data=res4b21_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b21_branch2c = mx.symbol.BatchNorm(name='bn4b21_branch2c', data=res4b21_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b21_branch2c = bn4b21_branch2c
res4b21 = mx.symbol.broadcast_add(*[res4b20_relu, scale4b21_branch2c], name='res4b21')
res4b21_relu = mx.symbol.Activation(name='res4b21_relu', data=res4b21, act_type='relu')
res4b22_branch2a = mx.symbol.Convolution(name='res4b22_branch2a', data=res4b21_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b22_branch2a = mx.symbol.BatchNorm(name='bn4b22_branch2a', data=res4b22_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b22_branch2a = bn4b22_branch2a
res4b22_branch2a_relu = mx.symbol.Activation(name='res4b22_branch2a_relu', data=scale4b22_branch2a, act_type='relu')
if with_dpyramid:
res4b22_branch2b_offset = mx.symbol.Convolution(name='res4b22_branch2b_offset', data=res4b22_branch2a_relu, num_filter=72, pad=(1, 1), kernel=(3, 3), stride=(1, 1))
res4b22_branch2b = mx.contrib.symbol.DeformableConvolution(name='res4b22_branch2b', data=res4b22_branch2a_relu, offset=res4b22_branch2b_offset, num_filter=256, pad=(1, 1), kernel=(3, 3), num_deformable_group=4, stride=(1, 1), no_bias=True)
else:
res4b22_branch2b = mx.symbol.Convolution(name='res4b22_branch2b', data=res4b22_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b22_branch2b = mx.symbol.BatchNorm(name='bn4b22_branch2b', data=res4b22_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b22_branch2b = bn4b22_branch2b
res4b22_branch2b_relu = mx.symbol.Activation(name='res4b22_branch2b_relu', data=scale4b22_branch2b, act_type='relu')
res4b22_branch2c = mx.symbol.Convolution(name='res4b22_branch2c', data=res4b22_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b22_branch2c = mx.symbol.BatchNorm(name='bn4b22_branch2c', data=res4b22_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b22_branch2c = bn4b22_branch2c
res4b22 = mx.symbol.broadcast_add(*[res4b21_relu, scale4b22_branch2c], name='res4b22')
res4b22_relu = mx.symbol.Activation(name='res4b22_relu', data=res4b22, act_type='relu')
if with_dilated:
res5_stride = (1, 1)
res5_dilate = (2, 2)
else:
res5_stride = (2, 2)
res5_dilate = (1, 1)
res5a_branch2a = mx.symbol.Convolution(name='res5a_branch2a', data=res4b22_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=res5_stride, no_bias=True)
bn5a_branch2a = mx.symbol.BatchNorm(name='bn5a_branch2a', data=res5a_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale5a_branch2a = bn5a_branch2a
res5a_branch2a_relu = mx.symbol.Activation(name='res5a_branch2a_relu', data=scale5a_branch2a, act_type='relu')
if with_dconv:
res5a_branch2b_offset = mx.symbol.Convolution(name='res5a_branch2b_offset', data=res5a_branch2a_relu, num_filter=72, pad=res5_dilate, kernel=(3, 3), dilate=res5_dilate)
res5a_branch2b = mx.contrib.symbol.DeformableConvolution(name='res5a_branch2b', data=res5a_branch2a_relu, offset=res5a_branch2b_offset, num_filter=512, pad=res5_dilate, kernel=(3, 3), num_deformable_group=4, stride=(1, 1), dilate=res5_dilate, no_bias=True)
else:
res5a_branch2b = mx.symbol.Convolution(name='res5a_branch2b', data=res5a_branch2a_relu, num_filter=512, pad=res5_dilate, kernel=(3, 3), stride=(1, 1), dilate=res5_dilate, no_bias=True)
bn5a_branch2b = mx.symbol.BatchNorm(name='bn5a_branch2b', data=res5a_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale5a_branch2b = bn5a_branch2b
res5a_branch2b_relu = mx.symbol.Activation(name='res5a_branch2b_relu', data=scale5a_branch2b, act_type='relu')
res5a_branch2c = mx.symbol.Convolution(name='res5a_branch2c', data=res5a_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5a_branch2c = mx.symbol.BatchNorm(name='bn5a_branch2c', data=res5a_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale5a_branch2c = bn5a_branch2c
res5a_branch1 = mx.symbol.Convolution(name='res5a_branch1', data=res4b22_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=res5_stride, no_bias=True)
bn5a_branch1 = mx.symbol.BatchNorm(name='bn5a_branch1', data=res5a_branch1, use_global_stats=True, fix_gamma=False, eps=eps)
scale5a_branch1 = bn5a_branch1
res5a = mx.symbol.broadcast_add(*[scale5a_branch1, scale5a_branch2c], name='res5a')
res5a_relu = mx.symbol.Activation(name='res5a_relu', data=res5a, act_type='relu')
res5b_branch2a = mx.symbol.Convolution(name='res5b_branch2a', data=res5a_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5b_branch2a = mx.symbol.BatchNorm(name='bn5b_branch2a', data=res5b_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale5b_branch2a = bn5b_branch2a
res5b_branch2a_relu = mx.symbol.Activation(name='res5b_branch2a_relu', data=scale5b_branch2a, act_type='relu')
if with_dconv:
res5b_branch2b_offset = mx.symbol.Convolution(name='res5b_branch2b_offset', data=res5b_branch2a_relu, num_filter=72, pad=res5_dilate, kernel=(3, 3), dilate=res5_dilate)
res5b_branch2b = mx.contrib.symbol.DeformableConvolution(name='res5b_branch2b', data=res5b_branch2a_relu, offset=res5b_branch2b_offset, num_filter=512, pad=res5_dilate, kernel=(3, 3), num_deformable_group=4, dilate=res5_dilate, no_bias=True)
else:
res5b_branch2b = mx.symbol.Convolution(name='res5b_branch2b', data=res5b_branch2a_relu, num_filter=512, pad=res5_dilate, kernel=(3, 3), stride=(1, 1), dilate=res5_dilate, no_bias=True)
bn5b_branch2b = mx.symbol.BatchNorm(name='bn5b_branch2b', data=res5b_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale5b_branch2b = bn5b_branch2b
res5b_branch2b_relu = mx.symbol.Activation(name='res5b_branch2b_relu', data=scale5b_branch2b, act_type='relu')
res5b_branch2c = mx.symbol.Convolution(name='res5b_branch2c', data=res5b_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5b_branch2c = mx.symbol.BatchNorm(name='bn5b_branch2c', data=res5b_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale5b_branch2c = bn5b_branch2c
res5b = mx.symbol.broadcast_add(*[res5a_relu, scale5b_branch2c], name='res5b')
res5b_relu = mx.symbol.Activation(name='res5b_relu', data=res5b, act_type='relu')
res5c_branch2a = mx.symbol.Convolution(name='res5c_branch2a', data=res5b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5c_branch2a = mx.symbol.BatchNorm(name='bn5c_branch2a', data=res5c_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale5c_branch2a = bn5c_branch2a
res5c_branch2a_relu = mx.symbol.Activation(name='res5c_branch2a_relu', data=scale5c_branch2a, act_type='relu')
if with_dconv:
res5c_branch2b_offset = mx.symbol.Convolution(name='res5c_branch2b_offset', data=res5c_branch2a_relu, num_filter=72, pad=res5_dilate, kernel=(3, 3), dilate=res5_dilate)
res5c_branch2b = mx.contrib.symbol.DeformableConvolution(name='res5c_branch2b', data=res5c_branch2a_relu, offset=res5c_branch2b_offset, num_filter=512, pad=res5_dilate, kernel=(3, 3), num_deformable_group=4, dilate=res5_dilate, no_bias=True)
else:
res5c_branch2b = mx.symbol.Convolution(name='res5c_branch2b', data=res5c_branch2a_relu, num_filter=512, pad=res5_dilate, kernel=(3, 3), stride=(1, 1), dilate=res5_dilate, no_bias=True)
bn5c_branch2b = mx.symbol.BatchNorm(name='bn5c_branch2b', data=res5c_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale5c_branch2b = bn5c_branch2b
res5c_branch2b_relu = mx.symbol.Activation(name='res5c_branch2b_relu', data=scale5c_branch2b, act_type='relu')
res5c_branch2c = mx.symbol.Convolution(name='res5c_branch2c', data=res5c_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5c_branch2c = mx.symbol.BatchNorm(name='bn5c_branch2c', data=res5c_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale5c_branch2c = bn5c_branch2c
res5c = mx.symbol.broadcast_add(*[res5b_relu, scale5c_branch2c], name='res5c')
res5c_relu = mx.symbol.Activation(name='res5c_relu', data=res5c, act_type='relu')
return (res2c_relu, res3b3_relu, res4b22_relu, res5c_relu)
def get_fpn_feature(self, c2, c3, c4, c5, feature_dim=256):
fpn_p5_1x1 = mx.symbol.Convolution(data=c5, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p5_1x1')
fpn_p4_1x1 = mx.symbol.Convolution(data=c4, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p4_1x1')
fpn_p3_1x1 = mx.symbol.Convolution(data=c3, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p3_1x1')
fpn_p2_1x1 = mx.symbol.Convolution(data=c2, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p2_1x1')
fpn_p5_upsample = mx.symbol.UpSampling(fpn_p5_1x1, scale=2, sample_type='nearest', name='fpn_p5_upsample')
fpn_p4_plus = mx.sym.ElementWiseSum(*[fpn_p5_upsample, fpn_p4_1x1], name='fpn_p4_sum')
fpn_p4_upsample = mx.symbol.UpSampling(fpn_p4_plus, scale=2, sample_type='nearest', name='fpn_p4_upsample')
fpn_p3_plus = mx.sym.ElementWiseSum(*[fpn_p4_upsample, fpn_p3_1x1], name='fpn_p3_sum')
fpn_p3_upsample = mx.symbol.UpSampling(fpn_p3_plus, scale=2, sample_type='nearest', name='fpn_p3_upsample')
fpn_p2_plus = mx.sym.ElementWiseSum(*[fpn_p3_upsample, fpn_p2_1x1], name='fpn_p2_sum')
fpn_p6 = mx.sym.Convolution(data=c5, kernel=(3, 3), pad=(1, 1), stride=(2, 2), num_filter=feature_dim, name='fpn_p6')
fpn_p5 = mx.symbol.Convolution(data=fpn_p5_1x1, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p5')
fpn_p4 = mx.symbol.Convolution(data=fpn_p4_plus, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p4')
fpn_p3 = mx.symbol.Convolution(data=fpn_p3_plus, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p3')
fpn_p2 = mx.symbol.Convolution(data=fpn_p2_plus, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p2')
return (fpn_p2, fpn_p3, fpn_p4, fpn_p5, fpn_p6)
def get_rpn_subnet(self, data, num_anchors, suffix):
rpn_conv = mx.sym.Convolution(data=data, kernel=(3, 3), pad=(1, 1), num_filter=512, name=('rpn_conv_' + suffix), weight=self.shared_param_dict['rpn_conv_weight'], bias=self.shared_param_dict['rpn_conv_bias'])
rpn_relu = mx.sym.Activation(data=rpn_conv, act_type='relu', name=('rpn_relu_' + suffix))
rpn_cls_score = mx.sym.Convolution(data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=(2 * num_anchors), name=('rpn_cls_score_' + suffix), weight=self.shared_param_dict['rpn_cls_score_weight'], bias=self.shared_param_dict['rpn_cls_score_bias'])
rpn_bbox_pred = mx.sym.Convolution(data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=(4 * num_anchors), name=('rpn_bbox_pred_' + suffix), weight=self.shared_param_dict['rpn_bbox_pred_weight'], bias=self.shared_param_dict['rpn_bbox_pred_bias'])
rpn_cls_score_t1 = mx.sym.Reshape(data=rpn_cls_score, shape=(0, 2, (- 1), 0), name=('rpn_cls_score_t1_' + suffix))
rpn_cls_score_t2 = mx.sym.Reshape(data=rpn_cls_score_t1, shape=(0, 2, (- 1)), name=('rpn_cls_score_t2_' + suffix))
rpn_cls_prob = mx.sym.SoftmaxActivation(data=rpn_cls_score_t1, mode='channel', name=('rpn_cls_prob_' + suffix))
rpn_cls_prob_t = mx.sym.Reshape(data=rpn_cls_prob, shape=(0, (2 * num_anchors), (- 1), 0), name=('rpn_cls_prob_t_' + suffix))
rpn_bbox_pred_t = mx.sym.Reshape(data=rpn_bbox_pred, shape=(0, 0, (- 1)), name=('rpn_bbox_pred_t_' + suffix))
return (rpn_cls_score_t2, rpn_cls_prob_t, rpn_bbox_pred_t, rpn_bbox_pred)
def get_symbol(self, cfg, is_train=True):
num_classes = cfg.dataset.NUM_CLASSES
num_reg_classes = (2 if cfg.CLASS_AGNOSTIC else num_classes)
data = mx.sym.Variable(name='data')
im_info = mx.sym.Variable(name='im_info')
(res2, res3, res4, res5) = self.get_resnet_backbone(data)
(fpn_p2, fpn_p3, fpn_p4, fpn_p5, fpn_p6) = self.get_fpn_feature(res2, res3, res4, res5)
(rpn_cls_score_p2, rpn_prob_p2, rpn_bbox_loss_p2, rpn_bbox_pred_p2) = self.get_rpn_subnet(fpn_p2, cfg.network.NUM_ANCHORS, 'p2')
(rpn_cls_score_p3, rpn_prob_p3, rpn_bbox_loss_p3, rpn_bbox_pred_p3) = self.get_rpn_subnet(fpn_p3, cfg.network.NUM_ANCHORS, 'p3')
(rpn_cls_score_p4, rpn_prob_p4, rpn_bbox_loss_p4, rpn_bbox_pred_p4) = self.get_rpn_subnet(fpn_p4, cfg.network.NUM_ANCHORS, 'p4')
(rpn_cls_score_p5, rpn_prob_p5, rpn_bbox_loss_p5, rpn_bbox_pred_p5) = self.get_rpn_subnet(fpn_p5, cfg.network.NUM_ANCHORS, 'p5')
(rpn_cls_score_p6, rpn_prob_p6, rpn_bbox_loss_p6, rpn_bbox_pred_p6) = self.get_rpn_subnet(fpn_p6, cfg.network.NUM_ANCHORS, 'p6')
rpn_cls_prob_dict = {'rpn_cls_prob_stride64': rpn_prob_p6, 'rpn_cls_prob_stride32': rpn_prob_p5, 'rpn_cls_prob_stride16': rpn_prob_p4, 'rpn_cls_prob_stride8': rpn_prob_p3, 'rpn_cls_prob_stride4': rpn_prob_p2}
rpn_bbox_pred_dict = {'rpn_bbox_pred_stride64': rpn_bbox_pred_p6, 'rpn_bbox_pred_stride32': rpn_bbox_pred_p5, 'rpn_bbox_pred_stride16': rpn_bbox_pred_p4, 'rpn_bbox_pred_stride8': rpn_bbox_pred_p3, 'rpn_bbox_pred_stride4': rpn_bbox_pred_p2}
arg_dict = dict((rpn_cls_prob_dict.items() + rpn_bbox_pred_dict.items()))
if is_train:
rpn_label = mx.sym.Variable(name='label')
rpn_bbox_target = mx.sym.Variable(name='bbox_target')
rpn_bbox_weight = mx.sym.Variable(name='bbox_weight')
gt_boxes = mx.sym.Variable(name='gt_boxes')
rpn_cls_score = mx.sym.Concat(rpn_cls_score_p2, rpn_cls_score_p3, rpn_cls_score_p4, rpn_cls_score_p5, rpn_cls_score_p6, dim=2)
rpn_bbox_loss = mx.sym.Concat(rpn_bbox_loss_p2, rpn_bbox_loss_p3, rpn_bbox_loss_p4, rpn_bbox_loss_p5, rpn_bbox_loss_p6, dim=2)
rpn_cls_output = mx.sym.SoftmaxOutput(data=rpn_cls_score, label=rpn_label, multi_output=True, normalization='valid', use_ignore=True, ignore_label=(- 1), name='rpn_cls_prob')
rpn_bbox_loss = (rpn_bbox_weight * mx.sym.smooth_l1(name='rpn_bbox_loss_l1', scalar=3.0, data=(rpn_bbox_loss - rpn_bbox_target)))
rpn_bbox_loss = mx.sym.MakeLoss(name='rpn_bbox_loss', data=rpn_bbox_loss, grad_scale=(1.0 / cfg.TRAIN.RPN_BATCH_SIZE))
aux_dict = {'op_type': 'pyramid_proposal', 'name': 'rois', 'im_info': im_info, 'feat_stride': tuple(cfg.network.RPN_FEAT_STRIDE), 'scales': tuple(cfg.network.ANCHOR_SCALES), 'ratios': tuple(cfg.network.ANCHOR_RATIOS), 'rpn_pre_nms_top_n': cfg.TRAIN.RPN_PRE_NMS_TOP_N, 'rpn_post_nms_top_n': cfg.TRAIN.RPN_POST_NMS_TOP_N, 'threshold': cfg.TRAIN.RPN_NMS_THRESH, 'rpn_min_size': cfg.TRAIN.RPN_MIN_SIZE}
rois = mx.sym.Custom(**dict((arg_dict.items() + aux_dict.items())))
gt_boxes_reshape = mx.sym.Reshape(data=gt_boxes, shape=((- 1), 5), name='gt_boxes_reshape')
(rois, label, bbox_target, bbox_weight) = mx.sym.Custom(rois=rois, gt_boxes=gt_boxes_reshape, op_type='proposal_target', num_classes=num_reg_classes, batch_images=cfg.TRAIN.BATCH_IMAGES, batch_rois=cfg.TRAIN.BATCH_ROIS, cfg=cPickle.dumps(cfg), fg_fraction=cfg.TRAIN.FG_FRACTION)
else:
aux_dict = {'op_type': 'pyramid_proposal', 'name': 'rois', 'im_info': im_info, 'feat_stride': tuple(cfg.network.RPN_FEAT_STRIDE), 'scales': tuple(cfg.network.ANCHOR_SCALES), 'ratios': tuple(cfg.network.ANCHOR_RATIOS), 'rpn_pre_nms_top_n': cfg.TEST.RPN_PRE_NMS_TOP_N, 'rpn_post_nms_top_n': cfg.TEST.RPN_POST_NMS_TOP_N, 'threshold': cfg.TEST.RPN_NMS_THRESH, 'rpn_min_size': cfg.TEST.RPN_MIN_SIZE}
rois = mx.sym.Custom(**dict((arg_dict.items() + aux_dict.items())))
roi_pool = mx.symbol.Custom(data_p2=fpn_p2, data_p3=fpn_p3, data_p4=fpn_p4, data_p5=fpn_p5, rois=rois, op_type='fpn_roi_pooling', name='fpn_roi_pooling')
fc_new_1 = mx.symbol.FullyConnected(name='fc_new_1', data=roi_pool, num_hidden=1024)
fc_new_1_relu = mx.sym.Activation(data=fc_new_1, act_type='relu', name='fc_new_1_relu')
fc_new_2 = mx.symbol.FullyConnected(name='fc_new_2', data=fc_new_1_relu, num_hidden=1024)
fc_new_2_relu = mx.sym.Activation(data=fc_new_2, act_type='relu', name='fc_new_2_relu')
cls_score = mx.symbol.FullyConnected(name='cls_score', data=fc_new_2_relu, num_hidden=num_classes)
bbox_pred = mx.symbol.FullyConnected(name='bbox_pred', data=fc_new_2_relu, num_hidden=(num_reg_classes * 4))
if is_train:
if cfg.TRAIN.ENABLE_OHEM:
(labels_ohem, bbox_weights_ohem) = mx.sym.Custom(op_type='BoxAnnotatorOHEM', num_classes=num_classes, num_reg_classes=num_reg_classes, roi_per_img=cfg.TRAIN.BATCH_ROIS_OHEM, cls_score=cls_score, bbox_pred=bbox_pred, labels=label, bbox_targets=bbox_target, bbox_weights=bbox_weight)
cls_prob = mx.sym.SoftmaxOutput(name='cls_prob', data=cls_score, label=labels_ohem, normalization='valid', use_ignore=True, ignore_label=(- 1))
bbox_loss_ = (bbox_weights_ohem * mx.sym.smooth_l1(name='bbox_loss_', scalar=1.0, data=(bbox_pred - bbox_target)))
bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=(1.0 / cfg.TRAIN.BATCH_ROIS_OHEM))
rcnn_label = labels_ohem
else:
cls_prob = mx.sym.SoftmaxOutput(name='cls_prob', data=cls_score, label=label, normalization='valid')
bbox_loss_ = (bbox_weight * mx.sym.smooth_l1(name='bbox_loss_', scalar=1.0, data=(bbox_pred - bbox_target)))
bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=(1.0 / cfg.TRAIN.BATCH_ROIS))
rcnn_label = label
rcnn_label = mx.sym.Reshape(data=rcnn_label, shape=(cfg.TRAIN.BATCH_IMAGES, (- 1)), name='label_reshape')
cls_prob = mx.sym.Reshape(data=cls_prob, shape=(cfg.TRAIN.BATCH_IMAGES, (- 1), num_classes), name='cls_prob_reshape')
bbox_loss = mx.sym.Reshape(data=bbox_loss, shape=(cfg.TRAIN.BATCH_IMAGES, (- 1), (4 * num_reg_classes)), name='bbox_loss_reshape')
group = mx.sym.Group([rpn_cls_output, rpn_bbox_loss, cls_prob, bbox_loss, mx.sym.BlockGrad(rcnn_label)])
else:
cls_prob = mx.sym.SoftmaxActivation(name='cls_prob', data=cls_score)
cls_prob = mx.sym.Reshape(data=cls_prob, shape=(cfg.TEST.BATCH_IMAGES, (- 1), num_classes), name='cls_prob_reshape')
bbox_pred = mx.sym.Reshape(data=bbox_pred, shape=(cfg.TEST.BATCH_IMAGES, (- 1), (4 * num_reg_classes)), name='bbox_pred_reshape')
group = mx.sym.Group([rois, cls_prob, bbox_pred])
self.sym = group
return group
def init_weight_rcnn(self, cfg, arg_params, aux_params):
arg_params['fc_new_1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fc_new_1_weight'])
arg_params['fc_new_1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fc_new_1_bias'])
arg_params['fc_new_2_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fc_new_2_weight'])
arg_params['fc_new_2_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fc_new_2_bias'])
arg_params['cls_score_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['cls_score_weight'])
arg_params['cls_score_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['cls_score_bias'])
arg_params['bbox_pred_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['bbox_pred_weight'])
arg_params['bbox_pred_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['bbox_pred_bias'])
def init_weight_fpn(self, cfg, arg_params, aux_params):
arg_params['fpn_p6_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p6_weight'])
arg_params['fpn_p6_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p6_bias'])
arg_params['fpn_p5_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p5_weight'])
arg_params['fpn_p5_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p5_bias'])
arg_params['fpn_p4_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p4_weight'])
arg_params['fpn_p4_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p4_bias'])
arg_params['fpn_p3_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p3_weight'])
arg_params['fpn_p3_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p3_bias'])
arg_params['fpn_p2_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p2_weight'])
arg_params['fpn_p2_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p2_bias'])
arg_params['fpn_p5_1x1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p5_1x1_weight'])
arg_params['fpn_p5_1x1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p5_1x1_bias'])
arg_params['fpn_p4_1x1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p4_1x1_weight'])
arg_params['fpn_p4_1x1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p4_1x1_bias'])
arg_params['fpn_p3_1x1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p3_1x1_weight'])
arg_params['fpn_p3_1x1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p3_1x1_bias'])
arg_params['fpn_p2_1x1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p2_1x1_weight'])
arg_params['fpn_p2_1x1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p2_1x1_bias'])
def init_weight(self, cfg, arg_params, aux_params):
for name in self.shared_param_list:
arg_params[(name + '_weight')] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict[(name + '_weight')])
arg_params[(name + '_bias')] = mx.nd.zeros(shape=self.arg_shape_dict[(name + '_bias')])
self.init_weight_rcnn(cfg, arg_params, aux_params)
self.init_weight_fpn(cfg, arg_params, aux_params)
|
def customize_compiler_for_nvcc(self):
    """Monkey-patch a distutils compiler object so '.cu' sources go through nvcc.

    Injects deep into distutils to customize how the dispatch to gcc/nvcc
    works.  Subclassing UnixCCompiler is impractical because
    distutils.sysconfig.customize_compiler must still run on the instance,
    so instead the instance's _compile method is wrapped in place.
    Note: ``self`` here is the compiler object, not a class instance of ours.
    """
    # Let distutils accept CUDA sources at all.
    self.src_extensions.append('.cu')
    # Remember the host compiler so it can be restored after each .cu file.
    default_compiler_so = self.compiler_so
    super = self._compile
    def _compile(obj, src, ext, cc_args, extra_postargs, pp_opts):
        if (os.path.splitext(src)[1] == '.cu'):
            # CUDA source: swap in nvcc (path comes from the module-level
            # CUDA dict — presumably filled by locate_cuda(); verify).
            self.set_executable('compiler_so', CUDA['nvcc'])
            postargs = extra_postargs['nvcc']
        else:
            # Regular source: keep the host compiler flags.
            postargs = extra_postargs['gcc']
        super(obj, src, ext, cc_args, postargs, pp_opts)
        # Restore the default host compiler for the next file.
        self.compiler_so = default_compiler_so
    # Install the wrapper; distutils will now call it for every source file.
    self._compile = _compile
|
class custom_build_ext(build_ext):
    """build_ext subclass that patches the compiler so '.cu' files are
    dispatched to nvcc (see customize_compiler_for_nvcc)."""
    def build_extensions(self):
        # Patch the compiler object in place, then delegate to distutils.
        customize_compiler_for_nvcc(self.compiler)
        build_ext.build_extensions(self)
|
def find_in_path(name, path):
    """Return the absolute path of *name* found on *path*, or None.

    *path* is an os.pathsep-separated list of directories searched in order;
    the first existing hit wins.
    """
    candidates = (pjoin(directory, name) for directory in path.split(os.pathsep))
    for candidate in candidates:
        if os.path.exists(candidate):
            return os.path.abspath(candidate)
    return None
|
def locate_cuda():
    """Locate the CUDA environment on the system.

    Returns:
        dict with keys 'home', 'nvcc', 'include' and 'lib64' mapping to the
        absolute path of each directory.

    Starts from the CUDAHOME environment variable; if unset, falls back to
    finding 'nvcc' on the PATH (plus the conventional /usr/local/cuda/bin).

    Raises:
        EnvironmentError: if nvcc cannot be located or an expected
            sub-directory is missing.
    """
    if 'CUDAHOME' in os.environ:
        home = os.environ['CUDAHOME']
        nvcc = pjoin(home, 'bin', 'nvcc')
    else:
        default_path = pjoin(os.sep, 'usr', 'local', 'cuda', 'bin')
        nvcc = find_in_path('nvcc', os.environ['PATH'] + os.pathsep + default_path)
        if nvcc is None:
            raise EnvironmentError('The nvcc binary could not be located in your $PATH. Either add it to your path, or set $CUDAHOME')
        home = os.path.dirname(os.path.dirname(nvcc))
    cudaconfig = {'home': home, 'nvcc': nvcc, 'include': pjoin(home, 'include'), 'lib64': pjoin(home, 'lib64')}
    # .items() instead of the Python-2-only .iteritems(): this file already uses
    # print() as a function elsewhere, and dict.iteritems() does not exist on
    # Python 3. .items() behaves the same on both.
    for (k, v) in cudaconfig.items():
        if not os.path.exists(v):
            raise EnvironmentError('The CUDA %s path could not be located in %s' % (k, v))
    return cudaconfig
|
def customize_compiler_for_nvcc(self):
    """Monkey-patch a distutils compiler object so '.cu' sources go through nvcc.

    (Duplicate of the earlier helper of the same name — this file appears to
    concatenate several setup.py variants.)  ``self`` is the compiler object;
    its private _compile hook is wrapped so each source file is routed to
    either nvcc or the host compiler.
    """
    # Let distutils accept CUDA sources.
    self.src_extensions.append('.cu')
    # Saved so the host compiler can be restored after a .cu file.
    default_compiler_so = self.compiler_so
    super = self._compile
    def _compile(obj, src, ext, cc_args, extra_postargs, pp_opts):
        if (os.path.splitext(src)[1] == '.cu'):
            # Use nvcc for CUDA sources; CUDA is a module-level dict —
            # presumably locate_cuda()'s result; verify at the call site.
            self.set_executable('compiler_so', CUDA['nvcc'])
            postargs = extra_postargs['nvcc']
        else:
            postargs = extra_postargs['gcc']
        super(obj, src, ext, cc_args, postargs, pp_opts)
        # Restore the default host compiler for the next source file.
        self.compiler_so = default_compiler_so
    self._compile = _compile
|
class custom_build_ext(build_ext):
    """build_ext subclass that patches the compiler for nvcc dispatch
    (duplicate definition — later ones shadow earlier ones at import time)."""
    def build_extensions(self):
        # Patch the compiler object in place, then delegate to distutils.
        customize_compiler_for_nvcc(self.compiler)
        build_ext.build_extensions(self)
|
def find_in_path(name, path):
    """Search the os.pathsep-separated *path* for a file called *name*.

    Returns the absolute path of the first existing match, or None.
    """
    for folder in path.split(os.pathsep):
        candidate = pjoin(folder, name)
        if not os.path.exists(candidate):
            continue
        return os.path.abspath(candidate)
    return None
|
def locate_cuda():
    """Locate the CUDA environment on the system (Windows-aware variant).

    Returns:
        dict with keys 'home', 'nvcc', 'include' and 'lib64' giving the
        absolute path to each directory ('lib64' actually uses the
        module-level ``lib_dir``, and the nvcc binary name comes from the
        module-level ``nvcc_bin`` — e.g. 'nvcc.exe' on Windows).

    Starts from the CUDA_PATH environment variable; if unset, falls back to
    finding nvcc on the PATH.

    Raises:
        EnvironmentError: if nvcc cannot be located or an expected
            sub-directory is missing.
    """
    if 'CUDA_PATH' in os.environ:
        home = os.environ['CUDA_PATH']
        print('home = %s\n' % home)
        nvcc = pjoin(home, 'bin', nvcc_bin)
    else:
        default_path = pjoin(os.sep, 'usr', 'local', 'cuda', 'bin')
        nvcc = find_in_path(nvcc_bin, os.environ['PATH'] + os.pathsep + default_path)
        if nvcc is None:
            raise EnvironmentError('The nvcc binary could not be located in your $PATH. Either add it to your path, or set $CUDA_PATH')
        home = os.path.dirname(os.path.dirname(nvcc))
        print('home = %s, nvcc = %s\n' % (home, nvcc))
    cudaconfig = {'home': home, 'nvcc': nvcc, 'include': pjoin(home, 'include'), 'lib64': pjoin(home, lib_dir)}
    # .items() instead of Python-2-only .iteritems(): keeps this helper usable
    # under Python 3 (dict.iteritems() was removed) and identical under Python 2.
    for (k, v) in cudaconfig.items():
        if not os.path.exists(v):
            raise EnvironmentError('The CUDA %s path could not be located in %s' % (k, v))
    return cudaconfig
|
def customize_compiler_for_nvcc(self):
    """Monkey-patch a distutils compiler object's batch ``compile`` method.

    Variant of the earlier helpers of the same name: instead of wrapping the
    per-file _compile hook, this wraps the public compile() entry point and
    picks the flag set ('nvcc' vs 'gcc') from the extension of the FIRST
    source file only.  NOTE(review): unlike the other variants, this one does
    not register '.cu' as a source extension nor swap the compiler executable
    — presumably the caller handles that; verify before relying on it.
    """
    super = self.compile
    def compile(sources, output_dir=None, macros=None, include_dirs=None, debug=0, extra_preargs=None, extra_postargs=None, depends=None):
        # Flag choice is driven by the first source's extension; mixed
        # gcc/nvcc batches would all get the same flags.
        postfix = os.path.splitext(sources[0])[1]
        if (postfix == '.cu'):
            postargs = extra_postargs['nvcc']
        else:
            postargs = extra_postargs['gcc']
        # Delegate to the original compile with the selected flag list.
        return super(sources, output_dir, macros, include_dirs, debug, extra_preargs, postargs, depends)
    self.compile = compile
|
class custom_build_ext(build_ext):
    """build_ext subclass that patches the compiler for nvcc dispatch
    (third duplicate definition in this file)."""
    def build_extensions(self):
        # Patch the compiler object in place, then delegate to distutils.
        customize_compiler_for_nvcc(self.compiler)
        build_ext.build_extensions(self)
|
class CUDA_build_ext(build_ext):
    """Custom build_ext command that compiles CUDA files.

    Note that ALL extension source files will be processed with nvcc, not
    just '.cu' ones, because the compiler/linker executables are replaced
    globally.
    """
    def build_extensions(self):
        # Route everything through nvcc and intercept process spawning so the
        # command line can be rewritten per platform.
        self.compiler.src_extensions.append('.cu')
        self.compiler.set_executable('compiler_so', 'nvcc')
        self.compiler.set_executable('linker_so', 'nvcc --shared')
        if hasattr(self.compiler, '_c_extensions'):
            # MSVC keeps its own list of C-like extensions (needed for Windows).
            self.compiler._c_extensions.append('.cu')
        self.compiler.spawn = self.spawn
        build_ext.build_extensions(self)
    def spawn(self, cmd, search_path=1, verbose=0, dry_run=0):
        """Perform CUDA-specific command-line rewrites before actually
        launching compile/link commands.

        Two cases: on macOS drop every '-arch' pair from nvcc link lines; on
        MSVC translate cl.exe-style flags into their nvcc equivalents.
        """
        if ((sys.platform == 'darwin') and (len(cmd) >= 2) and (cmd[0] == 'nvcc') and (cmd[1] == '--shared') and (cmd.count('-arch') > 0)):
            # nvcc does not accept clang's '-arch x86_64'; strip all of them.
            while True:
                try:
                    index = cmd.index('-arch')
                    del cmd[index:(index + 2)]
                except ValueError:
                    break
        elif (self.compiler.compiler_type == 'msvc'):
            # Replace cl.exe with nvcc, telling nvcc where cl.exe lives.
            # find_executable and PATH are module-level names — presumably
            # distutils.spawn.find_executable and an env PATH string; verify.
            cmd[:1] = ['nvcc', '--compiler-bindir', (os.path.dirname(find_executable('cl.exe', PATH)) or cmd[0])]
            # Translate MSVC-style flags to nvcc/gcc-style ones.
            # NOTE(review): this mutates cmd while enumerating it, so the
            # entry right after each deleted flag is skipped — confirm the
            # expected command lines never place two translatable flags
            # back-to-back.
            for (idx, c) in enumerate(cmd):
                if (c == '/c'):
                    cmd[idx] = '-c'
                elif (c == '/DLL'):
                    cmd[idx] = '--shared'
                elif ('-fPIC' in c):
                    del cmd[idx]
                elif c.startswith('/Tc'):
                    cmd[idx] = c[3:]
                elif c.startswith('/Fo'):
                    cmd[idx:(idx + 1)] = ['-o', c[3:]]
                elif c.startswith('/LIBPATH:'):
                    cmd[idx] = ('-L' + c[9:])
                elif c.startswith('/OUT:'):
                    cmd[idx:(idx + 1)] = ['-o', c[5:]]
                elif c.startswith('/EXPORT:'):
                    del cmd[idx]
                elif (c == 'cublas.lib'):
                    cmd[idx] = '-lcublas'
            if ('--shared' in cmd):
                # Remaining MSVC flags must be forwarded to the linker.
                pass_on = '--linker-options='
                cmd.append('/NODEFAULTLIB:libcmt.lib')
            else:
                # ...or to the host compiler when only compiling.
                pass_on = '--compiler-options='
            # Bundle every leftover '/...' flag into a single pass-through arg.
            cmd = ([c for c in cmd if (c[0] != '/')] + [(pass_on + ','.join((c for c in cmd if (c[0] == '/'))))])
        spawn(cmd, search_path, verbose, dry_run)
|
def get_segmentation_test_batch(segdb, config):
    """Build per-image test batches for segmentation.

    Args:
        segdb: list of segmentation db entries (each with 'image', 'flipped').
        config: the config settings forwarded to get_segmentation_image.

    Returns:
        (data, label, im_info) — parallel per-image lists of dicts/arrays.
    """
    (imgs, seg_cls_gts, segdb) = get_segmentation_image(segdb, config)
    num = len(segdb)
    im_info = [np.array([segdb[idx]['im_info']], dtype=np.float32) for idx in range(num)]
    data = [{'data': imgs[idx], 'im_info': im_info[idx]} for idx in range(num)]
    label = [{'label': seg_cls_gts[idx]} for idx in range(num)]
    return (data, label, im_info)
|
def get_segmentation_train_batch(segdb, config):
    """Build a single-image training batch for segmentation.

    Args:
        segdb: one-element list of segmentation db entries.
        config: the config settings forwarded to get_segmentation_image.

    Returns:
        (data, label) dicts for the single image.
    """
    assert len(segdb) == 1, 'Single batch only'
    (imgs, seg_cls_gts, segdb) = get_segmentation_image(segdb, config)
    entry = segdb[0]
    im_info = np.array([entry['im_info']], dtype=np.float32)
    data = {'data': imgs[0], 'im_info': im_info}
    label = {'label': seg_cls_gts[0]}
    return (data, label)
|
def file_lines_to_list(path):
    """Read a text file and return its lines, whitespace-stripped, as a list."""
    with open(path) as handle:
        return [raw.strip() for raw in handle.readlines()]
|
class Pred():
    """A single detection: class id, confidence, and pixel box corners."""
    def __init__(self, id, conf, left, top, right, bottom):
        self.id = id
        self.conf = conf
        self.left = left
        self.top = top
        self.right = right
        self.bottom = bottom
    def calc_pred_intersection(self, pred):
        """Return the intersection area with *pred*, or 0 when the class ids
        differ or the boxes do not overlap."""
        if self.id != pred.id:
            return 0
        ovl_left = max(self.left, pred.left)
        ovl_right = min(self.right, pred.right)
        if ovl_left > ovl_right:
            return 0
        ovl_top = max(self.top, pred.top)
        ovl_bottom = min(self.bottom, pred.bottom)
        if ovl_top > ovl_bottom:
            return 0
        return (ovl_right - ovl_left) * (ovl_bottom - ovl_top)
|
def plot_preds(img, preds, clr=(255, 0, 255)):
    """Draw each prediction's box and class id onto *img* and return it.

    *img* may be an image array or a path (loaded via cv2.imread).
    """
    if isinstance(img, str):
        img = cv2.imread(img)
    for box in preds:
        top_left = (box.left, box.top)
        cv2.rectangle(img, top_left, (box.right, box.bottom), clr)
        cv2.putText(img, box.id, top_left, cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)
    return img
|
def read_predictions(txt_file):
    """Parse a detection file (one ';'-separated record per line) into Pred objects."""
    predictions = []
    for row in file_lines_to_list(txt_file):
        (pid, conf, left, top, right, bottom) = row.split(';')
        predictions.append(Pred(pid, float(conf), int(left), int(top), int(right), int(bottom)))
    return predictions
|
class ObjDetStats():
    """Compute and visualize object-detection statistics against a ground-truth roidb."""
    def __init__(self, gt_roidb_fname, cat_ords, logger):
        """Load a pickled roidb and index its entries by image basename.

        Args:
            gt_roidb_fname: path to a cPickle'd list of roidb entries.
            cat_ords: category ordinals to evaluate against.
            logger: logger passed to perf_stats.print_perf.
        """
        with open(gt_roidb_fname, 'rb') as fid:
            self.roidb = cPickle.load(fid)
        # basename -> roidb entry, for O(1) lookup from a detections file name.
        self.roidb_ni = {}
        for entry in self.roidb:
            image_path = entry['image']
            (im_path, im_name) = os.path.split(image_path)
            self.roidb_ni[im_name] = entry
        self.cat_ords = cat_ords
        self.logger = logger
    def print_bboxes(self, dets_folder, query_images):
        """Render detections and GT boxes for each det file whose image appears in query_images."""
        import cv2
        from utils.show_boxes import show_dets_gt_boxes
        for detsName in os.listdir(dets_folder):
            if (detsName[(- 4):] == '.txt'):
                detsPath = os.path.join(dets_folder, detsName)
                imgName = detName_2_imgName(detsName)
                entry = self.roidb_ni[imgName]
                # (fix: removed a redundant second detName_2_imgName(detsName)
                # call that recomputed imgName with no intervening change)
                for q_im_path in query_images:
                    (_, q_im_name) = os.path.split(q_im_path)
                    if (q_im_name == imgName):
                        im = cv2.imread(q_im_path)
                        im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
                        dvisName = imgName_2_dvisName(imgName)
                        (q_dets, cat_names) = img_dets_CSV_2_A(detsPath, self.cat_ords)
                        show_dets_gt_boxes(im, q_dets, cat_names, entry['boxes'], entry['gt_names'], scale=1.0, FS=12, LW=1.5, save_file_path=os.path.join(dets_folder, dvisName))
                        break
    def add_base_stats_inDomain(self, dets_folder, perf_stats, Nclasses=1, ovthresh=0.5):
        """Accumulate per-image detection statistics into perf_stats.

        Only ground-truth boxes whose class is in self.cat_ords participate.
        Returns perf_stats after logging its summary.
        """
        for detsName in os.listdir(dets_folder):
            if (detsName[(- 4):] == '.txt'):
                (q_dets, cat_names) = img_dets_CSV_2_A(os.path.join(dets_folder, detsName), self.cat_ords)
                imgName = detName_2_imgName(detsName)
                entry = self.roidb_ni[imgName]
                # Keep only GT boxes belonging to the evaluated categories.
                gt_boxes_test = []
                gt_classes_test = []
                for (gt_box, gt_class) in zip(entry['boxes'], entry['gt_classes']):
                    if (gt_class in self.cat_ords):
                        gt_boxes_test += [gt_box]
                        gt_classes_test += [gt_class]
                gt_classes_test = np.asarray(gt_classes_test)
                gt_boxes_test = np.asarray(gt_boxes_test)
                perf_stats.comp_epi_stats_m(q_dets, gt_boxes_test, gt_classes_test, self.cat_ords, ovthresh)
        perf_stats.print_perf(self.logger, prefix='_')
        return perf_stats
|
class PrefetchingIter(mx.io.DataIter):
    """Wrap one DataIter with background-thread prefetching.

    Takes one or more DataIters (or any class with "reset" and "next"
    methods) and combines them with prefetching: a daemon thread fetches the
    next batch while the consumer works on the current one.

    Parameters
    ----------
    iters : DataIter or list of DataIter
        one or more DataIters (despite the signature, exactly one is
        supported — see the assert in __init__)
    rename_data : None or list of dict
        i-th element is a renaming map for the i-th iter, in the form
        {'original_name': 'new_name'}; one entry per provide_data item
    rename_label : None or list of dict
        Similar to rename_data

    Examples
    --------
    iter = PrefetchingIter([NDArrayIter({'data': X1}), NDArrayIter({'data': X2})],
                           rename_data=[{'data': 'data1'}, {'data': 'data2'}])
    """
    def __init__(self, iters, rename_data=None, rename_label=None):
        super(PrefetchingIter, self).__init__()
        if (not isinstance(iters, list)):
            iters = [iters]
        self.n_iter = len(iters)
        assert (self.n_iter == 1), 'Our prefetching iter only support 1 DataIter'
        self.iters = iters
        self.rename_data = rename_data
        self.rename_label = rename_label
        # batch size = number of data entries * leading dim of the first shape
        self.batch_size = (len(self.provide_data) * self.provide_data[0][0][1][0])
        # Handshake events per iter: 'ready' means a prefetched batch is
        # available; 'taken' means the consumer claimed it, so fetching may
        # resume. 'taken' starts set so the thread fetches immediately.
        self.data_ready = [threading.Event() for i in range(self.n_iter)]
        self.data_taken = [threading.Event() for i in range(self.n_iter)]
        for e in self.data_taken:
            e.set()
        self.started = True
        self.current_batch = [None for _ in range(self.n_iter)]
        self.next_batch = [None for _ in range(self.n_iter)]
        def prefetch_func(self, i):
            'Thread entry'
            # Loop: wait until the previous batch was taken, fetch the next
            # one (None signals exhaustion), then flag it as ready.
            while True:
                self.data_taken[i].wait()
                if (not self.started):
                    break
                try:
                    self.next_batch[i] = self.iters[i].next()
                except StopIteration:
                    self.next_batch[i] = None
                self.data_taken[i].clear()
                self.data_ready[i].set()
        # Daemon threads so they never block interpreter shutdown.
        self.prefetch_threads = [threading.Thread(target=prefetch_func, args=[self, i]) for i in range(self.n_iter)]
        for thread in self.prefetch_threads:
            thread.setDaemon(True)
            thread.start()
    def __del__(self):
        # Unblock the threads (started=False makes them exit) and join them.
        self.started = False
        for e in self.data_taken:
            e.set()
        for thread in self.prefetch_threads:
            thread.join()
    @property
    def provide_data(self):
        'The name and shape of data provided by this iterator'
        if (self.rename_data is None):
            return sum([i.provide_data for i in self.iters], [])
        else:
            # Apply the renaming map while preserving shape/dtype metadata.
            return sum([[(DataDesc(r[x.name], x.shape, x.dtype) if isinstance(x, DataDesc) else DataDesc(*x)) for x in i.provide_data] for (r, i) in zip(self.rename_data, self.iters)], [])
    @property
    def provide_label(self):
        'The name and shape of label provided by this iterator'
        if (self.rename_label is None):
            return sum([i.provide_label for i in self.iters], [])
        else:
            return sum([[(DataDesc(r[x.name], x.shape, x.dtype) if isinstance(x, DataDesc) else DataDesc(*x)) for x in i.provide_label] for (r, i) in zip(self.rename_label, self.iters)], [])
    def reset(self):
        # Wait for in-flight fetches, reset the wrapped iters, then rearm the
        # handshake so prefetching restarts cleanly.
        for e in self.data_ready:
            e.wait()
        for i in self.iters:
            i.reset()
        for e in self.data_ready:
            e.clear()
        for e in self.data_taken:
            e.set()
    def iter_next(self):
        # Block until the prefetched batch is ready; None means exhausted.
        for e in self.data_ready:
            e.wait()
        if (self.next_batch[0] is None):
            return False
        else:
            self.current_batch = self.next_batch[0]
            # Hand the slot back to the prefetch thread.
            for e in self.data_ready:
                e.clear()
            for e in self.data_taken:
                e.set()
            return True
    def next(self):
        if self.iter_next():
            return self.current_batch
        else:
            raise StopIteration
    def getdata(self):
        return self.current_batch.data
    def getlabel(self):
        return self.current_batch.label
    def getindex(self):
        return self.current_batch.index
    def getpad(self):
        return self.current_batch.pad
|
def combine_model(prefix1, epoch1, prefix2, epoch2, prefix_out, epoch_out):
    """Merge the parameters of two checkpoints and save the union.

    When both checkpoints define the same parameter name, the value from the
    second checkpoint (prefix2/epoch2) wins, because it is assigned last.

    Args:
        prefix1, epoch1: first input checkpoint.
        prefix2, epoch2: second input checkpoint (takes precedence on clashes).
        prefix_out, epoch_out: where to save the merged checkpoint.
    """
    (args1, auxs1) = load_checkpoint(prefix1, epoch1)
    (args2, auxs2) = load_checkpoint(prefix2, epoch2)
    # fix: dict.keys() + dict.keys() raises TypeError on Python 3 (dict views
    # do not support '+'); list(...) + list(...) works on both 2 and 3.
    arg_names = (list(args1) + list(args2))
    aux_names = (list(auxs1) + list(auxs2))
    args = dict()
    for arg in arg_names:
        if (arg in args1):
            args[arg] = args1[arg]
        if (arg in args2):
            args[arg] = args2[arg]
    auxs = dict()
    for aux in aux_names:
        if (aux in auxs1):
            auxs[aux] = auxs1[aux]
        if (aux in auxs2):
            auxs[aux] = auxs2[aux]
    save_checkpoint(prefix_out, epoch_out, args, auxs)
|
@mx.init.register
class MyConstant(mx.init.Initializer):
    """Initializer that fills a parameter with a fixed, user-supplied value."""
    def __init__(self, value):
        # Forward value as a kwarg so the base class can serialize/recreate
        # this initializer by name (required by @mx.init.register).
        super(MyConstant, self).__init__(value=value)
        self.value = value
    def _init_weight(self, _, arr):
        # Broadcast-copy the stored constant into the destination NDArray.
        arr[:] = mx.nd.array(self.value)
|
def create_logger(root_output_path, cfg, image_set):
    """Create the experiment output directory tree and a file-backed logger.

    Args:
        root_output_path: root folder for all experiment output.
        cfg: path to the config file; its basename (without extension) names
            the experiment sub-folder.
        image_set: ';'-separated list of image-set names; joined with '_' to
            name the leaf folder.

    Returns:
        (logger, final_output_path) — root logger at INFO level writing to a
        timestamped log file inside final_output_path.
    """
    if not os.path.exists(root_output_path):
        os.makedirs(root_output_path)
    assert os.path.exists(root_output_path), '{} does not exist'.format(root_output_path)
    cfg_name = os.path.basename(cfg).split('.')[0]
    config_output_path = os.path.join(root_output_path, '{}'.format(cfg_name))
    if not os.path.exists(config_output_path):
        os.makedirs(config_output_path)
    final_output_path = os.path.join(config_output_path, '{}'.format('_'.join(image_set.split(';'))))
    if not os.path.exists(final_output_path):
        os.makedirs(final_output_path)
    log_file = '{}_{}.log'.format(cfg_name, time.strftime('%Y-%m-%d-%H-%M'))
    logging.basicConfig(filename=os.path.join(final_output_path, log_file), format='%(asctime)-15s %(message)s')
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    return (logger, final_output_path)
|
class UnknownImageFormat(Exception):
    """Raised when an image's format/dimensions cannot be determined."""
    pass
|
class Image(collections.namedtuple('Image', image_fields)):
    """Image-metadata record (fields given by the module-level ``image_fields``;
    the to_str_* methods use width, height, file_size, type and path)."""
    def to_str_row(self):
        # Tab-separated single-line form; literal tabs in the path are escaped.
        return ('%d\t%d\t%d\t%s\t%s' % (self.width, self.height, self.file_size, self.type, self.path.replace('\t', '\\t')))
    def to_str_row_verbose(self):
        # Same as to_str_row plus the full repr as a trailing '##' field.
        return ('%d\t%d\t%d\t%s\t%s\t##%s' % (self.width, self.height, self.file_size, self.type, self.path.replace('\t', '\\t'), self))
    def to_str_json(self, indent=None):
        # namedtuple -> OrderedDict -> JSON string.
        return json.dumps(self._asdict(), indent=indent)
|
def get_image_size(file_path):
    """Return (width, height) for an image file — no external dependencies
    beyond the os and struct builtin modules."""
    meta = get_image_metadata(file_path)
    return (meta.width, meta.height)
|
def get_image_size_from_bytesio(input, size):
    """Return (width, height) for image content held in an io object.

    Args:
        input (io.IOBase): io object supporting read & seek.
        size (int): size of the buffer in bytes.
    """
    meta = get_image_metadata_from_bytesio(input, size)
    return (meta.width, meta.height)
|
def get_image_metadata(file_path):
    """Return an `Image` record for the file at *file_path*.

    Opens the file in binary mode and delegates to
    get_image_metadata_from_bytesio, passing the on-disk size.

    Returns:
        Image: (path, type, file_size, width, height)
    """
    file_size = os.path.getsize(file_path)
    with io.open(file_path, 'rb') as handle:
        return get_image_metadata_from_bytesio(handle, file_size, file_path)
|
def get_image_metadata_from_bytesio(input, size, file_path=None):
    """Return an `Image` record for image content held in an io object.

    Sniffs GIF, PNG (modern and pre-IHDR layouts), JPEG, BMP, TIFF and ICO
    headers using only os/struct; no image library needed.

    Args:
        input (io.IOBase): io object supporting read & seek.
        size (int): size of the buffer in bytes.
        file_path (str): optional path recorded in the result.

    Returns:
        Image: (path, type, file_size, width, height)

    Raises:
        UnknownImageFormat: when the content cannot be decoded.
    """
    height = (- 1)
    width = (- 1)
    # 26 bytes is enough to discriminate every supported header.
    data = input.read(26)
    msg = ' raised while trying to decode as JPEG.'
    if ((size >= 10) and (data[:6] in (b'GIF87a', b'GIF89a'))):
        # GIF: little-endian 16-bit dims right after the signature.
        imgtype = GIF
        (w, h) = struct.unpack('<HH', data[6:10])
        width = int(w)
        height = int(h)
    elif ((size >= 24) and data.startswith(b'\x89PNG\r\n\x1a\n') and (data[12:16] == b'IHDR')):
        # Standard PNG: big-endian 32-bit dims inside the IHDR chunk.
        imgtype = PNG
        (w, h) = struct.unpack('>LL', data[16:24])
        width = int(w)
        height = int(h)
    elif ((size >= 16) and data.startswith(b'\x89PNG\r\n\x1a\n')):
        # Older PNG layout without the IHDR marker at offset 12.
        imgtype = PNG
        (w, h) = struct.unpack('>LL', data[8:16])
        width = int(w)
        height = int(h)
    elif ((size >= 2) and data.startswith(b'\xff\xd8')):
        # JPEG: walk the marker segments until a SOF0..SOF3 frame header
        # (0xC0-0xC3) which carries the dimensions; 0xDA (start-of-scan) ends
        # the search.
        imgtype = JPEG
        input.seek(0)
        input.read(2)
        b = input.read(1)
        try:
            while (b and (ord(b) != 218)):
                while (ord(b) != 255):
                    b = input.read(1)
                while (ord(b) == 255):
                    b = input.read(1)
                if ((ord(b) >= 192) and (ord(b) <= 195)):
                    # SOF marker: skip length+precision, then read height, width.
                    input.read(3)
                    (h, w) = struct.unpack('>HH', input.read(4))
                    break
                else:
                    # Skip this segment using its recorded length.
                    input.read((int(struct.unpack('>H', input.read(2))[0]) - 2))
                b = input.read(1)
            width = int(w)
            height = int(h)
        except struct.error:
            raise UnknownImageFormat(('StructError' + msg))
        except ValueError:
            raise UnknownImageFormat(('ValueError' + msg))
        except Exception as e:
            raise UnknownImageFormat((e.__class__.__name__ + msg))
    elif ((size >= 26) and data.startswith(b'BM')):
        # BMP: dimensions depend on the DIB header version.
        imgtype = 'BMP'
        headersize = struct.unpack('<I', data[14:18])[0]
        if (headersize == 12):
            # Old BITMAPCOREHEADER: unsigned 16-bit dims.
            (w, h) = struct.unpack('<HH', data[18:22])
            width = int(w)
            height = int(h)
        elif (headersize >= 40):
            # BITMAPINFOHEADER and later: signed 32-bit dims; height may be
            # negative for top-down bitmaps, hence abs().
            (w, h) = struct.unpack('<ii', data[18:26])
            width = int(w)
            height = abs(int(h))
        else:
            raise UnknownImageFormat(('Unkown DIB header size:' + str(headersize)))
    elif ((size >= 8) and (data[:4] in (b'II*\x00', b'MM\x00*'))):
        # TIFF: walk the first IFD looking for tags 256 (width) / 257 (height).
        imgtype = TIFF
        byteOrder = data[:2]
        # NOTE(review): byteOrder is bytes but is compared to the str 'MM' —
        # on Python 3 this is always False, so big-endian TIFFs would be read
        # little-endian; presumably this module targets Python 2. Verify.
        boChar = ('>' if (byteOrder == 'MM') else '<')
        # Map of TIFF field type id -> (byte size, struct format).
        tiffTypes = {1: (1, (boChar + 'B')), 2: (1, (boChar + 'c')), 3: (2, (boChar + 'H')), 4: (4, (boChar + 'L')), 5: (8, (boChar + 'LL')), 6: (1, (boChar + 'b')), 7: (1, (boChar + 'c')), 8: (2, (boChar + 'h')), 9: (4, (boChar + 'l')), 10: (8, (boChar + 'll')), 11: (4, (boChar + 'f')), 12: (8, (boChar + 'd'))}
        ifdOffset = struct.unpack((boChar + 'L'), data[4:8])[0]
        try:
            countSize = 2
            input.seek(ifdOffset)
            ec = input.read(countSize)
            ifdEntryCount = struct.unpack((boChar + 'H'), ec)[0]
            ifdEntrySize = 12
            for i in range(ifdEntryCount):
                entryOffset = ((ifdOffset + countSize) + (i * ifdEntrySize))
                input.seek(entryOffset)
                tag = input.read(2)
                tag = struct.unpack((boChar + 'H'), tag)[0]
                if ((tag == 256) or (tag == 257)):
                    # ImageWidth / ImageLength tag: decode its value in place.
                    type = input.read(2)
                    type = struct.unpack((boChar + 'H'), type)[0]
                    if (type not in tiffTypes):
                        raise UnknownImageFormat(('Unkown TIFF field type:' + str(type)))
                    typeSize = tiffTypes[type][0]
                    typeChar = tiffTypes[type][1]
                    input.seek((entryOffset + 8))
                    value = input.read(typeSize)
                    value = int(struct.unpack(typeChar, value)[0])
                    if (tag == 256):
                        width = value
                    else:
                        height = value
                if ((width > (- 1)) and (height > (- 1))):
                    break
        except Exception as e:
            raise UnknownImageFormat(str(e))
    elif (size >= 2):
        # Fallback: try ICO/CUR. NOTE(review): any unrecognized 2+ byte blob
        # lands here and is rejected only if its first word is nonzero.
        imgtype = 'ICO'
        input.seek(0)
        reserved = input.read(2)
        if (0 != struct.unpack('<H', reserved)[0]):
            raise UnknownImageFormat(FILE_UNKNOWN)
        format = input.read(2)
        assert (1 == struct.unpack('<H', format)[0])
        num = input.read(2)
        num = struct.unpack('<H', num)[0]
        if (num > 1):
            import warnings
            warnings.warn('ICO File contains more than one image')
        # Dimensions of the first icon entry are single bytes (0 means 256,
        # not handled here).
        w = input.read(1)
        h = input.read(1)
        width = ord(w)
        height = ord(h)
    else:
        raise UnknownImageFormat(FILE_UNKNOWN)
    return Image(path=file_path, type=imgtype, file_size=size, width=width, height=height)
|
class Test_get_image_size(unittest.TestCase):
    """Unit tests for the image-size sniffing helpers.

    Relies on a fixture file 'lookmanodeps.png' (and 'README.rst') being
    present in the working directory.
    """
    # Expected metadata for the fixture image.
    data = [{'path': 'lookmanodeps.png', 'width': 251, 'height': 208, 'file_size': 22228, 'type': 'PNG'}]
    def setUp(self):
        pass
    def test_get_image_size_from_bytesio(self):
        # Read the fixture into an in-memory BytesIO and sniff from that.
        img = self.data[0]
        p = img['path']
        with io.open(p, 'rb') as fp:
            b = fp.read()
        fp = io.BytesIO(b)
        sz = len(b)
        output = get_image_size_from_bytesio(fp, sz)
        self.assertTrue(output)
        self.assertEqual(output, (img['width'], img['height']))
    def test_get_image_metadata_from_bytesio(self):
        # Same as above but checking every Image field; path is None because
        # the BytesIO variant has no file path.
        img = self.data[0]
        p = img['path']
        with io.open(p, 'rb') as fp:
            b = fp.read()
        fp = io.BytesIO(b)
        sz = len(b)
        output = get_image_metadata_from_bytesio(fp, sz)
        self.assertTrue(output)
        for field in image_fields:
            self.assertEqual(getattr(output, field), (None if (field == 'path') else img[field]))
    def test_get_image_metadata(self):
        img = self.data[0]
        output = get_image_metadata(img['path'])
        self.assertTrue(output)
        for field in image_fields:
            self.assertEqual(getattr(output, field), img[field])
    def test_get_image_metadata__ENOENT_OSError(self):
        # Missing files surface as OSError from os.path.getsize/io.open.
        with self.assertRaises(OSError):
            get_image_metadata('THIS_DOES_NOT_EXIST')
    def test_get_image_metadata__not_an_image_UnknownImageFormat(self):
        # A text file must be rejected with the module's own exception.
        with self.assertRaises(UnknownImageFormat):
            get_image_metadata('README.rst')
    def test_get_image_size(self):
        img = self.data[0]
        output = get_image_size(img['path'])
        self.assertTrue(output)
        self.assertEqual(output, (img['width'], img['height']))
    def tearDown(self):
        pass
|
def main(argv=None):
    """Print image metadata fields for the given file path(s).

    Keyword Arguments:
        argv (list): commandline arguments (e.g. sys.argv[1:])
    Returns:
        int: zero for OK, 2 when any path failed to decode.
    """
    import logging
    import optparse
    import sys
    prs = optparse.OptionParser(usage='%prog [-v|--verbose] [--json|--json-indent] <path0> [<pathN>]', description='Print metadata for the given image paths (without image library bindings).')
    prs.add_option('--json', dest='json', action='store_true')
    prs.add_option('--json-indent', dest='json_indent', action='store_true')
    prs.add_option('-v', '--verbose', dest='verbose', action='store_true')
    prs.add_option('-q', '--quiet', dest='quiet', action='store_true')
    prs.add_option('-t', '--test', dest='run_tests', action='store_true')
    argv = (list(argv) if (argv is not None) else sys.argv[1:])
    (opts, args) = prs.parse_args(args=argv)
    # Verbosity flags map directly to log levels.
    loglevel = logging.INFO
    if opts.verbose:
        loglevel = logging.DEBUG
    elif opts.quiet:
        loglevel = logging.ERROR
    logging.basicConfig(level=loglevel)
    log = logging.getLogger()
    log.debug('argv: %r', argv)
    log.debug('opts: %r', opts)
    log.debug('args: %r', args)
    if opts.run_tests:
        # Hand the remaining args to unittest and let it take over.
        import sys
        sys.argv = ([sys.argv[0]] + args)
        import unittest
        return unittest.main()
    # Pick the output formatter; default is the tab-separated row form.
    output_func = Image.to_str_row
    if opts.json_indent:
        import functools
        output_func = functools.partial(Image.to_str_json, indent=2)
    elif opts.json:
        output_func = Image.to_str_json
    elif opts.verbose:
        output_func = Image.to_str_row_verbose
    EX_OK = 0
    EX_NOT_OK = 2
    if (len(args) < 1):
        prs.print_help()
        print('')
        prs.error('You must specify one or more paths to image files')
    # Best-effort over every path: collect failures instead of aborting.
    errors = []
    for path_arg in args:
        try:
            img = get_image_metadata(path_arg)
            print(output_func(img))
        except KeyboardInterrupt:
            raise
        except OSError as e:
            log.error((path_arg, e))
            errors.append((path_arg, e))
        except Exception as e:
            log.exception(e)
            errors.append((path_arg, e))
            pass
    if len(errors):
        import pprint
        print('ERRORS', file=sys.stderr)
        print('======', file=sys.stderr)
        print(pprint.pformat(errors, indent=2), file=sys.stderr)
        return EX_NOT_OK
    return EX_OK
|
def load_checkpoint(prefix, epoch):
    """Load a model checkpoint from '<prefix>-<epoch:04d>.params'.

    Args:
        prefix: prefix of the model name.
        epoch: epoch number of the model to load.

    Returns:
        (arg_params, aux_params): dicts of name -> NDArray for the net's
        weights and auxiliary states respectively.
    """
    save_dict = mx.nd.load('%s-%04d.params' % (prefix, epoch))
    arg_params = {}
    aux_params = {}
    # Keys are stored as 'arg:<name>' or 'aux:<name>'; split on the first ':'.
    for (key, value) in save_dict.items():
        (kind, name) = key.split(':', 1)
        if kind == 'arg':
            arg_params[name] = value
        elif kind == 'aux':
            aux_params[name] = value
    return (arg_params, aux_params)
|
def convert_context(params, ctx):
    """Return a copy of *params* with every NDArray moved to context *ctx*.

    Args:
        params: dict of str -> NDArray.
        ctx: the target context.

    Returns:
        dict of str -> NDArray living on *ctx*.
    """
    return {name: arr.as_in_context(ctx) for (name, arr) in params.items()}
|
def load_param(prefix, epoch, convert=False, ctx=None, process=False):
    """Wrapper for load_checkpoint with optional post-processing.

    Args:
        prefix: prefix of the model name.
        epoch: epoch number of the model to load.
        convert: move all parameters to *ctx* after loading.
        ctx: target context; defaults to mx.cpu() when convert is set.
        process: rename every '<name>_test' parameter to '<name>'.

    Returns:
        (arg_params, aux_params) dicts.
    """
    (arg_params, aux_params) = load_checkpoint(prefix, epoch)
    if convert:
        ctx = mx.cpu() if ctx is None else ctx
        arg_params = convert_context(arg_params, ctx)
        aux_params = convert_context(aux_params, ctx)
    if process:
        # Snapshot the matching keys first — the dict is mutated in the loop.
        for key in [k for k in arg_params.keys() if '_test' in k]:
            arg_params[key.replace('_test', '')] = arg_params.pop(key)
    return (arg_params, aux_params)
|
class WarmupMultiFactorScheduler(LRScheduler):
    """Reduce the learning rate by *factor* at the update counts in *step*,
    optionally holding a fixed warmup learning rate first.

    After n weight updates the learning rate is
    base_lr * factor^(number of entries in step that are < n),
    except that during the first warmup_step updates (when warmup is on)
    the constant warmup_lr is returned instead.

    Parameters
    ----------
    step: list of int
        strictly increasing update counts at which to decay the lr
    factor: float
        multiplicative decay factor (must be <= 1)
    warmup: bool
        whether to return warmup_lr for the first warmup_step updates
    warmup_lr: float
        learning rate used during warmup
    warmup_step: int
        number of updates the warmup lasts
    """
    def __init__(self, step, factor=1, warmup=False, warmup_lr=0, warmup_step=0):
        super(WarmupMultiFactorScheduler, self).__init__()
        # Validate the schedule: non-empty, strictly increasing, steps >= 1.
        assert (isinstance(step, list) and (len(step) >= 1))
        for (i, _step) in enumerate(step):
            if ((i != 0) and (step[i] <= step[(i - 1)])):
                raise ValueError('Schedule step must be an increasing integer list')
            if (_step < 1):
                raise ValueError('Schedule step must be greater or equal than 1 round')
        if (factor > 1.0):
            raise ValueError('Factor must be no more than 1 to make lr reduce')
        self.step = step
        # Index of the next decay milestone not yet applied.
        self.cur_step_ind = 0
        self.factor = factor
        self.count = 0
        self.warmup = warmup
        self.warmup_lr = warmup_lr
        self.warmup_step = warmup_step
    def __call__(self, num_update):
        """Return the learning rate for the given update count.

        Parameters
        ----------
        num_update: int
            the maximal number of updates applied to a weight.
        """
        if (self.warmup and (num_update < self.warmup_step)):
            return self.warmup_lr
        # Apply every milestone that num_update has passed; base_lr is
        # mutated in place so later calls continue from the decayed value.
        while (self.cur_step_ind <= (len(self.step) - 1)):
            if (num_update > self.step[self.cur_step_ind]):
                self.count = self.step[self.cur_step_ind]
                self.cur_step_ind += 1
                self.base_lr *= self.factor
                logging.info('Update[%d]: Change learning rate to %0.5e', num_update, self.base_lr)
            else:
                return self.base_lr
        return self.base_lr
|
def save_checkpoint(prefix, epoch, arg_params, aux_params):
    """Checkpoint the model parameters to '<prefix>-<epoch:04d>.params'.

    Args:
        prefix: prefix of the model name.
        epoch: epoch number of the model.
        arg_params: dict of name -> NDArray of the net's weights.
        aux_params: dict of name -> NDArray of the net's auxiliary states.
    """
    # Tag each entry so load_checkpoint can split args from aux states.
    save_dict = {}
    for (name, arr) in arg_params.items():
        save_dict['arg:%s' % name] = arr
    for (name, arr) in aux_params.items():
        save_dict['aux:%s' % name] = arr
    mx.nd.save('%s-%04d.params' % (prefix, epoch), save_dict)
|
class Symbol():
    """Base class for network symbol builders.

    Subclasses implement get_symbol()/init_weights(); this base caches the
    generated symbol plus the shape dictionaries inferred from it.
    """
    def __init__(self):
        # Populated by infer_shape() once a symbol exists.
        self.arg_shape_dict = None
        self.out_shape_dict = None
        self.aux_shape_dict = None
        self.sym = None
    @property
    def symbol(self):
        """The last symbol produced by get_symbol()."""
        return self.sym
    def get_symbol(self, cfg, is_train=True):
        """Return a generated symbol; implementations must also assign it to self.sym."""
        raise NotImplementedError()
    def init_weights(self, cfg, arg_params, aux_params):
        """Initialize the parameter dicts in place; implemented by subclasses."""
        raise NotImplementedError()
    def get_msra_std(self, shape):
        """Return the MSRA (He) init std-dev sqrt(2 / fan_in) for a weight shape.

        fan_in is shape[1] (input channels) times the product of any
        remaining (kernel) dimensions.
        """
        fan_in = float(shape[1])
        if len(shape) > 2:
            fan_in *= np.prod(shape[2:])
        # fix: removed a stray debug print of the result that polluted stdout.
        return np.sqrt(2 / fan_in)
    def infer_shape(self, data_shape_dict):
        """Run shape inference on self.sym and cache arg/out/aux shape dicts."""
        (arg_shape, out_shape, aux_shape) = self.sym.infer_shape(**data_shape_dict)
        self.arg_shape_dict = dict(zip(self.sym.list_arguments(), arg_shape))
        self.out_shape_dict = dict(zip(self.sym.list_outputs(), out_shape))
        self.aux_shape_dict = dict(zip(self.sym.list_auxiliary_states(), aux_shape))
    def check_parameter_shapes(self, arg_params, aux_params, data_shape_dict, is_train=True):
        """Assert every symbol argument/aux state is initialized with the inferred shape.

        Data inputs, (at test time) label inputs, and the special
        'const_eq_' / 'cls_reps' argument families are exempt from the check.
        """
        for k in self.sym.list_arguments():
            if ((k in data_shape_dict) or (False if is_train else ('label' in k)) or ('const_eq_' in k) or ('cls_reps' in k)):
                continue
            assert (k in arg_params), (k + ' not initialized')
            assert (arg_params[k].shape == self.arg_shape_dict[k]), ((((('shape inconsistent for ' + k) + ' inferred ') + str(self.arg_shape_dict[k])) + ' provided ') + str(arg_params[k].shape))
        for k in self.sym.list_auxiliary_states():
            assert (k in aux_params), (k + ' not initialized')
            assert (aux_params[k].shape == self.aux_shape_dict[k]), ((((('shape inconsistent for ' + k) + ' inferred ') + str(self.aux_shape_dict[k])) + ' provided ') + str(aux_params[k].shape))
|
def tic():
    """Start the global tic/toc stopwatch and return the recorded start time."""
    import time
    global startTime_for_tictoc
    now = time.time()
    startTime_for_tictoc = now
    return now
|
def toc():
    """Return seconds elapsed since the last tic(), or None if tic() was never called.

    Bug fix: `time` was referenced without an import in this scope (tic()'s
    `import time` is function-local), which raises NameError when the module
    lacks a top-level `import time`.
    """
    import time
    if ('startTime_for_tictoc' in globals()):
        endTime = time.time()
        return (endTime - startTime_for_tictoc)
    else:
        return None
|
def get_dets_class_names(dets_n):
    """Append to each detection a list of readable class names.

    Each predicted id in det[3] is mapped through the prd2ename table when
    present, otherwise kept as-is; the resulting list is appended to the
    detection record. Returns the (mutated) input list.
    """
    for det in dets_n:
        resolved = [prd2ename[p] if p in prd2ename.keys() else p for p in det[3]]
        det += [resolved]
    return dets_n
|
def print_typical_tray_multiview():
    """Show the left/top/right views of tray image 15849 side by side."""
    fig = plt.figure(1)
    ff = 3
    fig.set_size_inches((ff * 8.5, 3 * ff * 11), forward=False)
    for pos, view in enumerate(['left', 'top', 'right'], start=1):
        image = mpimg.imread(os.path.join(data_root, '15849_%s.jpg' % view))
        plt.subplot(1, 3, pos)
        plt.imshow(image)
        plt.axis('off')
|
def display_few_shot_examples():
    """Display the three few-shot enrollment example images in one row."""
    examples = ['5cadb37d4b967f67d3047964.jpg', '12686073-1-white.jpg', 'lacoste-logo-sweatshirt-white-p12654-72879_image.jpg']
    fig = plt.figure(2)
    ff = 2
    fig.set_size_inches((ff * 8.5, 3 * ff * 11), forward=False)
    for pos, basename in enumerate(examples, start=1):
        image = mpimg.imread(os.path.join(data_root, basename))
        plt.subplot(1, 3, pos)
        plt.imshow(image)
        plt.axis('off')
|
def test_on_query_image(fs_serv, test_img_fname, det_engines):
    """Run detection on one query image and save an annotated copy to disp_folder."""
    image = cv2.imread(test_img_fname, (cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION))
    detections = fs_serv.detect_on_image(image, score_thresh=0.1, det_engines=det_engines)
    detections = get_dets_class_names(detections)
    out_name = os.path.basename(test_img_fname).replace('.jpg', '_disp_n.jpg')
    out_path = os.path.join(disp_folder, out_name)
    # NOTE(review): the disp_dets defined later in this file takes no
    # figure_factor keyword -- confirm which overload is in scope here.
    disp_dets(image, detections, out_path, figure_factor=2.0)
|
def get_box_proposal(fs_serv, img_path):
    """Fetch box proposals for an image and save a visualization to disp_folder."""
    from show_boxes import show_detsB_boxes
    proposals = fs_serv.get_box_proposal(img_path)
    out_path = os.path.join(disp_folder, 'box_prop_{0}'.format(os.path.basename(img_path)))
    bgr = cv2.imread(img_path, (cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION))
    rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
    show_detsB_boxes(rgb, proposals, save_file_path=out_path)
|
def disp_dets2(img, dets, save_file_path):
    """Draw detection boxes plus candidate-label rows on img and save the figure.

    Each det is [bbox, scores, ?, labels, pn]; pn == 'p' draws a red box,
    anything else green. One white caption strip and text row is drawn per
    (score, label) candidate pair below the box top.
    """
    import matplotlib.pyplot as plt
    fig = plt.figure(1)
    fig.set_size_inches((1.4 * 8.5, 1.4 * 11), forward=False)
    plt.axis('off')
    canvas = cv2.cvtColor(copy.deepcopy(img), cv2.COLOR_BGR2RGB)
    w = 12
    for det in dets:
        x0, y0, x1, y1 = (int(det[0][0]), int(det[0][1]), int(det[0][2]), int(det[0][3]))
        scores, labels, pn = det[1], det[3], det[4]
        box_color = (255, 0, 0) if pn == 'p' else (0, 255, 0)
        cv2.rectangle(canvas, (x0, y0), (x1, y1), box_color, 4)
        pairs = list(zip(scores, labels))
        # White background strips first, so text never sits under a strip.
        for row in range(len(pairs)):
            cv2.rectangle(canvas, (x0, y0 - w), (x0 + 450, y0 + w + 2 * w * row), (255, 255, 255), cv2.FILLED)
        for row, (score, label) in enumerate(pairs):
            caption = '{0} - {1:.3f} {2}'.format(label, score, id2name[label])
            cv2.putText(canvas, caption, (x0, y0 + 2 * w * row), cv2.FONT_HERSHEY_SIMPLEX, 0.6, color=(0, 0, 0), thickness=2)
    plt.imshow(canvas)
    fig.savefig(save_file_path)
|
def test_on_query_image(fs_serv, test_img_fname, score_thresh=0.1, det_engines=1, figure_factor=2.2, FontScale=2.3):
    """Detect on a query image and save the annotated result into disp_folder."""
    image = cv2.imread(test_img_fname, (cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION))
    detections = fs_serv.detect_on_image(image, score_thresh=score_thresh, det_engines=det_engines)
    out_name = os.path.basename(test_img_fname).replace('.jpg', '_disp_n.jpg')
    # NOTE(review): the disp_dets2 defined above accepts only
    # (img, dets, save_file_path); the extra keywords passed here would raise
    # TypeError against it -- confirm which implementation is in scope.
    disp_dets2(image, detections, os.path.join(disp_folder, out_name), figure_factor=figure_factor, FontScale=FontScale)
|
def display_RedHat_examples():
    """Show the RedHat enrollment example logos in a single row of three."""
    enrollment_root = '../data/Logo/logo_usecase_data/RedHat/RedHat_enrollment'
    examples = ['logo1.jpg', 'logo2.jpg', 'logo3.jpg']
    fig = plt.figure(2)
    ff = 2
    fig.set_size_inches((ff * 8.5, 3 * ff * 11), forward=False)
    for pos, basename in enumerate(examples, start=1):
        plt.subplot(1, 3, pos)
        plt.imshow(mpimg.imread(os.path.join(enrollment_root, basename)))
        plt.axis('off')
|
def display_lacoste_examples():
    """Show the Lacoste enrollment example images in a single row of three."""
    enrollment_root = '../data/Logo/logo_usecase_data/lacoste/lacoste_enrollment/images'
    examples = ['5cadb37d4b967f67d3047964.jpg', 'lacoste-logo-sweatshirt-white-p12654-72879_image.jpg', '12686073-1-white.jpg']
    fig = plt.figure(2)
    ff = 2
    fig.set_size_inches((ff * 8.5, 3 * ff * 11), forward=False)
    for pos, basename in enumerate(examples, start=1):
        plt.subplot(1, 3, pos)
        plt.imshow(mpimg.imread(os.path.join(enrollment_root, basename)))
        plt.axis('off')
|
def display_few_shot_examples():
    """Show four food enrollment crops in a single row."""
    data_root = '/dccstor/jsdata1/dev/RepMet/notebooks/food_usecase_data'
    image_set = ['PRDS990000000000000025_0_192_501_589_885_top.jpg', 'PRDS990000000000000024_0_119_137_523_447_top.jpg', 'PRDS990000000000000023_0_118_208_470_612_top.jpg', 'PRDS990000000000000021_0_571_234_923_608_top.jpg']
    nrows = 1
    ncols = 4
    fig = plt.figure(2)
    ff = 2
    fig.set_size_inches((ff * 8.5, 3 * ff * 11), forward=False)
    for pos, basename in enumerate(image_set, start=1):
        plt.subplot(nrows, ncols, pos)
        plt.imshow(mpimg.imread(os.path.join(data_root, basename)))
        plt.axis('off')
|
def print_typical_tray_multiview():
    """Show the left/top/right views of tray 1007_10000000006530 side by side."""
    fig = plt.figure(1)
    ff = 3
    fig.set_size_inches((ff * 8.5, 3 * ff * 11), forward=False)
    for pos, view in enumerate(['left', 'top', 'right'], start=1):
        image = mpimg.imread(os.path.join(data_root, '1007_10000000006530_%s.jpg' % view))
        plt.subplot(1, 3, pos)
        plt.imshow(image)
        plt.axis('off')
|
def disp_dets(img, dets, save_file_path):
    """Draw detection boxes (color keyed by det[4]) and top-label captions, then save.

    Each det is [bbox, scores, ?, labels, pn, (optional extra caption list)].
    pn selects the box color: 'p' red, 'n' green, 'c' blue, 'a' yellow.
    """
    import matplotlib.pyplot as plt
    fig = plt.figure(1)
    ff = 2
    fig.set_size_inches((ff * 8.5, ff * 11), forward=False)
    plt.axis('off')
    canvas = cv2.cvtColor(copy.deepcopy(img), cv2.COLOR_BGR2RGB)
    box_colors = {'p': (255, 0, 0), 'n': (0, 255, 0), 'c': (0, 0, 255), 'a': (0, 255, 255)}
    # Pass 1: all boxes.
    for det in dets:
        x0, y0, x1, y1 = (int(det[0][0]), int(det[0][1]), int(det[0][2]), int(det[0][3]))
        color = box_colors.get(det[4])
        if color is not None:
            cv2.rectangle(canvas, (x0, y0), (x1, y1), color, 4)
    # Pass 2: captions, drawn after the boxes so the text stays on top.
    bk_w = 17
    FontScale = 0.6
    for det in dets:
        left = int(det[0][0])
        top = int(det[0][1])
        caption = '{0} - {1:.3f}'.format(det[3][0], det[1][0])
        string_len = (len(caption) * 13)
        cv2.rectangle(canvas, (left - 2, top - bk_w), (left + string_len, top + bk_w), (255, 255, 255), cv2.FILLED)
        cv2.putText(canvas, caption, (left, top), cv2.FONT_HERSHEY_TRIPLEX, FontScale, color=(0, 0, 0), thickness=1)
        if (len(det) == 6):
            caption = det[5][0]
            string_len = (len(caption) * 13)
            cv2.rectangle(canvas, (left - 2, top + bk_w), (left + string_len, top + 3 * bk_w), (255, 255, 255), cv2.FILLED)
            cv2.putText(canvas, caption, (left, top + 2 * bk_w), cv2.FONT_HERSHEY_TRIPLEX, FontScale, color=(0, 0, 0), thickness=1)
    plt.imshow(canvas)
    fig.savefig(save_file_path)
|
def test_on_query_image(fs_serv, test_img_fname, det_engines):
    """Detect on a query image, map product ids to names, and save an annotated copy."""
    prd2ename_fname = '/dccstor/jsdata1/dev/RepMet/data/JES_pilot/all_GT.csv_converted_Feb24_prd2ename.csv'
    # NOTE(review): np.load on a .csv path assumes the file is actually an
    # npz/pickled archive exposing a 'prd2ename' entry -- confirm.
    prd2ename = np.load(prd2ename_fname)['prd2ename']
    img = cv2.imread(test_img_fname, (cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION))
    disp_folder = '../notebooks/food_usecase_data'
    dets_n = fs_serv.detect_on_image(img, score_thresh=0.1, num_candidates=3, det_engines=det_engines)
    # Append readable class names for every candidate product id.
    for det in dets_n:
        det += [[prd2ename[p] if p in prd2ename.keys() else p for p in det[3]]]
    base = os.path.basename(test_img_fname)
    img_file_path = os.path.join(disp_folder, base.replace('.jpg', '_disp_n.jpg'))
    det_file_path = os.path.join(disp_folder, base.replace('.jpg', '_dets_n.txt'))  # currently unused
    disp_dets(img, dets_n, img_file_path)
|
def get_box_proposal(fs_serv, img_path):
    """Fetch box proposals for an image and save their visualization to the notebook folder."""
    from show_boxes import show_detsB_boxes
    output_folder = '../notebooks/food_usecase_data'
    proposals = fs_serv.get_box_proposal(img_path)
    out_path = os.path.join(output_folder, 'box_prop_{0}'.format(os.path.basename(img_path)))
    bgr = cv2.imread(img_path, (cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION))
    show_detsB_boxes(cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB), proposals, save_file_path=out_path)
|
def display_few_shot_examples():
    """Display the retail few-shot example images in an nrows x ncols grid."""
    data_root = '/opt/DNN/dataset/retail/retail_test'
    image_set = ['0de3ec888566edc2.jpg', '2e90529bcb43f44c.jpg', '15a8f82801fe4911.jpg', '34caf4cf9ae0ae92.jpg']
    nrows = 1
    # Bug fix: np.ceil returns a float, but plt.subplot requires integer grid
    # dimensions (TypeError on recent matplotlib); cast to int.
    ncols = int(np.ceil(len(image_set) / float(nrows)))
    print('nrows={0},ncols={1}'.format(nrows, ncols))
    fig = plt.figure(2)
    ff = 2
    fig.set_size_inches(((ff * 8.5), ((3 * ff) * 11)), forward=False)
    for (cnt, img_basename) in enumerate(image_set):
        imgname = os.path.join(data_root, img_basename)
        img = mpimg.imread(imgname)
        plt.subplot(nrows, ncols, (cnt + 1))
        plt.imshow(img)
        plt.axis('off')
|
def print_typical_tray_multiview():
    """Show three retail test images side by side in one row."""
    data_root = '/opt/DNN/dataset/retail/retail_test'
    names = ['0de3ec888566edc2.jpg', '2e90529bcb43f44c.jpg', '15a8f82801fe4911.jpg']
    fig = plt.figure(1)
    ff = 3
    fig.set_size_inches((ff * 8.5, 3 * ff * 11), forward=False)
    for pos, basename in enumerate(names, start=1):
        plt.subplot(1, 3, pos)
        plt.imshow(mpimg.imread(os.path.join(data_root, basename)))
        plt.axis('off')
|
def oid2coco():
    """Convert OpenImages retail box annotations to a COCO-style JSON file.

    Reads every .jpg under img_dir plus the boxes CSV, writes a COCO
    'instances' JSON and a newline-separated category-name list next to it.
    """
    img_dir = '/opt/DNN/dataset/openimage/openimage_train'
    boxes_csv = '/opt/DNN/dataset/openimage/retail.csv'
    json_file = '/opt/DNN/dataset/openimage/annotations/instances_openimage_train.json'
    classes_csv = '/opt/DNN/linkdata/eval_dataset/open_images/metadata/class-descriptions-boxable.csv'
    with open(classes_csv, 'r') as classes_file:
        classes = csv.reader(classes_file)
        class_dict = dict(((rows[0], rows[1]) for rows in classes))
    # Bug fix: dict.keys()/values() are views on Python 3 and have no
    # .index(); materialize them as lists once.  Both lists come from the
    # same dict, so their ordering stays paired.
    classes_keys = list(class_dict.keys())
    classes_values = list(class_dict.values())
    (images, anns, categories) = ([], [], [])
    img_paths = [x for x in glob.glob((img_dir + '/*.jpg'))]
    i = 1
    for img_path in sorted(img_paths):
        # Close the image promptly; only its size is needed.
        with Image.open(img_path) as img:
            (width, height) = img.size
        (_, img_name) = os.path.split(img_path)
        images.append({'file_name': img_name, 'id': i, 'height': height, 'width': width})
        i += 1
    ann_index = 1
    i = 0
    with open(boxes_csv, 'r') as boxes:
        lines = csv.reader(boxes)
        file_name_last_line = ''
        for line in lines:
            if (line[0] == 'ImageID'):  # skip the CSV header row
                continue
            file_name = (line[0] + '.jpg')
            full_image_path = os.path.join(img_dir, file_name)
            if not os.path.exists(full_image_path):
                continue
            # Image ids follow the order of first appearance in the CSV.
            if (file_name != file_name_last_line):
                i += 1
            with Image.open(full_image_path) as img:
                (width, height) = img.size
            # OpenImages boxes are normalized [XMin, XMax, YMin, YMax].
            xmin = float(line[4])
            xmax = float(line[5])
            ymin = float(line[6])
            ymax = float(line[7])
            area = int((((xmax - xmin) * width) * ((ymax - ymin) * height)))
            bbox = [int((xmin * width)), int((ymin * height)), int(((xmax - xmin) * width)), int(((ymax - ymin) * height))]
            category = line[2]
            cat_id = classes_keys.index(category)
            anns.append({'segmentation': [], 'area': area, 'iscrowd': 0, 'image_id': i, 'bbox': bbox, 'category_id': cat_id, 'id': ann_index, 'ignore': 0})
            file_name_last_line = file_name
            ann_index += 1
    for cate in classes_values:
        categories.append({'supercategory': 'none', 'id': classes_values.index(cate), 'name': cate})
    data = {'images': images, 'type': 'instances', 'annotations': anns, 'categories': categories}
    with open(json_file, 'w') as outfile:
        json.dump(data, outfile)
    with open((os.path.split(json_file)[0] + '/train_cats_split.txt'), 'w') as fid:
        fid.writelines('\n'.join(classes_values))
|
def show_detsB_boxes(im, dets_B, scale=1.0, save_file_path='temp.png'):
    """Plot detection boxes with random colors and score/category captions.

    NOTE(review): save_file_path is accepted but the figure is never written
    to disk here -- callers expecting a saved file should confirm.
    """
    fig = plt.figure(1)
    ff = 1.0
    fig.set_size_inches((ff * 8.5, ff * 11), forward=False)
    plt.cla()
    plt.axis('off')
    plt.imshow(im)
    for det in dets_B:
        row = det[0]
        category = det[2]
        box = (row[:4] * scale)
        color = (rand(), rand(), rand())
        patch = plt.Rectangle((box[0], box[1]), box[2] - box[0], box[3] - box[1], fill=False, edgecolor=color, linewidth=2.5)
        plt.gca().add_patch(patch)
        caption = '{:s} {:.3f}'.format(category, row[-1])
        plt.gca().text(box[0], box[1], caption, bbox=dict(facecolor=color, alpha=0.5), fontsize=16, color='white')
|
def display_few_shot_test(benchmark, query_image):
    """Run the benchmark detector on query_image and plot boxes scoring above 0.2."""
    import shutil
    from FSD_engine import FSD_RepMet as DetectionEngine
    from config.bench_config import bcfg, update_bench_config
    import cv2
    update_bench_config('../experiments/bench_configs/openimage_3_5_10_1.yaml')
    (q_dets_B, q_dets_multi_B) = benchmark.det_eng.detect_on_image(query_image, num_candidates=5, cand_ver=1)
    bgr = cv2.imread(query_image, (cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION))
    rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
    # Keep only detections whose score (index 4 of the box row) passes 0.2.
    kept = [item for item in q_dets_B if (item[0][4] > 0.2)]
    show_detsB_boxes(rgb, kept, save_file_path='/opt/DNN/dataset/retail/openimage_3_5_10_1/temp.jpg')
|
def Conv(incoming, num_filters, filter_size=3, stride=(1, 1), pad='same', W=lasagne.init.HeNormal(), b=None, nonlinearity=lasagne.nonlinearities.rectify, **kwargs):
    """ConvLayer wrapper with project-default initialization, padding and naming."""
    ensure_set_name('conv', kwargs)
    layer = ConvLayer(incoming, num_filters, filter_size, stride, pad, W=W, b=b, nonlinearity=nonlinearity, **kwargs)
    return layer
|
class ConvPrelu(Layer):
    """Convolution followed by a PReLU nonlinearity, exposed as one layer."""

    def __init__(self, incoming, num_filters, filter_size=3, stride=(1, 1), pad='same', W=lasagne.init.HeNormal(), b=None, **kwargs):
        ensure_set_name('conv_prelu', kwargs)
        super(ConvPrelu, self).__init__(incoming, **kwargs)
        self.conv = Conv(incoming, num_filters, filter_size, stride, pad=pad, W=W, b=b, nonlinearity=None, **kwargs)
        self.prelu = prelu(self.conv, **kwargs)
        # Merge the parameter registries so both sub-layers stay trainable.
        merged = self.conv.params.copy()
        merged.update(self.prelu.params)
        self.params = merged

    def get_output_for(self, input, **kwargs):
        return self.prelu.get_output_for(self.conv.get_output_for(input))

    def get_output_shape_for(self, input, **kwargs):
        return self.conv.get_output_shape_for(input)
|
class ConvAggr(Layer):
    """Linear (no nonlinearity) convolution used to aggregate channels."""

    def __init__(self, incoming, num_channels, filter_size=3, stride=(1, 1), pad='same', W=lasagne.init.HeNormal(), b=None, **kwargs):
        ensure_set_name('conv_aggr', kwargs)
        super(ConvAggr, self).__init__(incoming, **kwargs)
        self.conv = Conv(incoming, num_channels, filter_size, stride, pad=pad, W=W, b=b, nonlinearity=None, **kwargs)
        # Expose the wrapped convolution's parameters as this layer's own.
        self.params = self.conv.params.copy()

    def get_output_for(self, input, **kwargs):
        return self.conv.get_output_for(input)

    def get_output_shape_for(self, input_shape):
        return self.conv.get_output_shape_for(input_shape)
|
def Conv3D(incoming, num_filters, filter_size=3, stride=(1, 1, 1), pad='same', W=lasagne.init.HeNormal(), b=lasagne.init.Constant(), nonlinearity=lasagne.nonlinearities.rectify, **kwargs):
    """Conv3DLayer wrapper with project-default initialization, padding and naming."""
    ensure_set_name('conv3d', kwargs)
    layer = Conv3DLayer(incoming, num_filters, filter_size, stride, pad, W=W, b=b, nonlinearity=nonlinearity, **kwargs)
    return layer
|
class Conv3DPrelu(Layer):
    """3D convolution followed by a PReLU nonlinearity, exposed as one layer."""

    def __init__(self, incoming, num_filters, filter_size=3, stride=(1, 1, 1), pad='same', W=lasagne.init.HeNormal(), b=None, **kwargs):
        ensure_set_name('conv3d_prelu', kwargs)
        super(Conv3DPrelu, self).__init__(incoming, **kwargs)
        self.conv = Conv3D(incoming, num_filters, filter_size, stride, pad=pad, W=W, b=b, nonlinearity=None, **kwargs)
        self.prelu = prelu(self.conv, **kwargs)
        # Merge the parameter registries so both sub-layers stay trainable.
        merged = self.conv.params.copy()
        merged.update(self.prelu.params)
        self.params = merged

    def get_output_for(self, input, **kwargs):
        return self.prelu.get_output_for(self.conv.get_output_for(input))

    def get_output_shape_for(self, input, **kwargs):
        return self.conv.get_output_shape_for(input)
|
class Conv3DAggr(Layer):
    """Linear (no nonlinearity) 3D convolution used to aggregate channels."""

    def __init__(self, incoming, num_channels, filter_size=3, stride=(1, 1, 1), pad='same', W=lasagne.init.HeNormal(), b=None, **kwargs):
        ensure_set_name('conv3d_aggr', kwargs)
        super(Conv3DAggr, self).__init__(incoming, **kwargs)
        self.conv = Conv3D(incoming, num_channels, filter_size, stride, pad=pad, W=W, b=b, nonlinearity=None, **kwargs)
        # Expose the wrapped convolution's parameters as this layer's own.
        self.params = self.conv.params.copy()

    def get_output_for(self, input, **kwargs):
        return self.conv.get_output_for(input)

    def get_output_shape_for(self, input_shape):
        return self.conv.get_output_shape_for(input_shape)
|
class DataConsistencyLayer(MergeLayer):
    """Data consistency layer.

    Replaces entries of a reconstruction with the originally sampled
    k-space values (soft-combined when an inverse noise level is given).
    """
    def __init__(self, incomings, inv_noise_level=None, **kwargs):
        super(DataConsistencyLayer, self).__init__(incomings, **kwargs)
        # None/0 => hard replacement of sampled entries; otherwise a
        # noise-weighted average controlled by v = inv_noise_level.
        self.inv_noise_level = inv_noise_level
    def get_output_for(self, inputs, **kwargs):
        """Combine the reconstruction with the sampled k-space values.

        Parameters
        ----------
        inputs : two 4d tensors; first is the data, second the k-space samples.

        Returns
        -------
        4d tensor: data with sampled entries replaced (or noise-weighted).
        """
        x = inputs[0]
        x_sampled = inputs[1]
        v = self.inv_noise_level
        if v:
            out = ((x + (v * x_sampled)) / (1 + v))
        else:
            # Build a 0/1 mask of sampled locations by setting every nonzero
            # entry of x_sampled to 1, then keep x only where unsampled.
            # (Assumes a genuinely sampled value is never exactly 0.)
            mask = T.set_subtensor(x_sampled[T.neq(x_sampled, 0).nonzero()], 1)
            out = (((1 - mask) * x) + x_sampled)
        return out
    def get_output_shape_for(self, input_shapes, **kwargs):
        return input_shapes[0]
|
class DataConsistencyWithMaskLayer(MergeLayer):
    """Data consistency layer using an explicitly provided sampling mask."""

    def __init__(self, incomings, inv_noise_level=None, **kwargs):
        super(DataConsistencyWithMaskLayer, self).__init__(incomings, **kwargs)
        self.inv_noise_level = inv_noise_level

    def get_output_for(self, inputs, **kwargs):
        """Combine the reconstruction with the sampled k-space values.

        inputs: [data, mask, k-space samples], each a 4d tensor.  Sampled
        entries are replaced outright when no noise level is set, otherwise
        noise-weighted averaging is applied everywhere.
        """
        (recon, sample_mask, sampled) = inputs
        weight = self.inv_noise_level
        if weight:
            return ((recon + (weight * sampled)) / (1 + weight))
        return (((1 - sample_mask) * recon) + sampled)

    def get_output_shape_for(self, input_shapes, **kwargs):
        return input_shapes[0]
|
class DCLayer(MergeLayer):
    """Data consistency layer operating in image space.

    Internally builds the sub-graph: FFT to k-space -> enforce consistency
    with the sampled values (DataConsistencyWithMaskLayer) -> inverse FFT
    back to image space.
    """
    def __init__(self, incomings, data_shape, inv_noise_level=None, **kwargs):
        if ('name' not in kwargs):
            kwargs['name'] = 'dc'
        super(DCLayer, self).__init__(incomings, **kwargs)
        self.inv_noise_level = inv_noise_level
        # incomings are the (data, mask, sampled) input layers; keep handles
        # so get_output_for can bind runtime values to them.
        (data, mask, sampled) = incomings
        self.data = data
        self.mask = mask
        self.sampled = sampled
        self.dft2 = FFT2Layer(data, data_shape, name='dc_dft2')
        self.dc = DataConsistencyWithMaskLayer([self.dft2, mask, sampled], name='dc_consistency')
        self.idft2 = FFT2Layer(self.dc, data_shape, inv=True, name='dc_idft2')
    def get_output_for(self, inputs, **kwargs):
        # Evaluate the internal sub-graph with the three runtime inputs
        # bound to the corresponding input layers.
        x = inputs[0]
        mask = inputs[1]
        x_sampled = inputs[2]
        return get_output(self.idft2, {self.data: x, self.mask: mask, self.sampled: x_sampled})
    def get_output_shape_for(self, input_shapes, **kwargs):
        return input_shapes[0]
|
def ensure_set_name(default_name, kwargs):
    """Ensure that the layer kwargs contain a 'name' entry.

    Parameters
    ----------
    default_name : string
        Default name to set if neither 'name' nor 'pr' is present; if 'name'
        is absent but 'pr' is, the name becomes pr + default_name.
    kwargs : dict
        Keyword arguments given to layer factory functions (mutated in place).

    Returns
    -------
    kwargs : dict
    """
    # NOTE(review): this raise fires for ANY missing 'name', which makes the
    # default-naming branches below unreachable dead code -- presumably
    # warnings.warn was intended; confirm before relying on auto-naming.
    if ('name' not in kwargs):
        raise Warning("You need to name the layers, otherwise it simply won't work")
    # id_ctr is a module-level counter used to generate unique name suffixes.
    global id_ctr
    if (('name' in kwargs) and ('pr' in kwargs)):
        kwargs['name'] = (kwargs['pr'] + kwargs['name'])
    elif (('name' not in kwargs) and ('pr' in kwargs)):
        # Dead while the raise above is active.
        idx = next(id_ctr)
        kwargs['name'] = (((kwargs['pr'] + default_name) + '_g') + str(idx))
    elif ('name' not in kwargs):
        # Dead while the raise above is active.
        idx = next(id_ctr)
        kwargs['name'] = ((default_name + '_g') + str(idx))
    return kwargs
|
def get_dc_input_layers(shape):
    """Create the CNN input layers for 2D or 3D input.

    Returns
    -------
    (input_layer, kspace_input_layer, mask_layer) : tuple of InputLayer.
    """
    # Shapes longer than 4 axes need 5D tensor variables.
    make_var = tensor5 if len(shape) > 4 else T.tensor4
    input_var = make_var('input_var')
    kspace_input_var = make_var('kspace_input_var')
    mask_var = make_var('mask')
    data_layer = InputLayer(shape, input_var=input_var, name='input')
    kspace_layer = InputLayer(shape, input_var=kspace_input_var, name='kspace_input')
    mask_layer = InputLayer(shape, input_var=mask_var, name='mask')
    return (data_layer, kspace_layer, mask_layer)
|
def roll_and_sum(prior_result, orig):
    """Scan step: add orig to the running sum, then circular-shift it by one along the last axis."""
    return T.roll((prior_result + orig), 1, axis=(- 1))
|
class KspaceFillNeighbourLayer(MergeLayer):
    """k-space fill layer (periodic temporal boundary).

    The input data is assumed to be on the k-space grid; missing temporal
    lines are filled from neighbouring frames with circular wrap-around.
    This layer should be invoked from AverageInKspaceLayer.
    """
    def __init__(self, incomings, frame_dist=range(5), divide_by_n=False, **kwargs):
        super(KspaceFillNeighbourLayer, self).__init__(incomings, **kwargs)
        self.frame_dist = frame_dist
        # A sampling distance d covers a window of 2*d+1 frames.
        n_samples = [(1 + (2 * i)) for i in self.frame_dist]
        self.n_samples = n_samples
        self.divide_by_n = divide_by_n
    def get_output_for(self, inputs, **kwargs):
        """Fill missing k-space lines using neighbouring frames.

        Parameters
        ----------
        inputs : two 5d tensors [kspace_data, mask], each (n, 2, nx, ny, nt).

        Returns
        -------
        5d tensor; shape becomes (n * (len(frame_dist)+1), 2, nx, ny, nt).
        """
        x = inputs[0]
        mask = inputs[1]
        # Rolling cumulative sums along the temporal (last) axis: step k of
        # the scan holds sums over windows of k frames, with wrap-around.
        (result, _) = theano.scan(fn=roll_and_sum, outputs_info=T.zeros_like(x), non_sequences=x, n_steps=T.constant(np.max(self.n_samples)))
        (mask_result, _) = theano.scan(fn=roll_and_sum, outputs_info=T.zeros_like(x), non_sequences=mask, n_steps=T.constant(np.max(self.n_samples)))
        results = [x]
        for (i, t) in enumerate(self.n_samples):
            # Normalize by window size, or by the count of sampled entries.
            if self.divide_by_n:
                c = float(t)
            else:
                c = 1.0
            acc = result[(t - 1)]
            mask_acc = mask_result[(t - 1)]
            # Re-centre the summed window on the current frame, then average.
            avg = T.roll((acc / T.maximum(c, mask_acc)), ((- self.frame_dist[i]) - 1), axis=(- 1))
            # Keep original samples; fill only the unsampled positions.
            res = ((avg * (1 - mask)) + (x * mask))
            results.append(res)
        return T.concatenate(results, axis=1)
    def get_output_shape_for(self, input_shapes, **kwargs):
        (n, nc, nx, ny, nt) = input_shapes[0]
        nc_new = ((len(self.frame_dist) + 1) * nc)
        return (n, nc_new, nx, ny, nt)
|
class KspaceFillNeighbourLayer_Clipped(MergeLayer):
    """k-space fill layer with clipping at the temporal edges.

    The input data is assumed to be on the k-space grid; missing lines are
    filled from neighbouring frames without circular wrap-around.  This
    layer should be invoked from AverageInKspaceLayer.
    """
    def __init__(self, incomings, nt, frame_dist=range(5), divide_by_n=False, **kwargs):
        super(KspaceFillNeighbourLayer_Clipped, self).__init__(incomings, **kwargs)
        self.frame_dist = frame_dist
        # A sampling distance d covers a window of 2*d+1 frames.
        n_samples = [(1 + (2 * i)) for i in self.frame_dist]
        self.n_samples = n_samples
        self.divide_by_n = divide_by_n
        self.nt = nt
    def get_output_for(self, inputs, **kwargs):
        """Fill missing k-space lines using neighbouring frames (clipped).

        Parameters
        ----------
        inputs : two 5d tensors [kspace_data, mask], each (n, 2, nx, ny, nt).

        Returns
        -------
        5d tensor; shape becomes (n * (len(frame_dist)+1), 2, nx, ny, nt).
        """
        x = inputs[0]
        mask = inputs[1]
        results = [x]
        for (i, t) in enumerate(self.n_samples):
            # Bug fix: floor division keeps `dist` an integer on Python 3
            # ('/' would yield a float and break the slice bounds below);
            # identical result on Python 2 since t is an int.
            dist = (t // 2)
            if self.divide_by_n:
                c = float(t)
            else:
                c = 1.0
            def fn(i, input):
                # Sum frames in the clipped window [i-dist, i+dist].
                s = slice(T.maximum(0, (i - dist)), T.minimum(self.nt, ((i + dist) + 1)))
                return input[(..., s)].sum(axis=(- 1))
            (result, _) = theano.scan(fn, non_sequences=x, sequences=np.arange(self.nt))
            (mask_result, _) = theano.scan(fn, non_sequences=mask, sequences=np.arange(self.nt))
            # scan iterates over time; move the time axis back to the end.
            acc = T.transpose(result, axes=(1, 2, 3, 4, 0))
            mask_acc = T.transpose(mask_result, axes=(1, 2, 3, 4, 0))
            # Normalize by window size or by the count of sampled entries.
            avg = (acc / T.maximum(c, mask_acc))
            # Keep original samples; fill only the unsampled positions.
            res = ((avg * (1 - mask)) + (x * mask))
            results.append(res)
        return T.concatenate(results, axis=1)
    def get_output_shape_for(self, input_shapes, **kwargs):
        (n, nc, nx, ny, nt) = input_shapes[0]
        nc_new = ((len(self.frame_dist) + 1) * nc)
        return (n, nc_new, nx, ny, nt)
|
class AverageInKspaceLayer(MergeLayer):
    """Average-in-k-space layer.

    First transforms the representation into the Fourier domain, then
    performs averaging along the temporal axis, then transforms back to the
    image domain.  Works only for 5D tensors.

    Parameters
    ----------
    incomings : two 5d tensors [kspace_data, mask], each (n, 2, nx, ny, nt).
    data_shape : shape of the incoming tensors (n, 2, nx, ny, nt).
    frame_dist : list of neighbour distances, one per averaging channel;
        frame_dist=[1] samples from [-1, 1]; frame_dist=[3, 5] samples from
        [-3, ..., 3] and [-5, ..., 5] respectively.
    divide_by_n : bool; True divides by the window size (2*d+1), False by
        the number of nonzero contributions.
    clipped : bool; True clips averaging at the temporal boundary, False
        uses periodic (circular) boundary conditions.

    Returns
    -------
    5d tensor; shape becomes (n * (len(frame_dist)+1), 2, nx, ny, nt).
    """
    def __init__(self, incomings, data_shape, frame_dist=[1, 3, 5], divide_by_n=False, clipped=False, **kwargs):
        if ('name' not in kwargs):
            kwargs['name'] = 'kspace_averaging_layer'
        super(AverageInKspaceLayer, self).__init__(incomings, **kwargs)
        (data, mask) = incomings
        (n, nc, nx, ny, nt) = data_shape
        # Output channels: the original pair plus one pair per frame_dist.
        nc_new = ((len(frame_dist) + 1) * 2)
        self.data = data
        self.mask = mask
        self.frame_dist = frame_dist
        self.divide_by_n = divide_by_n
        # Sub-graph: dft2 -> neighbour fill/average -> reshape -> idft2.
        self.dft2 = FFT2Layer(data, data_shape, name='kavg_dft2')
        if clipped:
            self.kavg = KspaceFillNeighbourLayer_Clipped([self.dft2, mask], nt, frame_dist, divide_by_n, name='kavg_avg')
        else:
            self.kavg = KspaceFillNeighbourLayer([self.dft2, mask], frame_dist, divide_by_n, name='kavg_avg')
        # Fold the averaging channels into the batch axis so the inverse FFT
        # sees the expected (/, 2, nx, ny, nt) layout, then unfold again.
        self.kavg_tmp = lasagne.layers.reshape(self.kavg, ((- 1), 2, nx, ny, nt))
        self.idft2 = FFT2Layer(self.kavg_tmp, data_shape, inv=True, name='kavg_idft2')
        self.out = lasagne.layers.reshape(self.idft2, ((- 1), nc_new, nx, ny, nt))
    def get_output_for(self, inputs, **kwargs):
        # Evaluate the internal sub-graph with runtime inputs bound.
        x = inputs[0]
        mask = inputs[1]
        res = get_output(self.out, {self.data: x, self.mask: mask})
        return res
    def get_output_shape_for(self, input_shapes, **kwargs):
        return self.kavg.get_output_shape_for(input_shapes)
|
class PoolNDLayer(Layer):
    """ND pooling layer.

    Performs ND mean- or max-pooling over the trailing `n` axes of the
    input tensor by repeatedly applying 2D pooling to pairs of axes.  For
    odd `n`, a dummy trailing axis (pool 1, stride 1, pad 0) is appended so
    the axes pair up.

    Parameters
    ----------
    incoming : Layer instance or shape tuple.
    n : int, number of trailing axes to pool over.
    pool_size : int or n-iterable, pooling region length per dimension.
    stride : int, n-iterable or None (None => stride = pool_size).
    pad : int or n-iterable; each value must be less than the stride.
    ignore_border : bool; must be True whenever pad != 0.  False prevents
        Theano from using cuDNN, falling back to a slower implementation.
    mode : {'max', 'average_inc_pad', 'average_exc_pad'}.
    """
    def __init__(self, incoming, n, pool_size, stride=None, pad=0, ignore_border=True, mode='max', **kwargs):
        super(PoolNDLayer, self).__init__(incoming, **kwargs)
        self.n = n
        self.pool_size = lasagne.utils.as_tuple(pool_size, n)
        if (stride is None):
            self.stride = self.pool_size
        else:
            self.stride = lasagne.utils.as_tuple(stride, n)
        self.pad = lasagne.utils.as_tuple(pad, n)
        self.ignore_border = ignore_border
        self.mode = mode
        # Odd n: append a no-op axis so dimensions pair up for pool_2d.
        if ((n % 2) == 1):
            self.pool_size += (1,)
            self.pad += (0,)
            self.stride += (1,)
    def get_output_shape_for(self, input_shape):
        output_shape = list(input_shape)
        tr = (len(output_shape) - self.n)
        # Bug fix: use range() so this also runs on Python 3 (xrange is
        # Python 2 only); behavior on Python 2 is unchanged.
        for i in range(self.n):
            output_shape[(tr + i)] = pool.pool_output_length(input_shape[(tr + i)], pool_size=self.pool_size[i], stride=self.stride[i], pad=self.pad[i], ignore_border=self.ignore_border)
        return tuple(output_shape)
    def get_output_for(self, input, **kwargs):
        """Pool each pair of trailing axes with pool_2d, rotating the
        trailing axes between passes so every axis gets pooled once."""
        input_shape = input.shape
        n = self.n
        if ((n % 2) == 1):
            n += 1
            input = T.shape_padright(input, 1)
        n_axis = (input.ndim - n)
        for i in np.arange(0, n, 2):
            # Pool the current last two trailing axes.
            i1 = (((n - 2) + i) % n)
            i2 = (((n - 1) + i) % n)
            input = pool.pool_2d(input, ds=(self.pool_size[i1], self.pool_size[i2]), st=(self.stride[i1], self.stride[i2]), ignore_border=self.ignore_border, padding=(self.pad[i1], self.pad[i2]), mode=self.mode)
            # Rotate the trailing axes to expose the next unpooled pair.
            fixed = tuple(np.arange(n_axis))
            perm = tuple(((np.arange(2, (n + 2)) % n) + n_axis))
            shuffle = (fixed + perm)
            input = input.dimshuffle(shuffle)
        input = input.reshape(self.get_output_shape_for(input_shape))
        return input
|
class Upscale3DLayer(Layer):
    """3D upscaling layer.

    Performs 3D nearest-neighbour upscaling (integer repetition) over the
    three trailing spatial axes of a 5D input tensor.

    Parameters
    ----------
    incoming : a :class:`Layer` instance or tuple
        The layer feeding into this layer, or the expected input shape.
    scale_factor : integer or iterable
        The scale factor in each spatial dimension. If an integer, it is
        promoted to a cubic scale factor region. If an iterable, it
        should have three elements.
    **kwargs
        Any additional keyword arguments are passed to the :class:`Layer`
        superclass.
    """
    def __init__(self, incoming, scale_factor, **kwargs):
        super(Upscale3DLayer, self).__init__(incoming, **kwargs)
        self.scale_factor = lasagne.utils.as_tuple(scale_factor, 3)
        if ((self.scale_factor[0] < 1) or (self.scale_factor[1] < 1) or (self.scale_factor[2] < 1)):
            raise ValueError('Scale factor must be >= 1, not {0}'.format(self.scale_factor))
    def get_output_shape_for(self, input_shape):
        # Axes 2, 3, 4 are the spatial axes of a (batch, channel, d0, d1, d2)
        # tensor; unknown (None) extents stay None.
        output_shape = list(input_shape)
        if (output_shape[2] is not None):
            output_shape[2] *= self.scale_factor[0]
        if (output_shape[3] is not None):
            output_shape[3] *= self.scale_factor[1]
        if (output_shape[4] is not None):
            output_shape[4] *= self.scale_factor[2]
        return tuple(output_shape)
    def get_output_for(self, input, **kwargs):
        (a, b, c) = self.scale_factor
        upscaled = input
        # BUG FIX: axis 4 must be repeated by its own factor `c`; the
        # previous code repeated it by `b` (the axis-3 factor), which
        # disagreed with get_output_shape_for whenever b != c.
        if (c > 1):
            upscaled = T.extra_ops.repeat(upscaled, c, 4)
        if (b > 1):
            upscaled = T.extra_ops.repeat(upscaled, b, 3)
        if (a > 1):
            upscaled = T.extra_ops.repeat(upscaled, a, 2)
        return upscaled
|
class IdLayer(Layer):
    """Identity layer: passes its input through unchanged."""
    def get_output_for(self, input, **kwargs):
        return input
|
class SumLayer(Layer):
    """Layer that reduces its input by summing over the trailing axis."""
    def get_output_for(self, input, **kwargs):
        # Collapse the last axis by summation, e.g. (b, n, k) -> (b, n).
        summed = input.sum(axis=-1)
        return summed
    def get_output_shape_for(self, input_shape):
        # The trailing axis disappears from the shape.
        return input_shape[:-1]
|
class SHLULayer(Layer):
    """Shifted thresholding nonlinearity: sgn(x) * max(x - 1, 0).

    NOTE(review): classic soft-shrinkage is sgn(x) * max(|x| - 1, 0).
    As written, every input < 1 (including all negative inputs) maps to
    0 and the sgn factor is then redundant — confirm whether an abs()
    on the inner term was intended.
    """
    def get_output_for(self, input, **kwargs):
        return (T.sgn(input) * T.maximum((input - 1), 0))
|
class ResidualLayer(lasagne.layers.ElemwiseSumLayer):
    """Residual (skip-connection) layer; a thin wrapper around ElemwiseSumLayer.

    Also records human-readable names of its input layers in
    `self.input_names` for later inspection.
    """
    def __init__(self, incomings, **kwargs):
        # Ensure the layer gets a name based on the 'res' prefix.
        ensure_set_name('res', kwargs)
        super(ResidualLayer, self).__init__(incomings, **kwargs)
        input_names = []
        for l in incomings:
            if isinstance(l, lasagne.layers.InputLayer):
                # InputLayers may be anonymous; fall back to the name of
                # their underlying input variable.
                input_names.append((l.name if l.name else l.input_var.name))
            elif l.name:
                input_names.append(l.name)
            else:
                # Last resort: the layer's repr.
                input_names.append(str(l))
        self.input_names = input_names
    def get_output_for(self, inputs, **kwargs):
        # NOTE(review): this calls the *grandparent* implementation
        # (super of ElemwiseSumLayer, not of ResidualLayer), bypassing
        # ElemwiseSumLayer's own get_output_for — presumably deliberate,
        # to skip its coefficient handling; confirm against the
        # installed Lasagne version.
        return super(lasagne.layers.ElemwiseSumLayer, self).get_output_for(inputs, **kwargs)
|
def cascade_resnet(pr, net, input_layer, n=5, nf=64, b=lasagne.init.Constant, **kwargs):
    """Append one residual CNN cascade of depth `n` to `net`.

    Builds (n - 1) convolution layers of `nf` feature maps each, a
    channel-restoring aggregation convolution, and a residual connection
    back to `input_layer`. All layer names are prefixed with `pr`.

    Returns
    -------
    (net, output_layer) : the updated layer dict and the residual output.
    """
    in_channels = lasagne.layers.get_output_shape(input_layer)[1]
    net[pr + 'conv1'] = l.Conv(input_layer, nf, 3, b=b(), name=pr + 'conv1')
    for depth in xrange(2, n):
        prev_key = pr + 'conv%d' % (depth - 1)
        this_key = pr + 'conv%d' % depth
        net[this_key] = l.Conv(net[prev_key], nf, 3, b=b(), name=this_key)
    last_conv = net[pr + 'conv%d' % (n - 1)]
    # Aggregate back to the input channel count, then add the skip path.
    net[pr + 'conv_aggr'] = l.ConvAggr(last_conv, in_channels, 3, b=b(), name=pr + 'conv_aggr')
    net[pr + 'res'] = l.ResidualLayer([net[pr + 'conv_aggr'], input_layer], name=pr + 'res')
    return (net, net[pr + 'res'])
|
def cascade_resnet_3d_avg(pr, net, input_layer, n=5, nf=64, b=lasagne.init.Constant, frame_dist=range(5), **kwargs):
    """Append one 3D residual cascade preceded by k-space averaging.

    Like :func:`cascade_resnet`, but prepends an AverageInKspaceLayer
    (data sharing across frames) and uses 3D convolutions with a 3x3x3
    kernel. `kwargs['cascade_i']` must be provided; averaging divides by
    n for every cascade after the first.
    """
    shape = lasagne.layers.get_output_shape(input_layer)
    in_channels = shape[1]
    # Only cascades after the first divide the k-space average by n.
    divide_by_n = kwargs['cascade_i'] != 0
    kernel = (3, 3, 3)
    avg = l.AverageInKspaceLayer([input_layer, net['mask']], shape, frame_dist=frame_dist, divide_by_n=divide_by_n, clipped=False)
    net[pr + 'kavg'] = avg
    net[pr + 'conv1'] = l.Conv3D(avg, nf, kernel, b=b(), name=pr + 'conv1')
    for depth in xrange(2, n):
        prev_key = pr + 'conv%d' % (depth - 1)
        this_key = pr + 'conv%d' % depth
        net[this_key] = l.Conv3D(net[prev_key], nf, kernel, b=b(), name=this_key)
    # Aggregate back to the input channel count, then add the skip path.
    net[pr + 'conv_aggr'] = l.Conv3DAggr(net[pr + 'conv%d' % (n - 1)], in_channels, kernel, b=b(), name=pr + 'conv_aggr')
    net[pr + 'res'] = l.ResidualLayer([net[pr + 'conv_aggr'], input_layer], name=pr + 'res')
    return (net, net[pr + 'res'])
|
def build_cascade_cnn_from_list(shape, net_meta, lmda=None):
    """Create an iterative (cascaded) network with more flexibility.

    Parameters
    ----------
    shape : tuple
        Input data shape, forwarded to the input and DC layers.
    net_meta : list of (callable, int)
        [(model1, cascade1_n), ..., (modelm, cascadem_n)] — each
        callable builds one cascade sub-network; the paired int is how
        many times it is repeated.
    lmda : float or None
        Inverse noise level for the data-consistency layers.

    Returns
    -------
    (net, output_layer) : (OrderedDict of layers, final output layer)

    Raises
    ------
    ValueError
        If `net_meta` is empty or falsy.
    """
    if (not net_meta):
        # BUG FIX: a bare `raise` outside an except block has no active
        # exception to re-raise and errors out by itself; raise an
        # explicit, descriptive exception instead.
        raise ValueError('net_meta must contain at least one (model, cascade_n) pair')
    net = OrderedDict()
    (input_layer, kspace_input_layer, mask_layer) = l.get_dc_input_layers(shape)
    net['input'] = input_layer
    net['kspace_input'] = kspace_input_layer
    net['mask'] = mask_layer
    j = 0
    for (cascade_net, cascade_n) in net_meta:
        for i in xrange(cascade_n):
            pr = ('c%d_' % j)
            # Build one cascade block, then append a data-consistency
            # layer that feeds the next cascade.
            (net, output_layer) = cascade_net(pr, net, input_layer, **{'cascade_i': j})
            net[(pr + 'dc')] = l.DCLayer([output_layer, net['mask'], net['kspace_input']], shape, inv_noise_level=lmda)
            input_layer = net[(pr + 'dc')]
            j += 1
    output_layer = input_layer
    return (net, output_layer)
|
def build_d2_c2(shape):
    """Build a network of 2 cascades, each a depth-2 residual CNN."""
    def _cascade(pr, net, input_layer, **kwargs):
        # Depth-2 variant of the standard residual cascade.
        return cascade_resnet(pr, net, input_layer, n=2)
    return build_cascade_cnn_from_list(shape, [(_cascade, 2)])
|
def build_d5_c5(shape):
    """Build a network of 5 cascades of the default depth-5 residual CNN."""
    meta = [(cascade_resnet, 5)]
    return build_cascade_cnn_from_list(shape, meta)
|
def build_d2_c2_s(shape):
    """Build a small spatio-temporal net: 2 cascades of depth-2, 16-filter 3D CNNs."""
    def _cascade(pr, net, input_layer, **kwargs):
        # Small 3D variant: depth 2, 16 filters, frame distance 0-1.
        return cascade_resnet_3d_avg(pr, net, input_layer, n=2, nf=16, frame_dist=range(2), **kwargs)
    return build_cascade_cnn_from_list(shape, [(_cascade, 2)])
|
def build_d5_c10_s(shape):
    """Build a spatio-temporal net: 10 cascades of the default 3D residual CNN."""
    meta = [(cascade_resnet_3d_avg, 10)]
    return build_cascade_cnn_from_list(shape, meta)
|
class FFTOp(gof.Op):
    """Theano Op: unnormalized forward FFT along the second-to-last axis.

    Complex values are simulated over R^2: the input is a real tensor of
    shape (..., n, 2) whose trailing axis holds the (real, imag) parts.
    """
    __props__ = ()
    def output_type(self, inp):
        # Same dtype and rank as the input; no broadcastable dimensions.
        return T.TensorType(inp.dtype, broadcastable=([False] * inp.type.ndim))
    def make_node(self, a, s=None):
        # `s` is the transform length; defaults to the size of the
        # second-to-last axis (the complex-data axis).
        a = T.as_tensor_variable(a)
        if (a.ndim < 3):
            raise TypeError((('%s: input must have dimension >= 3, with ' % self.__class__.__name__) + 'first dimension batches and last real/imag parts'))
        if (s is None):
            s = a.shape[(- 2)]
            s = T.as_tensor_variable(s)
        else:
            s = T.as_tensor_variable(s)
            # A user-supplied length must be an integer type.
            if ((not s.dtype.startswith('int')) and (not s.dtype.startswith('uint'))):
                raise TypeError(('%s: length of the transformed axis must be of type integer' % self.__class__.__name__))
        return gof.Apply(self, [a, s], [self.output_type(a)()])
    def perform(self, node, inputs, output_storage):
        a = inputs[0]
        s = inputs[1]
        # Re-assemble the complex array from the trailing (real, imag) pair.
        # `s` is unused here; numpy infers the length from the input.
        a_in = (a[(..., 0)] + (1j * a[(..., 1)]))
        A = np.fft.fft(a_in)
        # Split the complex result back into a trailing real/imag axis.
        out = np.zeros((A.shape + (2,)), dtype=a.dtype)
        (out[(..., 0)], out[(..., 1)]) = (np.real(A), np.imag(A))
        output_storage[0][0] = out
    def grad(self, inputs, output_grads):
        (gout,) = output_grads
        s = inputs[1]
        # The adjoint of the unnormalized FFT is the unnormalized inverse:
        # IFFTOp.perform multiplies by s, cancelling numpy's 1/n scaling.
        # `s` itself receives no gradient.
        return [ifft_op(gout, s), DisconnectedType()()]
    def connection_pattern(self, node):
        # The output depends on the data input only, not on the length `s`.
        return [[True], [False]]
|
class IFFTOp(gof.Op):
    """Theano Op: unnormalized inverse FFT along the second-to-last axis.

    Complex values are simulated over R^2: the input is a real tensor of
    shape (..., n, 2) whose trailing axis holds the (real, imag) parts.
    perform() multiplies numpy's normalized ifft by `s`, so this Op is
    the unnormalized inverse (the adjoint of FFTOp).
    """
    __props__ = ()
    def output_type(self, inp):
        # Same dtype and rank as the input; no broadcastable dimensions.
        return T.TensorType(inp.dtype, broadcastable=([False] * inp.type.ndim))
    def make_node(self, a, s=None):
        # `s` is the transform length; defaults to the size of the
        # second-to-last axis (the complex-data axis).
        a = T.as_tensor_variable(a)
        if (a.ndim < 3):
            raise TypeError((('%s: input must have dimension >= 3, with ' % self.__class__.__name__) + 'first dimension batches and last real/imag parts'))
        if (s is None):
            s = a.shape[(- 2)]
            s = T.as_tensor_variable(s)
        else:
            s = T.as_tensor_variable(s)
            # A user-supplied length must be an integer type.
            if ((not s.dtype.startswith('int')) and (not s.dtype.startswith('uint'))):
                raise TypeError(('%s: length of the transformed axis must be of type integer' % self.__class__.__name__))
        return gof.Apply(self, [a, s], [self.output_type(a)()])
    def perform(self, node, inputs, output_storage):
        a = inputs[0]
        s = inputs[1]
        # Re-assemble the complex array from the trailing (real, imag) pair.
        inp = (a[(..., 0)] + (1j * a[(..., 1)]))
        A = np.fft.ifft(inp)
        out = np.zeros((A.shape + (2,)), dtype=a.dtype)
        (out[(..., 0)], out[(..., 1)]) = (np.real(A), np.imag(A))
        # Multiply by s to undo numpy's 1/n normalization -> unnormalized
        # inverse; any desired normalization is applied by the ifft()
        # wrapper function instead.
        output_storage[0][0] = (out * s).astype(a.dtype)
    def grad(self, inputs, output_grads):
        (gout,) = output_grads
        s = inputs[1]
        # Adjoint of the unnormalized inverse is the unnormalized forward
        # FFT; `s` itself receives no gradient.
        gf = fft_op(gout, s)
        return [gf, DisconnectedType()()]
    def connection_pattern(self, node):
        # The output depends on the data input only, not on the length `s`.
        return [[True], [False]]
|
def fft(inp, norm=None):
    """Fast Fourier transform of complex input simulated over R^2.

    `inp` is a real-valued tensor of shape (m, ..., n, 2) whose last
    axis stores the (real, imag) parts; FFTs of length n are taken along
    the second-to-last axis. The output has the same shape.

    Parameters
    ----------
    inp
        Array of floats of size (m, ..., n, 2).
    norm : {None, 'ortho', 'no_norm'}
        Following numpy, None normalizes only the inverse transform by
        n, 'ortho' yields the unitary transform (1/sqrt(n) forward and
        inverse), and 'no_norm' leaves the transform unnormalized.
    """
    n = inp.shape[(- 2)]
    if (_unitary(norm) == 'ortho'):
        scale = T.sqrt(n.astype(inp.dtype))
    else:
        # None and 'no_norm' both leave the forward transform unscaled.
        scale = 1
    return (fft_op(inp, n) / scale)
|
def ifft(inp, norm=None):
    """Inverse fast Fourier transform of complex input simulated over R^2.

    `inp` is a real-valued tensor of shape (m, ..., n, 2) whose last
    axis stores the (real, imag) parts; inverse FFTs of length n are
    taken along the second-to-last axis. The output has the same shape.

    Parameters
    ----------
    inp
        Array of floats of size (m, ..., n, 2).
    norm : {None, 'ortho', 'no_norm'}
        Following numpy, None normalizes the inverse transform by n,
        'ortho' yields the unitary transform (1/sqrt(n) forward and
        inverse), and 'no_norm' leaves the transform unnormalized.
    """
    n = inp.shape[(- 2)]
    mode = _unitary(norm)
    if (mode is None):
        scale = n.astype(inp.dtype)
    elif (mode == 'ortho'):
        scale = T.sqrt(n.astype(inp.dtype))
    else:
        # 'no_norm': the unnormalized inverse is returned as-is.
        scale = 1
    return (ifft_op(inp, n) / scale)
|
def _unitary(norm):
if (norm not in (None, 'ortho', 'no_norm')):
raise ValueError(("Invalid value %s for norm, must be None, 'ortho' or 'no norm'" % norm))
return norm
|
class FFT2Op(gof.Op):
    """Theano Op: unnormalized forward 2D FFT over the (Nx, Ny) axes.

    Complex values are simulated over R^2: the input is a real tensor of
    shape (..., Nx, Ny, 2) whose trailing axis holds the (real, imag)
    parts.
    """
    __props__ = ()
    def output_type(self, inp):
        # Same dtype and rank as the input; no broadcastable dimensions.
        return T.TensorType(inp.dtype, broadcastable=([False] * inp.type.ndim))
    def make_node(self, a, s=None):
        # `s` holds the (Nx, Ny) transform sizes; defaults to the two
        # axes preceding the trailing real/imag axis.
        a = T.as_tensor_variable(a)
        if (a.ndim < 4):
            raise TypeError((('%s: input must have dimension >= 4, with ' % self.__class__.__name__) + 'first dimension batches, then last axes are (Nx, Ny, 2)'))
        if (s is None):
            s = a.shape[(- 3):(- 1)]
            s = T.as_tensor_variable(s)
        else:
            s = T.as_tensor_variable(s)
            # User-supplied sizes must be an integer type.
            if ((not s.dtype.startswith('int')) and (not s.dtype.startswith('uint'))):
                raise TypeError(('%s: length of the transformed axis must be of type integer' % self.__class__.__name__))
        return gof.Apply(self, [a, s], [self.output_type(a)()])
    def perform(self, node, inputs, output_storage):
        a = inputs[0]
        s = inputs[1]
        # Re-assemble the complex array from the trailing (real, imag) pair.
        # `s` is unused here; numpy infers the sizes from the input.
        a_in = (a[(..., 0)] + (1j * a[(..., 1)]))
        A = np.fft.fft2(a_in)
        # Split the complex result back into a trailing real/imag axis.
        out = np.zeros((A.shape + (2,)), dtype=a.dtype)
        (out[(..., 0)], out[(..., 1)]) = (np.real(A), np.imag(A))
        output_storage[0][0] = out
    def grad(self, inputs, output_grads):
        (gout,) = output_grads
        s = inputs[1]
        # Adjoint of the unnormalized 2D FFT is the unnormalized inverse
        # (IFFT2Op.perform multiplies by s.prod()); `s` gets no gradient.
        return [ifft2_op(gout, s), DisconnectedType()()]
    def connection_pattern(self, node):
        # The output depends on the data input only, not on the sizes `s`.
        return [[True], [False]]
|
class IFFT2Op(gof.Op):
    """Theano Op: unnormalized inverse 2D FFT over the (Nx, Ny) axes.

    Complex values are simulated over R^2: the input is a real tensor of
    shape (..., Nx, Ny, 2) whose trailing axis holds the (real, imag)
    parts. perform() multiplies numpy's normalized ifft2 by Nx*Ny, so
    this Op is the unnormalized inverse (the adjoint of FFT2Op).
    """
    __props__ = ()
    def output_type(self, inp):
        # Same dtype and rank as the input; no broadcastable dimensions.
        return T.TensorType(inp.dtype, broadcastable=([False] * inp.type.ndim))
    def make_node(self, a, s=None):
        # `s` holds the (Nx, Ny) transform sizes; defaults to the two
        # axes preceding the trailing real/imag axis.
        a = T.as_tensor_variable(a)
        if (a.ndim < 4):
            raise TypeError((('%s: input must have dimension >= 4, with ' % self.__class__.__name__) + 'first dimension batches, then last axes are (Nx, Ny, 2)'))
        if (s is None):
            s = a.shape[(- 3):(- 1)]
            s = T.as_tensor_variable(s)
        else:
            s = T.as_tensor_variable(s)
            # User-supplied sizes must be an integer type.
            if ((not s.dtype.startswith('int')) and (not s.dtype.startswith('uint'))):
                raise TypeError(('%s: length of the transformed axis must be of type integer' % self.__class__.__name__))
        return gof.Apply(self, [a, s], [self.output_type(a)()])
    def perform(self, node, inputs, output_storage):
        a = inputs[0]
        s = inputs[1]
        # Re-assemble the complex array from the trailing (real, imag) pair.
        inp = (a[(..., 0)] + (1j * a[(..., 1)]))
        A = np.fft.ifft2(inp)
        out = np.zeros((A.shape + (2,)), dtype=a.dtype)
        (out[(..., 0)], out[(..., 1)]) = (np.real(A), np.imag(A))
        # Multiply by Nx*Ny to undo numpy's normalization -> unnormalized
        # inverse; any desired normalization is applied by the ifft2()
        # wrapper function instead.
        output_storage[0][0] = (out * s.prod()).astype(a.dtype)
    def grad(self, inputs, output_grads):
        (gout,) = output_grads
        s = inputs[1]
        # Adjoint of the unnormalized inverse is the unnormalized forward
        # 2D FFT; `s` itself receives no gradient.
        gf = fft2_op(gout, s)
        return [gf, DisconnectedType()()]
    def connection_pattern(self, node):
        # The output depends on the data input only, not on the sizes `s`.
        return [[True], [False]]
|
def fft2(inp, norm=None):
    """2D fast Fourier transform of complex input simulated over R^2.

    `inp` is a real-valued tensor of shape (m, ..., Nx, Ny, 2) whose
    last axis stores the (real, imag) parts; 2D FFTs are taken over the
    (Nx, Ny) axes. The output has the same shape.

    Parameters
    ----------
    inp
        Array of floats of size (m, ..., Nx, Ny, 2).
    norm : {None, 'ortho', 'no_norm'}
        Following numpy, None normalizes only the inverse transform,
        'ortho' yields the unitary transform (1/sqrt(Nx*Ny) forward and
        inverse), and 'no_norm' leaves the transform unnormalized.
    """
    sizes = inp.shape[(- 3):(- 1)]
    if (_unitary(norm) == 'ortho'):
        scale = T.sqrt(sizes.prod().astype(inp.dtype))
    else:
        # None and 'no_norm' both leave the forward transform unscaled.
        scale = 1
    return (fft2_op(inp, sizes) / scale)
|
def ifft2(inp, norm=None):
    """Inverse 2D fast Fourier transform of complex input simulated over R^2.

    `inp` is a real-valued tensor of shape (m, ..., Nx, Ny, 2) whose
    last axis stores the (real, imag) parts; inverse 2D FFTs are taken
    over the (Nx, Ny) axes. The output has the same shape.

    Parameters
    ----------
    inp
        Array of floats of size (m, ..., Nx, Ny, 2).
    norm : {None, 'ortho', 'no_norm'}
        Following numpy, None normalizes the inverse transform by Nx*Ny,
        'ortho' yields the unitary transform (1/sqrt(Nx*Ny) forward and
        inverse), and 'no_norm' leaves the transform unnormalized.
    """
    sizes = inp.shape[(- 3):(- 1)]
    mode = _unitary(norm)
    if (mode is None):
        scale = sizes.prod().astype(inp.dtype)
    elif (mode == 'ortho'):
        scale = T.sqrt(sizes.prod().astype(inp.dtype))
    else:
        # 'no_norm': the unnormalized inverse is returned as-is.
        scale = 1
    return (ifft2_op(inp, sizes) / scale)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.