code
stringlengths
17
6.64M
def resnet18(x, momentum=0.9, eps=1e-05, use_global_stats=False, name=None, lr_mult=1, reuse=None):
    """ResNet-18 feature extractor: basic-block body, global avg-pool, flatten."""
    if name is None:
        name = ''
    x = _Resnet(x, (2, 2, 2, 2), (64, 64, 128, 256, 512), False,
                momentum, eps, use_global_stats,
                name=name, lr_mult=lr_mult, reuse=reuse)
    x = Pool(x, (1, 1), pool_type='avg', global_pool=True)
    return Flatten(x)
def resnet34(x, momentum=0.9, eps=1e-05, use_global_stats=False, name=None, lr_mult=1, reuse=None):
    """ResNet-34 feature extractor: basic-block body, global avg-pool, flatten."""
    if name is None:
        name = ''
    x = _Resnet(x, (3, 4, 6, 3), (64, 64, 128, 256, 512), False,
                momentum, eps, use_global_stats,
                name=name, lr_mult=lr_mult, reuse=reuse)
    x = Pool(x, (1, 1), pool_type='avg', global_pool=True)
    return Flatten(x)
def resnet50(x, momentum=0.9, eps=1e-05, use_global_stats=False, name=None, lr_mult=1, reuse=None):
    """ResNet-50 feature extractor: bottleneck body, global avg-pool, flatten."""
    if name is None:
        name = ''
    x = _Resnet(x, (3, 4, 6, 3), (64, 256, 512, 1024, 2048), True,
                momentum, eps, use_global_stats,
                name=name, lr_mult=lr_mult, reuse=reuse)
    x = Pool(x, (1, 1), pool_type='avg', global_pool=True)
    return Flatten(x)
def resnet101(x, momentum=0.9, eps=1e-05, use_global_stats=False, name=None, lr_mult=1, reuse=None):
    """ResNet-101 feature extractor: bottleneck body, global avg-pool, flatten."""
    if name is None:
        name = ''
    x = _Resnet(x, (3, 4, 23, 3), (64, 256, 512, 1024, 2048), True,
                momentum, eps, use_global_stats,
                name=name, lr_mult=lr_mult, reuse=reuse)
    x = Pool(x, (1, 1), pool_type='avg', global_pool=True)
    return Flatten(x)
def resnet101_largefov(x, num_cls, momentum=0.9, eps=1e-05, use_global_stats=False, name=None, lr_mult=10, reuse=None):
    """ResNet-101 LargeFOV head: dilated backbone plus one dilated 3x3 classifier."""
    if name is None:
        name = ''
    x = _Resnet(x, (3, 4, 23, 3), (64, 256, 512, 1024, 2048), True,
                momentum, eps, use_global_stats,
                strides=(1, 2, 1, 1), dilates=(1, 1, 2, 4),
                name=name, lr_mult=1, reuse=reuse)
    # Single classifier conv with a large receptive field (dilation 12).
    return Conv(x, num_cls, kernel=(3, 3), dilate=(12, 12), pad=(12, 12),
                name=(name + 'fc1'), lr_mult=lr_mult, reuse=reuse)
def resnet101_aspp(x, num_cls, momentum=0.9, eps=1e-05, use_global_stats=False, name=None, lr_mult=10, reuse=None):
    """ResNet-101 ASPP head: four dilated classifiers (rates 6/12/18/24), summed."""
    if name is None:
        name = ''
    x = _Resnet(x, (3, 4, 23, 3), (64, 256, 512, 1024, 2048), True,
                momentum, eps, use_global_stats,
                strides=(1, 2, 1, 1), dilates=(1, 1, 2, 4),
                name=name, lr_mult=1, reuse=reuse)
    branches = [
        Conv(x, num_cls, kernel=(3, 3), dilate=(d, d), pad=(d, d),
             name=(name + ('fc1_aspp%d' % d)), lr_mult=lr_mult, reuse=reuse)
        for d in (6, 12, 18, 24)
    ]
    return sum(branches)
def vgg16(x, name=None, lr_mult=1, reuse=None):
    """VGG-16 backbone through fc7: five conv stages (each followed by a 2x2
    pool), then flatten and two FC-ReLU + dropout layers."""
    if name is None:
        name = ''
    # (filters, number of 3x3 convs) for stages 1..5.
    stages = ((64, 2), (128, 2), (256, 3), (512, 3), (512, 3))
    for stage_idx, (filters, n_convs) in enumerate(stages, start=1):
        for conv_idx in range(1, n_convs + 1):
            x = ConvRelu(x, filters, (3, 3), pad=(1, 1),
                         name='{}conv{}_{}'.format(name, stage_idx, conv_idx),
                         lr_mult=lr_mult, reuse=reuse)
        x = Pool(x, (2, 2), name='{}pool{}'.format(name, stage_idx))
    x = Flatten(x, name=(name + 'flatten'))
    x = FCRelu(x, num_hidden=4096, name=(name + 'fc6'), lr_mult=lr_mult, reuse=reuse)
    x = Drop(x, p=0.5, name=(name + 'drop6'))
    x = FCRelu(x, num_hidden=4096, name=(name + 'fc7'), lr_mult=lr_mult, reuse=reuse)
    x = Drop(x, p=0.5, name=(name + 'drop7'))
    return x
def vgg16_deeplab(x, name=None, lr_mult=1, reuse=None):
    """VGG-16 DeepLab variant: pools 4/5 keep stride 1, stage-5 convs use
    dilation 2, and an extra 3x3 average pool ('pool5a') is appended."""
    if name is None:
        name = ''
    # (filters, n_convs, conv dilation, pool stride) for stages 1..5.
    stages = ((64, 2, 1, 2), (128, 2, 1, 2), (256, 3, 1, 2),
              (512, 3, 1, 1), (512, 3, 2, 1))
    for stage_idx, (filters, n_convs, dilate, pool_stride) in enumerate(stages, start=1):
        # Only pass `dilate` where the original did (stage 5).
        extra = {} if dilate == 1 else {'dilate': (dilate, dilate)}
        for conv_idx in range(1, n_convs + 1):
            x = ConvRelu(x, filters, (3, 3), pad=(dilate, dilate),
                         name='{}conv{}_{}'.format(name, stage_idx, conv_idx),
                         lr_mult=lr_mult, reuse=reuse, **extra)
        x = Pool(x, kernel=(3, 3), stride=(pool_stride, pool_stride), pad=(1, 1),
                 name='{}pool{}'.format(name, stage_idx))
    x = Pool(x, kernel=(3, 3), stride=(1, 1), pad=(1, 1),
             name=(name + 'pool5a'), pool_type='avg')
    return x
def vgg16_largefov(x, num_cls, name=None, lr_mult=10, reuse=None):
    """VGG-16 LargeFOV head: dilated fc6 (rate 12), fc7, and a 1x1 classifier."""
    if name is None:
        name = ''
    x = vgg16_deeplab(x, name, lr_mult=1, reuse=reuse)
    x = ConvRelu(x, 1024, (3, 3), dilate=(12, 12), pad=(12, 12),
                 name=(name + 'fc6'), reuse=reuse)
    x = Drop(x, 0.5, name=(name + 'drop6'))
    x = ConvRelu(x, 1024, (1, 1), name=(name + 'fc7'), reuse=reuse)
    x = Drop(x, 0.5, name=(name + 'drop7'))
    return Conv(x, num_cls, (1, 1), name=(name + 'fc8'),
                lr_mult=lr_mult, reuse=reuse)
def vgg16_aspp(x, num_cls, name=None, lr_mult=10, reuse=None):
    """VGG-16 ASPP head: four parallel fc6/fc7/fc8 branches at dilation rates
    6/12/18/24, summed into a single score map."""
    if name is None:
        name = ''
    backbone = vgg16_deeplab(x, name, lr_mult=1, reuse=reuse)
    branches = []
    for rate in (6, 12, 18, 24):
        b = ConvRelu(backbone, 1024, (3, 3), dilate=(rate, rate), pad=(rate, rate),
                     name=(name + ('fc6_aspp%d' % rate)), reuse=reuse)
        b = Drop(b, 0.5)
        b = ConvRelu(b, 1024, (1, 1), name=(name + ('fc7_aspp%d' % rate)), reuse=reuse)
        b = Drop(b, 0.5)
        b = Conv(b, num_cls, (1, 1), name=(name + ('fc8_aspp%d' % rate)),
                 lr_mult=lr_mult, reuse=reuse)
        branches.append(b)
    return sum(branches)
def wResStem(data, num_filter, momentum=0.9, eps=1e-05, use_global_stats=False, bn_data=True, name=None, lr_mult=1, reuse=None):
    """Wide-ResNet stem: optional BN on the raw input, then a 3x3 stride-1 conv."""
    if name is None:
        name = ''
    x = data
    if bn_data:
        # fix_gamma=True: the input BN normalizes only, without a learned scale.
        x = BN(x, fix_gamma=True, momentum=momentum, eps=eps,
               use_global_stats=use_global_stats, name=(name + 'bn_data'), reuse=reuse)
    return Conv(x, num_filter=num_filter, kernel=(3, 3), stride=(1, 1), pad=(1, 1),
                no_bias=True, name=(name + 'conv1a'), lr_mult=lr_mult, reuse=reuse)
def wResUnit(data, num_filter, stride, dilate, projection, bottle_neck, dropout=0, momentum=0.9, eps=1e-05, use_global_stats=False, name=None, lr_mult=1, reuse=None, **kwargs):
    # Pre-activation wide-ResNet residual unit (BN-ReLU before each conv).
    # `projection`: use a 1x1 strided conv shortcut instead of identity.
    # `bottle_neck`: three convs 1x1 -> 3x3 (strided/dilated) -> 1x1 with optional
    # dropout between them; otherwise two 3x3 convs, whose width/dilation can be
    # overridden via kwargs `mid_filter` / `fst_dilate`.
    assert (name is not None)
    # Shared pre-activation: feeds both the residual branch and the projection shortcut.
    x = BNRelu(data, fix_gamma=False, momentum=momentum, eps=eps, use_global_stats=use_global_stats, name=(('bn' + name) + '_branch2a'), lr_mult=lr_mult, reuse=reuse)
    if projection:
        shortcut = Conv(x, num_filter=num_filter, kernel=(1, 1), stride=((stride,) * 2), pad=(0, 0), no_bias=True, name=(('res' + name) + '_branch1'), lr_mult=lr_mult, reuse=reuse)
    else:
        # Identity shortcut bypasses the pre-activation (uses raw `data`).
        shortcut = data
    if bottle_neck:
        # 1x1 reduce to num_filter/4.
        x = Conv(x, num_filter=int((num_filter / 4.0)), kernel=(1, 1), stride=(1, 1), pad=(0, 0), no_bias=True, name=(('res' + name) + '_branch2a'), lr_mult=lr_mult, reuse=reuse)
        x = BNRelu(x, fix_gamma=False, momentum=momentum, eps=eps, use_global_stats=use_global_stats, name=(('bn' + name) + '_branch2b1'), lr_mult=lr_mult, reuse=reuse)
        if (dropout > 0):
            x = Drop(x, p=dropout)
        # 3x3 at num_filter/2; stride/dilation applied here.
        x = Conv(x, num_filter=int((num_filter / 2.0)), kernel=(3, 3), stride=((stride,) * 2), pad=((dilate,) * 2), dilate=((dilate,) * 2), no_bias=True, name=(('res' + name) + '_branch2b1'), lr_mult=lr_mult, reuse=reuse)
        x = BNRelu(x, fix_gamma=False, momentum=momentum, eps=eps, use_global_stats=use_global_stats, name=(('bn' + name) + '_branch2b2'), lr_mult=lr_mult, reuse=reuse)
        if (dropout > 0):
            x = Drop(x, p=dropout)
        # 1x1 expand back to num_filter.
        x = Conv(x, num_filter=num_filter, kernel=(1, 1), stride=(1, 1), pad=(0, 0), no_bias=True, name=(('res' + name) + '_branch2b2'), lr_mult=lr_mult, reuse=reuse)
    else:
        # Two 3x3 convs; the first may use a different width/dilation.
        mid_filter = kwargs.get('mid_filter', num_filter)
        fst_dilate = kwargs.get('fst_dilate', dilate)
        x = Conv(x, num_filter=mid_filter, kernel=(3, 3), stride=((stride,) * 2), pad=((fst_dilate,) * 2), dilate=((fst_dilate,) * 2), no_bias=True, name=(('res' + name) + '_branch2a'), lr_mult=lr_mult, reuse=reuse)
        x = BNRelu(x, fix_gamma=False, momentum=momentum, eps=eps, use_global_stats=use_global_stats, name=(('bn' + name) + '_branch2b1'), lr_mult=lr_mult, reuse=reuse)
        x = Conv(x, num_filter=num_filter, kernel=(3, 3), stride=(1, 1), pad=((dilate,) * 2), dilate=((dilate,) * 2), no_bias=True, name=(('res' + name) + '_branch2b1'), lr_mult=lr_mult, reuse=reuse)
    x = (x + shortcut)
    return x
def wResBlock(data, num_unit, num_filter, stride, dilate, bottle_neck, dropout=0, momentum=0.9, eps=1e-05, use_global_stats=False, name=None, lr_mult=1, reuse=None, **kwargs):
    """Stack of wResUnits: unit 'a' projects and applies the stride, the
    remaining units 'b1'.. keep stride 1 with identity shortcuts."""
    assert name is not None
    x = wResUnit(data, num_filter, stride, dilate, True, bottle_neck, dropout,
                 momentum, eps, use_global_stats, name=(name + 'a'),
                 lr_mult=lr_mult, reuse=reuse, **kwargs)
    for unit_idx in range(1, num_unit):
        x = wResUnit(x, num_filter, 1, dilate, False, bottle_neck, dropout,
                     momentum, eps, use_global_stats, name=(name + ('b%d' % unit_idx)),
                     lr_mult=lr_mult, reuse=reuse, **kwargs)
    return x
def wresnet38(x, momentum=0.9, eps=1e-05, use_global_stats=False, name=None, out_internals=False, lr_mult=1, reuse=None):
    """Wide ResNet-38 backbone; with out_internals=True also returns the
    outputs of the last three blocks (stages 5-7)."""
    if name is None:
        name = ''
    internals = []
    x = wResStem(x, 64, momentum, eps, use_global_stats, bn_data=True,
                 name=name, lr_mult=lr_mult, reuse=reuse)
    # Stages 2-4 downsample (stride 2); stages 5-7 keep resolution and dilate.
    x = wResBlock(x, 3, 128, 2, 1, False, 0, momentum, eps, use_global_stats, (name + '2'), lr_mult, reuse)
    x = wResBlock(x, 3, 256, 2, 1, False, 0, momentum, eps, use_global_stats, (name + '3'), lr_mult, reuse)
    x = wResBlock(x, 6, 512, 2, 1, False, 0, momentum, eps, use_global_stats, (name + '4'), lr_mult, reuse)
    x = wResBlock(x, 3, 1024, 1, 2, False, 0, momentum, eps, use_global_stats, (name + '5'), lr_mult, reuse,
                  mid_filter=512, fst_dilate=1)
    internals.append(x)
    # Bottleneck stages with increasing dropout.
    x = wResBlock(x, 1, 2048, 1, 4, True, 0.3, momentum, eps, use_global_stats, (name + '6'), lr_mult, reuse)
    internals.append(x)
    x = wResBlock(x, 1, 4096, 1, 4, True, 0.5, momentum, eps, use_global_stats, (name + '7'), lr_mult, reuse)
    internals.append(x)
    x = BNRelu(x, fix_gamma=False, momentum=momentum, eps=eps,
               use_global_stats=use_global_stats, name=(name + 'bn7'),
               lr_mult=lr_mult, reuse=reuse)
    return (x, internals) if out_internals else x
def MultiScale(scales):
    # Decorator factory: wraps a single-output symbol-building function so the
    # model is evaluated at scale 1 plus every scale in `scales`; the rescaled
    # outputs and their element-wise maximum are grouped into one symbol.
    scales = [s for s in scales if (s != 1)]  # scale 1 is the base pass
    def func_wrapper(model_func):
        def model_func_ms(*args, **kwargs):
            assert (len(args) > 0), 'Cannot find input variable'
            input_var = args[0]
            args = args[1:]
            # Base (unscaled) forward pass.
            out_0 = model_func(*((input_var,) + args), **kwargs)
            assert (len(out_0) == 1), 'Only single output implemented'
            reuse = kwargs.get('reuse', None)
            if (reuse is None):
                # NOTE(review): when no reuse target is given, the base output
                # symbol itself is passed as `reuse` for the scaled passes so
                # they share weights with the base pass -- confirm that
                # model_func treats any non-None value this way.
                reuse = out_0
            if ('reuse' in kwargs):
                del kwargs['reuse']
            # Probe the output rank with a dummy 1x3x100x100 input: 4-D outputs
            # (score maps) must be resized back to the base output's size.
            is_tensor4d = (len(out_0.infer_shape(data=(1, 3, 100, 100))[1][0]) == 4)
            out_ms = [out_0]
            for scale in scales:
                input_var_s = mx.sym.Custom(input_var, scale=scale, op_type='BilinearScale')
                out_s = model_func(*((input_var_s,) + args), reuse=reuse, **kwargs)
                if is_tensor4d:
                    out_s = mx.sym.Custom(out_s, out_0, op_type='BilinearScaleLike')
                out_ms.append(out_s)
            # Element-wise max over all scales is appended as the last output.
            out_max = out_ms[0]
            for out_s in out_ms[1:]:
                out_max = mx.sym.maximum(out_max, out_s)
            out_ms.append(out_max)
            return mx.sym.Group(out_ms)
        return model_func_ms
    return func_wrapper
def ResStem(data, num_filter, momentum=0.9, eps=1e-05, use_global_stats=False, bn_data=True, name=None, lr_mult=1, reuse=None):
    """Standard ResNet stem: optional input BN, 7x7/2 conv, BN + ReLU, 3x3/2 max-pool."""
    if name is None:
        name = ''
    x = data
    if bn_data:
        # fix_gamma=True: normalize the raw input without a learned scale.
        x = BN(x, fix_gamma=True, momentum=momentum, eps=eps,
               use_global_stats=use_global_stats, name=(name + 'bn_data'), reuse=reuse)
    x = Conv(x, num_filter=num_filter, kernel=(7, 7), stride=(2, 2), pad=(3, 3),
             no_bias=True, name=(name + 'conv0'), lr_mult=lr_mult, reuse=reuse)
    x = BN(x, fix_gamma=False, momentum=momentum, eps=eps,
           use_global_stats=use_global_stats, name=(name + 'bn0'),
           lr_mult=lr_mult, reuse=reuse)
    x = Relu(x, name=(name + 'relu0'))
    return Pool(x, kernel=(3, 3), stride=(2, 2), pad=(1, 1),
                pool_type='max', name=(name + 'pool0'))
def ResUnit(data, num_filter, stride, dilate, projection, bottle_neck, momentum=0.9, eps=1e-05, use_global_stats=False, name=None, lr_mult=1, reuse=None):
    # Pre-activation residual unit (BN-ReLU before each conv).
    # `projection`: 1x1 strided conv shortcut instead of identity.
    # `bottle_neck`: 1x1 reduce -> 3x3 (strided/dilated) -> 1x1 expand;
    # otherwise two 3x3 convs with the stride/dilation on the first.
    assert (name is not None)
    # Shared pre-activation feeds both the residual branch and the projection.
    x = BNRelu(data, fix_gamma=False, momentum=momentum, eps=eps, use_global_stats=use_global_stats, name=(name + '_bn1'), lr_mult=lr_mult, reuse=reuse)
    if projection:
        shortcut = Conv(x, num_filter=num_filter, kernel=(1, 1), stride=((stride,) * 2), pad=(0, 0), no_bias=True, name=(name + '_sc'), lr_mult=lr_mult, reuse=reuse)
    else:
        # Identity shortcut uses the raw input, not the pre-activated tensor.
        shortcut = data
    if bottle_neck:
        x = Conv(x, num_filter=int((num_filter / 4.0)), kernel=(1, 1), stride=(1, 1), pad=(0, 0), no_bias=True, name=(name + '_conv1'), lr_mult=lr_mult, reuse=reuse)
        x = BNRelu(x, fix_gamma=False, momentum=momentum, eps=eps, use_global_stats=use_global_stats, name=(name + '_bn2'), lr_mult=lr_mult, reuse=reuse)
        # Stride and dilation are applied on the middle 3x3 conv.
        x = Conv(x, num_filter=int((num_filter / 4.0)), kernel=(3, 3), stride=((stride,) * 2), pad=((dilate,) * 2), dilate=((dilate,) * 2), no_bias=True, name=(name + '_conv2'), lr_mult=lr_mult, reuse=reuse)
        x = BNRelu(x, fix_gamma=False, momentum=momentum, eps=eps, use_global_stats=use_global_stats, name=(name + '_bn3'), lr_mult=lr_mult, reuse=reuse)
        x = Conv(x, num_filter=num_filter, kernel=(1, 1), stride=(1, 1), pad=(0, 0), no_bias=True, name=(name + '_conv3'), lr_mult=lr_mult, reuse=reuse)
    else:
        x = Conv(x, num_filter=num_filter, kernel=(3, 3), stride=((stride,) * 2), pad=((dilate,) * 2), dilate=((dilate,) * 2), no_bias=True, name=(name + '_conv1'), lr_mult=lr_mult, reuse=reuse)
        x = BNRelu(x, fix_gamma=False, momentum=momentum, eps=eps, use_global_stats=use_global_stats, name=(name + '_bn2'), lr_mult=lr_mult, reuse=reuse)
        x = Conv(x, num_filter=num_filter, kernel=(3, 3), stride=(1, 1), pad=(1, 1), no_bias=True, name=(name + '_conv2'), lr_mult=lr_mult, reuse=reuse)
    x = (x + shortcut)
    return x
def ResBlock(data, num_unit, num_filter, stride, dilate, bottle_neck, momentum=0.9, eps=1e-05, use_global_stats=False, name=None, lr_mult=1, reuse=None):
    """Stack of ResUnits: unit 1 projects and applies the stride, the rest
    keep stride 1 with identity shortcuts."""
    assert name is not None
    x = ResUnit(data, num_filter, stride, dilate, True, bottle_neck,
                momentum, eps, use_global_stats, (name + '_unit1'), lr_mult, reuse)
    for unit_idx in range(2, num_unit + 1):
        x = ResUnit(x, num_filter, 1, dilate, False, bottle_neck,
                    momentum, eps, use_global_stats,
                    (name + ('_unit%d' % unit_idx)), lr_mult, reuse)
    return x
def _Resnet(x, num_units, num_filters, bottle_neck, momentum=0.9, eps=1e-05, use_global_stats=False, bn_data=True, strides=(1, 2, 2, 2), dilates=(1, 1, 1, 1), name=None, lr_mult=1, reuse=None):
    """Generic ResNet body: stem + four residual stages + final BN-ReLU.

    num_filters[0] is the stem width; num_filters[1:] are the stage widths."""
    if name is None:
        name = ''
    x = ResStem(x, num_filters[0], momentum, eps, use_global_stats,
                bn_data, name, lr_mult, reuse)
    stage_cfgs = zip(num_units, num_filters[1:], strides, dilates)
    for stage_idx, (units, filters, stride, dilate) in enumerate(stage_cfgs, start=1):
        x = ResBlock(x, units, filters, stride, dilate, bottle_neck,
                     momentum, eps, use_global_stats,
                     (name + ('stage%d' % stage_idx)), lr_mult, reuse)
    return BNRelu(x, fix_gamma=False, momentum=momentum, eps=eps,
                  use_global_stats=use_global_stats, name=(name + 'bn1'),
                  lr_mult=lr_mult, reuse=reuse)
def resnet18(x, momentum=0.9, eps=1e-05, use_global_stats=False, name=None, lr_mult=1, reuse=None):
    """ResNet-18 features (basic blocks), globally average-pooled and flattened."""
    name = name if name is not None else ''
    feat = _Resnet(x, (2, 2, 2, 2), (64, 64, 128, 256, 512), False,
                   momentum, eps, use_global_stats,
                   name=name, lr_mult=lr_mult, reuse=reuse)
    feat = Pool(feat, (1, 1), pool_type='avg', global_pool=True)
    return Flatten(feat)
def resnet34(x, momentum=0.9, eps=1e-05, use_global_stats=False, name=None, lr_mult=1, reuse=None):
    """ResNet-34 features (basic blocks), globally average-pooled and flattened."""
    name = name if name is not None else ''
    feat = _Resnet(x, (3, 4, 6, 3), (64, 64, 128, 256, 512), False,
                   momentum, eps, use_global_stats,
                   name=name, lr_mult=lr_mult, reuse=reuse)
    feat = Pool(feat, (1, 1), pool_type='avg', global_pool=True)
    return Flatten(feat)
def resnet50(x, momentum=0.9, eps=1e-05, use_global_stats=False, name=None, lr_mult=1, reuse=None):
    """ResNet-50 features (bottleneck blocks), globally average-pooled and flattened."""
    name = name if name is not None else ''
    feat = _Resnet(x, (3, 4, 6, 3), (64, 256, 512, 1024, 2048), True,
                   momentum, eps, use_global_stats,
                   name=name, lr_mult=lr_mult, reuse=reuse)
    feat = Pool(feat, (1, 1), pool_type='avg', global_pool=True)
    return Flatten(feat)
def resnet101(x, momentum=0.9, eps=1e-05, use_global_stats=False, name=None, lr_mult=1, reuse=None):
    """ResNet-101 features (bottleneck blocks), globally average-pooled and flattened."""
    name = name if name is not None else ''
    feat = _Resnet(x, (3, 4, 23, 3), (64, 256, 512, 1024, 2048), True,
                   momentum, eps, use_global_stats,
                   name=name, lr_mult=lr_mult, reuse=reuse)
    feat = Pool(feat, (1, 1), pool_type='avg', global_pool=True)
    return Flatten(feat)
def resnet50_largefov(x, num_cls, momentum=0.9, eps=1e-05, use_global_stats=False, name=None, lr_mult=10, reuse=None):
    """ResNet-50 LargeFOV head: dilated backbone plus one dilated 3x3 classifier."""
    if name is None:
        name = ''
    x = _Resnet(x, (3, 4, 6, 3), (64, 256, 512, 1024, 2048), True,
                momentum, eps, use_global_stats,
                strides=(1, 2, 1, 1), dilates=(1, 1, 2, 4),
                name=name, lr_mult=1, reuse=reuse)
    return Conv(x, num_cls, kernel=(3, 3), dilate=(12, 12), pad=(12, 12),
                name=(name + 'fc1'), lr_mult=lr_mult, reuse=reuse)
def resnet101_largefov(x, num_cls, momentum=0.9, eps=1e-05, use_global_stats=False, name=None, lr_mult=10, reuse=None):
    """ResNet-101 LargeFOV head (duplicate definition; shadows the earlier one)."""
    name = name if name is not None else ''
    feat = _Resnet(x, (3, 4, 23, 3), (64, 256, 512, 1024, 2048), True,
                   momentum, eps, use_global_stats,
                   strides=(1, 2, 1, 1), dilates=(1, 1, 2, 4),
                   name=name, lr_mult=1, reuse=reuse)
    return Conv(feat, num_cls, kernel=(3, 3), dilate=(12, 12), pad=(12, 12),
                name=(name + 'fc1'), lr_mult=lr_mult, reuse=reuse)
class _VOC_proto(object): @staticmethod def _get_palette(): def bitget(bit, idx): return ((bit & (1 << idx)) > 0) cmap = [] for i in range(256): (r, g, b) = (0, 0, 0) idx = i for j in range(8): r = (r | (bitget(idx, 0) << (7 - j))) g = (g | (bitget(idx, 1) << (7 - j))) b = (b | (bitget(idx, 2) << (7 - j))) idx = (idx >> 3) cmap.append((b, g, r)) return np.array(cmap).astype(np.uint8) def __init__(self): self.categories = ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'] self.palette = self._get_palette() def name2index(self, name): return self.categories.index(name) def index2name(self, index): return self.categories[index] def get_annotation(self, filename, use_diff=False): tree = ET.parse(filename) root = tree.getroot() annotation = [] tmp_annotation = [] for obj in root.findall('object'): cat = obj.find('name').text non_diff = (1 - int(obj.find('difficult').text)) if (use_diff or non_diff): annotation.append(self.name2index(cat)) else: tmp_annotation.append(self.name2index(cat)) annotation = list(set(annotation)) if (len(annotation) == 0): annotation += list(set(tmp_annotation)) annotation.sort() return annotation
def imwrite(filename, image):
    """Write `image` to `filename` with cv2, creating the parent directory first.

    Fix: `os.makedirs(..., exist_ok=True)` replaces the original
    check-then-create plus bare `except: pass`, which both raced with other
    processes and silently swallowed real failures (e.g. permissions).
    """
    dirname = os.path.dirname(filename)
    if dirname:  # dirname == '' for a bare filename; makedirs('') would raise
        os.makedirs(dirname, exist_ok=True)
    cv2.imwrite(filename, image)
def npsave(filename, data):
    """np.save `data` to `filename`, creating the parent directory first.

    Fix: `os.makedirs(..., exist_ok=True)` replaces the original
    check-then-create plus bare `except: pass` (race-prone, hid real errors).
    """
    dirname = os.path.dirname(filename)
    if dirname:  # dirname == '' for a bare filename; makedirs('') would raise
        os.makedirs(dirname, exist_ok=True)
    np.save(filename, data)
def pkldump(filename, data):
    """Pickle `data` to `filename`, creating the parent directory first.

    Fix: `os.makedirs(..., exist_ok=True)` replaces the original
    check-then-create plus bare `except: pass` (race-prone, hid real errors).
    """
    dirname = os.path.dirname(filename)
    if dirname:  # dirname == '' for a bare filename; makedirs('') would raise
        os.makedirs(dirname, exist_ok=True)
    with open(filename, 'wb') as f:
        pickle.dump(data, f)
def imhstack(images, height=None):
    """Horizontally stack images (converted to 3 channels and a common height),
    separating them with 3px-wide white gutters."""
    images = [image2C3(img) for img in as_list(images)]
    if height is None:
        height = np.array([img.shape[0] for img in images]).max()
    images = [resize_height(img, height) for img in images]
    if len(images) == 1:
        return images[0]
    pieces = []
    for img in images:
        pieces.append(img)
        pieces.append(np.full((height, 3, 3), 255, np.uint8))
    return np.hstack(pieces)
def imvstack(images, width=None):
    """Vertically stack images (converted to 3 channels and a common width),
    separating them with 3px-tall white gutters."""
    images = [image2C3(img) for img in as_list(images)]
    if width is None:
        width = np.array([img.shape[1] for img in images]).max()
    images = [resize_width(img, width) for img in images]
    if len(images) == 1:
        return images[0]
    pieces = []
    for img in images:
        pieces.append(img)
        pieces.append(np.full((3, width, 3), 255, np.uint8))
    return np.vstack(pieces)
def as_list(data):
    """Wrap `data` in a single-element list unless it is already a list/tuple."""
    return list(data) if isinstance(data, (list, tuple)) else [data]
def image2C3(image):
    """Return a 3-channel version of `image`: HxWxC passes through unchanged,
    HxW grayscale is replicated across three channels; anything else raises."""
    if image.ndim == 3:
        return image
    if image.ndim == 2:
        return np.dstack((image, image, image))
    raise ValueError('image.ndim = {}, invalid image.'.format(image.ndim))
def resize_height(image, height):
    """Resize `image` to the given height, preserving aspect ratio
    (integer-floored width). No-op when already at that height."""
    h, w = image.shape[:2]
    if h == height:
        return image
    return cv2.resize(image, ((height * w) // h, height))
def resize_width(image, width):
    """Resize `image` to the given width, preserving aspect ratio
    (integer-floored height). No-op when already at that width."""
    h, w = image.shape[:2]
    if w == width:
        return image
    return cv2.resize(image, (width, (width * h) // w))
def imtext(image, text, space=(3, 3), color=(0, 0, 0), thickness=1, fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1.0):
    # Draw `text` near the top-left corner of `image` (offset by `space` px)
    # and return the annotated image.
    assert isinstance(text, str), type(text)
    # cv2.getTextSize returns ((width, height), baseline), so size[1] below is
    # the BASELINE, not the text height. NOTE(review): size[0][1] (the height)
    # was probably intended for the y origin -- confirm before changing.
    size = cv2.getTextSize(text, fontFace, fontScale, thickness)
    image = cv2.putText(image, text, (space[0], (size[1] + space[1])), fontFace, fontScale, color, thickness)
    return image
def setGPU(gpus):
    """Export CUDA_VISIBLE_DEVICES=`gpus` (comma-separated ids) and return the
    remapped local device string '0,1,...,n-1'."""
    num_gpus = len(gpus.split(','))
    os.environ['CUDA_VISIBLE_DEVICES'] = gpus
    return ','.join(str(i) for i in range(num_gpus))
def getTime():
    """Current local time formatted as 'MM-DD HH:MM:SS'."""
    return datetime.now().strftime('%m-%d %H:%M:%S')
class Timer(object):
    """Process-wide stopwatch: `record()` marks a tick; `interval()` returns
    the seconds between the last two ticks (0 before two ticks exist)."""
    curr_record = None
    prev_record = None

    @classmethod
    def record(cls):
        cls.prev_record, cls.curr_record = cls.curr_record, time.time()

    @classmethod
    def interval(cls):
        if cls.prev_record is None:
            return 0
        return cls.curr_record - cls.prev_record
def wrapColor(string, color):
    """Wrap `string` in the ANSI escape sequence for `color` (case-insensitive);
    raises ValueError for an unknown color name."""
    codes = {'red': '\x1b[91m', 'green': '\x1b[92m', 'yellow': '\x1b[93m',
             'blue': '\x1b[94m', 'purple': '\x1b[95m', 'cyan': '\x1b[96m',
             'darkcyan': '\x1b[36m', 'bold': '\x1b[1m', 'underline': '\x1b[4m'}
    key = color.lower()
    if key not in codes:
        raise ValueError('Unknown color: {}'.format(color))
    return codes[key] + string + '\x1b[0m'
def info(logger, msg, color=None):
    """Prefix `msg` with a timestamp, send it to `logger` (if any) and echo it
    to stdout; only the printed copy is colorized."""
    stamped = '[{}]'.format(getTime()) + msg
    if logger is not None:
        logger.info(stamped)
    print(wrapColor(stamped, color) if color is not None else stamped)
def summaryArgs(logger, args, color=None):
    """Pretty-print the non-dunder entries of a module or dict (aligned,
    sorted by key) through info()."""
    if isinstance(args, ModuleType):
        args = vars(args)
    keys = sorted(k for k in args.keys() if k[:2] != '__')
    width = max(len(k) for k in keys)
    lines = ['{:<{}}: {}'.format(k, width, args[k]) for k in keys]
    info(logger, '\n' + '\n'.join(lines), color)
def loadParams(filename):
    """Load an MXNet .params file and split entries into (arg_params, aux_params).

    Entry keys carry a 3-char 'arg'/'aux' prefix plus one separator character,
    which is stripped. Either dict is returned as None when empty.
    """
    saved = mx.nd.load(filename)
    arg_params, aux_params = {}, {}
    for key, value in saved.items():
        kind, param_name = key[:3], key[4:]
        if kind == 'arg':
            arg_params[param_name] = value
        elif kind == 'aux':
            aux_params[param_name] = value
    return (arg_params or None, aux_params or None)
class SaveParams(object):
    """Saves model params and optimizer states per epoch, keeping only the
    newest `num_save` checkpoint pairs on disk (older ones are rm'd)."""

    def __init__(self, model, snapshot, model_name, num_save=5):
        self.model = model
        self.snapshot = snapshot
        self.model_name = model_name
        self.num_save = num_save
        self.save_params = []  # flat ledger: [params0, states0, params1, ...]

    def save(self, n_epoch):
        prefix = os.path.join(self.snapshot, '{}-{:04d}'.format(self.model_name, n_epoch))
        self.save_params += [prefix + '.params', prefix + '.states']
        self.model.save_params(self.save_params[-2])
        self.model.save_optimizer_states(self.save_params[-1])
        if len(self.save_params) > (2 * self.num_save):
            # Evict the oldest checkpoint pair from disk and from the ledger.
            call(['rm', self.save_params[0], self.save_params[1]])
            self.save_params = self.save_params[2:]
        return self.save_params[-2:]

    def __call__(self, n_epoch):
        return self.save(n_epoch)
def getLogger(snapshot, model_name):
    """Create the snapshot directory if needed and return the root logger,
    configured to write <snapshot>/<model_name>.log at INFO level."""
    if not os.path.exists(snapshot):
        os.makedirs(snapshot)
    log_path = os.path.join(snapshot, model_name + '.log')
    logging.basicConfig(filename=log_path, level=logging.INFO)
    return logging.getLogger()
class LrScheduler(object):
    """Epoch-indexed learning-rate schedules.

    `method` selects 'step' (multiply by `factor` at each boundary in
    `step_list`), 'poly' (polynomial decay to zero at `num_epoch`), or 'ramp'
    (Gaussian ramp-up/-down at the schedule's ends). `get(epoch)` returns the
    learning rate; unknown methods raise ValueError.
    """

    def __init__(self, method, init_lr, kwargs):
        self.method = method
        self.init_lr = init_lr
        if method == 'step':
            self.step_list = kwargs['step_list']
            self.factor = kwargs['factor']
            self.get = self._step
        elif method == 'poly':
            self.num_epoch = kwargs['num_epoch']
            self.power = kwargs['power']
            self.get = self._poly
        elif method == 'ramp':
            self.ramp_up = kwargs['ramp_up']
            self.ramp_down = kwargs['ramp_down']
            self.num_epoch = kwargs['num_epoch']
            self.scale = kwargs['scale']
            self.get = self._ramp
        else:
            raise ValueError(method)

    def _step(self, current_epoch):
        # Apply `factor` once per boundary already passed, consuming the
        # boundaries in order and stopping at the first one not yet reached.
        lr = self.init_lr
        for boundary in self.step_list:
            if current_epoch < boundary:
                break
            lr *= self.factor
        return lr

    def _poly(self, current_epoch):
        return self.init_lr * ((1.0 - (float(current_epoch) / self.num_epoch)) ** self.power)

    def _ramp(self, current_epoch):
        # Gaussian-shaped attenuation during the first `ramp_up` and the last
        # `ramp_down` epochs; flat (decay == 1) in between.
        if current_epoch < self.ramp_up:
            frac = 1 - (float(current_epoch) / self.ramp_up)
            decay = np.exp((-(frac ** 2)) * self.scale)
        elif current_epoch > (self.num_epoch - self.ramp_down):
            frac = float((current_epoch + self.ramp_down) - self.num_epoch) / self.ramp_down
            decay = np.exp((-(frac ** 2)) * self.scale)
        else:
            decay = 1.0
        return self.init_lr * decay
class GradBuffer(object):
    """Snapshot buffer for a module's gradient arrays: `write()` stores a copy
    of the current gradients, `read_add()` adds the stored copy back in."""

    def __init__(self, model):
        self.model = model
        self.cache = None  # lazily allocated on the first write()

    def write(self):
        grads = self.model._exec_group.grad_arrays
        if self.cache is None:
            # First call: allocate cache copies on each gradient's own context.
            self.cache = [[None if g is None else g.copyto(g.context) for g in row]
                          for row in grads]
        else:
            for row_src, row_dst in zip(grads, self.cache):
                for g_src, g_dst in zip(row_src, row_dst):
                    if g_src is not None:
                        g_src.copyto(g_dst)

    def read_add(self):
        assert self.cache is not None
        for row_src, row_dst in zip(self.model._exec_group.grad_arrays, self.cache):
            for g_src, g_dst in zip(row_src, row_dst):
                if g_src is not None:
                    g_src += g_dst
def initNormal(mean, std, name, shape):
    """Initialize a parameter array according to its name suffix: normal noise
    for weights, zeros for biases/betas/moving means, ones for gammas/moving
    variances; unknown suffixes raise ValueError."""
    if name.endswith('_weight'):
        return mx.nd.normal(mean, std, shape)
    if name.endswith(('_bias', '_beta', '_moving_mean')):
        return mx.nd.zeros(shape)
    if name.endswith(('_gamma', '_moving_var')):
        return mx.nd.ones(shape)
    raise ValueError('Unknown name type for `{}`'.format(name))
def checkParams(mod, arg_params, aux_params, auto_fix=True, initializer=mx.init.Normal(0.01), logger=None):
    # Validate loaded parameters against the module's expected shapes, report
    # extra / missing / shape-mismatched entries in red, and (when auto_fix)
    # repair the dicts: unusable entries are dropped and whatever the module
    # still needs is freshly initialized.
    # NOTE(review): the mutable default `initializer` object is shared across
    # calls -- harmless if the initializer is stateless, worth confirming.
    arg_params = ({} if (arg_params is None) else arg_params)
    aux_params = ({} if (aux_params is None) else aux_params)
    # Expected shapes, read from the module's allocated arrays (first device).
    arg_shapes = {name: array[0].shape for (name, array) in zip(mod._exec_group.param_names, mod._exec_group.param_arrays)}
    aux_shapes = {name: array[0].shape for (name, array) in zip(mod._exec_group.aux_names, mod._exec_group.aux_arrays)}
    # Extra: given but not expected by the module.
    (extra_arg_params, extra_aux_params) = ([], [])
    for name in arg_params.keys():
        if (name not in arg_shapes):
            extra_arg_params.append(name)
    for name in aux_params.keys():
        if (name not in aux_shapes):
            extra_aux_params.append(name)
    # Missing: expected by the module but not given.
    (miss_arg_params, miss_aux_params) = ([], [])
    for name in arg_shapes.keys():
        if (name not in arg_params):
            miss_arg_params.append(name)
    for name in aux_shapes.keys():
        if (name not in aux_params):
            miss_aux_params.append(name)
    # Mismatch: given and expected, but with a different shape.
    (mismatch_arg_params, mismatch_aux_params) = ([], [])
    for name in arg_params.keys():
        if ((name in arg_shapes) and (arg_shapes[name] != arg_params[name].shape)):
            mismatch_arg_params.append(name)
    for name in aux_params.keys():
        if ((name in aux_shapes) and (aux_shapes[name] != aux_params[name].shape)):
            mismatch_aux_params.append(name)
    # Report every problem before deciding whether to fix.
    for name in extra_arg_params:
        info(logger, 'Find extra arg_params: {}: given {}'.format(name, arg_params[name].shape), 'red')
    for name in extra_aux_params:
        info(logger, 'Find extra aux_params: {}: given {}'.format(name, aux_params[name].shape), 'red')
    for name in miss_arg_params:
        info(logger, 'Find missing arg_params: {}: target {}'.format(name, arg_shapes[name]), 'red')
    for name in miss_aux_params:
        info(logger, 'Find missing aux_params: {}: target {}'.format(name, aux_shapes[name]), 'red')
    for name in mismatch_arg_params:
        info(logger, 'Find mismatch arg_params: {}: given {}, target {}'.format(name, arg_params[name].shape, arg_shapes[name]), 'red')
    for name in mismatch_aux_params:
        info(logger, 'Find mismatch aux_params: {}: given {}, target {}'.format(name, aux_params[name].shape, aux_shapes[name]), 'red')
    if (len((((((extra_arg_params + extra_aux_params) + miss_arg_params) + miss_aux_params) + mismatch_arg_params) + mismatch_aux_params)) == 0):
        return (arg_params, aux_params)
    if (not auto_fix):
        info(logger, 'Bad params not fixed.', 'red')
        return (arg_params, aux_params)
    # Auto-fix: drop entries that cannot be used as-is...
    for name in (extra_arg_params + mismatch_arg_params):
        del arg_params[name]
    for name in (extra_aux_params + mismatch_aux_params):
        del aux_params[name]
    # ...then re-create everything the module still needs.
    attrs = mod._symbol.attr_dict()
    for name in (miss_arg_params + mismatch_arg_params):
        arg_params[name] = mx.nd.zeros(arg_shapes[name])
        try:
            initializer(mx.init.InitDesc(name, attrs.get(name, None)), arg_params[name])
        except ValueError:
            # Fall back to the plain-name call signature if InitDesc is rejected.
            initializer(name, arg_params[name])
    for name in (miss_aux_params + mismatch_aux_params):
        aux_params[name] = mx.nd.zeros(aux_shapes[name])
        try:
            initializer(mx.init.InitDesc(name, attrs.get(name, None)), aux_params[name])
        except ValueError:
            initializer(name, aux_params[name])
    info(logger, 'Bad params auto fixed successfully.', 'red')
    return (arg_params, aux_params)
def run_eval(data_list, pred_root, gt_root, num_cls):
    # Evaluate segmentation predictions against ground-truth label PNGs and
    # print per-class IoU plus the mean IoU.
    def compute_confusion_matrix(names, label_root, pred_root, num_cls, num_threads=16, arr_=None):
        # Worker mode (num_threads == 1): accumulate a num_cls x num_cls
        # confusion matrix over `names`, optionally also adding it into the
        # shared buffer `arr_` (used when spawned as a subprocess).
        if (num_threads == 1):
            mat = np.zeros((num_cls, num_cls), np.float32)
            for name in names:
                # Images are read as grayscale: pixel value == class label.
                gt = cv2.imread(os.path.join(label_root, (name + '.png')), 0).astype(np.int32)
                pred = cv2.imread(os.path.join(pred_root, (name + '.png')), 0).astype(np.int32)
                if (gt.shape != pred.shape):
                    info(None, 'NAME {}, gt.shape != pred.shape: [{} vs. {}]'.format(name, gt.shape, pred.shape), 'red')
                    continue
                # Pixels with gt label >= num_cls (e.g. an ignore label) are skipped.
                valid = (gt < num_cls)
                mat += np.bincount(((gt[valid] * num_cls) + pred[valid]), minlength=(num_cls ** 2)).reshape(num_cls, (- 1))
            if (arr_ is not None):
                arr_mat = np.frombuffer(arr_.get_obj(), np.float32)
                arr_mat += mat.ravel()
            return mat
        else:
            # Dispatcher mode: split `names` into num_threads chunks (the first
            # chunks take one extra item when the split is uneven) and run one
            # worker process per chunk, all accumulating into a shared array.
            workload = np.full((num_threads,), (len(names) // num_threads), np.int32)
            if (workload.sum() < len(names)):
                workload[:(len(names) - workload.sum())] += 1
            workload = np.cumsum(np.hstack([0, workload]))
            names_split = [names[i:j] for (i, j) in zip(workload[:(- 1)], workload[1:])]
            arr_ = mp.Array('f', np.zeros(((num_cls * num_cls),), np.float32))
            mat = np.frombuffer(arr_.get_obj(), np.float32).reshape(num_cls, (- 1))
            jobs = [mp.Process(target=compute_confusion_matrix, args=(_names, label_root, pred_root, num_cls, 1, arr_)) for _names in names_split]
            res = [job.start() for job in jobs]
            res = [job.join() for job in jobs]
            return mat.copy()

    def compute_eval_results(confmat):
        # Per-class IoU = diag / (row sum + col sum - diag), guarded against /0.
        iou = (np.diag(confmat) / np.maximum(((confmat.sum(axis=0) + confmat.sum(axis=1)) - np.diag(confmat)), 1e-10))
        return iou
    with open(data_list) as f:
        names = [x.strip() for x in f.readlines()]
    confmat = compute_confusion_matrix(names, gt_root, pred_root, num_cls)
    iou = compute_eval_results(confmat)
    msg = 'mIOU: {}\n{}\n\n'.format(iou.mean(), iou)
    print(msg)
def recode_cc_data(frame):
    """ Recodes numeric categorical variables into categorical character variables
    with more transparent values.

    Args:
        frame: Pandas DataFrame version of UCI credit card default data.

    Returns:
        H2OFrame with recoded values.

    """
    pay_dict = {-2: 'no consumption', -1: 'pay duly',
                0: 'use of revolving credit', 1: '1 month delay',
                2: '2 month delay', 3: '3 month delay', 4: '4 month delay',
                5: '5 month delay', 6: '6 month delay', 7: '7 month delay',
                8: '8 month delay', 9: '9+ month delay'}
    # Column -> recoding table; PAY_* columns are added only if present.
    recodings = {
        'SEX': {1: 'male', 2: 'female'},
        'EDUCATION': {0: 'other', 1: 'graduate school', 2: 'university',
                      3: 'high school', 4: 'other', 5: 'other', 6: 'other'},
        'MARRIAGE': {0: 'other', 1: 'married', 2: 'single', 3: 'divorced'},
    }
    for col in ('PAY_0', 'PAY_2', 'PAY_3', 'PAY_4', 'PAY_5', 'PAY_6'):
        if col in frame.columns:
            recodings[col] = pay_dict
    # Bind the mapping as a default argument to avoid late-binding closures.
    for col, mapping in recodings.items():
        frame[col] = frame[col].apply(lambda i, m=mapping: m[i])
    return h2o.H2OFrame(frame)
def generate_local_sample(row, frame, X, N=1000):
    """ Generates a perturbed sample around a row of interest.

    Args:
        row: Row of H2OFrame to be explained.
        frame: H2OFrame in which row is stored.
        X: List of model input variables.
        N: Number of samples to generate.

    Returns:
        Pandas DataFrame containing perturbed sample.

    """
    sample_frame = pd.DataFrame(data=np.zeros(shape=(N, len(X))), columns=X)
    for (key, val) in frame[X].types.items():
        if (val == 'enum'):
            # Categorical column: draw uniformly from the observed levels.
            rs = np.random.RandomState(11111)
            draw = rs.choice(frame[key].levels()[0], size=(1, N))[0]
        else:
            # Numeric column: normal draws centered on the row's value with
            # the column's standard deviation.
            # NOTE(review): RandomState is re-seeded with the same seed for
            # every column, so all columns share one random sequence —
            # presumably intentional for reproducibility; confirm.
            rs = np.random.RandomState(11111)
            loc = row[key][(0, 0)]
            sd = frame[key].sd()
            draw = rs.normal(loc, sd, (N, 1))
            # Clamp negative draws back to the row's value (assumes numeric
            # inputs are non-negative — TODO confirm).
            draw[(draw < 0)] = loc
        sample_frame[key] = draw
    return sample_frame
def plot_local_contrib(row, model, X, g_pred=None, scale=False):
    """ Plots reason codes in a bar chart.

    Args:

        row: Row of H2OFrame to be explained.
        model: H2O linear model used for generating reason codes.
        X: List of model input variables.
        g_pred: Prediction of model to be explained, sometimes denoted g, used for scaling.
        scale: Whether to rescale contributions to sum to model predictions.

    """
    local_contrib_frame = pd.DataFrame(columns=['Name', 'Local Contribution', 'Sign'])
    for (key, val) in sorted(row[X].types.items()):
        contrib = 0
        name = ''
        if (val == 'enum'):
            # Categorical: H2O names the coefficient '<column>.<level>'.
            level = row[key][(0, 0)]
            name = '.'.join([str(key), str(level)])
            if (name in model.coef()):
                contrib = model.coef()[name]
        else:
            # Numeric: contribution = value * coefficient.
            name = key
            if (name in model.coef()):
                contrib = (row[name][(0, 0)] * model.coef()[name])
        # Only nonzero contributions are plotted.
        if (contrib != 0.0):
            local_contrib_frame = local_contrib_frame.append({'Name': name, 'Local Contribution': contrib, 'Sign': (contrib > 0)}, ignore_index=True)
    if scale:
        # Rescale so contributions (plus intercept) sum to g_pred.
        scaler = ((g_pred - model.coef()['Intercept']) / local_contrib_frame['Local Contribution'].sum())
        local_contrib_frame['Local Contribution'] *= scaler
    # Bar chart: blue bars for positive contributions, green for negative.
    _ = local_contrib_frame.plot(x='Name', y='Local Contribution', kind='bar', title='Reason Codes', color=local_contrib_frame.Sign.map({True: 'b', False: 'g'}), legend=False)
def recode_cc_data(frame):
    """ Recodes numeric categorical variables into categorical character variables
    with more transparent values.

    Args:
        frame: Pandas DataFrame version of UCI credit card default data.

    Returns:
        H2OFrame with recoded values.

    """
    sex_map = {1: 'male', 2: 'female'}
    education_map = {0: 'other', 1: 'graduate school', 2: 'university',
                     3: 'high school', 4: 'other', 5: 'other', 6: 'other'}
    marriage_map = {0: 'other', 1: 'married', 2: 'single', 3: 'divorced'}
    pay_map = {-2: 'no consumption', -1: 'pay duly',
               0: 'use of revolving credit', 1: '1 month delay',
               2: '2 month delay', 3: '3 month delay', 4: '4 month delay',
               5: '5 month delay', 6: '6 month delay', 7: '7 month delay',
               8: '8 month delay', 9: '9+ month delay'}
    # dict.__getitem__ keeps the original KeyError-on-unknown behavior.
    frame['SEX'] = frame['SEX'].apply(sex_map.__getitem__)
    frame['EDUCATION'] = frame['EDUCATION'].apply(education_map.__getitem__)
    frame['MARRIAGE'] = frame['MARRIAGE'].apply(marriage_map.__getitem__)
    pay_columns = {'PAY_0', 'PAY_2', 'PAY_3', 'PAY_4', 'PAY_5', 'PAY_6'}
    for name in frame.columns:
        if name in pay_columns:
            frame[name] = frame[name].apply(pay_map.__getitem__)
    return h2o.H2OFrame(frame)
def get_percentile_dict(yhat, id_, frame):
    """ Returns the minimum, the maximum, and the deciles of a column, yhat,
    as the indices based on another column id_.

    Args:
        yhat: Column in which to find percentiles.
        id_: Id column that stores indices for percentiles of yhat.
        frame: H2OFrame containing yhat and id_.

    Returns:
        Dictionary of percentile values and index column values.

    """
    ranked = frame.as_data_frame()
    ranked.sort_values(yhat, inplace=True)
    ranked.reset_index(inplace=True)
    n_rows = ranked.shape[0]
    step = n_rows // 10
    # Key 0 is the minimum, key 99 stands in for the maximum.
    percentiles = {0: ranked.loc[0, id_],
                   99: ranked.loc[n_rows - 1, id_]}
    for decile in range(1, 10):
        percentiles[decile * 10] = ranked.loc[decile * step, id_]
    return percentiles
def compute_iou(names, num_cls, target_root, gt_root, num_threads=16, arr_=None):
    """Compute per-class IoU between predicted and ground-truth label PNGs.

    Args:
        names: List of image names, or path to a text file listing them.
        num_cls: Number of classes.
        target_root: Directory of predicted label PNGs.
        gt_root: Directory of ground-truth label PNGs.
        num_threads: Number of worker processes (1 = run inline).
        arr_: Shared mp.Array accumulator used in worker mode.

    Returns:
        np.ndarray of per-class IoU (None in worker mode when arr_ is set).
    """
    # IoU = diag / (row_sum + col_sum - diag); epsilon avoids divide-by-zero.
    _compute_iou = (lambda x: (np.diag(x) / (((x.sum(axis=0) + x.sum(axis=1)) - np.diag(x)) + 1e-10)))
    if isinstance(names, str):
        # A string is treated as the path to a list file.
        with open(names) as f:
            names = [name.strip() for name in f.readlines()]
    if (num_threads == 1):
        mat = np.zeros((num_cls, num_cls), np.float32)
        for name in names:
            gt = cv2.imread(os.path.join(gt_root, (name + '.png')), 0).astype(np.int32)
            pred = cv2.imread(os.path.join(target_root, (name + '.png')), 0).astype(np.int32)
            if (gt.shape != pred.shape):
                info(None, 'Name {}, gt.shape != pred.shape: [{} vs. {}]'.format(name, gt.shape, pred.shape))
                continue
            # Ignore pixels labeled outside [0, num_cls), e.g. a void label.
            valid = ((gt < num_cls) & (pred < num_cls))
            # Confusion matrix in one bincount over gt*num_cls+pred.
            mat += np.bincount(((gt[valid] * num_cls) + pred[valid]), minlength=(num_cls ** 2)).reshape(num_cls, (- 1))
        if (arr_ is not None):
            # Worker mode: fold into the shared accumulator; no return value.
            arr_mat = np.frombuffer(arr_.get_obj(), np.float32)
            arr_mat += mat.ravel()
        else:
            return _compute_iou(mat.copy())
    else:
        # Split the name list as evenly as possible across processes.
        workload = np.full((num_threads,), (len(names) // num_threads), np.int32)
        if (workload.sum() < len(names)):
            workload[:(len(names) - workload.sum())] += 1
        workload = np.cumsum(np.hstack([0, workload]))
        names_split = [names[i:j] for (i, j) in zip(workload[:(- 1)], workload[1:])]
        # Shared float buffer that every worker accumulates into.
        arr_ = mp.Array('f', np.zeros(((num_cls * num_cls),), np.float32))
        mat = np.frombuffer(arr_.get_obj(), np.float32).reshape(num_cls, (- 1))
        jobs = [mp.Process(target=compute_iou, args=(_names, num_cls, target_root, gt_root, 1, arr_)) for _names in names_split]
        [job.start() for job in jobs]
        [job.join() for job in jobs]
        return _compute_iou(mat.copy())
def Convolution(data, num_filter, kernel, stride=None, dilate=None, pad=None, num_group=1, no_bias=False, weight=None, bias=None, name=None, lr_mult=1, reuse=None, **kwargs):
    """Convolution wrapper with auto-naming, lr_mult and parameter reuse.

    Args:
        data: Input symbol.
        num_filter, kernel, stride, dilate, pad, num_group, no_bias:
            Standard mx.sym.Convolution parameters; stride/dilate default
            to ones and pad to zeros.
        pad: May also be a string (e.g. 'same'); then the input is padded
            explicitly first, which requires `input_size` in kwargs.
        weight, bias: Optional pre-built parameter symbols.
        name: Layer name; auto-generated when None.
        lr_mult: Learning-rate multiplier for created variables.
        reuse: Symbol whose internals supply shared parameters; requires
            an explicit name.
    """
    if (reuse is not None):
        assert (name is not None)
    name = (GetLayerName.get('conv') if (name is None) else name)
    stride = (((1,) * len(kernel)) if (stride is None) else stride)
    dilate = (((1,) * len(kernel)) if (dilate is None) else dilate)
    if isinstance(pad, str):
        # String padding: pad the input explicitly via padding_helper, then
        # run the convolution itself with zero padding.
        input_size = kwargs.get('input_size', None)
        if (input_size is None):
            raise ValueError('`input_size` is needed for padding')
        del kwargs['input_size']
        if isinstance(input_size, int):
            in_size_h = in_size_w = input_size
        else:
            (in_size_h, in_size_w) = input_size
        (ph0, ph1) = padding_helper(in_size_h, kernel[0], stride[0], pad)
        (pw0, pw1) = padding_helper(in_size_w, kernel[1], stride[1], pad)
        data = mx.sym.pad(data, mode='constant', pad_width=(0, 0, 0, 0, ph0, ph1, pw0, pw1))
        pad = ((0,) * len(kernel))
    else:
        pad = (((0,) * len(kernel)) if (pad is None) else pad)
    # No other keyword arguments are accepted.
    assert (len(kwargs) == 0), sorted(kwargs)
    # Create (or fetch from `reuse`) the parameter symbols.
    W = (get_variable((name + '_weight'), lr_mult, reuse) if (weight is None) else weight)
    if no_bias:
        x = mx.sym.Convolution(data, num_filter=num_filter, kernel=kernel, stride=stride, dilate=dilate, pad=pad, num_group=num_group, no_bias=no_bias, name=(name if (reuse is None) else None), weight=W)
    else:
        B = (get_variable((name + '_bias'), lr_mult, reuse) if (bias is None) else bias)
        x = mx.sym.Convolution(data, num_filter=num_filter, kernel=kernel, stride=stride, dilate=dilate, pad=pad, num_group=num_group, no_bias=no_bias, name=(name if (reuse is None) else None), weight=W, bias=B)
    return x
def Deconvolution(data, num_filter, kernel, stride=None, dilate=None, pad=None, adj=None, target_shape=None, num_group=1, no_bias=False, weight=None, bias=None, name=None, lr_mult=1, reuse=None):
    """Deconvolution (transposed conv) wrapper with auto-naming and reuse.

    stride/dilate default to ones, pad/adj to zeros, target_shape to empty.
    weight/bias may be supplied, created fresh with lr_mult, or fetched
    from `reuse` (which requires an explicit name).
    """
    if (reuse is not None):
        assert (name is not None)
    name = (GetLayerName.get('deconv') if (name is None) else name)
    stride = (((1,) * len(kernel)) if (stride is None) else stride)
    dilate = (((1,) * len(kernel)) if (dilate is None) else dilate)
    pad = (((0,) * len(kernel)) if (pad is None) else pad)
    adj = (((0,) * len(kernel)) if (adj is None) else adj)
    target_shape = (tuple([]) if (target_shape is None) else target_shape)
    W = (get_variable((name + '_weight'), lr_mult, reuse) if (weight is None) else weight)
    if no_bias:
        x = mx.sym.Deconvolution(data, num_filter=num_filter, kernel=kernel, stride=stride, dilate=dilate, pad=pad, adj=adj, target_shape=target_shape, num_group=num_group, no_bias=no_bias, name=(name if (reuse is None) else None), weight=W)
    else:
        B = (get_variable((name + '_bias'), lr_mult, reuse) if (bias is None) else bias)
        x = mx.sym.Deconvolution(data, num_filter=num_filter, kernel=kernel, stride=stride, dilate=dilate, pad=pad, adj=adj, target_shape=target_shape, num_group=num_group, no_bias=no_bias, name=(name if (reuse is None) else None), weight=W, bias=B)
    return x
def FullyConnected(data, num_hidden, flatten=True, no_bias=False, weight=None, bias=None, name=None, lr_mult=1, reuse=None):
    """FullyConnected wrapper with auto-naming, lr_mult and parameter reuse."""
    if reuse is not None:
        assert name is not None
    if name is None:
        name = GetLayerName.get('fc')
    # Assemble the keyword arguments once instead of duplicating the call.
    call_kwargs = dict(num_hidden=num_hidden, flatten=flatten, no_bias=no_bias,
                       name=name if reuse is None else None)
    call_kwargs['weight'] = (weight if weight is not None
                             else get_variable(name + '_weight', lr_mult, reuse))
    if not no_bias:
        call_kwargs['bias'] = (bias if bias is not None
                               else get_variable(name + '_bias', lr_mult, reuse))
    return mx.sym.FullyConnected(data, **call_kwargs)
def Relu(data, name=None):
    """ReLU activation; auto-names the layer when *name* is None."""
    if name is None:
        name = GetLayerName.get('relu')
    return mx.sym.Activation(data, act_type='relu', name=name)
def LeakyRelu(data, slope=0.25, name=None):
    """Leaky ReLU activation with the given negative slope."""
    if name is None:
        name = GetLayerName.get('leakyRelu')
    return mx.sym.LeakyReLU(data, slope=slope, act_type='leaky', name=name)
def Tanh(data, name=None):
    """Tanh activation; auto-names the layer when *name* is None."""
    if name is None:
        name = GetLayerName.get('tanh')
    return mx.sym.tanh(data, name=name)
def Swish(data, name=None):
    """Swish activation: x * sigmoid(x).

    NOTE(review): *name* is generated for API symmetry with the other
    activations but is not attached to the resulting symbol.
    """
    if name is None:
        name = GetLayerName.get('swish')
    return data * mx.sym.sigmoid(data)
def Pooling(data, kernel, stride=None, pad=None, pool_type='max', global_pool=False, name=None):
    """Pooling wrapper; stride defaults to the kernel, pad to zeros."""
    if name is None:
        name = GetLayerName.get('pool')
    if stride is None:
        stride = kernel
    if pad is None:
        pad = (0,) * len(kernel)
    return mx.sym.Pooling(data, kernel=kernel, stride=stride, pad=pad,
                          pool_type=pool_type, global_pool=global_pool, name=name)
def Dropout(data, p, name=None):
    """Dropout with probability *p*; auto-named when *name* is None."""
    if name is None:
        name = GetLayerName.get('drop')
    return mx.sym.Dropout(data, p=p, name=name)
def BatchNorm(data, fix_gamma=False, momentum=0.9, eps=1e-05, use_global_stats=False, gamma=None, beta=None, moving_mean=None, moving_var=None, name=None, lr_mult=1, reuse=None):
    """BatchNorm wrapper with auto-naming, lr_mult and parameter reuse.

    gamma/beta are trainable and take lr_mult; the moving statistics are
    created with lr_mult=1 (they are auxiliary states, not SGD-trained).
    """
    if (reuse is not None):
        assert (name is not None)
    name = (GetLayerName.get('bn') if (name is None) else name)
    gamma = (get_variable((name + '_gamma'), lr_mult, reuse) if (gamma is None) else gamma)
    beta = (get_variable((name + '_beta'), lr_mult, reuse) if (beta is None) else beta)
    # Moving statistics always use lr_mult=1.
    moving_mean = (get_variable((name + '_moving_mean'), 1, reuse) if (moving_mean is None) else moving_mean)
    moving_var = (get_variable((name + '_moving_var'), 1, reuse) if (moving_var is None) else moving_var)
    x = mx.sym.BatchNorm(data, fix_gamma=fix_gamma, momentum=momentum, eps=eps, use_global_stats=use_global_stats, gamma=gamma, beta=beta, moving_mean=moving_mean, moving_var=moving_var, name=(name if (reuse is None) else None))
    return x
def InstanceNorm(data, eps=1e-05, gamma=None, beta=None, name=None, lr_mult=1, reuse=None):
    """Instance normalization with auto-naming and parameter reuse."""
    if reuse is not None:
        assert name is not None
    if name is None:
        name = GetLayerName.get('in')
    if gamma is None:
        gamma = get_variable(name + '_gamma', lr_mult, reuse)
    if beta is None:
        beta = get_variable(name + '_beta', lr_mult, reuse)
    return mx.sym.InstanceNorm(data, eps=eps, gamma=gamma, beta=beta,
                               name=name if reuse is None else None)
def Flatten(data, name=None):
    """Flatten all axes except the batch axis."""
    if name is None:
        name = GetLayerName.get('flatten')
    return mx.sym.flatten(data, name=name)
def ConvRelu(*args, **kwargs):
    """Convolution followed by ReLU (relu named '<conv>_relu')."""
    conv = Conv(*args, **kwargs)
    return Relu(conv, conv.name + '_relu')
def BNRelu(*args, **kwargs):
    """BatchNorm followed by ReLU (relu named '<bn>_relu')."""
    bn = BN(*args, **kwargs)
    return Relu(bn, bn.name + '_relu')
def FCRelu(*args, **kwargs):
    """FullyConnected followed by ReLU (relu named '<fc>_relu')."""
    fc = FC(*args, **kwargs)
    return Relu(fc, fc.name + '_relu')
def ConvBNRelu(*args, **kwargs):
    """Conv -> BatchNorm -> ReLU; BN inherits lr_mult/reuse from kwargs."""
    conv = Conv(*args, **kwargs)
    bn = BN(conv, name=conv.name + '_bn',
            lr_mult=kwargs.get('lr_mult', 1),
            reuse=kwargs.get('reuse', None))
    return Relu(bn, bn.name + '_relu')
def get_variable(name, lr_mult=1, reuse=None):
    """Create a new variable symbol, or fetch it from *reuse*'s internals."""
    if reuse is not None:
        return reuse.get_internals()[name]
    return mx.sym.Variable(name, lr_mult=lr_mult)
class GetLayerName(object):
    """Generates unique, sequentially numbered layer names per prefix."""

    # Shared per-prefix counters (class-level state by design).
    _name_count = {}

    @classmethod
    def get(cls, name_prefix):
        # Return '<prefix><n>' where n counts prior calls with this prefix.
        index = cls._name_count.setdefault(name_prefix, 0)
        cls._name_count[name_prefix] = index + 1
        return '%s%d' % (name_prefix, index)
def padding_helper(in_size, kernel_size, stride, pad_type='same'):
    """Compute (pad_before, pad_after) for TF-style 'same' padding.

    Raises ValueError for any pad_type other than 'same'.
    """
    pad_type = pad_type.lower()
    if pad_type != 'same':
        raise ValueError(pad_type)
    out_size = -(-in_size // stride)  # ceil(in_size / stride)
    total = max((out_size - 1) * stride + kernel_size - in_size, 0)
    before = total // 2
    return (before, total - before)
class OpConstant(mx.operator.CustomOp):
    """Custom op that outputs a fixed tensor; no gradient flows through it."""

    def __init__(self, val):
        self.val = val  # the constant NDArray to emit

    def forward(self, is_train, req, in_data, out_data, aux):
        # The output is always the stored constant.
        self.assign(out_data[0], req[0], self.val)

    def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
        # A constant has no inputs, so there is nothing to propagate.
        pass
@mx.operator.register('Constant')
class OpConstantProp(mx.operator.CustomOpProp):
    """Property class for the 'Constant' custom op.

    val_str/shape_str are CSV strings (as produced by
    CustomConstantEncoder) decoded here into an NDArray of type_str.
    """

    def __init__(self, val_str, shape_str, type_str='float32'):
        super(OpConstantProp, self).__init__(need_top_grad=False)
        # Decode the CSV-encoded values and shape.
        val = [float(x) for x in val_str.split(',')]
        shape = [int(x) for x in shape_str.split(',')]
        self.val = mx.nd.array(val, dtype=type_str).reshape(shape)

    def list_arguments(self):
        # The constant takes no inputs.
        return []

    def list_outputs(self):
        return ['output']

    def infer_shape(self, in_shape):
        return (in_shape, [self.val.shape], [])

    def infer_type(self, in_type):
        return (in_type, [self.val.dtype], [])

    def create_operator(self, ctx, shapes, dtypes):
        # Move the constant onto the execution context.
        return OpConstant(self.val.as_in_context(ctx))
def CustomConstantEncoder(value, dtype='float32'):
    """Encode a constant as (csv-of-values, csv-of-shape) strings.

    Scalars and list/tuple inputs are converted to an ndarray of *dtype*;
    an existing ndarray is used as-is.
    """
    if not isinstance(value, np.ndarray):
        if not isinstance(value, (list, tuple)):
            value = [value]
        value = np.array(value, dtype=dtype)
    val_str = ','.join(str(v) for v in value.ravel())
    shape_str = ','.join(str(s) for s in value.shape)
    return (val_str, shape_str)
def Constant(value, dtype='float32'):
    """Create a constant symbol via the registered 'Constant' custom op."""
    assert isinstance(dtype, str), dtype
    val_str, shape_str = CustomConstantEncoder(value, dtype)
    return mx.sym.Custom(val_str=val_str, shape_str=shape_str,
                         type_str=dtype, op_type='Constant')
class BilinearScale(mx.operator.CustomOp):
    """Bilinearly resizes the input by a fixed scale factor.

    The backward pass is obtained by recording the forward resize with
    autograd and replaying the gradient through it.
    """

    def __init__(self, scale):
        self.scale = scale

    def forward(self, is_train, req, in_data, out_data, aux):
        x = in_data[0]
        (h, w) = x.shape[2:]
        # Output size: (size - 1) * scale + 1.
        new_h = (int(((h - 1) * self.scale)) + 1)
        new_w = (int(((w - 1) * self.scale)) + 1)
        # Record the resize so backward() can reuse autograd's gradient.
        x.attach_grad()
        with mx.autograd.record():
            new_x = mx.nd.contrib.BilinearResize2D(x, height=new_h, width=new_w)
        self.new_x = new_x
        self.x = x
        self.assign(out_data[0], req[0], new_x)

    def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
        # Replay the recorded graph: fills self.x.grad with the input grad.
        self.new_x.backward(out_grad[0])
        self.assign(in_grad[0], req[0], self.x.grad)
@mx.operator.register('BilinearScale')
class BilinearScaleProp(mx.operator.CustomOpProp):
    """Property class for BilinearScale; computes the scaled output shape."""

    def __init__(self, scale):
        super(BilinearScaleProp, self).__init__(need_top_grad=True)
        self.scale = float(scale)

    def infer_shape(self, in_shape):
        (n, c, h, w) = in_shape[0]
        # Must match the size computation in BilinearScale.forward.
        new_h = (int(((h - 1) * self.scale)) + 1)
        new_w = (int(((w - 1) * self.scale)) + 1)
        return (in_shape, [(n, c, new_h, new_w)], [])

    def create_operator(self, ctx, shapes, dtypes):
        return BilinearScale(self.scale)
class BilinearScaleLike(mx.operator.CustomOp):
    """Bilinearly resizes the first input to the spatial size of the second.

    Backward replays autograd through the recorded resize; the reference
    input receives a zero gradient.
    """

    def forward(self, is_train, req, in_data, out_data, aux):
        (x, x_ref) = in_data
        (new_h, new_w) = x_ref.shape[2:]
        # Record the resize so backward() can reuse autograd's gradient.
        x.attach_grad()
        with mx.autograd.record():
            new_x = mx.nd.contrib.BilinearResize2D(x, height=new_h, width=new_w)
        self.new_x = new_x
        self.x = x
        self.assign(out_data[0], req[0], new_x)

    def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
        self.new_x.backward(out_grad[0])
        in_grad[1][:] = 0  # the shape reference gets no gradient
        self.assign(in_grad[0], req[0], self.x.grad)
@mx.operator.register('BilinearScaleLike')
class BilinearScaleLikeProp(mx.operator.CustomOpProp):
    """Property class for BilinearScaleLike.

    The output takes d1's channel count and d2's batch/spatial dims.
    """

    def __init__(self):
        super(BilinearScaleLikeProp, self).__init__(need_top_grad=True)

    def list_arguments(self):
        return ['d1', 'd2']

    def infer_shape(self, in_shape):
        # Start from d2's shape, replace channels with d1's.
        out_shape = list(in_shape[1])
        out_shape[1] = in_shape[0][1]
        return (in_shape, [out_shape], [])

    def create_operator(self, ctx, shapes, dtypes):
        return BilinearScaleLike()
class SegmentLoss(mx.operator.CustomOp):
    """Softmax segmentation loss with a hand-written backward.

    Supports index or one-hot labels, optional per-sample gradient
    scaling, and masks out pixels whose label mass sums to zero.
    """

    def __init__(self, has_grad_scale, onehot_label, grad_scale):
        self.has_grad_scale = has_grad_scale  # per-sample 'scale' input present
        self.onehot_label = onehot_label      # labels arrive one-hot (NCHW)
        self.grad_scale = grad_scale          # global gradient multiplier

    def forward(self, is_train, req, in_data, out_data, aux):
        # Forward just emits class probabilities.
        prediction = mx.nd.softmax(in_data[0], axis=1)
        self.assign(out_data[0], req[0], prediction)

    def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
        prediction = out_data[0]
        if (not self.onehot_label):
            # Index labels -> one-hot NCHW.
            label = mx.nd.one_hot(in_data[1], depth=prediction.shape[1]).transpose((0, 3, 1, 2))
        else:
            label = in_data[1]
        if (prediction.shape[2] != label.shape[2]):
            # Resize labels to the prediction resolution, then re-binarize:
            # keep only the argmax class, and only where its interpolated
            # confidence exceeds 0.5 (uncertain pixels become all-zero).
            label = mx.nd.contrib.BilinearResize2D(label, height=prediction.shape[2], width=prediction.shape[3])
            label = (mx.nd.one_hot(mx.nd.argmax(label, axis=1), depth=prediction.shape[1]).transpose((0, 3, 1, 2)) * (mx.nd.max(label, axis=1, keepdims=True) > 0.5))
        # Pixels with zero label mass are ignored via this mask.
        mask = label.sum(axis=1, keepdims=True)
        # Normalize by the average number of labeled pixels per sample.
        num_pixel = mx.nd.maximum((mask.sum() / mask.shape[0]), 1)
        grad = (((prediction - label) * mask) / num_pixel)
        if self.has_grad_scale:
            # Optional per-sample scale, broadcast over C, H, W.
            grad_scale = in_data[2].reshape((- 1), 1, 1, 1)
            grad = (grad * grad_scale)
        grad = (grad * self.grad_scale)
        in_grad[1][:] = 0  # labels receive no gradient
        self.assign(in_grad[0], req[0], grad)
@mx.operator.register('SegmentLoss')
class SegmentLossProp(mx.operator.CustomOpProp):
    """Property class for SegmentLoss.

    Args (strings, per CustomOp convention):
        has_grad_scale: >0 adds a per-sample 'scale' input.
        onehot_label: >0 means labels arrive one-hot instead of as indices.
        grad_scale: Global gradient multiplier.
    """

    def __init__(self, has_grad_scale=0, onehot_label=0, grad_scale=1):
        super(SegmentLossProp, self).__init__(need_top_grad=False)
        self.has_grad_scale = (int(has_grad_scale) > 0)
        self.onehot_label = (int(onehot_label) > 0)
        self.grad_scale = float(grad_scale)

    def list_arguments(self):
        if self.has_grad_scale:
            return ['data', 'label', 'scale']
        else:
            return ['data', 'label']

    def infer_shape(self, in_shape):
        # Output (softmax probabilities) matches the logit shape.
        return (in_shape, [in_shape[0]], [])

    def create_operator(self, ctx, shapes, dtypes):
        return SegmentLoss(self.has_grad_scale, self.onehot_label, self.grad_scale)
class MultiSigmoidLoss(mx.operator.CustomOp):
    """Multi-label sigmoid loss with a hand-written backward.

    forward emits sigmoid probabilities; backward emits
    (prediction - label) * grad_scale, with no gradient into the label.
    """

    def __init__(self, grad_scale):
        self.grad_scale = grad_scale  # global gradient multiplier

    def forward(self, is_train, req, in_data, out_data, aux):
        (logit, label) = in_data
        # BUGFIX: mx.nd.sigmoid is element-wise and has no `axis`
        # parameter; passing axis=1 raises at runtime.
        prediction = mx.nd.sigmoid(logit)
        self.assign(out_data[0], req[0], prediction)

    def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
        prediction = out_data[0]
        label = in_data[1]
        # Gradient of sigmoid cross-entropy w.r.t. logits is (p - y).
        grad = (prediction - label) * self.grad_scale
        in_grad[1][:] = 0  # labels receive no gradient
        self.assign(in_grad[0], req[0], grad)
@mx.operator.register('MultiSigmoidLoss')
class MultiSigmoidLossProp(mx.operator.CustomOpProp):
    """Property class for MultiSigmoidLoss (loss op, no top gradient)."""

    def __init__(self, grad_scale=1):
        super(MultiSigmoidLossProp, self).__init__(need_top_grad=False)
        self.grad_scale = float(grad_scale)

    def list_arguments(self):
        return ['data', 'label']

    def list_outputs(self):
        return ['output']

    def infer_shape(self, in_shape):
        # Output probabilities match the logit shape.
        return (in_shape, [in_shape[0]], [])

    def create_operator(self, ctx, shapes, dtypes):
        return MultiSigmoidLoss(self.grad_scale)
class MultiSoftmaxLoss(mx.operator.CustomOp):
    """Softmax loss against dense (soft) labels with a hand-written backward.

    forward emits softmax probabilities; backward emits (p - y), with no
    gradient into the label input.
    """

    def forward(self, is_train, req, in_data, out_data, aux):
        (logit, label) = in_data
        prediction = mx.nd.softmax(logit, axis=1)
        self.assign(out_data[0], req[0], prediction)

    def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
        prediction = out_data[0]
        label = in_data[1]
        # Gradient of softmax cross-entropy w.r.t. logits.
        grad = (prediction - label)
        in_grad[1][:] = 0  # labels receive no gradient
        self.assign(in_grad[0], req[0], grad)
@mx.operator.register('MultiSoftmaxLoss')
class MultiSoftmaxLossProp(mx.operator.CustomOpProp):
    """Property class for MultiSoftmaxLoss (loss op, no top gradient)."""

    def __init__(self):
        super(MultiSoftmaxLossProp, self).__init__(need_top_grad=False)

    def list_arguments(self):
        return ['data', 'label']

    def list_outputs(self):
        return ['output']

    def infer_shape(self, in_shape):
        # Output probabilities match the logit shape.
        return (in_shape, [in_shape[0]], [])

    def create_operator(self, ctx, shapes, dtypes):
        return MultiSoftmaxLoss()
def vgg16_deeplab(x, name=None, lr_mult=1, reuse=None):
    """VGG16 backbone modified for DeepLab.

    pool4/pool5 use stride 1 and conv5_* use dilation 2, keeping a larger
    output resolution than plain VGG16; ends with an extra 3x3 average pool.

    Args:
        x: Input symbol.
        name: Optional prefix for all layer names ('' when None).
        lr_mult: Learning-rate multiplier for the conv layers.
        reuse: Symbol providing shared parameters (see Conv).
    """
    name = ('' if (name is None) else name)
    # Block 1 (downsampled by pool1).
    x = ConvRelu(x, 64, (3, 3), pad=(1, 1), name=(name + 'conv1_1'), lr_mult=lr_mult, reuse=reuse)
    x = ConvRelu(x, 64, (3, 3), pad=(1, 1), name=(name + 'conv1_2'), lr_mult=lr_mult, reuse=reuse)
    x = Pool(x, kernel=(3, 3), stride=(2, 2), pad=(1, 1), name=(name + 'pool1'))
    # Block 2.
    x = ConvRelu(x, 128, (3, 3), pad=(1, 1), name=(name + 'conv2_1'), lr_mult=lr_mult, reuse=reuse)
    x = ConvRelu(x, 128, (3, 3), pad=(1, 1), name=(name + 'conv2_2'), lr_mult=lr_mult, reuse=reuse)
    x = Pool(x, kernel=(3, 3), stride=(2, 2), pad=(1, 1), name=(name + 'pool2'))
    # Block 3.
    x = ConvRelu(x, 256, (3, 3), pad=(1, 1), name=(name + 'conv3_1'), lr_mult=lr_mult, reuse=reuse)
    x = ConvRelu(x, 256, (3, 3), pad=(1, 1), name=(name + 'conv3_2'), lr_mult=lr_mult, reuse=reuse)
    x = ConvRelu(x, 256, (3, 3), pad=(1, 1), name=(name + 'conv3_3'), lr_mult=lr_mult, reuse=reuse)
    x = Pool(x, kernel=(3, 3), stride=(2, 2), pad=(1, 1), name=(name + 'pool3'))
    # Block 4 — pool4 has stride 1 (no further downsampling).
    x = ConvRelu(x, 512, (3, 3), pad=(1, 1), name=(name + 'conv4_1'), lr_mult=lr_mult, reuse=reuse)
    x = ConvRelu(x, 512, (3, 3), pad=(1, 1), name=(name + 'conv4_2'), lr_mult=lr_mult, reuse=reuse)
    x = ConvRelu(x, 512, (3, 3), pad=(1, 1), name=(name + 'conv4_3'), lr_mult=lr_mult, reuse=reuse)
    x = Pool(x, kernel=(3, 3), stride=(1, 1), pad=(1, 1), name=(name + 'pool4'))
    # Block 5 — dilated convolutions compensate for the removed stride.
    x = ConvRelu(x, 512, (3, 3), dilate=(2, 2), pad=(2, 2), name=(name + 'conv5_1'), lr_mult=lr_mult, reuse=reuse)
    x = ConvRelu(x, 512, (3, 3), dilate=(2, 2), pad=(2, 2), name=(name + 'conv5_2'), lr_mult=lr_mult, reuse=reuse)
    x = ConvRelu(x, 512, (3, 3), dilate=(2, 2), pad=(2, 2), name=(name + 'conv5_3'), lr_mult=lr_mult, reuse=reuse)
    x = Pool(x, kernel=(3, 3), stride=(1, 1), pad=(1, 1), name=(name + 'pool5'))
    # Extra stride-1 average pool (DeepLab's pool5a).
    x = Pool(x, kernel=(3, 3), stride=(1, 1), pad=(1, 1), name=(name + 'pool5a'), pool_type='avg')
    return x
def vgg16_largefov(x, num_cls, name=None, lr_mult=10, reuse=None):
    """VGG16 DeepLab-LargeFOV head: fc6 (dilation 12) -> fc7 -> fc8 scores."""
    prefix = '' if name is None else name
    net = vgg16_deeplab(x, prefix, lr_mult=1, reuse=reuse)
    net = ConvRelu(net, 1024, (3, 3), dilate=(12, 12), pad=(12, 12),
                   name=prefix + 'fc6', reuse=reuse)
    net = Drop(net, 0.5, name=prefix + 'drop6')
    net = ConvRelu(net, 1024, (1, 1), name=prefix + 'fc7', reuse=reuse)
    net = Drop(net, 0.5, name=prefix + 'drop7')
    # Classifier scores; lr_mult applies only to this final layer.
    net = Conv(net, num_cls, (1, 1), name=prefix + 'fc8',
               lr_mult=lr_mult, reuse=reuse)
    return net
def vgg16_aspp(x, num_cls, name=None, lr_mult=10, reuse=None):
    """VGG16 DeepLab-ASPP head: four parallel fc6->fc8 branches with
    dilation rates 6/12/18/24, summed into class scores."""
    prefix = '' if name is None else name
    backbone = vgg16_deeplab(x, prefix, lr_mult=1, reuse=reuse)
    branches = []
    for rate in (6, 12, 18, 24):
        branch = ConvRelu(backbone, 1024, (3, 3), dilate=(rate, rate),
                          pad=(rate, rate),
                          name=prefix + 'fc6_aspp%d' % rate, reuse=reuse)
        branch = Drop(branch, 0.5)
        branch = ConvRelu(branch, 1024, (1, 1),
                          name=prefix + 'fc7_aspp%d' % rate, reuse=reuse)
        branch = Drop(branch, 0.5)
        branch = Conv(branch, num_cls, (1, 1),
                      name=prefix + 'fc8_aspp%d' % rate,
                      lr_mult=lr_mult, reuse=reuse)
        branches.append(branch)
    # Fuse the parallel branches by element-wise sum.
    return sum(branches)
def vgg16_cam(x, num_cls, name=None, lr_mult=10, reuse=None):
    """VGG16 head for CAM: fc6 (3x3, no dilation) -> fc7 -> fc8 scores."""
    prefix = '' if name is None else name
    net = vgg16_deeplab(x, prefix, lr_mult=1, reuse=reuse)
    net = ConvRelu(net, 1024, (3, 3), pad=(1, 1),
                   name=prefix + 'fc6', reuse=reuse)
    net = Drop(net, 0.5, name=prefix + 'drop6')
    net = ConvRelu(net, 1024, (1, 1), name=prefix + 'fc7', reuse=reuse)
    net = Drop(net, 0.5, name=prefix + 'drop7')
    # Classifier scores; lr_mult applies only to this final layer.
    net = Conv(net, num_cls, (1, 1), name=prefix + 'fc8',
               lr_mult=lr_mult, reuse=reuse)
    return net
class _VOC_proto(object): @staticmethod def _get_palette(): def bitget(bit, idx): return ((bit & (1 << idx)) > 0) cmap = [] for i in range(256): (r, g, b) = (0, 0, 0) idx = i for j in range(8): r = (r | (bitget(idx, 0) << (7 - j))) g = (g | (bitget(idx, 1) << (7 - j))) b = (b | (bitget(idx, 2) << (7 - j))) idx = (idx >> 3) cmap.append((b, g, r)) return np.array(cmap).astype(np.uint8) def __init__(self): self.categories = ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'] self.palette = self._get_palette() def name2index(self, name): return self.categories.index(name) def index2name(self, index): return self.categories[index] def get_annotation(self, filename, use_diff=False): tree = ET.parse(filename) root = tree.getroot() annotation = [] tmp_annotation = [] for obj in root.findall('object'): cat = obj.find('name').text non_diff = (1 - int(obj.find('difficult').text)) if (use_diff or non_diff): annotation.append(self.name2index(cat)) else: tmp_annotation.append(self.name2index(cat)) annotation = list(set(annotation)) if (len(annotation) == 0): annotation += list(set(tmp_annotation)) annotation.sort() return annotation
def imwrite(filename, image):
    """Write *image* to *filename* with cv2, creating parent dirs as needed.

    Args:
        filename: Destination path; parent directories are created.
        image: Image array accepted by cv2.imwrite.
    """
    dirname = os.path.dirname(filename)
    if dirname:
        # exist_ok=True handles the create/exists race without the original
        # bare `except: pass`, which also hid real errors (e.g. permission
        # failures) and let cv2.imwrite fail silently afterwards.
        os.makedirs(dirname, exist_ok=True)
    cv2.imwrite(filename, image)
def npsave(filename, data):
    """np.save *data* to *filename*, creating parent directories as needed.

    Args:
        filename: Destination .npy path; parent directories are created.
        data: Array (or array-like) passed to np.save.
    """
    dirname = os.path.dirname(filename)
    if dirname:
        # exist_ok=True handles the create/exists race without the original
        # bare `except: pass`, which also hid real errors.
        os.makedirs(dirname, exist_ok=True)
    np.save(filename, data)
def pkldump(filename, data):
    """Pickle *data* to *filename*, creating parent directories as needed.

    Args:
        filename: Destination path; parent directories are created.
        data: Any picklable object.
    """
    dirname = os.path.dirname(filename)
    if dirname:
        # exist_ok=True handles the create/exists race without the original
        # bare `except: pass`, which also hid real errors.
        os.makedirs(dirname, exist_ok=True)
    with open(filename, 'wb') as f:
        pickle.dump(data, f)
def imhstack(images, height=None):
    """Horizontally stack images (3-channel), separated by 3px white bars.

    Args:
        images: Single image or list of images.
        height: Target height; defaults to the tallest input's height.

    Returns:
        Stacked uint8 image (or the single input, resized-to-height path
        untouched, when only one image is given).
    """
    images = as_list(images)
    images = list(map(image2C3, images))  # ensure 3-channel inputs
    if (height is None):
        height = np.array([img.shape[0] for img in images]).max()
    images = [resize_height(img, height) for img in images]
    if (len(images) == 1):
        return images[0]
    # Interleave a 3px-wide white separator after each image.
    # NOTE(review): this also appends a separator after the LAST image, so
    # the result ends with a white band — confirm whether that is intended.
    images = [[img, np.full((height, 3, 3), 255, np.uint8)] for img in images]
    images = np.hstack(sum(images, []))
    return images
def imvstack(images, width=None):
    """Vertically stack images (3-channel), separated by 3px white bars.

    Args:
        images: Single image or list of images.
        width: Target width; defaults to the widest input's width.

    Returns:
        Stacked uint8 image (or the single input when only one is given).
    """
    images = as_list(images)
    images = list(map(image2C3, images))  # ensure 3-channel inputs
    if (width is None):
        width = np.array([img.shape[1] for img in images]).max()
    images = [resize_width(img, width) for img in images]
    if (len(images) == 1):
        return images[0]
    # Interleave a 3px-tall white separator after each image.
    # NOTE(review): this also appends a separator after the LAST image, so
    # the result ends with a white band — confirm whether that is intended.
    images = [[img, np.full((3, width, 3), 255, np.uint8)] for img in images]
    images = np.vstack(sum(images, []))
    return images
def as_list(data):
    """Return *data* as a list, wrapping non-list/tuple values."""
    return list(data) if isinstance(data, (list, tuple)) else [data]