code stringlengths 17 6.64M |
|---|
def test_build_activation_layer():
    """Check cfg validation and construction of activation layers."""
    # A non-dict cfg is rejected.
    with pytest.raises(TypeError):
        build_activation_layer('ReLU')
    # A cfg without a ``type`` key is rejected.
    with pytest.raises(KeyError):
        build_activation_layer(dict())
    # An unregistered ``type`` is rejected.
    cfg = dict(type='FancyReLU')
    with pytest.raises(KeyError):
        build_activation_layer(cfg)

    # Every registered activation builds an instance of its module class.
    for reg_name, reg_cls in ACTIVATION_LAYERS.module_dict.items():
        cfg['type'] = reg_name
        built = build_activation_layer(cfg)
        assert isinstance(built, reg_cls)

    # ``Clamp`` (alias ``Clip``) bounds outputs; defaults are [-1, 1].
    act = build_activation_layer(dict(type='Clamp'))
    x = torch.randn(10) * 1000
    y = act(x)
    assert np.logical_and((y >= -1).numpy(), (y <= 1).numpy()).all()
    # Overriding only ``min`` keeps the default upper bound.
    act = build_activation_layer(dict(type='Clip', min=0))
    y = act(x)
    assert np.logical_and((y >= 0).numpy(), (y <= 1).numpy()).all()
    # Overriding only ``max`` keeps the default lower bound.
    act = build_activation_layer(dict(type='Clamp', max=0))
    y = act(x)
    assert np.logical_and((y >= -1).numpy(), (y <= 0).numpy()).all()
|
def test_build_padding_layer():
    """Check cfg validation and construction of padding layers."""
    # Reject a non-dict cfg.
    with pytest.raises(TypeError):
        build_padding_layer('reflect')
    # Reject a cfg lacking a ``type`` key.
    with pytest.raises(KeyError):
        build_padding_layer(dict())
    # Reject an unregistered type.
    cfg = dict(type='FancyPad')
    with pytest.raises(KeyError):
        build_padding_layer(cfg)

    # Every registered padding layer can be built with a padding of 2.
    for reg_name, reg_cls in PADDING_LAYERS.module_dict.items():
        cfg['type'] = reg_name
        built = build_padding_layer(cfg, 2)
        assert isinstance(built, reg_cls)

    # Reflection padding of 2 grows a 5x5 feature map to 9x9.
    feat = torch.randn(1, 2, 5, 5)
    pad = build_padding_layer(dict(type='reflect'), 2)
    assert pad(feat).shape == (1, 2, 9, 9)
|
def test_upsample_layer():
    """Check cfg validation and all supported upsample layer types."""
    # cfg must be a dict.
    with pytest.raises(TypeError):
        cfg = 'bilinear'
        build_upsample_layer(cfg)
    # cfg must contain a ``type`` key.
    with pytest.raises(KeyError):
        cfg = dict()
        build_upsample_layer(cfg)
    # ``type`` must be a registered upsample layer.
    with pytest.raises(KeyError):
        cfg = dict(type='FancyUpsample')
        build_upsample_layer(cfg)
    # Interpolation types map to nn.Upsample with the matching mode.
    for type_name in ['nearest', 'bilinear']:
        cfg['type'] = type_name
        layer = build_upsample_layer(cfg)
        assert isinstance(layer, nn.Upsample)
        assert (layer.mode == type_name)
    # 'deconv' builds nn.ConvTranspose2d; conv args may come from cfg ...
    cfg = dict(type='deconv', in_channels=3, out_channels=3, kernel_size=3, stride=2)
    layer = build_upsample_layer(cfg)
    assert isinstance(layer, nn.ConvTranspose2d)
    # ... or be passed separately as keyword arguments ...
    cfg = dict(type='deconv')
    kwargs = dict(in_channels=3, out_channels=3, kernel_size=3, stride=2)
    layer = build_upsample_layer(cfg, **kwargs)
    assert isinstance(layer, nn.ConvTranspose2d)
    assert (layer.in_channels == kwargs['in_channels'])
    assert (layer.out_channels == kwargs['out_channels'])
    assert (layer.kernel_size == (kwargs['kernel_size'], kwargs['kernel_size']))
    assert (layer.stride == (kwargs['stride'], kwargs['stride']))
    # ... or as positional arguments.
    layer = build_upsample_layer(cfg, 3, 3, 3, 2)
    assert isinstance(layer, nn.ConvTranspose2d)
    assert (layer.in_channels == kwargs['in_channels'])
    assert (layer.out_channels == kwargs['out_channels'])
    assert (layer.kernel_size == (kwargs['kernel_size'], kwargs['kernel_size']))
    assert (layer.stride == (kwargs['stride'], kwargs['stride']))
    # 'pixel_shuffle' builds the custom PixelShufflePack module.
    cfg = dict(type='pixel_shuffle', in_channels=3, out_channels=3, scale_factor=2, upsample_kernel=3)
    layer = build_upsample_layer(cfg)
    assert isinstance(layer, PixelShufflePack)
    assert (layer.scale_factor == 2)
    assert (layer.upsample_kernel == 3)
|
def test_pixel_shuffle_pack():
    """PixelShufflePack upsamples spatially by ``scale_factor``."""
    feats = torch.rand(2, 3, 10, 10)
    shuffle = PixelShufflePack(3, 3, scale_factor=2, upsample_kernel=3)
    # The internal conv uses the requested upsample kernel size.
    assert shuffle.upsample_conv.kernel_size == (3, 3)
    # 10x10 -> 20x20 with scale_factor=2; channel count is unchanged.
    assert shuffle(feats).shape == (2, 3, 20, 20)
|
def test_is_norm():
    """is_norm recognizes norm layers and honors the ``exclude`` argument."""
    # Norm classes constructible from a single channel count.
    single_arg_norms = [
        nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d,
        nn.InstanceNorm1d, nn.InstanceNorm2d, nn.InstanceNorm3d,
        nn.LayerNorm,
    ]
    for norm_cls in single_arg_norms:
        layer = norm_cls(3)
        assert is_norm(layer)
        assert not is_norm(layer, exclude=(norm_cls,))
    # GroupNorm needs (num_groups, num_channels).
    for norm_cls in [nn.GroupNorm]:
        layer = norm_cls(3, 6)
        assert is_norm(layer)
        assert not is_norm(layer, exclude=(norm_cls,))

    # Subclasses can be excluded via their base class, bare or in a tuple.
    class MyNorm(nn.BatchNorm2d):
        pass

    layer = MyNorm(3)
    assert is_norm(layer)
    assert not is_norm(layer, exclude=_BatchNorm)
    assert not is_norm(layer, exclude=(_BatchNorm,))

    # Non-norm modules are not recognized.
    layer = nn.Conv2d(3, 8, 1)
    assert not is_norm(layer)

    # ``exclude`` must be a type or tuple of types, never strings.
    with pytest.raises(TypeError):
        is_norm(nn.BatchNorm1d(3), exclude='BN')
    with pytest.raises(TypeError):
        is_norm(nn.BatchNorm1d(3), exclude=('BN',))
|
def test_infer_plugin_abbr():
    """infer_plugin_abbr prefers ``_abbr_``; otherwise snake-cases the name."""
    # Only classes are accepted.
    with pytest.raises(TypeError):
        infer_plugin_abbr(0)

    # An explicit ``_abbr_`` attribute wins.
    class MyPlugin:
        _abbr_ = 'mp'

    assert infer_plugin_abbr(MyPlugin) == 'mp'

    # Otherwise the CamelCase class name is converted to snake_case.
    class FancyPlugin:
        pass

    assert infer_plugin_abbr(FancyPlugin) == 'fancy_plugin'
|
def test_build_plugin_layer():
    """Check cfg validation and (name, layer) results of build_plugin_layer."""
    # cfg must be a dict.
    with pytest.raises(TypeError):
        cfg = 'Plugin'
        build_plugin_layer(cfg)
    # cfg must contain the key ``type``.
    with pytest.raises(KeyError):
        cfg = dict()
        build_plugin_layer(cfg)
    # ``type`` must be registered in PLUGIN_LAYERS.
    with pytest.raises(KeyError):
        cfg = dict(type='FancyPlugin')
        build_plugin_layer(cfg)
    # ``postfix`` must be an int or str, not a list.
    with pytest.raises(AssertionError):
        cfg = dict(type='ConvModule')
        build_plugin_layer(cfg, postfix=[1, 2])
    # ContextBlock: returned name is the abbreviation plus str(postfix).
    for postfix in ['', '_test', 1]:
        cfg = dict(type='ContextBlock')
        (name, layer) = build_plugin_layer(cfg, postfix=postfix, in_channels=16, ratio=(1.0 / 4))
        assert (name == ('context_block' + str(postfix)))
        assert isinstance(layer, PLUGIN_LAYERS.module_dict['ContextBlock'])
    # GeneralizedAttention plugin.
    for postfix in ['', '_test', 1]:
        cfg = dict(type='GeneralizedAttention')
        (name, layer) = build_plugin_layer(cfg, postfix=postfix, in_channels=16)
        assert (name == ('gen_attention_block' + str(postfix)))
        assert isinstance(layer, PLUGIN_LAYERS.module_dict['GeneralizedAttention'])
    # NonLocal2d plugin.
    for postfix in ['', '_test', 1]:
        cfg = dict(type='NonLocal2d')
        (name, layer) = build_plugin_layer(cfg, postfix=postfix, in_channels=16)
        assert (name == ('nonlocal_block' + str(postfix)))
        assert isinstance(layer, PLUGIN_LAYERS.module_dict['NonLocal2d'])
    # ConvModule plugin.
    for postfix in ['', '_test', 1]:
        cfg = dict(type='ConvModule')
        (name, layer) = build_plugin_layer(cfg, postfix=postfix, in_channels=16, out_channels=4, kernel_size=3)
        assert (name == ('conv_block' + str(postfix)))
        assert isinstance(layer, PLUGIN_LAYERS.module_dict['ConvModule'])
|
def test_context_block():
    """Check ContextBlock option validation and forward shapes."""
    # pooling_type must be a supported value.
    with pytest.raises(AssertionError):
        ContextBlock(16, (1.0 / 4), pooling_type='unsupport_type')
    # fusion_types must be a sequence of supported strings.
    with pytest.raises(AssertionError):
        ContextBlock(16, (1.0 / 4), fusion_types='unsupport_type')
    with pytest.raises(AssertionError):
        ContextBlock(16, (1.0 / 4), fusion_types=('unsupport_type',))
    # Attention pooling builds a 1-channel mask conv.
    imgs = torch.randn(2, 16, 20, 20)
    context_block = ContextBlock(16, (1.0 / 4), pooling_type='att')
    out = context_block(imgs)
    assert (context_block.conv_mask.in_channels == 16)
    assert (context_block.conv_mask.out_channels == 1)
    assert (out.shape == imgs.shape)
    # Average pooling uses an avg_pool module instead.
    imgs = torch.randn(2, 16, 20, 20)
    context_block = ContextBlock(16, (1.0 / 4), pooling_type='avg')
    out = context_block(imgs)
    assert hasattr(context_block, 'avg_pool')
    assert (out.shape == imgs.shape)
    # Only the requested fusion branches are created: add only ...
    imgs = torch.randn(2, 16, 20, 20)
    context_block = ContextBlock(16, (1.0 / 4), fusion_types=('channel_add',))
    out = context_block(imgs)
    assert (context_block.channel_add_conv is not None)
    assert (context_block.channel_mul_conv is None)
    assert (out.shape == imgs.shape)
    # ... mul only ...
    imgs = torch.randn(2, 16, 20, 20)
    context_block = ContextBlock(16, (1.0 / 4), fusion_types=('channel_mul',))
    out = context_block(imgs)
    assert (context_block.channel_add_conv is None)
    assert (context_block.channel_mul_conv is not None)
    assert (out.shape == imgs.shape)
    # ... or both.
    imgs = torch.randn(2, 16, 20, 20)
    context_block = ContextBlock(16, (1.0 / 4), fusion_types=('channel_add', 'channel_mul'))
    out = context_block(imgs)
    assert (context_block.channel_add_conv is not None)
    assert (context_block.channel_mul_conv is not None)
    assert (out.shape == imgs.shape)
|
def test_conv2d_samepadding():
    """Conv2dAdaptivePadding emulates 'SAME' padding: output spatial size is
    ceil(input_size / stride) for both even and odd inputs."""
    # stride 1 keeps the spatial size.
    for size in (28, 13):
        feat = torch.rand((1, 3, size, size))
        conv = Conv2dAdaptivePadding(3, 3, kernel_size=3, stride=1)
        assert conv(feat).shape == feat.shape
    # stride 2 halves the size, rounding up.
    for size, expected in ((28, 14), (13, 7)):
        feat = torch.rand((1, 3, size, size))
        conv = Conv2dAdaptivePadding(3, 3, kernel_size=3, stride=2)
        assert conv(feat).shape == torch.Size([1, 3, expected, expected])
|
@CONV_LAYERS.register_module()
class ExampleConv(nn.Module):
    """Minimal conv layer registered so tests can exercise custom conv_cfg.

    Mirrors the ``nn.Conv2d`` constructor signature (plus ``norm_cfg``)
    but simply applies one zero-initialized ``nn.Conv2d``.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True, norm_cfg=None):
        super().__init__()
        # Mirror the attribute surface of nn.Conv2d so callers may introspect.
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.groups = groups
        self.bias = bias
        self.norm_cfg = norm_cfg
        self.output_padding = (0, 0, 0)
        self.transposed = False
        self.conv0 = nn.Conv2d(in_channels, out_channels, kernel_size)
        self.init_weights()

    def forward(self, x):
        return self.conv0(x)

    def init_weights(self):
        # Zero weights make the layer's output predictable in tests.
        nn.init.constant_(self.conv0.weight, 0)
|
def test_conv_module():
    """Check ConvModule cfg validation, conv/norm/act wiring and shapes."""
    # conv_cfg must be None or a dict.
    with pytest.raises(AssertionError):
        conv_cfg = 'conv'
        ConvModule(3, 8, 2, conv_cfg=conv_cfg)
    # norm_cfg must be None or a dict.
    with pytest.raises(AssertionError):
        norm_cfg = 'norm'
        ConvModule(3, 8, 2, norm_cfg=norm_cfg)
    # act_cfg type must be a registered activation.
    with pytest.raises(KeyError):
        act_cfg = dict(type='softmax')
        ConvModule(3, 8, 2, act_cfg=act_cfg)
    # conv + norm + act (default activation).
    conv = ConvModule(3, 8, 2, norm_cfg=dict(type='BN'))
    assert conv.with_activation
    assert hasattr(conv, 'activate')
    assert conv.with_norm
    assert hasattr(conv, 'norm')
    x = torch.rand(1, 3, 256, 256)
    output = conv(x)
    assert (output.shape == (1, 8, 255, 255))
    # conv + act, no norm.
    conv = ConvModule(3, 8, 2)
    assert conv.with_activation
    assert hasattr(conv, 'activate')
    assert (not conv.with_norm)
    assert (conv.norm is None)
    x = torch.rand(1, 3, 256, 256)
    output = conv(x)
    assert (output.shape == (1, 8, 255, 255))
    # conv only: no norm, no activation.
    conv = ConvModule(3, 8, 2, act_cfg=None)
    assert (not conv.with_norm)
    assert (conv.norm is None)
    assert (not conv.with_activation)
    assert (not hasattr(conv, 'activate'))
    x = torch.rand(1, 3, 256, 256)
    output = conv(x)
    assert (output.shape == (1, 8, 255, 255))
    # A custom conv_cfg dispatches to the registered ExampleConv
    # (recognizable by its zero-initialized weights).
    conv_module = ConvModule(3, 8, 2, conv_cfg=dict(type='ExampleConv'), act_cfg=None)
    assert torch.equal(conv_module.conv.conv0.weight, torch.zeros(8, 3, 2, 2))
    # Spectral norm wraps the conv weight as ``weight_orig``.
    conv = ConvModule(3, 8, 3, padding=1, with_spectral_norm=True)
    assert hasattr(conv.conv, 'weight_orig')
    output = conv(x)
    assert (output.shape == (1, 8, 256, 256))
    # Non-zero padding_mode builds an explicit padding layer.
    conv = ConvModule(3, 8, 3, padding=1, padding_mode='reflect')
    assert isinstance(conv.padding_layer, nn.ReflectionPad2d)
    output = conv(x)
    assert (output.shape == (1, 8, 256, 256))
    # Unknown padding modes are rejected.
    with pytest.raises(KeyError):
        conv = ConvModule(3, 8, 3, padding=1, padding_mode='non_exists')
    # Each supported activation type builds the matching module.
    conv = ConvModule(3, 8, 3, padding=1, act_cfg=dict(type='LeakyReLU'))
    assert isinstance(conv.activate, nn.LeakyReLU)
    output = conv(x)
    assert (output.shape == (1, 8, 256, 256))
    conv = ConvModule(3, 8, 3, padding=1, act_cfg=dict(type='Tanh'))
    assert isinstance(conv.activate, nn.Tanh)
    output = conv(x)
    assert (output.shape == (1, 8, 256, 256))
    conv = ConvModule(3, 8, 3, padding=1, act_cfg=dict(type='Sigmoid'))
    assert isinstance(conv.activate, nn.Sigmoid)
    output = conv(x)
    assert (output.shape == (1, 8, 256, 256))
    conv = ConvModule(3, 8, 3, padding=1, act_cfg=dict(type='PReLU'))
    assert isinstance(conv.activate, nn.PReLU)
    output = conv(x)
    assert (output.shape == (1, 8, 256, 256))
    # HSwish maps to nn.Hardswish on torch >= 1.7, else the custom HSwish.
    conv = ConvModule(3, 8, 3, padding=1, act_cfg=dict(type='HSwish'))
    if ((TORCH_VERSION == 'parrots') or (digit_version(TORCH_VERSION) < digit_version('1.7'))):
        assert isinstance(conv.activate, HSwish)
    else:
        assert isinstance(conv.activate, nn.Hardswish)
    output = conv(x)
    assert (output.shape == (1, 8, 256, 256))
    conv = ConvModule(3, 8, 3, padding=1, act_cfg=dict(type='HSigmoid'))
    assert isinstance(conv.activate, HSigmoid)
    output = conv(x)
    assert (output.shape == (1, 8, 256, 256))
|
def test_bias():
    """Check ConvModule's automatic bias handling and its warnings."""
    # With no norm layer, bias defaults to enabled ...
    conv = ConvModule(3, 8, 2)
    assert (conv.conv.bias is not None)
    # ... and is disabled automatically when a norm follows the conv.
    conv = ConvModule(3, 8, 2, norm_cfg=dict(type='BN'))
    assert (conv.conv.bias is None)
    # An explicit bias=False is always honored.
    conv = ConvModule(3, 8, 2, bias=False)
    assert (conv.conv.bias is None)
    # Forcing bias=True before BatchNorm emits exactly one warning.
    with pytest.warns(UserWarning) as record:
        ConvModule(3, 8, 2, bias=True, norm_cfg=dict(type='BN'))
    assert (len(record) == 1)
    assert (record[0].message.args[0] == 'Unnecessary conv bias before batch/instance norm')
    # Same for InstanceNorm.
    with pytest.warns(UserWarning) as record:
        ConvModule(3, 8, 2, bias=True, norm_cfg=dict(type='IN'))
    assert (len(record) == 1)
    assert (record[0].message.args[0] == 'Unnecessary conv bias before batch/instance norm')
    # GroupNorm does not trigger it; only the sentinel warning emitted
    # below should be recorded, proving ConvModule stayed silent.
    with pytest.warns(UserWarning) as record:
        norm_cfg = dict(type='GN', num_groups=1)
        ConvModule(3, 8, 2, bias=True, norm_cfg=norm_cfg)
        warnings.warn('No warnings')
    assert (len(record) == 1)
    assert (record[0].message.args[0] == 'No warnings')
|
def conv_forward(self, x):
    """Stub for ``Conv2d.forward`` that tags its input with ``_conv``."""
    return x + '_conv'
|
def bn_forward(self, x):
    """Stub for ``BatchNorm2d.forward`` that tags its input with ``_bn``."""
    return x + '_bn'
|
def relu_forward(self, x):
    """Stub for ``ReLU.forward`` that tags its input with ``_relu``."""
    return x + '_relu'
|
@patch('torch.nn.ReLU.forward', relu_forward)
@patch('torch.nn.BatchNorm2d.forward', bn_forward)
@patch('torch.nn.Conv2d.forward', conv_forward)
def test_order():
    """With conv/bn/relu forwards stubbed to append string tags, the output
    records the exact order in which ConvModule applies its stages."""
    # ``order`` must be a tuple ...
    with pytest.raises(AssertionError):
        order = ['conv', 'norm', 'act']
        ConvModule(3, 8, 2, order=order)
    # ... of length 3 ...
    with pytest.raises(AssertionError):
        order = ('conv', 'norm')
        ConvModule(3, 8, 2, order=order)
    # ... containing 'conv', 'norm' and 'act' exactly once each.
    with pytest.raises(AssertionError):
        order = ('conv', 'norm', 'norm')
        ConvModule(3, 8, 2, order=order)
    with pytest.raises(AssertionError):
        order = ('conv', 'norm', 'something')
        ConvModule(3, 8, 2, order=order)
    # Default order: conv -> norm -> act.
    conv = ConvModule(3, 8, 2, norm_cfg=dict(type='BN'))
    out = conv('input')
    assert (out == 'input_conv_bn_relu')
    # A custom order is respected.
    conv = ConvModule(3, 8, 2, norm_cfg=dict(type='BN'), order=('norm', 'conv', 'act'))
    out = conv('input')
    assert (out == 'input_bn_conv_relu')
    # Per-call flags can skip individual stages.
    conv = ConvModule(3, 8, 2, norm_cfg=dict(type='BN'))
    out = conv('input', activate=False)
    assert (out == 'input_conv_bn')
    conv = ConvModule(3, 8, 2, norm_cfg=dict(type='BN'))
    out = conv('input', norm=False)
    assert (out == 'input_conv_relu')
|
def test_depthwise_separable_conv():
    """Check DepthwiseSeparableConvModule structure and cfg propagation."""
    # in_channels must be divisible by groups for the depthwise conv,
    # so an explicit mismatching ``groups`` is rejected.
    with pytest.raises(AssertionError):
        DepthwiseSeparableConvModule(4, 8, 2, groups=2)
    # Default: depthwise (groups == in_channels) + 1x1 pointwise, ReLU, no norm.
    conv = DepthwiseSeparableConvModule(3, 8, 2)
    assert (conv.depthwise_conv.conv.groups == 3)
    assert (conv.pointwise_conv.conv.kernel_size == (1, 1))
    assert (not conv.depthwise_conv.with_norm)
    assert (not conv.pointwise_conv.with_norm)
    assert (conv.depthwise_conv.activate.__class__.__name__ == 'ReLU')
    assert (conv.pointwise_conv.activate.__class__.__name__ == 'ReLU')
    x = torch.rand(1, 3, 256, 256)
    output = conv(x)
    assert (output.shape == (1, 8, 255, 255))
    # dw_norm_cfg applies a norm only to the depthwise part.
    conv = DepthwiseSeparableConvModule(3, 8, 2, dw_norm_cfg=dict(type='BN'))
    assert (conv.depthwise_conv.norm_name == 'bn')
    assert (not conv.pointwise_conv.with_norm)
    x = torch.rand(1, 3, 256, 256)
    output = conv(x)
    assert (output.shape == (1, 8, 255, 255))
    # pw_norm_cfg applies a norm only to the pointwise part.
    conv = DepthwiseSeparableConvModule(3, 8, 2, pw_norm_cfg=dict(type='BN'))
    assert (not conv.depthwise_conv.with_norm)
    assert (conv.pointwise_conv.norm_name == 'bn')
    x = torch.rand(1, 3, 256, 256)
    output = conv(x)
    assert (output.shape == (1, 8, 255, 255))
    # A plain norm_cfg applies to both parts.
    conv = DepthwiseSeparableConvModule(3, 8, 2, norm_cfg=dict(type='BN'))
    assert (conv.depthwise_conv.norm_name == 'bn')
    assert (conv.pointwise_conv.norm_name == 'bn')
    x = torch.rand(1, 3, 256, 256)
    output = conv(x)
    assert (output.shape == (1, 8, 255, 255))
    # A custom stage order still forwards correctly.
    conv = DepthwiseSeparableConvModule(3, 8, 2, order=('norm', 'conv', 'act'))
    x = torch.rand(1, 3, 256, 256)
    output = conv(x)
    assert (output.shape == (1, 8, 255, 255))
    # Spectral norm wraps weights of both sub-convs.
    conv = DepthwiseSeparableConvModule(3, 8, 3, padding=1, with_spectral_norm=True)
    assert hasattr(conv.depthwise_conv.conv, 'weight_orig')
    assert hasattr(conv.pointwise_conv.conv, 'weight_orig')
    output = conv(x)
    assert (output.shape == (1, 8, 256, 256))
    # padding_mode is forwarded to the depthwise conv.
    conv = DepthwiseSeparableConvModule(3, 8, 3, padding=1, padding_mode='reflect')
    assert isinstance(conv.depthwise_conv.padding_layer, nn.ReflectionPad2d)
    output = conv(x)
    assert (output.shape == (1, 8, 256, 256))
    # dw_act_cfg / pw_act_cfg override the activation per part ...
    conv = DepthwiseSeparableConvModule(3, 8, 3, padding=1, dw_act_cfg=dict(type='LeakyReLU'))
    assert (conv.depthwise_conv.activate.__class__.__name__ == 'LeakyReLU')
    assert (conv.pointwise_conv.activate.__class__.__name__ == 'ReLU')
    output = conv(x)
    assert (output.shape == (1, 8, 256, 256))
    conv = DepthwiseSeparableConvModule(3, 8, 3, padding=1, pw_act_cfg=dict(type='LeakyReLU'))
    assert (conv.depthwise_conv.activate.__class__.__name__ == 'ReLU')
    assert (conv.pointwise_conv.activate.__class__.__name__ == 'LeakyReLU')
    output = conv(x)
    assert (output.shape == (1, 8, 256, 256))
    # ... while a plain act_cfg applies to both.
    conv = DepthwiseSeparableConvModule(3, 8, 3, padding=1, act_cfg=dict(type='LeakyReLU'))
    assert (conv.depthwise_conv.activate.__class__.__name__ == 'LeakyReLU')
    assert (conv.pointwise_conv.activate.__class__.__name__ == 'LeakyReLU')
    output = conv(x)
    assert (output.shape == (1, 8, 256, 256))
|
class ExampleModel(nn.Module):
    """Model whose forward takes an input *shape* and synthesizes a batch.

    Used to exercise ``input_constructor`` support in the FLOPs counter:
    the counter passes a shape tuple instead of a tensor.
    """

    def __init__(self):
        super().__init__()
        self.conv2d = nn.Conv2d(3, 8, 3)

    def forward(self, imgs):
        # ``imgs`` is a shape tuple, e.g. (3, 16, 16); build a batch of one.
        batch = torch.randn((1, *imgs))
        return self.conv2d(batch)
|
def input_constructor(x):
    """Wrap a shape tuple into the kwargs ``ExampleModel.forward`` expects."""
    return {'imgs': x}
|
def test_flops_counter():
    """Check get_model_complexity_info validation, values and formatting."""
    # input_res must be a tuple, not a list.
    with pytest.raises(AssertionError):
        model = nn.Conv2d(3, 8, 3)
        input_res = [1, 3, 16, 16]
        get_model_complexity_info(model, input_res)
    # input_res must be non-empty.
    with pytest.raises(AssertionError):
        model = nn.Conv2d(3, 8, 3)
        input_res = tuple()
        get_model_complexity_info(model, input_res)
    # Compare raw numbers against precomputed ground truth.
    # NOTE(review): ``gt_results`` is defined elsewhere in this module —
    # presumably a list of dicts with 'model'/'input'/'flops'/'params' keys.
    for item in gt_results:
        model = item['model']
        input = item['input']
        (flops, params) = get_model_complexity_info(model, input, as_strings=False, print_per_layer_stat=False)
        assert ((flops == item['flops']) and (params == item['params']))
    # input_constructor lets a model receive a shape tuple instead of a tensor.
    model = ExampleModel()
    x = (3, 16, 16)
    (flops, params) = get_model_complexity_info(model, x, as_strings=False, print_per_layer_stat=False, input_constructor=input_constructor)
    assert ((flops == 43904.0) and (params == 224.0))
    # as_strings=True (default) returns human-readable strings.
    model = nn.Conv3d(3, 8, 3)
    x = (3, 3, 512, 512)
    (flops, params) = get_model_complexity_info(model, x, print_per_layer_stat=False)
    assert ((flops == '0.17 GFLOPs') and (params == str(656)))
    # The per-layer report is written to the given output stream.
    model = nn.Conv1d(3, 8, 3)
    x = (3, 16)
    out = StringIO()
    get_model_complexity_info(model, x, ost=out)
    assert (out.getvalue() == 'Conv1d(0.0 M, 100.000% Params, 0.0 GFLOPs, 100.000% FLOPs, 3, 8, kernel_size=(3,), stride=(1,))\n')
    # A Sequential with a Linear head is counted correctly too.
    model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.Flatten(), nn.Linear(1568, 2))
    x = (3, 16, 16)
    (flops, params) = get_model_complexity_info(model, x, as_strings=False, print_per_layer_stat=True)
    assert ((flops == 47040.0) and (params == 3362))
|
def test_flops_to_string():
    """flops_to_string honors explicit units, precision, and auto-selection."""
    flops = 6.54321 * 10.0**9
    assert flops_to_string(flops) == '6.54 GFLOPs'
    assert flops_to_string(flops, 'MFLOPs') == '6543.21 MFLOPs'
    assert flops_to_string(flops, 'KFLOPs') == '6543210.0 KFLOPs'
    assert flops_to_string(flops, 'FLOPs') == '6543210000.0 FLOPs'
    assert flops_to_string(flops, precision=4) == '6.5432 GFLOPs'

    # With units=None the largest sensible unit is chosen automatically.
    for value, expected in (
        (6.54321 * 10.0**9, '6.54 GFLOPs'),
        (3.21 * 10.0**7, '32.1 MFLOPs'),
        (5.4 * 10.0**3, '5.4 KFLOPs'),
        (987, '987 FLOPs'),
    ):
        assert flops_to_string(value, None) == expected
|
def test_params_to_string():
    """params_to_string auto-selects a unit or honors an explicit one."""
    # Automatic unit selection.
    for value, expected in (
        (3.21 * 10.0**7, '32.1 M'),
        (4.56 * 10.0**5, '456.0 k'),
        (7.89 * 10.0**2, '789.0'),
    ):
        assert params_to_string(value) == expected

    # Explicit units and precision.
    num_params = 6.54321 * 10.0**7
    assert params_to_string(num_params, 'M') == '65.43 M'
    assert params_to_string(num_params, 'K') == '65432.1 K'
    assert params_to_string(num_params, '') == '65432100.0'
    assert params_to_string(num_params, precision=4) == '65.4321 M'
|
def test_fuse_conv_bn():
    """Folding BN into preceding convs must not change the forward result."""
    inputs = torch.rand((1, 3, 5, 5))
    model = nn.Sequential(
        nn.BatchNorm2d(3),  # standalone BN with no conv to fuse into
        ConvModule(3, 5, 3, norm_cfg=dict(type='BN')),
        ConvModule(5, 5, 3, norm_cfg=dict(type='BN')),
    )
    fused = fuse_conv_bn(model)
    assert torch.equal(model(inputs), fused(inputs))
|
def test_generalized_attention():
    """Smoke-test GeneralizedAttention under each attention term and option.

    Renamed from a second ``test_context_block`` definition that shadowed
    the ContextBlock test defined earlier in this module, so pytest only
    ever collected one of the two; the distinct name restores both.
    """
    # '1000' (content-content term): query/key/value projections exist.
    imgs = torch.randn(2, 16, 20, 20)
    gen_attention_block = GeneralizedAttention(16, attention_type='1000')
    assert (gen_attention_block.query_conv.in_channels == 16)
    assert (gen_attention_block.key_conv.in_channels == 16)
    # The original asserted ``key_conv`` twice; the value projection is the
    # third input branch that should be checked here.
    assert (gen_attention_block.value_conv.in_channels == 16)
    out = gen_attention_block(imgs)
    assert (out.shape == imgs.shape)
    # '0100' (content-position term): geometric FCs take 8 input features here.
    imgs = torch.randn(2, 16, 20, 20)
    gen_attention_block = GeneralizedAttention(16, attention_type='0100')
    assert (gen_attention_block.query_conv.in_channels == 16)
    assert (gen_attention_block.appr_geom_fc_x.in_features == 8)
    assert (gen_attention_block.appr_geom_fc_y.in_features == 8)
    out = gen_attention_block(imgs)
    assert (out.shape == imgs.shape)
    # '0010' (bias-content term): a learned appr_bias is created.
    imgs = torch.randn(2, 16, 20, 20)
    gen_attention_block = GeneralizedAttention(16, attention_type='0010')
    assert (gen_attention_block.key_conv.in_channels == 16)
    assert hasattr(gen_attention_block, 'appr_bias')
    out = gen_attention_block(imgs)
    assert (out.shape == imgs.shape)
    # '0001' (bias-position term): geometric FCs plus a geom_bias.
    imgs = torch.randn(2, 16, 20, 20)
    gen_attention_block = GeneralizedAttention(16, attention_type='0001')
    assert (gen_attention_block.appr_geom_fc_x.in_features == 8)
    assert (gen_attention_block.appr_geom_fc_y.in_features == 8)
    assert hasattr(gen_attention_block, 'geom_bias')
    out = gen_attention_block(imgs)
    assert (out.shape == imgs.shape)
    # A non-negative spatial_range builds a local constraint map.
    imgs = torch.randn(2, 256, 20, 20)
    gen_attention_block = GeneralizedAttention(256, spatial_range=10)
    assert hasattr(gen_attention_block, 'local_constraint_map')
    out = gen_attention_block(imgs)
    assert (out.shape == imgs.shape)
    # q_stride / kv_stride enable downsampling of q and of k/v respectively.
    imgs = torch.randn(2, 16, 20, 20)
    gen_attention_block = GeneralizedAttention(16, q_stride=2)
    assert (gen_attention_block.q_downsample is not None)
    out = gen_attention_block(imgs)
    assert (out.shape == imgs.shape)
    imgs = torch.randn(2, 16, 20, 20)
    gen_attention_block = GeneralizedAttention(16, kv_stride=2)
    assert (gen_attention_block.kv_downsample is not None)
    out = gen_attention_block(imgs)
    assert (out.shape == imgs.shape)
    # fp16 forward on GPU, when one is available.
    if torch.cuda.is_available():
        imgs = torch.randn(2, 16, 20, 20).cuda().to(torch.half)
        gen_attention_block = GeneralizedAttention(
            16, spatial_range=(- 1), num_heads=8, attention_type='1111', kv_stride=2)
        gen_attention_block.cuda().type(torch.half)
        out = gen_attention_block(imgs)
        assert (out.shape == imgs.shape)
|
def test_hsigmoid():
    """HSigmoid clamps (x + bias) / divisor into [min_value, max_value]."""
    # A zero divisor is rejected.
    with pytest.raises(AssertionError):
        HSigmoid(divisor=0)

    # Default parameters: min(max((x + 3) / 6, 0), 1).
    act = HSigmoid()
    shape = torch.Size([1, 3, 64, 64])
    x = torch.randn(shape)
    expected = torch.min(torch.max((x + 3) / 6, torch.zeros(shape)), torch.ones(shape))
    out = act(x)
    assert out.shape == expected.shape
    assert torch.equal(out, expected)

    # Custom parameters (bias=1, divisor=2): min(max((x + 1) / 2, 0), 1).
    act = HSigmoid(1, 2, 0, 1)
    x = torch.randn(shape)
    expected = torch.min(torch.max((x + 1) / 2, torch.zeros(shape)), torch.ones(shape))
    out = act(x)
    assert out.shape == expected.shape
    assert torch.equal(out, expected)
|
def test_hswish():
    """HSwish(x) == x * relu6(x + 3) / 6; ``inplace`` reaches the inner ReLU6."""
    act = HSwish(inplace=True)
    assert act.act.inplace
    act = HSwish()
    assert not act.act.inplace

    x = torch.randn(1, 3, 64, 64)
    expected = x * relu6(x + 3) / 6
    out = act(x)
    assert out.shape == expected.shape
    assert torch.equal(out, expected)
|
def test_build_model_from_cfg():
    """Check registry-based model building from dict and list configs."""
    # A local registry using build_model_from_cfg as its build function.
    BACKBONES = mmcv.Registry('backbone', build_func=build_model_from_cfg)

    @BACKBONES.register_module()
    class ResNet(nn.Module):

        def __init__(self, depth, stages=4):
            super().__init__()
            self.depth = depth
            self.stages = stages

        def forward(self, x):
            return x

    @BACKBONES.register_module()
    class ResNeXt(nn.Module):

        def __init__(self, depth, stages=4):
            super().__init__()
            self.depth = depth
            self.stages = stages

        def forward(self, x):
            return x

    # A dict cfg builds a single model; unspecified args keep defaults.
    cfg = dict(type='ResNet', depth=50)
    model = BACKBONES.build(cfg)
    assert isinstance(model, ResNet)
    assert ((model.depth == 50) and (model.stages == 4))
    # Explicit cfg values override defaults.
    cfg = dict(type='ResNeXt', depth=50, stages=3)
    model = BACKBONES.build(cfg)
    assert isinstance(model, ResNeXt)
    assert ((model.depth == 50) and (model.stages == 3))
    # A list cfg builds an nn.Sequential of the models, in order.
    cfg = [dict(type='ResNet', depth=50), dict(type='ResNeXt', depth=50, stages=3)]
    model = BACKBONES.build(cfg)
    assert isinstance(model, nn.Sequential)
    assert isinstance(model[0], ResNet)
    assert ((model[0].depth == 50) and (model[0].stages == 4))
    assert isinstance(model[1], ResNeXt)
    assert ((model[1].depth == 50) and (model[1].stages == 3))
    # A child registry inherits the parent's build function ...
    NEW_MODELS = mmcv.Registry('models', parent=MODELS, scope='new')
    assert (NEW_MODELS.build_func is build_model_from_cfg)

    # ... unless an explicit build_func is supplied.
    def pseudo_build(cfg):
        return cfg

    NEW_MODELS = mmcv.Registry('models', parent=MODELS, build_func=pseudo_build)
    assert (NEW_MODELS.build_func is pseudo_build)
|
def test_nonlocal():
    """_NonLocalNd rejects unknown modes and builds with common options."""
    with pytest.raises(ValueError):
        _NonLocalNd(3, mode='unsupport_mode')
    # These constructions only need to succeed without raising.
    for extra_kwargs in (
        dict(),
        dict(norm_cfg=dict(type='BN')),
        dict(zeros_init=False),
        dict(norm_cfg=dict(type='BN'), zeros_init=False),
    ):
        _NonLocalNd(3, **extra_kwargs)
|
def test_nonlocal3d():
    """Check NonLocal3d forward shapes for every mode and sub_sample."""
    # Default mode ('embedded_gaussian').
    imgs = torch.randn(2, 3, 10, 20, 20)
    nonlocal_3d = NonLocal3d(3)
    if (torch.__version__ == 'parrots'):
        # NOTE(review): under the parrots backend the data and module are
        # moved to GPU (when available) before the forward below.
        if torch.cuda.is_available():
            imgs = imgs.cuda()
            nonlocal_3d.cuda()
    out = nonlocal_3d(imgs)
    assert (out.shape == imgs.shape)
    # 'dot_product' mode.
    nonlocal_3d = NonLocal3d(3, mode='dot_product')
    assert (nonlocal_3d.mode == 'dot_product')
    if (torch.__version__ == 'parrots'):
        if torch.cuda.is_available():
            nonlocal_3d.cuda()
    out = nonlocal_3d(imgs)
    assert (out.shape == imgs.shape)
    # 'concatenation' mode.
    nonlocal_3d = NonLocal3d(3, mode='concatenation')
    assert (nonlocal_3d.mode == 'concatenation')
    if (torch.__version__ == 'parrots'):
        if torch.cuda.is_available():
            nonlocal_3d.cuda()
    out = nonlocal_3d(imgs)
    assert (out.shape == imgs.shape)
    # 'gaussian' mode has no phi projection.
    nonlocal_3d = NonLocal3d(3, mode='gaussian')
    assert (not hasattr(nonlocal_3d, 'phi'))
    assert (nonlocal_3d.mode == 'gaussian')
    if (torch.__version__ == 'parrots'):
        if torch.cuda.is_available():
            nonlocal_3d.cuda()
    out = nonlocal_3d(imgs)
    assert (out.shape == imgs.shape)
    # sub_sample appends a (1, 2, 2) max-pool after g; phi becomes a pool.
    nonlocal_3d = NonLocal3d(3, mode='gaussian', sub_sample=True)
    assert (isinstance(nonlocal_3d.g, nn.Sequential) and (len(nonlocal_3d.g) == 2))
    assert isinstance(nonlocal_3d.g[1], nn.MaxPool3d)
    assert (nonlocal_3d.g[1].kernel_size == (1, 2, 2))
    assert isinstance(nonlocal_3d.phi, nn.MaxPool3d)
    if (torch.__version__ == 'parrots'):
        if torch.cuda.is_available():
            nonlocal_3d.cuda()
    out = nonlocal_3d(imgs)
    assert (out.shape == imgs.shape)
    # With a non-gaussian mode, both g and phi get the pooled Sequential.
    nonlocal_3d = NonLocal3d(3, mode='dot_product', sub_sample=True)
    for m in [nonlocal_3d.g, nonlocal_3d.phi]:
        assert (isinstance(m, nn.Sequential) and (len(m) == 2))
        assert isinstance(m[1], nn.MaxPool3d)
        assert (m[1].kernel_size == (1, 2, 2))
    if (torch.__version__ == 'parrots'):
        if torch.cuda.is_available():
            nonlocal_3d.cuda()
    out = nonlocal_3d(imgs)
    assert (out.shape == imgs.shape)
|
def test_nonlocal2d():
    """Check NonLocal2d forward shapes for every mode and sub_sample."""
    # Default mode ('embedded_gaussian').
    imgs = torch.randn(2, 3, 20, 20)
    nonlocal_2d = NonLocal2d(3)
    if (torch.__version__ == 'parrots'):
        # NOTE(review): under the parrots backend the data and module are
        # moved to GPU (when available) before the forward below.
        if torch.cuda.is_available():
            imgs = imgs.cuda()
            nonlocal_2d.cuda()
    out = nonlocal_2d(imgs)
    assert (out.shape == imgs.shape)
    # 'dot_product' mode.
    imgs = torch.randn(2, 3, 20, 20)
    nonlocal_2d = NonLocal2d(3, mode='dot_product')
    if (torch.__version__ == 'parrots'):
        if torch.cuda.is_available():
            imgs = imgs.cuda()
            nonlocal_2d.cuda()
    out = nonlocal_2d(imgs)
    assert (out.shape == imgs.shape)
    # 'concatenation' mode.
    imgs = torch.randn(2, 3, 20, 20)
    nonlocal_2d = NonLocal2d(3, mode='concatenation')
    if (torch.__version__ == 'parrots'):
        if torch.cuda.is_available():
            imgs = imgs.cuda()
            nonlocal_2d.cuda()
    out = nonlocal_2d(imgs)
    assert (out.shape == imgs.shape)
    # 'gaussian' mode has no phi projection.
    imgs = torch.randn(2, 3, 20, 20)
    nonlocal_2d = NonLocal2d(3, mode='gaussian')
    assert (not hasattr(nonlocal_2d, 'phi'))
    if (torch.__version__ == 'parrots'):
        if torch.cuda.is_available():
            imgs = imgs.cuda()
            nonlocal_2d.cuda()
    out = nonlocal_2d(imgs)
    assert (out.shape == imgs.shape)
    # sub_sample appends a 2x2 max-pool after g; phi becomes a pool.
    nonlocal_2d = NonLocal2d(3, mode='gaussian', sub_sample=True)
    assert (isinstance(nonlocal_2d.g, nn.Sequential) and (len(nonlocal_2d.g) == 2))
    assert isinstance(nonlocal_2d.g[1], nn.MaxPool2d)
    assert (nonlocal_2d.g[1].kernel_size == (2, 2))
    assert isinstance(nonlocal_2d.phi, nn.MaxPool2d)
    if (torch.__version__ == 'parrots'):
        if torch.cuda.is_available():
            nonlocal_2d.cuda()
    out = nonlocal_2d(imgs)
    assert (out.shape == imgs.shape)
    # With a non-gaussian mode, both g and phi get the pooled Sequential.
    nonlocal_2d = NonLocal2d(3, mode='dot_product', sub_sample=True)
    for m in [nonlocal_2d.g, nonlocal_2d.phi]:
        assert (isinstance(m, nn.Sequential) and (len(m) == 2))
        assert isinstance(m[1], nn.MaxPool2d)
        assert (m[1].kernel_size == (2, 2))
    if (torch.__version__ == 'parrots'):
        if torch.cuda.is_available():
            nonlocal_2d.cuda()
    out = nonlocal_2d(imgs)
    assert (out.shape == imgs.shape)
|
def test_nonlocal1d():
    """Check NonLocal1d forward shapes for every mode and sub_sample."""
    # Default mode ('embedded_gaussian').
    imgs = torch.randn(2, 3, 20)
    nonlocal_1d = NonLocal1d(3)
    if (torch.__version__ == 'parrots'):
        # NOTE(review): under the parrots backend the data and module are
        # moved to GPU (when available) before the forward below.
        if torch.cuda.is_available():
            imgs = imgs.cuda()
            nonlocal_1d.cuda()
    out = nonlocal_1d(imgs)
    assert (out.shape == imgs.shape)
    # 'dot_product' mode.
    imgs = torch.randn(2, 3, 20)
    nonlocal_1d = NonLocal1d(3, mode='dot_product')
    if (torch.__version__ == 'parrots'):
        if torch.cuda.is_available():
            imgs = imgs.cuda()
            nonlocal_1d.cuda()
    out = nonlocal_1d(imgs)
    assert (out.shape == imgs.shape)
    # 'concatenation' mode.
    imgs = torch.randn(2, 3, 20)
    nonlocal_1d = NonLocal1d(3, mode='concatenation')
    if (torch.__version__ == 'parrots'):
        if torch.cuda.is_available():
            imgs = imgs.cuda()
            nonlocal_1d.cuda()
    out = nonlocal_1d(imgs)
    assert (out.shape == imgs.shape)
    # 'gaussian' mode has no phi projection.
    imgs = torch.randn(2, 3, 20)
    nonlocal_1d = NonLocal1d(3, mode='gaussian')
    assert (not hasattr(nonlocal_1d, 'phi'))
    if (torch.__version__ == 'parrots'):
        if torch.cuda.is_available():
            imgs = imgs.cuda()
            nonlocal_1d.cuda()
    out = nonlocal_1d(imgs)
    assert (out.shape == imgs.shape)
    # sub_sample appends a size-2 max-pool after g; phi becomes a pool.
    nonlocal_1d = NonLocal1d(3, mode='gaussian', sub_sample=True)
    assert (isinstance(nonlocal_1d.g, nn.Sequential) and (len(nonlocal_1d.g) == 2))
    assert isinstance(nonlocal_1d.g[1], nn.MaxPool1d)
    assert (nonlocal_1d.g[1].kernel_size == 2)
    assert isinstance(nonlocal_1d.phi, nn.MaxPool1d)
    if (torch.__version__ == 'parrots'):
        if torch.cuda.is_available():
            nonlocal_1d.cuda()
    out = nonlocal_1d(imgs)
    assert (out.shape == imgs.shape)
    # With a non-gaussian mode, both g and phi get the pooled Sequential.
    nonlocal_1d = NonLocal1d(3, mode='dot_product', sub_sample=True)
    for m in [nonlocal_1d.g, nonlocal_1d.phi]:
        assert (isinstance(m, nn.Sequential) and (len(m) == 2))
        assert isinstance(m[1], nn.MaxPool1d)
        assert (m[1].kernel_size == 2)
    if (torch.__version__ == 'parrots'):
        if torch.cuda.is_available():
            nonlocal_1d.cuda()
    out = nonlocal_1d(imgs)
    assert (out.shape == imgs.shape)
|
def test_revert_syncbn():
    """SyncBN cannot forward without a process group; reverting fixes it."""
    conv = ConvModule(3, 8, 2, norm_cfg=dict(type='SyncBN'))
    feat = torch.randn(1, 3, 10, 10)
    # Forward through SyncBN fails outside distributed training.
    with pytest.raises(ValueError):
        conv(feat)
    # After conversion the module runs like a plain BatchNorm.
    conv = revert_sync_batchnorm(conv)
    assert conv(feat).shape == (1, 8, 9, 9)
|
def test_revert_mmsyncbn():
    """Distributed check that MMSyncBN and its reverted BN agree.

    Only runs under SLURM with at least two tasks; otherwise prints usage
    instructions and returns without asserting anything.
    """
    if (('SLURM_NTASKS' not in os.environ) or (int(os.environ['SLURM_NTASKS']) < 2)):
        print('Must run on slurm with more than 1 process!\nsrun -p test --gres=gpu:2 -n2')
        return
    rank = int(os.environ['SLURM_PROCID'])
    world_size = int(os.environ['SLURM_NTASKS'])
    local_rank = int(os.environ['SLURM_LOCALID'])
    node_list = str(os.environ['SLURM_NODELIST'])
    # NOTE(review): assumes SLURM_NODELIST embeds the master IP as numeric
    # groups 1-4 — confirm against the cluster's node-name format.
    node_parts = re.findall('[0-9]+', node_list)
    os.environ['MASTER_ADDR'] = (f'{node_parts[1]}.{node_parts[2]}' + f'.{node_parts[3]}.{node_parts[4]}')
    os.environ['MASTER_PORT'] = '12341'
    os.environ['WORLD_SIZE'] = str(world_size)
    os.environ['RANK'] = str(rank)
    dist.init_process_group('nccl')
    torch.cuda.set_device(local_rank)
    # Broadcast one input so every rank compares identical data.
    x = torch.randn(1, 3, 10, 10).cuda()
    dist.broadcast(x, src=0)
    conv = ConvModule(3, 8, 2, norm_cfg=dict(type='MMSyncBN')).cuda()
    conv.eval()
    y_mmsyncbn = conv(x).detach().cpu().numpy()
    # Reverting to plain BN must reproduce the MMSyncBN eval output ...
    conv = revert_sync_batchnorm(conv)
    y_bn = conv(x).detach().cpu().numpy()
    assert np.all(np.isclose(y_bn, y_mmsyncbn, 0.001))
    # ... and the reverted module must also run on CPU.
    (conv, x) = (conv.to('cpu'), x.to('cpu'))
    y_bn_cpu = conv(x).detach().numpy()
    assert np.all(np.isclose(y_bn, y_bn_cpu, 0.001))
|
def test_scale():
    """Scale holds a learnable float scalar; forward preserves the shape."""
    for init_value, module in ((1.0, Scale()), (10.0, Scale(10.0))):
        # The parameter starts at the requested value (default 1.0) ...
        assert module.scale.data == init_value
        # ... and is stored as a float32 scalar.
        assert module.scale.dtype == torch.float
        feat = torch.rand(1, 3, 64, 64)
        assert module(feat).shape == (1, 3, 64, 64)
|
def test_swish():
    """Swish(x) must equal x * sigmoid(x) element-wise."""
    act = Swish()
    input = torch.randn(1, 3, 64, 64)
    # torch.sigmoid replaces the long-deprecated F.sigmoid alias; the
    # values are identical, only the warning goes away.
    expected_output = input * torch.sigmoid(input)
    output = act(input)
    assert output.shape == expected_output.shape
    assert torch.equal(output, expected_output)
|
def test_adaptive_padding():
    """AdaptivePadding should pad inputs so the kernel tiles them exactly.

    Exercises both modes: 'same' (pad evenly on all sides) and 'corner'
    (pad bottom/right only); only the padded output sizes are checked.
    NOTE: several config variables (e.g. ``stride``) deliberately carry
    over from one case to the next.
    """
    for padding in ('same', 'corner'):
        # 16x16 kernel/stride: 15x17 and 16x17 both pad up to 16x32.
        kernel_size = 16
        stride = 16
        dilation = 1
        input = torch.rand(1, 1, 15, 17)
        adap_pad = AdaptivePadding(kernel_size=kernel_size, stride=stride, dilation=dilation, padding=padding)
        out = adap_pad(input)
        assert ((out.shape[2], out.shape[3]) == (16, 32))
        input = torch.rand(1, 1, 16, 17)
        out = adap_pad(input)
        assert ((out.shape[2], out.shape[3]) == (16, 32))
        # 2x2 kernel/stride: odd sizes are padded to the next even size.
        kernel_size = (2, 2)
        stride = (2, 2)
        dilation = (1, 1)
        adap_pad = AdaptivePadding(kernel_size=kernel_size, stride=stride, dilation=dilation, padding=padding)
        input = torch.rand(1, 1, 11, 13)
        out = adap_pad(input)
        assert ((out.shape[2], out.shape[3]) == (12, 14))
        # Kernel smaller than stride: input already covered, no padding.
        kernel_size = (2, 2)
        stride = (10, 10)
        dilation = (1, 1)
        adap_pad = AdaptivePadding(kernel_size=kernel_size, stride=stride, dilation=dilation, padding=padding)
        input = torch.rand(1, 1, 10, 13)
        out = adap_pad(input)
        assert ((out.shape[2], out.shape[3]) == (10, 13))
        # Kernel larger than input (stride is still (10, 10) from above).
        kernel_size = (11, 11)
        adap_pad = AdaptivePadding(kernel_size=kernel_size, stride=stride, dilation=dilation, padding=padding)
        input = torch.rand(1, 1, 11, 13)
        out = adap_pad(input)
        assert ((out.shape[2], out.shape[3]) == (21, 21))
        # A dilated 4x5 kernel and a plain 7x9 kernel have the same
        # effective receptive field, so they must pad identically.
        input = torch.rand(1, 1, 11, 13)
        stride = (3, 4)
        kernel_size = (4, 5)
        dilation = (2, 2)
        adap_pad = AdaptivePadding(kernel_size=kernel_size, stride=stride, dilation=dilation, padding=padding)
        dilation_out = adap_pad(input)
        assert ((dilation_out.shape[2], dilation_out.shape[3]) == (16, 21))
        kernel_size = (7, 9)
        dilation = (1, 1)
        adap_pad = AdaptivePadding(kernel_size=kernel_size, stride=stride, dilation=dilation, padding=padding)
        kernel79_out = adap_pad(input)
        assert ((kernel79_out.shape[2], kernel79_out.shape[3]) == (16, 21))
        assert (kernel79_out.shape == dilation_out.shape)
    # Only 'same'/'corner' strings are accepted as adaptive padding modes.
    with pytest.raises(AssertionError):
        AdaptivePadding(kernel_size=kernel_size, stride=stride, dilation=dilation, padding=1)
|
def test_patch_embed():
    """PatchEmbed should project conv patches to embed_dims and report the
    output spatial size.

    Checks that the flattened token count equals the product of the
    reported spatial shape, that ``init_out_size`` (precomputed from
    ``input_size``) matches a real forward pass, and that 'same'/'corner'
    adaptive padding covers every pixel.
    """
    B = 2
    H = 3
    W = 4
    C = 3
    embed_dims = 10
    kernel_size = 3
    stride = 1
    dummy_input = torch.rand(B, C, H, W)
    # Plain conv embedding: 3x4 input with a 3x3 kernel -> 1x2 patches.
    patch_merge_1 = PatchEmbed(in_channels=C, embed_dims=embed_dims, kernel_size=kernel_size, stride=stride, padding=0, dilation=1, norm_cfg=None)
    (x1, shape) = patch_merge_1(dummy_input)
    assert (x1.shape == (2, 2, 10))
    assert (shape == (1, 2))
    assert ((shape[0] * shape[1]) == x1.shape[1])
    # Dilated embedding: effective kernel 9 on a 10x10 input -> 1x1 patch.
    B = 2
    H = 10
    W = 10
    C = 3
    embed_dims = 10
    kernel_size = 5
    stride = 2
    dummy_input = torch.rand(B, C, H, W)
    patch_merge_2 = PatchEmbed(in_channels=C, embed_dims=embed_dims, kernel_size=kernel_size, stride=stride, padding=0, dilation=2, norm_cfg=None)
    (x2, shape) = patch_merge_2(dummy_input)
    assert (x2.shape == (2, 1, 10))
    assert (shape == (1, 1))
    assert ((shape[0] * shape[1]) == x2.shape[1])
    # With input_size given, init_out_size is computed analytically from
    # the standard conv output formula and must match the forward result.
    stride = 2
    input_size = (10, 10)
    dummy_input = torch.rand(B, C, H, W)
    patch_merge_3 = PatchEmbed(in_channels=C, embed_dims=embed_dims, kernel_size=kernel_size, stride=stride, padding=0, dilation=2, norm_cfg=dict(type='LN'), input_size=input_size)
    (x3, shape) = patch_merge_3(dummy_input)
    assert (x3.shape == (2, 1, 10))
    assert (shape == (1, 1))
    assert ((shape[0] * shape[1]) == x3.shape[1])
    assert (patch_merge_3.init_out_size[1] == ((((input_size[0] - (2 * 4)) - 1) // 2) + 1))
    assert (patch_merge_3.init_out_size[0] == ((((input_size[0] - (2 * 4)) - 1) // 2) + 1))
    # Non-square input: init_out_size must still match the forward shape.
    H = 11
    W = 12
    input_size = (H, W)
    dummy_input = torch.rand(B, C, H, W)
    patch_merge_3 = PatchEmbed(in_channels=C, embed_dims=embed_dims, kernel_size=kernel_size, stride=stride, padding=0, dilation=2, norm_cfg=dict(type='LN'), input_size=input_size)
    (_, shape) = patch_merge_3(dummy_input)
    assert (shape == patch_merge_3.init_out_size)
    input_size = (H, W)
    dummy_input = torch.rand(B, C, H, W)
    patch_merge_3 = PatchEmbed(in_channels=C, embed_dims=embed_dims, kernel_size=kernel_size, stride=stride, padding=0, dilation=2, norm_cfg=dict(type='LN'), input_size=input_size)
    (_, shape) = patch_merge_3(dummy_input)
    assert (shape == patch_merge_3.init_out_size)
    # Adaptive padding: 'same' pads evenly, 'corner' pads bottom/right.
    for padding in ('same', 'corner'):
        # Kernel covering the whole input with stride 1 -> one token per
        # position of the padded input.
        in_c = 2
        embed_dims = 3
        B = 2
        input_size = (5, 5)
        kernel_size = (5, 5)
        stride = (1, 1)
        dilation = 1
        bias = False
        x = torch.rand(B, in_c, *input_size)
        patch_embed = PatchEmbed(in_channels=in_c, embed_dims=embed_dims, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias)
        (x_out, out_size) = patch_embed(x)
        assert (x_out.size() == (B, 25, 3))
        assert (out_size == (5, 5))
        assert (x_out.size(1) == (out_size[0] * out_size[1]))
        # Kernel == stride == input -> exactly one patch.
        input_size = (5, 5)
        kernel_size = (5, 5)
        stride = (5, 5)
        dilation = 1
        bias = False
        x = torch.rand(B, in_c, *input_size)
        patch_embed = PatchEmbed(in_channels=in_c, embed_dims=embed_dims, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias)
        (x_out, out_size) = patch_embed(x)
        assert (x_out.size() == (B, 1, 3))
        assert (out_size == (1, 1))
        assert (x_out.size(1) == (out_size[0] * out_size[1]))
        # Height not divisible by the kernel: padding adds a second row.
        input_size = (6, 5)
        kernel_size = (5, 5)
        stride = (5, 5)
        dilation = 1
        bias = False
        x = torch.rand(B, in_c, *input_size)
        patch_embed = PatchEmbed(in_channels=in_c, embed_dims=embed_dims, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias)
        (x_out, out_size) = patch_embed(x)
        assert (x_out.size() == (B, 2, 3))
        assert (out_size == (2, 1))
        assert (x_out.size(1) == (out_size[0] * out_size[1]))
        # Rectangular 6x2 kernel: width is padded from 5 up to 6.
        input_size = (6, 5)
        kernel_size = (6, 2)
        stride = (6, 2)
        dilation = 1
        bias = False
        x = torch.rand(B, in_c, *input_size)
        patch_embed = PatchEmbed(in_channels=in_c, embed_dims=embed_dims, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias)
        (x_out, out_size) = patch_embed(x)
        assert (x_out.size() == (B, 3, 3))
        assert (out_size == (1, 3))
        assert (x_out.size(1) == (out_size[0] * out_size[1]))
|
def test_patch_merging():
    """PatchMerging should unfold token maps and project them to a new dim.

    Inputs are flattened (B, L, C) sequences plus an explicit spatial
    ``input_size``; the merged token count must equal the product of the
    reported output size.  Also checks 'same'/'corner' adaptive padding.
    """
    # Numeric padding: 10x10 tokens, 3x3 kernel, stride 3, pad 1 -> 4x4.
    in_c = 3
    out_c = 4
    kernel_size = 3
    stride = 3
    padding = 1
    dilation = 1
    bias = False
    patch_merge = PatchMerging(in_channels=in_c, out_channels=out_c, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias)
    (B, L, C) = (1, 100, 3)
    input_size = (10, 10)
    x = torch.rand(B, L, C)
    (x_out, out_size) = patch_merge(x, input_size)
    assert (x_out.size() == (1, 16, 4))
    assert (out_size == (4, 4))
    assert (x_out.size(1) == (out_size[0] * out_size[1]))
    # Dilated 6x6 kernel (effective 11) with pad 2 -> 2x2 output.
    in_c = 4
    out_c = 5
    kernel_size = 6
    stride = 3
    padding = 2
    dilation = 2
    bias = False
    patch_merge = PatchMerging(in_channels=in_c, out_channels=out_c, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias)
    (B, L, C) = (1, 100, 4)
    input_size = (10, 10)
    x = torch.rand(B, L, C)
    (x_out, out_size) = patch_merge(x, input_size)
    assert (x_out.size() == (1, 4, 5))
    assert (out_size == (2, 2))
    assert (x_out.size(1) == (out_size[0] * out_size[1]))
    # Adaptive padding: 'same' pads evenly, 'corner' pads bottom/right.
    for padding in ('same', 'corner'):
        # Kernel covering the whole map with stride 1.
        in_c = 2
        out_c = 3
        B = 2
        input_size = (5, 5)
        kernel_size = (5, 5)
        stride = (1, 1)
        dilation = 1
        bias = False
        L = (input_size[0] * input_size[1])
        x = torch.rand(B, L, in_c)
        patch_merge = PatchMerging(in_channels=in_c, out_channels=out_c, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias)
        (x_out, out_size) = patch_merge(x, input_size)
        assert (x_out.size() == (B, 25, 3))
        assert (out_size == (5, 5))
        assert (x_out.size(1) == (out_size[0] * out_size[1]))
        # Kernel == stride == input -> one merged token.
        input_size = (5, 5)
        kernel_size = (5, 5)
        stride = (5, 5)
        dilation = 1
        bias = False
        L = (input_size[0] * input_size[1])
        x = torch.rand(B, L, in_c)
        patch_merge = PatchMerging(in_channels=in_c, out_channels=out_c, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias)
        (x_out, out_size) = patch_merge(x, input_size)
        assert (x_out.size() == (B, 1, 3))
        assert (out_size == (1, 1))
        assert (x_out.size(1) == (out_size[0] * out_size[1]))
        # Height not divisible by the kernel: padded to a second row.
        input_size = (6, 5)
        kernel_size = (5, 5)
        stride = (5, 5)
        dilation = 1
        bias = False
        L = (input_size[0] * input_size[1])
        x = torch.rand(B, L, in_c)
        patch_merge = PatchMerging(in_channels=in_c, out_channels=out_c, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias)
        (x_out, out_size) = patch_merge(x, input_size)
        assert (x_out.size() == (B, 2, 3))
        assert (out_size == (2, 1))
        assert (x_out.size(1) == (out_size[0] * out_size[1]))
        # Rectangular 6x2 kernel: width padded from 5 up to 6.
        input_size = (6, 5)
        kernel_size = (6, 2)
        stride = (6, 2)
        dilation = 1
        bias = False
        L = (input_size[0] * input_size[1])
        x = torch.rand(B, L, in_c)
        patch_merge = PatchMerging(in_channels=in_c, out_channels=out_c, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias)
        (x_out, out_size) = patch_merge(x, input_size)
        assert (x_out.size() == (B, 3, 3))
        assert (out_size == (1, 3))
        assert (x_out.size(1) == (out_size[0] * out_size[1]))
|
def test_multiheadattention():
    """MultiheadAttention must be equivalent in batch-first and query-first
    layouts, and support both the deprecated ``residual`` and the new
    ``identity`` keyword for the shortcut connection."""
    # Smoke test: construction with a Dropout dropout_layer works.
    MultiheadAttention(embed_dims=5, num_heads=5, attn_drop=0, proj_drop=0, dropout_layer=dict(type='Dropout', drop_prob=0.0), batch_first=True)
    batch_dim = 2
    embed_dim = 5
    num_query = 100
    attn_batch_first = MultiheadAttention(embed_dims=5, num_heads=5, attn_drop=0, proj_drop=0, dropout_layer=dict(type='DropPath', drop_prob=0.0), batch_first=True)
    attn_query_first = MultiheadAttention(embed_dims=5, num_heads=5, attn_drop=0, proj_drop=0, dropout_layer=dict(type='DropPath', drop_prob=0.0), batch_first=False)
    # Share weights so both modules compute the same function.
    param_dict = dict(attn_query_first.named_parameters())
    for (n, v) in attn_batch_first.named_parameters():
        param_dict[n].data = v.data
    input_batch_first = torch.rand(batch_dim, num_query, embed_dim)
    input_query_first = input_batch_first.transpose(0, 1)
    # Self-attention: both layouts must agree.
    assert torch.allclose(attn_query_first(input_query_first).sum(), attn_batch_first(input_batch_first).sum())
    # Cross-attention with a separate key tensor.
    key_batch_first = torch.rand(batch_dim, num_query, embed_dim)
    key_query_first = key_batch_first.transpose(0, 1)
    assert torch.allclose(attn_query_first(input_query_first, key_query_first).sum(), attn_batch_first(input_batch_first, key_batch_first).sum())
    # Explicit shortcut: out = attn(...) + identity instead of + query,
    # through either keyword spelling.
    identity = torch.ones_like(input_query_first)
    assert torch.allclose(attn_query_first(input_query_first, key_query_first, residual=identity).sum(), ((attn_batch_first(input_batch_first, key_batch_first).sum() + identity.sum()) - input_batch_first.sum()))
    assert torch.allclose(attn_query_first(input_query_first, key_query_first, identity=identity).sum(), ((attn_batch_first(input_batch_first, key_batch_first).sum() + identity.sum()) - input_batch_first.sum()))
    # Removed a leftover no-op tuple expression that re-ran the attention
    # and discarded the result.
|
def test_ffn():
    """FFN must support identity shortcuts through both the deprecated
    ``add_residual``/``residual`` names and ``add_identity``/``identity``."""
    # At least two fully-connected layers are required.
    with pytest.raises(AssertionError):
        FFN(num_fcs=1)
    FFN(dropout=0, add_residual=True)
    ffn = FFN(dropout=0, add_identity=True)
    # The FFN acts position-wise, so transposing batch/sequence dims must
    # not change the total output.
    input_tensor = torch.rand(2, 20, 256)
    input_tensor_nbc = input_tensor.transpose(0, 1)
    assert torch.allclose(ffn(input_tensor).sum(), ffn(input_tensor_nbc).sum())
    residual = torch.rand_like(input_tensor)
    # out = ffn_body(x) + residual, i.e. ffn(x) - x + residual.  The
    # original computed these allclose checks but never asserted them.
    assert torch.allclose(ffn(input_tensor, residual=residual).sum(), ((ffn(input_tensor).sum() + residual.sum()) - input_tensor.sum()))
    assert torch.allclose(ffn(input_tensor, identity=residual).sum(), ((ffn(input_tensor).sum() + residual.sum()) - input_tensor.sum()))
|
@pytest.mark.skipif((not torch.cuda.is_available()), reason='Cuda not available')
def test_basetransformerlayer_cuda():
    """Stacked BaseTransformerLayers must run on GPU and keep the shape."""
    layer = BaseTransformerLayer(operation_order=('self_attn', 'ffn'), batch_first=True, attn_cfgs=dict(type='MultiheadAttention', embed_dims=256, num_heads=8))
    stack = ModuleList([copy.deepcopy(layer) for _ in range(2)])
    stack.to('cuda')
    feat = torch.rand(2, 10, 256).cuda()
    for block in stack:
        feat = block(feat)
    assert feat.shape == torch.Size([2, 10, 256])
|
@pytest.mark.parametrize('embed_dims', [False, 256])
def test_basetransformerlayer(embed_dims):
    """BaseTransformerLayer must build its FFN from ffn_cfgs whether or not
    embed_dims is given explicitly, and honor batch_first."""
    attn_cfgs = (dict(type='MultiheadAttention', embed_dims=256, num_heads=8),)
    # The FFN config may omit embed_dims, in which case the layer infers it.
    common_ffn = dict(type='FFN', feedforward_channels=1024, num_fcs=2, ffn_drop=0.0, act_cfg=dict(type='ReLU', inplace=True))
    ffn_cfgs = dict(common_ffn, embed_dims=embed_dims) if embed_dims else common_ffn
    layer = BaseTransformerLayer(attn_cfgs=attn_cfgs, ffn_cfgs=ffn_cfgs, feedforward_channels=2048, ffn_dropout=0.1, operation_order=('self_attn', 'norm', 'ffn', 'norm'))
    # Default layout is query-first; the ffn_cfgs channel count wins over
    # the deprecated top-level feedforward_channels argument.
    assert layer.batch_first is False
    assert layer.ffns[0].feedforward_channels == 2048
    batch_first_layer = BaseTransformerLayer(attn_cfgs=(dict(type='MultiheadAttention', num_heads=8, embed_dims=256),), feedforward_channels=2048, ffn_dropout=0.1, operation_order=('self_attn', 'norm', 'ffn', 'norm'), batch_first=True)
    # batch_first must propagate into the attention modules, and a
    # batch-first forward pass must run.
    assert batch_first_layer.attentions[0].batch_first
    batch_first_layer(torch.rand(2, 10, 256))
|
def test_transformerlayersequence():
    """TransformerLayerSequence must replicate a single layer config
    num_layers times and reject inconsistent list configs."""
    layer_cfg = dict(
        type='BaseTransformerLayer',
        attn_cfgs=[
            dict(type='MultiheadAttention', embed_dims=256, num_heads=8, dropout=0.1),
            dict(type='MultiheadAttention', embed_dims=256, num_heads=4),
        ],
        feedforward_channels=1024,
        ffn_dropout=0.1,
        operation_order=('self_attn', 'norm', 'cross_attn', 'norm', 'ffn', 'norm'))
    seq = TransformerLayerSequence(num_layers=6, transformerlayers=layer_cfg)
    assert len(seq.layers) == 6
    assert seq.pre_norm is False
    # A one-element config list for six layers (with an incomplete second
    # attention config) must be rejected.
    with pytest.raises(AssertionError):
        bad_cfg = dict(
            type='BaseTransformerLayer',
            attn_cfgs=[
                dict(type='MultiheadAttention', embed_dims=256, num_heads=8, dropout=0.1),
                dict(type='MultiheadAttention', embed_dims=256),
            ],
            feedforward_channels=1024,
            ffn_dropout=0.1,
            operation_order=('self_attn', 'norm', 'cross_attn', 'norm', 'ffn', 'norm'))
        TransformerLayerSequence(num_layers=6, transformerlayers=[bad_cfg])
|
def test_drop_path():
    """DropPath is the identity for p=0 or eval mode; in training with
    p>0 it must return a new tensor object."""
    identity_path = DropPath(drop_prob=0)
    sample = torch.rand(2, 3, 4, 5)
    assert sample is identity_path(sample)
    stochastic_path = DropPath(drop_prob=0.1)
    # Eval mode: still a pure pass-through.
    stochastic_path.training = False
    sample = torch.rand(2, 3, 4, 5)
    assert sample is stochastic_path(sample)
    # Training mode: output is a freshly computed tensor.
    stochastic_path.training = True
    assert sample is not stochastic_path(sample)
|
def test_constant_init():
    """constant_init must fill weights with val and zero the bias; it must
    also accept layers without a bias."""
    conv_module = nn.Conv2d(3, 16, 3)
    constant_init(conv_module, 0.1)
    assert conv_module.weight.allclose(torch.full_like(conv_module.weight, 0.1))
    assert conv_module.bias.allclose(torch.zeros_like(conv_module.bias))
    conv_module_no_bias = nn.Conv2d(3, 16, 3, bias=False)
    constant_init(conv_module_no_bias, 0.1)
    # Fixed copy-paste bug: this previously re-checked conv_module instead
    # of the bias-free layer that was just initialized.
    assert conv_module_no_bias.weight.allclose(
        torch.full_like(conv_module_no_bias.weight, 0.1))
|
def test_xavier_init():
    """xavier_init must fill the bias, accept both distributions, and
    reject anything else."""
    layer = nn.Conv2d(3, 16, 3)
    xavier_init(layer, bias=0.1)
    assert layer.bias.allclose(torch.full_like(layer.bias, 0.1))
    xavier_init(layer, distribution='uniform')
    # Only 'normal' and 'uniform' are valid distributions.
    with pytest.raises(AssertionError):
        xavier_init(layer, distribution='student-t')
    bias_free_layer = nn.Conv2d(3, 16, 3, bias=False)
    xavier_init(bias_free_layer)
|
def test_normal_init():
    """normal_init must set the bias and tolerate bias-free layers."""
    layer = nn.Conv2d(3, 16, 3)
    normal_init(layer, bias=0.1)
    expected_bias = torch.full_like(layer.bias, 0.1)
    assert layer.bias.allclose(expected_bias)
    normal_init(nn.Conv2d(3, 16, 3, bias=False))
|
def test_trunc_normal_init():
    """trunc_normal_init must draw weights from a truncated normal (checked
    with a KS test) and fill the bias; bias-free layers must also work."""

    def _uniform(lo, hi):
        # One float sampled uniformly from [lo, hi).
        return lo + (hi - lo) * random.random()

    def _looks_trunc_normal(tensor, mean, std, lower, upper):
        # Kolmogorov-Smirnov test of the standardized samples against
        # scipy's truncnorm; a tiny p-value means a clear mismatch.
        samples = ((tensor.view(-1) - mean) / std).tolist()
        a = (lower - mean) / std
        b = (upper - mean) / std
        p_value = stats.kstest(samples, 'truncnorm', args=(a, b))[1]
        return p_value > 0.0001

    layer = nn.Conv2d(3, 16, 3)
    mean = _uniform(-3, 3)
    std = _uniform(0.01, 1)
    lower = _uniform(mean - 2 * std, mean)
    upper = _uniform(mean, mean + 2 * std)
    trunc_normal_init(layer, mean, std, lower, upper, bias=0.1)
    assert _looks_trunc_normal(layer.weight, mean, std, lower, upper)
    assert layer.bias.allclose(torch.full_like(layer.bias, 0.1))
    trunc_normal_init(nn.Conv2d(3, 16, 3, bias=False))
|
def test_uniform_init():
    """uniform_init must fill the bias and accept bias-free layers."""
    layer = nn.Conv2d(3, 16, 3)
    uniform_init(layer, bias=0.1)
    assert layer.bias.allclose(torch.full_like(layer.bias, 0.1))
    uniform_init(nn.Conv2d(3, 16, 3, bias=False))
|
def test_kaiming_init():
    """kaiming_init must set the bias, accept both distributions, and
    reject anything else."""
    layer = nn.Conv2d(3, 16, 3)
    kaiming_init(layer, bias=0.1)
    assert layer.bias.allclose(torch.full_like(layer.bias, 0.1))
    kaiming_init(layer, distribution='uniform')
    # Only 'normal' and 'uniform' are valid distributions.
    with pytest.raises(AssertionError):
        kaiming_init(layer, distribution='student-t')
    kaiming_init(nn.Conv2d(3, 16, 3, bias=False))
|
def test_caffe_xavier_init():
    """caffe2_xavier_init should run on a conv layer without error."""
    layer = nn.Conv2d(3, 16, 3)
    caffe2_xavier_init(layer)
|
def test_bias_init_with_prob():
    """bias_init_with_prob(p) must yield -log((1 - p) / p) as the bias."""
    conv_module = nn.Conv2d(3, 16, 3)
    prior_prob = 0.1
    # Use the named constant instead of repeating the 0.1 literal, so the
    # initialization and the expected value cannot drift apart.
    normal_init(conv_module, bias=bias_init_with_prob(prior_prob))
    bias = float((- np.log(((1 - prior_prob) / prior_prob))))
    assert conv_module.bias.allclose(torch.full_like(conv_module.bias, bias))
|
def test_constaninit():
    """Test the ConstantInit initializer class.

    ConstantInit fills weights with ``val`` and biases with ``bias`` (or a
    value derived from ``bias_prob``) for the configured layer types only.
    """
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Linear(1, 2))
    # Only Conv2d is targeted: the Linear layer must remain untouched.
    func = ConstantInit(val=1, bias=2, layer='Conv2d')
    func(model)
    assert torch.equal(model[0].weight, torch.full(model[0].weight.shape, 1.0))
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 2.0))
    assert (not torch.equal(model[2].weight, torch.full(model[2].weight.shape, 1.0)))
    assert (not torch.equal(model[2].bias, torch.full(model[2].bias.shape, 2.0)))
    # bias_prob converts a prior probability into the bias value; the conv
    # values set above must persist while only the Linear layer changes.
    func = ConstantInit(val=3, bias_prob=0.01, layer='Linear')
    func(model)
    res = bias_init_with_prob(0.01)
    assert torch.equal(model[0].weight, torch.full(model[0].weight.shape, 1.0))
    assert torch.equal(model[2].weight, torch.full(model[2].weight.shape, 3.0))
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 2.0))
    assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, res))
    # '_ConvNd' matches every conv flavour (Conv1d and Conv2d here).
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Conv1d(1, 2, 1))
    func = ConstantInit(val=4.0, bias=5.0, layer='_ConvNd')
    func(model)
    assert torch.all((model[0].weight == 4.0))
    assert torch.all((model[2].weight == 4.0))
    assert torch.all((model[0].bias == 5.0))
    assert torch.all((model[2].bias == 5.0))
    # bias / bias_prob / layer arguments are type-checked at construction.
    with pytest.raises(TypeError):
        func = ConstantInit(val=1, bias='1')
    with pytest.raises(TypeError):
        func = ConstantInit(val=1, bias_prob='1')
    with pytest.raises(TypeError):
        func = ConstantInit(val=1, layer=1)
|
def test_xavierinit():
    """Test the XavierInit initializer class.

    XavierInit applies Xavier weight initialization (optionally with a
    gain) and sets biases via ``bias`` or ``bias_prob`` for matching
    layer types.
    """
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Linear(1, 2))
    func = XavierInit(bias=0.1, layer='Conv2d')
    func(model)
    # NOTE(review): the full_like templates below look swapped (model[2]
    # paired with model[0] and vice versa), but since only the fill value
    # matters and broadcasting handles the shapes, the checks still hold.
    assert model[0].bias.allclose(torch.full_like(model[2].bias, 0.1))
    assert (not model[2].bias.allclose(torch.full_like(model[0].bias, 0.1)))
    # Reset everything to zero, then confirm XavierInit with bias_prob
    # changes the weights and sets both biases to the derived value.
    constant_func = ConstantInit(val=0, bias=0, layer=['Conv2d', 'Linear'])
    func = XavierInit(gain=100, bias_prob=0.01, layer=['Conv2d', 'Linear'])
    model.apply(constant_func)
    assert torch.equal(model[0].weight, torch.full(model[0].weight.shape, 0.0))
    assert torch.equal(model[2].weight, torch.full(model[2].weight.shape, 0.0))
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 0.0))
    assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, 0.0))
    res = bias_init_with_prob(0.01)
    func(model)
    assert (not torch.equal(model[0].weight, torch.full(model[0].weight.shape, 0.0)))
    assert (not torch.equal(model[2].weight, torch.full(model[2].weight.shape, 0.0)))
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, res))
    assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, res))
    # '_ConvNd' matches all conv layers; first fill with constants, then
    # verify XavierInit overwrites the weights and re-derives the biases.
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Conv1d(1, 2, 1))
    func = ConstantInit(val=4.0, bias=5.0, layer='_ConvNd')
    func(model)
    assert torch.all((model[0].weight == 4.0))
    assert torch.all((model[2].weight == 4.0))
    assert torch.all((model[0].bias == 5.0))
    assert torch.all((model[2].bias == 5.0))
    func = XavierInit(gain=100, bias_prob=0.01, layer='_ConvNd')
    func(model)
    assert (not torch.all((model[0].weight == 4.0)))
    assert (not torch.all((model[2].weight == 4.0)))
    assert torch.all((model[0].bias == res))
    assert torch.all((model[2].bias == res))
    # bias and layer arguments are type-checked at construction time.
    with pytest.raises(TypeError):
        func = XavierInit(bias='0.1', layer='Conv2d')
    with pytest.raises(TypeError):
        func = XavierInit(bias=0.1, layer=1)
|
def test_normalinit():
    """Test the NormalInit initializer class.

    With a tiny std the sampled weights are effectively equal to the mean,
    so they can be compared with allclose.
    """
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Linear(1, 2))
    func = NormalInit(mean=100, std=1e-05, bias=200, layer=['Conv2d', 'Linear'])
    func(model)
    assert model[0].weight.allclose(torch.tensor(100.0))
    assert model[2].weight.allclose(torch.tensor(100.0))
    assert model[0].bias.allclose(torch.tensor(200.0))
    assert model[2].bias.allclose(torch.tensor(200.0))
    # bias_prob derives the bias value from a prior probability.
    func = NormalInit(mean=300, std=1e-05, bias_prob=0.01, layer=['Conv2d', 'Linear'])
    res = bias_init_with_prob(0.01)
    func(model)
    assert model[0].weight.allclose(torch.tensor(300.0))
    assert model[2].weight.allclose(torch.tensor(300.0))
    assert model[0].bias.allclose(torch.tensor(res))
    assert model[2].bias.allclose(torch.tensor(res))
    # '_ConvNd' matches every conv flavour (Conv1d and Conv2d here).
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Conv1d(1, 2, 1))
    func = NormalInit(mean=300, std=1e-05, bias_prob=0.01, layer='_ConvNd')
    func(model)
    assert model[0].weight.allclose(torch.tensor(300.0))
    assert model[2].weight.allclose(torch.tensor(300.0))
    assert torch.all((model[0].bias == res))
    assert torch.all((model[2].bias == res))
|
def test_truncnormalinit():
    """Test the TruncNormalInit initializer class.

    With a tiny std and truncation bounds enclosing the mean, the sampled
    weights are effectively equal to the mean.
    """
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Linear(1, 2))
    func = TruncNormalInit(mean=100, std=1e-05, bias=200, a=0, b=200, layer=['Conv2d', 'Linear'])
    func(model)
    assert model[0].weight.allclose(torch.tensor(100.0))
    assert model[2].weight.allclose(torch.tensor(100.0))
    assert model[0].bias.allclose(torch.tensor(200.0))
    assert model[2].bias.allclose(torch.tensor(200.0))
    # bias_prob derives the bias value from a prior probability.
    func = TruncNormalInit(mean=300, std=1e-05, a=100, b=400, bias_prob=0.01, layer=['Conv2d', 'Linear'])
    res = bias_init_with_prob(0.01)
    func(model)
    assert model[0].weight.allclose(torch.tensor(300.0))
    assert model[2].weight.allclose(torch.tensor(300.0))
    assert model[0].bias.allclose(torch.tensor(res))
    assert model[2].bias.allclose(torch.tensor(res))
    # '_ConvNd' matches every conv flavour (Conv1d and Conv2d here).
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Conv1d(1, 2, 1))
    func = TruncNormalInit(mean=300, std=1e-05, a=100, b=400, bias_prob=0.01, layer='_ConvNd')
    func(model)
    assert model[0].weight.allclose(torch.tensor(300.0))
    assert model[2].weight.allclose(torch.tensor(300.0))
    assert torch.all((model[0].bias == res))
    assert torch.all((model[2].bias == res))
|
def test_uniforminit():
    """Test the UniformInit initializer class.

    With a == b the uniform distribution is degenerate, so every weight
    takes exactly that value and can be compared with torch.equal.
    (Also fixes the original docstring, which had a stray quote.)
    """
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Linear(1, 2))
    func = UniformInit(a=1, b=1, bias=2, layer=['Conv2d', 'Linear'])
    func(model)
    assert torch.equal(model[0].weight, torch.full(model[0].weight.shape, 1.0))
    assert torch.equal(model[2].weight, torch.full(model[2].weight.shape, 1.0))
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 2.0))
    assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, 2.0))
    func = UniformInit(a=100, b=100, layer=['Conv2d', 'Linear'], bias=10)
    func(model)
    assert torch.equal(model[0].weight, torch.full(model[0].weight.shape, 100.0))
    assert torch.equal(model[2].weight, torch.full(model[2].weight.shape, 100.0))
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 10.0))
    assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, 10.0))
    # '_ConvNd' matches every conv flavour; bias_prob derives the bias.
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Conv1d(1, 2, 1))
    func = UniformInit(a=100, b=100, bias_prob=0.01, layer='_ConvNd')
    res = bias_init_with_prob(0.01)
    func(model)
    assert torch.all((model[0].weight == 100.0))
    assert torch.all((model[2].weight == 100.0))
    assert torch.all((model[0].bias == res))
    assert torch.all((model[2].bias == res))
|
def test_kaiminginit():
    """Test the KaimingInit initializer class.

    Checks per-layer-type filtering, bias handling, and that re-running
    KaimingInit after zeroing with ConstantInit changes the weights.
    """
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Linear(1, 2))
    # Only Conv2d is targeted: the Linear bias must remain untouched.
    func = KaimingInit(bias=0.1, layer='Conv2d')
    func(model)
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 0.1))
    assert (not torch.equal(model[2].bias, torch.full(model[2].bias.shape, 0.1)))
    # Zero everything first, then verify KaimingInit rewrites the weights
    # and sets the configured bias on both layer types.
    func = KaimingInit(a=100, bias=10, layer=['Conv2d', 'Linear'])
    constant_func = ConstantInit(val=0, bias=0, layer=['Conv2d', 'Linear'])
    model.apply(constant_func)
    assert torch.equal(model[0].weight, torch.full(model[0].weight.shape, 0.0))
    assert torch.equal(model[2].weight, torch.full(model[2].weight.shape, 0.0))
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 0.0))
    assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, 0.0))
    func(model)
    assert (not torch.equal(model[0].weight, torch.full(model[0].weight.shape, 0.0)))
    assert (not torch.equal(model[2].weight, torch.full(model[2].weight.shape, 0.0)))
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 10.0))
    assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, 10.0))
    # '_ConvNd' matches every conv flavour (Conv1d and Conv2d here).
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Conv1d(1, 2, 1))
    func = KaimingInit(bias=0.1, layer='_ConvNd')
    func(model)
    assert torch.all((model[0].bias == 0.1))
    assert torch.all((model[2].bias == 0.1))
    # Same zero-then-reinit check for the conv-only model.
    func = KaimingInit(a=100, bias=10, layer='_ConvNd')
    constant_func = ConstantInit(val=0, bias=0, layer='_ConvNd')
    model.apply(constant_func)
    assert torch.equal(model[0].weight, torch.full(model[0].weight.shape, 0.0))
    assert torch.equal(model[2].weight, torch.full(model[2].weight.shape, 0.0))
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 0.0))
    assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, 0.0))
    func(model)
    assert (not torch.equal(model[0].weight, torch.full(model[0].weight.shape, 0.0)))
    assert (not torch.equal(model[2].weight, torch.full(model[2].weight.shape, 0.0)))
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 10.0))
    assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, 10.0))
|
def test_caffe2xavierinit():
    """Caffe2XavierInit must set the bias only on matching layer types."""
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Linear(1, 2))
    init_fn = Caffe2XavierInit(bias=0.1, layer='Conv2d')
    init_fn(model)
    # The conv bias is filled with 0.1; the Linear layer stays untouched.
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 0.1))
    assert not torch.equal(model[2].bias, torch.full(model[2].bias.shape, 0.1))
|
class FooModule(nn.Module):
    """Toy module with one linear and two conv layers.

    Used by the initializer tests to check per-submodule targeting
    (e.g. overrides addressed by the attribute names below).
    """

    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(1, 2)
        self.conv2d = nn.Conv2d(3, 1, 3)
        self.conv2d_2 = nn.Conv2d(3, 2, 3)
|
def test_pretrainedinit():
    """Test the PretrainedInit class.

    Saves a constant-initialized model, then checks that PretrainedInit
    copies its weights into a fresh model — optionally restricted to a
    prefix of the state dict.
    """
    modelA = FooModule()
    constant_func = ConstantInit(val=1, bias=2, layer=['Conv2d', 'Linear'])
    modelA.apply(constant_func)
    modelB = FooModule()
    modelC = nn.Linear(1, 2)
    # Save the checkpoint inside the temporary directory.  The original
    # entered TemporaryDirectory() without using it and wrote 'modelA.pth'
    # into the current working directory, leaving it behind after the test.
    with TemporaryDirectory() as tmpdir:
        checkpoint = os.path.join(tmpdir, 'modelA.pth')
        torch.save(modelA.state_dict(), checkpoint)
        # Full checkpoint: every submodule of modelB gets modelA's values.
        funcB = PretrainedInit(checkpoint=checkpoint)
        funcB(modelB)
        assert torch.equal(modelB.linear.weight, torch.full(modelB.linear.weight.shape, 1.0))
        assert torch.equal(modelB.linear.bias, torch.full(modelB.linear.bias.shape, 2.0))
        assert torch.equal(modelB.conv2d.weight, torch.full(modelB.conv2d.weight.shape, 1.0))
        assert torch.equal(modelB.conv2d.bias, torch.full(modelB.conv2d.bias.shape, 2.0))
        assert torch.equal(modelB.conv2d_2.weight, torch.full(modelB.conv2d_2.weight.shape, 1.0))
        assert torch.equal(modelB.conv2d_2.bias, torch.full(modelB.conv2d_2.bias.shape, 2.0))
        # Prefixed checkpoint: only the 'linear.' entries are loaded into
        # the bare nn.Linear.
        funcC = PretrainedInit(checkpoint=checkpoint, prefix='linear.')
        funcC(modelC)
        assert torch.equal(modelC.weight, torch.full(modelC.weight.shape, 1.0))
        assert torch.equal(modelC.bias, torch.full(modelC.bias.shape, 2.0))
|
def test_initialize():
    """Test the initialize() entry point.

    Covers: a single config for several layer types, a list of configs,
    ``override`` for a named submodule (with and without its own type),
    loading pretrained weights, and all the error paths.  initialize()
    must never mutate the config it is given.
    """
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Linear(1, 2))
    foonet = FooModule()
    # One config applied to every listed layer type.
    init_cfg = dict(type='Constant', layer=['Conv2d', 'Linear'], val=1, bias=2)
    initialize(model, init_cfg)
    assert torch.equal(model[0].weight, torch.full(model[0].weight.shape, 1.0))
    assert torch.equal(model[2].weight, torch.full(model[2].weight.shape, 1.0))
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 2.0))
    assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, 2.0))
    assert (init_cfg == dict(type='Constant', layer=['Conv2d', 'Linear'], val=1, bias=2))
    # A list of configs, each targeting its own layer type.
    init_cfg = [dict(type='Constant', layer='Conv2d', val=1, bias=2), dict(type='Constant', layer='Linear', val=3, bias=4)]
    initialize(model, init_cfg)
    assert torch.equal(model[0].weight, torch.full(model[0].weight.shape, 1.0))
    assert torch.equal(model[2].weight, torch.full(model[2].weight.shape, 3.0))
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 2.0))
    assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, 4.0))
    assert (init_cfg == [dict(type='Constant', layer='Conv2d', val=1, bias=2), dict(type='Constant', layer='Linear', val=3, bias=4)])
    # `override` re-initializes the named submodule with its own config.
    init_cfg = dict(type='Constant', val=1, bias=2, layer=['Conv2d', 'Linear'], override=dict(type='Constant', name='conv2d_2', val=3, bias=4))
    initialize(foonet, init_cfg)
    assert torch.equal(foonet.linear.weight, torch.full(foonet.linear.weight.shape, 1.0))
    assert torch.equal(foonet.linear.bias, torch.full(foonet.linear.bias.shape, 2.0))
    assert torch.equal(foonet.conv2d.weight, torch.full(foonet.conv2d.weight.shape, 1.0))
    assert torch.equal(foonet.conv2d.bias, torch.full(foonet.conv2d.bias.shape, 2.0))
    assert torch.equal(foonet.conv2d_2.weight, torch.full(foonet.conv2d_2.weight.shape, 3.0))
    assert torch.equal(foonet.conv2d_2.bias, torch.full(foonet.conv2d_2.bias.shape, 4.0))
    assert (init_cfg == dict(type='Constant', val=1, bias=2, layer=['Conv2d', 'Linear'], override=dict(type='Constant', name='conv2d_2', val=3, bias=4)))
    # With no `layer`, only the overridden submodule is touched.
    init_cfg = dict(type='Constant', val=5, bias=6, override=dict(name='conv2d_2'))
    initialize(foonet, init_cfg)
    assert (not torch.equal(foonet.linear.weight, torch.full(foonet.linear.weight.shape, 5.0)))
    assert (not torch.equal(foonet.linear.bias, torch.full(foonet.linear.bias.shape, 6.0)))
    assert (not torch.equal(foonet.conv2d.weight, torch.full(foonet.conv2d.weight.shape, 5.0)))
    assert (not torch.equal(foonet.conv2d.bias, torch.full(foonet.conv2d.bias.shape, 6.0)))
    assert torch.equal(foonet.conv2d_2.weight, torch.full(foonet.conv2d_2.weight.shape, 5.0))
    assert torch.equal(foonet.conv2d_2.bias, torch.full(foonet.conv2d_2.bias.shape, 6.0))
    assert (init_cfg == dict(type='Constant', val=5, bias=6, override=dict(name='conv2d_2')))
    # Pretrained weights plus a constant override for one submodule.
    modelA = FooModule()
    constant_func = ConstantInit(val=1, bias=2, layer=['Conv2d', 'Linear'])
    modelA.apply(constant_func)
    # Save the checkpoint inside the temporary directory.  The original
    # entered TemporaryDirectory() without using it and wrote 'modelA.pth'
    # into the current working directory, leaving it behind after the test.
    with TemporaryDirectory() as tmpdir:
        checkpoint = os.path.join(tmpdir, 'modelA.pth')
        torch.save(modelA.state_dict(), checkpoint)
        init_cfg = dict(type='Pretrained', checkpoint=checkpoint, override=dict(type='Constant', name='conv2d_2', val=3, bias=4))
        initialize(foonet, init_cfg)
        assert torch.equal(foonet.linear.weight, torch.full(foonet.linear.weight.shape, 1.0))
        assert torch.equal(foonet.linear.bias, torch.full(foonet.linear.bias.shape, 2.0))
        assert torch.equal(foonet.conv2d.weight, torch.full(foonet.conv2d.weight.shape, 1.0))
        assert torch.equal(foonet.conv2d.bias, torch.full(foonet.conv2d.bias.shape, 2.0))
        assert torch.equal(foonet.conv2d_2.weight, torch.full(foonet.conv2d_2.weight.shape, 3.0))
        assert torch.equal(foonet.conv2d_2.bias, torch.full(foonet.conv2d_2.bias.shape, 4.0))
        assert (init_cfg == dict(type='Pretrained', checkpoint=checkpoint, override=dict(type='Constant', name='conv2d_2', val=3, bias=4)))
    # Error paths: wrong config type, malformed override, unknown names,
    # and overrides missing required keys.
    with pytest.raises(TypeError):
        init_cfg = 'init_cfg'
        initialize(foonet, init_cfg)
    with pytest.raises(TypeError):
        init_cfg = dict(type='Constant', val=1, bias=2, layer=['Conv2d', 'Linear'], override='conv')
        initialize(foonet, init_cfg)
    with pytest.raises(RuntimeError):
        init_cfg = dict(type='Constant', val=1, bias=2, layer=['Conv2d', 'Linear'], override=dict(type='Constant', name='conv2d_3', val=3, bias=4))
        initialize(foonet, init_cfg)
    with pytest.raises(RuntimeError):
        init_cfg = dict(type='Constant', val=1, bias=2, layer=['Conv2d', 'Linear'], override=[dict(type='Constant', name='conv2d', val=3, bias=4), dict(type='Constant', name='conv2d_3', val=5, bias=6)])
        initialize(foonet, init_cfg)
    with pytest.raises(ValueError):
        init_cfg = dict(type='Constant', val=1, bias=2, override=dict(name='conv2d_2', val=3, bias=4))
        initialize(foonet, init_cfg)
    with pytest.raises(ValueError):
        init_cfg = dict(type='Constant', val=1, bias=2, override=dict(type='Constant', val=3, bias=4))
        initialize(foonet, init_cfg)
|
@patch('torch.__version__', torch_version)
@pytest.mark.parametrize('in_w,in_h,in_channel,out_channel,kernel_size,stride,padding,dilation', [(10, 10, 1, 1, 3, 1, 0, 1), (20, 20, 3, 3, 5, 2, 1, 2)])
def test_conv2d(in_w, in_h, in_channel, out_channel, kernel_size, stride, padding, dilation):
    """Check that the ``Conv2d`` wrapper accepts empty (0-batch) inputs and
    matches ``nn.Conv2d`` on regular inputs.

    CommandLine:
        xdoctest -m tests/test_wrappers.py test_conv2d
    """
    # Forward an empty batch through the wrapper.
    empty_input = torch.randn(0, in_channel, in_h, in_w)
    torch.manual_seed(0)
    conv_wrapper = Conv2d(in_channel, out_channel, kernel_size, stride=stride, padding=padding, dilation=dilation)
    empty_output = conv_wrapper(empty_input)
    # Reference torch conv built with the same seed, so weights match.
    dense_input = torch.randn(3, in_channel, in_h, in_w).requires_grad_(True)
    torch.manual_seed(0)
    reference = nn.Conv2d(in_channel, out_channel, kernel_size, stride=stride, padding=padding, dilation=dilation)
    reference_output = reference(dense_input)
    assert empty_output.shape[0] == 0
    assert empty_output.shape[1:] == reference_output.shape[1:]
    # Gradients must flow even for the empty batch.
    empty_output.sum().backward()
    assert conv_wrapper.weight.grad is not None
    assert conv_wrapper.weight.grad.shape == conv_wrapper.weight.shape
    assert torch.equal(conv_wrapper(dense_input), reference_output)
    # eval mode should also accept an empty batch.
    empty_input = torch.randn(0, in_channel, in_h, in_w)
    conv_wrapper = Conv2d(in_channel, out_channel, kernel_size, stride=stride, padding=padding, dilation=dilation)
    conv_wrapper.eval()
    conv_wrapper(empty_input)
|
@patch('torch.__version__', torch_version)
@pytest.mark.parametrize('in_w,in_h,in_t,in_channel,out_channel,kernel_size,stride,padding,dilation', [(10, 10, 10, 1, 1, 3, 1, 0, 1), (20, 20, 20, 3, 3, 5, 2, 1, 2)])
def test_conv3d(in_w, in_h, in_t, in_channel, out_channel, kernel_size, stride, padding, dilation):
    """Check that the ``Conv3d`` wrapper accepts empty (0-batch) inputs and
    matches ``nn.Conv3d`` on regular inputs.

    CommandLine:
        xdoctest -m tests/test_wrappers.py test_conv3d
    """
    # Forward an empty batch through the wrapper.
    empty_input = torch.randn(0, in_channel, in_t, in_h, in_w)
    torch.manual_seed(0)
    conv_wrapper = Conv3d(in_channel, out_channel, kernel_size, stride=stride, padding=padding, dilation=dilation)
    empty_output = conv_wrapper(empty_input)
    # Reference torch conv built with the same seed, so weights match.
    dense_input = torch.randn(3, in_channel, in_t, in_h, in_w).requires_grad_(True)
    torch.manual_seed(0)
    reference = nn.Conv3d(in_channel, out_channel, kernel_size, stride=stride, padding=padding, dilation=dilation)
    reference_output = reference(dense_input)
    assert empty_output.shape[0] == 0
    assert empty_output.shape[1:] == reference_output.shape[1:]
    # Gradients must flow even for the empty batch.
    empty_output.sum().backward()
    assert conv_wrapper.weight.grad is not None
    assert conv_wrapper.weight.grad.shape == conv_wrapper.weight.shape
    assert torch.equal(conv_wrapper(dense_input), reference_output)
    # eval mode should also accept an empty batch.
    empty_input = torch.randn(0, in_channel, in_t, in_h, in_w)
    conv_wrapper = Conv3d(in_channel, out_channel, kernel_size, stride=stride, padding=padding, dilation=dilation)
    conv_wrapper.eval()
    conv_wrapper(empty_input)
|
@patch('torch.__version__', torch_version)
@pytest.mark.parametrize('in_w,in_h,in_channel,out_channel,kernel_size,stride,padding,dilation', [(10, 10, 1, 1, 3, 1, 0, 1), (20, 20, 3, 3, 5, 2, 1, 2)])
def test_conv_transposed_2d(in_w, in_h, in_channel, out_channel, kernel_size, stride, padding, dilation):
    """``ConvTranspose2d`` wrapper must accept empty batches and agree with
    ``nn.ConvTranspose2d`` on regular inputs."""
    empty_input = torch.randn(0, in_channel, in_h, in_w, requires_grad=True)
    # output_padding must stay below both stride and dilation.
    out_pad = min(stride, dilation) - 1
    if torch.__version__ == 'parrots':
        # NOTE(review): zero output_padding under parrots — presumably
        # non-zero values are unsupported there; confirm against parrots docs.
        out_pad = 0
    torch.manual_seed(0)
    deconv_wrapper = ConvTranspose2d(in_channel, out_channel, kernel_size, stride=stride, padding=padding, dilation=dilation, output_padding=out_pad)
    empty_output = deconv_wrapper(empty_input)
    # Reference torch deconv built with the same seed, so weights match.
    dense_input = torch.randn(3, in_channel, in_h, in_w)
    torch.manual_seed(0)
    reference = nn.ConvTranspose2d(in_channel, out_channel, kernel_size, stride=stride, padding=padding, dilation=dilation, output_padding=out_pad)
    reference_output = reference(dense_input)
    assert empty_output.shape[0] == 0
    assert empty_output.shape[1:] == reference_output.shape[1:]
    # Gradients must flow even for the empty batch.
    empty_output.sum().backward()
    assert deconv_wrapper.weight.grad is not None
    assert deconv_wrapper.weight.grad.shape == deconv_wrapper.weight.shape
    assert torch.equal(deconv_wrapper(dense_input), reference_output)
    # eval mode should also accept an empty batch.
    empty_input = torch.randn(0, in_channel, in_h, in_w)
    deconv_wrapper = ConvTranspose2d(in_channel, out_channel, kernel_size, stride=stride, padding=padding, dilation=dilation, output_padding=out_pad)
    deconv_wrapper.eval()
    deconv_wrapper(empty_input)
|
@patch('torch.__version__', torch_version)
@pytest.mark.parametrize('in_w,in_h,in_t,in_channel,out_channel,kernel_size,stride,padding,dilation', [(10, 10, 10, 1, 1, 3, 1, 0, 1), (20, 20, 20, 3, 3, 5, 2, 1, 2)])
def test_conv_transposed_3d(in_w, in_h, in_t, in_channel, out_channel, kernel_size, stride, padding, dilation):
    """``ConvTranspose3d`` wrapper must accept empty batches and agree with
    ``nn.ConvTranspose3d`` on regular inputs."""
    empty_input = torch.randn(0, in_channel, in_t, in_h, in_w, requires_grad=True)
    # output_padding must stay below both stride and dilation.
    out_pad = min(stride, dilation) - 1
    torch.manual_seed(0)
    deconv_wrapper = ConvTranspose3d(in_channel, out_channel, kernel_size, stride=stride, padding=padding, dilation=dilation, output_padding=out_pad)
    empty_output = deconv_wrapper(empty_input)
    # Reference torch deconv built with the same seed, so weights match.
    dense_input = torch.randn(3, in_channel, in_t, in_h, in_w)
    torch.manual_seed(0)
    reference = nn.ConvTranspose3d(in_channel, out_channel, kernel_size, stride=stride, padding=padding, dilation=dilation, output_padding=out_pad)
    reference_output = reference(dense_input)
    assert empty_output.shape[0] == 0
    assert empty_output.shape[1:] == reference_output.shape[1:]
    # Gradients must flow even for the empty batch.
    empty_output.sum().backward()
    assert deconv_wrapper.weight.grad is not None
    assert deconv_wrapper.weight.grad.shape == deconv_wrapper.weight.shape
    assert torch.equal(deconv_wrapper(dense_input), reference_output)
    # eval mode should also accept an empty batch.
    empty_input = torch.randn(0, in_channel, in_t, in_h, in_w)
    deconv_wrapper = ConvTranspose3d(in_channel, out_channel, kernel_size, stride=stride, padding=padding, dilation=dilation, output_padding=out_pad)
    deconv_wrapper.eval()
    deconv_wrapper(empty_input)
|
@patch('torch.__version__', torch_version)
@pytest.mark.parametrize('in_w,in_h,in_channel,out_channel,kernel_size,stride,padding,dilation', [(10, 10, 1, 1, 3, 1, 0, 1), (20, 20, 3, 3, 5, 2, 1, 2)])
def test_max_pool_2d(in_w, in_h, in_channel, out_channel, kernel_size, stride, padding, dilation):
    """``MaxPool2d`` wrapper must accept empty batches and match
    ``nn.MaxPool2d`` on regular inputs."""
    empty_input = torch.randn(0, in_channel, in_h, in_w, requires_grad=True)
    pool_wrapper = MaxPool2d(kernel_size, stride=stride, padding=padding, dilation=dilation)
    empty_output = pool_wrapper(empty_input)
    dense_input = torch.randn(3, in_channel, in_h, in_w)
    reference = nn.MaxPool2d(kernel_size, stride=stride, padding=padding, dilation=dilation)
    reference_output = reference(dense_input)
    assert empty_output.shape[0] == 0
    assert empty_output.shape[1:] == reference_output.shape[1:]
    assert torch.equal(pool_wrapper(dense_input), reference_output)
|
@patch('torch.__version__', torch_version)
@pytest.mark.parametrize('in_w,in_h,in_t,in_channel,out_channel,kernel_size,stride,padding,dilation', [(10, 10, 10, 1, 1, 3, 1, 0, 1), (20, 20, 20, 3, 3, 5, 2, 1, 2)])
@pytest.mark.skipif(((torch.__version__ == 'parrots') and (not torch.cuda.is_available())), reason='parrots requires CUDA support')
def test_max_pool_3d(in_w, in_h, in_t, in_channel, out_channel, kernel_size, stride, padding, dilation):
    """``MaxPool3d`` wrapper must accept empty batches and match
    ``nn.MaxPool3d`` on regular inputs."""
    empty_input = torch.randn(0, in_channel, in_t, in_h, in_w, requires_grad=True)
    pool_wrapper = MaxPool3d(kernel_size, stride=stride, padding=padding, dilation=dilation)
    if torch.__version__ == 'parrots':
        # parrots branch runs on GPU (see skipif above).
        empty_input = empty_input.cuda()
    empty_output = pool_wrapper(empty_input)
    dense_input = torch.randn(3, in_channel, in_t, in_h, in_w)
    reference = nn.MaxPool3d(kernel_size, stride=stride, padding=padding, dilation=dilation)
    if torch.__version__ == 'parrots':
        dense_input = dense_input.cuda()
    reference_output = reference(dense_input)
    assert empty_output.shape[0] == 0
    assert empty_output.shape[1:] == reference_output.shape[1:]
    assert torch.equal(pool_wrapper(dense_input), reference_output)
|
@patch('torch.__version__', torch_version)
@pytest.mark.parametrize('in_w,in_h,in_feature,out_feature', [(10, 10, 1, 1), (20, 20, 3, 3)])
def test_linear(in_w, in_h, in_feature, out_feature):
    """``Linear`` wrapper must accept empty batches and match ``nn.Linear``
    on regular inputs."""
    empty_input = torch.randn(0, in_feature, requires_grad=True)
    torch.manual_seed(0)
    linear_wrapper = Linear(in_feature, out_feature)
    empty_output = linear_wrapper(empty_input)
    # Reference torch layer built with the same seed, so weights match.
    dense_input = torch.randn(3, in_feature)
    torch.manual_seed(0)
    reference = nn.Linear(in_feature, out_feature)
    reference_output = reference(dense_input)
    assert empty_output.shape[0] == 0
    assert empty_output.shape[1:] == reference_output.shape[1:]
    # Gradients must flow even for the empty batch.
    empty_output.sum().backward()
    assert linear_wrapper.weight.grad is not None
    assert linear_wrapper.weight.grad.shape == linear_wrapper.weight.shape
    assert torch.equal(linear_wrapper(dense_input), reference_output)
    # eval mode should also accept an empty batch.
    empty_input = torch.randn(0, in_feature)
    linear_wrapper = Linear(in_feature, out_feature)
    linear_wrapper.eval()
    linear_wrapper(empty_input)
|
@patch('mmcv.cnn.bricks.wrappers.TORCH_VERSION', (1, 10))
def test_nn_op_forward_called():
    """With TORCH_VERSION patched to (1, 10), each wrapper must delegate
    directly to the underlying ``torch.nn`` op's ``forward`` for both empty
    and non-empty inputs.

    Improvement: uses direct class references instead of ``eval`` on
    strings — same runtime behavior (the patch targets built from
    ``__name__`` are identical), but safer and visible to static analysis.
    """
    # 2D wrappers: input is (N, C, H, W).
    for wrapper_cls in (Conv2d, ConvTranspose2d, MaxPool2d):
        with patch(f'torch.nn.{wrapper_cls.__name__}.forward') as nn_module_forward:
            x_empty = torch.randn(0, 3, 10, 10)
            wrapper = wrapper_cls(3, 2, 1)
            wrapper(x_empty)
            nn_module_forward.assert_called_with(x_empty)
            x_normal = torch.randn(1, 3, 10, 10)
            wrapper = wrapper_cls(3, 2, 1)
            wrapper(x_normal)
            nn_module_forward.assert_called_with(x_normal)
    # 3D wrappers: input is (N, C, T, H, W).
    for wrapper_cls in (Conv3d, ConvTranspose3d, MaxPool3d):
        with patch(f'torch.nn.{wrapper_cls.__name__}.forward') as nn_module_forward:
            x_empty = torch.randn(0, 3, 10, 10, 10)
            wrapper = wrapper_cls(3, 2, 1)
            wrapper(x_empty)
            nn_module_forward.assert_called_with(x_empty)
            x_normal = torch.randn(1, 3, 10, 10, 10)
            wrapper = wrapper_cls(3, 2, 1)
            wrapper(x_normal)
            nn_module_forward.assert_called_with(x_normal)
    # Linear wrapper: input is (N, F).
    with patch('torch.nn.Linear.forward') as nn_module_forward:
        x_empty = torch.randn(0, 3)
        wrapper = Linear(3, 3)
        wrapper(x_empty)
        nn_module_forward.assert_called_with(x_empty)
        x_normal = torch.randn(1, 3)
        wrapper = Linear(3, 3)
        wrapper(x_normal)
        nn_module_forward.assert_called_with(x_normal)
|
@contextmanager
def build_temporary_directory():
    """Build a temporary directory containing many files to test
    ``FileClient.list_dir_or_file``.

    Layout::

        .
        |-- dir1
        |   |-- text3.txt
        |-- dir2
        |   |-- dir3
        |   |   |-- text4.txt
        |   |-- img.jpg
        |-- text1.txt
        |-- text2.txt

    Yields:
        str: path of the temporary directory (removed on exit).
    """
    with tempfile.TemporaryDirectory() as tmp_dir:
        # Path.write_text/write_bytes open and close the file in one call;
        # the previous ``p.open('w').write(...)`` pattern leaked the file
        # handle and relied on refcounting to close it.
        root = Path(tmp_dir)
        (root / 'text1.txt').write_text('text1')
        (root / 'text2.txt').write_text('text2')
        dir1 = root / 'dir1'
        dir1.mkdir()
        (dir1 / 'text3.txt').write_text('text3')
        dir2 = root / 'dir2'
        dir2.mkdir()
        (dir2 / 'img.jpg').write_bytes(b'img')
        dir3 = dir2 / 'dir3'
        dir3.mkdir()
        (dir3 / 'text4.txt').write_text('text4')
        yield tmp_dir
|
@contextmanager
def delete_and_reset_method(obj, method):
    """Temporarily remove ``method`` from ``type(obj)``, restoring it on exit.

    Used to simulate a client class that lacks an optional method.
    """
    owner = type(obj)
    saved = deepcopy(getattr(owner, method))
    try:
        delattr(owner, method)
        yield
    finally:
        # Always put the method back, even if the body raised.
        setattr(owner, method, saved)
|
class MockS3Client():
    """Minimal stand-in for ``ceph.S3Client``: reads from the local disk."""

    def __init__(self, enable_mc=True):
        # Mirrors the real client's memcached toggle.
        self.enable_mc = enable_mc

    def Get(self, filepath):
        """Return the raw bytes stored at ``filepath``."""
        with open(filepath, 'rb') as handle:
            payload = handle.read()
        return payload
|
class MockPetrelClient():
    """Minimal stand-in for ``petrel_client.client.Client`` backed by the
    local filesystem; the no-op methods exist only so tests can patch them."""

    def __init__(self, enable_mc=True, enable_multi_cluster=False):
        self.enable_mc = enable_mc
        self.enable_multi_cluster = enable_multi_cluster

    def Get(self, filepath):
        """Return the raw bytes stored at ``filepath``."""
        with open(filepath, 'rb') as handle:
            payload = handle.read()
        return payload

    def put(self):
        pass

    def delete(self):
        pass

    def contains(self):
        pass

    def isdir(self):
        pass

    def list(self, dir_path):
        """Yield non-hidden file names and directory names (with a trailing
        '/') directly under ``dir_path``, mimicking petrel's listing."""
        for item in os.scandir(dir_path):
            if (not item.name.startswith('.')) and item.is_file():
                yield item.name
            elif osp.isdir(item.path):
                yield item.name + '/'
|
class MockMemcachedClient():
    """Minimal stand-in for ``mc.MemcachedClient``: ``Get`` fills the given
    buffer's ``content`` attribute with the file's bytes."""

    def __init__(self, server_list_cfg, client_cfg):
        # Config arguments are accepted but ignored by the mock.
        pass

    def Get(self, filepath, buffer):
        with open(filepath, 'rb') as handle:
            buffer.content = handle.read()
|
class TestFileClient():
    """Tests for ``FileClient`` covering each bundled storage backend
    (disk, ceph, petrel, memcached, lmdb, http) and backend registration.

    Remote backends are exercised through mock clients patched in at the
    import path of the real third-party client.
    """
    @classmethod
    def setup_class(cls):
        # Shared fixtures: a real image and a text file under tests/data.
        cls.test_data_dir = (Path(__file__).parent / 'data')
        cls.img_path = (cls.test_data_dir / 'color.jpg')
        cls.img_shape = (300, 400, 3)
        cls.text_path = (cls.test_data_dir / 'filelist.txt')
    def test_error(self):
        # An unregistered backend name must raise ValueError.
        with pytest.raises(ValueError):
            FileClient('hadoop')
    def test_disk_backend(self):
        """Full read/write/remove/list round-trip on the hard-disk backend."""
        disk_backend = FileClient('disk')
        assert (disk_backend.name == 'HardDiskBackend')
        assert disk_backend.allow_symlink
        # get/get_text accept both Path and str inputs.
        img_bytes = disk_backend.get(self.img_path)
        img = mmcv.imfrombytes(img_bytes)
        assert (self.img_path.open('rb').read() == img_bytes)
        assert (img.shape == self.img_shape)
        img_bytes = disk_backend.get(str(self.img_path))
        img = mmcv.imfrombytes(img_bytes)
        assert (self.img_path.open('rb').read() == img_bytes)
        assert (img.shape == self.img_shape)
        value_buf = disk_backend.get_text(self.text_path)
        assert (self.text_path.open('r').read() == value_buf)
        value_buf = disk_backend.get_text(str(self.text_path))
        assert (self.text_path.open('r').read() == value_buf)
        with tempfile.TemporaryDirectory() as tmp_dir:
            # put/put_text should create missing parent directories.
            filepath1 = (Path(tmp_dir) / 'test.jpg')
            disk_backend.put(b'disk', filepath1)
            assert (filepath1.open('rb').read() == b'disk')
            _filepath1 = ((Path(tmp_dir) / 'not_existed_dir1') / 'test.jpg')
            disk_backend.put(b'disk', _filepath1)
            assert (_filepath1.open('rb').read() == b'disk')
            filepath2 = (Path(tmp_dir) / 'test.txt')
            disk_backend.put_text('disk', filepath2)
            assert (filepath2.open('r').read() == 'disk')
            _filepath2 = ((Path(tmp_dir) / 'not_existed_dir2') / 'test.txt')
            disk_backend.put_text('disk', _filepath2)
            assert (_filepath2.open('r').read() == 'disk')
            assert disk_backend.isfile(filepath2)
            assert (not disk_backend.isfile((Path(tmp_dir) / 'not/existed/path')))
            disk_backend.remove(filepath2)
            assert (not disk_backend.exists(filepath2))
            # For disk, get_local_path is a no-op returning the same path.
            with disk_backend.get_local_path(filepath1) as path:
                assert (str(filepath1) == path)
            assert osp.isfile(filepath1)
        disk_dir = '/path/of/your/directory'
        assert (disk_backend.join_path(disk_dir, 'file') == osp.join(disk_dir, 'file'))
        assert (disk_backend.join_path(disk_dir, 'dir', 'file') == osp.join(disk_dir, 'dir', 'file'))
        # list_dir_or_file: dir/file filters, suffix filters and recursion.
        with build_temporary_directory() as tmp_dir:
            assert (set(disk_backend.list_dir_or_file(tmp_dir)) == set(['dir1', 'dir2', 'text1.txt', 'text2.txt']))
            assert (set(disk_backend.list_dir_or_file(tmp_dir, recursive=True)) == set(['dir1', osp.join('dir1', 'text3.txt'), 'dir2', osp.join('dir2', 'dir3'), osp.join('dir2', 'dir3', 'text4.txt'), osp.join('dir2', 'img.jpg'), 'text1.txt', 'text2.txt']))
            assert (set(disk_backend.list_dir_or_file(tmp_dir, list_file=False)) == set(['dir1', 'dir2']))
            with pytest.raises(TypeError, match='`suffix` should be None when `list_dir` is True'):
                disk_backend.client.list_dir_or_file(tmp_dir, list_file=False, suffix='.txt')
            assert (set(disk_backend.list_dir_or_file(tmp_dir, list_file=False, recursive=True)) == set(['dir1', 'dir2', osp.join('dir2', 'dir3')]))
            assert (set(disk_backend.list_dir_or_file(tmp_dir, list_dir=False)) == set(['text1.txt', 'text2.txt']))
            assert (set(disk_backend.list_dir_or_file(tmp_dir, list_dir=False, recursive=True)) == set([osp.join('dir1', 'text3.txt'), osp.join('dir2', 'dir3', 'text4.txt'), osp.join('dir2', 'img.jpg'), 'text1.txt', 'text2.txt']))
            assert (set(disk_backend.list_dir_or_file(tmp_dir, list_dir=False, suffix='.txt')) == set(['text1.txt', 'text2.txt']))
            assert (set(disk_backend.list_dir_or_file(tmp_dir, list_dir=False, suffix=('.txt', '.jpg'))) == set(['text1.txt', 'text2.txt']))
            with pytest.raises(TypeError, match='`suffix` must be a string or tuple of strings'):
                disk_backend.client.list_dir_or_file(tmp_dir, list_dir=False, suffix=['.txt', '.jpg'])
            assert (set(disk_backend.list_dir_or_file(tmp_dir, list_dir=False, suffix='.txt', recursive=True)) == set([osp.join('dir1', 'text3.txt'), osp.join('dir2', 'dir3', 'text4.txt'), 'text1.txt', 'text2.txt']))
            assert (set(disk_backend.list_dir_or_file(tmp_dir, list_dir=False, suffix=('.txt', '.jpg'), recursive=True)) == set([osp.join('dir1', 'text3.txt'), osp.join('dir2', 'dir3', 'text4.txt'), osp.join('dir2', 'img.jpg'), 'text1.txt', 'text2.txt']))
    @patch('ceph.S3Client', MockS3Client)
    def test_ceph_backend(self):
        """Ceph backend: binary get works, get_text is unsupported, and
        path_mapping rewrites local paths to ceph URIs."""
        ceph_backend = FileClient('ceph')
        assert (not ceph_backend.allow_symlink)
        # get_text is not implemented for ceph.
        with pytest.raises(NotImplementedError):
            ceph_backend.get_text(self.text_path)
        with pytest.raises(NotImplementedError):
            ceph_backend.get_text(str(self.text_path))
        img_bytes = ceph_backend.get(self.img_path)
        img = mmcv.imfrombytes(img_bytes)
        assert (img.shape == self.img_shape)
        img_bytes = ceph_backend.get(str(self.img_path))
        img = mmcv.imfrombytes(img_bytes)
        assert (img.shape == self.img_shape)
        # path_mapping must be a dict (or None).
        with pytest.raises(AssertionError):
            FileClient('ceph', path_mapping=1)
        ceph_path = 's3://user/data'
        ceph_backend = FileClient('ceph', path_mapping={str(self.test_data_dir): ceph_path})
        # Wrap Get so we can inspect the path it was called with.
        ceph_backend.client._client.Get = MagicMock(return_value=ceph_backend.client._client.Get(self.img_path))
        img_bytes = ceph_backend.get(self.img_path)
        img = mmcv.imfrombytes(img_bytes)
        assert (img.shape == self.img_shape)
        ceph_backend.client._client.Get.assert_called_with(str(self.img_path).replace(str(self.test_data_dir), ceph_path))
    @patch('petrel_client.client.Client', MockPetrelClient)
    @pytest.mark.parametrize('backend,prefix', [('petrel', None), (None, 's3')])
    def test_petrel_backend(self, backend, prefix):
        """Petrel backend: selected either by name or by the 's3' URI prefix;
        each optional client method is tested both present and absent."""
        petrel_backend = FileClient(backend=backend, prefix=prefix)
        assert (not petrel_backend.allow_symlink)
        img_bytes = petrel_backend.get(self.img_path)
        img = mmcv.imfrombytes(img_bytes)
        assert (img.shape == self.img_shape)
        img_bytes = petrel_backend.get(str(self.img_path))
        img = mmcv.imfrombytes(img_bytes)
        assert (img.shape == self.img_shape)
        with pytest.raises(AssertionError):
            FileClient('petrel', path_mapping=1)
        petrel_dir = 's3://user/data'
        petrel_backend = FileClient('petrel', path_mapping={str(self.test_data_dir): petrel_dir})
        assert (petrel_backend.client._map_path(str(self.img_path)) == str(self.img_path).replace(str(self.test_data_dir), petrel_dir))
        petrel_path = f'{petrel_dir}/test.jpg'
        petrel_backend = FileClient('petrel')
        # Windows-style backslashes should be normalized to '/'.
        assert (petrel_backend.client._format_path('s3://user\\data\\test.jpg') == petrel_path)
        with patch.object(petrel_backend.client._client, 'Get', return_value=b'petrel') as mock_get:
            assert (petrel_backend.get(petrel_path) == b'petrel')
            mock_get.assert_called_once_with(petrel_path)
        with patch.object(petrel_backend.client._client, 'Get', return_value=b'petrel') as mock_get:
            assert (petrel_backend.get_text(petrel_path) == 'petrel')
            mock_get.assert_called_once_with(petrel_path)
        with patch.object(petrel_backend.client._client, 'put') as mock_put:
            petrel_backend.put(b'petrel', petrel_path)
            mock_put.assert_called_once_with(petrel_path, b'petrel')
        with patch.object(petrel_backend.client._client, 'put') as mock_put:
            petrel_backend.put_text('petrel', petrel_path)
            mock_put.assert_called_once_with(petrel_path, b'petrel')
        # remove requires the underlying client to provide 'delete'.
        assert has_method(petrel_backend.client._client, 'delete')
        with delete_and_reset_method(petrel_backend.client._client, 'delete'):
            assert (not has_method(petrel_backend.client._client, 'delete'))
            with pytest.raises(NotImplementedError):
                petrel_backend.remove(petrel_path)
        with patch.object(petrel_backend.client._client, 'delete') as mock_delete:
            petrel_backend.remove(petrel_path)
            mock_delete.assert_called_once_with(petrel_path)
        # exists requires both 'contains' and 'isdir'.
        assert has_method(petrel_backend.client._client, 'contains')
        assert has_method(petrel_backend.client._client, 'isdir')
        with delete_and_reset_method(petrel_backend.client._client, 'contains'), delete_and_reset_method(petrel_backend.client._client, 'isdir'):
            assert (not has_method(petrel_backend.client._client, 'contains'))
            assert (not has_method(petrel_backend.client._client, 'isdir'))
            with pytest.raises(NotImplementedError):
                petrel_backend.exists(petrel_path)
        with patch.object(petrel_backend.client._client, 'contains', return_value=True) as mock_contains:
            assert petrel_backend.exists(petrel_path)
            mock_contains.assert_called_once_with(petrel_path)
        # isdir requires 'isdir'.
        assert has_method(petrel_backend.client._client, 'isdir')
        with delete_and_reset_method(petrel_backend.client._client, 'isdir'):
            assert (not has_method(petrel_backend.client._client, 'isdir'))
            with pytest.raises(NotImplementedError):
                petrel_backend.isdir(petrel_path)
        with patch.object(petrel_backend.client._client, 'isdir', return_value=True) as mock_isdir:
            assert petrel_backend.isdir(petrel_dir)
            mock_isdir.assert_called_once_with(petrel_dir)
        # isfile requires 'contains'.
        assert has_method(petrel_backend.client._client, 'contains')
        with delete_and_reset_method(petrel_backend.client._client, 'contains'):
            assert (not has_method(petrel_backend.client._client, 'contains'))
            with pytest.raises(NotImplementedError):
                petrel_backend.isfile(petrel_path)
        with patch.object(petrel_backend.client._client, 'contains', return_value=True) as mock_contains:
            assert petrel_backend.isfile(petrel_path)
            mock_contains.assert_called_once_with(petrel_path)
        assert (petrel_backend.join_path(petrel_dir, 'file') == f'{petrel_dir}/file')
        assert (petrel_backend.join_path(f'{petrel_dir}/', 'file') == f'{petrel_dir}/file')
        assert (petrel_backend.join_path(petrel_dir, 'dir', 'file') == f'{petrel_dir}/dir/file')
        # get_local_path downloads to a temp file that is removed on exit.
        with patch.object(petrel_backend.client._client, 'Get', return_value=b'petrel') as mock_get, patch.object(petrel_backend.client._client, 'contains', return_value=True) as mock_contains:
            with petrel_backend.get_local_path(petrel_path) as path:
                assert (Path(path).open('rb').read() == b'petrel')
            assert (not osp.isfile(path))
            mock_get.assert_called_once_with(petrel_path)
            mock_contains.assert_called_once_with(petrel_path)
        # list_dir_or_file requires 'list'.
        assert has_method(petrel_backend.client._client, 'list')
        with delete_and_reset_method(petrel_backend.client._client, 'list'):
            assert (not has_method(petrel_backend.client._client, 'list'))
            with pytest.raises(NotImplementedError):
                list(petrel_backend.list_dir_or_file(petrel_dir))
        with build_temporary_directory() as tmp_dir:
            # Petrel always joins paths with '/', regardless of platform.
            assert (set(petrel_backend.list_dir_or_file(tmp_dir)) == set(['dir1', 'dir2', 'text1.txt', 'text2.txt']))
            assert (set(petrel_backend.list_dir_or_file(tmp_dir, recursive=True)) == set(['dir1', '/'.join(('dir1', 'text3.txt')), 'dir2', '/'.join(('dir2', 'dir3')), '/'.join(('dir2', 'dir3', 'text4.txt')), '/'.join(('dir2', 'img.jpg')), 'text1.txt', 'text2.txt']))
            assert (set(petrel_backend.list_dir_or_file(tmp_dir, list_file=False)) == set(['dir1', 'dir2']))
            with pytest.raises(TypeError, match='`list_dir` should be False when `suffix` is not None'):
                petrel_backend.client.list_dir_or_file(tmp_dir, list_file=False, suffix='.txt')
            assert (set(petrel_backend.list_dir_or_file(tmp_dir, list_file=False, recursive=True)) == set(['dir1', 'dir2', '/'.join(('dir2', 'dir3'))]))
            assert (set(petrel_backend.list_dir_or_file(tmp_dir, list_dir=False)) == set(['text1.txt', 'text2.txt']))
            assert (set(petrel_backend.list_dir_or_file(tmp_dir, list_dir=False, recursive=True)) == set(['/'.join(('dir1', 'text3.txt')), '/'.join(('dir2', 'dir3', 'text4.txt')), '/'.join(('dir2', 'img.jpg')), 'text1.txt', 'text2.txt']))
            assert (set(petrel_backend.list_dir_or_file(tmp_dir, list_dir=False, suffix='.txt')) == set(['text1.txt', 'text2.txt']))
            assert (set(petrel_backend.list_dir_or_file(tmp_dir, list_dir=False, suffix=('.txt', '.jpg'))) == set(['text1.txt', 'text2.txt']))
            with pytest.raises(TypeError, match='`suffix` must be a string or tuple of strings'):
                petrel_backend.client.list_dir_or_file(tmp_dir, list_dir=False, suffix=['.txt', '.jpg'])
            assert (set(petrel_backend.list_dir_or_file(tmp_dir, list_dir=False, suffix='.txt', recursive=True)) == set(['/'.join(('dir1', 'text3.txt')), '/'.join(('dir2', 'dir3', 'text4.txt')), 'text1.txt', 'text2.txt']))
            assert (set(petrel_backend.list_dir_or_file(tmp_dir, list_dir=False, suffix=('.txt', '.jpg'), recursive=True)) == set(['/'.join(('dir1', 'text3.txt')), '/'.join(('dir2', 'dir3', 'text4.txt')), '/'.join(('dir2', 'img.jpg')), 'text1.txt', 'text2.txt']))
    @patch('mc.MemcachedClient.GetInstance', MockMemcachedClient)
    @patch('mc.pyvector', MagicMock)
    @patch('mc.ConvertBuffer', (lambda x: x.content))
    def test_memcached_backend(self):
        """Memcached backend: binary get works, get_text is unsupported."""
        mc_cfg = dict(server_list_cfg='', client_cfg='', sys_path=None)
        mc_backend = FileClient('memcached', **mc_cfg)
        assert (not mc_backend.allow_symlink)
        with pytest.raises(NotImplementedError):
            mc_backend.get_text(self.text_path)
        with pytest.raises(NotImplementedError):
            mc_backend.get_text(str(self.text_path))
        img_bytes = mc_backend.get(self.img_path)
        img = mmcv.imfrombytes(img_bytes)
        assert (img.shape == self.img_shape)
        img_bytes = mc_backend.get(str(self.img_path))
        img = mmcv.imfrombytes(img_bytes)
        assert (img.shape == self.img_shape)
    def test_lmdb_backend(self):
        """LMDB backend: get by key works with Path and str db_path;
        get_text is unsupported."""
        lmdb_path = (self.test_data_dir / 'demo.lmdb')
        lmdb_backend = FileClient('lmdb', db_path=lmdb_path)
        assert (not lmdb_backend.allow_symlink)
        with pytest.raises(NotImplementedError):
            lmdb_backend.get_text(self.text_path)
        img_bytes = lmdb_backend.get('baboon')
        img = mmcv.imfrombytes(img_bytes)
        assert (img.shape == (120, 125, 3))
        lmdb_backend = FileClient('lmdb', db_path=str(lmdb_path))
        with pytest.raises(NotImplementedError):
            lmdb_backend.get_text(str(self.text_path))
        img_bytes = lmdb_backend.get('baboon')
        img = mmcv.imfrombytes(img_bytes)
        assert (img.shape == (120, 125, 3))
    @pytest.mark.parametrize('backend,prefix', [('http', None), (None, 'http')])
    def test_http_backend(self, backend, prefix):
        """HTTP backend: local paths fail, URLs succeed, and get_local_path
        downloads to a temp file removed on exit.

        NOTE(review): fetches real GitHub raw URLs — requires network access.
        """
        http_backend = FileClient(backend=backend, prefix=prefix)
        img_url = 'https://raw.githubusercontent.com/open-mmlab/mmcv/master/tests/data/color.jpg'
        text_url = 'https://raw.githubusercontent.com/open-mmlab/mmcv/master/tests/data/filelist.txt'
        assert (not http_backend.allow_symlink)
        with pytest.raises(Exception):
            http_backend.get(self.img_path)
        with pytest.raises(Exception):
            http_backend.get(str(self.img_path))
        with pytest.raises(Exception):
            http_backend.get_text(self.text_path)
        with pytest.raises(Exception):
            http_backend.get_text(str(self.text_path))
        img_bytes = http_backend.get(img_url)
        img = mmcv.imfrombytes(img_bytes)
        assert (img.shape == self.img_shape)
        value_buf = http_backend.get_text(text_url)
        assert (self.text_path.open('r').read() == value_buf)
        with http_backend.get_local_path(img_url) as path:
            assert (mmcv.imread(path).shape == self.img_shape)
        assert (not osp.isfile(path))
    def test_new_magic_method(self):
        """Re-registering a backend with force=True must invalidate the
        FileClient instance cache (same backend name, fresh instances)."""
        class DummyBackend1(BaseStorageBackend):
            def get(self, filepath):
                return filepath
            def get_text(self, filepath, encoding='utf-8'):
                return filepath
        FileClient.register_backend('dummy_backend', DummyBackend1)
        client1 = FileClient(backend='dummy_backend')
        client2 = FileClient(backend='dummy_backend')
        # Same backend class -> cached singleton.
        assert (client1 is client2)
        class DummyBackend2(BaseStorageBackend):
            def get(self, filepath):
                pass
            def get_text(self, filepath):
                pass
        FileClient.register_backend('dummy_backend', DummyBackend2, force=True)
        client3 = FileClient(backend='dummy_backend')
        client4 = FileClient(backend='dummy_backend')
        # Forced re-registration drops the cache; NOTE(review): client3 and
        # client4 being distinct appears to be the expected cache behavior
        # after force re-registration — confirm against FileClient.__new__.
        assert (client3 is not client4)
    def test_parse_uri_prefix(self):
        """parse_uri_prefix: None for plain paths, scheme for URIs, and the
        scheme after a cluster name for 'cluster:s3://...' forms."""
        with pytest.raises(AssertionError):
            FileClient.parse_uri_prefix(None)
        with pytest.raises(AssertionError):
            FileClient.parse_uri_prefix([])
        assert (FileClient.parse_uri_prefix(self.img_path) is None)
        assert (FileClient.parse_uri_prefix(str(self.img_path)) is None)
        img_url = 'https://raw.githubusercontent.com/open-mmlab/mmcv/master/tests/data/color.jpg'
        assert (FileClient.parse_uri_prefix(img_url) == 'https')
        img_url = 's3://your_bucket/img.png'
        assert (FileClient.parse_uri_prefix(img_url) == 's3')
        img_url = 'clusterName:s3://your_bucket/img.png'
        assert (FileClient.parse_uri_prefix(img_url) == 's3')
    def test_infer_client(self):
        """infer_client: backend chosen from explicit args or from the URI."""
        file_client_args = {'backend': 'disk'}
        client = FileClient.infer_client(file_client_args)
        assert (client.name == 'HardDiskBackend')
        client = FileClient.infer_client(uri=self.img_path)
        assert (client.name == 'HardDiskBackend')
        file_client_args = {'backend': 'petrel'}
        client = FileClient.infer_client(file_client_args)
        assert (client.name == 'PetrelBackend')
        uri = 's3://user_data'
        client = FileClient.infer_client(uri=uri)
        assert (client.name == 'PetrelBackend')
    def test_register_backend(self):
        """register_backend: type checks, force re-registration, decorator
        usage, and prefix-based lookup (single prefix and prefix lists)."""
        # Name must be a str.
        with pytest.raises(TypeError):
            class TestClass1():
                pass
            FileClient.register_backend(1, TestClass1)
        # Backend must be a class.
        with pytest.raises(TypeError):
            FileClient.register_backend('int', 0)
        # Backend must subclass BaseStorageBackend.
        with pytest.raises(TypeError):
            class TestClass1():
                pass
            FileClient.register_backend('TestClass1', TestClass1)
        class ExampleBackend(BaseStorageBackend):
            def get(self, filepath):
                return filepath
            def get_text(self, filepath, encoding='utf-8'):
                return filepath
        FileClient.register_backend('example', ExampleBackend)
        example_backend = FileClient('example')
        assert (example_backend.get(self.img_path) == self.img_path)
        assert (example_backend.get_text(self.text_path) == self.text_path)
        assert ('example' in FileClient._backends)
        class Example2Backend(BaseStorageBackend):
            def get(self, filepath):
                return b'bytes2'
            def get_text(self, filepath, encoding='utf-8'):
                return 'text2'
        # Duplicate name without force -> KeyError.
        with pytest.raises(KeyError):
            FileClient.register_backend('example', Example2Backend)
        FileClient.register_backend('example', Example2Backend, force=True)
        example_backend = FileClient('example')
        assert (example_backend.get(self.img_path) == b'bytes2')
        assert (example_backend.get_text(self.text_path) == 'text2')
        # register_backend also works as a decorator.
        @FileClient.register_backend(name='example3')
        class Example3Backend(BaseStorageBackend):
            def get(self, filepath):
                return b'bytes3'
            def get_text(self, filepath, encoding='utf-8'):
                return 'text3'
        example_backend = FileClient('example3')
        assert (example_backend.get(self.img_path) == b'bytes3')
        assert (example_backend.get_text(self.text_path) == 'text3')
        assert ('example3' in FileClient._backends)
        with pytest.raises(KeyError):
            @FileClient.register_backend(name='example3')
            class Example4Backend(BaseStorageBackend):
                def get(self, filepath):
                    return b'bytes4'
                def get_text(self, filepath, encoding='utf-8'):
                    return 'text4'
        @FileClient.register_backend(name='example3', force=True)
        class Example5Backend(BaseStorageBackend):
            def get(self, filepath):
                return b'bytes5'
            def get_text(self, filepath, encoding='utf-8'):
                return 'text5'
        example_backend = FileClient('example3')
        assert (example_backend.get(self.img_path) == b'bytes5')
        assert (example_backend.get_text(self.text_path) == 'text5')
        # A backend can be registered together with URI prefixes.
        class Example6Backend(BaseStorageBackend):
            def get(self, filepath):
                return b'bytes6'
            def get_text(self, filepath, encoding='utf-8'):
                return 'text6'
        FileClient.register_backend('example4', Example6Backend, force=True, prefixes='example4_prefix')
        example_backend = FileClient('example4')
        assert (example_backend.get(self.img_path) == b'bytes6')
        assert (example_backend.get_text(self.text_path) == 'text6')
        example_backend = FileClient(prefix='example4_prefix')
        assert (example_backend.get(self.img_path) == b'bytes6')
        assert (example_backend.get_text(self.text_path) == 'text6')
        example_backend = FileClient('example4', prefix='example4_prefix')
        assert (example_backend.get(self.img_path) == b'bytes6')
        assert (example_backend.get_text(self.text_path) == 'text6')
        class Example7Backend(BaseStorageBackend):
            def get(self, filepath):
                return b'bytes7'
            def get_text(self, filepath, encoding='utf-8'):
                return 'text7'
        FileClient.register_backend('example5', Example7Backend, force=True, prefixes=['example5_prefix1', 'example5_prefix2'])
        example_backend = FileClient('example5')
        assert (example_backend.get(self.img_path) == b'bytes7')
        assert (example_backend.get_text(self.text_path) == 'text7')
        example_backend = FileClient(prefix='example5_prefix1')
        assert (example_backend.get(self.img_path) == b'bytes7')
        assert (example_backend.get_text(self.text_path) == 'text7')
        example_backend = FileClient(prefix='example5_prefix2')
        assert (example_backend.get(self.img_path) == b'bytes7')
        assert (example_backend.get_text(self.text_path) == 'text7')
        class Example8Backend(BaseStorageBackend):
            def get(self, filepath):
                return b'bytes8'
            def get_text(self, filepath, encoding='utf-8'):
                return 'text8'
        FileClient.register_backend('example6', Example8Backend, force=True, prefixes='example6_prefix')
        example_backend = FileClient('example6')
        assert (example_backend.get(self.img_path) == b'bytes8')
        assert (example_backend.get_text(self.text_path) == 'text8')
        # Explicit backend name wins even with a mismatched prefix.
        example_backend = FileClient('example6', prefix='example4_prefix')
        assert (example_backend.get(self.img_path) == b'bytes8')
        assert (example_backend.get_text(self.text_path) == 'text8')
|
def _test_handler(file_format, test_obj, str_checker, mode='r+'):
    """Round-trip ``test_obj`` through ``mmcv.dump``/``mmcv.load`` for one
    file handler.

    Args:
        file_format (str): handler name, e.g. 'json', 'yaml', 'pickle'.
        test_obj: object to serialize and reload.
        str_checker (callable): validates the dump-to-str output.
        mode (str): file mode; a 'b' in it selects the binary put path.
    """
    # 1. dump to an in-memory string/bytes and validate it.
    serialized = mmcv.dump(test_obj, file_format=file_format)
    str_checker(serialized)
    # 2. dump/load via an explicit filename.
    tmp_filename = osp.join(tempfile.gettempdir(), 'mmcv_test_dump')
    mmcv.dump(test_obj, tmp_filename, file_format=file_format)
    assert osp.isfile(tmp_filename)
    assert mmcv.load(tmp_filename, file_format=file_format) == test_obj
    os.remove(tmp_filename)
    # 3. dumping to a petrel URI should delegate to put/put_text.
    method = 'put' if 'b' in mode else 'put_text'
    with patch.object(PetrelBackend, method, return_value=None) as mock_method:
        filename = 's3://path/of/your/file'
        mmcv.dump(test_obj, filename, file_format=file_format)
        mock_method.assert_called()
    # 4. dump/load via an already-open file object.
    with tempfile.NamedTemporaryFile(mode, delete=False) as f:
        tmp_filename = f.name
        mmcv.dump(test_obj, f, file_format=file_format)
    assert osp.isfile(tmp_filename)
    with open(tmp_filename, mode) as f:
        assert mmcv.load(f, file_format=file_format) == test_obj
    os.remove(tmp_filename)
    # 5. infer the handler from the file extension.
    tmp_filename = osp.join(tempfile.gettempdir(), ('mmcv_test_dump.' + file_format))
    mmcv.dump(test_obj, tmp_filename)
    assert osp.isfile(tmp_filename)
    assert mmcv.load(tmp_filename) == test_obj
    os.remove(tmp_filename)
|
def test_json():
    """JSON handler: dict key order may vary, so accept both serializations."""

    def check(serialized):
        valid = ['[{"a": "abc", "b": 1}, 2, "c"]',
                 '[{"b": 1, "a": "abc"}, 2, "c"]']
        assert serialized in valid

    _test_handler('json', obj_for_test, check)
|
def test_yaml():
    """YAML handler: several equivalent flow/block renderings are acceptable."""

    def check(serialized):
        valid = ['- {a: abc, b: 1}\n- 2\n- c\n',
                 '- {b: 1, a: abc}\n- 2\n- c\n',
                 '- a: abc\n b: 1\n- 2\n- c\n',
                 '- b: 1\n a: abc\n- 2\n- c\n']
        assert serialized in valid

    _test_handler('yaml', obj_for_test, check)
|
def test_pickle():
    """Pickle handler: the dump string must unpickle back to the object."""

    def check(serialized):
        import pickle
        assert pickle.loads(serialized) == obj_for_test

    # pickle is a binary format, hence the 'rb+' mode
    _test_handler('pickle', obj_for_test, check, mode='rb+')
|
def test_exception():
    """dump must fail without a target/format and for unknown extensions."""
    obj = [{'a': 'abc', 'b': 1}, 2, 'c']
    # neither a filename nor a file_format given: ambiguous -> ValueError
    with pytest.raises(ValueError):
        mmcv.dump(obj)
    # .txt has no registered handler at this point -> TypeError
    with pytest.raises(TypeError):
        mmcv.dump(obj, 'tmp.txt')
|
def test_register_handler():
    """Handlers registered at runtime are picked up by load/dump via extension."""

    @mmcv.register_handler('txt')
    class TxtHandler1(mmcv.BaseFileHandler):
        # plain pass-through: file content is the object's str()

        def load_from_fileobj(self, file):
            return file.read()

        def dump_to_fileobj(self, obj, file):
            file.write(str(obj))

        def dump_to_str(self, obj, **kwargs):
            return str(obj)

    @mmcv.register_handler(['txt1', 'txt2'])
    class TxtHandler2(mmcv.BaseFileHandler):
        # same as TxtHandler1, but prefixes a newline when dumping to a file

        def load_from_fileobj(self, file):
            return file.read()

        def dump_to_fileobj(self, obj, file):
            file.write('\n')
            file.write(str(obj))

        def dump_to_str(self, obj, **kwargs):
            return str(obj)

    src = osp.join(osp.dirname(__file__), 'data/filelist.txt')
    content = mmcv.load(src)
    assert content == '1.jpg\n2.jpg\n3.jpg\n4.jpg\n5.jpg'
    # dumping via the .txt2 extension must route through TxtHandler2
    dst = osp.join(tempfile.gettempdir(), 'mmcv_test.txt2')
    mmcv.dump(content, dst)
    with open(dst, 'r') as f:
        written = f.read()
    os.remove(dst)
    assert written == '\n' + content
|
def test_list_from_file():
    """list_from_file supports prefix/offset/max_num and remote backends."""
    filename = osp.join(osp.dirname(__file__), 'data/filelist.txt')
    all_lines = ['1.jpg', '2.jpg', '3.jpg', '4.jpg', '5.jpg']
    assert mmcv.list_from_file(filename) == all_lines
    # prefix is prepended to every returned line
    assert mmcv.list_from_file(filename, prefix='a/') == [
        'a/' + line for line in all_lines
    ]
    # offset skips leading lines; max_num caps the result length
    assert mmcv.list_from_file(filename, offset=2) == all_lines[2:]
    assert mmcv.list_from_file(filename, max_num=2) == all_lines[:2]
    assert mmcv.list_from_file(filename, offset=3, max_num=3) == all_lines[3:]

    # remote files: the text is fetched through the matching backend
    expected = ['1.jpg', '2.jpg', '3.jpg']
    with patch.object(HTTPBackend, 'get_text', return_value='1.jpg\n2.jpg\n3.jpg'):
        filename = 'http://path/of/your/file'
        assert mmcv.list_from_file(
            filename, file_client_args={'backend': 'http'}) == expected
        assert mmcv.list_from_file(
            filename, file_client_args={'prefix': 'http'}) == expected
        # the backend can also be inferred from the URI itself
        assert mmcv.list_from_file(filename) == expected
    with patch.object(PetrelBackend, 'get_text', return_value='1.jpg\n2.jpg\n3.jpg'):
        filename = 's3://path/of/your/file'
        assert mmcv.list_from_file(
            filename, file_client_args={'backend': 'petrel'}) == expected
        assert mmcv.list_from_file(
            filename, file_client_args={'prefix': 's3'}) == expected
        assert mmcv.list_from_file(filename) == expected
|
def test_dict_from_file():
    """dict_from_file parses key-value lines, locally and via remote backends."""
    filename = osp.join(osp.dirname(__file__), 'data/mapping.txt')
    expected = {'1': 'cat', '2': ['dog', 'cow'], '3': 'panda'}
    assert mmcv.dict_from_file(filename) == expected
    # key_type converts the first token of every line
    assert mmcv.dict_from_file(filename, key_type=int) == {
        1: 'cat', 2: ['dog', 'cow'], 3: 'panda'
    }

    # remote files: the text is fetched through the matching backend
    with patch.object(HTTPBackend, 'get_text', return_value='1 cat\n2 dog cow\n3 panda'):
        filename = 'http://path/of/your/file'
        assert mmcv.dict_from_file(
            filename, file_client_args={'backend': 'http'}) == expected
        assert mmcv.dict_from_file(
            filename, file_client_args={'prefix': 'http'}) == expected
        # the backend can also be inferred from the URI itself
        assert mmcv.dict_from_file(filename) == expected
    with patch.object(PetrelBackend, 'get_text', return_value='1 cat\n2 dog cow\n3 panda'):
        filename = 's3://path/of/your/file'
        assert mmcv.dict_from_file(
            filename, file_client_args={'backend': 'petrel'}) == expected
        assert mmcv.dict_from_file(
            filename, file_client_args={'prefix': 's3'}) == expected
        assert mmcv.dict_from_file(filename) == expected
|
@pytest.mark.skipif((torch is None), reason='requires torch library')
def test_tensor2imgs():
    """tensor2imgs rejects invalid input and converts NCHW tensors to images."""
    # input must be a torch tensor, not a numpy array
    with pytest.raises(AssertionError):
        tensor = np.random.rand(2, 3, 3)
        mmcv.tensor2imgs(tensor)
    # input must be 4-D (N, C, H, W)
    with pytest.raises(AssertionError):
        tensor = torch.randn(2, 3, 3)
        mmcv.tensor2imgs(tensor)
    # channel count must be 1 or 3
    with pytest.raises(AssertionError):
        tensor = torch.randn(2, 4, 3, 3)
        mmcv.tensor2imgs(tensor)
    # mean length must match the number of channels
    with pytest.raises(AssertionError):
        tensor = torch.randn(2, 3, 5, 5)
        mmcv.tensor2imgs(tensor, mean=(1,))
        tensor = torch.randn(2, 1, 5, 5)
        mmcv.tensor2imgs(tensor, mean=(0, 0, 0))
    # std length must match the number of channels
    with pytest.raises(AssertionError):
        tensor = torch.randn(2, 3, 5, 5)
        mmcv.tensor2imgs(tensor, std=(1,))
        tensor = torch.randn(2, 1, 5, 5)
        mmcv.tensor2imgs(tensor, std=(1, 1, 1))
    # to_rgb is only valid for 3-channel input
    with pytest.raises(AssertionError):
        tensor = torch.randn(2, 1, 5, 5)
        mmcv.tensor2imgs(tensor, mean=(0,), std=(1,), to_rgb=True)

    # RGB conversion flips the channel axis
    rgb_tensor = torch.randn(2, 3, 5, 5)
    expected = [
        img.cpu().numpy().transpose(1, 2, 0).astype(np.uint8)
        for img in rgb_tensor.flip(1)
    ]
    for ref, out in zip(expected, mmcv.tensor2imgs(rgb_tensor, to_rgb=True)):
        assert_array_equal(ref, out)
    # without conversion, the channel order is preserved
    bgr_tensor = torch.randn(2, 3, 5, 5)
    expected = [
        img.cpu().numpy().transpose(1, 2, 0).astype(np.uint8)
        for img in bgr_tensor
    ]
    for ref, out in zip(expected, mmcv.tensor2imgs(bgr_tensor, to_rgb=False)):
        assert_array_equal(ref, out)
    # grayscale tensors come back as 2-D images
    gray_tensor = torch.randn(2, 1, 5, 5)
    expected = [img.squeeze(0).cpu().numpy().astype(np.uint8) for img in gray_tensor]
    for ref, out in zip(expected, mmcv.tensor2imgs(gray_tensor, to_rgb=False)):
        assert_array_equal(ref, out)
|
@patch('mmcv.__path__', [osp.join(osp.dirname(__file__), 'data/')])
def test_set_mmcv_home():
    """_get_mmcv_home honours an explicitly set MMCV_HOME env variable."""
    # clear any leftover value so the test is order-independent
    os.environ.pop(ENV_MMCV_HOME, None)
    home = osp.join(osp.dirname(__file__), 'data/model_zoo/mmcv_home/')
    os.environ[ENV_MMCV_HOME] = home
    assert _get_mmcv_home() == home
|
@patch('mmcv.__path__', [osp.join(osp.dirname(__file__), 'data/')])
def test_default_mmcv_home():
    """Without env overrides, mmcv home falls back to the default cache dir."""
    for env_var in (ENV_MMCV_HOME, ENV_XDG_CACHE_HOME):
        os.environ.pop(env_var, None)
    assert _get_mmcv_home() == os.path.expanduser(
        os.path.join(DEFAULT_CACHE_DIR, 'mmcv'))
    # without a local mmcv_home, the bundled model-zoo json is served
    model_urls = get_external_models()
    assert model_urls == mmcv.load(
        osp.join(mmcv.__path__[0], 'model_zoo/open_mmlab.json'))
|
@patch('mmcv.__path__', [osp.join(osp.dirname(__file__), 'data/')])
def test_get_external_models():
    """A local mmcv_home open_mmlab.json overrides/extends the bundled URLs."""
    os.environ.pop(ENV_MMCV_HOME, None)
    home = osp.join(osp.dirname(__file__), 'data/model_zoo/mmcv_home/')
    os.environ[ENV_MMCV_HOME] = home
    assert get_external_models() == {
        'train': 'https://localhost/train.pth',
        'test': 'test.pth',
        'val': 'val.pth',
        'train_empty': 'train.pth',
    }
|
@patch('mmcv.__path__', [osp.join(osp.dirname(__file__), 'data/')])
def test_get_deprecated_models():
    """deprecated.json in mmcv_home maps old model names to current ones."""
    os.environ.pop(ENV_MMCV_HOME, None)
    home = osp.join(osp.dirname(__file__), 'data/model_zoo/mmcv_home/')
    os.environ[ENV_MMCV_HOME] = home
    assert get_deprecated_model_names() == {
        'train_old': 'train',
        'test_old': 'test',
    }
|
def load_from_http(url, map_location=None):
    """Stub for checkpoint downloading: tags the url so tests can verify
    which loading path was taken (no network access)."""
    return f'url:{url}'
|
def load_url(url, map_location=None, model_dir=None):
    """Stub mirroring torch's load_url signature; delegates to the http stub
    and ignores map_location/model_dir."""
    return load_from_http(url)
|
def load(filepath, map_location=None):
    """Stub for torch.load: tags local paths so tests can verify that a
    checkpoint was resolved to a file on disk."""
    return f'local:{filepath}'
|
@patch('mmcv.__path__', [osp.join(osp.dirname(__file__), 'data/')])
@patch('mmcv.runner.checkpoint.load_from_http', load_from_http)
@patch('mmcv.runner.checkpoint.load_url', load_url)
@patch('torch.load', load)
def test_load_external_url():
    """_load_checkpoint resolves modelzoo/torchvision/open-mmlab/openmmlab
    prefixes, honours deprecation aliases, and falls back to local files."""

    def _torch_older_than_1_9():
        # Compare numerically: the original string comparison
        # ``TORCH_VERSION < '1.9.0'`` is wrong for torch >= 1.10, because
        # '1.10.0' < '1.9.0' lexicographically.
        fields = TORCH_VERSION.split('+')[0].split('.')
        try:
            return (int(fields[0]), int(fields[1])) < (1, 9)
        except (ValueError, IndexError):
            return False  # non-numeric versions such as 'parrots'

    # torchvision changed the resnet50 checkpoint hash in torch 1.9
    if _torch_older_than_1_9():
        resnet50_url = 'url:https://download.pytorch.org/models/resnet50-19c8e357.pth'
    else:
        resnet50_url = 'url:https://download.pytorch.org/models/resnet50-0676ba61.pth'
    assert _load_checkpoint('modelzoo://resnet50') == resnet50_url
    assert _load_checkpoint('torchvision://resnet50') == resnet50_url

    # open-mmlab: with no MMCV_HOME set, the bundled json provides the URL
    os.environ.pop(ENV_MMCV_HOME, None)
    os.environ.pop(ENV_XDG_CACHE_HOME, None)
    assert _load_checkpoint('open-mmlab://train') == 'url:https://localhost/train.pth'

    # deprecated aliases emit a warning but resolve to the new name
    os.environ.pop(ENV_MMCV_HOME, None)
    os.environ.pop(ENV_XDG_CACHE_HOME, None)
    with pytest.warns(Warning, match='open-mmlab://train_old is deprecated in favor of open-mmlab://train'):
        url = _load_checkpoint('open-mmlab://train_old')
        assert url == 'url:https://localhost/train.pth'
    os.environ.pop(ENV_MMCV_HOME, None)
    os.environ.pop(ENV_XDG_CACHE_HOME, None)
    with pytest.warns(Warning, match='openmmlab://train_old is deprecated in favor of openmmlab://train'):
        url = _load_checkpoint('openmmlab://train_old')
        assert url == 'url:https://localhost/train.pth'

    # with MMCV_HOME set, entries may also point at local files under it
    os.environ.pop(ENV_MMCV_HOME, None)
    mmcv_home = osp.join(osp.dirname(__file__), 'data/model_zoo/mmcv_home')
    os.environ[ENV_MMCV_HOME] = mmcv_home
    assert _load_checkpoint('open-mmlab://train') == 'url:https://localhost/train.pth'
    # a relative entry whose file does not exist must raise
    with pytest.raises(FileNotFoundError, match='train.pth can not be found.'):
        _load_checkpoint('open-mmlab://train_empty')
    assert _load_checkpoint('open-mmlab://test') == f"local:{osp.join(_get_mmcv_home(), 'test.pth')}"
    assert _load_checkpoint('open-mmlab://val') == f"local:{osp.join(_get_mmcv_home(), 'val.pth')}"

    # raw URLs go through the http loader; raw paths through torch.load
    assert _load_checkpoint('http://localhost/train.pth') == 'url:http://localhost/train.pth'
    with pytest.raises(FileNotFoundError, match='train.pth can not be found.'):
        _load_checkpoint('train.pth')
    assert _load_checkpoint(osp.join(_get_mmcv_home(), 'test.pth')) == f"local:{osp.join(_get_mmcv_home(), 'test.pth')}"
|
@pytest.mark.parametrize('device', ['cpu', pytest.param('cuda', marks=pytest.mark.skipif(not torch.cuda.is_available(), reason='requires CUDA support'))])
def test_active_rotated_filter(device):
    """Forward and backward of active_rotated_filter match the references."""
    # NOTE(review): np_feature/np_indices/expected_output/expected_grad are
    # module-level fixtures defined elsewhere in this file.
    feature = torch.tensor(np_feature, dtype=torch.float, device=device, requires_grad=True)
    indices = torch.tensor(np_indices, dtype=torch.int, device=device)
    out = active_rotated_filter(feature, indices)
    out.backward(torch.ones_like(out))
    assert np.allclose(out.data.cpu().numpy(), expected_output, atol=0.001)
    assert np.allclose(feature.grad.data.cpu().numpy(), expected_grad, atol=0.001)
|
@pytest.mark.skipif((not torch.cuda.is_available()), reason='requires CUDA support')
def test_paconv_assign_scores():
    # Checks forward and backward of assign_score_withk against precomputed
    # reference tensors. Fixture shapes (assumed from the literals below —
    # TODO confirm against the op's docs): scores (B=2, npoint=2, K=4, M=2),
    # points/centers (B=2, N=8, M=2, C=4), knn_idx (B=2, npoint=2, K=4).
    scores = torch.tensor([[[[0.06947571, 0.6065746], [0.28462553, 0.8378516], [0.7595994, 0.97220325], [0.519155, 0.766185]], [[0.15348864, 0.6051019], [0.21510637, 0.31916398], [0.00236845, 0.5842595], [0.6783676, 0.5216348]]], [[[0.23089725, 0.5568468], [0.7405102, 0.06438422], [0.6887394, 0.22089851], [0.0502342, 0.79228795]], [[0.44883424, 0.15427643], [0.13817799, 0.34856772], [0.7989621, 0.33788306], [0.15699774, 0.7693662]]]]).float().cuda()
    scores.requires_grad_()
    points = torch.tensor([[[[0.06001121, 0.92963666, 0.5753327, 0.7251477], [0.53563064, 0.23129565, 0.92366195, 0.44261628]], [[0.5770022, 0.56625944, 0.23560429, 0.11178821], [0.7735967, 0.95678777, 0.25468266, 0.02895975]], [[0.0589869, 0.09017515, 0.5977862, 0.02797985], [0.603862, 0.35991007, 0.85761684, 0.3096559]], [[0.22359002, 0.13983732, 0.5544243, 0.68863827], [0.85646236, 0.75651926, 0.8638947, 0.83600986]], [[0.45424145, 0.27458847, 0.6456112, 0.47162914], [0.15773582, 0.47645122, 0.79964715, 0.3323908]], [[0.8351399, 0.84696376, 0.9431732, 0.29418713], [0.77168906, 0.6996871, 0.19354361, 0.03392768]], [[0.30976456, 0.7074133, 0.581795, 0.976677], [0.69656056, 0.07199162, 0.4708506, 0.29117996]], [[0.5829035, 0.30201727, 0.76556486, 0.0935446], [0.88030535, 0.16129416, 0.9242525, 0.49545723]]], [[[0.50899494, 0.06482804, 0.44939405, 0.37704808], [0.47028124, 0.11969638, 0.62823206, 0.28560323]], [[0.40690207, 0.689753, 0.51636654, 0.23040164], [0.06935787, 0.00488842, 0.22462702, 0.09182382]], [[0.26611632, 0.00184339, 0.7730655, 0.5228131], [0.87776035, 0.77895886, 0.2787183, 0.16620636]], [[0.502574, 0.04039001, 0.5368497, 0.98379374], [0.40973026, 0.3238272, 0.9733018, 0.13988364]], [[0.04586202, 0.20983845, 0.20662665, 0.22270602], [0.60387236, 0.5155574, 0.51237285, 0.6528438]], [[0.45735973, 0.86821306, 0.61054605, 0.8370336], [0.45193362, 0.3734138, 0.7825672, 0.5699416]], [[0.44591594, 0.12447512, 0.09282011, 0.7055254], [0.25223452, 0.46696228, 0.7051136, 0.892151]], [[0.49615085, 0.47321403, 0.93138885, 0.7652197], [0.38766378, 0.30332977, 0.23131835, 0.02863514]]]]).float().cuda()
    points.requires_grad_()
    centers = torch.tensor([[[[0.83878064, 0.96658987, 0.8033424, 0.9598312], [0.45035273, 0.8768925, 0.977736, 0.54547966]], [[0.01041394, 0.597893, 0.36212963, 0.4410367], [0.94879234, 0.8372817, 0.21237361, 0.67945415]], [[0.5096087, 0.26401454, 0.60034937, 0.5417416], [0.87591463, 0.546456, 0.4096033, 0.16373193]], [[0.79547447, 0.1482386, 0.12840575, 0.45384115], [0.5640288, 0.944541, 0.5745328, 0.73229736]], [[0.93011934, 0.7406011, 0.62621707, 0.8677915], [0.91563636, 0.3595413, 0.6678378, 0.6085383]], [[0.22431666, 0.65617776, 0.7483924, 0.6263364], [0.30968404, 0.78204364, 0.14899081, 0.09628749]], [[0.73675203, 0.72104895, 0.4648038, 0.6101647], [0.7817645, 0.16572917, 0.3311919, 0.43407398]], [[0.8193154, 0.09559608, 0.05978829, 0.90262103], [0.4256065, 0.8165596, 0.8206446, 0.6604721]]], [[[0.7159653, 0.18600845, 0.21433902, 0.3159626], [0.3921569, 0.33221376, 0.5061177, 0.7961841]], [[0.95338356, 0.04785997, 0.67185795, 0.6538394], [0.4729132, 0.33404195, 0.17750603, 0.8445621]], [[0.6755793, 0.16193843, 0.75943846, 0.92123103], [0.2781859, 0.03114432, 0.710638, 0.52729136]], [[0.8376105, 0.10858494, 0.13208169, 0.365772], [0.5930795, 0.27390373, 0.14036089, 0.170403]], [[0.3479789, 0.89855295, 0.04844379, 0.9871029], [0.29781651, 0.0244137, 0.9179047, 0.8081611]], [[0.12460887, 0.44991326, 0.19382608, 0.35037738], [0.2773472, 0.4362057, 0.36757517, 0.5993509]], [[0.29630446, 0.90046406, 0.5417113, 0.13510644], [0.09623539, 0.04226565, 0.32001644, 0.44358212]], [[0.5274848, 0.82096446, 0.9415489, 0.7123748], [0.7537517, 0.8086482, 0.85345286, 0.7472754]]]]).float().cuda()
    centers.requires_grad_()
    # neighbour indices for each sampled point
    knn_idx = torch.tensor([[[6, 7, 4, 6], [2, 4, 2, 4]], [[7, 1, 3, 2], [6, 0, 2, 6]]]).long().cuda()
    aggregate = 'sum'
    # forward pass must match the precomputed reference output
    expected_output = torch.tensor([[[[(- 0.08134781), 0.03877336, (- 0.8212776), (- 0.2869547)], [(- 0.23378491), (- 0.24112664), (- 0.1600166), (- 0.4121864)]], [[(- 0.05780616), (- 0.12298299), (- 0.0370461), (- 0.07889931)], [(- 0.13956165), (- 0.02006848), (- 0.10940295), (- 0.0293439)]], [[0.09284145, 0.58250105, 0.5927749, 0.16774094], [0.27070042, 0.13422406, 0.2617501, 0.23416464]], [[(- 0.06121218), (- 0.09561322), (- 0.20408826), 0.08079343], [0.00944228, 0.03874819, 0.08404065, 0.04041629]]], [[[(- 0.2110898), (- 0.13335688), (- 0.09315082), 0.08512095], [0.09121774, 0.15976946, 0.23994486, 0.14350912]], [[(- 0.36167958), (- 0.14891288), (- 0.64470863), (- 0.0646704)], [(- 0.28276974), (- 0.08847666), (- 0.46904767), 0.20491874]], [[(- 0.34877953), (- 0.35533834), (- 0.25225785), (- 0.4638189)], [(- 0.1420663), 0.09467781, 0.17088932, 0.22580585]], [[(- 0.3879708), (- 0.3991068), 0.05276498, (- 0.46989647)], [0.32522714, (- 0.02163534), 0.21604237, 0.4346682]]]]).float()
    output = assign_score_withk(scores, points, centers, knn_idx, aggregate)
    assert torch.allclose(output.detach().cpu(), expected_output, atol=1e-06)
    # backward pass: gradients w.r.t. every input must match the references
    loss = output.sum()
    loss.backward()
    expected_scores_grad = torch.tensor([[[[0.04288036, (- 0.18217683)], [(- 0.78873926), 0.7485497], [(- 0.6866992), 0.05346543], [0.04288036, (- 0.18217683)]], [[(- 1.1407862), 0.13533896], [(- 0.06964391), (- 0.22948086)], [(- 1.1407862), 0.13533896], [(- 0.06964391), (- 0.22948086)]]], [[[(- 0.3363995), (- 2.212181)], [(- 1.1589496), (- 2.7724311)], [(- 0.9387654), (- 1.3163853)], [(- 1.4385346), (- 1.0614843)]], [[(- 0.5048497), 1.4143617], [(- 0.47332114), 0.6017133], [(- 0.30974793), 1.1995442], [(- 0.5048497), 1.4143617]]]]).float()
    expected_points_grad = torch.tensor([[[[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], [[0.15585709, 0.15585709, 0.15585709, 0.15585709], [1.1893613, 1.1893613, 1.1893613, 1.1893613]], [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], [[1.6530733, 1.6530733, 1.6530733, 1.6530733], [1.8130021, 1.8130021, 1.8130021, 1.8130021]], [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], [[0.58863074, 0.58863074, 0.58863074, 0.58863074], [1.3727596, 1.3727596, 1.3727596, 1.3727596]], [[0.28462553, 0.28462553, 0.28462553, 0.28462553], [0.8378516, 0.8378516, 0.8378516, 0.8378516]]], [[[0.13817799, 0.13817799, 0.13817799, 0.13817799], [0.34856772, 0.34856772, 0.34856772, 0.34856772]], [[0.7405102, 0.7405102, 0.7405102, 0.7405102], [0.06438422, 0.06438422, 0.06438422, 0.06438422]], [[0.8491963, 0.8491963, 0.8491963, 0.8491963], [1.1301711, 1.1301711, 1.1301711, 1.1301711]], [[0.6887394, 0.6887394, 0.6887394, 0.6887394], [0.22089851, 0.22089851, 0.22089851, 0.22089851]], [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], [[0.605832, 0.605832, 0.605832, 0.605832], [0.92364264, 0.92364264, 0.92364264, 0.92364264]], [[0.23089725, 0.23089725, 0.23089725, 0.23089725], [0.5568468, 0.5568468, 0.5568468, 0.5568468]]]]).float()
    expected_centers_grad = torch.tensor([[[[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], [[(- 1.0493311), (- 1.0493311), (- 1.0493311), (- 1.0493311)], [(- 2.0301602), (- 2.0301602), (- 2.0301602), (- 2.0301602)]], [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], [[(- 1.6328557), (- 1.6328557), (- 1.6328557), (- 1.6328557)], [(- 3.1828144), (- 3.1828144), (- 3.1828144), (- 3.1828144)]], [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]]], [[[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], [[(- 1.5429721), (- 1.5429721), (- 1.5429721), (- 1.5429721)], [(- 1.6100934), (- 1.6100934), (- 1.6100934), (- 1.6100934)]], [[(- 1.7103812), (- 1.7103812), (- 1.7103812), (- 1.7103812)], [(- 1.6344175), (- 1.6344175), (- 1.6344175), (- 1.6344175)]]]]).float()
    assert torch.allclose(scores.grad.detach().cpu(), expected_scores_grad, atol=1e-06)
    assert torch.allclose(points.grad.detach().cpu(), expected_points_grad, atol=1e-06)
    assert torch.allclose(centers.grad.detach().cpu(), expected_centers_grad, atol=1e-06)
|
@pytest.mark.skipif((not torch.cuda.is_available()), reason='requires CUDA support')
def test_ball_query():
    # ball_query(min_radius, max_radius, sample_num, xyz, center_xyz) returns,
    # for each center point, the indices of up to sample_num neighbours whose
    # distance lies within the radius band; unfilled slots repeat the first hit.
    new_xyz = torch.tensor([[[(- 0.074), 1.3147, (- 1.3625)], [(- 2.2769), 2.7817, (- 0.2334)], [(- 0.4003), 2.4666, (- 0.5116)], [(- 0.074), 1.3147, (- 1.3625)], [(- 0.074), 1.3147, (- 1.3625)]], [[(- 2.0289), 2.4952, (- 0.1708)], [(- 2.0668), 6.0278, (- 0.4875)], [0.4066, 1.4211, (- 0.2947)], [(- 2.0289), 2.4952, (- 0.1708)], [(- 2.0289), 2.4952, (- 0.1708)]]]).cuda()
    xyz = torch.tensor([[[(- 0.074), 1.3147, (- 1.3625)], [0.5555, 1.0399, (- 1.3634)], [(- 0.4003), 2.4666, (- 0.5116)], [(- 0.5251), 2.4379, (- 0.8466)], [(- 0.9691), 1.1418, (- 1.3733)], [(- 0.2232), 0.9561, (- 1.3626)], [(- 2.2769), 2.7817, (- 0.2334)], [(- 0.2822), 1.3192, (- 1.3645)], [0.1533, 1.5024, (- 1.0432)], [0.4917, 1.1529, (- 1.3496)]], [[(- 2.0289), 2.4952, (- 0.1708)], [(- 0.7188), 0.9956, (- 0.5096)], [(- 2.0668), 6.0278, (- 0.4875)], [(- 1.9304), 3.3092, 0.661], [0.0949, 1.4332, 0.314], [(- 1.2879), 2.0008, (- 0.7791)], [(- 0.7252), 0.9611, (- 0.6371)], [0.4066, 1.4211, (- 0.2947)], [0.322, 1.4447, 0.3548], [(- 0.9744), 2.3856, (- 1.2)]]]).cuda()
    # radius band [0, 0.2]: each center only finds itself
    idx = ball_query(0, 0.2, 5, xyz, new_xyz)
    expected_idx = torch.tensor([[[0, 0, 0, 0, 0], [6, 6, 6, 6, 6], [2, 2, 2, 2, 2], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]], [[0, 0, 0, 0, 0], [2, 2, 2, 2, 2], [7, 7, 7, 7, 7], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]]).cuda()
    assert torch.all((idx == expected_idx))
    # non-zero min_radius [0.2, 0.4]: nearby (but not identical) points qualify
    idx = ball_query(0.2, 0.4, 5, xyz, new_xyz)
    expected_idx = torch.tensor([[[0, 5, 7, 0, 0], [6, 6, 6, 6, 6], [2, 3, 2, 2, 2], [0, 5, 7, 0, 0], [0, 5, 7, 0, 0]], [[0, 0, 0, 0, 0], [2, 2, 2, 2, 2], [7, 7, 7, 7, 7], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]]).cuda()
    assert torch.all((idx == expected_idx))
|
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires CUDA support')
class TestBBox(object):
    """Tests for mmcv.ops.bbox_overlaps (IoU with the offset=1 convention)."""

    def _test_bbox_overlaps(self, dtype=torch.float):
        """Check pairwise, aligned and one-vs-many IoU for the given dtype."""
        from mmcv.ops import bbox_overlaps

        # pairwise IoU: (3 boxes) x (2 boxes) -> 3x2 matrix
        b1 = torch.tensor([[1.0, 1.0, 3.0, 4.0], [2.0, 2.0, 3.0, 4.0],
                           [7.0, 7.0, 8.0, 8.0]]).cuda().type(dtype)
        b2 = torch.tensor([[0.0, 2.0, 2.0, 5.0],
                           [2.0, 1.0, 3.0, 3.0]]).cuda().type(dtype)
        should_output = np.array([[0.33333334, 0.5], [0.2, 0.5], [0.0, 0.0]])
        out = bbox_overlaps(b1, b2, offset=1)
        assert np.allclose(out.cpu().numpy(), should_output, 0.01)

        # aligned mode: element-wise IoU of equal-length box lists
        b1 = torch.tensor([[1.0, 1.0, 3.0, 4.0],
                           [2.0, 2.0, 3.0, 4.0]]).cuda().type(dtype)
        b2 = torch.tensor([[0.0, 2.0, 2.0, 5.0],
                           [2.0, 1.0, 3.0, 3.0]]).cuda().type(dtype)
        should_output = np.array([0.33333334, 0.5])
        out = bbox_overlaps(b1, b2, aligned=True, offset=1)
        assert np.allclose(out.cpu().numpy(), should_output, 0.01)

        # one box vs several boxes sliding past it
        # (the original assigned b1 twice here; the duplicate was removed)
        b1 = torch.tensor([[0.0, 0.0, 3.0, 3.0]]).cuda().type(dtype)
        b2 = torch.tensor([[4.0, 0.0, 5.0, 3.0], [3.0, 0.0, 4.0, 3.0],
                           [2.0, 0.0, 3.0, 3.0],
                           [1.0, 0.0, 2.0, 3.0]]).cuda().type(dtype)
        should_output = np.array([0, 0.2, 0.5, 0.5])
        out = bbox_overlaps(b1, b2, offset=1)
        assert np.allclose(out.cpu().numpy(), should_output, 0.01)

    def test_bbox_overlaps_float(self):
        self._test_bbox_overlaps(torch.float)

    def test_bbox_overlaps_half(self):
        self._test_bbox_overlaps(torch.half)
|
class TestBilinearGridSample(object):
    """Compare bilinear_grid_sample against torch.nn.functional.grid_sample."""

    def _test_bilinear_grid_sample(self, dtype=torch.float, align_corners=False, multiplier=1, precision=0.001):
        """Sample a random 20x20 map on a 15x15 identity grid and compare
        the custom implementation with the torch reference.

        ``multiplier`` scales the grid to probe out-of-range coordinates.
        """
        from mmcv.ops.point_sample import bilinear_grid_sample

        input = torch.rand(1, 1, 20, 20, dtype=dtype)
        # identity affine transform, optionally scaled out of range
        grid = torch.Tensor([[[1, 0, 0], [0, 1, 0]]])
        grid = F.affine_grid(grid, (1, 1, 15, 15), align_corners=align_corners).type_as(input)
        grid *= multiplier
        out = bilinear_grid_sample(input, grid, align_corners=align_corners)
        ref_out = F.grid_sample(input, grid, align_corners=align_corners)
        assert np.allclose(out.data.detach().cpu().numpy(), ref_out.data.detach().cpu().numpy(), precision)

    def test_bilinear_grid_sample(self):
        # both dtypes and both align_corners settings on an in-range grid
        # (the original called (torch.float, False) twice; the duplicate
        # invocation was removed)
        self._test_bilinear_grid_sample(torch.double, False)
        self._test_bilinear_grid_sample(torch.double, True)
        self._test_bilinear_grid_sample(torch.float, False)
        self._test_bilinear_grid_sample(torch.float, True)
        # out-of-range grids, positive and negative multipliers
        self._test_bilinear_grid_sample(torch.float, True, 5)
        self._test_bilinear_grid_sample(torch.float, False, 10)
        self._test_bilinear_grid_sample(torch.float, True, -6)
        self._test_bilinear_grid_sample(torch.float, False, -10)
        self._test_bilinear_grid_sample(torch.double, True, 5)
        self._test_bilinear_grid_sample(torch.double, False, 10)
        self._test_bilinear_grid_sample(torch.double, True, -6)
        self._test_bilinear_grid_sample(torch.double, False, -10)
|
def _test_border_align_allclose(device, dtype, pool_size):
    """Check forward/backward of border_align in both its functional and
    module forms against precomputed references."""
    if device == 'cuda' and not torch.cuda.is_available():
        pytest.skip('test requires GPU')
    try:
        from mmcv.ops import BorderAlign, border_align
    except ModuleNotFoundError:
        pytest.skip('BorderAlign op is not successfully compiled')

    # reference values are module-level fixtures keyed by pool_size
    np_input = np.array(input_arr)
    np_boxes = np.array(boxes_arr)
    np_output = np.array(output_dict[pool_size])
    np_grad = np.array(input_grad_dict[pool_size])

    input = torch.tensor(np_input, dtype=dtype, device=device, requires_grad=True)
    boxes = torch.tensor(np_boxes, dtype=dtype, device=device)

    # functional interface (on a deep copy so the two gradient checks stay
    # independent of each other)
    input_cp = copy.deepcopy(input)
    output = border_align(input_cp, boxes, pool_size)
    output.backward(torch.ones_like(output))
    assert np.allclose(output.data.type(dtype).cpu().numpy(), np_output, atol=1e-05)
    assert np.allclose(input_cp.grad.data.type(dtype).cpu().numpy(), np_grad, atol=1e-05)

    # module interface
    pool_module = BorderAlign(pool_size)
    output = pool_module(input, boxes)
    output.backward(torch.ones_like(output))
    assert np.allclose(output.data.type(dtype).cpu().numpy(), np_output, atol=1e-05)
    assert np.allclose(input.grad.data.type(dtype).cpu().numpy(), np_grad, atol=1e-05)
|
@pytest.mark.parametrize('device', ['cuda'])
@pytest.mark.parametrize('dtype', [torch.float, torch.half, torch.double])
@pytest.mark.parametrize('pool_size', [1, 2])
def test_border_align(device, dtype, pool_size):
    # Thin parametrized entry point; all checking happens in the helper above.
    _test_border_align_allclose(device, dtype, pool_size)
|
# NOTE: the three lines below are web-page extraction residue (a dataset
# viewer footer), not Python code; commented out so the module stays
# importable.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.