code
stringlengths
17
6.64M
@pytest.mark.parametrize('embed_dims', [False, 256])
def test_basetransformerlayer(embed_dims):
    """BaseTransformerLayer builds with or without an explicit FFN
    ``embed_dims`` and supports the ``batch_first`` layout."""
    attn_cfgs = (dict(type='MultiheadAttention', embed_dims=256, num_heads=8), )
    base_ffn = dict(
        type='FFN',
        feedforward_channels=1024,
        num_fcs=2,
        ffn_drop=0.,
        act_cfg=dict(type='ReLU', inplace=True))
    # `embed_dims` is either False (key omitted) or an explicit value.
    ffn_cfgs = dict(base_ffn, embed_dims=embed_dims) if embed_dims else base_ffn
    feedforward_channels = 2048
    ffn_dropout = 0.1
    operation_order = ('self_attn', 'norm', 'ffn', 'norm')
    baselayer = BaseTransformerLayer(
        attn_cfgs=attn_cfgs,
        ffn_cfgs=ffn_cfgs,
        feedforward_channels=feedforward_channels,
        ffn_dropout=ffn_dropout,
        operation_order=operation_order)
    # Default layout is sequence-first.
    assert baselayer.batch_first is False
    assert baselayer.ffns[0].feedforward_channels == feedforward_channels

    # batch_first=True must propagate to the attention modules and the
    # layer must accept a (batch, seq, dim) tensor.
    attn_cfgs = (dict(type='MultiheadAttention', num_heads=8, embed_dims=256), )
    baselayer = BaseTransformerLayer(
        attn_cfgs=attn_cfgs,
        feedforward_channels=2048,
        ffn_dropout=0.1,
        operation_order=operation_order,
        batch_first=True)
    assert baselayer.attentions[0].batch_first
    baselayer(torch.rand(2, 10, 256))
def test_transformerlayersequence():
    """A 6-layer TransformerLayerSequence builds from a single shared config,
    while a per-layer config list of the wrong length must raise."""
    shared_cfg = dict(
        type='BaseTransformerLayer',
        attn_cfgs=[
            dict(type='MultiheadAttention', embed_dims=256, num_heads=8, dropout=0.1),
            dict(type='MultiheadAttention', embed_dims=256, num_heads=4),
        ],
        feedforward_channels=1024,
        ffn_dropout=0.1,
        operation_order=('self_attn', 'norm', 'cross_attn', 'norm', 'ffn', 'norm'))
    squeue = TransformerLayerSequence(num_layers=6, transformerlayers=shared_cfg)
    assert len(squeue.layers) == 6
    assert squeue.pre_norm is False

    # A list config must contain exactly num_layers entries.
    short_cfg = dict(
        type='BaseTransformerLayer',
        attn_cfgs=[
            dict(type='MultiheadAttention', embed_dims=256, num_heads=8, dropout=0.1),
            dict(type='MultiheadAttention', embed_dims=256),
        ],
        feedforward_channels=1024,
        ffn_dropout=0.1,
        operation_order=('self_attn', 'norm', 'cross_attn', 'norm', 'ffn', 'norm'))
    with pytest.raises(AssertionError):
        TransformerLayerSequence(num_layers=6, transformerlayers=[short_cfg])
def test_drop_path():
    """DropPath is the identity when drop_prob==0 or in eval mode, and
    returns a new tensor when actively dropping in training mode."""
    no_drop = DropPath(drop_prob=0)
    x = torch.rand(2, 3, 4, 5)
    assert no_drop(x) is x

    dropper = DropPath(drop_prob=0.1)
    dropper.training = False  # eval: pass-through
    x = torch.rand(2, 3, 4, 5)
    assert dropper(x) is x
    dropper.training = True  # training: masked/scaled copy
    assert dropper(x) is not x
def test_constant_init():
    """constant_init fills weights with the given value and biases with 0,
    and works on modules without a bias."""
    conv_module = nn.Conv2d(3, 16, 3)
    constant_init(conv_module, 0.1)
    assert conv_module.weight.allclose(torch.full_like(conv_module.weight, 0.1))
    assert conv_module.bias.allclose(torch.zeros_like(conv_module.bias))
    conv_module_no_bias = nn.Conv2d(3, 16, 3, bias=False)
    constant_init(conv_module_no_bias, 0.1)
    # BUGFIX: verify the module that was just initialized; the original
    # re-checked `conv_module` (copy-paste), leaving the bias-free path
    # effectively untested.
    assert conv_module_no_bias.weight.allclose(
        torch.full_like(conv_module_no_bias.weight, 0.1))
def test_xavier_init():
    """xavier_init sets the bias constant, accepts only 'normal'/'uniform'
    distributions, and works on bias-free modules."""
    conv_module = nn.Conv2d(3, 16, 3)
    xavier_init(conv_module, bias=0.1)
    assert conv_module.bias.allclose(torch.full_like(conv_module.bias, 0.1))
    xavier_init(conv_module, distribution='uniform')
    # Any other distribution name is rejected.
    with pytest.raises(AssertionError):
        xavier_init(conv_module, distribution='student-t')
    conv_without_bias = nn.Conv2d(3, 16, 3, bias=False)
    xavier_init(conv_without_bias)
def test_normal_init():
    """normal_init sets the bias constant and works on bias-free modules."""
    conv_module = nn.Conv2d(3, 16, 3)
    normal_init(conv_module, bias=0.1)
    assert conv_module.bias.allclose(torch.full_like(conv_module.bias, 0.1))
    conv_without_bias = nn.Conv2d(3, 16, 3, bias=False)
    normal_init(conv_without_bias)
def test_trunc_normal_init():
    """trunc_normal_init draws weights from a truncated normal distribution
    (verified by a Kolmogorov-Smirnov test) and fills the bias constant."""

    def _random_float(low, high):
        # Uniform sample in [low, high).
        return (high - low) * random.random() + low

    def _is_trunc_normal(tensor, mean, std, a, b):
        # KS-test the standardized samples against scipy's truncnorm.
        z_samples = ((tensor.view(-1) - mean) / std).tolist()
        a0 = (a - mean) / std
        b0 = (b - mean) / std
        p_value = stats.kstest(z_samples, 'truncnorm', args=(a0, b0))[1]
        return p_value > 0.0001

    conv_module = nn.Conv2d(3, 16, 3)
    mean = _random_float(-3, 3)
    std = _random_float(.01, 1)
    a = _random_float(mean - 2 * std, mean)
    b = _random_float(mean, mean + 2 * std)
    trunc_normal_init(conv_module, mean, std, a, b, bias=0.1)
    assert _is_trunc_normal(conv_module.weight, mean, std, a, b)
    assert conv_module.bias.allclose(torch.full_like(conv_module.bias, 0.1))

    conv_without_bias = nn.Conv2d(3, 16, 3, bias=False)
    trunc_normal_init(conv_without_bias)
def test_uniform_init():
    """uniform_init sets the bias constant and works on bias-free modules."""
    conv_module = nn.Conv2d(3, 16, 3)
    uniform_init(conv_module, bias=0.1)
    assert conv_module.bias.allclose(torch.full_like(conv_module.bias, 0.1))
    conv_without_bias = nn.Conv2d(3, 16, 3, bias=False)
    uniform_init(conv_without_bias)
def test_kaiming_init():
    """kaiming_init sets the bias constant, accepts only 'normal'/'uniform'
    distributions, and works on bias-free modules."""
    conv_module = nn.Conv2d(3, 16, 3)
    kaiming_init(conv_module, bias=0.1)
    assert conv_module.bias.allclose(torch.full_like(conv_module.bias, 0.1))
    kaiming_init(conv_module, distribution='uniform')
    # Any other distribution name is rejected.
    with pytest.raises(AssertionError):
        kaiming_init(conv_module, distribution='student-t')
    conv_without_bias = nn.Conv2d(3, 16, 3, bias=False)
    kaiming_init(conv_without_bias)
def test_caffe_xavier_init():
    """Smoke test: caffe2_xavier_init runs on a plain Conv2d."""
    conv_module = nn.Conv2d(3, 16, 3)
    caffe2_xavier_init(conv_module)
def test_bias_init_with_prob():
    """bias_init_with_prob maps a prior probability p to -log((1-p)/p)."""
    conv_module = nn.Conv2d(3, 16, 3)
    prior_prob = 0.1
    # CONSISTENCY FIX: pass `prior_prob` instead of repeating the 0.1
    # literal, so the initialized bias and the expected value below are
    # guaranteed to use the same probability.
    normal_init(conv_module, bias=bias_init_with_prob(prior_prob))
    bias = float(-np.log((1 - prior_prob) / prior_prob))
    assert conv_module.bias.allclose(torch.full_like(conv_module.bias, bias))
def test_constaninit():
    """test ConstantInit class."""
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Linear(1, 2))
    func = ConstantInit(val=1, bias=2, layer='Conv2d')
    func(model)
    # Only the Conv2d layer may be touched; the Linear layer keeps its
    # default initialization.
    assert torch.equal(model[0].weight, torch.full(model[0].weight.shape, 1.))
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 2.))
    assert not torch.equal(model[2].weight, torch.full(model[2].weight.shape, 1.))
    assert not torch.equal(model[2].bias, torch.full(model[2].bias.shape, 2.))

    # bias_prob converts a prior probability into the bias value.
    func = ConstantInit(val=3, bias_prob=0.01, layer='Linear')
    func(model)
    res = bias_init_with_prob(0.01)
    assert torch.equal(model[0].weight, torch.full(model[0].weight.shape, 1.))
    assert torch.equal(model[2].weight, torch.full(model[2].weight.shape, 3.))
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 2.))
    assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, res))

    # '_ConvNd' matches every convolution flavour (Conv1d and Conv2d here).
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Conv1d(1, 2, 1))
    func = ConstantInit(val=4., bias=5., layer='_ConvNd')
    func(model)
    assert torch.all(model[0].weight == 4.)
    assert torch.all(model[2].weight == 4.)
    assert torch.all(model[0].bias == 5.)
    assert torch.all(model[2].bias == 5.)

    # Invalid argument types are rejected at construction time.
    with pytest.raises(TypeError):
        ConstantInit(val=1, bias='1')
    with pytest.raises(TypeError):
        ConstantInit(val=1, bias_prob='1')
    with pytest.raises(TypeError):
        ConstantInit(val=1, layer=1)
def test_xavierinit():
    """test XavierInit class."""
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Linear(1, 2))
    func = XavierInit(bias=0.1, layer='Conv2d')
    func(model)
    # BUGFIX: build each reference tensor from the *same* layer's bias.
    # The original used the other layer's bias in full_like and only
    # passed because allclose happens to broadcast (1,) against (2,).
    assert model[0].bias.allclose(torch.full_like(model[0].bias, 0.1))
    assert not model[2].bias.allclose(torch.full_like(model[2].bias, 0.1))

    # Zero everything, then re-init with gain/bias_prob on both layer types.
    constant_func = ConstantInit(val=0, bias=0, layer=['Conv2d', 'Linear'])
    func = XavierInit(gain=100, bias_prob=0.01, layer=['Conv2d', 'Linear'])
    model.apply(constant_func)
    assert torch.equal(model[0].weight, torch.full(model[0].weight.shape, 0.))
    assert torch.equal(model[2].weight, torch.full(model[2].weight.shape, 0.))
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 0.))
    assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, 0.))

    res = bias_init_with_prob(0.01)
    func(model)
    assert not torch.equal(model[0].weight, torch.full(model[0].weight.shape, 0.))
    assert not torch.equal(model[2].weight, torch.full(model[2].weight.shape, 0.))
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, res))
    assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, res))

    # '_ConvNd' wildcard layer covers Conv1d and Conv2d.
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Conv1d(1, 2, 1))
    func = ConstantInit(val=4., bias=5., layer='_ConvNd')
    func(model)
    assert torch.all(model[0].weight == 4.)
    assert torch.all(model[2].weight == 4.)
    assert torch.all(model[0].bias == 5.)
    assert torch.all(model[2].bias == 5.)

    func = XavierInit(gain=100, bias_prob=0.01, layer='_ConvNd')
    func(model)
    assert not torch.all(model[0].weight == 4.)
    assert not torch.all(model[2].weight == 4.)
    assert torch.all(model[0].bias == res)
    assert torch.all(model[2].bias == res)

    # Invalid argument types are rejected at construction time.
    with pytest.raises(TypeError):
        XavierInit(bias='0.1', layer='Conv2d')
    with pytest.raises(TypeError):
        XavierInit(bias=0.1, layer=1)
def test_normalinit():
    """test NormalInit class."""
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Linear(1, 2))
    # A tiny std makes every sampled weight effectively equal to the mean.
    func = NormalInit(mean=100, std=1e-5, bias=200, layer=['Conv2d', 'Linear'])
    func(model)
    assert model[0].weight.allclose(torch.tensor(100.))
    assert model[2].weight.allclose(torch.tensor(100.))
    assert model[0].bias.allclose(torch.tensor(200.))
    assert model[2].bias.allclose(torch.tensor(200.))

    func = NormalInit(
        mean=300, std=1e-5, bias_prob=0.01, layer=['Conv2d', 'Linear'])
    res = bias_init_with_prob(0.01)
    func(model)
    assert model[0].weight.allclose(torch.tensor(300.))
    assert model[2].weight.allclose(torch.tensor(300.))
    assert model[0].bias.allclose(torch.tensor(res))
    assert model[2].bias.allclose(torch.tensor(res))

    # '_ConvNd' wildcard layer covers Conv1d and Conv2d.
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Conv1d(1, 2, 1))
    func = NormalInit(mean=300, std=1e-5, bias_prob=0.01, layer='_ConvNd')
    func(model)
    assert model[0].weight.allclose(torch.tensor(300.))
    assert model[2].weight.allclose(torch.tensor(300.))
    assert torch.all(model[0].bias == res)
    assert torch.all(model[2].bias == res)
def test_truncnormalinit():
    """test TruncNormalInit class."""
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Linear(1, 2))
    # A tiny std with a truncation window around the mean pins every
    # sampled weight to the mean.
    func = TruncNormalInit(
        mean=100, std=1e-5, bias=200, a=0, b=200, layer=['Conv2d', 'Linear'])
    func(model)
    assert model[0].weight.allclose(torch.tensor(100.))
    assert model[2].weight.allclose(torch.tensor(100.))
    assert model[0].bias.allclose(torch.tensor(200.))
    assert model[2].bias.allclose(torch.tensor(200.))

    func = TruncNormalInit(
        mean=300, std=1e-5, a=100, b=400, bias_prob=0.01,
        layer=['Conv2d', 'Linear'])
    res = bias_init_with_prob(0.01)
    func(model)
    assert model[0].weight.allclose(torch.tensor(300.))
    assert model[2].weight.allclose(torch.tensor(300.))
    assert model[0].bias.allclose(torch.tensor(res))
    assert model[2].bias.allclose(torch.tensor(res))

    # '_ConvNd' wildcard layer covers Conv1d and Conv2d.
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Conv1d(1, 2, 1))
    func = TruncNormalInit(
        mean=300, std=1e-5, a=100, b=400, bias_prob=0.01, layer='_ConvNd')
    func(model)
    assert model[0].weight.allclose(torch.tensor(300.))
    assert model[2].weight.allclose(torch.tensor(300.))
    assert torch.all(model[0].bias == res)
    assert torch.all(model[2].bias == res)
def test_uniforminit():
    """test UniformInit class."""
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Linear(1, 2))
    # With a == b, every weight is pinned to exactly that value.
    func = UniformInit(a=1, b=1, bias=2, layer=['Conv2d', 'Linear'])
    func(model)
    assert torch.equal(model[0].weight, torch.full(model[0].weight.shape, 1.))
    assert torch.equal(model[2].weight, torch.full(model[2].weight.shape, 1.))
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 2.))
    assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, 2.))

    func = UniformInit(a=100, b=100, layer=['Conv2d', 'Linear'], bias=10)
    func(model)
    assert torch.equal(model[0].weight, torch.full(model[0].weight.shape, 100.))
    assert torch.equal(model[2].weight, torch.full(model[2].weight.shape, 100.))
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 10.))
    assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, 10.))

    # '_ConvNd' wildcard layer covers Conv1d and Conv2d.
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Conv1d(1, 2, 1))
    func = UniformInit(a=100, b=100, bias_prob=0.01, layer='_ConvNd')
    res = bias_init_with_prob(0.01)
    func(model)
    assert torch.all(model[0].weight == 100.)
    assert torch.all(model[2].weight == 100.)
    assert torch.all(model[0].bias == res)
    assert torch.all(model[2].bias == res)
def test_kaiminginit():
    """test KaimingInit class."""
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Linear(1, 2))
    func = KaimingInit(bias=0.1, layer='Conv2d')
    func(model)
    # Only the Conv2d bias may be set; the Linear bias is untouched.
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 0.1))
    assert not torch.equal(model[2].bias, torch.full(model[2].bias.shape, 0.1))

    # Zero everything, then re-init with a slope and constant bias.
    func = KaimingInit(a=100, bias=10, layer=['Conv2d', 'Linear'])
    constant_func = ConstantInit(val=0, bias=0, layer=['Conv2d', 'Linear'])
    model.apply(constant_func)
    assert torch.equal(model[0].weight, torch.full(model[0].weight.shape, 0.))
    assert torch.equal(model[2].weight, torch.full(model[2].weight.shape, 0.))
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 0.))
    assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, 0.))

    func(model)
    assert not torch.equal(model[0].weight, torch.full(model[0].weight.shape, 0.))
    assert not torch.equal(model[2].weight, torch.full(model[2].weight.shape, 0.))
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 10.))
    assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, 10.))

    # Same checks with the '_ConvNd' wildcard layer.
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Conv1d(1, 2, 1))
    func = KaimingInit(bias=0.1, layer='_ConvNd')
    func(model)
    assert torch.all(model[0].bias == 0.1)
    assert torch.all(model[2].bias == 0.1)

    func = KaimingInit(a=100, bias=10, layer='_ConvNd')
    constant_func = ConstantInit(val=0, bias=0, layer='_ConvNd')
    model.apply(constant_func)
    assert torch.equal(model[0].weight, torch.full(model[0].weight.shape, 0.))
    assert torch.equal(model[2].weight, torch.full(model[2].weight.shape, 0.))
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 0.))
    assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, 0.))

    func(model)
    assert not torch.equal(model[0].weight, torch.full(model[0].weight.shape, 0.))
    assert not torch.equal(model[2].weight, torch.full(model[2].weight.shape, 0.))
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 10.))
    assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, 10.))
def test_caffe2xavierinit():
    """test Caffe2XavierInit."""
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Linear(1, 2))
    func = Caffe2XavierInit(bias=0.1, layer='Conv2d')
    func(model)
    # Only the Conv2d bias is set; the Linear bias keeps its default init.
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 0.1))
    assert not torch.equal(model[2].bias, torch.full(model[2].bias.shape, 0.1))
class FooModule(nn.Module):
    """Tiny fixture module with one Linear and two Conv2d layers, used by
    the weight-initialization and `initialize()` tests."""

    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(1, 2)
        self.conv2d = nn.Conv2d(3, 1, 3)
        self.conv2d_2 = nn.Conv2d(3, 2, 3)
def test_pretrainedinit():
    """test PretrainedInit class."""
    modelA = FooModule()
    constant_func = ConstantInit(val=1, bias=2, layer=['Conv2d', 'Linear'])
    modelA.apply(constant_func)
    modelB = FooModule()
    modelC = nn.Linear(1, 2)
    with TemporaryDirectory() as tmpdir:
        # BUGFIX: the original created a TemporaryDirectory but never used
        # it, leaking 'modelA.pth' into the current working directory.
        # Save and load the checkpoint inside the temporary directory.
        checkpoint_path = osp.join(tmpdir, 'modelA.pth')
        torch.save(modelA.state_dict(), checkpoint_path)
        funcB = PretrainedInit(checkpoint=checkpoint_path)
        funcC = PretrainedInit(checkpoint=checkpoint_path, prefix='linear.')
        funcB(modelB)
        # Every parameter of modelB must equal modelA's constant init.
        assert torch.equal(modelB.linear.weight,
                           torch.full(modelB.linear.weight.shape, 1.))
        assert torch.equal(modelB.linear.bias,
                           torch.full(modelB.linear.bias.shape, 2.))
        assert torch.equal(modelB.conv2d.weight,
                           torch.full(modelB.conv2d.weight.shape, 1.))
        assert torch.equal(modelB.conv2d.bias,
                           torch.full(modelB.conv2d.bias.shape, 2.))
        assert torch.equal(modelB.conv2d_2.weight,
                           torch.full(modelB.conv2d_2.weight.shape, 1.))
        assert torch.equal(modelB.conv2d_2.bias,
                           torch.full(modelB.conv2d_2.bias.shape, 2.))
        # With prefix='linear.' only the linear sub-state is loaded into
        # the bare nn.Linear.
        funcC(modelC)
        assert torch.equal(modelC.weight, torch.full(modelC.weight.shape, 1.))
        assert torch.equal(modelC.bias, torch.full(modelC.bias.shape, 2.))
def test_initialize():
    """initialize() applies single/list init_cfg and per-name overrides
    without mutating the passed config, and supports Pretrained loading."""
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Linear(1, 2))
    foonet = FooModule()

    # Single config applied to both layer types; cfg must not be mutated.
    init_cfg = dict(type='Constant', layer=['Conv2d', 'Linear'], val=1, bias=2)
    initialize(model, init_cfg)
    assert torch.equal(model[0].weight, torch.full(model[0].weight.shape, 1.))
    assert torch.equal(model[2].weight, torch.full(model[2].weight.shape, 1.))
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 2.))
    assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, 2.))
    assert init_cfg == dict(
        type='Constant', layer=['Conv2d', 'Linear'], val=1, bias=2)

    # A list of configs, one per layer type.
    init_cfg = [
        dict(type='Constant', layer='Conv2d', val=1, bias=2),
        dict(type='Constant', layer='Linear', val=3, bias=4)
    ]
    initialize(model, init_cfg)
    assert torch.equal(model[0].weight, torch.full(model[0].weight.shape, 1.))
    assert torch.equal(model[2].weight, torch.full(model[2].weight.shape, 3.))
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 2.))
    assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, 4.))
    assert init_cfg == [
        dict(type='Constant', layer='Conv2d', val=1, bias=2),
        dict(type='Constant', layer='Linear', val=3, bias=4)
    ]

    # `override` re-initializes one named submodule differently.
    init_cfg = dict(
        type='Constant', val=1, bias=2, layer=['Conv2d', 'Linear'],
        override=dict(type='Constant', name='conv2d_2', val=3, bias=4))
    initialize(foonet, init_cfg)
    assert torch.equal(foonet.linear.weight,
                       torch.full(foonet.linear.weight.shape, 1.))
    assert torch.equal(foonet.linear.bias,
                       torch.full(foonet.linear.bias.shape, 2.))
    assert torch.equal(foonet.conv2d.weight,
                       torch.full(foonet.conv2d.weight.shape, 1.))
    assert torch.equal(foonet.conv2d.bias,
                       torch.full(foonet.conv2d.bias.shape, 2.))
    assert torch.equal(foonet.conv2d_2.weight,
                       torch.full(foonet.conv2d_2.weight.shape, 3.))
    assert torch.equal(foonet.conv2d_2.bias,
                       torch.full(foonet.conv2d_2.bias.shape, 4.))
    assert init_cfg == dict(
        type='Constant', val=1, bias=2, layer=['Conv2d', 'Linear'],
        override=dict(type='Constant', name='conv2d_2', val=3, bias=4))

    # Without `layer`, only the override target is touched.
    init_cfg = dict(
        type='Constant', val=5, bias=6, override=dict(name='conv2d_2'))
    initialize(foonet, init_cfg)
    assert not torch.equal(foonet.linear.weight,
                           torch.full(foonet.linear.weight.shape, 5.))
    assert not torch.equal(foonet.linear.bias,
                           torch.full(foonet.linear.bias.shape, 6.))
    assert not torch.equal(foonet.conv2d.weight,
                           torch.full(foonet.conv2d.weight.shape, 5.))
    assert not torch.equal(foonet.conv2d.bias,
                           torch.full(foonet.conv2d.bias.shape, 6.))
    assert torch.equal(foonet.conv2d_2.weight,
                       torch.full(foonet.conv2d_2.weight.shape, 5.))
    assert torch.equal(foonet.conv2d_2.bias,
                       torch.full(foonet.conv2d_2.bias.shape, 6.))
    assert init_cfg == dict(
        type='Constant', val=5, bias=6, override=dict(name='conv2d_2'))

    # Pretrained checkpoint plus a Constant override.
    modelA = FooModule()
    constant_func = ConstantInit(val=1, bias=2, layer=['Conv2d', 'Linear'])
    modelA.apply(constant_func)
    with TemporaryDirectory() as tmpdir:
        # BUGFIX: save the checkpoint inside the temporary directory
        # instead of leaking 'modelA.pth' into the working directory.
        checkpoint_path = osp.join(tmpdir, 'modelA.pth')
        torch.save(modelA.state_dict(), checkpoint_path)
        init_cfg = dict(
            type='Pretrained', checkpoint=checkpoint_path,
            override=dict(type='Constant', name='conv2d_2', val=3, bias=4))
        initialize(foonet, init_cfg)
        assert torch.equal(foonet.linear.weight,
                           torch.full(foonet.linear.weight.shape, 1.))
        assert torch.equal(foonet.linear.bias,
                           torch.full(foonet.linear.bias.shape, 2.))
        assert torch.equal(foonet.conv2d.weight,
                           torch.full(foonet.conv2d.weight.shape, 1.))
        assert torch.equal(foonet.conv2d.bias,
                           torch.full(foonet.conv2d.bias.shape, 2.))
        assert torch.equal(foonet.conv2d_2.weight,
                           torch.full(foonet.conv2d_2.weight.shape, 3.))
        assert torch.equal(foonet.conv2d_2.bias,
                           torch.full(foonet.conv2d_2.bias.shape, 4.))
        assert init_cfg == dict(
            type='Pretrained', checkpoint=checkpoint_path,
            override=dict(type='Constant', name='conv2d_2', val=3, bias=4))

    # Error cases.
    with pytest.raises(TypeError):
        # init_cfg must be a dict or list of dicts
        initialize(foonet, 'init_cfg')
    with pytest.raises(TypeError):
        # override must be a dict or list of dicts
        initialize(
            foonet,
            dict(type='Constant', val=1, bias=2, layer=['Conv2d', 'Linear'],
                 override='conv'))
    with pytest.raises(RuntimeError):
        # override names a non-existent submodule
        initialize(
            foonet,
            dict(type='Constant', val=1, bias=2, layer=['Conv2d', 'Linear'],
                 override=dict(type='Constant', name='conv2d_3', val=3, bias=4)))
    with pytest.raises(RuntimeError):
        # one valid + one invalid override name still fails
        initialize(
            foonet,
            dict(type='Constant', val=1, bias=2, layer=['Conv2d', 'Linear'],
                 override=[
                     dict(type='Constant', name='conv2d', val=3, bias=4),
                     dict(type='Constant', name='conv2d_3', val=5, bias=6)
                 ]))
    with pytest.raises(ValueError):
        # override carries init args but no `type`
        initialize(
            foonet,
            dict(type='Constant', val=1, bias=2,
                 override=dict(name='conv2d_2', val=3, bias=4)))
    with pytest.raises(ValueError):
        # override has a `type` but no target `name`
        initialize(
            foonet,
            dict(type='Constant', val=1, bias=2,
                 override=dict(type='Constant', val=3, bias=4)))
@patch('torch.__version__', torch_version)
@pytest.mark.parametrize(
    'in_w,in_h,in_channel,out_channel,kernel_size,stride,padding,dilation',
    [(10, 10, 1, 1, 3, 1, 0, 1), (20, 20, 3, 3, 5, 2, 1, 2)])
def test_conv2d(in_w, in_h, in_channel, out_channel, kernel_size, stride,
                padding, dilation):
    """The Conv2d wrapper must accept empty (batch 0) inputs, stay
    differentiable, and match ``nn.Conv2d`` exactly on normal inputs.

    CommandLine:
        xdoctest -m tests/test_wrappers.py test_conv2d
    """
    conv_kwargs = dict(stride=stride, padding=padding, dilation=dilation)
    x_empty = torch.randn(0, in_channel, in_h, in_w)
    torch.manual_seed(0)
    wrapper = Conv2d(in_channel, out_channel, kernel_size, **conv_kwargs)
    wrapper_out = wrapper(x_empty)

    x_normal = torch.randn(3, in_channel, in_h, in_w).requires_grad_(True)
    # Same seed => identical weights as the wrapper.
    torch.manual_seed(0)
    ref = nn.Conv2d(in_channel, out_channel, kernel_size, **conv_kwargs)
    ref_out = ref(x_normal)

    assert wrapper_out.shape[0] == 0
    assert wrapper_out.shape[1:] == ref_out.shape[1:]

    wrapper_out.sum().backward()
    assert wrapper.weight.grad is not None
    assert wrapper.weight.grad.shape == wrapper.weight.shape

    assert torch.equal(wrapper(x_normal), ref_out)

    # Empty input must also work in eval mode.
    x_empty = torch.randn(0, in_channel, in_h, in_w)
    wrapper = Conv2d(in_channel, out_channel, kernel_size, **conv_kwargs)
    wrapper.eval()
    wrapper(x_empty)
@patch('torch.__version__', torch_version)
@pytest.mark.parametrize(
    'in_w,in_h,in_t,in_channel,out_channel,kernel_size,stride,padding,dilation',
    [(10, 10, 10, 1, 1, 3, 1, 0, 1), (20, 20, 20, 3, 3, 5, 2, 1, 2)])
def test_conv3d(in_w, in_h, in_t, in_channel, out_channel, kernel_size,
                stride, padding, dilation):
    """The Conv3d wrapper must accept empty (batch 0) inputs, stay
    differentiable, and match ``nn.Conv3d`` exactly on normal inputs.

    CommandLine:
        xdoctest -m tests/test_wrappers.py test_conv3d
    """
    conv_kwargs = dict(stride=stride, padding=padding, dilation=dilation)
    x_empty = torch.randn(0, in_channel, in_t, in_h, in_w)
    torch.manual_seed(0)
    wrapper = Conv3d(in_channel, out_channel, kernel_size, **conv_kwargs)
    wrapper_out = wrapper(x_empty)

    x_normal = torch.randn(3, in_channel, in_t, in_h, in_w).requires_grad_(True)
    # Same seed => identical weights as the wrapper.
    torch.manual_seed(0)
    ref = nn.Conv3d(in_channel, out_channel, kernel_size, **conv_kwargs)
    ref_out = ref(x_normal)

    assert wrapper_out.shape[0] == 0
    assert wrapper_out.shape[1:] == ref_out.shape[1:]

    wrapper_out.sum().backward()
    assert wrapper.weight.grad is not None
    assert wrapper.weight.grad.shape == wrapper.weight.shape

    assert torch.equal(wrapper(x_normal), ref_out)

    # Empty input must also work in eval mode.
    x_empty = torch.randn(0, in_channel, in_t, in_h, in_w)
    wrapper = Conv3d(in_channel, out_channel, kernel_size, **conv_kwargs)
    wrapper.eval()
    wrapper(x_empty)
@patch('torch.__version__', torch_version)
@pytest.mark.parametrize(
    'in_w,in_h,in_channel,out_channel,kernel_size,stride,padding,dilation',
    [(10, 10, 1, 1, 3, 1, 0, 1), (20, 20, 3, 3, 5, 2, 1, 2)])
def test_conv_transposed_2d(in_w, in_h, in_channel, out_channel, kernel_size,
                            stride, padding, dilation):
    """The ConvTranspose2d wrapper must accept empty (batch 0) inputs and
    match ``nn.ConvTranspose2d`` exactly on normal inputs."""
    x_empty = torch.randn(0, in_channel, in_h, in_w, requires_grad=True)
    # output_padding must stay below both stride and dilation.
    op = min(stride, dilation) - 1
    if torch.__version__ == 'parrots':
        op = 0
    torch.manual_seed(0)
    wrapper = ConvTranspose2d(
        in_channel, out_channel, kernel_size, stride=stride, padding=padding,
        dilation=dilation, output_padding=op)
    wrapper_out = wrapper(x_empty)

    x_normal = torch.randn(3, in_channel, in_h, in_w)
    # Same seed => identical weights as the wrapper.
    torch.manual_seed(0)
    ref = nn.ConvTranspose2d(
        in_channel, out_channel, kernel_size, stride=stride, padding=padding,
        dilation=dilation, output_padding=op)
    ref_out = ref(x_normal)

    assert wrapper_out.shape[0] == 0
    assert wrapper_out.shape[1:] == ref_out.shape[1:]

    wrapper_out.sum().backward()
    assert wrapper.weight.grad is not None
    assert wrapper.weight.grad.shape == wrapper.weight.shape

    assert torch.equal(wrapper(x_normal), ref_out)

    # Empty input must also work in eval mode.
    x_empty = torch.randn(0, in_channel, in_h, in_w)
    wrapper = ConvTranspose2d(
        in_channel, out_channel, kernel_size, stride=stride, padding=padding,
        dilation=dilation, output_padding=op)
    wrapper.eval()
    wrapper(x_empty)
@patch('torch.__version__', torch_version)
@pytest.mark.parametrize(
    'in_w,in_h,in_t,in_channel,out_channel,kernel_size,stride,padding,dilation',
    [(10, 10, 10, 1, 1, 3, 1, 0, 1), (20, 20, 20, 3, 3, 5, 2, 1, 2)])
def test_conv_transposed_3d(in_w, in_h, in_t, in_channel, out_channel,
                            kernel_size, stride, padding, dilation):
    """The ConvTranspose3d wrapper must accept empty (batch 0) inputs and
    match ``nn.ConvTranspose3d`` exactly on normal inputs."""
    x_empty = torch.randn(0, in_channel, in_t, in_h, in_w, requires_grad=True)
    # output_padding must stay below both stride and dilation.
    op = min(stride, dilation) - 1
    torch.manual_seed(0)
    wrapper = ConvTranspose3d(
        in_channel, out_channel, kernel_size, stride=stride, padding=padding,
        dilation=dilation, output_padding=op)
    wrapper_out = wrapper(x_empty)

    x_normal = torch.randn(3, in_channel, in_t, in_h, in_w)
    # Same seed => identical weights as the wrapper.
    torch.manual_seed(0)
    ref = nn.ConvTranspose3d(
        in_channel, out_channel, kernel_size, stride=stride, padding=padding,
        dilation=dilation, output_padding=op)
    ref_out = ref(x_normal)

    assert wrapper_out.shape[0] == 0
    assert wrapper_out.shape[1:] == ref_out.shape[1:]

    wrapper_out.sum().backward()
    assert wrapper.weight.grad is not None
    assert wrapper.weight.grad.shape == wrapper.weight.shape

    assert torch.equal(wrapper(x_normal), ref_out)

    # Empty input must also work in eval mode.
    x_empty = torch.randn(0, in_channel, in_t, in_h, in_w)
    wrapper = ConvTranspose3d(
        in_channel, out_channel, kernel_size, stride=stride, padding=padding,
        dilation=dilation, output_padding=op)
    wrapper.eval()
    wrapper(x_empty)
@patch('torch.__version__', torch_version)
@pytest.mark.parametrize(
    'in_w,in_h,in_channel,out_channel,kernel_size,stride,padding,dilation',
    [(10, 10, 1, 1, 3, 1, 0, 1), (20, 20, 3, 3, 5, 2, 1, 2)])
def test_max_pool_2d(in_w, in_h, in_channel, out_channel, kernel_size, stride,
                     padding, dilation):
    """The MaxPool2d wrapper must accept empty (batch 0) inputs and match
    ``nn.MaxPool2d`` exactly on normal inputs."""
    pool_kwargs = dict(stride=stride, padding=padding, dilation=dilation)
    x_empty = torch.randn(0, in_channel, in_h, in_w, requires_grad=True)
    wrapper = MaxPool2d(kernel_size, **pool_kwargs)
    wrapper_out = wrapper(x_empty)

    x_normal = torch.randn(3, in_channel, in_h, in_w)
    ref = nn.MaxPool2d(kernel_size, **pool_kwargs)
    ref_out = ref(x_normal)

    assert wrapper_out.shape[0] == 0
    assert wrapper_out.shape[1:] == ref_out.shape[1:]
    assert torch.equal(wrapper(x_normal), ref_out)
@patch('torch.__version__', torch_version)
@pytest.mark.parametrize(
    'in_w,in_h,in_t,in_channel,out_channel,kernel_size,stride,padding,dilation',
    [(10, 10, 10, 1, 1, 3, 1, 0, 1), (20, 20, 20, 3, 3, 5, 2, 1, 2)])
@pytest.mark.skipif(
    torch.__version__ == 'parrots' and not torch.cuda.is_available(),
    reason='parrots requires CUDA support')
def test_max_pool_3d(in_w, in_h, in_t, in_channel, out_channel, kernel_size,
                     stride, padding, dilation):
    """The MaxPool3d wrapper must accept empty (batch 0) inputs and match
    ``nn.MaxPool3d`` exactly on normal inputs."""
    pool_kwargs = dict(stride=stride, padding=padding, dilation=dilation)
    x_empty = torch.randn(0, in_channel, in_t, in_h, in_w, requires_grad=True)
    wrapper = MaxPool3d(kernel_size, **pool_kwargs)
    # parrots only supports 3D max pooling on CUDA tensors.
    if torch.__version__ == 'parrots':
        x_empty = x_empty.cuda()
    wrapper_out = wrapper(x_empty)

    x_normal = torch.randn(3, in_channel, in_t, in_h, in_w)
    ref = nn.MaxPool3d(kernel_size, **pool_kwargs)
    if torch.__version__ == 'parrots':
        x_normal = x_normal.cuda()
    ref_out = ref(x_normal)

    assert wrapper_out.shape[0] == 0
    assert wrapper_out.shape[1:] == ref_out.shape[1:]
    assert torch.equal(wrapper(x_normal), ref_out)
@patch('torch.__version__', torch_version)
@pytest.mark.parametrize('in_w,in_h,in_feature,out_feature',
                         [(10, 10, 1, 1), (20, 20, 3, 3)])
def test_linear(in_w, in_h, in_feature, out_feature):
    """The Linear wrapper must accept empty (batch 0) inputs, stay
    differentiable, and match ``nn.Linear`` exactly on normal inputs."""
    x_empty = torch.randn(0, in_feature, requires_grad=True)
    torch.manual_seed(0)
    wrapper = Linear(in_feature, out_feature)
    wrapper_out = wrapper(x_empty)

    x_normal = torch.randn(3, in_feature)
    # Same seed => identical weights as the wrapper.
    torch.manual_seed(0)
    ref = nn.Linear(in_feature, out_feature)
    ref_out = ref(x_normal)

    assert wrapper_out.shape[0] == 0
    assert wrapper_out.shape[1:] == ref_out.shape[1:]

    wrapper_out.sum().backward()
    assert wrapper.weight.grad is not None
    assert wrapper.weight.grad.shape == wrapper.weight.shape

    assert torch.equal(wrapper(x_normal), ref_out)

    # Empty input must also work in eval mode.
    x_empty = torch.randn(0, in_feature)
    wrapper = Linear(in_feature, out_feature)
    wrapper.eval()
    wrapper(x_empty)
@patch('mmcv.cnn.bricks.wrappers.TORCH_VERSION', (1, 10))
def test_nn_op_forward_called():
    """With TORCH_VERSION >= (1, 9), every wrapper must delegate straight to
    the underlying ``torch.nn`` forward for empty and normal inputs alike."""

    def _check_delegates(wrapper_cls, nn_name, shapes):
        # Patch the torch.nn forward and confirm the wrapper calls it with
        # the exact input tensor.
        with patch(f'torch.nn.{nn_name}.forward') as nn_module_forward:
            for shape in shapes:
                x = torch.randn(*shape)
                wrapper = wrapper_cls(3, 2, 1)
                wrapper(x)
                nn_module_forward.assert_called_with(x)

    # IDIOM FIX: reference the wrapper classes directly instead of the
    # original `eval(m)` over class-name strings.
    for wrapper_cls, nn_name in [(Conv2d, 'Conv2d'),
                                 (ConvTranspose2d, 'ConvTranspose2d'),
                                 (MaxPool2d, 'MaxPool2d')]:
        _check_delegates(wrapper_cls, nn_name,
                         [(0, 3, 10, 10), (1, 3, 10, 10)])
    for wrapper_cls, nn_name in [(Conv3d, 'Conv3d'),
                                 (ConvTranspose3d, 'ConvTranspose3d'),
                                 (MaxPool3d, 'MaxPool3d')]:
        _check_delegates(wrapper_cls, nn_name,
                         [(0, 3, 10, 10, 10), (1, 3, 10, 10, 10)])

    with patch('torch.nn.Linear.forward') as nn_module_forward:
        for shape in [(0, 3), (1, 3)]:
            x = torch.randn(*shape)
            wrapper = Linear(3, 3)
            wrapper(x)
            nn_module_forward.assert_called_with(x)
@contextmanager
def build_temporary_directory():
    """Yield a temporary directory pre-populated with a small file tree for
    the ``FileClient.list_dir_or_file`` tests::

        .
        |-- dir1
        |   `-- text3.txt
        |-- dir2
        |   |-- dir3
        |   |   `-- text4.txt
        |   `-- img.jpg
        |-- text1.txt
        `-- text2.txt

    The directory and its contents are removed on exit.
    """
    with tempfile.TemporaryDirectory() as tmp_dir:
        root = Path(tmp_dir)
        # FIX: Path.write_text/write_bytes open AND close the file; the
        # previous `open(...).write(...)` one-liners leaked file handles.
        (root / 'text1.txt').write_text('text1')
        (root / 'text2.txt').write_text('text2')
        dir1 = root / 'dir1'
        dir1.mkdir()
        (dir1 / 'text3.txt').write_text('text3')
        dir2 = root / 'dir2'
        dir2.mkdir()
        (dir2 / 'img.jpg').write_bytes(b'img')
        dir3 = dir2 / 'dir3'
        dir3.mkdir()
        (dir3 / 'text4.txt').write_text('text4')
        yield tmp_dir
@contextmanager
def delete_and_reset_method(obj, method):
    """Temporarily remove ``method`` from ``type(obj)``; restore it on exit
    even if the wrapped body raises."""
    cls = type(obj)
    saved = deepcopy(getattr(cls, method))
    try:
        delattr(cls, method)
        yield
    finally:
        setattr(cls, method, saved)
class MockS3Client:
    """Minimal stand-in for an S3 client whose ``Get`` reads a local file."""

    def __init__(self, enable_mc=True):
        self.enable_mc = enable_mc

    def Get(self, filepath):
        # Serve the request straight from the local filesystem.
        with open(filepath, 'rb') as f:
            return f.read()
class MockPetrelClient:
    """Minimal stand-in for a petrel client backed by the local filesystem."""

    def __init__(self, enable_mc=True, enable_multi_cluster=False):
        self.enable_mc = enable_mc
        self.enable_multi_cluster = enable_multi_cluster

    def Get(self, filepath):
        with open(filepath, 'rb') as f:
            return f.read()

    def put(self):
        pass

    def delete(self):
        pass

    def contains(self):
        pass

    def isdir(self):
        pass

    def list(self, dir_path):
        # Yield plain names for visible files and 'name/' for directories,
        # mirroring the petrel listing convention.
        for entry in os.scandir(dir_path):
            if not entry.name.startswith('.') and entry.is_file():
                yield entry.name
            elif osp.isdir(entry.path):
                yield entry.name + '/'
class MockMemcachedClient:
    """Minimal stand-in for a memcached client: ``Get`` fills
    ``buffer.content`` from a local file instead of a cache server."""

    def __init__(self, server_list_cfg, client_cfg):
        # Config arguments are accepted for interface parity but unused.
        pass

    def Get(self, filepath, buffer):
        with open(filepath, 'rb') as f:
            buffer.content = f.read()
class TestFileClient():
    """Exercises every ``FileClient`` storage backend: disk, ceph, petrel,
    memcached, lmdb and http, plus backend registration/inference helpers.

    Remote backends are replaced by the Mock* classes above via
    ``unittest.mock.patch`` so no network/service is needed.
    """

    @classmethod
    def setup_class(cls):
        # Shared fixtures: a small color image and a text file under tests/data.
        cls.test_data_dir = (Path(__file__).parent / 'data')
        cls.img_path = (cls.test_data_dir / 'color.jpg')
        cls.img_shape = (300, 400, 3)
        cls.text_path = (cls.test_data_dir / 'filelist.txt')

    def test_error(self):
        # Unknown backend names are rejected.
        with pytest.raises(ValueError):
            FileClient('hadoop')

    def test_disk_backend(self):
        disk_backend = FileClient('disk')
        assert (disk_backend.name == 'HardDiskBackend')
        assert disk_backend.allow_symlink
        # `get` accepts both Path and str and returns the raw bytes
        img_bytes = disk_backend.get(self.img_path)
        img = mmcv.imfrombytes(img_bytes)
        assert (self.img_path.open('rb').read() == img_bytes)
        assert (img.shape == self.img_shape)
        img_bytes = disk_backend.get(str(self.img_path))
        img = mmcv.imfrombytes(img_bytes)
        assert (self.img_path.open('rb').read() == img_bytes)
        assert (img.shape == self.img_shape)
        # `get_text`
        value_buf = disk_backend.get_text(self.text_path)
        assert (self.text_path.open('r').read() == value_buf)
        value_buf = disk_backend.get_text(str(self.text_path))
        assert (self.text_path.open('r').read() == value_buf)
        with tempfile.TemporaryDirectory() as tmp_dir:
            # `put` writes bytes; intermediate directories are created
            filepath1 = (Path(tmp_dir) / 'test.jpg')
            disk_backend.put(b'disk', filepath1)
            assert (filepath1.open('rb').read() == b'disk')
            _filepath1 = ((Path(tmp_dir) / 'not_existed_dir1') / 'test.jpg')
            disk_backend.put(b'disk', _filepath1)
            assert (_filepath1.open('rb').read() == b'disk')
            # `put_text` writes str; intermediate directories are created
            filepath2 = (Path(tmp_dir) / 'test.txt')
            disk_backend.put_text('disk', filepath2)
            assert (filepath2.open('r').read() == 'disk')
            _filepath2 = ((Path(tmp_dir) / 'not_existed_dir2') / 'test.txt')
            disk_backend.put_text('disk', _filepath2)
            assert (_filepath2.open('r').read() == 'disk')
            # `isfile`, `remove`, `exists`
            assert disk_backend.isfile(filepath2)
            assert (not disk_backend.isfile((Path(tmp_dir) / 'not/existed/path')))
            disk_backend.remove(filepath2)
            assert (not disk_backend.exists(filepath2))
            # `get_local_path` of a local file is the file itself and it
            # survives leaving the with-block
            with disk_backend.get_local_path(filepath1) as path:
                assert (str(filepath1) == path)
            assert osp.isfile(filepath1)
        # `join_path` behaves like os.path.join
        disk_dir = '/path/of/your/directory'
        assert (disk_backend.join_path(disk_dir, 'file') == osp.join(disk_dir, 'file'))
        assert (disk_backend.join_path(disk_dir, 'dir', 'file') == osp.join(disk_dir, 'dir', 'file'))
        # `list_dir_or_file` over the fixture tree built by
        # build_temporary_directory (dir1/text3.txt, dir2/dir3/text4.txt, ...)
        with build_temporary_directory() as tmp_dir:
            # non-recursive: top-level dirs and files
            assert (set(disk_backend.list_dir_or_file(tmp_dir)) == set(['dir1', 'dir2', 'text1.txt', 'text2.txt']))
            # recursive: everything
            assert (set(disk_backend.list_dir_or_file(tmp_dir, recursive=True)) == set(['dir1', osp.join('dir1', 'text3.txt'), 'dir2', osp.join('dir2', 'dir3'), osp.join('dir2', 'dir3', 'text4.txt'), osp.join('dir2', 'img.jpg'), 'text1.txt', 'text2.txt']))
            # directories only
            assert (set(disk_backend.list_dir_or_file(tmp_dir, list_file=False)) == set(['dir1', 'dir2']))
            # `suffix` may not be combined with directory listing
            with pytest.raises(TypeError, match='`suffix` should be None when `list_dir` is True'):
                disk_backend.client.list_dir_or_file(tmp_dir, list_file=False, suffix='.txt')
            assert (set(disk_backend.list_dir_or_file(tmp_dir, list_file=False, recursive=True)) == set(['dir1', 'dir2', osp.join('dir2', 'dir3')]))
            # files only, optionally filtered by suffix
            assert (set(disk_backend.list_dir_or_file(tmp_dir, list_dir=False)) == set(['text1.txt', 'text2.txt']))
            assert (set(disk_backend.list_dir_or_file(tmp_dir, list_dir=False, recursive=True)) == set([osp.join('dir1', 'text3.txt'), osp.join('dir2', 'dir3', 'text4.txt'), osp.join('dir2', 'img.jpg'), 'text1.txt', 'text2.txt']))
            assert (set(disk_backend.list_dir_or_file(tmp_dir, list_dir=False, suffix='.txt')) == set(['text1.txt', 'text2.txt']))
            assert (set(disk_backend.list_dir_or_file(tmp_dir, list_dir=False, suffix=('.txt', '.jpg'))) == set(['text1.txt', 'text2.txt']))
            # suffix must be str or tuple, not list
            with pytest.raises(TypeError, match='`suffix` must be a string or tuple of strings'):
                disk_backend.client.list_dir_or_file(tmp_dir, list_dir=False, suffix=['.txt', '.jpg'])
            assert (set(disk_backend.list_dir_or_file(tmp_dir, list_dir=False, suffix='.txt', recursive=True)) == set([osp.join('dir1', 'text3.txt'), osp.join('dir2', 'dir3', 'text4.txt'), 'text1.txt', 'text2.txt']))
            assert (set(disk_backend.list_dir_or_file(tmp_dir, list_dir=False, suffix=('.txt', '.jpg'), recursive=True)) == set([osp.join('dir1', 'text3.txt'), osp.join('dir2', 'dir3', 'text4.txt'), osp.join('dir2', 'img.jpg'), 'text1.txt', 'text2.txt']))

    @patch('ceph.S3Client', MockS3Client)
    def test_ceph_backend(self):
        ceph_backend = FileClient('ceph')
        assert (not ceph_backend.allow_symlink)
        # text reading is not supported by the ceph backend
        with pytest.raises(NotImplementedError):
            ceph_backend.get_text(self.text_path)
        with pytest.raises(NotImplementedError):
            ceph_backend.get_text(str(self.text_path))
        # `get` works for Path and str
        img_bytes = ceph_backend.get(self.img_path)
        img = mmcv.imfrombytes(img_bytes)
        assert (img.shape == self.img_shape)
        img_bytes = ceph_backend.get(str(self.img_path))
        img = mmcv.imfrombytes(img_bytes)
        assert (img.shape == self.img_shape)
        # path_mapping must be a dict or None
        with pytest.raises(AssertionError):
            FileClient('ceph', path_mapping=1)
        # local prefix is remapped to the ceph path before Get is called
        ceph_path = 's3://user/data'
        ceph_backend = FileClient('ceph', path_mapping={str(self.test_data_dir): ceph_path})
        ceph_backend.client._client.Get = MagicMock(return_value=ceph_backend.client._client.Get(self.img_path))
        img_bytes = ceph_backend.get(self.img_path)
        img = mmcv.imfrombytes(img_bytes)
        assert (img.shape == self.img_shape)
        ceph_backend.client._client.Get.assert_called_with(str(self.img_path).replace(str(self.test_data_dir), ceph_path))

    @patch('petrel_client.client.Client', MockPetrelClient)
    @pytest.mark.parametrize('backend,prefix', [('petrel', None), (None, 's3')])
    def test_petrel_backend(self, backend, prefix):
        petrel_backend = FileClient(backend=backend, prefix=prefix)
        assert (not petrel_backend.allow_symlink)
        # `get` works for Path and str
        img_bytes = petrel_backend.get(self.img_path)
        img = mmcv.imfrombytes(img_bytes)
        assert (img.shape == self.img_shape)
        img_bytes = petrel_backend.get(str(self.img_path))
        img = mmcv.imfrombytes(img_bytes)
        assert (img.shape == self.img_shape)
        with pytest.raises(AssertionError):
            FileClient('petrel', path_mapping=1)
        # `_map_path` applies path_mapping
        petrel_dir = 's3://user/data'
        petrel_backend = FileClient('petrel', path_mapping={str(self.test_data_dir): petrel_dir})
        assert (petrel_backend.client._map_path(str(self.img_path)) == str(self.img_path).replace(str(self.test_data_dir), petrel_dir))
        # `_format_path` normalizes Windows-style separators
        petrel_path = f'{petrel_dir}/test.jpg'
        petrel_backend = FileClient('petrel')
        assert (petrel_backend.client._format_path('s3://user\\data\\test.jpg') == petrel_path)
        # `get`
        with patch.object(petrel_backend.client._client, 'Get', return_value=b'petrel') as mock_get:
            assert (petrel_backend.get(petrel_path) == b'petrel')
            mock_get.assert_called_once_with(petrel_path)
        # `get_text` decodes the bytes returned by Get
        with patch.object(petrel_backend.client._client, 'Get', return_value=b'petrel') as mock_get:
            assert (petrel_backend.get_text(petrel_path) == 'petrel')
            mock_get.assert_called_once_with(petrel_path)
        # `put`
        with patch.object(petrel_backend.client._client, 'put') as mock_put:
            petrel_backend.put(b'petrel', petrel_path)
            mock_put.assert_called_once_with(petrel_path, b'petrel')
        # `put_text` encodes before putting
        with patch.object(petrel_backend.client._client, 'put') as mock_put:
            petrel_backend.put_text('petrel', petrel_path)
            mock_put.assert_called_once_with(petrel_path, b'petrel')
        # `remove` requires the underlying client to expose `delete`
        assert has_method(petrel_backend.client._client, 'delete')
        with delete_and_reset_method(petrel_backend.client._client, 'delete'):
            assert (not has_method(petrel_backend.client._client, 'delete'))
            with pytest.raises(NotImplementedError):
                petrel_backend.remove(petrel_path)
        with patch.object(petrel_backend.client._client, 'delete') as mock_delete:
            petrel_backend.remove(petrel_path)
            mock_delete.assert_called_once_with(petrel_path)
        # `exists` requires both `contains` and `isdir`
        assert has_method(petrel_backend.client._client, 'contains')
        assert has_method(petrel_backend.client._client, 'isdir')
        with delete_and_reset_method(petrel_backend.client._client, 'contains'), delete_and_reset_method(petrel_backend.client._client, 'isdir'):
            assert (not has_method(petrel_backend.client._client, 'contains'))
            assert (not has_method(petrel_backend.client._client, 'isdir'))
            with pytest.raises(NotImplementedError):
                petrel_backend.exists(petrel_path)
        with patch.object(petrel_backend.client._client, 'contains', return_value=True) as mock_contains:
            assert petrel_backend.exists(petrel_path)
            mock_contains.assert_called_once_with(petrel_path)
        # `isdir` requires `isdir`
        assert has_method(petrel_backend.client._client, 'isdir')
        with delete_and_reset_method(petrel_backend.client._client, 'isdir'):
            assert (not has_method(petrel_backend.client._client, 'isdir'))
            with pytest.raises(NotImplementedError):
                petrel_backend.isdir(petrel_path)
        with patch.object(petrel_backend.client._client, 'isdir', return_value=True) as mock_isdir:
            assert petrel_backend.isdir(petrel_dir)
            mock_isdir.assert_called_once_with(petrel_dir)
        # `isfile` requires `contains`
        assert has_method(petrel_backend.client._client, 'contains')
        with delete_and_reset_method(petrel_backend.client._client, 'contains'):
            assert (not has_method(petrel_backend.client._client, 'contains'))
            with pytest.raises(NotImplementedError):
                petrel_backend.isfile(petrel_path)
        with patch.object(petrel_backend.client._client, 'contains', return_value=True) as mock_contains:
            assert petrel_backend.isfile(petrel_path)
            mock_contains.assert_called_once_with(petrel_path)
        # `join_path` uses '/' and avoids duplicated separators
        assert (petrel_backend.join_path(petrel_dir, 'file') == f'{petrel_dir}/file')
        assert (petrel_backend.join_path(f'{petrel_dir}/', 'file') == f'{petrel_dir}/file')
        assert (petrel_backend.join_path(petrel_dir, 'dir', 'file') == f'{petrel_dir}/dir/file')
        # `get_local_path` downloads to a temp file that is removed on exit
        with patch.object(petrel_backend.client._client, 'Get', return_value=b'petrel') as mock_get, patch.object(petrel_backend.client._client, 'contains', return_value=True) as mock_contains:
            with petrel_backend.get_local_path(petrel_path) as path:
                assert (Path(path).open('rb').read() == b'petrel')
            assert (not osp.isfile(path))
            mock_get.assert_called_once_with(petrel_path)
            mock_contains.assert_called_once_with(petrel_path)
        # `list_dir_or_file` requires the client to expose `list`
        assert has_method(petrel_backend.client._client, 'list')
        with delete_and_reset_method(petrel_backend.client._client, 'list'):
            assert (not has_method(petrel_backend.client._client, 'list'))
            with pytest.raises(NotImplementedError):
                list(petrel_backend.list_dir_or_file(petrel_dir))
        # same listing battery as the disk backend, but petrel paths always
        # use '/' as separator
        with build_temporary_directory() as tmp_dir:
            assert (set(petrel_backend.list_dir_or_file(tmp_dir)) == set(['dir1', 'dir2', 'text1.txt', 'text2.txt']))
            assert (set(petrel_backend.list_dir_or_file(tmp_dir, recursive=True)) == set(['dir1', '/'.join(('dir1', 'text3.txt')), 'dir2', '/'.join(('dir2', 'dir3')), '/'.join(('dir2', 'dir3', 'text4.txt')), '/'.join(('dir2', 'img.jpg')), 'text1.txt', 'text2.txt']))
            assert (set(petrel_backend.list_dir_or_file(tmp_dir, list_file=False)) == set(['dir1', 'dir2']))
            with pytest.raises(TypeError, match='`list_dir` should be False when `suffix` is not None'):
                petrel_backend.client.list_dir_or_file(tmp_dir, list_file=False, suffix='.txt')
            assert (set(petrel_backend.list_dir_or_file(tmp_dir, list_file=False, recursive=True)) == set(['dir1', 'dir2', '/'.join(('dir2', 'dir3'))]))
            assert (set(petrel_backend.list_dir_or_file(tmp_dir, list_dir=False)) == set(['text1.txt', 'text2.txt']))
            assert (set(petrel_backend.list_dir_or_file(tmp_dir, list_dir=False, recursive=True)) == set(['/'.join(('dir1', 'text3.txt')), '/'.join(('dir2', 'dir3', 'text4.txt')), '/'.join(('dir2', 'img.jpg')), 'text1.txt', 'text2.txt']))
            assert (set(petrel_backend.list_dir_or_file(tmp_dir, list_dir=False, suffix='.txt')) == set(['text1.txt', 'text2.txt']))
            assert (set(petrel_backend.list_dir_or_file(tmp_dir, list_dir=False, suffix=('.txt', '.jpg'))) == set(['text1.txt', 'text2.txt']))
            with pytest.raises(TypeError, match='`suffix` must be a string or tuple of strings'):
                petrel_backend.client.list_dir_or_file(tmp_dir, list_dir=False, suffix=['.txt', '.jpg'])
            assert (set(petrel_backend.list_dir_or_file(tmp_dir, list_dir=False, suffix='.txt', recursive=True)) == set(['/'.join(('dir1', 'text3.txt')), '/'.join(('dir2', 'dir3', 'text4.txt')), 'text1.txt', 'text2.txt']))
            assert (set(petrel_backend.list_dir_or_file(tmp_dir, list_dir=False, suffix=('.txt', '.jpg'), recursive=True)) == set(['/'.join(('dir1', 'text3.txt')), '/'.join(('dir2', 'dir3', 'text4.txt')), '/'.join(('dir2', 'img.jpg')), 'text1.txt', 'text2.txt']))

    @patch('mc.MemcachedClient.GetInstance', MockMemcachedClient)
    @patch('mc.pyvector', MagicMock)
    @patch('mc.ConvertBuffer', (lambda x: x.content))
    def test_memcached_backend(self):
        mc_cfg = dict(server_list_cfg='', client_cfg='', sys_path=None)
        mc_backend = FileClient('memcached', **mc_cfg)
        assert (not mc_backend.allow_symlink)
        # text reading is not supported
        with pytest.raises(NotImplementedError):
            mc_backend.get_text(self.text_path)
        with pytest.raises(NotImplementedError):
            mc_backend.get_text(str(self.text_path))
        # `get` works for Path and str
        img_bytes = mc_backend.get(self.img_path)
        img = mmcv.imfrombytes(img_bytes)
        assert (img.shape == self.img_shape)
        img_bytes = mc_backend.get(str(self.img_path))
        img = mmcv.imfrombytes(img_bytes)
        assert (img.shape == self.img_shape)

    def test_lmdb_backend(self):
        lmdb_path = (self.test_data_dir / 'demo.lmdb')
        # db_path may be a Path
        lmdb_backend = FileClient('lmdb', db_path=lmdb_path)
        assert (not lmdb_backend.allow_symlink)
        with pytest.raises(NotImplementedError):
            lmdb_backend.get_text(self.text_path)
        img_bytes = lmdb_backend.get('baboon')
        img = mmcv.imfrombytes(img_bytes)
        assert (img.shape == (120, 125, 3))
        # db_path may also be a str
        lmdb_backend = FileClient('lmdb', db_path=str(lmdb_path))
        with pytest.raises(NotImplementedError):
            lmdb_backend.get_text(str(self.text_path))
        img_bytes = lmdb_backend.get('baboon')
        img = mmcv.imfrombytes(img_bytes)
        assert (img.shape == (120, 125, 3))

    @pytest.mark.parametrize('backend,prefix', [('http', None), (None, 'http')])
    def test_http_backend(self, backend, prefix):
        http_backend = FileClient(backend=backend, prefix=prefix)
        img_url = 'https://raw.githubusercontent.com/open-mmlab/mmcv/master/tests/data/color.jpg'
        text_url = 'https://raw.githubusercontent.com/open-mmlab/mmcv/master/tests/data/filelist.txt'
        assert (not http_backend.allow_symlink)
        # local paths are not valid URLs
        with pytest.raises(Exception):
            http_backend.get(self.img_path)
        with pytest.raises(Exception):
            http_backend.get(str(self.img_path))
        with pytest.raises(Exception):
            http_backend.get_text(self.text_path)
        with pytest.raises(Exception):
            http_backend.get_text(str(self.text_path))
        # `get` / `get_text` from real URLs (network access required)
        img_bytes = http_backend.get(img_url)
        img = mmcv.imfrombytes(img_bytes)
        assert (img.shape == self.img_shape)
        value_buf = http_backend.get_text(text_url)
        assert (self.text_path.open('r').read() == value_buf)
        # `get_local_path` downloads to a temp file removed on exit
        with http_backend.get_local_path(img_url) as path:
            assert (mmcv.imread(path).shape == self.img_shape)
        assert (not osp.isfile(path))

    def test_new_magic_method(self):
        # Instances of the same backend are cached and shared ...

        class DummyBackend1(BaseStorageBackend):

            def get(self, filepath):
                return filepath

            def get_text(self, filepath, encoding='utf-8'):
                return filepath

        FileClient.register_backend('dummy_backend', DummyBackend1)
        client1 = FileClient(backend='dummy_backend')
        client2 = FileClient(backend='dummy_backend')
        assert (client1 is client2)

        # ... but re-registering (force=True) invalidates the cached instance.

        class DummyBackend2(BaseStorageBackend):

            def get(self, filepath):
                pass

            def get_text(self, filepath):
                pass

        FileClient.register_backend('dummy_backend', DummyBackend2, force=True)
        client3 = FileClient(backend='dummy_backend')
        client4 = FileClient(backend='dummy_backend')
        assert (client3 is not client4)

    def test_parse_uri_prefix(self):
        # input must be a str or Path
        with pytest.raises(AssertionError):
            FileClient.parse_uri_prefix(None)
        with pytest.raises(AssertionError):
            FileClient.parse_uri_prefix([])
        # local paths have no prefix
        assert (FileClient.parse_uri_prefix(self.img_path) is None)
        assert (FileClient.parse_uri_prefix(str(self.img_path)) is None)
        img_url = 'https://raw.githubusercontent.com/open-mmlab/mmcv/master/tests/data/color.jpg'
        assert (FileClient.parse_uri_prefix(img_url) == 'https')
        img_url = 's3://your_bucket/img.png'
        assert (FileClient.parse_uri_prefix(img_url) == 's3')
        # a cluster name before the scheme is ignored
        img_url = 'clusterName:s3://your_bucket/img.png'
        assert (FileClient.parse_uri_prefix(img_url) == 's3')

    def test_infer_client(self):
        # the backend can be inferred from explicit args or from the uri
        file_client_args = {'backend': 'disk'}
        client = FileClient.infer_client(file_client_args)
        assert (client.name == 'HardDiskBackend')
        client = FileClient.infer_client(uri=self.img_path)
        assert (client.name == 'HardDiskBackend')
        file_client_args = {'backend': 'petrel'}
        client = FileClient.infer_client(file_client_args)
        assert (client.name == 'PetrelBackend')
        uri = 's3://user_data'
        client = FileClient.infer_client(uri=uri)
        assert (client.name == 'PetrelBackend')

    def test_register_backend(self):
        # name must be a string
        with pytest.raises(TypeError):

            class TestClass1():
                pass
            FileClient.register_backend(1, TestClass1)
        # backend must be a class
        with pytest.raises(TypeError):
            FileClient.register_backend('int', 0)
        # backend must subclass BaseStorageBackend
        with pytest.raises(TypeError):

            class TestClass1():
                pass
            FileClient.register_backend('TestClass1', TestClass1)

        class ExampleBackend(BaseStorageBackend):

            def get(self, filepath):
                return filepath

            def get_text(self, filepath, encoding='utf-8'):
                return filepath

        FileClient.register_backend('example', ExampleBackend)
        example_backend = FileClient('example')
        assert (example_backend.get(self.img_path) == self.img_path)
        assert (example_backend.get_text(self.text_path) == self.text_path)
        assert ('example' in FileClient._backends)

        class Example2Backend(BaseStorageBackend):

            def get(self, filepath):
                return b'bytes2'

            def get_text(self, filepath, encoding='utf-8'):
                return 'text2'

        # duplicate name requires force=True
        with pytest.raises(KeyError):
            FileClient.register_backend('example', Example2Backend)
        FileClient.register_backend('example', Example2Backend, force=True)
        example_backend = FileClient('example')
        assert (example_backend.get(self.img_path) == b'bytes2')
        assert (example_backend.get_text(self.text_path) == 'text2')

        # registration also works as a class decorator
        @FileClient.register_backend(name='example3')
        class Example3Backend(BaseStorageBackend):

            def get(self, filepath):
                return b'bytes3'

            def get_text(self, filepath, encoding='utf-8'):
                return 'text3'

        example_backend = FileClient('example3')
        assert (example_backend.get(self.img_path) == b'bytes3')
        assert (example_backend.get_text(self.text_path) == 'text3')
        assert ('example3' in FileClient._backends)

        # decorator form also rejects duplicates without force=True
        with pytest.raises(KeyError):

            @FileClient.register_backend(name='example3')
            class Example4Backend(BaseStorageBackend):

                def get(self, filepath):
                    return b'bytes4'

                def get_text(self, filepath, encoding='utf-8'):
                    return 'text4'

        @FileClient.register_backend(name='example3', force=True)
        class Example5Backend(BaseStorageBackend):

            def get(self, filepath):
                return b'bytes5'

            def get_text(self, filepath, encoding='utf-8'):
                return 'text5'

        example_backend = FileClient('example3')
        assert (example_backend.get(self.img_path) == b'bytes5')
        assert (example_backend.get_text(self.text_path) == 'text5')

        class Example6Backend(BaseStorageBackend):

            def get(self, filepath):
                return b'bytes6'

            def get_text(self, filepath, encoding='utf-8'):
                return 'text6'

        # a backend may be registered with a single uri prefix ...
        FileClient.register_backend('example4', Example6Backend, force=True, prefixes='example4_prefix')
        example_backend = FileClient('example4')
        assert (example_backend.get(self.img_path) == b'bytes6')
        assert (example_backend.get_text(self.text_path) == 'text6')
        example_backend = FileClient(prefix='example4_prefix')
        assert (example_backend.get(self.img_path) == b'bytes6')
        assert (example_backend.get_text(self.text_path) == 'text6')
        example_backend = FileClient('example4', prefix='example4_prefix')
        assert (example_backend.get(self.img_path) == b'bytes6')
        assert (example_backend.get_text(self.text_path) == 'text6')

        class Example7Backend(BaseStorageBackend):

            def get(self, filepath):
                return b'bytes7'

            def get_text(self, filepath, encoding='utf-8'):
                return 'text7'

        # ... or with a list of prefixes
        FileClient.register_backend('example5', Example7Backend, force=True, prefixes=['example5_prefix1', 'example5_prefix2'])
        example_backend = FileClient('example5')
        assert (example_backend.get(self.img_path) == b'bytes7')
        assert (example_backend.get_text(self.text_path) == 'text7')
        example_backend = FileClient(prefix='example5_prefix1')
        assert (example_backend.get(self.img_path) == b'bytes7')
        assert (example_backend.get_text(self.text_path) == 'text7')
        example_backend = FileClient(prefix='example5_prefix2')
        assert (example_backend.get(self.img_path) == b'bytes7')
        assert (example_backend.get_text(self.text_path) == 'text7')

        class Example8Backend(BaseStorageBackend):

            def get(self, filepath):
                return b'bytes8'

            def get_text(self, filepath, encoding='utf-8'):
                return 'text8'

        # the backend name wins when both backend and prefix are given
        FileClient.register_backend('example6', Example8Backend, force=True, prefixes='example6_prefix')
        example_backend = FileClient('example6')
        assert (example_backend.get(self.img_path) == b'bytes8')
        assert (example_backend.get_text(self.text_path) == 'text8')
        example_backend = FileClient('example6', prefix='example4_prefix')
        assert (example_backend.get(self.img_path) == b'bytes8')
        assert (example_backend.get_text(self.text_path) == 'text8')
def _test_handler(file_format, test_obj, str_checker, mode='r+'):
    """Round-trip ``test_obj`` through mmcv.dump/mmcv.load for one format.

    ``str_checker`` validates the string form of the dump; ``mode`` selects
    text vs binary handling ('b' in mode means a binary format).
    """
    # dump to a string and let the caller validate its shape
    dumped = mmcv.dump(test_obj, file_format=file_format)
    str_checker(dumped)

    # dump to / load from an explicit filename
    tmp_filename = osp.join(tempfile.gettempdir(), 'mmcv_test_dump')
    mmcv.dump(test_obj, tmp_filename, file_format=file_format)
    assert osp.isfile(tmp_filename)
    assert mmcv.load(tmp_filename, file_format=file_format) == test_obj
    os.remove(tmp_filename)

    # dumping to a petrel uri must route through the petrel backend
    method = 'put' if 'b' in mode else 'put_text'
    with patch.object(PetrelBackend, method, return_value=None) as mock_method:
        filename = 's3://path/of/your/file'
        mmcv.dump(test_obj, filename, file_format=file_format)
        mock_method.assert_called()

    # dump to / load from an open file object
    with tempfile.NamedTemporaryFile(mode, delete=False) as f:
        tmp_filename = f.name
        mmcv.dump(test_obj, f, file_format=file_format)
    assert osp.isfile(tmp_filename)
    with open(tmp_filename, mode) as f:
        assert mmcv.load(f, file_format=file_format) == test_obj
    os.remove(tmp_filename)

    # the format may also be inferred from the file extension
    tmp_filename = osp.join(tempfile.gettempdir(), 'mmcv_test_dump.' + file_format)
    mmcv.dump(test_obj, tmp_filename)
    assert osp.isfile(tmp_filename)
    assert mmcv.load(tmp_filename) == test_obj
    os.remove(tmp_filename)
def test_json():
    """JSON round trip; dict key order in the dumped string may vary."""

    def json_checker(dump_str):
        assert dump_str in ('[{"a": "abc", "b": 1}, 2, "c"]',
                            '[{"b": 1, "a": "abc"}, 2, "c"]')

    _test_handler('json', obj_for_test, json_checker)
def test_yaml():
    """YAML round trip; flow/block style and key order may vary."""

    def yaml_checker(dump_str):
        assert dump_str in ('- {a: abc, b: 1}\n- 2\n- c\n',
                            '- {b: 1, a: abc}\n- 2\n- c\n',
                            '- a: abc\n b: 1\n- 2\n- c\n',
                            '- b: 1\n a: abc\n- 2\n- c\n')

    _test_handler('yaml', obj_for_test, yaml_checker)
def test_pickle():
    """Pickle round trip; the dump is binary, so mode is 'rb+'."""

    def pickle_checker(dump_str):
        import pickle

        # The dumped bytes must unpickle back to the original object.
        assert pickle.loads(dump_str) == obj_for_test

    _test_handler('pickle', obj_for_test, pickle_checker, mode='rb+')
def test_exception():
    """dump must fail without a target/format and for unknown extensions."""
    payload = [{'a': 'abc', 'b': 1}, 2, 'c']
    # no filename and no explicit format -> handler cannot be inferred
    with pytest.raises(ValueError):
        mmcv.dump(payload)
    # '.txt' has no registered handler by default
    with pytest.raises(TypeError):
        mmcv.dump(payload, 'tmp.txt')
def test_register_handler():
    """Custom handlers can be registered for one or several file suffixes."""

    @mmcv.register_handler('txt')
    class TxtHandler1(mmcv.BaseFileHandler):

        def load_from_fileobj(self, file):
            return file.read()

        def dump_to_fileobj(self, obj, file):
            file.write(str(obj))

        def dump_to_str(self, obj, **kwargs):
            return str(obj)

    @mmcv.register_handler(['txt1', 'txt2'])
    class TxtHandler2(mmcv.BaseFileHandler):

        def load_from_fileobj(self, file):
            return file.read()

        def dump_to_fileobj(self, obj, file):
            # prepend a newline so this handler's output is distinguishable
            file.write('\n')
            file.write(str(obj))

        def dump_to_str(self, obj, **kwargs):
            return str(obj)

    # loading a .txt file goes through TxtHandler1
    loaded = mmcv.load(osp.join(osp.dirname(__file__), 'data/filelist.txt'))
    assert loaded == '1.jpg\n2.jpg\n3.jpg\n4.jpg\n5.jpg'
    # dumping to a .txt2 file goes through TxtHandler2
    dump_path = osp.join(tempfile.gettempdir(), 'mmcv_test.txt2')
    mmcv.dump(loaded, dump_path)
    with open(dump_path, 'r') as f:
        written = f.read()
    os.remove(dump_path)
    assert written == '\n' + loaded
def test_list_from_file():
    """list_from_file: local parsing options plus remote backend routing."""
    filename = osp.join(osp.dirname(__file__), 'data/filelist.txt')
    # plain read
    assert mmcv.list_from_file(filename) == ['1.jpg', '2.jpg', '3.jpg', '4.jpg', '5.jpg']
    # prefix is prepended to every entry
    assert mmcv.list_from_file(filename, prefix='a/') == ['a/1.jpg', 'a/2.jpg', 'a/3.jpg', 'a/4.jpg', 'a/5.jpg']
    # offset skips leading lines
    assert mmcv.list_from_file(filename, offset=2) == ['3.jpg', '4.jpg', '5.jpg']
    # max_num caps the number of entries
    assert mmcv.list_from_file(filename, max_num=2) == ['1.jpg', '2.jpg']
    # offset and max_num combine (max_num is truncated at EOF)
    assert mmcv.list_from_file(filename, offset=3, max_num=3) == ['4.jpg', '5.jpg']

    # remote backends: patch get_text and check parsing plus the three ways
    # of selecting the backend (explicit name, prefix arg, uri inference)
    remote_cases = [
        (HTTPBackend, 'http://path/of/your/file', 'http', 'http'),
        (PetrelBackend, 's3://path/of/your/file', 'petrel', 's3'),
    ]
    expected = ['1.jpg', '2.jpg', '3.jpg']
    for backend_cls, url, backend_name, prefix in remote_cases:
        with patch.object(backend_cls, 'get_text', return_value='1.jpg\n2.jpg\n3.jpg'):
            assert mmcv.list_from_file(url, file_client_args={'backend': backend_name}) == expected
            assert mmcv.list_from_file(url, file_client_args={'prefix': prefix}) == expected
            assert mmcv.list_from_file(url) == expected
def test_dict_from_file():
    """dict_from_file: local parsing (incl. key_type) plus remote routing."""
    filename = osp.join(osp.dirname(__file__), 'data/mapping.txt')
    expected = {'1': 'cat', '2': ['dog', 'cow'], '3': 'panda'}
    # default: str keys; multi-value lines become lists
    assert mmcv.dict_from_file(filename) == expected
    # key_type converts the first column
    assert mmcv.dict_from_file(filename, key_type=int) == {1: 'cat', 2: ['dog', 'cow'], 3: 'panda'}

    # remote backends: patch get_text and check parsing plus the three ways
    # of selecting the backend (explicit name, prefix arg, uri inference)
    remote_cases = [
        (HTTPBackend, 'http://path/of/your/file', 'http', 'http'),
        (PetrelBackend, 's3://path/of/your/file', 'petrel', 's3'),
    ]
    for backend_cls, url, backend_name, prefix in remote_cases:
        with patch.object(backend_cls, 'get_text', return_value='1 cat\n2 dog cow\n3 panda'):
            assert mmcv.dict_from_file(url, file_client_args={'backend': backend_name}) == expected
            assert mmcv.dict_from_file(url, file_client_args={'prefix': prefix}) == expected
            assert mmcv.dict_from_file(url) == expected
@pytest.mark.skipif((torch is None), reason='requires torch library')
def test_tensor2imgs():
    # Input validation and tensor->image conversion of mmcv.tensor2imgs.
    # input must be a torch tensor, not a numpy array
    with pytest.raises(AssertionError):
        tensor = np.random.rand(2, 3, 3)
        mmcv.tensor2imgs(tensor)
    # input must be 4-D (N, C, H, W)
    with pytest.raises(AssertionError):
        tensor = torch.randn(2, 3, 3)
        mmcv.tensor2imgs(tensor)
    # channel count must be 1 or 3
    with pytest.raises(AssertionError):
        tensor = torch.randn(2, 4, 3, 3)
        mmcv.tensor2imgs(tensor)
    # mean length must match the channel count; the first call is the one
    # expected to raise inside this block
    with pytest.raises(AssertionError):
        tensor = torch.randn(2, 3, 5, 5)
        mmcv.tensor2imgs(tensor, mean=(1,))
        tensor = torch.randn(2, 1, 5, 5)
        mmcv.tensor2imgs(tensor, mean=(0, 0, 0))
    # std length must match the channel count; same pattern as above
    with pytest.raises(AssertionError):
        tensor = torch.randn(2, 3, 5, 5)
        mmcv.tensor2imgs(tensor, std=(1,))
        tensor = torch.randn(2, 1, 5, 5)
        mmcv.tensor2imgs(tensor, std=(1, 1, 1))
    # to_rgb=True is invalid for single-channel input
    with pytest.raises(AssertionError):
        tensor = torch.randn(2, 1, 5, 5)
        mmcv.tensor2imgs(tensor, mean=(0,), std=(1,), to_rgb=True)
    # to_rgb=True flips the channel axis (BGR -> RGB)
    tensor = torch.randn(2, 3, 5, 5)
    gts = [t.cpu().numpy().transpose(1, 2, 0).astype(np.uint8) for t in tensor.flip(1)]
    outputs = mmcv.tensor2imgs(tensor, to_rgb=True)
    for (gt, output) in zip(gts, outputs):
        assert_array_equal(gt, output)
    # to_rgb=False keeps the channel order as-is
    tensor = torch.randn(2, 3, 5, 5)
    gts = [t.cpu().numpy().transpose(1, 2, 0).astype(np.uint8) for t in tensor]
    outputs = mmcv.tensor2imgs(tensor, to_rgb=False)
    for (gt, output) in zip(gts, outputs):
        assert_array_equal(gt, output)
    # single-channel input yields 2-D grayscale images
    tensor = torch.randn(2, 1, 5, 5)
    gts = [t.squeeze(0).cpu().numpy().astype(np.uint8) for t in tensor]
    outputs = mmcv.tensor2imgs(tensor, to_rgb=False)
    for (gt, output) in zip(gts, outputs):
        assert_array_equal(gt, output)
@patch('mmcv.__path__', [osp.join(osp.dirname(__file__), 'data/')])
def test_set_mmcv_home():
    """_get_mmcv_home must honour the MMCV_HOME environment variable."""
    os.environ.pop(ENV_MMCV_HOME, None)
    custom_home = osp.join(osp.dirname(__file__), 'data/model_zoo/mmcv_home/')
    os.environ[ENV_MMCV_HOME] = custom_home
    assert _get_mmcv_home() == custom_home
@patch('mmcv.__path__', [osp.join(osp.dirname(__file__), 'data/')])
def test_default_mmcv_home():
    """Without env overrides, the default cache dir is used and the bundled
    model-zoo json is loaded."""
    os.environ.pop(ENV_MMCV_HOME, None)
    os.environ.pop(ENV_XDG_CACHE_HOME, None)
    default_home = os.path.expanduser(os.path.join(DEFAULT_CACHE_DIR, 'mmcv'))
    assert _get_mmcv_home() == default_home
    model_urls = get_external_models()
    assert model_urls == mmcv.load(osp.join(mmcv.__path__[0], 'model_zoo/open_mmlab.json'))
@patch('mmcv.__path__', [osp.join(osp.dirname(__file__), 'data/')])
def test_get_external_models():
    """External model urls come from the open_mmlab.json under MMCV_HOME."""
    os.environ.pop(ENV_MMCV_HOME, None)
    custom_home = osp.join(osp.dirname(__file__), 'data/model_zoo/mmcv_home/')
    os.environ[ENV_MMCV_HOME] = custom_home
    ext_urls = get_external_models()
    assert ext_urls == {
        'train': 'https://localhost/train.pth',
        'test': 'test.pth',
        'val': 'val.pth',
        'train_empty': 'train.pth',
    }
@patch('mmcv.__path__', [osp.join(osp.dirname(__file__), 'data/')])
def test_get_deprecated_models():
    """Deprecated model aliases map old names to their replacements."""
    os.environ.pop(ENV_MMCV_HOME, None)
    custom_home = osp.join(osp.dirname(__file__), 'data/model_zoo/mmcv_home/')
    os.environ[ENV_MMCV_HOME] = custom_home
    dep_urls = get_deprecated_model_names()
    assert dep_urls == {'train_old': 'train', 'test_old': 'test'}
def load_from_http(url, map_location=None):
    """Fake HTTP loader: tag the url so tests can tell which path ran."""
    return f'url:{url}'
def load_url(url, map_location=None, model_dir=None):
    """Fake ``load_url`` that defers to the fake HTTP loader above."""
    return load_from_http(url)
def load(filepath, map_location=None):
    """Fake ``torch.load``: tag the path so tests can tell which path ran."""
    return f'local:{filepath}'
@patch('mmcv.__path__', [osp.join(osp.dirname(__file__), 'data/')])
@patch('mmcv.runner.checkpoint.load_from_http', load_from_http)
@patch('mmcv.runner.checkpoint.load_url', load_url)
@patch('torch.load', load)
def test_load_external_url():
    # With the fake loaders patched in, _load_checkpoint returns a tagged
    # string ('url:...' or 'local:...') revealing which code path handled
    # each kind of checkpoint uri.
    # modelzoo:// resolves through torchvision's model urls
    url = _load_checkpoint('modelzoo://resnet50')
    if (TORCH_VERSION < '1.9.0'):
        assert (url == 'url:https://download.pytorch.org/models/resnet50-19c8e357.pth')
    else:
        # torchvision renamed the resnet50 checkpoint file in torch 1.9.0
        assert (url == 'url:https://download.pytorch.org/models/resnet50-0676ba61.pth')
    # torchvision:// behaves the same way
    url = _load_checkpoint('torchvision://resnet50')
    if (TORCH_VERSION < '1.9.0'):
        assert (url == 'url:https://download.pytorch.org/models/resnet50-19c8e357.pth')
    else:
        assert (url == 'url:https://download.pytorch.org/models/resnet50-0676ba61.pth')
    # open-mmlab:// with no MMCV_HOME falls back to the bundled json
    os.environ.pop(ENV_MMCV_HOME, None)
    os.environ.pop(ENV_XDG_CACHE_HOME, None)
    url = _load_checkpoint('open-mmlab://train')
    assert (url == 'url:https://localhost/train.pth')
    # deprecated open-mmlab:// names warn and redirect to the new name
    os.environ.pop(ENV_MMCV_HOME, None)
    os.environ.pop(ENV_XDG_CACHE_HOME, None)
    with pytest.warns(Warning, match='open-mmlab://train_old is deprecated in favor of open-mmlab://train'):
        url = _load_checkpoint('open-mmlab://train_old')
        assert (url == 'url:https://localhost/train.pth')
    # same for the openmmlab:// scheme
    os.environ.pop(ENV_MMCV_HOME, None)
    os.environ.pop(ENV_XDG_CACHE_HOME, None)
    with pytest.warns(Warning, match='openmmlab://train_old is deprecated in favor of openmmlab://train'):
        url = _load_checkpoint('openmmlab://train_old')
        assert (url == 'url:https://localhost/train.pth')
    # with MMCV_HOME set, entries come from its open_mmlab.json
    os.environ.pop(ENV_MMCV_HOME, None)
    mmcv_home = osp.join(osp.dirname(__file__), 'data/model_zoo/mmcv_home')
    os.environ[ENV_MMCV_HOME] = mmcv_home
    url = _load_checkpoint('open-mmlab://train')
    assert (url == 'url:https://localhost/train.pth')
    # a relative entry whose file is absent raises
    with pytest.raises(FileNotFoundError, match='train.pth can not be found.'):
        _load_checkpoint('open-mmlab://train_empty')
    # relative entries whose files exist resolve inside MMCV_HOME
    url = _load_checkpoint('open-mmlab://test')
    assert (url == f"local:{osp.join(_get_mmcv_home(), 'test.pth')}")
    url = _load_checkpoint('open-mmlab://val')
    assert (url == f"local:{osp.join(_get_mmcv_home(), 'val.pth')}")
    # plain http urls go through the http loader
    url = _load_checkpoint('http://localhost/train.pth')
    assert (url == 'url:http://localhost/train.pth')
    # plain local paths: missing file raises, existing file loads locally
    with pytest.raises(FileNotFoundError, match='train.pth can not be found.'):
        _load_checkpoint('train.pth')
    url = _load_checkpoint(osp.join(_get_mmcv_home(), 'test.pth'))
    assert (url == f"local:{osp.join(_get_mmcv_home(), 'test.pth')}")
@pytest.mark.parametrize('device', ['cpu', pytest.param('cuda', marks=pytest.mark.skipif((not torch.cuda.is_available()), reason='requires CUDA support'))])
def test_active_rotated_filter(device):
    """Forward and backward of active_rotated_filter must match the
    precomputed reference arrays (module-level fixtures)."""
    feature = torch.tensor(np_feature, dtype=torch.float, device=device, requires_grad=True)
    indices = torch.tensor(np_indices, dtype=torch.int, device=device)
    result = active_rotated_filter(feature, indices)
    # backprop a unit gradient to populate feature.grad
    result.backward(torch.ones_like(result))
    assert np.allclose(result.data.cpu().numpy(), expected_output, atol=0.001)
    assert np.allclose(feature.grad.data.cpu().numpy(), expected_grad, atol=0.001)
@pytest.mark.skipif((not torch.cuda.is_available()),
                    reason='requires CUDA support')
def test_paconv_assign_scores():
    # Forward/backward check of the PAConv assign_score_withk CUDA op
    # against values precomputed with a reference implementation.
    # Inferred from the literals (confirm against mmcv.ops docs):
    # scores (B=2, npoint=2, K=4, M=2), points/centers (B=2, N=8, M=2, C=4),
    # knn_idx (B=2, npoint=2, K=4), aggregate='sum'.
    scores = torch.tensor([[[[0.06947571, 0.6065746], [0.28462553, 0.8378516], [0.7595994, 0.97220325], [0.519155, 0.766185]], [[0.15348864, 0.6051019], [0.21510637, 0.31916398], [0.00236845, 0.5842595], [0.6783676, 0.5216348]]], [[[0.23089725, 0.5568468], [0.7405102, 0.06438422], [0.6887394, 0.22089851], [0.0502342, 0.79228795]], [[0.44883424, 0.15427643], [0.13817799, 0.34856772], [0.7989621, 0.33788306], [0.15699774, 0.7693662]]]]).float().cuda()
    scores.requires_grad_()
    points = torch.tensor([[[[0.06001121, 0.92963666, 0.5753327, 0.7251477], [0.53563064, 0.23129565, 0.92366195, 0.44261628]], [[0.5770022, 0.56625944, 0.23560429, 0.11178821], [0.7735967, 0.95678777, 0.25468266, 0.02895975]], [[0.0589869, 0.09017515, 0.5977862, 0.02797985], [0.603862, 0.35991007, 0.85761684, 0.3096559]], [[0.22359002, 0.13983732, 0.5544243, 0.68863827], [0.85646236, 0.75651926, 0.8638947, 0.83600986]], [[0.45424145, 0.27458847, 0.6456112, 0.47162914], [0.15773582, 0.47645122, 0.79964715, 0.3323908]], [[0.8351399, 0.84696376, 0.9431732, 0.29418713], [0.77168906, 0.6996871, 0.19354361, 0.03392768]], [[0.30976456, 0.7074133, 0.581795, 0.976677], [0.69656056, 0.07199162, 0.4708506, 0.29117996]], [[0.5829035, 0.30201727, 0.76556486, 0.0935446], [0.88030535, 0.16129416, 0.9242525, 0.49545723]]], [[[0.50899494, 0.06482804, 0.44939405, 0.37704808], [0.47028124, 0.11969638, 0.62823206, 0.28560323]], [[0.40690207, 0.689753, 0.51636654, 0.23040164], [0.06935787, 0.00488842, 0.22462702, 0.09182382]], [[0.26611632, 0.00184339, 0.7730655, 0.5228131], [0.87776035, 0.77895886, 0.2787183, 0.16620636]], [[0.502574, 0.04039001, 0.5368497, 0.98379374], [0.40973026, 0.3238272, 0.9733018, 0.13988364]], [[0.04586202, 0.20983845, 0.20662665, 0.22270602], [0.60387236, 0.5155574, 0.51237285, 0.6528438]], [[0.45735973, 0.86821306, 0.61054605, 0.8370336], [0.45193362, 0.3734138, 0.7825672, 0.5699416]], [[0.44591594, 0.12447512, 0.09282011, 0.7055254], [0.25223452, 0.46696228, 0.7051136, 0.892151]], [[0.49615085, 0.47321403, 0.93138885, 0.7652197], [0.38766378, 0.30332977, 0.23131835, 0.02863514]]]]).float().cuda()
    points.requires_grad_()
    centers = torch.tensor([[[[0.83878064, 0.96658987, 0.8033424, 0.9598312], [0.45035273, 0.8768925, 0.977736, 0.54547966]], [[0.01041394, 0.597893, 0.36212963, 0.4410367], [0.94879234, 0.8372817, 0.21237361, 0.67945415]], [[0.5096087, 0.26401454, 0.60034937, 0.5417416], [0.87591463, 0.546456, 0.4096033, 0.16373193]], [[0.79547447, 0.1482386, 0.12840575, 0.45384115], [0.5640288, 0.944541, 0.5745328, 0.73229736]], [[0.93011934, 0.7406011, 0.62621707, 0.8677915], [0.91563636, 0.3595413, 0.6678378, 0.6085383]], [[0.22431666, 0.65617776, 0.7483924, 0.6263364], [0.30968404, 0.78204364, 0.14899081, 0.09628749]], [[0.73675203, 0.72104895, 0.4648038, 0.6101647], [0.7817645, 0.16572917, 0.3311919, 0.43407398]], [[0.8193154, 0.09559608, 0.05978829, 0.90262103], [0.4256065, 0.8165596, 0.8206446, 0.6604721]]], [[[0.7159653, 0.18600845, 0.21433902, 0.3159626], [0.3921569, 0.33221376, 0.5061177, 0.7961841]], [[0.95338356, 0.04785997, 0.67185795, 0.6538394], [0.4729132, 0.33404195, 0.17750603, 0.8445621]], [[0.6755793, 0.16193843, 0.75943846, 0.92123103], [0.2781859, 0.03114432, 0.710638, 0.52729136]], [[0.8376105, 0.10858494, 0.13208169, 0.365772], [0.5930795, 0.27390373, 0.14036089, 0.170403]], [[0.3479789, 0.89855295, 0.04844379, 0.9871029], [0.29781651, 0.0244137, 0.9179047, 0.8081611]], [[0.12460887, 0.44991326, 0.19382608, 0.35037738], [0.2773472, 0.4362057, 0.36757517, 0.5993509]], [[0.29630446, 0.90046406, 0.5417113, 0.13510644], [0.09623539, 0.04226565, 0.32001644, 0.44358212]], [[0.5274848, 0.82096446, 0.9415489, 0.7123748], [0.7537517, 0.8086482, 0.85345286, 0.7472754]]]]).float().cuda()
    centers.requires_grad_()
    knn_idx = torch.tensor([[[6, 7, 4, 6], [2, 4, 2, 4]],
                            [[7, 1, 3, 2], [6, 0, 2, 6]]]).long().cuda()
    aggregate = 'sum'
    expected_output = torch.tensor([[[[(-0.08134781), 0.03877336, (-0.8212776), (-0.2869547)], [(-0.23378491), (-0.24112664), (-0.1600166), (-0.4121864)]], [[(-0.05780616), (-0.12298299), (-0.0370461), (-0.07889931)], [(-0.13956165), (-0.02006848), (-0.10940295), (-0.0293439)]], [[0.09284145, 0.58250105, 0.5927749, 0.16774094], [0.27070042, 0.13422406, 0.2617501, 0.23416464]], [[(-0.06121218), (-0.09561322), (-0.20408826), 0.08079343], [0.00944228, 0.03874819, 0.08404065, 0.04041629]]], [[[(-0.2110898), (-0.13335688), (-0.09315082), 0.08512095], [0.09121774, 0.15976946, 0.23994486, 0.14350912]], [[(-0.36167958), (-0.14891288), (-0.64470863), (-0.0646704)], [(-0.28276974), (-0.08847666), (-0.46904767), 0.20491874]], [[(-0.34877953), (-0.35533834), (-0.25225785), (-0.4638189)], [(-0.1420663), 0.09467781, 0.17088932, 0.22580585]], [[(-0.3879708), (-0.3991068), 0.05276498, (-0.46989647)], [0.32522714, (-0.02163534), 0.21604237, 0.4346682]]]]).float()
    output = assign_score_withk(scores, points, centers, knn_idx, aggregate)
    assert torch.allclose(output.detach().cpu(), expected_output, atol=1e-06)
    # Sum-reduce then backprop so every grad entry corresponds to a unit
    # upstream gradient.
    loss = output.sum()
    loss.backward()
    expected_scores_grad = torch.tensor([[[[0.04288036, (-0.18217683)], [(-0.78873926), 0.7485497], [(-0.6866992), 0.05346543], [0.04288036, (-0.18217683)]], [[(-1.1407862), 0.13533896], [(-0.06964391), (-0.22948086)], [(-1.1407862), 0.13533896], [(-0.06964391), (-0.22948086)]]], [[[(-0.3363995), (-2.212181)], [(-1.1589496), (-2.7724311)], [(-0.9387654), (-1.3163853)], [(-1.4385346), (-1.0614843)]], [[(-0.5048497), 1.4143617], [(-0.47332114), 0.6017133], [(-0.30974793), 1.1995442], [(-0.5048497), 1.4143617]]]]).float()
    expected_points_grad = torch.tensor([[[[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], [[0.15585709, 0.15585709, 0.15585709, 0.15585709], [1.1893613, 1.1893613, 1.1893613, 1.1893613]], [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], [[1.6530733, 1.6530733, 1.6530733, 1.6530733], [1.8130021, 1.8130021, 1.8130021, 1.8130021]], [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], [[0.58863074, 0.58863074, 0.58863074, 0.58863074], [1.3727596, 1.3727596, 1.3727596, 1.3727596]], [[0.28462553, 0.28462553, 0.28462553, 0.28462553], [0.8378516, 0.8378516, 0.8378516, 0.8378516]]], [[[0.13817799, 0.13817799, 0.13817799, 0.13817799], [0.34856772, 0.34856772, 0.34856772, 0.34856772]], [[0.7405102, 0.7405102, 0.7405102, 0.7405102], [0.06438422, 0.06438422, 0.06438422, 0.06438422]], [[0.8491963, 0.8491963, 0.8491963, 0.8491963], [1.1301711, 1.1301711, 1.1301711, 1.1301711]], [[0.6887394, 0.6887394, 0.6887394, 0.6887394], [0.22089851, 0.22089851, 0.22089851, 0.22089851]], [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], [[0.605832, 0.605832, 0.605832, 0.605832], [0.92364264, 0.92364264, 0.92364264, 0.92364264]], [[0.23089725, 0.23089725, 0.23089725, 0.23089725], [0.5568468, 0.5568468, 0.5568468, 0.5568468]]]]).float()
    expected_centers_grad = torch.tensor([[[[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], [[(-1.0493311), (-1.0493311), (-1.0493311), (-1.0493311)], [(-2.0301602), (-2.0301602), (-2.0301602), (-2.0301602)]], [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], [[(-1.6328557), (-1.6328557), (-1.6328557), (-1.6328557)], [(-3.1828144), (-3.1828144), (-3.1828144), (-3.1828144)]], [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]]], [[[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], [[(-1.5429721), (-1.5429721), (-1.5429721), (-1.5429721)], [(-1.6100934), (-1.6100934), (-1.6100934), (-1.6100934)]], [[(-1.7103812), (-1.7103812), (-1.7103812), (-1.7103812)], [(-1.6344175), (-1.6344175), (-1.6344175), (-1.6344175)]]]]).float()
    assert torch.allclose(
        scores.grad.detach().cpu(), expected_scores_grad, atol=1e-06)
    assert torch.allclose(
        points.grad.detach().cpu(), expected_points_grad, atol=1e-06)
    assert torch.allclose(
        centers.grad.detach().cpu(), expected_centers_grad, atol=1e-06)
@pytest.mark.skipif((not torch.cuda.is_available()), reason='requires CUDA support') def test_ball_query(): new_xyz = torch.tensor([[[(- 0.074), 1.3147, (- 1.3625)], [(- 2.2769), 2.7817, (- 0.2334)], [(- 0.4003), 2.4666, (- 0.5116)], [(- 0.074), 1.3147, (- 1.3625)], [(- 0.074), 1.3147, (- 1.3625)]], [[(- 2.0289), 2.4952, (- 0.1708)], [(- 2.0668), 6.0278, (- 0.4875)], [0.4066, 1.4211, (- 0.2947)], [(- 2.0289), 2.4952, (- 0.1708)], [(- 2.0289), 2.4952, (- 0.1708)]]]).cuda() xyz = torch.tensor([[[(- 0.074), 1.3147, (- 1.3625)], [0.5555, 1.0399, (- 1.3634)], [(- 0.4003), 2.4666, (- 0.5116)], [(- 0.5251), 2.4379, (- 0.8466)], [(- 0.9691), 1.1418, (- 1.3733)], [(- 0.2232), 0.9561, (- 1.3626)], [(- 2.2769), 2.7817, (- 0.2334)], [(- 0.2822), 1.3192, (- 1.3645)], [0.1533, 1.5024, (- 1.0432)], [0.4917, 1.1529, (- 1.3496)]], [[(- 2.0289), 2.4952, (- 0.1708)], [(- 0.7188), 0.9956, (- 0.5096)], [(- 2.0668), 6.0278, (- 0.4875)], [(- 1.9304), 3.3092, 0.661], [0.0949, 1.4332, 0.314], [(- 1.2879), 2.0008, (- 0.7791)], [(- 0.7252), 0.9611, (- 0.6371)], [0.4066, 1.4211, (- 0.2947)], [0.322, 1.4447, 0.3548], [(- 0.9744), 2.3856, (- 1.2)]]]).cuda() idx = ball_query(0, 0.2, 5, xyz, new_xyz) expected_idx = torch.tensor([[[0, 0, 0, 0, 0], [6, 6, 6, 6, 6], [2, 2, 2, 2, 2], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]], [[0, 0, 0, 0, 0], [2, 2, 2, 2, 2], [7, 7, 7, 7, 7], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]]).cuda() assert torch.all((idx == expected_idx)) idx = ball_query(0.2, 0.4, 5, xyz, new_xyz) expected_idx = torch.tensor([[[0, 5, 7, 0, 0], [6, 6, 6, 6, 6], [2, 3, 2, 2, 2], [0, 5, 7, 0, 0], [0, 5, 7, 0, 0]], [[0, 0, 0, 0, 0], [2, 2, 2, 2, 2], [7, 7, 7, 7, 7], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]]).cuda() assert torch.all((idx == expected_idx))
@pytest.mark.skipif((not torch.cuda.is_available()), reason='requires CUDA support') class TestBBox(object): def _test_bbox_overlaps(self, dtype=torch.float): from mmcv.ops import bbox_overlaps b1 = torch.tensor([[1.0, 1.0, 3.0, 4.0], [2.0, 2.0, 3.0, 4.0], [7.0, 7.0, 8.0, 8.0]]).cuda().type(dtype) b2 = torch.tensor([[0.0, 2.0, 2.0, 5.0], [2.0, 1.0, 3.0, 3.0]]).cuda().type(dtype) should_output = np.array([[0.33333334, 0.5], [0.2, 0.5], [0.0, 0.0]]) out = bbox_overlaps(b1, b2, offset=1) assert np.allclose(out.cpu().numpy(), should_output, 0.01) b1 = torch.tensor([[1.0, 1.0, 3.0, 4.0], [2.0, 2.0, 3.0, 4.0]]).cuda().type(dtype) b2 = torch.tensor([[0.0, 2.0, 2.0, 5.0], [2.0, 1.0, 3.0, 3.0]]).cuda().type(dtype) should_output = np.array([0.33333334, 0.5]) out = bbox_overlaps(b1, b2, aligned=True, offset=1) assert np.allclose(out.cpu().numpy(), should_output, 0.01) b1 = torch.tensor([[0.0, 0.0, 3.0, 3.0]]).cuda().type(dtype) b1 = torch.tensor([[0.0, 0.0, 3.0, 3.0]]).cuda().type(dtype) b2 = torch.tensor([[4.0, 0.0, 5.0, 3.0], [3.0, 0.0, 4.0, 3.0], [2.0, 0.0, 3.0, 3.0], [1.0, 0.0, 2.0, 3.0]]).cuda().type(dtype) should_output = np.array([0, 0.2, 0.5, 0.5]) out = bbox_overlaps(b1, b2, offset=1) assert np.allclose(out.cpu().numpy(), should_output, 0.01) def test_bbox_overlaps_float(self): self._test_bbox_overlaps(torch.float) def test_bbox_overlaps_half(self): self._test_bbox_overlaps(torch.half)
class TestBilinearGridSample(object): def _test_bilinear_grid_sample(self, dtype=torch.float, align_corners=False, multiplier=1, precision=0.001): from mmcv.ops.point_sample import bilinear_grid_sample input = torch.rand(1, 1, 20, 20, dtype=dtype) grid = torch.Tensor([[[1, 0, 0], [0, 1, 0]]]) grid = F.affine_grid(grid, (1, 1, 15, 15), align_corners=align_corners).type_as(input) grid *= multiplier out = bilinear_grid_sample(input, grid, align_corners=align_corners) ref_out = F.grid_sample(input, grid, align_corners=align_corners) assert np.allclose(out.data.detach().cpu().numpy(), ref_out.data.detach().cpu().numpy(), precision) def test_bilinear_grid_sample(self): self._test_bilinear_grid_sample(torch.double, False) self._test_bilinear_grid_sample(torch.double, True) self._test_bilinear_grid_sample(torch.float, False) self._test_bilinear_grid_sample(torch.float, True) self._test_bilinear_grid_sample(torch.float, False) self._test_bilinear_grid_sample(torch.float, True, 5) self._test_bilinear_grid_sample(torch.float, False, 10) self._test_bilinear_grid_sample(torch.float, True, (- 6)) self._test_bilinear_grid_sample(torch.float, False, (- 10)) self._test_bilinear_grid_sample(torch.double, True, 5) self._test_bilinear_grid_sample(torch.double, False, 10) self._test_bilinear_grid_sample(torch.double, True, (- 6)) self._test_bilinear_grid_sample(torch.double, False, (- 10))
def _test_border_align_allclose(device, dtype, pool_size): if ((not torch.cuda.is_available()) and (device == 'cuda')): pytest.skip('test requires GPU') try: from mmcv.ops import BorderAlign, border_align except ModuleNotFoundError: pytest.skip('BorderAlign op is not successfully compiled') np_input = np.array(input_arr) np_boxes = np.array(boxes_arr) np_output = np.array(output_dict[pool_size]) np_grad = np.array(input_grad_dict[pool_size]) input = torch.tensor(np_input, dtype=dtype, device=device, requires_grad=True) boxes = torch.tensor(np_boxes, dtype=dtype, device=device) input_cp = copy.deepcopy(input) output = border_align(input_cp, boxes, pool_size) output.backward(torch.ones_like(output)) assert np.allclose(output.data.type(dtype).cpu().numpy(), np_output, atol=1e-05) assert np.allclose(input_cp.grad.data.type(dtype).cpu().numpy(), np_grad, atol=1e-05) pool_module = BorderAlign(pool_size) output = pool_module(input, boxes) output.backward(torch.ones_like(output)) assert np.allclose(output.data.type(dtype).cpu().numpy(), np_output, atol=1e-05) assert np.allclose(input.grad.data.type(dtype).cpu().numpy(), np_grad, atol=1e-05)
@pytest.mark.parametrize('device', ['cuda']) @pytest.mark.parametrize('dtype', [torch.float, torch.half, torch.double]) @pytest.mark.parametrize('pool_size', [1, 2]) def test_border_align(device, dtype, pool_size): _test_border_align_allclose(device, dtype, pool_size)
class TestBoxIoURotated(object):
    """Tests for mmcv.ops.box_iou_rotated on CPU/CUDA in 'iou'/'iof' modes.

    Each scenario checks the pairwise matrix, the aligned (elementwise)
    variant, and that negating the angles together with ``clockwise=False``
    reproduces the same values.  The four public test methods share one
    private runner instead of repeating the data and assertions four times.
    """

    # Boxes in (x_center, y_center, w, h, angle) format.
    np_boxes1 = np.asarray(
        [[1.0, 1.0, 3.0, 4.0, 0.5], [2.0, 2.0, 3.0, 4.0, 0.6],
         [7.0, 7.0, 8.0, 8.0, 0.4]],
        dtype=np.float32)
    np_boxes2 = np.asarray(
        [[0.0, 2.0, 2.0, 5.0, 0.3], [2.0, 1.0, 3.0, 3.0, 0.5],
         [5.0, 5.0, 6.0, 7.0, 0.4]],
        dtype=np.float32)
    # mode -> (expected pairwise matrix, expected aligned vector).
    np_expect = {
        'iou': (np.asarray(
            [[0.3708, 0.4351, 0.0], [0.1104, 0.4487, 0.0424],
             [0.0, 0.0, 0.3622]],
            dtype=np.float32),
                np.asarray([0.3708, 0.4487, 0.3622], dtype=np.float32)),
        'iof': (np.asarray(
            [[0.4959, 0.5306, 0.0], [0.1823, 0.542, 0.1832],
             [0.0, 0.0, 0.4404]],
            dtype=np.float32),
                np.asarray([0.4959, 0.542, 0.4404], dtype=np.float32)),
    }

    def _run(self, mode, device):
        """Run all four assertion flavours for one (mode, device) pair."""
        from mmcv.ops import box_iou_rotated
        expect, expect_aligned = self.np_expect[mode]
        # .copy() because torch.from_numpy shares memory with the ndarray
        # and the angle-negation step below mutates the tensors in place;
        # without the copy the class-level arrays would be corrupted.
        boxes1 = torch.from_numpy(self.np_boxes1.copy())
        boxes2 = torch.from_numpy(self.np_boxes2.copy())
        if device == 'cuda':
            boxes1 = boxes1.cuda()
            boxes2 = boxes2.cuda()
        ious = box_iou_rotated(boxes1, boxes2, mode=mode)
        assert np.allclose(ious.cpu().numpy(), expect, atol=1e-4)
        ious = box_iou_rotated(boxes1, boxes2, mode=mode, aligned=True)
        assert np.allclose(ious.cpu().numpy(), expect_aligned, atol=1e-4)
        # Negated angles + counter-clockwise convention must be equivalent.
        boxes1[..., -1] *= -1
        boxes2[..., -1] *= -1
        ious = box_iou_rotated(boxes1, boxes2, mode=mode, clockwise=False)
        assert np.allclose(ious.cpu().numpy(), expect, atol=1e-4)
        ious = box_iou_rotated(
            boxes1, boxes2, mode=mode, aligned=True, clockwise=False)
        assert np.allclose(ious.cpu().numpy(), expect_aligned, atol=1e-4)

    def test_box_iou_rotated_cpu(self):
        self._run('iou', 'cpu')

    @pytest.mark.skipif(
        not torch.cuda.is_available(), reason='requires CUDA support')
    def test_box_iou_rotated_cuda(self):
        self._run('iou', 'cuda')

    def test_box_iou_rotated_iof_cpu(self):
        self._run('iof', 'cpu')

    @pytest.mark.skipif(
        not torch.cuda.is_available(), reason='requires CUDA support')
    def test_box_iou_rotated_iof_cuda(self):
        self._run('iof', 'cuda')
class TestCarafe(object): def test_carafe_naive_gradcheck(self): if (not torch.cuda.is_available()): return from mmcv.ops import CARAFENaive feat = torch.randn(2, 64, 3, 3, requires_grad=True, device='cuda').double() mask = torch.randn(2, 100, 6, 6, requires_grad=True, device='cuda').sigmoid().double() gradcheck(CARAFENaive(5, 4, 2), (feat, mask), atol=0.0001, eps=0.0001) def test_carafe_gradcheck(self): if (not torch.cuda.is_available()): return from mmcv.ops import CARAFE feat = torch.randn(2, 64, 3, 3, requires_grad=True, device='cuda').double() mask = torch.randn(2, 100, 6, 6, requires_grad=True, device='cuda').sigmoid().double() gradcheck(CARAFE(5, 4, 2), (feat, mask), atol=0.0001, eps=0.0001)
class Loss(nn.Module): def __init__(self): super().__init__() def forward(self, input, target): input = input.view((- 1)) target = target.view((- 1)) return torch.mean((input - target))
class TestCrissCrossAttention(object): def test_cc_attention(self): device = torch.device(('cuda:0' if torch.cuda.is_available() else 'cpu')) from mmcv.ops import CrissCrossAttention loss_func = Loss() input = np.fromfile('tests/data/for_ccattention/ccattention_input.bin', dtype=np.float32) output = np.fromfile('tests/data/for_ccattention/ccattention_output.bin', dtype=np.float32) input = input.reshape((1, 32, 45, 45)) output = output.reshape((1, 32, 45, 45)) label = torch.ones((1, 32, 45, 45)) input = torch.FloatTensor(input) output = torch.FloatTensor(output) input.requires_grad = True shape = input.shape channel = shape[1] cca = CrissCrossAttention(channel) cca.to(device) input = input.to(device) label = label.to(device) cca.train() test_output = cca(input) test_loss = loss_func(test_output, label) test_loss.backward() test_output = test_output.detach().cpu().numpy() output = output.numpy() assert np.allclose(test_output, output) assert (test_output.shape == shape)
def test_contour_expand(): from mmcv.ops import contour_expand np_internal_kernel_label = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0, 0, 0, 2, 0], [0, 0, 1, 1, 0, 0, 0, 0, 2, 0], [0, 0, 1, 1, 0, 0, 0, 0, 2, 0], [0, 0, 1, 1, 0, 0, 0, 0, 2, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]).astype(np.int32) np_kernel_mask1 = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 1, 0], [0, 0, 1, 1, 1, 1, 1, 1, 1, 0], [0, 0, 1, 1, 1, 1, 1, 1, 1, 0], [0, 0, 1, 1, 1, 1, 1, 1, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]).astype(np.uint8) np_kernel_mask2 = (np_internal_kernel_label > 0).astype(np.uint8) np_kernel_mask = np.stack([np_kernel_mask1, np_kernel_mask2]) min_area = 1 kernel_region_num = 3 result = contour_expand(np_kernel_mask, np_internal_kernel_label, min_area, kernel_region_num) gt = [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 1, 1, 2, 2, 2, 0], [0, 0, 1, 1, 1, 1, 2, 2, 2, 0], [0, 0, 1, 1, 1, 1, 2, 2, 2, 0], [0, 0, 1, 1, 1, 1, 2, 2, 2, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]] assert np.allclose(result, gt) np_kernel_mask_t = torch.from_numpy(np_kernel_mask) np_internal_kernel_label_t = torch.from_numpy(np_internal_kernel_label) result = contour_expand(np_kernel_mask_t, np_internal_kernel_label_t, min_area, kernel_region_num) assert np.allclose(result, gt)
@pytest.mark.skipif((not torch.cuda.is_available()), reason='requires CUDA support') def test_convex_iou(): pointsets = torch.from_numpy(np_pointsets).cuda().float() polygons = torch.from_numpy(np_polygons).cuda().float() expected_iou = torch.from_numpy(np_expected_iou).cuda().float() assert torch.allclose(convex_iou(pointsets, polygons), expected_iou, atol=0.001)
@pytest.mark.skipif((not torch.cuda.is_available()), reason='requires CUDA support') def test_convex_giou(): pointsets = torch.from_numpy(np_pointsets).cuda().float() polygons = torch.from_numpy(np_polygons).cuda().float() expected_giou = torch.from_numpy(np_expected_giou).cuda().float() expected_grad = torch.from_numpy(np_expected_grad).cuda().float() (giou, grad) = convex_giou(pointsets, polygons) assert torch.allclose(giou, expected_giou, atol=0.001) assert torch.allclose(grad, expected_grad, atol=0.001)
def test_corner_pool_device_and_dtypes_cpu(): '\n CommandLine:\n xdoctest -m tests/test_corner_pool.py test_corner_pool_device_and_dtypes_cpu\n ' with pytest.raises(AssertionError): pool = CornerPool('corner') lr_tensor = torch.tensor([[[[0, 0, 0, 0, 0], [2, 1, 3, 0, 2], [5, 4, 1, 1, 6], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]]]) tb_tensor = torch.tensor([[[[0, 3, 1, 0, 0], [0, 1, 1, 0, 0], [0, 3, 4, 0, 0], [0, 2, 2, 0, 0], [0, 0, 2, 0, 0]]]]) left_answer = torch.tensor([[[[0, 0, 0, 0, 0], [3, 3, 3, 2, 2], [6, 6, 6, 6, 6], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]]]) pool = CornerPool('left') left_tensor = pool(lr_tensor) assert (left_tensor.type() == lr_tensor.type()) assert torch.equal(left_tensor, left_answer) right_answer = torch.tensor([[[[0, 0, 0, 0, 0], [2, 2, 3, 3, 3], [5, 5, 5, 5, 6], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]]]) pool = CornerPool('right') right_tensor = pool(lr_tensor) assert (right_tensor.type() == lr_tensor.type()) assert torch.equal(right_tensor, right_answer) top_answer = torch.tensor([[[[0, 3, 4, 0, 0], [0, 3, 4, 0, 0], [0, 3, 4, 0, 0], [0, 2, 2, 0, 0], [0, 0, 2, 0, 0]]]]) pool = CornerPool('top') top_tensor = pool(tb_tensor) assert (top_tensor.type() == tb_tensor.type()) assert torch.equal(top_tensor, top_answer) bottom_answer = torch.tensor([[[[0, 3, 1, 0, 0], [0, 3, 1, 0, 0], [0, 3, 4, 0, 0], [0, 3, 4, 0, 0], [0, 3, 4, 0, 0]]]]) pool = CornerPool('bottom') bottom_tensor = pool(tb_tensor) assert (bottom_tensor.type() == tb_tensor.type()) assert torch.equal(bottom_tensor, bottom_answer)
def assert_equal_tensor(tensor_a, tensor_b): assert tensor_a.eq(tensor_b).all()
class TestCorrelation(): def _test_correlation(self, dtype=torch.float): layer = Correlation(max_displacement=0) input1 = torch.tensor(_input1, dtype=dtype).cuda() input2 = torch.tensor(_input2, dtype=dtype).cuda() input1.requires_grad = True input2.requires_grad = True out = layer(input1, input2) out.backward(torch.ones_like(out)) gt_out = torch.tensor(_gt_out, dtype=dtype).cuda() assert_equal_tensor(out, gt_out) assert_equal_tensor(input1.grad.detach(), input2) assert_equal_tensor(input2.grad.detach(), input1) @pytest.mark.skipif((not torch.cuda.is_available()), reason='requires CUDA support') def test_correlation(self): self._test_correlation(torch.float) self._test_correlation(torch.double) self._test_correlation(torch.half)
class TestDeformconv(object):
    """Forward/backward checks for DeformConv2d(Pack).

    input, gt_out, gt_x_grad, offset_weight, offset_bias, deform_weight
    and the gt_*_grad arrays are module-level fixtures defined elsewhere
    in this file (single-sample references that get repeated along the
    batch dimension here).
    """

    def _test_deformconv(self,
                         dtype=torch.float,
                         threshold=0.001,
                         device='cuda',
                         batch_size=10,
                         im2col_step=2):
        """Run a full forward/backward check with fixed weights.

        Args:
            dtype: tensor/model dtype.
            threshold: allclose tolerance.
            device: 'cuda' or 'cpu'.
            batch_size: how many times the reference sample is repeated.
            im2col_step: im2col batching step passed to DeformConv2dPack.
        """
        if not torch.cuda.is_available() and device == 'cuda':
            pytest.skip('test requires GPU')
        from mmcv.ops import DeformConv2dPack
        c_in = 1
        c_out = 1
        # BUG FIX: the original re-assigned `batch_size = 10` here,
        # silently shadowing the parameter -- so the batch_size=1 call in
        # test_deformconv never actually tested a batch of 1.  The
        # shadowing assignment was removed.
        repeated_input = np.repeat(input, batch_size, axis=0)
        repeated_gt_out = np.repeat(gt_out, batch_size, axis=0)
        repeated_gt_x_grad = np.repeat(gt_x_grad, batch_size, axis=0)
        x = torch.tensor(repeated_input, device=device, dtype=dtype)
        x.requires_grad = True
        model = DeformConv2dPack(
            in_channels=c_in,
            out_channels=c_out,
            kernel_size=2,
            stride=1,
            padding=0,
            im2col_step=im2col_step)
        # Pin all learnable weights to the reference values so the output
        # and gradients are deterministic.
        model.conv_offset.weight.data = torch.nn.Parameter(
            torch.Tensor(offset_weight).reshape(8, 1, 2, 2))
        model.conv_offset.bias.data = torch.nn.Parameter(
            torch.Tensor(offset_bias).reshape(8))
        model.weight.data = torch.nn.Parameter(
            torch.Tensor(deform_weight).reshape(1, 1, 2, 2))
        if device == 'cuda':
            model.cuda()
        model.type(dtype)
        out = model(x)
        out.backward(torch.ones_like(out))
        assert np.allclose(out.data.detach().cpu().numpy(), repeated_gt_out,
                           threshold)
        assert np.allclose(x.grad.detach().cpu().numpy(), repeated_gt_x_grad,
                           threshold)
        # Weight grads accumulate over the batch, so divide by batch_size
        # before comparing with the single-sample references.
        assert np.allclose(
            model.conv_offset.weight.grad.detach().cpu().numpy() / batch_size,
            gt_offset_weight_grad, threshold)
        assert np.allclose(
            model.conv_offset.bias.grad.detach().cpu().numpy() / batch_size,
            gt_offset_bias_grad, threshold)
        assert np.allclose(
            model.weight.grad.detach().cpu().numpy() / batch_size,
            gt_deform_weight_grad, threshold)

        # Constructor contract checks.
        from mmcv.ops import DeformConv2d
        model = DeformConv2d(1, 1, 2, stride=1, padding=0)
        assert not hasattr(model, 'bias')
        with pytest.raises(AssertionError):
            model = DeformConv2d(1, 1, 2, stride=1, padding=0, bias=True)
        with pytest.raises(AssertionError):
            model = DeformConv2d(3, 2, 3, groups=2)
        with pytest.raises(AssertionError):
            model = DeformConv2d(3, 4, 3, groups=3)

    def _test_amp_deformconv(self,
                             input_dtype,
                             threshold=0.001,
                             batch_size=10,
                             im2col_step=2):
        """The function to test amp released on pytorch 1.6.0.

        The type of input data might be torch.float or torch.half,
        so we should test deform_conv in both cases. With amp, the
        data type of model will NOT be set manually.

        Args:
            input_dtype: torch.float or torch.half.
            threshold: the same as above function.
        """
        if not torch.cuda.is_available():
            return
        from mmcv.ops import DeformConv2dPack
        c_in = 1
        c_out = 1
        repeated_input = np.repeat(input, batch_size, axis=0)
        repeated_gt_out = np.repeat(gt_out, batch_size, axis=0)
        repeated_gt_x_grad = np.repeat(gt_x_grad, batch_size, axis=0)
        x = torch.Tensor(repeated_input).cuda().type(input_dtype)
        x.requires_grad = True
        model = DeformConv2dPack(
            in_channels=c_in,
            out_channels=c_out,
            kernel_size=2,
            stride=1,
            padding=0,
            im2col_step=im2col_step)
        model.conv_offset.weight.data = torch.nn.Parameter(
            torch.Tensor(offset_weight).reshape(8, 1, 2, 2))
        model.conv_offset.bias.data = torch.nn.Parameter(
            torch.Tensor(offset_bias).reshape(8))
        model.weight.data = torch.nn.Parameter(
            torch.Tensor(deform_weight).reshape(1, 1, 2, 2))
        # Note: unlike _test_deformconv, the model dtype is left alone --
        # autocast is expected to manage precision.
        model.cuda()
        out = model(x)
        out.backward(torch.ones_like(out))
        assert np.allclose(out.data.detach().cpu().numpy(), repeated_gt_out,
                           threshold)
        assert np.allclose(x.grad.detach().cpu().numpy(), repeated_gt_x_grad,
                           threshold)
        assert np.allclose(
            model.conv_offset.weight.grad.detach().cpu().numpy() / batch_size,
            gt_offset_weight_grad, threshold)
        assert np.allclose(
            model.conv_offset.bias.grad.detach().cpu().numpy() / batch_size,
            gt_offset_bias_grad, threshold)
        assert np.allclose(
            model.weight.grad.detach().cpu().numpy() / batch_size,
            gt_deform_weight_grad, threshold)

        from mmcv.ops import DeformConv2d
        model = DeformConv2d(1, 1, 2, stride=1, padding=0)
        assert not hasattr(model, 'bias')
        with pytest.raises(AssertionError):
            model = DeformConv2d(1, 1, 2, stride=1, padding=0, bias=True)
        with pytest.raises(AssertionError):
            model = DeformConv2d(3, 2, 3, groups=2)
        with pytest.raises(AssertionError):
            model = DeformConv2d(3, 4, 3, groups=3)

    def test_deformconv(self):
        self._test_deformconv(torch.double, device='cpu')
        self._test_deformconv(torch.float, device='cpu', threshold=0.1)
        self._test_deformconv(torch.double)
        self._test_deformconv(torch.float)
        self._test_deformconv(torch.half, threshold=0.1)
        # im2col_step larger than batch size is clamped internally, so this
        # must pass (and, with the shadowing bug fixed, actually runs with
        # a batch of 1 now).
        self._test_deformconv(torch.float, batch_size=1, im2col_step=2)
        # ... but a batch that does not divide evenly must be rejected.
        with pytest.raises(
                AssertionError,
                match='batch size must be divisible by im2col_step'):
            self._test_deformconv(torch.float, batch_size=10, im2col_step=3)

        # amp is only available on torch >= 1.6.0 (and not on parrots).
        if (TORCH_VERSION != 'parrots'
                and digit_version(TORCH_VERSION) >= digit_version('1.6.0')):
            with autocast(enabled=True):
                self._test_amp_deformconv(torch.float, 0.1)
                self._test_amp_deformconv(torch.half, 0.1)
class TestDeformRoIPool(object): def test_deform_roi_pool_gradcheck(self): if (not torch.cuda.is_available()): return from mmcv.ops import DeformRoIPoolPack pool_h = 2 pool_w = 2 spatial_scale = 1.0 sampling_ratio = 2 for case in inputs: np_input = np.array(case[0]) np_rois = np.array(case[1]) x = torch.tensor(np_input, device='cuda', dtype=torch.float, requires_grad=True) rois = torch.tensor(np_rois, device='cuda', dtype=torch.float) output_c = x.size(1) droipool = DeformRoIPoolPack((pool_h, pool_w), output_c, spatial_scale=spatial_scale, sampling_ratio=sampling_ratio).cuda() if _USING_PARROTS: gradcheck(droipool, (x, rois), no_grads=[rois]) else: gradcheck(droipool, (x, rois), eps=0.01, atol=0.01) def test_modulated_deform_roi_pool_gradcheck(self): if (not torch.cuda.is_available()): return from mmcv.ops import ModulatedDeformRoIPoolPack pool_h = 2 pool_w = 2 spatial_scale = 1.0 sampling_ratio = 2 for case in inputs: np_input = np.array(case[0]) np_rois = np.array(case[1]) x = torch.tensor(np_input, device='cuda', dtype=torch.float, requires_grad=True) rois = torch.tensor(np_rois, device='cuda', dtype=torch.float) output_c = x.size(1) droipool = ModulatedDeformRoIPoolPack((pool_h, pool_w), output_c, spatial_scale=spatial_scale, sampling_ratio=sampling_ratio).cuda() if _USING_PARROTS: gradcheck(droipool, (x, rois), no_grads=[rois]) else: gradcheck(droipool, (x, rois), eps=0.01, atol=0.01)
class Testfocalloss(object):
    """CUDA tests for the sigmoid/softmax focal loss ops.

    ``inputs``, ``softmax_outputs`` and ``sigmoid_outputs`` are
    module-level fixtures: each case is (logits, labels) and each output
    is (expected_loss, expected_input_grad).
    """

    def _test_softmax(self, dtype=torch.float):
        # Forward + backward check against precomputed reference values.
        if not torch.cuda.is_available():
            return
        from mmcv.ops import softmax_focal_loss
        alpha = 0.25
        gamma = 2.0
        for case, output in zip(inputs, softmax_outputs):
            np_x = np.array(case[0])
            np_y = np.array(case[1])
            np_x_grad = np.array(output[1])
            x = torch.from_numpy(np_x).cuda().type(dtype)
            x.requires_grad_()
            y = torch.from_numpy(np_y).cuda().long()
            loss = softmax_focal_loss(x, y, gamma, alpha, None, 'mean')
            loss.backward()
            assert np.allclose(loss.data.cpu().numpy(), output[0], 0.01)
            assert np.allclose(x.grad.data.cpu(), np_x_grad, 0.01)

    def _test_sigmoid(self, dtype=torch.float):
        if not torch.cuda.is_available():
            return
        from mmcv.ops import sigmoid_focal_loss
        alpha = 0.25
        gamma = 2.0
        for case, output in zip(inputs, sigmoid_outputs):
            np_x = np.array(case[0])
            np_y = np.array(case[1])
            np_x_grad = np.array(output[1])
            x = torch.from_numpy(np_x).cuda().type(dtype)
            x.requires_grad_()
            y = torch.from_numpy(np_y).cuda().long()
            loss = sigmoid_focal_loss(x, y, gamma, alpha, None, 'mean')
            loss.backward()
            assert np.allclose(loss.data.cpu().numpy(), output[0], 0.01)
            assert np.allclose(x.grad.data.cpu(), np_x_grad, 0.01)

    def _test_grad_softmax(self, dtype=torch.float):
        # Numerical gradcheck of the module wrapper.
        if not torch.cuda.is_available():
            return
        from mmcv.ops import SoftmaxFocalLoss
        alpha = 0.25
        gamma = 2.0
        for case in inputs:
            np_x = np.array(case[0])
            np_y = np.array(case[1])
            x = torch.from_numpy(np_x).cuda().type(dtype)
            x.requires_grad_()
            y = torch.from_numpy(np_y).cuda().long()
            floss = SoftmaxFocalLoss(gamma, alpha)
            if _USING_PARROTS:
                # gradcheck not exercised under parrots.
                pass
            else:
                gradcheck(floss, (x, y), eps=0.01, atol=0.01)

    def _test_grad_sigmoid(self, dtype=torch.float):
        if not torch.cuda.is_available():
            return
        from mmcv.ops import SigmoidFocalLoss
        alpha = 0.25
        gamma = 2.0
        for case in inputs:
            np_x = np.array(case[0])
            np_y = np.array(case[1])
            x = torch.from_numpy(np_x).cuda().type(dtype)
            x.requires_grad_()
            y = torch.from_numpy(np_y).cuda().long()
            floss = SigmoidFocalLoss(gamma, alpha)
            if _USING_PARROTS:
                pass
            else:
                gradcheck(floss, (x, y), eps=0.01, atol=0.01)

    def test_softmax_float(self):
        self._test_softmax(dtype=torch.float)

    def test_softmax_half(self):
        self._test_softmax(dtype=torch.half)

    def test_sigmoid_float(self):
        self._test_sigmoid(dtype=torch.float)

    def test_sigmoid_half(self):
        self._test_sigmoid(dtype=torch.half)

    def test_grad_softmax_float(self):
        self._test_grad_softmax(dtype=torch.float)

    def test_grad_sigmoid_float(self):
        self._test_grad_sigmoid(dtype=torch.float)
@pytest.mark.skipif(
    not torch.cuda.is_available(), reason='requires CUDA support')
def test_fps():
    """Furthest point sampling should pick the expected indices."""
    points = torch.tensor(
        [[[-0.2748, 1.0020, -1.1674],
          [0.1015, 1.3952, -1.2681],
          [-0.8070, 2.4137, -0.5845],
          [-1.0001, 2.1982, -0.5859],
          [0.3841, 1.8983, -0.7431]],
         [[-1.0696, 3.0758, -0.1899],
          [-0.2559, 3.5521, -0.1402],
          [0.8164, 4.0081, -0.1839],
          [-1.1000, 3.0213, -0.8205],
          [-0.0518, 3.7251, -0.3950]]]).cuda()
    sampled_idx = furthest_point_sample(points, 3)
    expected_idx = torch.tensor([[0, 2, 4], [0, 2, 1]]).cuda()
    assert torch.all(sampled_idx == expected_idx)
@pytest.mark.skipif(
    not torch.cuda.is_available(), reason='requires CUDA support')
def test_fps_with_dist():
    """Test furthest_point_sample_with_dist on distance-matrix input.

    Covers a small hand-written point cloud plus a larger regression
    fixture loaded from disk.

    Fix: removed the redundant in-function ``import numpy as np`` —
    ``np`` is already imported at module level (it is used without a
    local import throughout this file).
    """
    xyz = torch.tensor(
        [[[-0.2748, 1.0020, -1.1674],
          [0.1015, 1.3952, -1.2681],
          [-0.8070, 2.4137, -0.5845],
          [-1.0001, 2.1982, -0.5859],
          [0.3841, 1.8983, -0.7431]],
         [[-1.0696, 3.0758, -0.1899],
          [-0.2559, 3.5521, -0.1402],
          [0.8164, 4.0081, -0.1839],
          [-1.1000, 3.0213, -0.8205],
          [-0.0518, 3.7251, -0.3950]]]).cuda()
    expected_idx = torch.tensor([[0, 2, 4], [0, 2, 1]]).cuda()

    # Squared pairwise Euclidean distances between all points.
    xyz_square_dist = ((xyz.unsqueeze(dim=1) -
                        xyz.unsqueeze(dim=2))**2).sum(-1)
    idx = furthest_point_sample_with_dist(xyz_square_dist, 3)
    assert torch.all(idx == expected_idx)

    # Larger regression fixture: precomputed distances and indices.
    fps_idx = np.load('tests/data/for_3d_ops/fps_idx.npy')
    features_for_fps_distance = np.load(
        'tests/data/for_3d_ops/features_for_fps_distance.npy')
    expected_idx = torch.from_numpy(fps_idx).cuda()
    features_for_fps_distance = torch.from_numpy(
        features_for_fps_distance).cuda()

    idx = furthest_point_sample_with_dist(features_for_fps_distance, 16)
    assert torch.all(idx == expected_idx)
class TestFusedBiasLeakyReLU(object):
    """First- and second-order gradient checks for FusedBiasLeakyReLU."""

    @classmethod
    def setup_class(cls):
        # Shared random input/bias; only created when CUDA is present,
        # since the op has no CPU implementation.
        if not torch.cuda.is_available():
            return
        cls.input_tensor = torch.randn((2, 2, 2, 2),
                                       requires_grad=True).cuda()
        cls.bias = torch.zeros(2, requires_grad=True).cuda()

    @pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
    def test_gradient(self):
        from mmcv.ops import FusedBiasLeakyReLU
        if _USING_PARROTS:
            # parrots' gradcheck takes delta/pt_atol instead of eps/atol.
            gradcheck(
                FusedBiasLeakyReLU(2).cuda(),
                self.input_tensor,
                delta=0.0001,
                pt_atol=0.001)
        else:
            gradcheck(
                FusedBiasLeakyReLU(2).cuda(),
                self.input_tensor,
                eps=0.0001,
                atol=0.001)

    @pytest.mark.skipif(
        not torch.cuda.is_available() or _USING_PARROTS,
        reason='requires cuda')
    def test_gradgradient(self):
        # Second-order check; parrots has no gradgradcheck equivalent.
        from mmcv.ops import FusedBiasLeakyReLU
        gradgradcheck(
            FusedBiasLeakyReLU(2).cuda(),
            self.input_tensor,
            eps=0.0001,
            atol=0.001)
@pytest.mark.skipif(
    not torch.cuda.is_available(), reason='requires CUDA support')
def test_gather_points():
    """gather_points should select feature columns by index per batch."""
    # (B=2, C=3, N=10) feature tensor.
    features = torch.tensor(
        [[[-1.6095, -0.1029, -0.8876, -1.2447, -2.4031, 0.3708, -1.1586,
           -1.4967, -0.48, 0.2252],
          [1.9138, 3.4979, 1.6854, 1.5631, 3.6776, 3.1154, 2.1705, 2.5221,
           2.0411, 3.1446],
          [-1.4173, 0.3073, -1.4339, -1.434, -1.277, -0.2867, -1.4162,
           -1.4044, -1.4245, -1.4074]],
         [[0.216, 0.0842, 0.3661, -0.2749, -0.4909, -0.6066, -0.8773,
           -0.0745, -0.9496, 0.1434],
          [1.3644, 1.8087, 1.6855, 1.9563, 1.2746, 1.9662, 0.9566, 1.8778,
           1.1437, 1.3639],
          [-0.7172, 0.1692, 0.2241, 0.0721, -0.754, 0.0462, -0.6227,
           0.3223, -0.6944, -0.5294]]]).cuda()
    # (B=2, M=6) column indices to gather from each batch.
    idx = torch.tensor([[0, 1, 4, 0, 0, 0], [0, 5, 6, 0, 0, 0]]).int().cuda()

    output = gather_points(features, idx)
    # Each expected column is features[b, :, idx[b, m]].
    expected_output = torch.tensor(
        [[[-1.6095, -0.1029, -2.4031, -1.6095, -1.6095, -1.6095],
          [1.9138, 3.4979, 3.6776, 1.9138, 1.9138, 1.9138],
          [-1.4173, 0.3073, -1.277, -1.4173, -1.4173, -1.4173]],
         [[0.216, -0.6066, -0.8773, 0.216, 0.216, 0.216],
          [1.3644, 1.9662, 0.9566, 1.3644, 1.3644, 1.3644],
          [-0.7172, 0.0462, -0.6227, -0.7172, -0.7172, -0.7172]]]).cuda()

    assert torch.allclose(output, expected_output)
@pytest.mark.skipif(
    not torch.cuda.is_available(), reason='requires CUDA support')
def test_grouping_points():
    """grouping_operation should gather feature columns per group index.

    Fix: the features tensor local was misspelled ``festures``; renamed
    to ``features`` (local-only rename, no behavior change).
    """
    # (B=2, npoint=6, nsample=3) group indices into the N=10 columns.
    idx = torch.tensor([[[0, 0, 0], [3, 3, 3], [8, 8, 8], [0, 0, 0],
                         [0, 0, 0], [0, 0, 0]],
                        [[0, 0, 0], [6, 6, 6], [9, 9, 9], [0, 0, 0],
                         [0, 0, 0], [0, 0, 0]]]).int().cuda()
    # (B=2, C=3, N=10) feature tensor.
    features = torch.tensor(
        [[[0.5798, -0.7981, -0.928, -1.3311, 1.3687, 0.9277, -0.4164,
           -1.8274, 0.9268, 0.8414],
          [5.4247, 1.5113, 2.3944, 1.474, 5.03, 5.103, 1.936, 2.1939,
           2.1581, 3.4666],
          [-1.6266, -1.0281, -1.0393, -1.6931, -1.3982, -0.5732, -1.083,
           -1.7561, -1.6786, -1.6967]],
         [[-0.038, -0.188, -1.5724, 0.6905, -0.319, 0.7798, -0.3693,
           -0.9457, -0.2942, -1.8527],
          [1.1773, 1.5009, 2.6399, 5.9242, 1.0962, 2.7346, 6.0865, 1.5555,
           4.3303, 2.8229],
          [-0.6646, -0.687, -0.1125, -0.2224, -0.3445, -1.4049, 0.499,
           -0.7037, -0.9924, 0.0386]]]).cuda()

    output = grouping_operation(features, idx)
    # expected[b, c, p, s] == features[b, c, idx[b, p, s]].
    expected_output = torch.tensor(
        [[[[0.5798, 0.5798, 0.5798], [-1.3311, -1.3311, -1.3311],
           [0.9268, 0.9268, 0.9268], [0.5798, 0.5798, 0.5798],
           [0.5798, 0.5798, 0.5798], [0.5798, 0.5798, 0.5798]],
          [[5.4247, 5.4247, 5.4247], [1.474, 1.474, 1.474],
           [2.1581, 2.1581, 2.1581], [5.4247, 5.4247, 5.4247],
           [5.4247, 5.4247, 5.4247], [5.4247, 5.4247, 5.4247]],
          [[-1.6266, -1.6266, -1.6266], [-1.6931, -1.6931, -1.6931],
           [-1.6786, -1.6786, -1.6786], [-1.6266, -1.6266, -1.6266],
           [-1.6266, -1.6266, -1.6266], [-1.6266, -1.6266, -1.6266]]],
         [[[-0.038, -0.038, -0.038], [-0.3693, -0.3693, -0.3693],
           [-1.8527, -1.8527, -1.8527], [-0.038, -0.038, -0.038],
           [-0.038, -0.038, -0.038], [-0.038, -0.038, -0.038]],
          [[1.1773, 1.1773, 1.1773], [6.0865, 6.0865, 6.0865],
           [2.8229, 2.8229, 2.8229], [1.1773, 1.1773, 1.1773],
           [1.1773, 1.1773, 1.1773], [1.1773, 1.1773, 1.1773]],
          [[-0.6646, -0.6646, -0.6646], [0.499, 0.499, 0.499],
           [0.0386, 0.0386, 0.0386], [-0.6646, -0.6646, -0.6646],
           [-0.6646, -0.6646, -0.6646],
           [-0.6646, -0.6646, -0.6646]]]]).cuda()

    assert torch.allclose(output, expected_output)
class TestInfo(object):

    def test_info(self):
        """Compiler and CUDA version strings should be retrievable."""
        if not torch.cuda.is_available():
            return
        from mmcv.ops import get_compiler_version, get_compiling_cuda_version
        compiler_version = get_compiler_version()
        compiling_cuda_version = get_compiling_cuda_version()
        assert compiler_version is not None
        assert compiling_cuda_version is not None
@pytest.mark.skipif(
    not torch.cuda.is_available(), reason='requires CUDA support')
def test_boxes_iou_bev():
    """boxes_iou_bev should reproduce the precomputed IoU matrix."""
    np_boxes1 = np.asarray(
        [[1.0, 1.0, 3.0, 4.0, 0.5], [2.0, 2.0, 3.0, 4.0, 0.6],
         [7.0, 7.0, 8.0, 8.0, 0.4]],
        dtype=np.float32)
    np_boxes2 = np.asarray(
        [[0.0, 2.0, 2.0, 5.0, 0.3], [2.0, 1.0, 3.0, 3.0, 0.5],
         [5.0, 5.0, 6.0, 7.0, 0.4]],
        dtype=np.float32)
    np_expect_ious = np.asarray(
        [[0.2621, 0.2948, 0.0], [0.0549, 0.1587, 0.0], [0.0, 0.0, 0.0]],
        dtype=np.float32)

    ious = boxes_iou_bev(
        torch.from_numpy(np_boxes1).cuda(),
        torch.from_numpy(np_boxes2).cuda())
    assert np.allclose(ious.cpu().numpy(), np_expect_ious, atol=1e-4)
@pytest.mark.skipif(
    not torch.cuda.is_available(), reason='requires CUDA support')
def test_nms_bev():
    """nms_bev should keep the expected boxes in score order."""
    np_boxes = np.array(
        [[6.0, 3.0, 8.0, 7.0, 2.0], [3.0, 6.0, 9.0, 11.0, 1.0],
         [3.0, 7.0, 10.0, 12.0, 1.0], [1.0, 4.0, 13.0, 7.0, 3.0]],
        dtype=np.float32)
    np_scores = np.array([0.6, 0.9, 0.7, 0.2], dtype=np.float32)
    np_inds = np.array([1, 0, 3])

    kept = nms_bev(
        torch.from_numpy(np_boxes).cuda(),
        torch.from_numpy(np_scores).cuda(),
        thresh=0.3)
    assert np.allclose(kept.cpu().numpy(), np_inds)
@pytest.mark.skipif(
    not torch.cuda.is_available(), reason='requires CUDA support')
def test_nms_normal_bev():
    """nms_normal_bev should keep the expected boxes in score order."""
    np_boxes = np.array(
        [[6.0, 3.0, 8.0, 7.0, 2.0], [3.0, 6.0, 9.0, 11.0, 1.0],
         [3.0, 7.0, 10.0, 12.0, 1.0], [1.0, 4.0, 13.0, 7.0, 3.0]],
        dtype=np.float32)
    np_scores = np.array([0.6, 0.9, 0.7, 0.2], dtype=np.float32)
    np_inds = np.array([1, 0, 3])

    kept = nms_normal_bev(
        torch.from_numpy(np_boxes).cuda(),
        torch.from_numpy(np_scores).cuda(),
        thresh=0.3)
    assert np.allclose(kept.cpu().numpy(), np_inds)
@pytest.mark.skipif(
    not torch.cuda.is_available(), reason='requires CUDA support')
def test_knn():
    """knn should match a brute-force nearest-neighbour search.

    Checks (B, N, 3) input, transposed (B, 3, N) input, and the
    self-query case (xyz against itself).
    """
    new_xyz = torch.tensor(
        [[[-0.074, 1.3147, -1.3625], [-2.2769, 2.7817, -0.2334],
          [-0.4003, 2.4666, -0.5116], [-0.074, 1.3147, -1.3625],
          [-0.074, 1.3147, -1.3625]],
         [[-2.0289, 2.4952, -0.1708], [-2.0668, 6.0278, -0.4875],
          [0.4066, 1.4211, -0.2947], [-2.0289, 2.4952, -0.1708],
          [-2.0289, 2.4952, -0.1708]]]).cuda()

    xyz = torch.tensor(
        [[[-0.074, 1.3147, -1.3625], [0.5555, 1.0399, -1.3634],
          [-0.4003, 2.4666, -0.5116], [-0.5251, 2.4379, -0.8466],
          [-0.9691, 1.1418, -1.3733], [-0.2232, 0.9561, -1.3626],
          [-2.2769, 2.7817, -0.2334], [-0.2822, 1.3192, -1.3645],
          [0.1533, 1.5024, -1.0432], [0.4917, 1.1529, -1.3496]],
         [[-2.0289, 2.4952, -0.1708], [-0.7188, 0.9956, -0.5096],
          [-2.0668, 6.0278, -0.4875], [-1.9304, 3.3092, 0.661],
          [0.0949, 1.4332, 0.314], [-1.2879, 2.0008, -0.7791],
          [-0.7252, 0.9611, -0.6371], [0.4066, 1.4211, -0.2947],
          [0.322, 1.4447, 0.3548], [-0.9744, 2.3856, -1.2]]]).cuda()

    idx = knn(5, xyz, new_xyz)
    # Brute-force reference: full pairwise squared distances, then the
    # 5 smallest per query (topk with largest=False).
    new_xyz_ = new_xyz.unsqueeze(2).repeat(1, 1, xyz.shape[1], 1)
    xyz_ = xyz.unsqueeze(1).repeat(1, new_xyz.shape[1], 1, 1)
    dist = ((new_xyz_ - xyz_) * (new_xyz_ - xyz_)).sum(-1)
    expected_idx = dist.topk(k=5, dim=2, largest=False)[1].transpose(2, 1)
    assert torch.all(idx == expected_idx)

    # Same query with channel-first (B, 3, N) layout and transposed=True.
    idx = knn(5,
              xyz.transpose(1, 2).contiguous(),
              new_xyz.transpose(1, 2).contiguous(), True)
    assert torch.all(idx == expected_idx)

    # Self-query: each point's 5 nearest neighbours within xyz itself.
    idx = knn(5, xyz, xyz)
    xyz_ = xyz.unsqueeze(2).repeat(1, 1, xyz.shape[1], 1)
    xyz__ = xyz.unsqueeze(1).repeat(1, xyz.shape[1], 1, 1)
    dist = ((xyz_ - xyz__) * (xyz_ - xyz__)).sum(-1)
    expected_idx = dist.topk(k=5, dim=2, largest=False)[1].transpose(2, 1)
    assert torch.all(idx == expected_idx)
class TestMaskedConv2d(object):

    def test_masked_conv2d(self):
        """MaskedConv2d forward should run on CUDA and yield an output."""
        if not torch.cuda.is_available():
            return
        from mmcv.ops import MaskedConv2d
        feat = torch.randn(1, 3, 16, 16, requires_grad=True, device='cuda')
        mask = torch.randn(1, 16, 16, requires_grad=True, device='cuda')
        masked_conv = MaskedConv2d(3, 3, 3).cuda()
        out = masked_conv(feat, mask)
        assert out is not None
def test_sum_cell():
    """SumCell output size should follow out_size, defaulting to input x."""
    feat_x = torch.randn([2, 256, 32, 32])
    feat_y = torch.randn([2, 256, 16, 16])
    cell = SumCell(256, 256)
    # Output can be forced to either input's spatial size.
    for reference in (feat_x, feat_y):
        merged = cell(feat_x, feat_y, out_size=reference.shape[-2:])
        assert merged.size() == reference.size()
    # Without out_size, the first input's size wins.
    assert cell(feat_x, feat_y).size() == feat_x.size()
def test_concat_cell():
    """ConcatCell output size should follow out_size, defaulting to x."""
    feat_x = torch.randn([2, 256, 32, 32])
    feat_y = torch.randn([2, 256, 16, 16])
    cell = ConcatCell(256, 256)
    # Output can be forced to either input's spatial size.
    for reference in (feat_x, feat_y):
        merged = cell(feat_x, feat_y, out_size=reference.shape[-2:])
        assert merged.size() == reference.size()
    # Without out_size, the first input's size wins.
    assert cell(feat_x, feat_y).size() == feat_x.size()
def test_global_pool_cell():
    """GlobalPoolingCell should preserve size with and without out conv."""
    feat_x = torch.randn([2, 256, 32, 32])
    feat_y = torch.randn([2, 256, 32, 32])

    # Without an output conv.
    cell = GlobalPoolingCell(with_out_conv=False)
    merged = cell(feat_x, feat_y, out_size=feat_x.shape[-2:])
    assert merged.size() == feat_x.size()

    # With an output conv (in/out channels given).
    cell = GlobalPoolingCell(256, 256)
    merged = cell(feat_x, feat_y, out_size=feat_x.shape[-2:])
    assert merged.size() == feat_x.size()
def test_resize_methods():
    """BaseMergeCell._resize should match F.interpolate / max_pool2d."""
    feat = torch.randn([2, 256, 128, 128])

    # Identity and upsampling resizes should agree with F.interpolate
    # for both supported modes.
    for mode in ['nearest', 'bilinear']:
        cell = BaseMergeCell(upsample_mode=mode)
        for size in [(128, 128), (256, 256)]:
            resized = cell._resize(feat, size)
            reference = F.interpolate(feat, size=size, mode=mode)
            assert resized.equal(reference)

    # Downsampling falls back to max pooling.
    size = (64, 64)
    cell = BaseMergeCell()
    resized = cell._resize(feat, size)
    kernel = feat.shape[-1] // size[-1]
    pooled = F.max_pool2d(feat, kernel_size=kernel, stride=kernel)
    assert (resized == pooled).all()
@pytest.mark.skipif(
    not torch.cuda.is_available(), reason='requires CUDA support')
def test_min_area_polygons():
    """min_area_polygons should match the precomputed fixture output."""
    pointsets = torch.from_numpy(np_pointsets).cuda().float()
    polygons = min_area_polygons(pointsets)
    assert np.allclose(polygons.cpu().numpy(), expected_polygons, atol=1e-4)
class TestMdconv(object):
    """Forward/backward tests for ModulatedDeformConv2dPack.

    ``input_t``, ``output_t``, ``input_grad``, ``dcn_w_grad``,
    ``dcn_offset_w_grad`` and ``dcn_offset_b_grad`` are module-level
    reference fixtures defined elsewhere in this file.
    """

    def _test_mdconv(self, dtype=torch.float, device='cuda'):
        # Forward + backward against precomputed reference values.
        if not torch.cuda.is_available() and device == 'cuda':
            pytest.skip('test requires GPU')
        from mmcv.ops import ModulatedDeformConv2dPack
        input = torch.tensor(input_t, dtype=dtype, device=device)
        input.requires_grad = True

        dcn = ModulatedDeformConv2dPack(
            1,
            1,
            kernel_size=(2, 2),
            stride=1,
            padding=1,
            deform_groups=1,
            bias=False)

        if device == 'cuda':
            dcn.cuda()

        # Fixed weights so the reference outputs are reproducible.
        dcn.weight.data.fill_(1.)
        dcn.type(dtype)
        output = dcn(input)
        output.sum().backward()
        assert numpy.allclose(output.cpu().detach().numpy(), output_t, 1e-2)
        assert numpy.allclose(input.grad.cpu().detach().numpy(), input_grad,
                              1e-2)
        assert numpy.allclose(dcn.weight.grad.cpu().detach().numpy(),
                              dcn_w_grad, 1e-2)
        assert numpy.allclose(
            dcn.conv_offset.weight.grad.cpu().detach().numpy(),
            dcn_offset_w_grad, 1e-2)
        assert numpy.allclose(dcn.conv_offset.bias.grad.cpu().detach().numpy(),
                              dcn_offset_b_grad, 1e-2)

    def _test_amp_mdconv(self, input_dtype=torch.float):
        """Test mdconv under torch.cuda.amp (released in pytorch 1.6.0).

        The input may be torch.float or torch.half; with amp the model
        dtype is NOT set manually.

        Args:
            input_dtype: torch.float or torch.half.
        """
        if not torch.cuda.is_available():
            return
        from mmcv.ops import ModulatedDeformConv2dPack
        input = torch.tensor(input_t).cuda().type(input_dtype)
        input.requires_grad = True

        dcn = ModulatedDeformConv2dPack(
            1,
            1,
            kernel_size=(2, 2),
            stride=1,
            padding=1,
            deform_groups=1,
            bias=False).cuda()
        dcn.weight.data.fill_(1.)
        output = dcn(input)
        output.sum().backward()
        assert numpy.allclose(output.cpu().detach().numpy(), output_t, 1e-2)
        assert numpy.allclose(input.grad.cpu().detach().numpy(), input_grad,
                              1e-2)
        assert numpy.allclose(dcn.weight.grad.cpu().detach().numpy(),
                              dcn_w_grad, 1e-2)
        assert numpy.allclose(
            dcn.conv_offset.weight.grad.cpu().detach().numpy(),
            dcn_offset_w_grad, 1e-2)
        assert numpy.allclose(dcn.conv_offset.bias.grad.cpu().detach().numpy(),
                              dcn_offset_b_grad, 1e-2)

    def test_mdconv(self):
        # CPU and CUDA paths across dtypes.
        self._test_mdconv(torch.double, device='cpu')
        self._test_mdconv(torch.float, device='cpu')
        self._test_mdconv(torch.double)
        self._test_mdconv(torch.float)
        self._test_mdconv(torch.half)

        # Test amp when torch version >= '1.6.0' (no amp in parrots).
        if (TORCH_VERSION != 'parrots'
                and digit_version(TORCH_VERSION) >= digit_version('1.6.0')):
            with autocast(enabled=True):
                self._test_amp_mdconv(torch.float)
                self._test_amp_mdconv(torch.half)
@pytest.mark.parametrize('device_type', [
    'cpu',
    pytest.param(
        'cuda:0',
        marks=pytest.mark.skipif(
            not torch.cuda.is_available(), reason='requires CUDA support'))
])
def test_multiscale_deformable_attention(device_type):
    """MSDA should reject bad head counts and run a forward pass."""
    # embed_dims must be divisible by num_heads.
    with pytest.raises(ValueError):
        MultiScaleDeformableAttention(embed_dims=256, num_heads=7)

    device = torch.device(device_type)
    msda = MultiScaleDeformableAttention(
        embed_dims=3, num_levels=2, num_heads=3)
    msda.init_weights()

    num_query, bs, embed_dims = 5, 1, 3
    query = torch.rand(num_query, bs, embed_dims).to(device)
    key = torch.rand(num_query, bs, embed_dims).to(device)
    spatial_shapes = torch.Tensor([[2, 2], [1, 1]]).long().to(device)
    level_start_index = torch.Tensor([0, 4]).long().to(device)
    reference_points = torch.rand(bs, num_query, 2, 2).to(device)

    msda.to(device)
    msda(
        query,
        key,
        key,
        reference_points=reference_points,
        spatial_shapes=spatial_shapes,
        level_start_index=level_start_index)
def test_forward_multi_scale_deformable_attn_pytorch():
    """Smoke-test the pure-pytorch MSDA reference implementation."""
    N, M, D = 1, 2, 2
    Lq, L, P = 2, 2, 2
    shapes = torch.as_tensor([(6, 4), (3, 2)], dtype=torch.long)
    S = sum((H * W).item() for H, W in shapes)

    torch.manual_seed(3)
    value = torch.rand(N, S, M, D) * 0.01
    sampling_locations = torch.rand(N, Lq, M, L, P, 2)
    # Normalize so weights sum to 1 over levels and points.
    attention_weights = torch.rand(N, Lq, M, L, P) + 1e-5
    attention_weights /= attention_weights.sum(
        -1, keepdim=True).sum(-2, keepdim=True)

    multi_scale_deformable_attn_pytorch(
        value.double(), shapes, sampling_locations.double(),
        attention_weights.double()).detach()
@pytest.mark.skipif(
    not torch.cuda.is_available(), reason='requires CUDA support')
def test_forward_equal_with_pytorch_double():
    """CUDA MSDA kernel must match the pytorch reference (float64)."""
    N, M, D = 1, 2, 2
    Lq, L, P = 2, 2, 2
    shapes = torch.as_tensor([(6, 4), (3, 2)], dtype=torch.long).cuda()
    level_start_index = torch.cat(
        (shapes.new_zeros((1, )), shapes.prod(1).cumsum(0)[:-1]))
    S = sum((H * W).item() for H, W in shapes)

    torch.manual_seed(3)
    value = torch.rand(N, S, M, D).cuda() * 0.01
    sampling_locations = torch.rand(N, Lq, M, L, P, 2).cuda()
    attention_weights = torch.rand(N, Lq, M, L, P).cuda() + 1e-5
    attention_weights /= attention_weights.sum(
        -1, keepdim=True).sum(-2, keepdim=True)
    im2col_step = 2

    output_pytorch = multi_scale_deformable_attn_pytorch(
        value.double(), shapes, sampling_locations.double(),
        attention_weights.double()).detach().cpu()
    output_cuda = MultiScaleDeformableAttnFunction.apply(
        value.double(), shapes, level_start_index,
        sampling_locations.double(), attention_weights.double(),
        im2col_step).detach().cpu()

    assert torch.allclose(output_cuda, output_pytorch)
    abs_err = (output_cuda - output_pytorch).abs()
    assert abs_err.max() < 1e-18
    assert (abs_err / output_pytorch.abs()).max() < 1e-15
@pytest.mark.skipif(
    not torch.cuda.is_available(), reason='requires CUDA support')
def test_forward_equal_with_pytorch_float():
    """CUDA MSDA kernel must match the pytorch reference (float32)."""
    N, M, D = 1, 2, 2
    Lq, L, P = 2, 2, 2
    shapes = torch.as_tensor([(6, 4), (3, 2)], dtype=torch.long).cuda()
    level_start_index = torch.cat(
        (shapes.new_zeros((1, )), shapes.prod(1).cumsum(0)[:-1]))
    S = sum((H * W).item() for H, W in shapes)

    torch.manual_seed(3)
    value = torch.rand(N, S, M, D).cuda() * 0.01
    sampling_locations = torch.rand(N, Lq, M, L, P, 2).cuda()
    attention_weights = torch.rand(N, Lq, M, L, P).cuda() + 1e-5
    attention_weights /= attention_weights.sum(
        -1, keepdim=True).sum(-2, keepdim=True)
    im2col_step = 2

    output_pytorch = multi_scale_deformable_attn_pytorch(
        value, shapes, sampling_locations, attention_weights).detach().cpu()
    output_cuda = MultiScaleDeformableAttnFunction.apply(
        value, shapes, level_start_index, sampling_locations,
        attention_weights, im2col_step).detach().cpu()

    # Looser tolerances than the double test, matching fp32 precision.
    assert torch.allclose(output_cuda, output_pytorch, rtol=1e-2, atol=1e-3)
    abs_err = (output_cuda - output_pytorch).abs()
    assert abs_err.max() < 1e-9
    assert (abs_err / output_pytorch.abs()).max() < 1e-6
@pytest.mark.skipif(
    not torch.cuda.is_available(), reason='requires CUDA support')
@pytest.mark.parametrize('channels', [4, 30, 32, 64, 71, 1025])
def test_gradient_numerical(channels,
                            grad_value=True,
                            grad_sampling_loc=True,
                            grad_attn_weight=True):
    """Numerical gradcheck of the CUDA MSDA op over channel sizes."""
    N, M, _ = 1, 2, 2
    Lq, L, P = 2, 2, 2
    shapes = torch.as_tensor([(3, 2), (2, 1)], dtype=torch.long).cuda()
    # Flattened start offset of each level inside `value`.
    level_start_index = torch.cat(
        (shapes.new_zeros((1, )), shapes.prod(1).cumsum(0)[:-1]))
    S = sum([(H * W).item() for H, W in shapes])

    value = torch.rand(N, S, M, channels).cuda() * 0.01
    sampling_locations = torch.rand(N, Lq, M, L, P, 2).cuda()
    attention_weights = torch.rand(N, Lq, M, L, P).cuda() + 1e-05
    # Normalize so weights sum to 1 over levels and points.
    attention_weights /= attention_weights.sum(
        -1, keepdim=True).sum(-2, keepdim=True)
    im2col_step = 2
    func = MultiScaleDeformableAttnFunction.apply

    # Which inputs get gradients checked (defaults: all three).
    value.requires_grad = grad_value
    sampling_locations.requires_grad = grad_sampling_loc
    attention_weights.requires_grad = grad_attn_weight

    if _USING_PARROTS:
        # parrots excludes the integer tensors via `no_grads`.
        assert gradcheck(
            func, (value.double(), shapes, level_start_index,
                   sampling_locations.double(), attention_weights.double(),
                   im2col_step),
            no_grads=[shapes, level_start_index])
    else:
        assert gradcheck(
            func, (value.double(), shapes, level_start_index,
                   sampling_locations.double(), attention_weights.double(),
                   im2col_step))
@pytest.mark.skipif(
    not torch.cuda.is_available(), reason='requires CUDA support')
def test_points_in_polygons():
    """points_in_polygons should flag which polygon contains each point."""
    np_points = np.array([[300.0, 300.0], [400.0, 400.0], [100.0, 100],
                          [300, 250], [100, 0]])
    np_polygons = np.array(
        [[200.0, 200.0, 400.0, 400.0, 500.0, 200.0, 400.0, 100.0],
         [400.0, 400.0, 500.0, 500.0, 600.0, 300.0, 500.0, 200.0],
         [300.0, 300.0, 600.0, 700.0, 700.0, 700.0, 700.0, 100.0]])
    # One row per point, one column per polygon; 1.0 means inside.
    np_expected = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 1.0],
                            [0.0, 0.0, 0.0], [1.0, 0.0, 0.0],
                            [0.0, 0.0, 0.0]])

    points = torch.from_numpy(np_points).cuda().float()
    polygons = torch.from_numpy(np_polygons).cuda().float()
    expected_output = torch.from_numpy(np_expected).cuda().float()
    assert torch.allclose(
        points_in_polygons(points, polygons), expected_output, 1e-3)
class Loss(nn.Module):
    """Toy loss: mean of the signed element-wise difference."""

    def __init__(self):
        super().__init__()

    def forward(self, input, target):
        # Flatten both tensors, then average the raw difference.
        diff = input.view(-1) - target.view(-1)
        return diff.mean()
class TestPSAMask(object):
    """PSAMask forward/backward against binary reference fixtures.

    Each test runs the op on CPU first, then repeats on CUDA, comparing
    both against the same precomputed output file.
    """

    def test_psa_mask_collect(self):
        if not torch.cuda.is_available():
            return
        from mmcv.ops import PSAMask
        test_loss = Loss()

        # Reference input/output pair stored as flat float32 binaries.
        input = np.fromfile(
            'tests/data/for_psa_mask/psa_input.bin', dtype=np.float32)
        output_collect = np.fromfile(
            'tests/data/for_psa_mask/psa_output_collect.bin',
            dtype=np.float32)

        input = input.reshape((4, 16, 8, 8))
        output_collect = output_collect.reshape((4, 64, 8, 8))
        label = torch.ones((4, 64, 8, 8))

        input = torch.FloatTensor(input)
        input.requires_grad = True

        psamask_collect = PSAMask('collect', (4, 4))

        # CPU forward + backward.
        test_output = psamask_collect(input)
        loss = test_loss(test_output, label)
        loss.backward()
        test_output = test_output.detach().numpy()
        assert np.allclose(test_output, output_collect)
        assert test_output.shape == output_collect.shape

        psamask_collect.cuda()
        input = input.cuda()
        label = label.cuda()

        # CUDA forward + backward must match the same reference.
        test_output = psamask_collect(input)
        loss = test_loss(test_output, label)
        loss.backward()
        test_output = test_output.detach().cpu().numpy()
        assert np.allclose(test_output, output_collect)
        assert test_output.shape == output_collect.shape

    def test_psa_mask_distribute(self):
        if not torch.cuda.is_available():
            return
        from mmcv.ops import PSAMask
        test_loss = Loss()

        input = np.fromfile(
            'tests/data/for_psa_mask/psa_input.bin', dtype=np.float32)
        output_distribute = np.fromfile(
            'tests/data/for_psa_mask/psa_output_distribute.bin',
            dtype=np.float32)

        input = input.reshape((4, 16, 8, 8))
        output_distribute = output_distribute.reshape((4, 64, 8, 8))
        label = torch.ones((4, 64, 8, 8))

        input = torch.FloatTensor(input)
        input.requires_grad = True

        psamask_distribute = PSAMask('distribute', (4, 4))

        # CPU forward + backward.
        test_output = psamask_distribute(input)
        loss = test_loss(test_output, label)
        loss.backward()
        test_output = test_output.detach().numpy()
        assert np.allclose(test_output, output_distribute)
        assert test_output.shape == output_distribute.shape

        psamask_distribute.cuda()
        input = input.cuda()
        label = label.cuda()

        # CUDA forward + backward must match the same reference.
        test_output = psamask_distribute(input)
        loss = test_loss(test_output, label)
        loss.backward()
        test_output = test_output.detach().cpu().numpy()
        assert np.allclose(test_output, output_distribute)
        assert test_output.shape == output_distribute.shape
@pytest.mark.skipif(
    not torch.cuda.is_available(), reason='requires CUDA support')
def test_roialign_rotated_gradcheck():
    """Numerical gradient check for RiRoIAlignRotated."""
    x = torch.tensor(
        np_feature, dtype=torch.float, device='cuda', requires_grad=True)
    rois = torch.tensor(np_rois, dtype=torch.float, device='cuda')
    pool = RiRoIAlignRotated((pool_h, pool_w), spatial_scale, num_samples,
                             num_orientations, clockwise)
    gradcheck(pool, (x, rois), eps=1e-3, atol=1e-3)
@pytest.mark.skipif(
    not torch.cuda.is_available(), reason='requires CUDA support')
def test_roialign_rotated_allclose():
    """RiRoIAlignRotated forward/backward should match the fixtures."""
    x = torch.tensor(
        np_feature, dtype=torch.float, device='cuda', requires_grad=True)
    rois = torch.tensor(np_rois, dtype=torch.float, device='cuda')
    pool = RiRoIAlignRotated((pool_h, pool_w), spatial_scale, num_samples,
                             num_orientations, clockwise)

    result = pool(x, rois)
    result.backward(torch.ones_like(result))
    assert np.allclose(
        result.data.type(torch.float).cpu().numpy(), expect_output,
        atol=1e-3)
    assert np.allclose(
        x.grad.data.type(torch.float).cpu().numpy(), expect_grad, atol=1e-3)
def _test_roialign_gradcheck(device, dtype):
    """Shared gradcheck helper for RoIAlign across devices and dtypes."""
    if device == 'cuda' and not torch.cuda.is_available():
        pytest.skip('test requires GPU')
    try:
        from mmcv.ops import RoIAlign
    except ModuleNotFoundError:
        pytest.skip('RoIAlign op is not successfully compiled')
    if dtype is torch.half:
        pytest.skip('grad check does not support fp16')

    for case in inputs:
        feat = torch.tensor(
            np.array(case[0]), dtype=dtype, device=device, requires_grad=True)
        rois = torch.tensor(np.array(case[1]), dtype=dtype, device=device)
        froipool = RoIAlign((pool_h, pool_w), spatial_scale, sampling_ratio)
        if torch.__version__ == 'parrots':
            # parrots' gradcheck excludes rois via `no_grads`.
            gradcheck(
                froipool, (feat, rois),
                no_grads=[rois],
                delta=1e-5,
                pt_atol=1e-5)
        else:
            gradcheck(froipool, (feat, rois), eps=1e-5, atol=1e-5)