code
stringlengths
17
6.64M
# Tests for mmcv.ops.box_iou_rotated: pairwise and aligned IoU / IoF of
# rotated boxes (cx, cy, w, h, angle) on CPU and CUDA. Expected values are
# precomputed constants checked with atol=1e-4. Negating the last column
# (the angle) and passing clockwise=False must reproduce the same IoUs.
# NOTE(review): this block was flattened onto single physical lines by an
# extraction step; tokens are kept byte-identical pending reformatting.
class TestBoxIoURotated(object): def test_box_iou_rotated_cpu(self): from mmcv.ops import box_iou_rotated np_boxes1 = np.asarray([[1.0, 1.0, 3.0, 4.0, 0.5], [2.0, 2.0, 3.0, 4.0, 0.6], [7.0, 7.0, 8.0, 8.0, 0.4]], dtype=np.float32) np_boxes2 = np.asarray([[0.0, 2.0, 2.0, 5.0, 0.3], [2.0, 1.0, 3.0, 3.0, 0.5], [5.0, 5.0, 6.0, 7.0, 0.4]], dtype=np.float32) np_expect_ious = np.asarray([[0.3708, 0.4351, 0.0], [0.1104, 0.4487, 0.0424], [0.0, 0.0, 0.3622]], dtype=np.float32) np_expect_ious_aligned = np.asarray([0.3708, 0.4487, 0.3622], dtype=np.float32) boxes1 = torch.from_numpy(np_boxes1) boxes2 = torch.from_numpy(np_boxes2) ious = box_iou_rotated(boxes1, boxes2) assert np.allclose(ious.cpu().numpy(), np_expect_ious, atol=0.0001) ious = box_iou_rotated(boxes1, boxes2, aligned=True) assert np.allclose(ious.cpu().numpy(), np_expect_ious_aligned, atol=0.0001) boxes1[(..., (- 1))] *= (- 1) boxes2[(..., (- 1))] *= (- 1) ious = box_iou_rotated(boxes1, boxes2, clockwise=False) assert np.allclose(ious.cpu().numpy(), np_expect_ious, atol=0.0001) ious = box_iou_rotated(boxes1, boxes2, aligned=True, clockwise=False) assert np.allclose(ious.cpu().numpy(), np_expect_ious_aligned, atol=0.0001) @pytest.mark.skipif((not torch.cuda.is_available()), reason='requires CUDA support') def test_box_iou_rotated_cuda(self): from mmcv.ops import box_iou_rotated np_boxes1 = np.asarray([[1.0, 1.0, 3.0, 4.0, 0.5], [2.0, 2.0, 3.0, 4.0, 0.6], [7.0, 7.0, 8.0, 8.0, 0.4]], dtype=np.float32) np_boxes2 = np.asarray([[0.0, 2.0, 2.0, 5.0, 0.3], [2.0, 1.0, 3.0, 3.0, 0.5], [5.0, 5.0, 6.0, 7.0, 0.4]], dtype=np.float32) np_expect_ious = np.asarray([[0.3708, 0.4351, 0.0], [0.1104, 0.4487, 0.0424], [0.0, 0.0, 0.3622]], dtype=np.float32) np_expect_ious_aligned = np.asarray([0.3708, 0.4487, 0.3622], dtype=np.float32) boxes1 = torch.from_numpy(np_boxes1).cuda() boxes2 = torch.from_numpy(np_boxes2).cuda() ious = box_iou_rotated(boxes1, boxes2) assert np.allclose(ious.cpu().numpy(), np_expect_ious, atol=0.0001) ious = 
box_iou_rotated(boxes1, boxes2, aligned=True) assert np.allclose(ious.cpu().numpy(), np_expect_ious_aligned, atol=0.0001) boxes1[(..., (- 1))] *= (- 1) boxes2[(..., (- 1))] *= (- 1) ious = box_iou_rotated(boxes1, boxes2, clockwise=False) assert np.allclose(ious.cpu().numpy(), np_expect_ious, atol=0.0001) ious = box_iou_rotated(boxes1, boxes2, aligned=True, clockwise=False) assert np.allclose(ious.cpu().numpy(), np_expect_ious_aligned, atol=0.0001) def test_box_iou_rotated_iof_cpu(self): from mmcv.ops import box_iou_rotated np_boxes1 = np.asarray([[1.0, 1.0, 3.0, 4.0, 0.5], [2.0, 2.0, 3.0, 4.0, 0.6], [7.0, 7.0, 8.0, 8.0, 0.4]], dtype=np.float32) np_boxes2 = np.asarray([[0.0, 2.0, 2.0, 5.0, 0.3], [2.0, 1.0, 3.0, 3.0, 0.5], [5.0, 5.0, 6.0, 7.0, 0.4]], dtype=np.float32) np_expect_ious = np.asarray([[0.4959, 0.5306, 0.0], [0.1823, 0.542, 0.1832], [0.0, 0.0, 0.4404]], dtype=np.float32) np_expect_ious_aligned = np.asarray([0.4959, 0.542, 0.4404], dtype=np.float32) boxes1 = torch.from_numpy(np_boxes1) boxes2 = torch.from_numpy(np_boxes2) ious = box_iou_rotated(boxes1, boxes2, mode='iof') assert np.allclose(ious.cpu().numpy(), np_expect_ious, atol=0.0001) ious = box_iou_rotated(boxes1, boxes2, mode='iof', aligned=True) assert np.allclose(ious.cpu().numpy(), np_expect_ious_aligned, atol=0.0001) boxes1[(..., (- 1))] *= (- 1) boxes2[(..., (- 1))] *= (- 1) ious = box_iou_rotated(boxes1, boxes2, mode='iof', clockwise=False) assert np.allclose(ious.cpu().numpy(), np_expect_ious, atol=0.0001) ious = box_iou_rotated(boxes1, boxes2, mode='iof', aligned=True, clockwise=False) assert np.allclose(ious.cpu().numpy(), np_expect_ious_aligned, atol=0.0001) @pytest.mark.skipif((not torch.cuda.is_available()), reason='requires CUDA support') def test_box_iou_rotated_iof_cuda(self): from mmcv.ops import box_iou_rotated np_boxes1 = np.asarray([[1.0, 1.0, 3.0, 4.0, 0.5], [2.0, 2.0, 3.0, 4.0, 0.6], [7.0, 7.0, 8.0, 8.0, 0.4]], dtype=np.float32) np_boxes2 = np.asarray([[0.0, 2.0, 2.0, 5.0, 0.3], 
[2.0, 1.0, 3.0, 3.0, 0.5], [5.0, 5.0, 6.0, 7.0, 0.4]], dtype=np.float32) np_expect_ious = np.asarray([[0.4959, 0.5306, 0.0], [0.1823, 0.542, 0.1832], [0.0, 0.0, 0.4404]], dtype=np.float32) np_expect_ious_aligned = np.asarray([0.4959, 0.542, 0.4404], dtype=np.float32) boxes1 = torch.from_numpy(np_boxes1).cuda() boxes2 = torch.from_numpy(np_boxes2).cuda() ious = box_iou_rotated(boxes1, boxes2, mode='iof') assert np.allclose(ious.cpu().numpy(), np_expect_ious, atol=0.0001) ious = box_iou_rotated(boxes1, boxes2, mode='iof', aligned=True) assert np.allclose(ious.cpu().numpy(), np_expect_ious_aligned, atol=0.0001) boxes1[(..., (- 1))] *= (- 1) boxes2[(..., (- 1))] *= (- 1) ious = box_iou_rotated(boxes1, boxes2, mode='iof', clockwise=False) assert np.allclose(ious.cpu().numpy(), np_expect_ious, atol=0.0001) ious = box_iou_rotated(boxes1, boxes2, mode='iof', aligned=True, clockwise=False) assert np.allclose(ious.cpu().numpy(), np_expect_ious_aligned, atol=0.0001)
class TestCarafe(object):
    """Gradient checks for the CARAFE / CARAFENaive upsampling ops.

    Both tests silently return when no GPU is present, since the ops are
    CUDA-only.
    """

    def test_carafe_naive_gradcheck(self):
        if not torch.cuda.is_available():
            return
        from mmcv.ops import CARAFENaive
        # double precision keeps the numerical gradcheck stable
        feat = torch.randn(
            2, 64, 3, 3, requires_grad=True, device='cuda').double()
        mask = torch.randn(
            2, 100, 6, 6, requires_grad=True,
            device='cuda').sigmoid().double()
        gradcheck(CARAFENaive(5, 4, 2), (feat, mask), atol=0.0001, eps=0.0001)

    def test_carafe_gradcheck(self):
        if not torch.cuda.is_available():
            return
        from mmcv.ops import CARAFE
        feat = torch.randn(
            2, 64, 3, 3, requires_grad=True, device='cuda').double()
        mask = torch.randn(
            2, 100, 6, 6, requires_grad=True,
            device='cuda').sigmoid().double()
        gradcheck(CARAFE(5, 4, 2), (feat, mask), atol=0.0001, eps=0.0001)
class Loss(nn.Module):
    """Toy criterion: the mean of the element-wise difference.

    Both tensors are flattened first, so only the total element counts
    need to agree.
    """

    def __init__(self):
        super().__init__()

    def forward(self, input, target):
        diff = input.view(-1) - target.view(-1)
        return diff.mean()
# Regression test for mmcv.ops.CrissCrossAttention: feeds a recorded input
# (1, 32, 45, 45) from tests/data/for_ccattention/, runs a train-mode
# forward + backward through a toy mean-difference Loss against an
# all-ones label, and checks the forward output matches the recorded
# reference exactly (np.allclose with default tolerances). Runs on CUDA
# when available, else CPU.
# NOTE(review): block flattened onto one physical line by an extraction
# step; tokens kept byte-identical pending reformatting.
class TestCrissCrossAttention(object): def test_cc_attention(self): device = torch.device(('cuda:0' if torch.cuda.is_available() else 'cpu')) from mmcv.ops import CrissCrossAttention loss_func = Loss() input = np.fromfile('tests/data/for_ccattention/ccattention_input.bin', dtype=np.float32) output = np.fromfile('tests/data/for_ccattention/ccattention_output.bin', dtype=np.float32) input = input.reshape((1, 32, 45, 45)) output = output.reshape((1, 32, 45, 45)) label = torch.ones((1, 32, 45, 45)) input = torch.FloatTensor(input) output = torch.FloatTensor(output) input.requires_grad = True shape = input.shape channel = shape[1] cca = CrissCrossAttention(channel) cca.to(device) input = input.to(device) label = label.to(device) cca.train() test_output = cca(input) test_loss = loss_func(test_output, label) test_loss.backward() test_output = test_output.detach().cpu().numpy() output = output.numpy() assert np.allclose(test_output, output) assert (test_output.shape == shape)
# Tests mmcv.ops.contour_expand: expands labelled kernel regions (labels 1
# and 2) into the surrounding text mask and checks the expanded 10x10
# label map against a hand-written ground truth. Exercises both the
# numpy-array and torch-tensor input paths.
# NOTE(review): block flattened onto one physical line by an extraction
# step; tokens kept byte-identical pending reformatting.
def test_contour_expand(): from mmcv.ops import contour_expand np_internal_kernel_label = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0, 0, 0, 2, 0], [0, 0, 1, 1, 0, 0, 0, 0, 2, 0], [0, 0, 1, 1, 0, 0, 0, 0, 2, 0], [0, 0, 1, 1, 0, 0, 0, 0, 2, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]).astype(np.int32) np_kernel_mask1 = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 1, 0], [0, 0, 1, 1, 1, 1, 1, 1, 1, 0], [0, 0, 1, 1, 1, 1, 1, 1, 1, 0], [0, 0, 1, 1, 1, 1, 1, 1, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]).astype(np.uint8) np_kernel_mask2 = (np_internal_kernel_label > 0).astype(np.uint8) np_kernel_mask = np.stack([np_kernel_mask1, np_kernel_mask2]) min_area = 1 kernel_region_num = 3 result = contour_expand(np_kernel_mask, np_internal_kernel_label, min_area, kernel_region_num) gt = [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 1, 1, 2, 2, 2, 0], [0, 0, 1, 1, 1, 1, 2, 2, 2, 0], [0, 0, 1, 1, 1, 1, 2, 2, 2, 0], [0, 0, 1, 1, 1, 1, 2, 2, 2, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]] assert np.allclose(result, gt) np_kernel_mask_t = torch.from_numpy(np_kernel_mask) np_internal_kernel_label_t = torch.from_numpy(np_internal_kernel_label) result = contour_expand(np_kernel_mask_t, np_internal_kernel_label_t, min_area, kernel_region_num) assert np.allclose(result, gt)
@pytest.mark.skipif(
    not torch.cuda.is_available(), reason='requires CUDA support')
def test_convex_iou():
    """convex_iou on the shared fixtures must match the precomputed IoUs."""
    pointsets = torch.from_numpy(np_pointsets).float().cuda()
    polygons = torch.from_numpy(np_polygons).float().cuda()
    expected_iou = torch.from_numpy(np_expected_iou).float().cuda()
    ious = convex_iou(pointsets, polygons)
    assert torch.allclose(ious, expected_iou, atol=0.001)
@pytest.mark.skipif(
    not torch.cuda.is_available(), reason='requires CUDA support')
def test_convex_giou():
    """convex_giou returns (giou, grad); both must match the fixtures."""
    pointsets = torch.from_numpy(np_pointsets).float().cuda()
    polygons = torch.from_numpy(np_polygons).float().cuda()
    expected_giou = torch.from_numpy(np_expected_giou).float().cuda()
    expected_grad = torch.from_numpy(np_expected_grad).float().cuda()
    giou, grad = convex_giou(pointsets, polygons)
    assert torch.allclose(giou, expected_giou, atol=0.001)
    assert torch.allclose(grad, expected_grad, atol=0.001)
# Tests mmcv.ops.CornerPool on CPU: an invalid mode string must raise
# AssertionError, and each of the four directional pools ('left', 'right',
# 'top', 'bottom') is checked against a hand-computed 5x5 cumulative-max
# answer, including dtype preservation (tensor.type() comparison).
# NOTE(review): block flattened onto one physical line by an extraction
# step; tokens kept byte-identical pending reformatting.
def test_corner_pool_device_and_dtypes_cpu(): '\n CommandLine:\n xdoctest -m tests/test_corner_pool.py test_corner_pool_device_and_dtypes_cpu\n ' with pytest.raises(AssertionError): pool = CornerPool('corner') lr_tensor = torch.tensor([[[[0, 0, 0, 0, 0], [2, 1, 3, 0, 2], [5, 4, 1, 1, 6], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]]]) tb_tensor = torch.tensor([[[[0, 3, 1, 0, 0], [0, 1, 1, 0, 0], [0, 3, 4, 0, 0], [0, 2, 2, 0, 0], [0, 0, 2, 0, 0]]]]) left_answer = torch.tensor([[[[0, 0, 0, 0, 0], [3, 3, 3, 2, 2], [6, 6, 6, 6, 6], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]]]) pool = CornerPool('left') left_tensor = pool(lr_tensor) assert (left_tensor.type() == lr_tensor.type()) assert torch.equal(left_tensor, left_answer) right_answer = torch.tensor([[[[0, 0, 0, 0, 0], [2, 2, 3, 3, 3], [5, 5, 5, 5, 6], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]]]) pool = CornerPool('right') right_tensor = pool(lr_tensor) assert (right_tensor.type() == lr_tensor.type()) assert torch.equal(right_tensor, right_answer) top_answer = torch.tensor([[[[0, 3, 4, 0, 0], [0, 3, 4, 0, 0], [0, 3, 4, 0, 0], [0, 2, 2, 0, 0], [0, 0, 2, 0, 0]]]]) pool = CornerPool('top') top_tensor = pool(tb_tensor) assert (top_tensor.type() == tb_tensor.type()) assert torch.equal(top_tensor, top_answer) bottom_answer = torch.tensor([[[[0, 3, 1, 0, 0], [0, 3, 1, 0, 0], [0, 3, 4, 0, 0], [0, 3, 4, 0, 0], [0, 3, 4, 0, 0]]]]) pool = CornerPool('bottom') bottom_tensor = pool(tb_tensor) assert (bottom_tensor.type() == tb_tensor.type()) assert torch.equal(bottom_tensor, bottom_answer)
def assert_equal_tensor(tensor_a, tensor_b):
    """Fail the calling test unless the two tensors are element-wise equal."""
    assert (tensor_a == tensor_b).all()
class TestCorrelation():
    """Checks the Correlation op against precomputed fixtures (CUDA only)."""

    def _test_correlation(self, dtype=torch.float):
        corr = Correlation(max_displacement=0)
        in1 = torch.tensor(_input1, dtype=dtype).cuda()
        in2 = torch.tensor(_input2, dtype=dtype).cuda()
        in1.requires_grad = True
        in2.requires_grad = True
        out = corr(in1, in2)
        out.backward(torch.ones_like(out))
        expected = torch.tensor(_gt_out, dtype=dtype).cuda()
        assert_equal_tensor(out, expected)
        # with zero displacement, each input's gradient equals the other input
        assert_equal_tensor(in1.grad.detach(), in2)
        assert_equal_tensor(in2.grad.detach(), in1)

    @pytest.mark.skipif(
        not torch.cuda.is_available(), reason='requires CUDA support')
    def test_correlation(self):
        for dtype in (torch.float, torch.double, torch.half):
            self._test_correlation(dtype)
# Tests for mmcv.ops.DeformConv2d / DeformConv2dPack. _test_deformconv
# loads fixed offset/deform weights into a DeformConv2dPack, repeats the
# shared `input` fixture along the batch dim, runs forward + backward and
# compares outputs and (batch-averaged) parameter gradients against the
# recorded ground truths; it also checks the constructor assertions
# (bias=True disallowed, in/out channels must be divisible by groups).
# _test_amp_deformconv repeats this under torch.cuda.amp autocast with
# float and half inputs. test_deformconv drives both across
# dtype/device/im2col_step combinations, including the "batch size must
# be divisible by im2col_step" error path.
# NOTE(review): block flattened onto three physical lines by an
# extraction step; tokens kept byte-identical pending reformatting.
class TestDeformconv(object): def _test_deformconv(self, dtype=torch.float, threshold=0.001, device='cuda', batch_size=10, im2col_step=2): if ((not torch.cuda.is_available()) and (device == 'cuda')): pytest.skip('test requires GPU') from mmcv.ops import DeformConv2dPack c_in = 1 c_out = 1 batch_size = 10 repeated_input = np.repeat(input, batch_size, axis=0) repeated_gt_out = np.repeat(gt_out, batch_size, axis=0) repeated_gt_x_grad = np.repeat(gt_x_grad, batch_size, axis=0) x = torch.tensor(repeated_input, device=device, dtype=dtype) x.requires_grad = True model = DeformConv2dPack(in_channels=c_in, out_channels=c_out, kernel_size=2, stride=1, padding=0, im2col_step=im2col_step) model.conv_offset.weight.data = torch.nn.Parameter(torch.Tensor(offset_weight).reshape(8, 1, 2, 2)) model.conv_offset.bias.data = torch.nn.Parameter(torch.Tensor(offset_bias).reshape(8)) model.weight.data = torch.nn.Parameter(torch.Tensor(deform_weight).reshape(1, 1, 2, 2)) if (device == 'cuda'): model.cuda() model.type(dtype) out = model(x) out.backward(torch.ones_like(out)) assert np.allclose(out.data.detach().cpu().numpy(), repeated_gt_out, threshold) assert np.allclose(x.grad.detach().cpu().numpy(), repeated_gt_x_grad, threshold) assert np.allclose((model.conv_offset.weight.grad.detach().cpu().numpy() / batch_size), gt_offset_weight_grad, threshold) assert np.allclose((model.conv_offset.bias.grad.detach().cpu().numpy() / batch_size), gt_offset_bias_grad, threshold) assert np.allclose((model.weight.grad.detach().cpu().numpy() / batch_size), gt_deform_weight_grad, threshold) from mmcv.ops import DeformConv2d model = DeformConv2d(1, 1, 2, stride=1, padding=0) assert (not hasattr(model, 'bias')) with pytest.raises(AssertionError): model = DeformConv2d(1, 1, 2, stride=1, padding=0, bias=True) with pytest.raises(AssertionError): model = DeformConv2d(3, 2, 3, groups=2) with pytest.raises(AssertionError): model = DeformConv2d(3, 4, 3, groups=3) def _test_amp_deformconv(self, input_dtype, 
threshold=0.001, batch_size=10, im2col_step=2): 'The function to test amp released on pytorch 1.6.0.\n\n The type of input data might be torch.float or torch.half,\n so we should test deform_conv in both cases. With amp, the\n data type of model will NOT be set manually.\n\n Args:\n input_dtype: torch.float or torch.half.\n threshold: the same as above function.\n ' if (not torch.cuda.is_available()): return from mmcv.ops import DeformConv2dPack c_in = 1 c_out = 1 repeated_input = np.repeat(input, batch_size, axis=0) repeated_gt_out = np.repeat(gt_out, batch_size, axis=0) repeated_gt_x_grad = np.repeat(gt_x_grad, batch_size, axis=0) x = torch.Tensor(repeated_input).cuda().type(input_dtype) x.requires_grad = True model = DeformConv2dPack(in_channels=c_in, out_channels=c_out, kernel_size=2, stride=1, padding=0, im2col_step=im2col_step) model.conv_offset.weight.data = torch.nn.Parameter(torch.Tensor(offset_weight).reshape(8, 1, 2, 2)) model.conv_offset.bias.data = torch.nn.Parameter(torch.Tensor(offset_bias).reshape(8)) model.weight.data = torch.nn.Parameter(torch.Tensor(deform_weight).reshape(1, 1, 2, 2)) model.cuda() out = model(x) out.backward(torch.ones_like(out)) assert np.allclose(out.data.detach().cpu().numpy(), repeated_gt_out, threshold) assert np.allclose(x.grad.detach().cpu().numpy(), repeated_gt_x_grad, threshold) assert np.allclose((model.conv_offset.weight.grad.detach().cpu().numpy() / batch_size), gt_offset_weight_grad, threshold) assert np.allclose((model.conv_offset.bias.grad.detach().cpu().numpy() / batch_size), gt_offset_bias_grad, threshold) assert np.allclose((model.weight.grad.detach().cpu().numpy() / batch_size), gt_deform_weight_grad, threshold) from mmcv.ops import DeformConv2d model = DeformConv2d(1, 1, 2, stride=1, padding=0) assert (not hasattr(model, 'bias')) with pytest.raises(AssertionError): model = DeformConv2d(1, 1, 2, stride=1, padding=0, bias=True) with pytest.raises(AssertionError): model = DeformConv2d(3, 2, 3, groups=2) with 
pytest.raises(AssertionError): model = DeformConv2d(3, 4, 3, groups=3) def test_deformconv(self): self._test_deformconv(torch.double, device='cpu') self._test_deformconv(torch.float, device='cpu', threshold=0.1) self._test_deformconv(torch.double) self._test_deformconv(torch.float) self._test_deformconv(torch.half, threshold=0.1) self._test_deformconv(torch.float, batch_size=1, im2col_step=2) with pytest.raises(AssertionError, match='batch size must be divisible by im2col_step'): self._test_deformconv(torch.float, batch_size=10, im2col_step=3) if ((TORCH_VERSION != 'parrots') and (digit_version(TORCH_VERSION) >= digit_version('1.6.0'))): with autocast(enabled=True): self._test_amp_deformconv(torch.float, 0.1) self._test_amp_deformconv(torch.half, 0.1)
# Gradient checks for mmcv.ops.DeformRoIPoolPack and
# ModulatedDeformRoIPoolPack (CUDA only): for each (feature, rois) case in
# the shared `inputs` fixture, runs torch.autograd.gradcheck (or the
# parrots variant, which excludes rois from grad checking via no_grads).
# NOTE(review): block flattened onto one physical line by an extraction
# step; tokens kept byte-identical pending reformatting.
class TestDeformRoIPool(object): def test_deform_roi_pool_gradcheck(self): if (not torch.cuda.is_available()): return from mmcv.ops import DeformRoIPoolPack pool_h = 2 pool_w = 2 spatial_scale = 1.0 sampling_ratio = 2 for case in inputs: np_input = np.array(case[0]) np_rois = np.array(case[1]) x = torch.tensor(np_input, device='cuda', dtype=torch.float, requires_grad=True) rois = torch.tensor(np_rois, device='cuda', dtype=torch.float) output_c = x.size(1) droipool = DeformRoIPoolPack((pool_h, pool_w), output_c, spatial_scale=spatial_scale, sampling_ratio=sampling_ratio).cuda() if _USING_PARROTS: gradcheck(droipool, (x, rois), no_grads=[rois]) else: gradcheck(droipool, (x, rois), eps=0.01, atol=0.01) def test_modulated_deform_roi_pool_gradcheck(self): if (not torch.cuda.is_available()): return from mmcv.ops import ModulatedDeformRoIPoolPack pool_h = 2 pool_w = 2 spatial_scale = 1.0 sampling_ratio = 2 for case in inputs: np_input = np.array(case[0]) np_rois = np.array(case[1]) x = torch.tensor(np_input, device='cuda', dtype=torch.float, requires_grad=True) rois = torch.tensor(np_rois, device='cuda', dtype=torch.float) output_c = x.size(1) droipool = ModulatedDeformRoIPoolPack((pool_h, pool_w), output_c, spatial_scale=spatial_scale, sampling_ratio=sampling_ratio).cuda() if _USING_PARROTS: gradcheck(droipool, (x, rois), no_grads=[rois]) else: gradcheck(droipool, (x, rois), eps=0.01, atol=0.01)
# Tests for mmcv.ops focal-loss variants (CUDA only). _test_softmax /
# _test_sigmoid compare loss values and input gradients against the
# recorded softmax_outputs / sigmoid_outputs fixtures (alpha=0.25,
# gamma=2.0, 'mean' reduction, rtol 0.01); _test_grad_softmax /
# _test_grad_sigmoid run torch.autograd.gradcheck on the module forms
# (skipped under parrots). The public test_* methods fan out over float
# and half dtypes.
# NOTE(review): block flattened onto two physical lines by an extraction
# step; tokens kept byte-identical pending reformatting.
class Testfocalloss(object): def _test_softmax(self, dtype=torch.float): if (not torch.cuda.is_available()): return from mmcv.ops import softmax_focal_loss alpha = 0.25 gamma = 2.0 for (case, output) in zip(inputs, softmax_outputs): np_x = np.array(case[0]) np_y = np.array(case[1]) np_x_grad = np.array(output[1]) x = torch.from_numpy(np_x).cuda().type(dtype) x.requires_grad_() y = torch.from_numpy(np_y).cuda().long() loss = softmax_focal_loss(x, y, gamma, alpha, None, 'mean') loss.backward() assert np.allclose(loss.data.cpu().numpy(), output[0], 0.01) assert np.allclose(x.grad.data.cpu(), np_x_grad, 0.01) def _test_sigmoid(self, dtype=torch.float): if (not torch.cuda.is_available()): return from mmcv.ops import sigmoid_focal_loss alpha = 0.25 gamma = 2.0 for (case, output) in zip(inputs, sigmoid_outputs): np_x = np.array(case[0]) np_y = np.array(case[1]) np_x_grad = np.array(output[1]) x = torch.from_numpy(np_x).cuda().type(dtype) x.requires_grad_() y = torch.from_numpy(np_y).cuda().long() loss = sigmoid_focal_loss(x, y, gamma, alpha, None, 'mean') loss.backward() assert np.allclose(loss.data.cpu().numpy(), output[0], 0.01) assert np.allclose(x.grad.data.cpu(), np_x_grad, 0.01) def _test_grad_softmax(self, dtype=torch.float): if (not torch.cuda.is_available()): return from mmcv.ops import SoftmaxFocalLoss alpha = 0.25 gamma = 2.0 for case in inputs: np_x = np.array(case[0]) np_y = np.array(case[1]) x = torch.from_numpy(np_x).cuda().type(dtype) x.requires_grad_() y = torch.from_numpy(np_y).cuda().long() floss = SoftmaxFocalLoss(gamma, alpha) if _USING_PARROTS: pass else: gradcheck(floss, (x, y), eps=0.01, atol=0.01) def _test_grad_sigmoid(self, dtype=torch.float): if (not torch.cuda.is_available()): return from mmcv.ops import SigmoidFocalLoss alpha = 0.25 gamma = 2.0 for case in inputs: np_x = np.array(case[0]) np_y = np.array(case[1]) x = torch.from_numpy(np_x).cuda().type(dtype) x.requires_grad_() y = torch.from_numpy(np_y).cuda().long() floss = 
SigmoidFocalLoss(gamma, alpha) if _USING_PARROTS: pass else: gradcheck(floss, (x, y), eps=0.01, atol=0.01) def test_softmax_float(self): self._test_softmax(dtype=torch.float) def test_softmax_half(self): self._test_softmax(dtype=torch.half) def test_sigmoid_float(self): self._test_sigmoid(dtype=torch.float) def test_sigmoid_half(self): self._test_sigmoid(dtype=torch.half) def test_grad_softmax_float(self): self._test_grad_softmax(dtype=torch.float) def test_grad_sigmoid_float(self): self._test_grad_sigmoid(dtype=torch.float)
@pytest.mark.skipif(
    not torch.cuda.is_available(), reason='requires CUDA support')
def test_fps():
    """furthest_point_sample must pick the known farthest-point indices."""
    xyz = torch.tensor(
        [[[-0.2748, 1.002, -1.1674], [0.1015, 1.3952, -1.2681],
          [-0.807, 2.4137, -0.5845], [-1.0001, 2.1982, -0.5859],
          [0.3841, 1.8983, -0.7431]],
         [[-1.0696, 3.0758, -0.1899], [-0.2559, 3.5521, -0.1402],
          [0.8164, 4.0081, -0.1839], [-1.1, 3.0213, -0.8205],
          [-0.0518, 3.7251, -0.395]]]).cuda()
    sampled = furthest_point_sample(xyz, 3)
    expected = torch.tensor([[0, 2, 4], [0, 2, 1]]).cuda()
    assert torch.all(sampled == expected)
@pytest.mark.skipif(
    not torch.cuda.is_available(), reason='requires CUDA support')
def test_fps_with_dist():
    """furthest_point_sample_with_dist run on pairwise distance matrices."""
    xyz = torch.tensor(
        [[[-0.2748, 1.002, -1.1674], [0.1015, 1.3952, -1.2681],
          [-0.807, 2.4137, -0.5845], [-1.0001, 2.1982, -0.5859],
          [0.3841, 1.8983, -0.7431]],
         [[-1.0696, 3.0758, -0.1899], [-0.2559, 3.5521, -0.1402],
          [0.8164, 4.0081, -0.1839], [-1.1, 3.0213, -0.8205],
          [-0.0518, 3.7251, -0.395]]]).cuda()
    expected = torch.tensor([[0, 2, 4], [0, 2, 1]]).cuda()
    # squared pairwise distances (B, N, N) feed the *_with_dist variant
    pairwise_sq_dist = (
        (xyz.unsqueeze(dim=1) - xyz.unsqueeze(dim=2)) ** 2).sum(-1)
    sampled = furthest_point_sample_with_dist(pairwise_sq_dist, 3)
    assert torch.all(sampled == expected)

    # larger regression case from recorded fixtures on disk
    import numpy as np
    fps_idx = np.load('tests/data/for_3d_ops/fps_idx.npy')
    features_for_fps_distance = np.load(
        'tests/data/for_3d_ops/features_for_fps_distance.npy')
    expected = torch.from_numpy(fps_idx).cuda()
    features_for_fps_distance = torch.from_numpy(
        features_for_fps_distance).cuda()
    sampled = furthest_point_sample_with_dist(features_for_fps_distance, 16)
    assert torch.all(sampled == expected)
class TestFusedBiasLeakyReLU(object):
    """Gradient and grad-grad checks for FusedBiasLeakyReLU (CUDA only)."""

    @classmethod
    def setup_class(cls):
        if not torch.cuda.is_available():
            return
        cls.input_tensor = torch.randn(
            (2, 2, 2, 2), requires_grad=True).cuda()
        cls.bias = torch.zeros(2, requires_grad=True).cuda()

    @pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
    def test_gradient(self):
        from mmcv.ops import FusedBiasLeakyReLU
        # parrots and pytorch expose gradcheck with different kwarg names
        if _USING_PARROTS:
            gradcheck(
                FusedBiasLeakyReLU(2).cuda(),
                self.input_tensor,
                delta=0.0001,
                pt_atol=0.001)
        else:
            gradcheck(
                FusedBiasLeakyReLU(2).cuda(),
                self.input_tensor,
                eps=0.0001,
                atol=0.001)

    @pytest.mark.skipif(
        not torch.cuda.is_available() or _USING_PARROTS,
        reason='requires cuda')
    def test_gradgradient(self):
        from mmcv.ops import FusedBiasLeakyReLU
        gradgradcheck(
            FusedBiasLeakyReLU(2).cuda(),
            self.input_tensor,
            eps=0.0001,
            atol=0.001)
# Tests mmcv.ops.gather_points (CUDA only): gathers columns of a
# (B, C, N) feature tensor by per-batch integer indices and compares with
# a precomputed expected tensor via torch.allclose.
# NOTE(review): block flattened onto one physical line by an extraction
# step; tokens kept byte-identical pending reformatting.
@pytest.mark.skipif((not torch.cuda.is_available()), reason='requires CUDA support') def test_gather_points(): features = torch.tensor([[[(- 1.6095), (- 0.1029), (- 0.8876), (- 1.2447), (- 2.4031), 0.3708, (- 1.1586), (- 1.4967), (- 0.48), 0.2252], [1.9138, 3.4979, 1.6854, 1.5631, 3.6776, 3.1154, 2.1705, 2.5221, 2.0411, 3.1446], [(- 1.4173), 0.3073, (- 1.4339), (- 1.434), (- 1.277), (- 0.2867), (- 1.4162), (- 1.4044), (- 1.4245), (- 1.4074)]], [[0.216, 0.0842, 0.3661, (- 0.2749), (- 0.4909), (- 0.6066), (- 0.8773), (- 0.0745), (- 0.9496), 0.1434], [1.3644, 1.8087, 1.6855, 1.9563, 1.2746, 1.9662, 0.9566, 1.8778, 1.1437, 1.3639], [(- 0.7172), 0.1692, 0.2241, 0.0721, (- 0.754), 0.0462, (- 0.6227), 0.3223, (- 0.6944), (- 0.5294)]]]).cuda() idx = torch.tensor([[0, 1, 4, 0, 0, 0], [0, 5, 6, 0, 0, 0]]).int().cuda() output = gather_points(features, idx) expected_output = torch.tensor([[[(- 1.6095), (- 0.1029), (- 2.4031), (- 1.6095), (- 1.6095), (- 1.6095)], [1.9138, 3.4979, 3.6776, 1.9138, 1.9138, 1.9138], [(- 1.4173), 0.3073, (- 1.277), (- 1.4173), (- 1.4173), (- 1.4173)]], [[0.216, (- 0.6066), (- 0.8773), 0.216, 0.216, 0.216], [1.3644, 1.9662, 0.9566, 1.3644, 1.3644, 1.3644], [(- 0.7172), 0.0462, (- 0.6227), (- 0.7172), (- 0.7172), (- 0.7172)]]]).cuda() assert torch.allclose(output, expected_output)
# Tests mmcv.ops.grouping_operation (CUDA only): groups a (B, C, N)
# feature tensor by a (B, npoint, nsample) index tensor and compares the
# (B, C, npoint, nsample) result against a precomputed expected tensor.
# (Note the fixture name `festures` is a typo kept from the original.)
# NOTE(review): block flattened onto two physical lines by an extraction
# step; tokens kept byte-identical pending reformatting.
@pytest.mark.skipif((not torch.cuda.is_available()), reason='requires CUDA support') def test_grouping_points(): idx = torch.tensor([[[0, 0, 0], [3, 3, 3], [8, 8, 8], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [6, 6, 6], [9, 9, 9], [0, 0, 0], [0, 0, 0], [0, 0, 0]]]).int().cuda() festures = torch.tensor([[[0.5798, (- 0.7981), (- 0.928), (- 1.3311), 1.3687, 0.9277, (- 0.4164), (- 1.8274), 0.9268, 0.8414], [5.4247, 1.5113, 2.3944, 1.474, 5.03, 5.103, 1.936, 2.1939, 2.1581, 3.4666], [(- 1.6266), (- 1.0281), (- 1.0393), (- 1.6931), (- 1.3982), (- 0.5732), (- 1.083), (- 1.7561), (- 1.6786), (- 1.6967)]], [[(- 0.038), (- 0.188), (- 1.5724), 0.6905, (- 0.319), 0.7798, (- 0.3693), (- 0.9457), (- 0.2942), (- 1.8527)], [1.1773, 1.5009, 2.6399, 5.9242, 1.0962, 2.7346, 6.0865, 1.5555, 4.3303, 2.8229], [(- 0.6646), (- 0.687), (- 0.1125), (- 0.2224), (- 0.3445), (- 1.4049), 0.499, (- 0.7037), (- 0.9924), 0.0386]]]).cuda() output = grouping_operation(festures, idx) expected_output = torch.tensor([[[[0.5798, 0.5798, 0.5798], [(- 1.3311), (- 1.3311), (- 1.3311)], [0.9268, 0.9268, 0.9268], [0.5798, 0.5798, 0.5798], [0.5798, 0.5798, 0.5798], [0.5798, 0.5798, 0.5798]], [[5.4247, 5.4247, 5.4247], [1.474, 1.474, 1.474], [2.1581, 2.1581, 2.1581], [5.4247, 5.4247, 5.4247], [5.4247, 5.4247, 5.4247], [5.4247, 5.4247, 5.4247]], [[(- 1.6266), (- 1.6266), (- 1.6266)], [(- 1.6931), (- 1.6931), (- 1.6931)], [(- 1.6786), (- 1.6786), (- 1.6786)], [(- 1.6266), (- 1.6266), (- 1.6266)], [(- 1.6266), (- 1.6266), (- 1.6266)], [(- 1.6266), (- 1.6266), (- 1.6266)]]], [[[(- 0.038), (- 0.038), (- 0.038)], [(- 0.3693), (- 0.3693), (- 0.3693)], [(- 1.8527), (- 1.8527), (- 1.8527)], [(- 0.038), (- 0.038), (- 0.038)], [(- 0.038), (- 0.038), (- 0.038)], [(- 0.038), (- 0.038), (- 0.038)]], [[1.1773, 1.1773, 1.1773], [6.0865, 6.0865, 6.0865], [2.8229, 2.8229, 2.8229], [1.1773, 1.1773, 1.1773], [1.1773, 1.1773, 1.1773], [1.1773, 1.1773, 1.1773]], [[(- 0.6646), (- 0.6646), (- 0.6646)], [0.499, 0.499, 0.499], 
[0.0386, 0.0386, 0.0386], [(- 0.6646), (- 0.6646), (- 0.6646)], [(- 0.6646), (- 0.6646), (- 0.6646)], [(- 0.6646), (- 0.6646), (- 0.6646)]]]]).cuda() assert torch.allclose(output, expected_output)
class TestInfo(object):

    def test_info(self):
        """Compiler / CUDA version strings must be present when built with CUDA."""
        if not torch.cuda.is_available():
            return
        from mmcv.ops import get_compiler_version, get_compiling_cuda_version
        assert get_compiler_version() is not None
        assert get_compiling_cuda_version() is not None
@pytest.mark.skipif(
    not torch.cuda.is_available(), reason='requires CUDA support')
def test_boxes_iou_bev():
    """boxes_iou_bev must reproduce the precomputed BEV IoU matrix."""
    np_boxes1 = np.asarray(
        [[1.0, 1.0, 3.0, 4.0, 0.5], [2.0, 2.0, 3.0, 4.0, 0.6],
         [7.0, 7.0, 8.0, 8.0, 0.4]],
        dtype=np.float32)
    np_boxes2 = np.asarray(
        [[0.0, 2.0, 2.0, 5.0, 0.3], [2.0, 1.0, 3.0, 3.0, 0.5],
         [5.0, 5.0, 6.0, 7.0, 0.4]],
        dtype=np.float32)
    np_expect_ious = np.asarray(
        [[0.2621, 0.2948, 0.0], [0.0549, 0.1587, 0.0], [0.0, 0.0, 0.0]],
        dtype=np.float32)
    ious = boxes_iou_bev(
        torch.from_numpy(np_boxes1).cuda(),
        torch.from_numpy(np_boxes2).cuda())
    assert np.allclose(ious.cpu().numpy(), np_expect_ious, atol=0.0001)
@pytest.mark.skipif(
    not torch.cuda.is_available(), reason='requires CUDA support')
def test_nms_bev():
    """nms_bev at IoU threshold 0.3 must keep boxes [1, 0, 3]."""
    np_boxes = np.array(
        [[6.0, 3.0, 8.0, 7.0, 2.0], [3.0, 6.0, 9.0, 11.0, 1.0],
         [3.0, 7.0, 10.0, 12.0, 1.0], [1.0, 4.0, 13.0, 7.0, 3.0]],
        dtype=np.float32)
    np_scores = np.array([0.6, 0.9, 0.7, 0.2], dtype=np.float32)
    np_inds = np.array([1, 0, 3])
    kept = nms_bev(
        torch.from_numpy(np_boxes).cuda(),
        torch.from_numpy(np_scores).cuda(),
        thresh=0.3)
    assert np.allclose(kept.cpu().numpy(), np_inds)
@pytest.mark.skipif(
    not torch.cuda.is_available(), reason='requires CUDA support')
def test_nms_normal_bev():
    """nms_normal_bev at IoU threshold 0.3 must keep boxes [1, 0, 3]."""
    np_boxes = np.array(
        [[6.0, 3.0, 8.0, 7.0, 2.0], [3.0, 6.0, 9.0, 11.0, 1.0],
         [3.0, 7.0, 10.0, 12.0, 1.0], [1.0, 4.0, 13.0, 7.0, 3.0]],
        dtype=np.float32)
    np_scores = np.array([0.6, 0.9, 0.7, 0.2], dtype=np.float32)
    np_inds = np.array([1, 0, 3])
    kept = nms_normal_bev(
        torch.from_numpy(np_boxes).cuda(),
        torch.from_numpy(np_scores).cuda(),
        thresh=0.3)
    assert np.allclose(kept.cpu().numpy(), np_inds)
# Tests mmcv.ops.knn (CUDA only): k=5 nearest neighbours of new_xyz in
# xyz, compared against a brute-force squared-distance topk reference.
# Also covers the transposed-input path (third arg True) and the
# self-query case knn(5, xyz, xyz).
# NOTE(review): block flattened onto one physical line by an extraction
# step; tokens kept byte-identical pending reformatting.
@pytest.mark.skipif((not torch.cuda.is_available()), reason='requires CUDA support') def test_knn(): new_xyz = torch.tensor([[[(- 0.074), 1.3147, (- 1.3625)], [(- 2.2769), 2.7817, (- 0.2334)], [(- 0.4003), 2.4666, (- 0.5116)], [(- 0.074), 1.3147, (- 1.3625)], [(- 0.074), 1.3147, (- 1.3625)]], [[(- 2.0289), 2.4952, (- 0.1708)], [(- 2.0668), 6.0278, (- 0.4875)], [0.4066, 1.4211, (- 0.2947)], [(- 2.0289), 2.4952, (- 0.1708)], [(- 2.0289), 2.4952, (- 0.1708)]]]).cuda() xyz = torch.tensor([[[(- 0.074), 1.3147, (- 1.3625)], [0.5555, 1.0399, (- 1.3634)], [(- 0.4003), 2.4666, (- 0.5116)], [(- 0.5251), 2.4379, (- 0.8466)], [(- 0.9691), 1.1418, (- 1.3733)], [(- 0.2232), 0.9561, (- 1.3626)], [(- 2.2769), 2.7817, (- 0.2334)], [(- 0.2822), 1.3192, (- 1.3645)], [0.1533, 1.5024, (- 1.0432)], [0.4917, 1.1529, (- 1.3496)]], [[(- 2.0289), 2.4952, (- 0.1708)], [(- 0.7188), 0.9956, (- 0.5096)], [(- 2.0668), 6.0278, (- 0.4875)], [(- 1.9304), 3.3092, 0.661], [0.0949, 1.4332, 0.314], [(- 1.2879), 2.0008, (- 0.7791)], [(- 0.7252), 0.9611, (- 0.6371)], [0.4066, 1.4211, (- 0.2947)], [0.322, 1.4447, 0.3548], [(- 0.9744), 2.3856, (- 1.2)]]]).cuda() idx = knn(5, xyz, new_xyz) new_xyz_ = new_xyz.unsqueeze(2).repeat(1, 1, xyz.shape[1], 1) xyz_ = xyz.unsqueeze(1).repeat(1, new_xyz.shape[1], 1, 1) dist = ((new_xyz_ - xyz_) * (new_xyz_ - xyz_)).sum((- 1)) expected_idx = dist.topk(k=5, dim=2, largest=False)[1].transpose(2, 1) assert torch.all((idx == expected_idx)) idx = knn(5, xyz.transpose(1, 2).contiguous(), new_xyz.transpose(1, 2).contiguous(), True) assert torch.all((idx == expected_idx)) idx = knn(5, xyz, xyz) xyz_ = xyz.unsqueeze(2).repeat(1, 1, xyz.shape[1], 1) xyz__ = xyz.unsqueeze(1).repeat(1, xyz.shape[1], 1, 1) dist = ((xyz_ - xyz__) * (xyz_ - xyz__)).sum((- 1)) expected_idx = dist.topk(k=5, dim=2, largest=False)[1].transpose(2, 1) assert torch.all((idx == expected_idx))
class TestMaskedConv2d(object):

    def test_masked_conv2d(self):
        """Smoke test: MaskedConv2d forward runs on CUDA and returns a value."""
        if not torch.cuda.is_available():
            return
        from mmcv.ops import MaskedConv2d
        feat = torch.randn(1, 3, 16, 16, requires_grad=True, device='cuda')
        mask = torch.randn(1, 16, 16, requires_grad=True, device='cuda')
        conv = MaskedConv2d(3, 3, 3).cuda()
        assert conv(feat, mask) is not None
def test_sum_cell():
    """SumCell must honour explicit out_size and default to the x input's size."""
    inputs_x = torch.randn([2, 256, 32, 32])
    inputs_y = torch.randn([2, 256, 16, 16])
    sum_cell = SumCell(256, 256)
    # explicit output sizes: either input's spatial shape works
    for ref in (inputs_x, inputs_y):
        merged = sum_cell(inputs_x, inputs_y, out_size=ref.shape[-2:])
        assert merged.size() == ref.size()
    # without out_size the result matches the first input's size
    assert sum_cell(inputs_x, inputs_y).size() == inputs_x.size()
def test_concat_cell():
    """ConcatCell must honour explicit out_size and default to the x input's size."""
    inputs_x = torch.randn([2, 256, 32, 32])
    inputs_y = torch.randn([2, 256, 16, 16])
    concat_cell = ConcatCell(256, 256)
    # explicit output sizes: either input's spatial shape works
    for ref in (inputs_x, inputs_y):
        merged = concat_cell(inputs_x, inputs_y, out_size=ref.shape[-2:])
        assert merged.size() == ref.size()
    # without out_size the result matches the first input's size
    assert concat_cell(inputs_x, inputs_y).size() == inputs_x.size()
def test_global_pool_cell():
    """GlobalPoolingCell keeps the spatial size with and without an out conv."""
    inputs_x = torch.randn([2, 256, 32, 32])
    inputs_y = torch.randn([2, 256, 32, 32])
    # no output conv
    gp_cell = GlobalPoolingCell(with_out_conv=False)
    merged = gp_cell(inputs_x, inputs_y, out_size=inputs_x.shape[-2:])
    assert merged.size() == inputs_x.size()
    # with a 256 -> 256 output conv
    gp_cell = GlobalPoolingCell(256, 256)
    merged = gp_cell(inputs_x, inputs_y, out_size=inputs_x.shape[-2:])
    assert merged.size() == inputs_x.size()
def test_resize_methods():
    """BaseMergeCell._resize: upsampling matches F.interpolate, and
    downsampling falls back to max pooling."""
    feats = torch.randn([2, 256, 128, 128])

    # Identity / upsampling paths should match F.interpolate exactly.
    for mode in ['nearest', 'bilinear']:
        cell = BaseMergeCell(upsample_mode=mode)
        for size in [(128, 128), (256, 256)]:
            resized = cell._resize(feats, size)
            reference = F.interpolate(feats, size=size, mode=mode)
            assert resized.equal(reference)

    # Downsampling uses max pooling with an integer stride.
    size = (64, 64)
    cell = BaseMergeCell()
    resized = cell._resize(feats, size)
    stride = feats.shape[-1] // size[-1]
    reference = F.max_pool2d(feats, kernel_size=stride, stride=stride)
    assert (resized == reference).all()
@pytest.mark.skipif(
    not torch.cuda.is_available(), reason='requires CUDA support')
def test_min_area_polygons():
    """min_area_polygons should reproduce the precomputed polygons."""
    pointsets_gpu = torch.from_numpy(np_pointsets).cuda().float()
    result = min_area_polygons(pointsets_gpu).cpu().numpy()
    assert np.allclose(result, expected_polygons, atol=0.0001)
class TestMdconv(object):

    def _test_mdconv(self, dtype=torch.float, device='cuda'):
        """Check ModulatedDeformConv2dPack forward/backward against the
        precomputed fixtures (input_t, output_t and the *_grad arrays) for a
        single dtype/device combination."""
        if not torch.cuda.is_available() and device == 'cuda':
            pytest.skip('test requires GPU')
        from mmcv.ops import ModulatedDeformConv2dPack
        input = torch.tensor(input_t, dtype=dtype, device=device)
        input.requires_grad = True
        dcn = ModulatedDeformConv2dPack(
            1,
            1,
            kernel_size=(2, 2),
            stride=1,
            padding=1,
            deform_groups=1,
            bias=False)
        if device == 'cuda':
            dcn.cuda()
        # Deterministic weights so the fixture values are reproducible.
        dcn.weight.data.fill_(1.0)
        dcn.type(dtype)
        output = dcn(input)
        output.sum().backward()
        # Compare forward output and every gradient (input, weight, and the
        # learned offset/mask branch) with 1% tolerance.
        assert numpy.allclose(output.cpu().detach().numpy(), output_t, 0.01)
        assert numpy.allclose(input.grad.cpu().detach().numpy(), input_grad,
                              0.01)
        assert numpy.allclose(dcn.weight.grad.cpu().detach().numpy(),
                              dcn_w_grad, 0.01)
        assert numpy.allclose(
            dcn.conv_offset.weight.grad.cpu().detach().numpy(),
            dcn_offset_w_grad, 0.01)
        assert numpy.allclose(
            dcn.conv_offset.bias.grad.cpu().detach().numpy(),
            dcn_offset_b_grad, 0.01)

    def _test_amp_mdconv(self, input_dtype=torch.float):
        """Test mdconv under amp (released in pytorch 1.6.0).

        The input may be torch.float or torch.half, so mdconv is tested in
        both cases. With amp, the data type of the model will NOT be set
        manually.

        Args:
            input_dtype: torch.float or torch.half.
        """
        if not torch.cuda.is_available():
            return
        from mmcv.ops import ModulatedDeformConv2dPack
        input = torch.tensor(input_t).cuda().type(input_dtype)
        input.requires_grad = True
        dcn = ModulatedDeformConv2dPack(
            1,
            1,
            kernel_size=(2, 2),
            stride=1,
            padding=1,
            deform_groups=1,
            bias=False).cuda()
        dcn.weight.data.fill_(1.0)
        output = dcn(input)
        output.sum().backward()
        assert numpy.allclose(output.cpu().detach().numpy(), output_t, 0.01)
        assert numpy.allclose(input.grad.cpu().detach().numpy(), input_grad,
                              0.01)
        assert numpy.allclose(dcn.weight.grad.cpu().detach().numpy(),
                              dcn_w_grad, 0.01)
        assert numpy.allclose(
            dcn.conv_offset.weight.grad.cpu().detach().numpy(),
            dcn_offset_w_grad, 0.01)
        assert numpy.allclose(
            dcn.conv_offset.bias.grad.cpu().detach().numpy(),
            dcn_offset_b_grad, 0.01)

    def test_mdconv(self):
        # CPU path (half precision is not exercised on CPU).
        self._test_mdconv(torch.double, device='cpu')
        self._test_mdconv(torch.float, device='cpu')
        # CUDA path for all three dtypes.
        self._test_mdconv(torch.double)
        self._test_mdconv(torch.float)
        self._test_mdconv(torch.half)
        # amp needs pytorch >= 1.6.0 and is unavailable under parrots.
        if (TORCH_VERSION != 'parrots'
                and digit_version(TORCH_VERSION) >= digit_version('1.6.0')):
            with autocast(enabled=True):
                self._test_amp_mdconv(torch.float)
                self._test_amp_mdconv(torch.half)
@pytest.mark.parametrize('device_type', [
    'cpu',
    pytest.param(
        'cuda:0',
        marks=pytest.mark.skipif(
            not torch.cuda.is_available(), reason='requires CUDA support'))
])
def test_multiscale_deformable_attention(device_type):
    """Construction validation plus a forward smoke test on CPU/CUDA."""
    # embed_dims that is not divisible by num_heads must be rejected.
    with pytest.raises(ValueError):
        MultiScaleDeformableAttention(embed_dims=256, num_heads=7)

    device = torch.device(device_type)
    attn = MultiScaleDeformableAttention(
        embed_dims=3, num_levels=2, num_heads=3)
    attn.init_weights()

    num_query, bs, embed_dims = 5, 1, 3
    query = torch.rand(num_query, bs, embed_dims).to(device)
    key = torch.rand(num_query, bs, embed_dims).to(device)
    # Two levels: a 2x2 map (4 keys, starting at 0) and a 1x1 map.
    spatial_shapes = torch.Tensor([[2, 2], [1, 1]]).long().to(device)
    level_start_index = torch.Tensor([0, 4]).long().to(device)
    reference_points = torch.rand(bs, num_query, 2, 2).to(device)
    attn.to(device)
    attn(
        query,
        key,
        key,
        reference_points=reference_points,
        spatial_shapes=spatial_shapes,
        level_start_index=level_start_index)
def test_forward_multi_scale_deformable_attn_pytorch():
    """Forward smoke test of the pure-PyTorch attention implementation."""
    bs, num_heads, channels = 1, 2, 2
    num_query, num_levels, num_points = 2, 2, 2
    shapes = torch.as_tensor([(6, 4), (3, 2)], dtype=torch.long)
    num_keys = sum([(h * w).item() for h, w in shapes])

    torch.manual_seed(3)
    value = torch.rand(bs, num_keys, num_heads, channels) * 0.01
    sampling_locations = torch.rand(bs, num_query, num_heads, num_levels,
                                    num_points, 2)
    # Normalize weights so they sum to one over (levels, points).
    attention_weights = torch.rand(
        bs, num_query, num_heads, num_levels, num_points) + 1e-05
    attention_weights /= attention_weights.sum(
        -1, keepdim=True).sum(-2, keepdim=True)

    multi_scale_deformable_attn_pytorch(
        value.double(), shapes, sampling_locations.double(),
        attention_weights.double()).detach()
@pytest.mark.skipif(
    not torch.cuda.is_available(), reason='requires CUDA support')
def test_forward_equal_with_pytorch_double():
    """CUDA kernel must match the pure-PyTorch path in float64."""
    bs, num_heads, channels = 1, 2, 2
    num_query, num_levels, num_points = 2, 2, 2
    shapes = torch.as_tensor([(6, 4), (3, 2)], dtype=torch.long).cuda()
    level_start_index = torch.cat(
        (shapes.new_zeros((1, )), shapes.prod(1).cumsum(0)[:-1]))
    num_keys = sum([(h * w).item() for h, w in shapes])

    torch.manual_seed(3)
    value = torch.rand(bs, num_keys, num_heads, channels).cuda() * 0.01
    sampling_locations = torch.rand(bs, num_query, num_heads, num_levels,
                                    num_points, 2).cuda()
    attention_weights = torch.rand(
        bs, num_query, num_heads, num_levels, num_points).cuda() + 1e-05
    attention_weights /= attention_weights.sum(
        -1, keepdim=True).sum(-2, keepdim=True)
    im2col_step = 2

    output_pytorch = multi_scale_deformable_attn_pytorch(
        value.double(), shapes, sampling_locations.double(),
        attention_weights.double()).detach().cpu()
    output_cuda = MultiScaleDeformableAttnFunction.apply(
        value.double(), shapes, level_start_index,
        sampling_locations.double(), attention_weights.double(),
        im2col_step).detach().cpu()

    assert torch.allclose(output_cuda, output_pytorch)
    max_abs_err = (output_cuda - output_pytorch).abs().max()
    max_rel_err = ((output_cuda - output_pytorch).abs() /
                   output_pytorch.abs()).max()
    # float64 should agree essentially to machine precision.
    assert max_abs_err < 1e-18
    assert max_rel_err < 1e-15
@pytest.mark.skipif(
    not torch.cuda.is_available(), reason='requires CUDA support')
def test_forward_equal_with_pytorch_float():
    """CUDA kernel must match the pure-PyTorch path in float32."""
    bs, num_heads, channels = 1, 2, 2
    num_query, num_levels, num_points = 2, 2, 2
    shapes = torch.as_tensor([(6, 4), (3, 2)], dtype=torch.long).cuda()
    level_start_index = torch.cat(
        (shapes.new_zeros((1, )), shapes.prod(1).cumsum(0)[:-1]))
    num_keys = sum([(h * w).item() for h, w in shapes])

    torch.manual_seed(3)
    value = torch.rand(bs, num_keys, num_heads, channels).cuda() * 0.01
    sampling_locations = torch.rand(bs, num_query, num_heads, num_levels,
                                    num_points, 2).cuda()
    attention_weights = torch.rand(
        bs, num_query, num_heads, num_levels, num_points).cuda() + 1e-05
    attention_weights /= attention_weights.sum(
        -1, keepdim=True).sum(-2, keepdim=True)
    im2col_step = 2

    output_pytorch = multi_scale_deformable_attn_pytorch(
        value, shapes, sampling_locations, attention_weights).detach().cpu()
    output_cuda = MultiScaleDeformableAttnFunction.apply(
        value, shapes, level_start_index, sampling_locations,
        attention_weights, im2col_step).detach().cpu()

    assert torch.allclose(output_cuda, output_pytorch, rtol=0.01, atol=0.001)
    max_abs_err = (output_cuda - output_pytorch).abs().max()
    max_rel_err = ((output_cuda - output_pytorch).abs() /
                   output_pytorch.abs()).max()
    # float32 tolerances are correspondingly looser than the float64 test.
    assert max_abs_err < 1e-09
    assert max_rel_err < 1e-06
@pytest.mark.skipif(
    not torch.cuda.is_available(), reason='requires CUDA support')
@pytest.mark.parametrize('channels', [4, 30, 32, 64, 71, 1025])
def test_gradient_numerical(channels,
                            grad_value=True,
                            grad_sampling_loc=True,
                            grad_attn_weight=True):
    """Numerical gradcheck of the CUDA kernel over several channel counts."""
    bs, num_heads = 1, 2
    num_query, num_levels, num_points = 2, 2, 2
    shapes = torch.as_tensor([(3, 2), (2, 1)], dtype=torch.long).cuda()
    level_start_index = torch.cat(
        (shapes.new_zeros((1, )), shapes.prod(1).cumsum(0)[:-1]))
    num_keys = sum([(h * w).item() for h, w in shapes])

    value = torch.rand(bs, num_keys, num_heads, channels).cuda() * 0.01
    sampling_locations = torch.rand(bs, num_query, num_heads, num_levels,
                                    num_points, 2).cuda()
    attention_weights = torch.rand(
        bs, num_query, num_heads, num_levels, num_points).cuda() + 1e-05
    attention_weights /= attention_weights.sum(
        -1, keepdim=True).sum(-2, keepdim=True)
    im2col_step = 2

    func = MultiScaleDeformableAttnFunction.apply
    value.requires_grad = grad_value
    sampling_locations.requires_grad = grad_sampling_loc
    attention_weights.requires_grad = grad_attn_weight
    args = (value.double(), shapes, level_start_index,
            sampling_locations.double(), attention_weights.double(),
            im2col_step)
    if _USING_PARROTS:
        # parrots gradcheck needs integer inputs excluded explicitly.
        assert gradcheck(func, args, no_grads=[shapes, level_start_index])
    else:
        assert gradcheck(func, args)
@pytest.mark.skipif(
    not torch.cuda.is_available(), reason='requires CUDA support')
def test_points_in_polygons():
    """points_in_polygons should flag which polygon contains each point."""
    np_points = np.array([[300.0, 300.0], [400.0, 400.0], [100.0, 100],
                          [300, 250], [100, 0]])
    np_polygons = np.array(
        [[200.0, 200.0, 400.0, 400.0, 500.0, 200.0, 400.0, 100.0],
         [400.0, 400.0, 500.0, 500.0, 600.0, 300.0, 500.0, 200.0],
         [300.0, 300.0, 600.0, 700.0, 700.0, 700.0, 700.0, 100.0]])
    # One row per point, one column per polygon; 1.0 means "inside".
    np_expected = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 1.0],
                            [0.0, 0.0, 0.0], [1.0, 0.0, 0.0],
                            [0.0, 0.0, 0.0]])

    points = torch.from_numpy(np_points).cuda().float()
    polygons = torch.from_numpy(np_polygons).cuda().float()
    expected_output = torch.from_numpy(np_expected).cuda().float()
    assert torch.allclose(
        points_in_polygons(points, polygons), expected_output, 0.001)
class Loss(nn.Module):
    """Toy loss used by the PSAMask tests: mean of (input - target)."""

    def __init__(self):
        super().__init__()

    def forward(self, input, target):
        # Flatten both tensors, then average the elementwise difference.
        diff = input.view(-1) - target.view(-1)
        return diff.mean()
class TestPSAMask(object):

    def test_psa_mask_collect(self):
        """PSAMask 'collect' mode: CPU and CUDA outputs must both match the
        precomputed binary fixture, and backward must run."""
        if not torch.cuda.is_available():
            return
        from mmcv.ops import PSAMask
        test_loss = Loss()

        input = np.fromfile(
            'tests/data/for_psa_mask/psa_input.bin', dtype=np.float32)
        output_collect = np.fromfile(
            'tests/data/for_psa_mask/psa_output_collect.bin', dtype=np.float32)
        input = input.reshape((4, 16, 8, 8))
        output_collect = output_collect.reshape((4, 64, 8, 8))
        label = torch.ones((4, 64, 8, 8))

        input = torch.FloatTensor(input)
        input.requires_grad = True
        psamask_collect = PSAMask('collect', (4, 4))

        # CPU forward/backward.
        test_output = psamask_collect(input)
        loss = test_loss(test_output, label)
        loss.backward()
        test_output = test_output.detach().numpy()
        assert np.allclose(test_output, output_collect)
        assert test_output.shape == output_collect.shape

        # CUDA forward/backward on the same data must agree with the fixture.
        psamask_collect.cuda()
        input = input.cuda()
        label = label.cuda()
        test_output = psamask_collect(input)
        loss = test_loss(test_output, label)
        loss.backward()
        test_output = test_output.detach().cpu().numpy()
        assert np.allclose(test_output, output_collect)
        assert test_output.shape == output_collect.shape

    def test_psa_mask_distribute(self):
        """PSAMask 'distribute' mode: same structure as the collect test but
        against the distribute fixture."""
        if not torch.cuda.is_available():
            return
        from mmcv.ops import PSAMask
        test_loss = Loss()

        input = np.fromfile(
            'tests/data/for_psa_mask/psa_input.bin', dtype=np.float32)
        output_distribute = np.fromfile(
            'tests/data/for_psa_mask/psa_output_distribute.bin',
            dtype=np.float32)
        input = input.reshape((4, 16, 8, 8))
        output_distribute = output_distribute.reshape((4, 64, 8, 8))
        label = torch.ones((4, 64, 8, 8))

        input = torch.FloatTensor(input)
        input.requires_grad = True
        psamask_distribute = PSAMask('distribute', (4, 4))

        # CPU forward/backward.
        test_output = psamask_distribute(input)
        loss = test_loss(test_output, label)
        loss.backward()
        test_output = test_output.detach().numpy()
        assert np.allclose(test_output, output_distribute)
        assert test_output.shape == output_distribute.shape

        # CUDA forward/backward.
        psamask_distribute.cuda()
        input = input.cuda()
        label = label.cuda()
        test_output = psamask_distribute(input)
        loss = test_loss(test_output, label)
        loss.backward()
        test_output = test_output.detach().cpu().numpy()
        assert np.allclose(test_output, output_distribute)
        assert test_output.shape == output_distribute.shape
@pytest.mark.skipif(
    not torch.cuda.is_available(), reason='requires CUDA support')
def test_roialign_rotated_gradcheck():
    """Numerical gradcheck of RiRoIAlignRotated on the shared fixtures."""
    feat = torch.tensor(
        np_feature, dtype=torch.float, device='cuda', requires_grad=True)
    rois = torch.tensor(np_rois, dtype=torch.float, device='cuda')
    align = RiRoIAlignRotated((pool_h, pool_w), spatial_scale, num_samples,
                              num_orientations, clockwise)
    gradcheck(align, (feat, rois), eps=0.001, atol=0.001)
@pytest.mark.skipif(
    not torch.cuda.is_available(), reason='requires CUDA support')
def test_roialign_rotated_allclose():
    """RiRoIAlignRotated forward/backward against precomputed values."""
    feat = torch.tensor(
        np_feature, dtype=torch.float, device='cuda', requires_grad=True)
    rois = torch.tensor(np_rois, dtype=torch.float, device='cuda')
    align = RiRoIAlignRotated((pool_h, pool_w), spatial_scale, num_samples,
                              num_orientations, clockwise)
    pooled = align(feat, rois)
    pooled.backward(torch.ones_like(pooled))
    assert np.allclose(
        pooled.data.type(torch.float).cpu().numpy(),
        expect_output,
        atol=0.001)
    assert np.allclose(
        feat.grad.data.type(torch.float).cpu().numpy(),
        expect_grad,
        atol=0.001)
def _test_roialign_gradcheck(device, dtype):
    """Run gradcheck on RoIAlign for every shared input case."""
    if device == 'cuda' and not torch.cuda.is_available():
        pytest.skip('test requires GPU')
    try:
        from mmcv.ops import RoIAlign
    except ModuleNotFoundError:
        pytest.skip('RoIAlign op is not successfully compiled')
    if dtype is torch.half:
        pytest.skip('grad check does not support fp16')
    for case in inputs:
        feat = torch.tensor(
            np.array(case[0]), dtype=dtype, device=device, requires_grad=True)
        rois = torch.tensor(np.array(case[1]), dtype=dtype, device=device)
        align = RoIAlign((pool_h, pool_w), spatial_scale, sampling_ratio)
        if torch.__version__ == 'parrots':
            gradcheck(
                align, (feat, rois),
                no_grads=[rois],
                delta=1e-05,
                pt_atol=1e-05)
        else:
            gradcheck(align, (feat, rois), eps=1e-05, atol=1e-05)
def _test_roialign_allclose(device, dtype):
    """roi_align forward/backward against precomputed fixture values."""
    if device == 'cuda' and not torch.cuda.is_available():
        pytest.skip('test requires GPU')
    try:
        from mmcv.ops import roi_align
    except ModuleNotFoundError:
        pytest.skip('test requires compilation')
    pool_h, pool_w = 2, 2
    spatial_scale = 1.0
    sampling_ratio = 2
    for case, expected in zip(inputs, outputs):
        x = torch.tensor(
            np.array(case[0]), dtype=dtype, device=device, requires_grad=True)
        rois = torch.tensor(np.array(case[1]), dtype=dtype, device=device)
        pooled = roi_align(x, rois, (pool_h, pool_w), spatial_scale,
                           sampling_ratio, 'avg', True)
        pooled.backward(torch.ones_like(pooled))
        assert np.allclose(
            pooled.data.type(torch.float).cpu().numpy(),
            np.array(expected[0]),
            atol=0.001)
        assert np.allclose(
            x.grad.data.type(torch.float).cpu().numpy(),
            np.array(expected[1]),
            atol=0.001)
@pytest.mark.parametrize('device', ['cuda', 'cpu'])
@pytest.mark.parametrize('dtype', [torch.float, torch.double, torch.half])
def test_roialign(device, dtype):
    """Entry point: gradcheck only in float64, allclose for every dtype."""
    if dtype is torch.double:
        _test_roialign_gradcheck(device=device, dtype=dtype)
    _test_roialign_allclose(device=device, dtype=dtype)
def _test_roialign_rotated_gradcheck(device, dtype):
    """Run gradcheck on RoIAlignRotated for every shared input case."""
    if device == 'cuda' and not torch.cuda.is_available():
        pytest.skip('unittest does not support GPU yet.')
    try:
        from mmcv.ops import RoIAlignRotated
    except ModuleNotFoundError:
        pytest.skip('RoIAlignRotated op is not successfully compiled')
    if dtype is torch.half:
        pytest.skip('grad check does not support fp16')
    for case in inputs:
        feat = torch.tensor(
            np.array(case[0]), dtype=dtype, device=device, requires_grad=True)
        rois = torch.tensor(np.array(case[1]), dtype=dtype, device=device)
        align = RoIAlignRotated((pool_h, pool_w), spatial_scale,
                                sampling_ratio)
        if torch.__version__ == 'parrots':
            gradcheck(
                align, (feat, rois),
                no_grads=[rois],
                delta=1e-05,
                pt_atol=1e-05)
        else:
            gradcheck(align, (feat, rois), eps=1e-05, atol=1e-05)
def _test_roialign_rotated_allclose(device, dtype):
    """roi_align_rotated forward/backward against precomputed fixtures, plus
    a check that the deprecated and new module signatures agree."""
    if device == 'cuda' and not torch.cuda.is_available():
        pytest.skip('unittest does not support GPU yet.')
    try:
        from mmcv.ops import RoIAlignRotated, roi_align_rotated
    except ModuleNotFoundError:
        pytest.skip('test requires compilation')
    pool_h = 2
    pool_w = 2
    spatial_scale = 1.0
    sampling_ratio = 2
    for case, output in zip(inputs, outputs):
        np_input = np.array(case[0])
        np_rois = np.array(case[1])
        np_output = np.array(output[0])
        np_grad = np.array(output[1])
        x = torch.tensor(
            np_input, dtype=dtype, device=device, requires_grad=True)
        rois = torch.tensor(np_rois, dtype=dtype, device=device)
        output = roi_align_rotated(x, rois, (pool_h, pool_w), spatial_scale,
                                   sampling_ratio, True)
        output.backward(torch.ones_like(output))
        assert np.allclose(
            output.data.type(torch.float).cpu().numpy(), np_output,
            atol=0.001)
        assert np.allclose(
            x.grad.data.type(torch.float).cpu().numpy(), np_grad, atol=0.001)
    # The deprecated kwargs (out_size/sample_num) and the new ones
    # (output_size/sampling_ratio) must build equivalent modules; compared on
    # the last case's x/rois from the loop above.
    roi_align_rotated_module_deprecated = RoIAlignRotated(
        out_size=(pool_h, pool_w),
        spatial_scale=spatial_scale,
        sample_num=sampling_ratio)
    output_1 = roi_align_rotated_module_deprecated(x, rois)
    roi_align_rotated_module_new = RoIAlignRotated(
        output_size=(pool_h, pool_w),
        spatial_scale=spatial_scale,
        sampling_ratio=sampling_ratio)
    output_2 = roi_align_rotated_module_new(x, rois)
    assert np.allclose(
        output_1.data.type(torch.float).cpu().numpy(),
        output_2.data.type(torch.float).cpu().numpy())
@pytest.mark.parametrize('device', ['cuda', 'cpu'])
@pytest.mark.parametrize('dtype', [torch.float, torch.double, torch.half])
def test_roialign_rotated(device, dtype):
    """Entry point: gradcheck only in float64, allclose for every dtype."""
    if dtype is torch.double:
        _test_roialign_rotated_gradcheck(device=device, dtype=dtype)
    _test_roialign_rotated_allclose(device=device, dtype=dtype)
class TestRoiPool(object):

    def test_roipool_gradcheck(self):
        """Gradcheck RoIPool on CUDA for every shared input case."""
        if not torch.cuda.is_available():
            return
        from mmcv.ops import RoIPool
        pool_h, pool_w = 2, 2
        spatial_scale = 1.0
        for case in inputs:
            x = torch.tensor(
                np.array(case[0]), device='cuda', requires_grad=True)
            rois = torch.tensor(np.array(case[1]), device='cuda')
            froipool = RoIPool((pool_h, pool_w), spatial_scale)
            if _USING_PARROTS:
                # gradcheck is not run under parrots for this op.
                pass
            else:
                gradcheck(froipool, (x, rois), eps=0.01, atol=0.01)

    def _test_roipool_allclose(self, dtype=torch.float):
        """roi_pool forward/backward against precomputed fixture values."""
        if not torch.cuda.is_available():
            return
        from mmcv.ops import roi_pool
        pool_h, pool_w = 2, 2
        spatial_scale = 1.0
        for case, expected in zip(inputs, outputs):
            x = torch.tensor(
                np.array(case[0]),
                dtype=dtype,
                device='cuda',
                requires_grad=True)
            rois = torch.tensor(np.array(case[1]), dtype=dtype, device='cuda')
            pooled = roi_pool(x, rois, (pool_h, pool_w), spatial_scale)
            pooled.backward(torch.ones_like(pooled))
            assert np.allclose(
                pooled.data.cpu().numpy(), np.array(expected[0]), 0.001)
            assert np.allclose(
                x.grad.data.cpu().numpy(), np.array(expected[1]), 0.001)

    def test_roipool_allclose(self):
        for dtype in (torch.double, torch.float, torch.half):
            self._test_roipool_allclose(dtype)
@pytest.mark.skipif(
    not torch.cuda.is_available(), reason='requires CUDA support')
def test_RoIAwarePool3d():
    """RoIAwarePool3d 'max' and 'avg' pooling against precomputed sums."""
    roiaware_pool3d_max = RoIAwarePool3d(
        out_size=4, max_pts_per_voxel=128, mode='max')
    roiaware_pool3d_avg = RoIAwarePool3d(
        out_size=4, max_pts_per_voxel=128, mode='avg')
    # Two 7-DoF boxes; the yaw is shifted by -pi/2 — presumably to convert
    # between rotation conventions, TODO confirm against the op's docs.
    rois = torch.tensor(
        [[1.0, 2.0, 3.0, 5.0, 4.0, 6.0, -0.3 - np.pi / 2],
         [-10.0, 23.0, 16.0, 20.0, 10.0, 20.0, -0.5 - np.pi / 2]],
        dtype=torch.float32).cuda()
    pts = torch.tensor(
        [[1, 2, 3.3], [1.2, 2.5, 3.0], [0.8, 2.1, 3.5], [1.6, 2.6, 3.6],
         [0.8, 1.2, 3.9], [-9.2, 21.0, 18.2], [3.8, 7.9, 6.3],
         [4.7, 3.5, -12.2], [3.8, 7.6, -2], [-10.6, -12.9, -20],
         [-16, -18, 9], [-21.3, -52, -5], [0, 0, 0], [6, 7, 8],
         [-2, -3, -4]],
        dtype=torch.float32).cuda()
    # Use the coordinates themselves as per-point features.
    pts_feature = pts.clone()

    pooled_features_max = roiaware_pool3d_max(
        rois=rois, pts=pts, pts_feature=pts_feature)
    assert pooled_features_max.shape == torch.Size([2, 4, 4, 4, 3])
    assert torch.allclose(pooled_features_max.sum(),
                          torch.tensor(51.1).cuda(), 0.001)

    pooled_features_avg = roiaware_pool3d_avg(
        rois=rois, pts=pts, pts_feature=pts_feature)
    assert pooled_features_avg.shape == torch.Size([2, 4, 4, 4, 3])
    assert torch.allclose(pooled_features_avg.sum(),
                          torch.tensor(49.75).cuda(), 0.001)
@pytest.mark.skipif(
    not torch.cuda.is_available(), reason='requires CUDA support')
def test_points_in_boxes_part():
    """points_in_boxes_part returns, per point, the index of one containing
    box (-1 when no box contains it)."""
    boxes = torch.tensor(
        [[[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 0.3]],
         [[-10.0, 23.0, 16.0, 10, 20, 20, 0.5]]],
        dtype=torch.float32).cuda()  # 7-DoF boxes, one per batch element
    pts = torch.tensor(
        [[[1, 2, 3.3], [1.2, 2.5, 3.0], [0.8, 2.1, 3.5], [1.6, 2.6, 3.6],
          [0.8, 1.2, 3.9], [-9.2, 21.0, 18.2], [3.8, 7.9, 6.3],
          [4.7, 3.5, -12.2]],
         [[3.8, 7.6, -2], [-10.6, -12.9, -20], [-16, -18, 9],
          [-21.3, -52, -5], [0, 0, 0], [6, 7, 8], [-2, -3, -4], [6, 4, 9]]],
        dtype=torch.float32).cuda()
    point_indices = points_in_boxes_part(points=pts, boxes=boxes)
    expected_point_indices = torch.tensor(
        [[0, 0, 0, 0, 0, -1, -1, -1], [-1, -1, -1, -1, -1, -1, -1, -1]],
        dtype=torch.int32).cuda()
    assert point_indices.shape == torch.Size([2, 8])
    assert (point_indices == expected_point_indices).all()

    # A single rotated box (yaw 0.523598 ~= pi/6) to exercise the rotation
    # handling.
    boxes = torch.tensor([[[0.0, 0.0, 0.0, 1.0, 20.0, 1.0, 0.523598]]],
                         dtype=torch.float32).cuda()
    pts = torch.tensor(
        [[[4, 6.928, 0], [6.928, 4, 0], [4, -6.928, 0], [6.928, -4, 0],
          [-4, 6.928, 0], [-6.928, 4, 0], [-4, -6.928, 0], [-6.928, -4, 0]]],
        dtype=torch.float32).cuda()
    point_indices = points_in_boxes_part(points=pts, boxes=boxes)
    expected_point_indices = torch.tensor([[-1, -1, 0, -1, 0, -1, -1, -1]],
                                          dtype=torch.int32).cuda()
    assert (point_indices == expected_point_indices).all()
def test_points_in_boxes_cpu():
    """points_in_boxes_cpu returns a per-(point, box) membership mask."""
    boxes = torch.tensor(
        [[[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 0.3],
          [-10.0, 23.0, 16.0, 10, 20, 20, 0.5]]],
        dtype=torch.float32)  # 7-DoF boxes — TODO confirm box convention
    pts = torch.tensor(
        [[[1, 2, 3.3], [1.2, 2.5, 3.0], [0.8, 2.1, 3.5], [1.6, 2.6, 3.6],
          [0.8, 1.2, 3.9], [-9.2, 21.0, 18.2], [3.8, 7.9, 6.3],
          [4.7, 3.5, -12.2], [3.8, 7.6, -2], [-10.6, -12.9, -20],
          [-16, -18, 9], [-21.3, -52, -5], [0, 0, 0], [6, 7, 8],
          [-2, -3, -4]]],
        dtype=torch.float32)
    point_indices = points_in_boxes_cpu(points=pts, boxes=boxes)
    # One column per box; 1 means the point lies inside that box.
    expected_point_indices = torch.tensor(
        [[[1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [0, 1], [0, 0], [0, 0],
          [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0]]],
        dtype=torch.int32)
    assert point_indices.shape == torch.Size([1, 15, 2])
    assert (point_indices == expected_point_indices).all()

    # A single rotated box (yaw 0.523598 ~= pi/6).
    boxes = torch.tensor([[[0.0, 0.0, 0.0, 1.0, 20.0, 1.0, 0.523598]]],
                         dtype=torch.float32)
    pts = torch.tensor(
        [[[4, 6.928, 0], [6.928, 4, 0], [4, -6.928, 0], [6.928, -4, 0],
          [-4, 6.928, 0], [-6.928, 4, 0], [-4, -6.928, 0], [-6.928, -4, 0]]],
        dtype=torch.float32)
    point_indices = points_in_boxes_cpu(points=pts, boxes=boxes)
    expected_point_indices = torch.tensor(
        [[[0], [0], [1], [0], [1], [0], [0], [0]]], dtype=torch.int32)
    assert (point_indices == expected_point_indices).all()
@pytest.mark.skipif(
    not torch.cuda.is_available(), reason='requires CUDA support')
def test_points_in_boxes_all():
    """points_in_boxes_all (CUDA) must match the CPU membership mask."""
    boxes = torch.tensor(
        [[[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 0.3],
          [-10.0, 23.0, 16.0, 10, 20, 20, 0.5]]],
        dtype=torch.float32).cuda()
    pts = torch.tensor(
        [[[1, 2, 3.3], [1.2, 2.5, 3.0], [0.8, 2.1, 3.5], [1.6, 2.6, 3.6],
          [0.8, 1.2, 3.9], [-9.2, 21.0, 18.2], [3.8, 7.9, 6.3],
          [4.7, 3.5, -12.2], [3.8, 7.6, -2], [-10.6, -12.9, -20],
          [-16, -18, 9], [-21.3, -52, -5], [0, 0, 0], [6, 7, 8],
          [-2, -3, -4]]],
        dtype=torch.float32).cuda()
    point_indices = points_in_boxes_all(points=pts, boxes=boxes)
    # One column per box; 1 means the point lies inside that box.
    expected_point_indices = torch.tensor(
        [[[1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [0, 1], [0, 0], [0, 0],
          [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0]]],
        dtype=torch.int32).cuda()
    assert point_indices.shape == torch.Size([1, 15, 2])
    assert (point_indices == expected_point_indices).all()
@pytest.mark.skipif(
    not torch.cuda.is_available(), reason='requires CUDA support')
def test_gather_points():
    # NOTE(review): despite its name, this test exercises RoIPointPool3d,
    # not the gather_points op — consider renaming in a follow-up.
    feats = torch.tensor(
        [[1, 2, 3.3], [1.2, 2.5, 3.0], [0.8, 2.1, 3.5], [1.6, 2.6, 3.6],
         [0.8, 1.2, 3.9], [-9.2, 21.0, 18.2], [3.8, 7.9, 6.3],
         [4.7, 3.5, -12.2], [3.8, 7.6, -2], [-10.6, -12.9, -20],
         [-16, -18, 9], [-21.3, -52, -5], [0, 0, 0], [6, 7, 8],
         [-2, -3, -4]],
        dtype=torch.float32).unsqueeze(0).cuda()
    # Coordinates double as features, so the pooled output is (xyz, xyz).
    points = feats.clone()
    rois = torch.tensor([[[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 0.3],
                          [-10.0, 23.0, 16.0, 10, 20, 20, 0.5]]],
                        dtype=torch.float32).cuda()

    roipoint_pool3d = RoIPointPool3d(num_sampled_points=4)
    roi_feat, empty_flag = roipoint_pool3d(feats, points, rois)
    # Boxes with fewer than 4 interior points repeat their points to fill.
    expected_roi_feat = torch.tensor(
        [[[[1, 2, 3.3, 1, 2, 3.3], [1.2, 2.5, 3, 1.2, 2.5, 3],
           [0.8, 2.1, 3.5, 0.8, 2.1, 3.5], [1.6, 2.6, 3.6, 1.6, 2.6, 3.6]],
          [[-9.2, 21, 18.2, -9.2, 21, 18.2],
           [-9.2, 21, 18.2, -9.2, 21, 18.2],
           [-9.2, 21, 18.2, -9.2, 21, 18.2],
           [-9.2, 21, 18.2, -9.2, 21, 18.2]]]]).cuda()
    expected_empty_flag = torch.tensor([[0, 0]]).int().cuda()

    assert torch.allclose(roi_feat, expected_roi_feat)
    assert torch.allclose(empty_flag, expected_empty_flag)
@pytest.mark.skipif(
    not torch.cuda.is_available(), reason='requires CUDA support')
def test_rotated_feature_align():
    """rotated_feature_align forward/backward against precomputed values.

    feature is (2, 3, 4, 4); bbox is a dense (2, 4, 4, 5) map of rotated
    boxes (cx, cy, w, h, angle) — one box per feature location.
    """
    feature = torch.tensor(
        [[[[1.2924, -0.2172, -0.5222, 0.1172],
           [0.9144, 1.2248, 1.3115, -0.969],
           [-0.8949, -1.1797, -0.9093, -0.3961],
           [-0.4586, 0.5062, -0.7947, -0.7397]],
          [[-1.0943, -0.7495, 1.3461, -1.1652],
           [0.2034, 0.6763, -1.2357, 0.5231],
           [-1.0062, 1.2592, 1.4225, -0.3951],
           [-0.1242, -1.624, 0.1932, 2.7181]],
          [[-1.6271, -1.0276, 0.0578, -0.2997],
           [-0.9684, -1.6946, -1.3188, -1.1938],
           [-1.6744, -0.8917, -0.6556, 1.0073],
           [-0.1205, 0.3671, -0.3731, -0.5347]]],
         [[[0.7035, 0.2089, -0.1774, 3.467],
           [-0.8505, -0.9278, 1.4714, 0.1644],
           [0.0898, 0.3531, -0.4007, 0.1927],
           [1.2569, -0.2636, -0.5223, 0.0616]],
          [[0.176, -0.7639, -0.46, -1.326],
           [-0.9921, -0.297, -0.8955, 1.0508],
           [1.3515, -0.1641, 1.9679, 1.1986],
           [-0.3616, 0.6287, 0.4933, 0.336]],
          [[-0.586, 0.2124, -0.87, 2.42],
           [-0.0551, -1.5103, -1.6779, 0.8399],
           [0.8431, 1.2414, -1.1243, -0.3887],
           [-2.1254, 0.6047, -0.3515, 0.7254]]]],
        device='cuda',
        requires_grad=True)

    bbox = torch.tensor(
        [[[[13.08, 12.688, 11.214, 93.944, -0.91905],
           [38.104, 10.134, 146.59, 90.306, -0.98211],
           [-53.213, 49.508, 51.513, 32.055, -0.31954],
           [26.974, 25.248, 54.495, 3.1083, -0.62127]],
          [[-15.604, -51.908, 239.98, 15.008, -1.2546],
           [31.354, -7.3635, 67.879, 35.081, -0.33851],
           [-5.3292, 9.1946, 12.834, 10.485, -1.3039],
           [-23.925, 36.623, 39.875, 72.009, -0.65934]],
          [[72.114, -23.781, 29.106, 84.501, -1.134],
           [26.258, -7.7034, 176.29, 106.15, -1.2156],
           [38.057, 46.016, 12.965, 6.9384, -1.0855],
           [24.428, -16.189, 205.72, 31.622, -0.15719]],
          [[3.8226, 29.608, 14.457, 68.179, -0.91997],
           [25.003, -42.49, 96.007, 49.086, -1.4786],
           [85.983, 54.98, 78.08, 100.03, -1.0926],
           [9.9065, 41.457, 5.9799, 17.973, -0.56313]]],
         [[[-18.244, 4.6309, 53.01, 24.31, -0.70345],
           [19.419, 36.704, 52.39, 54.133, -0.3773],
           [56.387, 23.752, 9.0441, 17.792, -1.5583],
           [36.303, 16.396, 20.283, 19.148, -0.83419]],
          [[32.169, 30.521, 26.283, 196.8, -0.30454],
           [25.788, -32.189, 88.882, 102.07, -1.5328],
           [8.4676, -16.668, 24.657, 112.75, -0.40388],
           [-10.799, 6.0422, 9.5807, 33.677, -0.35438]],
          [[69.363, 10.85, 25.968, 22.311, -0.16408],
           [2.814, 4.6843, 3.1289, 21.48, -0.67583],
           [26.661, 45.29, 6.1679, 30.005, -0.89806],
           [5.0871, 13.234, 92.087, 49.622, -0.2802]],
          [[-12.643, 25.176, 50.488, 54.246, -0.4484],
           [-34.521, 0.98435, 52.413, 9.7996, -0.84218],
           [49.829, -10.808, 29.848, 73.579, -0.62672],
           [80.446, 28.064, 45.273, 53.809, -1.2359]]]],
        device='cuda',
        requires_grad=True)

    expected_output = torch.tensor(
        [[[[1.1095, -0.2172, -0.5222, -0.6225],
           [0.9144, 0.7662, 1.0487, -0.969],
           [-0.8949, -1.6384, -0.9093, -0.3961],
           [-0.8604, 0.5062, -0.7947, -0.7397]],
          [[-0.3961, -0.7495, 1.3461, 1.5528],
           [0.2034, 0.5522, -1.6722, 0.5231],
           [-1.0062, 1.135, 1.4225, -0.3951],
           [-0.4826, -1.624, 0.1932, 2.7181]],
          [[-2.6436, -1.0276, 0.0578, -0.8344],
           [-0.9684, -1.8151, -2.1843, -1.1938],
           [-1.6744, -1.0121, -0.6556, 1.0073],
           [-0.8474, 0.3671, -0.3731, -0.5347]]],
         [[[0.7035, 0.2089, -0.1774, 3.467],
           [-0.8505, -0.9278, 1.4714, 0.1644],
           [0.0898, 0.3064, -0.4007, 0.5849],
           [1.2569, -0.2636, -0.5223, 0.0616]],
          [[0.176, -0.7639, -0.46, -1.326],
           [-0.9921, -0.297, -0.8955, 1.0508],
           [1.3515, -0.6125, 1.9679, 0.555],
           [-0.3616, 0.6287, 0.4933, 0.336]],
          [[-0.586, 0.2124, -0.87, 2.42],
           [-0.0551, -1.5103, -1.6779, 0.8399],
           [0.8431, 0.8455, -1.1243, -1.5994],
           [-2.1254, 0.6047, -0.3515, 0.7254]]]]).cuda()

    # Gradient w.r.t. the input feature; identical across channels since the
    # sampling locations depend only on the boxes.
    expected_grad = torch.tensor(
        [[[[1.0, 1.8507, 1.1493, 1.5222], [1.0, 1.1511, 1.2139, 1.4778],
           [1.0, 1.2629, 1.3721, 1.0], [3.0, 1.0, 1.0, 2.0]],
          [[1.0, 1.8507, 1.1493, 1.5222], [1.0, 1.1511, 1.2139, 1.4778],
           [1.0, 1.2629, 1.3721, 1.0], [3.0, 1.0, 1.0, 2.0]],
          [[1.0, 1.8507, 1.1493, 1.5222], [1.0, 1.1511, 1.2139, 1.4778],
           [1.0, 1.2629, 1.3721, 1.0], [3.0, 1.0, 1.0, 2.0]]],
         [[[1.2687, 1.5055, 1.2382, 1.0], [1.1458, 1.4258, 1.416, 1.0],
           [1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]],
          [[1.2687, 1.5055, 1.2382, 1.0], [1.1458, 1.4258, 1.416, 1.0],
           [1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]],
          [[1.2687, 1.5055, 1.2382, 1.0], [1.1458, 1.4258, 1.416, 1.0],
           [1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]]]]).cuda()

    output = rotated_feature_align(
        feature, bbox, spatial_scale=1 / 8, points=1)
    output.backward(torch.ones_like(output))
    assert torch.allclose(output, expected_output, 0.01)
    assert torch.allclose(feature.grad, expected_grad, 0.01)
def test_sacconv():
    """Every SAConv2d variant must keep the output shape of plain Conv2d."""
    x = torch.rand(1, 3, 256, 256)

    # vanilla SAC
    sac = SAConv2d(3, 5, kernel_size=3, padding=1)
    reference = nn.Conv2d(3, 5, kernel_size=3, padding=1)
    assert sac(x).shape == reference(x).shape

    # dilated SAC
    dilated_sac = SAConv2d(3, 5, kernel_size=3, padding=2, dilation=2)
    reference = nn.Conv2d(3, 5, kernel_size=3, padding=2, dilation=2)
    assert dilated_sac(x).shape == reference(x).shape

    # SAC with a deformable conv branch (on GPU when available)
    deform_sac = SAConv2d(3, 5, kernel_size=3, padding=1, use_deform=True)
    if torch.cuda.is_available():
        x = torch.rand(1, 3, 256, 256).cuda()
        deform_sac = SAConv2d(
            3, 5, kernel_size=3, padding=1, use_deform=True).cuda()
        deform_out = deform_sac(x).cuda()
        reference = nn.Conv2d(3, 5, kernel_size=3, padding=1).cuda()
        assert deform_out.shape == reference(x).shape
    else:
        deform_out = deform_sac(x)
        reference = nn.Conv2d(3, 5, kernel_size=3, padding=1)
        assert deform_out.shape == reference(x).shape

    # grouped SAC
    x = torch.rand(1, 4, 256, 256)
    group_sac = SAConv2d(4, 4, kernel_size=3, padding=1, groups=2)
    reference = nn.Conv2d(4, 4, kernel_size=3, padding=1, groups=2)
    assert group_sac(x).shape == reference(x).shape
def make_sparse_convmodule(in_channels,
                           out_channels,
                           kernel_size,
                           indice_key,
                           stride=1,
                           padding=0,
                           conv_type='SubMConv3d',
                           norm_cfg=None,
                           order=('conv', 'norm', 'act')):
    """Make sparse convolution module.

    Args:
        in_channels (int): the number of input channels
        out_channels (int): the number of out channels
        kernel_size (int|tuple(int)): kernel size of convolution
        indice_key (str): the indice key used for sparse tensor
        stride (int|tuple(int)): the stride of convolution
        padding (int or list[int]): the padding number of input
        conv_type (str): sparse conv type in spconv
        norm_cfg (dict[str]): config of normalization layer
        order (tuple[str]): The order of conv/norm/activation layers. It is a
            sequence of "conv", "norm" and "act". Common examples are
            ("conv", "norm", "act") and ("act", "conv", "norm").

    Returns:
        spconv.SparseSequential: sparse convolution module.
    """
    # ``order`` may omit entries but must only contain the three known names.
    assert isinstance(order, tuple) and len(order) <= 3
    assert set(order) | {'conv', 'norm', 'act'} == {'conv', 'norm', 'act'}

    conv_cfg = dict(type=conv_type, indice_key=indice_key)
    inverse_conv_types = ('SparseInverseConv3d', 'SparseInverseConv2d',
                          'SparseInverseConv1d')
    modules = []
    for layer_name in order:
        if layer_name == 'conv':
            if conv_type in inverse_conv_types:
                # Inverse sparse convs do not take stride/padding arguments.
                modules.append(
                    build_conv_layer(
                        conv_cfg,
                        in_channels,
                        out_channels,
                        kernel_size,
                        bias=False))
            else:
                modules.append(
                    build_conv_layer(
                        conv_cfg,
                        in_channels,
                        out_channels,
                        kernel_size,
                        stride=stride,
                        padding=padding,
                        bias=False))
        elif layer_name == 'norm':
            modules.append(build_norm_layer(norm_cfg, out_channels)[1])
        elif layer_name == 'act':
            modules.append(nn.ReLU(inplace=True))
    return SparseSequential(*modules)
@pytest.mark.skipif((not torch.cuda.is_available()), reason='requires CUDA support')
def test_make_sparse_convmodule():
    """Check layer types/params and output feature shape of modules built by
    ``make_sparse_convmodule`` for both conv orders."""
    # 4 voxels, 4 features each (x, y, z, reflectance-like scalar)
    voxel_features = torch.tensor(
        [[6.56126, 0.9648336, (-1.7339306), 0.315],
         [6.8162713, (-2.480431), (-1.3616394), 0.36],
         [11.643568, (-4.744306), (-1.3580885), 0.16],
         [23.482342, 6.5036807, 0.5806964, 0.35]],
        dtype=torch.float32, device='cuda')
    # (batch_idx, z, y, x) integer coordinates for each voxel
    coordinates = torch.tensor(
        [[0, 12, 819, 131], [0, 16, 750, 136],
         [1, 16, 705, 232], [1, 35, 930, 469]],
        dtype=torch.int32, device='cuda')
    input_sp_tensor = SparseConvTensor(voxel_features, coordinates,
                                       [41, 1600, 1408], 2)
    # conv -> norm -> act with a submanifold conv
    sparse_block0 = make_sparse_convmodule(
        4, 16, 3, 'test0', stride=1, padding=0, conv_type='SubMConv3d',
        norm_cfg=dict(type='BN1d', eps=0.001, momentum=0.01),
        order=('conv', 'norm', 'act')).cuda()
    assert isinstance(sparse_block0[0], SubMConv3d)
    assert (sparse_block0[0].in_channels == 4)
    assert (sparse_block0[0].out_channels == 16)
    assert isinstance(sparse_block0[1], torch.nn.BatchNorm1d)
    assert (sparse_block0[1].eps == 0.001)
    assert (sparse_block0[1].momentum == 0.01)
    assert isinstance(sparse_block0[2], torch.nn.ReLU)
    # forward keeps one 16-dim feature row per input voxel
    out_features = sparse_block0(input_sp_tensor)
    assert (out_features.features.shape == torch.Size([4, 16]))
    # norm -> act -> conv with an inverse conv (no stride/padding args)
    sparse_block1 = make_sparse_convmodule(
        4, 16, 3, 'test1', stride=1, padding=0,
        conv_type='SparseInverseConv3d',
        norm_cfg=dict(type='BN1d', eps=0.001, momentum=0.01),
        order=('norm', 'act', 'conv')).cuda()
    assert isinstance(sparse_block1[0], torch.nn.BatchNorm1d)
    assert isinstance(sparse_block1[1], torch.nn.ReLU)
    assert isinstance(sparse_block1[2], SparseInverseConv3d)
class TestSyncBN(object):
    """Distributed tests for ``mmcv.ops.SyncBatchNorm``.

    Must run as a 4-process slurm job (``srun -p test --gres=gpu:4 -n4``);
    otherwise each test prints a hint and returns without asserting.
    """

    def dist_init(self):
        # Bootstrap a NCCL process group from slurm environment variables.
        rank = int(os.environ['SLURM_PROCID'])
        world_size = int(os.environ['SLURM_NTASKS'])
        local_rank = int(os.environ['SLURM_LOCALID'])
        node_list = str(os.environ['SLURM_NODELIST'])
        node_parts = re.findall('[0-9]+', node_list)
        # NOTE(review): assumes the node name embeds an IPv4 address in
        # node_parts[1..4] -- confirm against the cluster naming scheme.
        os.environ['MASTER_ADDR'] = (f'{node_parts[1]}.{node_parts[2]}' +
                                     f'.{node_parts[3]}.{node_parts[4]}')
        os.environ['MASTER_PORT'] = '12341'
        os.environ['WORLD_SIZE'] = str(world_size)
        os.environ['RANK'] = str(rank)
        dist.init_process_group('nccl')
        torch.cuda.set_device(local_rank)

    def _test_syncbn_train(self, size=1, half=False):
        # Compare SyncBatchNorm over process groups of `size` ranks against
        # a local BatchNorm2d that sees the equivalent concatenated batch.
        if (('SLURM_NTASKS' not in os.environ) or (int(os.environ['SLURM_NTASKS']) != 4)):
            print('must run with slurm has 4 processes!\nsrun -p test --gres=gpu:4 -n4')
            return
        else:
            print('Running syncbn test')
        from mmcv.ops import SyncBatchNorm
        assert (size in (1, 2, 4))
        if (not dist.is_initialized()):
            self.dist_init()
        rank = dist.get_rank()
        torch.manual_seed(9)
        torch.cuda.manual_seed(9)
        # Identical data on every rank (broadcast from rank 0).
        self.x = torch.rand(16, 3, 2, 3).cuda()
        self.y_bp = torch.rand(16, 3, 2, 3).cuda()
        if half:
            self.x = self.x.half()
            self.y_bp = self.y_bp.half()
        dist.broadcast(self.x, src=0)
        dist.broadcast(self.y_bp, src=0)
        torch.cuda.synchronize()
        # Build the process group matching the requested sync width.
        if (size == 1):
            groups = [None, None, None, None]
            groups[0] = dist.new_group([0])
            groups[1] = dist.new_group([1])
            groups[2] = dist.new_group([2])
            groups[3] = dist.new_group([3])
            group = groups[rank]
        elif (size == 2):
            groups = [None, None, None, None]
            groups[0] = groups[1] = dist.new_group([0, 1])
            groups[2] = groups[3] = dist.new_group([2, 3])
            group = groups[rank]
        elif (size == 4):
            group = dist.group.WORLD
        syncbn = SyncBatchNorm(3, group=group).cuda()
        syncbn.weight.data[0] = 0.2
        syncbn.weight.data[1] = 0.5
        syncbn.weight.data[2] = 0.7
        syncbn.train()
        # Reference BN with identical affine weights.
        bn = nn.BatchNorm2d(3).cuda()
        bn.weight.data[0] = 0.2
        bn.weight.data[1] = 0.5
        bn.weight.data[2] = 0.7
        bn.train()
        # Each rank feeds its own 4-sample slice through syncbn.
        sx = self.x[(rank * 4):((rank * 4) + 4)]
        sx.requires_grad_()
        sy = syncbn(sx)
        sy.backward(self.y_bp[(rank * 4):((rank * 4) + 4)])
        smean = syncbn.running_mean
        svar = syncbn.running_var
        sx_grad = sx.grad
        sw_grad = syncbn.weight.grad
        sb_grad = syncbn.bias.grad
        # The reference BN sees the concatenation of all slices in the group.
        if (size == 1):
            x = self.x[(rank * 4):((rank * 4) + 4)]
            y_bp = self.y_bp[(rank * 4):((rank * 4) + 4)]
        elif (size == 2):
            x = self.x[((rank // 2) * 8):(((rank // 2) * 8) + 8)]
            y_bp = self.y_bp[((rank // 2) * 8):(((rank // 2) * 8) + 8)]
        elif (size == 4):
            x = self.x
            y_bp = self.y_bp
        x.requires_grad_()
        y = bn(x)
        y.backward(y_bp)
        # Slice the reference output back down to this rank's portion.
        if (size == 2):
            y = y[((rank % 2) * 4):(((rank % 2) * 4) + 4)]
        elif (size == 4):
            y = y[(rank * 4):((rank * 4) + 4)]
        mean = bn.running_mean
        var = bn.running_var
        if (size == 1):
            x_grad = x.grad
            w_grad = bn.weight.grad
            b_grad = bn.bias.grad
        elif (size == 2):
            x_grad = x.grad[((rank % 2) * 4):(((rank % 2) * 4) + 4)]
            # weight/bias grads are averaged over the group size
            w_grad = (bn.weight.grad / 2)
            b_grad = (bn.bias.grad / 2)
        elif (size == 4):
            x_grad = x.grad[(rank * 4):((rank * 4) + 4)]
            w_grad = (bn.weight.grad / 4)
            b_grad = (bn.bias.grad / 4)
        assert np.allclose(mean.data.cpu().numpy(), smean.data.cpu().numpy(), 0.001)
        assert np.allclose(var.data.cpu().numpy(), svar.data.cpu().numpy(), 0.001)
        assert np.allclose(y.data.cpu().numpy(), sy.data.cpu().numpy(), 0.001)
        assert np.allclose(w_grad.data.cpu().numpy(), sw_grad.data.cpu().numpy(), 0.001)
        assert np.allclose(b_grad.data.cpu().numpy(), sb_grad.data.cpu().numpy(), 0.001)
        assert np.allclose(x_grad.data.cpu().numpy(), sx_grad.data.cpu().numpy(), 0.01)

    def _test_syncbn_empty_train(self, size=1, half=False):
        # Same comparison as _test_syncbn_train but with empty (0-sample)
        # inputs and stats_mode='N'.
        if (('SLURM_NTASKS' not in os.environ) or (int(os.environ['SLURM_NTASKS']) != 4)):
            print('must run with slurm has 4 processes!\nsrun -p test --gres=gpu:4 -n4')
            return
        else:
            print('Running syncbn test')
        from mmcv.ops import SyncBatchNorm
        assert (size in (1, 2, 4))
        if (not dist.is_initialized()):
            self.dist_init()
        rank = dist.get_rank()
        torch.manual_seed(9)
        torch.cuda.manual_seed(9)
        # Zero-sample batches exercise the empty-input path.
        self.x = torch.rand(0, 3, 2, 3).cuda()
        self.y_bp = torch.rand(0, 3, 2, 3).cuda()
        if half:
            self.x = self.x.half()
            self.y_bp = self.y_bp.half()
        dist.broadcast(self.x, src=0)
        dist.broadcast(self.y_bp, src=0)
        torch.cuda.synchronize()
        if (size == 1):
            groups = [None, None, None, None]
            groups[0] = dist.new_group([0])
            groups[1] = dist.new_group([1])
            groups[2] = dist.new_group([2])
            groups[3] = dist.new_group([3])
            group = groups[rank]
        elif (size == 2):
            groups = [None, None, None, None]
            groups[0] = groups[1] = dist.new_group([0, 1])
            groups[2] = groups[3] = dist.new_group([2, 3])
            group = groups[rank]
        elif (size == 4):
            group = dist.group.WORLD
        syncbn = SyncBatchNorm(3, group=group, stats_mode='N').cuda()
        syncbn.weight.data[0] = 0.2
        syncbn.weight.data[1] = 0.5
        syncbn.weight.data[2] = 0.7
        syncbn.train()
        bn = nn.BatchNorm2d(3).cuda()
        bn.weight.data[0] = 0.2
        bn.weight.data[1] = 0.5
        bn.weight.data[2] = 0.7
        bn.train()
        sx = self.x[(rank * 4):((rank * 4) + 4)]
        sx.requires_grad_()
        sy = syncbn(sx)
        sy.backward(self.y_bp[(rank * 4):((rank * 4) + 4)])
        smean = syncbn.running_mean
        svar = syncbn.running_var
        sx_grad = sx.grad
        sw_grad = syncbn.weight.grad
        sb_grad = syncbn.bias.grad
        if (size == 1):
            x = self.x[(rank * 4):((rank * 4) + 4)]
            y_bp = self.y_bp[(rank * 4):((rank * 4) + 4)]
        elif (size == 2):
            x = self.x[((rank // 2) * 8):(((rank // 2) * 8) + 8)]
            y_bp = self.y_bp[((rank // 2) * 8):(((rank // 2) * 8) + 8)]
        elif (size == 4):
            x = self.x
            y_bp = self.y_bp
        x.requires_grad_()
        y = bn(x)
        y.backward(y_bp)
        if (size == 2):
            y = y[((rank % 2) * 4):(((rank % 2) * 4) + 4)]
        elif (size == 4):
            y = y[(rank * 4):((rank * 4) + 4)]
        mean = bn.running_mean
        var = bn.running_var
        if (size == 1):
            x_grad = x.grad
            w_grad = bn.weight.grad
            b_grad = bn.bias.grad
        elif (size == 2):
            x_grad = x.grad[((rank % 2) * 4):(((rank % 2) * 4) + 4)]
            w_grad = (bn.weight.grad / 2)
            b_grad = (bn.bias.grad / 2)
        elif (size == 4):
            x_grad = x.grad[(rank * 4):((rank * 4) + 4)]
            w_grad = (bn.weight.grad / 4)
            b_grad = (bn.bias.grad / 4)
        assert np.allclose(mean.data.cpu().numpy(), smean.data.cpu().numpy(), 0.001)
        assert np.allclose(var.data.cpu().numpy(), svar.data.cpu().numpy(), 0.001)
        assert np.allclose(y.data.cpu().numpy(), sy.data.cpu().numpy(), 0.001)
        assert np.allclose(w_grad.data.cpu().numpy(), sw_grad.data.cpu().numpy(), 0.001)
        assert np.allclose(b_grad.data.cpu().numpy(), sb_grad.data.cpu().numpy(), 0.001)
        assert np.allclose(x_grad.data.cpu().numpy(), sx_grad.data.cpu().numpy(), 0.01)
        # Unknown stats_mode values must be rejected at construction time.
        with pytest.raises(AssertionError):
            SyncBatchNorm(3, group=group, stats_mode='X')

    def test_syncbn_1(self):
        self._test_syncbn_train(size=1)

    def test_syncbn_2(self):
        self._test_syncbn_train(size=2)

    def test_syncbn_4(self):
        self._test_syncbn_train(size=4)

    def test_syncbn_1_half(self):
        self._test_syncbn_train(size=1, half=True)

    def test_syncbn_2_half(self):
        self._test_syncbn_train(size=2, half=True)

    def test_syncbn_4_half(self):
        self._test_syncbn_train(size=4, half=True)

    def test_syncbn_empty_1(self):
        self._test_syncbn_empty_train(size=1)

    def test_syncbn_empty_2(self):
        self._test_syncbn_empty_train(size=2)

    def test_syncbn_empty_4(self):
        self._test_syncbn_empty_train(size=4)

    def test_syncbn_empty_1_half(self):
        self._test_syncbn_empty_train(size=1, half=True)

    def test_syncbn_empty_2_half(self):
        self._test_syncbn_empty_train(size=2, half=True)

    def test_syncbn_empty_4_half(self):
        self._test_syncbn_empty_train(size=4, half=True)
def remove_tmp_file(func):
    """Decorator that injects ``onnx_file='tmp.onnx'`` as a keyword argument
    and removes that file after the wrapped callable finishes (even on
    failure)."""

    @wraps(func)
    def wrapper(*args, **kwargs):
        tmp_path = 'tmp.onnx'
        kwargs['onnx_file'] = tmp_path
        try:
            return func(*args, **kwargs)
        finally:
            # Clean up regardless of whether the wrapped call succeeded.
            if os.path.exists(tmp_path):
                os.remove(tmp_path)

    return wrapper
@remove_tmp_file
def export_nms_module_to_onnx(module, onnx_file):
    """Instantiate ``module``, export it to ONNX with dummy boxes/scores and
    return the loaded ONNX model (the file is cleaned up by the decorator)."""
    torch_model = module()
    torch_model.eval()
    # 100 random boxes with matching scores as export tracing inputs
    dummy_inputs = (torch.rand([100, 4], dtype=torch.float32),
                    torch.rand([100], dtype=torch.float32))
    torch.onnx.export(
        torch_model,
        dummy_inputs,
        onnx_file,
        opset_version=11,
        input_names=['boxes', 'scores'],
        output_names=['output'])
    return onnx.load(onnx_file)
def test_can_handle_nms_with_constant_maxnum():
    """Exported NMS with an explicit ``max_num`` keeps 5 ONNX attributes."""

    class ModuleNMS(torch.nn.Module):

        def forward(self, boxes, scores):
            return nms(boxes, scores, iou_threshold=0.4, max_num=10)

    onnx_model = export_nms_module_to_onnx(ModuleNMS)
    processed_model = preprocess_onnx(onnx_model)
    nms_nodes = [node for node in processed_model.graph.node
                 if 'NonMaxSuppression' in node.name]
    for node in nms_nodes:
        assert len(node.attribute) == 5, 'The NMS must have 5 attributes.'
def test_can_handle_nms_with_undefined_maxnum():
    """Without ``max_num`` the exporter must still emit 5 attributes and a
    positive max_output_boxes_per_class."""

    class ModuleNMS(torch.nn.Module):

        def forward(self, boxes, scores):
            return nms(boxes, scores, iou_threshold=0.4)

    onnx_model = export_nms_module_to_onnx(ModuleNMS)
    processed_model = preprocess_onnx(onnx_model)
    nms_nodes = [node for node in processed_model.graph.node
                 if 'NonMaxSuppression' in node.name]
    for node in nms_nodes:
        assert len(node.attribute) == 5, 'The NMS must have 5 attributes.'
        assert node.attribute[2].i > 0, \
            'The max_output_boxes_per_class is not defined correctly.'
@pytest.mark.skipif((not torch.cuda.is_available()), reason='requires CUDA support')
def test_three_interpolate():
    """three_interpolate: weighted combination of features from the three
    indexed source points; compared against precomputed expected values."""
    # features: (B=2, C=5, M=6) source point features
    features = torch.tensor(
        [[[2.435, 4.7516, 4.4995, 2.435, 2.435, 2.435],
          [3.1236, 2.6278, 3.0447, 3.1236, 3.1236, 3.1236],
          [2.6732, 2.8677, 2.6436, 2.6732, 2.6732, 2.6732],
          [0.0124, 7.015, 7.0199, 0.0124, 0.0124, 0.0124],
          [0.3207, 0.0, 0.3411, 0.3207, 0.3207, 0.3207]],
         [[0.0, 0.9544, 2.4532, 0.0, 0.0, 0.0],
          [0.5346, 1.9176, 1.4715, 0.5346, 0.5346, 0.5346],
          [0.0, 0.2744, 2.0842, 0.0, 0.0, 0.0],
          [0.3414, 1.5063, 1.6209, 0.3414, 0.3414, 0.3414],
          [0.5814, 0.0103, 0.0, 0.5814, 0.5814, 0.5814]]]).cuda()
    # idx: (B, N=6, 3) indices of the three source points per target point
    idx = torch.tensor(
        [[[0, 1, 2], [2, 3, 4], [2, 3, 4], [0, 1, 2], [0, 1, 2], [0, 1, 3]],
         [[0, 2, 3], [1, 3, 4], [2, 1, 4], [0, 2, 4], [0, 2, 4], [0, 1, 2]]]).int().cuda()
    # weight: (B, N, 3) interpolation weights matching idx
    weight = torch.tensor(
        [[[0.33333, 0.33333, 0.33333],
          [1.0, 5.8155e-08, 2.2373e-08],
          [1.0, 1.7737e-08, 1.7356e-08],
          [0.33333, 0.33333, 0.33333],
          [0.33333, 0.33333, 0.33333],
          [0.33333, 0.33333, 0.33333]],
         [[0.33333, 0.33333, 0.33333],
          [1.0, 1.3651e-08, 7.7312e-09],
          [1.0, 1.7148e-08, 1.407e-08],
          [0.33333, 0.33333, 0.33333],
          [0.33333, 0.33333, 0.33333],
          [0.33333, 0.33333, 0.33333]]]).cuda()
    output = three_interpolate(features, idx, weight)
    expected_output = torch.tensor(
        [[[3.8953, 4.4995, 4.4995, 3.8953, 3.8953, 3.2072],
          [2.932, 3.0447, 3.0447, 2.932, 2.932, 2.9583],
          [2.7281, 2.6436, 2.6436, 2.7281, 2.7281, 2.738],
          [4.6824, 7.0199, 7.0199, 4.6824, 4.6824, 2.3466],
          [0.2206, 0.3411, 0.3411, 0.2206, 0.2206, 0.2138]],
         [[0.81773, 0.9544, 2.4532, 0.81773, 0.81773, 1.1359],
          [0.84689, 1.9176, 1.4715, 0.84689, 0.84689, 1.3079],
          [0.69473, 0.2744, 2.0842, 0.69473, 0.69473, 0.78619],
          [0.76789, 1.5063, 1.6209, 0.76789, 0.76789, 1.1562],
          [0.3876, 0.0103, 8.3569e-09, 0.3876, 0.3876, 0.19723]]]).cuda()
    assert torch.allclose(output, expected_output, 0.0001)
@pytest.mark.skipif((not torch.cuda.is_available()), reason='requires CUDA support')
def test_three_nn():
    """three_nn: for each unknown point, find the three nearest known points
    and their distances; compared against precomputed expectations."""
    # known: (B=2, M=5, 3) reference points (rows 3-4 duplicate row 0)
    known = torch.tensor(
        [[[(-1.8373), 3.5605, (-0.7867)], [0.7615, 2.942, 0.2314],
          [(-0.6503), 3.6637, (-1.0622)], [(-1.8373), 3.5605, (-0.7867)],
          [(-1.8373), 3.5605, (-0.7867)]],
         [[(-1.3399), 1.9991, (-0.3698)], [(-0.0799), 0.9698, (-0.8457)],
          [0.0858, 2.4721, (-0.1928)], [(-1.3399), 1.9991, (-0.3698)],
          [(-1.3399), 1.9991, (-0.3698)]]]).cuda()
    # unknown: (B, N=10, 3) query points
    unknown = torch.tensor(
        [[[(-1.8373), 3.5605, (-0.7867)], [0.7615, 2.942, 0.2314],
          [(-0.6503), 3.6637, (-1.0622)], [(-1.5237), 2.3976, (-0.8097)],
          [(-0.0722), 3.4017, (-0.288)], [0.5198, 3.0661, (-0.4605)],
          [(-2.0185), 3.5019, (-0.3236)], [0.5098, 3.102, 0.5799],
          [(-1.6137), 3.8443, (-0.5269)], [0.7341, 2.9626, (-0.3189)]],
         [[(-1.3399), 1.9991, (-0.3698)], [(-0.0799), 0.9698, (-0.8457)],
          [0.0858, 2.4721, (-0.1928)], [(-0.9022), 1.656, (-1.309)],
          [0.1156, 1.6901, (-0.4366)], [(-0.6477), 2.3576, (-0.1563)],
          [(-0.8482), 1.1466, (-1.2704)], [(-0.8753), 2.0845, (-0.346)],
          [(-0.5621), 1.4233, (-1.2858)], [(-0.5883), 1.3114, (-1.2899)]]]).cuda()
    (dist, idx) = three_nn(unknown, known)
    expected_dist = torch.tensor(
        [[[0.0, 0.0, 0.0], [0.0, 2.0463, 2.8588], [0.0, 1.2229, 1.2229],
          [1.2047, 1.2047, 1.2047], [1.0011, 1.0845, 1.8411],
          [0.7433, 1.4451, 2.4304], [0.5007, 0.5007, 0.5007],
          [0.4587, 2.0875, 2.7544], [0.445, 0.445, 0.445],
          [0.5514, 1.7206, 2.6811]],
         [[0.0, 0.0, 0.0], [0.0, 1.6464, 1.6952], [0.0, 1.5125, 1.5125],
          [1.0915, 1.0915, 1.0915], [0.8197, 0.8511, 1.4894],
          [0.7433, 0.8082, 0.8082], [0.8955, 1.334, 1.334],
          [0.473, 0.473, 0.473], [0.7949, 1.3325, 1.3325],
          [0.7566, 1.3727, 1.3727]]]).cuda()
    expected_idx = torch.tensor(
        [[[0, 3, 4], [1, 2, 0], [2, 0, 3], [0, 3, 4], [2, 1, 0],
          [1, 2, 0], [0, 3, 4], [1, 2, 0], [0, 3, 4], [1, 2, 0]],
         [[0, 3, 4], [1, 2, 0], [2, 0, 3], [0, 3, 4], [2, 1, 0],
          [2, 0, 3], [1, 0, 3], [0, 3, 4], [1, 0, 3], [1, 0, 3]]]).cuda()
    assert torch.allclose(dist, expected_dist, 0.0001)
    assert torch.all((idx == expected_idx))
def _test_tinshift_gradcheck(dtype):
    """Run autograd gradcheck on tin_shift for every module-level shift case."""
    try:
        from mmcv.ops import tin_shift
    except ModuleNotFoundError:
        pytest.skip('TINShift op is not successfully compiled')

    if dtype == torch.half:
        pytest.skip('"add_cpu/sub_cpu" not implemented for Half')

    for shift_case in shifts:
        feat = torch.tensor(np.array(inputs), dtype=dtype, device='cuda',
                            requires_grad=True)
        shift_tensor = torch.tensor(np.array(shift_case), device='cuda').int()
        if torch.__version__ == 'parrots':
            gradcheck(tin_shift, (feat, shift_tensor))
        else:
            # loose tolerances: the op is exercised in low precision
            gradcheck(tin_shift, (feat, shift_tensor), atol=1, rtol=0.1)
def _test_tinshift_allclose(dtype):
    """Compare tin_shift forward/backward against precomputed fixtures."""
    try:
        from mmcv.ops import tin_shift
    except ModuleNotFoundError:
        pytest.skip('TINShift op is not successfully compiled')

    for case_shift, case_output, case_grad in zip(shifts, outputs, grads):
        feat = torch.tensor(np.array(inputs), dtype=dtype, device='cuda',
                            requires_grad=True)
        shift = torch.tensor(np.array(case_shift), device='cuda').int()
        result = tin_shift(feat, shift)
        result.backward(torch.ones_like(result))
        assert np.allclose(
            result.data.type(torch.float).cpu().numpy(), np.array(case_output), 0.001)
        assert np.allclose(
            feat.grad.data.type(torch.float).cpu().numpy(), np.array(case_grad), 0.001)
def _test_tinshift_assert(dtype):
    """tin_shift must raise ValueError when shift shape mismatches the input."""
    try:
        from mmcv.ops import tin_shift
    except ModuleNotFoundError:
        pytest.skip('TINShift op is not successfully compiled')

    feature_cases = [torch.rand(2, 3, 4, 2), torch.rand(2, 3, 4, 2)]
    shift_cases = [torch.rand(2, 3), torch.rand(2, 5)]
    for feat, shift in zip(feature_cases, shift_cases):
        feat = feat.cuda()
        shift = shift.cuda()
        with pytest.raises(ValueError):
            tin_shift(feat, shift)
@pytest.mark.skipif((not torch.cuda.is_available()), reason='requires CUDA support')
@pytest.mark.parametrize('dtype', [torch.float, torch.double, torch.half])
def test_tinshift(dtype):
    """Run the allclose, gradcheck and assertion checks for tin_shift."""
    for check in (_test_tinshift_allclose, _test_tinshift_gradcheck,
                  _test_tinshift_assert):
        check(dtype=dtype)
def mock(*args, **kwargs):
    """No-op stand-in accepting any signature; always returns ``None``."""
    return None
@patch('torch.distributed._broadcast_coalesced', mock)
@patch('torch.distributed.broadcast', mock)
@patch('torch.nn.parallel.DistributedDataParallel._ddp_init_helper', mock)
def test_is_module_wrapper():
    """is_module_wrapper should recognise (MM)DataParallel / (MM)DDP wrappers
    and any class registered in MODULE_WRAPPERS, but not plain modules."""

    class Model(nn.Module):

        def __init__(self):
            super().__init__()
            self.conv = nn.Conv2d(2, 2, 1)

        def forward(self, x):
            return self.conv(x)

    # Newer torch versions verify models across ranks during DDP init;
    # stub those hooks out so DDP can be built with a MagicMock group.
    if hasattr(torch.distributed, '_verify_model_across_ranks'):
        torch.distributed._verify_model_across_ranks = mock
    if hasattr(torch.distributed, '_verify_params_across_processes'):
        torch.distributed._verify_params_across_processes = mock
    model = Model()
    assert (not is_module_wrapper(model))
    dp = DataParallel(model)
    assert is_module_wrapper(dp)
    mmdp = MMDataParallel(model)
    assert is_module_wrapper(mmdp)
    ddp = DistributedDataParallel(model, process_group=MagicMock())
    assert is_module_wrapper(ddp)
    mmddp = MMDistributedDataParallel(model, process_group=MagicMock())
    assert is_module_wrapper(mmddp)
    deprecated_mmddp = DeprecatedMMDDP(model)
    assert is_module_wrapper(deprecated_mmddp)

    # A custom class becomes a recognised wrapper once registered.
    @MODULE_WRAPPERS.register_module()
    class ModuleWrapper(object):

        def __init__(self, module):
            self.module = module

        def forward(self, *args, **kwargs):
            return self.module(*args, **kwargs)

    module_wraper = ModuleWrapper(model)
    assert is_module_wrapper(module_wraper)
def test_get_input_device():
    """CPU tensors map to device -1, CUDA tensors to their GPU id; other
    input types raise."""
    single = torch.zeros([1, 3, 3, 3])
    assert get_input_device(single) == (-1)

    tensor_list = [torch.zeros([1, 3, 3, 3]), torch.zeros([1, 4, 4, 4])]
    assert get_input_device(tensor_list) == (-1)

    if torch.cuda.is_available():
        single = torch.zeros([1, 3, 3, 3]).cuda()
        assert get_input_device(single) == 0
        tensor_list = [torch.zeros([1, 3, 3, 3]).cuda(),
                       torch.zeros([1, 4, 4, 4]).cuda()]
        assert get_input_device(tensor_list) == 0

    # non-tensor input is rejected
    with pytest.raises(Exception):
        get_input_device(5)
def test_scatter():
    """scatter is an identity copy for CPU targets and a device transfer for
    CUDA targets; non-tensor input raises."""
    tensor = torch.zeros([1, 3, 3, 3])
    scattered = scatter(input=tensor, devices=[(-1)])
    assert torch.allclose(tensor, scattered)

    tensor_list = [torch.zeros([1, 3, 3, 3]), torch.zeros([1, 4, 4, 4])]
    scattered_list = scatter(input=tensor_list, devices=[(-1)])
    for src, dst in zip(tensor_list, scattered_list):
        assert torch.allclose(src, dst)

    if torch.cuda.is_available():
        tensor = torch.zeros([1, 3, 3, 3])
        scattered = scatter(input=tensor, devices=[0])
        assert torch.allclose(tensor.cuda(), scattered)

        tensor_list = [torch.zeros([1, 3, 3, 3]), torch.zeros([1, 4, 4, 4])]
        scattered_list = scatter(input=tensor_list, devices=[0])
        for src, dst in zip(tensor_list, scattered_list):
            assert torch.allclose(src.cuda(), dst)

    # non-tensor input is rejected
    with pytest.raises(Exception):
        scatter(5, [(-1)])
def test_Scatter():
    """Scatter.forward returns a tuple of per-device outputs for both single
    tensors and tensor lists, on CPU and CUDA targets."""
    cpu_targets = [(-1)]
    tensor = torch.zeros([1, 3, 3, 3])
    results = Scatter.forward(cpu_targets, tensor)
    assert isinstance(results, tuple)
    assert torch.allclose(tensor, results[0])

    cpu_targets = [(-1)]
    tensor_list = [torch.zeros([1, 3, 3, 3]), torch.zeros([1, 4, 4, 4])]
    results = Scatter.forward(cpu_targets, tensor_list)
    assert isinstance(results, tuple)
    for src, dst in zip(tensor_list, results):
        assert torch.allclose(src, dst)

    if torch.cuda.is_available():
        gpu_targets = [0]
        tensor = torch.zeros([1, 3, 3, 3])
        results = Scatter.forward(gpu_targets, tensor)
        assert isinstance(results, tuple)
        assert torch.allclose(tensor.cuda(), results[0])

        gpu_targets = [0]
        tensor_list = [torch.zeros([1, 3, 3, 3]), torch.zeros([1, 4, 4, 4])]
        results = Scatter.forward(gpu_targets, tensor_list)
        assert isinstance(results, tuple)
        # NOTE(review): unlike the CPU branch this indexes dst[0] -- the CUDA
        # list output appears to nest one level deeper; confirm against
        # Scatter's implementation.
        for src, dst in zip(tensor_list, results):
            assert torch.allclose(src.cuda(), dst[0])
@COMPONENTS.register_module()
class FooConv1d(BaseModule):
    """Minimal 1-d conv component used to exercise weight initialization."""

    def __init__(self, init_cfg=None):
        super(FooConv1d, self).__init__(init_cfg)
        self.conv1d = nn.Conv1d(4, 1, 4)

    def forward(self, x):
        out = self.conv1d(x)
        return out
@COMPONENTS.register_module()
class FooConv2d(BaseModule):
    """Minimal 2-d conv component used to exercise weight initialization."""

    def __init__(self, init_cfg=None):
        super(FooConv2d, self).__init__(init_cfg)
        self.conv2d = nn.Conv2d(3, 1, 3)

    def forward(self, x):
        out = self.conv2d(x)
        return out
@COMPONENTS.register_module()
class FooLinear(BaseModule):
    """Minimal linear component used to exercise weight initialization."""

    def __init__(self, init_cfg=None):
        super(FooLinear, self).__init__(init_cfg)
        self.linear = nn.Linear(3, 4)

    def forward(self, x):
        out = self.linear(x)
        return out
@COMPONENTS.register_module()
class FooLinearConv1d(BaseModule):
    """Composite component chaining an optional linear into an optional
    1-d conv, both built from config dicts."""

    def __init__(self, linear=None, conv1d=None, init_cfg=None):
        super(FooLinearConv1d, self).__init__(init_cfg)
        if linear is not None:
            self.linear = build_from_cfg(linear, COMPONENTS)
        if conv1d is not None:
            self.conv1d = build_from_cfg(conv1d, COMPONENTS)

    def forward(self, x):
        hidden = self.linear(x)
        return self.conv1d(hidden)
@FOOMODELS.register_module()
class FooModel(BaseModule):
    """Top-level model with up to four optional config-built components and
    a plain linear regression head."""

    def __init__(self, component1=None, component2=None, component3=None,
                 component4=None, init_cfg=None) -> None:
        super(FooModel, self).__init__(init_cfg)
        # Register components in order so attribute/parameter order matches
        # the original hand-written version.
        configs = (component1, component2, component3, component4)
        for index, cfg in enumerate(configs, start=1):
            if cfg is not None:
                setattr(self, f'component{index}',
                        build_from_cfg(cfg, COMPONENTS))
        self.reg = nn.Linear(3, 4)
def test_initilization_info_logger():
    """init_weights must log, per parameter, which initializer actually set
    it (including user-overloaded ``init_weights``), and must clean up the
    temporary ``_params_init_info`` attribute afterwards."""
    import os
    import torch.nn as nn
    from mmcv.utils.logging import get_logger

    class OverloadInitConv(nn.Conv2d, BaseModule):
        # Custom init_weights: fills every parameter with 1.

        def init_weights(self):
            for p in self.parameters():
                with torch.no_grad():
                    p.fill_(1)

    class CheckLoggerModel(BaseModule):

        def __init__(self, init_cfg=None):
            super(CheckLoggerModel, self).__init__(init_cfg)
            self.conv1 = nn.Conv2d(1, 1, 1, 1)
            self.conv2 = OverloadInitConv(1, 1, 1, 1)
            self.conv3 = nn.Conv2d(1, 1, 1, 1)
            self.fc1 = nn.Linear(1, 1)

    init_cfg = [dict(type='Normal', layer='Conv2d', std=0.01,
                     override=dict(type='Normal', name='conv3', std=0.01,
                                   bias_prob=0.01)),
                dict(type='Constant', layer='Linear', val=0.0, bias=1.0)]
    model = CheckLoggerModel(init_cfg=init_cfg)
    train_log = '20210720_132454.log'
    workdir = tempfile.mkdtemp()
    log_file = os.path.join(workdir, train_log)
    get_logger('init_logger', log_file=log_file)
    # _params_init_info only exists while init_weights is running.
    assert (not hasattr(model, '_params_init_info'))
    model.init_weights()
    assert (not hasattr(model, '_params_init_info'))
    assert os.path.exists(log_file)
    lines = mmcv.list_from_file(log_file)
    # The line after each parameter name records the initializer used.
    for (i, line) in enumerate(lines):
        if ('conv1.weight' in line):
            assert ('NormalInit' in lines[(i + 1)])
        if ('conv2.weight' in line):
            assert ('OverloadInitConv' in lines[(i + 1)])
        if ('fc1.weight' in line):
            assert ('ConstantInit' in lines[(i + 1)])

    class OverloadInitConvFc(nn.Conv2d, BaseModule):

        def __init__(self, *args, **kwargs):
            super(OverloadInitConvFc, self).__init__(*args, **kwargs)
            self.conv1 = nn.Linear(1, 1)

        def init_weights(self):
            for p in self.parameters():
                with torch.no_grad():
                    p.fill_(1)

    class CheckLoggerModel(BaseModule):

        def __init__(self, init_cfg=None):
            super(CheckLoggerModel, self).__init__(init_cfg)
            self.conv1 = nn.Conv2d(1, 1, 1, 1)
            self.conv2 = OverloadInitConvFc(1, 1, 1, 1)
            self.conv3 = nn.Conv2d(1, 1, 1, 1)
            self.fc1 = nn.Linear(1, 1)

    class TopLevelModule(BaseModule):

        def __init__(self, init_cfg=None, checklog_init_cfg=None):
            super(TopLevelModule, self).__init__(init_cfg)
            self.module1 = CheckLoggerModel(checklog_init_cfg)
            self.module2 = OverloadInitConvFc(1, 1, 1, 1)

    checklog_init_cfg = [dict(type='Normal', layer='Conv2d', std=0.01,
                              override=dict(type='Normal', name='conv3',
                                            std=0.01, bias_prob=0.01)),
                         dict(type='Constant', layer='Linear', val=0.0, bias=1.0)]
    top_level_init_cfg = [dict(type='Normal', layer='Conv2d', std=0.01,
                               override=dict(type='Normal', name='module2',
                                             std=0.01, bias_prob=0.01))]
    model = TopLevelModule(init_cfg=top_level_init_cfg,
                           checklog_init_cfg=checklog_init_cfg)
    # Repeated sub-module / top-level init calls must stay consistent.
    model.module1.init_weights()
    model.module2.init_weights()
    model.init_weights()
    model.module1.init_weights()
    model.module2.init_weights()
    assert (not hasattr(model, '_params_init_info'))
    model.init_weights()
    assert (not hasattr(model, '_params_init_info'))
    assert os.path.exists(log_file)
    lines = mmcv.list_from_file(log_file)
    for (i, line) in enumerate(lines):
        if (('TopLevelModule' in line) and ('init_cfg' not in line)):
            assert ('the same' in line)
def test_update_init_info():
    """update_init_info must refresh init_info for every tracked parameter
    and raise when a parameter changed without being tracked."""

    class DummyModel(BaseModule):

        def __init__(self, init_cfg=None):
            super().__init__(init_cfg)
            self.conv1 = nn.Conv2d(1, 1, 1, 1)
            self.conv3 = nn.Conv2d(1, 1, 1, 1)
            self.fc1 = nn.Linear(1, 1)

    model = DummyModel()
    from collections import defaultdict
    model._params_init_info = defaultdict(dict)
    # Seed tracking info with the current per-parameter means.
    for _, param in model.named_parameters():
        entry = model._params_init_info[param]
        entry['init_info'] = 'init'
        entry['tmp_mean_value'] = param.data.mean()

    with torch.no_grad():
        for param in model.parameters():
            param.fill_(1)
    update_init_info(model, init_info='fill_1')

    for record in model._params_init_info.values():
        assert record['init_info'] == 'fill_1'
        assert record['tmp_mean_value'] == 1

    # Swapping in an untracked parameter makes the bookkeeping inconsistent.
    model.conv1.bias = nn.Parameter(torch.ones_like(model.conv1.bias))
    with pytest.raises(AssertionError):
        update_init_info(model, init_info=' ')
def test_model_weight_init():
    """Per-layer-type constant init is applied recursively.

    Config: Linear weight=1/bias=2, Conv1d weight=3/bias=4,
    Conv2d weight=5/bias=6. After ``init_weights``::

        model (FooModel)
        |-- component1 (FooConv1d,  weight=3, bias=4)
        |-- component2 (FooConv2d,  weight=5, bias=6)
        |-- component3 (FooLinear,  weight=1, bias=2)
        |-- component4 (FooLinearConv1d)
        |   |-- linear (FooLinear,  weight=1, bias=2)
        |   |-- conv1d (FooConv1d,  weight=3, bias=4)
        |-- reg (nn.Linear,         weight=1, bias=2)
    """
    model_cfg = dict(
        type='FooModel',
        init_cfg=[dict(type='Constant', val=1, bias=2, layer='Linear'),
                  dict(type='Constant', val=3, bias=4, layer='Conv1d'),
                  dict(type='Constant', val=5, bias=6, layer='Conv2d')],
        component1=dict(type='FooConv1d'),
        component2=dict(type='FooConv2d'),
        component3=dict(type='FooLinear'),
        component4=dict(type='FooLinearConv1d',
                        linear=dict(type='FooLinear'),
                        conv1d=dict(type='FooConv1d')))
    model = build_from_cfg(model_cfg, FOOMODELS)
    model.init_weights()
    # Conv1d layers -> weight 3 / bias 4
    assert torch.equal(model.component1.conv1d.weight,
                       torch.full(model.component1.conv1d.weight.shape, 3.0))
    assert torch.equal(model.component1.conv1d.bias,
                       torch.full(model.component1.conv1d.bias.shape, 4.0))
    # Conv2d layers -> weight 5 / bias 6
    assert torch.equal(model.component2.conv2d.weight,
                       torch.full(model.component2.conv2d.weight.shape, 5.0))
    assert torch.equal(model.component2.conv2d.bias,
                       torch.full(model.component2.conv2d.bias.shape, 6.0))
    # Linear layers -> weight 1 / bias 2
    assert torch.equal(model.component3.linear.weight,
                       torch.full(model.component3.linear.weight.shape, 1.0))
    assert torch.equal(model.component3.linear.bias,
                       torch.full(model.component3.linear.bias.shape, 2.0))
    assert torch.equal(model.component4.linear.linear.weight,
                       torch.full(model.component4.linear.linear.weight.shape, 1.0))
    assert torch.equal(model.component4.linear.linear.bias,
                       torch.full(model.component4.linear.linear.bias.shape, 2.0))
    assert torch.equal(model.component4.conv1d.conv1d.weight,
                       torch.full(model.component4.conv1d.conv1d.weight.shape, 3.0))
    assert torch.equal(model.component4.conv1d.conv1d.bias,
                       torch.full(model.component4.conv1d.conv1d.bias.shape, 4.0))
    assert torch.equal(model.reg.weight,
                       torch.full(model.reg.weight.shape, 1.0))
    assert torch.equal(model.reg.bias,
                       torch.full(model.reg.bias.shape, 2.0))
def test_nest_components_weight_init():
    """Component-level init_cfg and ``override`` take precedence over the
    parent's per-layer-type init.

    Config: parent Linear=1/2 (reg overridden to 13/14), Conv1d=3/4,
    Conv2d=5/6; component1 has its own Conv1d=7/8 cfg and component2 its own
    Conv2d=9/10 cfg. After ``init_weights``::

        model (FooModel)
        |-- component1 (FooConv1d,  weight=7,  bias=8)
        |-- component2 (FooConv2d,  weight=9,  bias=10)
        |-- component3 (FooLinear,  weight=1,  bias=2)
        |-- component4 (FooLinearConv1d)
        |   |-- linear (FooLinear,  weight=1,  bias=2)
        |   |-- conv1d (FooConv1d,  weight=3,  bias=4)
        |-- reg (nn.Linear,         weight=13, bias=14)
    """
    model_cfg = dict(
        type='FooModel',
        init_cfg=[dict(type='Constant', val=1, bias=2, layer='Linear',
                       override=dict(type='Constant', name='reg', val=13, bias=14)),
                  dict(type='Constant', val=3, bias=4, layer='Conv1d'),
                  dict(type='Constant', val=5, bias=6, layer='Conv2d')],
        component1=dict(type='FooConv1d',
                        init_cfg=dict(type='Constant', layer='Conv1d', val=7, bias=8)),
        component2=dict(type='FooConv2d',
                        init_cfg=dict(type='Constant', layer='Conv2d', val=9, bias=10)),
        component3=dict(type='FooLinear'),
        component4=dict(type='FooLinearConv1d',
                        linear=dict(type='FooLinear'),
                        conv1d=dict(type='FooConv1d')))
    model = build_from_cfg(model_cfg, FOOMODELS)
    model.init_weights()
    # components with their own init_cfg keep their own values
    assert torch.equal(model.component1.conv1d.weight,
                       torch.full(model.component1.conv1d.weight.shape, 7.0))
    assert torch.equal(model.component1.conv1d.bias,
                       torch.full(model.component1.conv1d.bias.shape, 8.0))
    assert torch.equal(model.component2.conv2d.weight,
                       torch.full(model.component2.conv2d.weight.shape, 9.0))
    assert torch.equal(model.component2.conv2d.bias,
                       torch.full(model.component2.conv2d.bias.shape, 10.0))
    # components without init_cfg inherit the parent's per-layer values
    assert torch.equal(model.component3.linear.weight,
                       torch.full(model.component3.linear.weight.shape, 1.0))
    assert torch.equal(model.component3.linear.bias,
                       torch.full(model.component3.linear.bias.shape, 2.0))
    assert torch.equal(model.component4.linear.linear.weight,
                       torch.full(model.component4.linear.linear.weight.shape, 1.0))
    assert torch.equal(model.component4.linear.linear.bias,
                       torch.full(model.component4.linear.linear.bias.shape, 2.0))
    assert torch.equal(model.component4.conv1d.conv1d.weight,
                       torch.full(model.component4.conv1d.conv1d.weight.shape, 3.0))
    assert torch.equal(model.component4.conv1d.conv1d.bias,
                       torch.full(model.component4.conv1d.conv1d.bias.shape, 4.0))
    # reg is overridden to 13/14
    assert torch.equal(model.reg.weight,
                       torch.full(model.reg.weight.shape, 13.0))
    assert torch.equal(model.reg.bias,
                       torch.full(model.reg.bias.shape, 14.0))
def test_without_layer_weight_init():
    """A component init_cfg without a ``layer`` key initializes nothing by
    itself, so the parent's per-layer configs still apply (component1 ends
    up with the parent's Conv1d values 3/4, not its own 7/8)."""
    model_cfg = dict(
        type='FooModel',
        init_cfg=[dict(type='Constant', val=1, bias=2, layer='Linear'),
                  dict(type='Constant', val=3, bias=4, layer='Conv1d'),
                  dict(type='Constant', val=5, bias=6, layer='Conv2d')],
        component1=dict(type='FooConv1d',
                        init_cfg=dict(type='Constant', val=7, bias=8)),
        component2=dict(type='FooConv2d'),
        component3=dict(type='FooLinear'))
    model = build_from_cfg(model_cfg, FOOMODELS)
    model.init_weights()
    # layer-less init_cfg did not fire: parent's Conv1d values win
    assert torch.equal(model.component1.conv1d.weight,
                       torch.full(model.component1.conv1d.weight.shape, 3.0))
    assert torch.equal(model.component1.conv1d.bias,
                       torch.full(model.component1.conv1d.bias.shape, 4.0))
    assert torch.equal(model.component2.conv2d.weight,
                       torch.full(model.component2.conv2d.weight.shape, 5.0))
    assert torch.equal(model.component2.conv2d.bias,
                       torch.full(model.component2.conv2d.bias.shape, 6.0))
    assert torch.equal(model.component3.linear.weight,
                       torch.full(model.component3.linear.weight.shape, 1.0))
    assert torch.equal(model.component3.linear.bias,
                       torch.full(model.component3.linear.bias.shape, 2.0))
    assert torch.equal(model.reg.weight,
                       torch.full(model.reg.weight.shape, 1.0))
    assert torch.equal(model.reg.bias,
                       torch.full(model.reg.bias.shape, 2.0))
def test_override_weight_init():
    """``override`` applies only to the named submodule ('reg'); other
    components are left untouched by that entry."""
    # Without a `layer` key, only the override target gets initialized.
    model_cfg = dict(
        type='FooModel',
        init_cfg=[dict(type='Constant', val=10, bias=20,
                       override=dict(name='reg'))],
        component1=dict(type='FooConv1d'),
        component3=dict(type='FooLinear'))
    model = build_from_cfg(model_cfg, FOOMODELS)
    model.init_weights()
    assert torch.equal(model.reg.weight,
                       torch.full(model.reg.weight.shape, 10.0))
    assert torch.equal(model.reg.bias,
                       torch.full(model.reg.bias.shape, 20.0))
    # components were not touched by the reg-only override
    assert (not torch.equal(model.component1.conv1d.weight,
                            torch.full(model.component1.conv1d.weight.shape, 10.0)))
    assert (not torch.equal(model.component1.conv1d.bias,
                            torch.full(model.component1.conv1d.bias.shape, 20.0)))
    assert (not torch.equal(model.component3.linear.weight,
                            torch.full(model.component3.linear.weight.shape, 10.0)))
    assert (not torch.equal(model.component3.linear.bias,
                            torch.full(model.component3.linear.bias.shape, 20.0)))
    # An override carrying its own type/val/bias replaces the outer values.
    model_cfg = dict(
        type='FooModel',
        init_cfg=[dict(type='Constant', val=1, bias=2,
                       override=dict(name='reg', type='Constant', val=30, bias=40))],
        component1=dict(type='FooConv1d'),
        component2=dict(type='FooConv2d'),
        component3=dict(type='FooLinear'))
    model = build_from_cfg(model_cfg, FOOMODELS)
    model.init_weights()
    assert torch.equal(model.reg.weight,
                       torch.full(model.reg.weight.shape, 30.0))
    assert torch.equal(model.reg.bias,
                       torch.full(model.reg.bias.shape, 40.0))
def test_sequential_model_weight_init():
    """``Sequential`` initializes its children; a child's own ``init_cfg``
    takes priority over the ``init_cfg`` given to ``Sequential`` itself."""
    seq_model_cfg = [
        dict(
            type='FooConv1d',
            init_cfg=dict(type='Constant', layer='Conv1d', val=0., bias=1.)),
        dict(
            type='FooConv2d',
            init_cfg=dict(type='Constant', layer='Conv2d', val=2., bias=3.)),
    ]

    def check(param, value):
        assert torch.equal(param, torch.full(param.shape, value))

    members = [build_from_cfg(cfg, COMPONENTS) for cfg in seq_model_cfg]
    seq_model = Sequential(*members)
    seq_model.init_weights()
    check(seq_model[0].conv1d.weight, 0.)
    check(seq_model[0].conv1d.bias, 1.)
    check(seq_model[1].conv2d.weight, 2.)
    check(seq_model[1].conv2d.bias, 3.)

    # The outer init_cfg (val=4, bias=5) must NOT override the per-child
    # configs: values stay 0/1 and 2/3.
    members = [build_from_cfg(cfg, COMPONENTS) for cfg in seq_model_cfg]
    seq_model = Sequential(
        *members,
        init_cfg=dict(
            type='Constant', layer=['Conv1d', 'Conv2d'], val=4., bias=5.))
    seq_model.init_weights()
    check(seq_model[0].conv1d.weight, 0.)
    check(seq_model[0].conv1d.bias, 1.)
    check(seq_model[1].conv2d.weight, 2.)
    check(seq_model[1].conv2d.bias, 3.)
def test_modulelist_weight_init():
    """Children of ``ModuleList`` keep their own ``init_cfg``, which takes
    priority over an ``init_cfg`` handed to the ``ModuleList`` itself."""
    models_cfg = [
        dict(
            type='FooConv1d',
            init_cfg=dict(type='Constant', layer='Conv1d', val=0., bias=1.)),
        dict(
            type='FooConv2d',
            init_cfg=dict(type='Constant', layer='Conv2d', val=2., bias=3.)),
    ]

    def check(param, value):
        assert torch.equal(param, torch.full(param.shape, value))

    members = [build_from_cfg(cfg, COMPONENTS) for cfg in models_cfg]
    modellist = ModuleList(members)
    modellist.init_weights()
    check(modellist[0].conv1d.weight, 0.)
    check(modellist[0].conv1d.bias, 1.)
    check(modellist[1].conv2d.weight, 2.)
    check(modellist[1].conv2d.bias, 3.)

    # The outer init_cfg (val=4, bias=5) must NOT override the inner ones.
    members = [build_from_cfg(cfg, COMPONENTS) for cfg in models_cfg]
    modellist = ModuleList(
        members,
        init_cfg=dict(
            type='Constant', layer=['Conv1d', 'Conv2d'], val=4., bias=5.))
    modellist.init_weights()
    check(modellist[0].conv1d.weight, 0.)
    check(modellist[0].conv1d.bias, 1.)
    check(modellist[1].conv2d.weight, 2.)
    check(modellist[1].conv2d.bias, 3.)
def test_moduledict_weight_init():
    """Children of ``ModuleDict`` keep their own ``init_cfg``, which takes
    priority over an ``init_cfg`` handed to the ``ModuleDict`` itself."""
    models_cfg = dict(
        foo_conv_1d=dict(
            type='FooConv1d',
            init_cfg=dict(type='Constant', layer='Conv1d', val=0., bias=1.)),
        foo_conv_2d=dict(
            type='FooConv2d',
            init_cfg=dict(type='Constant', layer='Conv2d', val=2., bias=3.)),
    )

    def check(param, value):
        assert torch.equal(param, torch.full(param.shape, value))

    members = {
        name: build_from_cfg(cfg, COMPONENTS)
        for name, cfg in models_cfg.items()
    }
    modeldict = ModuleDict(members)
    modeldict.init_weights()
    check(modeldict['foo_conv_1d'].conv1d.weight, 0.)
    check(modeldict['foo_conv_1d'].conv1d.bias, 1.)
    check(modeldict['foo_conv_2d'].conv2d.weight, 2.)
    check(modeldict['foo_conv_2d'].conv2d.bias, 3.)

    # The outer init_cfg (val=4, bias=5) must NOT override the inner ones.
    members = {
        name: build_from_cfg(cfg, COMPONENTS)
        for name, cfg in models_cfg.items()
    }
    modeldict = ModuleDict(
        members,
        init_cfg=dict(
            type='Constant', layer=['Conv1d', 'Conv2d'], val=4., bias=5.))
    modeldict.init_weights()
    check(modeldict['foo_conv_1d'].conv1d.weight, 0.)
    check(modeldict['foo_conv_1d'].conv1d.bias, 1.)
    check(modeldict['foo_conv_2d'].conv2d.weight, 2.)
    check(modeldict['foo_conv_2d'].conv2d.bias, 3.)
@MODULE_WRAPPERS.register_module()
class DDPWrapper(object):
    """Minimal stand-in for a distributed model wrapper.

    Registered in ``MODULE_WRAPPERS`` so that the checkpoint utilities
    under test recognize it as a wrapper and reach through ``.module``
    when collecting state (see ``test_get_state_dict`` below).
    """

    def __init__(self, module):
        # The wrapped model, exposed under the conventional ``module`` name.
        self.module = module
class Block(nn.Module):
    """Tiny conv + batch-norm pair used as a checkpoint fixture.

    ``norm`` contributes non-parameter buffers (running stats), which the
    state-dict tests rely on.
    """

    def __init__(self):
        super(Block, self).__init__()
        # 3-channel 1x1 conv followed by matching batch norm.
        self.conv = nn.Conv2d(3, 3, 1)
        self.norm = nn.BatchNorm2d(3)
class Model(nn.Module):
    """Fixture model: one ``Block`` child plus an extra top-level conv."""

    def __init__(self):
        super(Model, self).__init__()
        # Nested submodule plus a sibling conv, yielding both 'block.*'
        # and 'conv.*' state-dict keys.
        self.block = Block()
        self.conv = nn.Conv2d(3, 3, 1)
class Mockpavimodel(object):
    """Fake pavi model object: carries a name and a no-op ``download``."""

    def __init__(self, name='fakename'):
        self.name = name

    def download(self, file):
        # Deliberately does nothing; tests only need the call to succeed
        # without producing a local file.
        pass
def assert_tensor_equal(tensor_a, tensor_b):
    """Fail unless the two tensors are element-wise equal."""
    assert (tensor_a == tensor_b).all()
def test_get_state_dict():
    """``get_state_dict`` must reach through registered wrappers
    (``DDPWrapper``) and nested ``DataParallel`` children so that the
    returned keys carry no ``module.`` components."""
    # parrots' BatchNorm lacks the ``num_batches_tracked`` buffer.
    if (torch.__version__ == 'parrots'):
        state_dict_keys = set([
            'block.conv.weight', 'block.conv.bias', 'block.norm.weight',
            'block.norm.bias', 'block.norm.running_mean',
            'block.norm.running_var', 'conv.weight', 'conv.bias'
        ])
    else:
        state_dict_keys = set([
            'block.conv.weight', 'block.conv.bias', 'block.norm.weight',
            'block.norm.bias', 'block.norm.running_mean',
            'block.norm.running_var', 'block.norm.num_batches_tracked',
            'conv.weight', 'conv.bias'
        ])

    # Case 1: a bare model.
    model = Model()
    state_dict = get_state_dict(model)
    assert isinstance(state_dict, OrderedDict)
    assert (set(state_dict.keys()) == state_dict_keys)
    assert_tensor_equal(state_dict['block.conv.weight'],
                        model.block.conv.weight)
    assert_tensor_equal(state_dict['block.conv.bias'], model.block.conv.bias)
    assert_tensor_equal(state_dict['block.norm.weight'],
                        model.block.norm.weight)
    assert_tensor_equal(state_dict['block.norm.bias'], model.block.norm.bias)
    assert_tensor_equal(state_dict['block.norm.running_mean'],
                        model.block.norm.running_mean)
    assert_tensor_equal(state_dict['block.norm.running_var'],
                        model.block.norm.running_var)
    if (torch.__version__ != 'parrots'):
        assert_tensor_equal(state_dict['block.norm.num_batches_tracked'],
                            model.block.norm.num_batches_tracked)
    assert_tensor_equal(state_dict['conv.weight'], model.conv.weight)
    assert_tensor_equal(state_dict['conv.bias'], model.conv.bias)

    # Case 2: model wrapped in a registered MODULE_WRAPPERS class; keys must
    # be identical to the bare model's (no leading ``module.``).
    wrapped_model = DDPWrapper(model)
    state_dict = get_state_dict(wrapped_model)
    assert isinstance(state_dict, OrderedDict)
    assert (set(state_dict.keys()) == state_dict_keys)
    assert_tensor_equal(state_dict['block.conv.weight'],
                        wrapped_model.module.block.conv.weight)
    assert_tensor_equal(state_dict['block.conv.bias'],
                        wrapped_model.module.block.conv.bias)
    assert_tensor_equal(state_dict['block.norm.weight'],
                        wrapped_model.module.block.norm.weight)
    assert_tensor_equal(state_dict['block.norm.bias'],
                        wrapped_model.module.block.norm.bias)
    assert_tensor_equal(state_dict['block.norm.running_mean'],
                        wrapped_model.module.block.norm.running_mean)
    assert_tensor_equal(state_dict['block.norm.running_var'],
                        wrapped_model.module.block.norm.running_var)
    if (torch.__version__ != 'parrots'):
        assert_tensor_equal(
            state_dict['block.norm.num_batches_tracked'],
            wrapped_model.module.block.norm.num_batches_tracked)
    assert_tensor_equal(state_dict['conv.weight'],
                        wrapped_model.module.conv.weight)
    assert_tensor_equal(state_dict['conv.bias'],
                        wrapped_model.module.conv.bias)

    # Case 3: additionally wrap every child in DataParallel; the inner
    # ``module.`` level must also be stripped from the keys.
    for (name, module) in wrapped_model.module._modules.items():
        module = DataParallel(module)
        wrapped_model.module._modules[name] = module
    state_dict = get_state_dict(wrapped_model)
    assert isinstance(state_dict, OrderedDict)
    assert (set(state_dict.keys()) == state_dict_keys)
    assert_tensor_equal(state_dict['block.conv.weight'],
                        wrapped_model.module.block.module.conv.weight)
    assert_tensor_equal(state_dict['block.conv.bias'],
                        wrapped_model.module.block.module.conv.bias)
    assert_tensor_equal(state_dict['block.norm.weight'],
                        wrapped_model.module.block.module.norm.weight)
    assert_tensor_equal(state_dict['block.norm.bias'],
                        wrapped_model.module.block.module.norm.bias)
    assert_tensor_equal(state_dict['block.norm.running_mean'],
                        wrapped_model.module.block.module.norm.running_mean)
    assert_tensor_equal(state_dict['block.norm.running_var'],
                        wrapped_model.module.block.module.norm.running_var)
    if (torch.__version__ != 'parrots'):
        assert_tensor_equal(
            state_dict['block.norm.num_batches_tracked'],
            wrapped_model.module.block.module.norm.num_batches_tracked)
    assert_tensor_equal(state_dict['conv.weight'],
                        wrapped_model.module.conv.module.weight)
    assert_tensor_equal(state_dict['conv.bias'],
                        wrapped_model.module.conv.module.bias)
def test_load_pavimodel_dist():
    """Exercise ``load_from_pavi`` argument validation and the missing-file
    path, with the ``pavi`` package fully mocked out."""
    # Inject fake ``pavi`` modules so the import inside mmcv succeeds.
    sys.modules['pavi'] = MagicMock()
    sys.modules['pavi.modelcloud'] = MagicMock()
    fake_model = Mockpavimodel()
    import pavi
    pavi.modelcloud.get = MagicMock(return_value=fake_model)

    # Paths without the ``pavi://`` scheme are rejected.
    with pytest.raises(AssertionError):
        load_from_pavi('MyPaviFolder/checkpoint.pth')

    # ``Mockpavimodel.download`` is a no-op, so no local file exists and
    # loading must fail.
    with pytest.raises(FileNotFoundError):
        load_from_pavi('pavi://checkpoint.pth')
def test_load_checkpoint_with_prefix():
    """Only keys under ``prefix`` are loaded, with the prefix stripped.

    Bug fixed: the original used ``with TemporaryDirectory():`` without
    binding the directory, then wrote ``model.pth`` into the current
    working directory and never removed it. The checkpoint now lives
    inside the temporary directory and is cleaned up automatically.
    """
    import os

    class FooModule(nn.Module):

        def __init__(self):
            super().__init__()
            self.linear = nn.Linear(1, 2)
            self.conv2d = nn.Conv2d(3, 1, 3)
            self.conv2d_2 = nn.Conv2d(3, 2, 3)

    model = FooModule()
    # Give every parameter a distinct constant so loaded values are
    # unambiguous.
    nn.init.constant_(model.linear.weight, 1)
    nn.init.constant_(model.linear.bias, 2)
    nn.init.constant_(model.conv2d.weight, 3)
    nn.init.constant_(model.conv2d.bias, 4)
    nn.init.constant_(model.conv2d_2.weight, 5)
    nn.init.constant_(model.conv2d_2.bias, 6)

    with TemporaryDirectory() as tmp_dir:
        checkpoint_path = os.path.join(tmp_dir, 'model.pth')
        torch.save(model.state_dict(), checkpoint_path)

        # 'conv2d' selects conv2d.* but not conv2d_2.* (exact submodule
        # prefix), and the returned keys are unprefixed.
        prefix = 'conv2d'
        state_dict = _load_checkpoint_with_prefix(prefix, checkpoint_path)
        assert torch.equal(model.conv2d.state_dict()['weight'],
                           state_dict['weight'])
        assert torch.equal(model.conv2d.state_dict()['bias'],
                           state_dict['bias'])

        # A prefix matching no key must raise AssertionError.
        with pytest.raises(AssertionError):
            prefix = 'back'
            _load_checkpoint_with_prefix(prefix, checkpoint_path)
def test_load_checkpoint():
    """``revise_keys`` rewrites checkpoint keys on load: adding and
    stripping a ``backbone.`` prefix are both exercised."""
    import os
    import re
    import tempfile

    class PrefixModel(nn.Module):
        """Wraps ``Model`` under a ``backbone.`` attribute."""

        def __init__(self):
            super().__init__()
            self.backbone = Model()

    pmodel = PrefixModel()
    model = Model()
    checkpoint_path = os.path.join(tempfile.gettempdir(), 'checkpoint.pth')

    # Plain checkpoint into the prefixed model: prepend 'backbone.'.
    torch.save(model.state_dict(), checkpoint_path)
    state_dict = load_checkpoint(
        pmodel, checkpoint_path, revise_keys=[('^', 'backbone.')])
    for key, param in pmodel.backbone.state_dict().items():
        assert torch.equal(param, state_dict[key])

    # Prefixed checkpoint into the plain model: strip 'backbone.'.
    torch.save(pmodel.state_dict(), checkpoint_path)
    state_dict = load_checkpoint(
        model, checkpoint_path, revise_keys=[('^backbone\\.', '')])
    for key in state_dict:
        stripped = re.sub('^backbone\\.', '', key)
        assert torch.equal(model.state_dict()[stripped], state_dict[key])
    os.remove(checkpoint_path)
def test_load_checkpoint_metadata():
    """Checkpoint metadata versioning: a module's ``_version`` is saved
    with the checkpoint, and ``_load_from_state_dict`` can use it to
    migrate old parameter names to new ones on load."""
    import os
    import tempfile

    from mmcv.runner import load_checkpoint, save_checkpoint

    class ModelV1(nn.Module):
        # Old layout: layers named conv1/conv2 (implicit version 1).

        def __init__(self):
            super().__init__()
            self.block = Block()
            self.conv1 = nn.Conv2d(3, 3, 1)
            self.conv2 = nn.Conv2d(3, 3, 1)
            nn.init.normal_(self.conv1.weight)
            nn.init.normal_(self.conv2.weight)

    class ModelV2(nn.Module):
        # New layout: the same layers renamed to conv0/conv1.
        _version = 2

        def __init__(self):
            super().__init__()
            self.block = Block()
            self.conv0 = nn.Conv2d(3, 3, 1)
            self.conv1 = nn.Conv2d(3, 3, 1)
            nn.init.normal_(self.conv0.weight)
            nn.init.normal_(self.conv1.weight)

        def _load_from_state_dict(self, state_dict, prefix, local_metadata,
                                  *args, **kwargs):
            """Remap v1 keys (conv1->conv0, conv2->conv1) before loading."""
            # Version is stored per-module in the checkpoint metadata;
            # absent or <2 means a v1 checkpoint that needs conversion.
            version = local_metadata.get('version', None)
            if ((version is None) or (version < 2)):
                # Iterate over a snapshot since keys are mutated in place.
                state_dict_keys = list(state_dict.keys())
                convert_map = {'conv1': 'conv0', 'conv2': 'conv1'}
                for k in state_dict_keys:
                    for (ori_str, new_str) in convert_map.items():
                        if k.startswith((prefix + ori_str)):
                            new_key = k.replace(ori_str, new_str)
                            state_dict[new_key] = state_dict[k]
                            del state_dict[k]
            super()._load_from_state_dict(state_dict, prefix, local_metadata,
                                          *args, **kwargs)

    model_v1 = ModelV1()
    model_v1_conv0_weight = model_v1.conv1.weight.detach()
    model_v1_conv1_weight = model_v1.conv2.weight.detach()
    model_v2 = ModelV2()
    model_v2_conv0_weight = model_v2.conv0.weight.detach()
    model_v2_conv1_weight = model_v2.conv1.weight.detach()
    ckpt_v1_path = os.path.join(tempfile.gettempdir(), 'checkpoint_v1.pth')
    ckpt_v2_path = os.path.join(tempfile.gettempdir(), 'checkpoint_v2.pth')
    save_checkpoint(model_v1, ckpt_v1_path)
    save_checkpoint(model_v2, ckpt_v2_path)

    # Loading a v1 checkpoint into a v2 model triggers the key conversion:
    # v1's conv1/conv2 end up in v2's conv0/conv1.
    load_checkpoint(model_v2, ckpt_v1_path)
    assert torch.allclose(model_v2.conv0.weight, model_v1_conv0_weight)
    assert torch.allclose(model_v2.conv1.weight, model_v1_conv1_weight)

    # Loading a v2 checkpoint must NOT convert; weights load unchanged.
    load_checkpoint(model_v2, ckpt_v2_path)
    assert torch.allclose(model_v2.conv0.weight, model_v2_conv0_weight)
    assert torch.allclose(model_v2.conv1.weight, model_v2_conv1_weight)
def test_load_classes_name():
    """A model's ``CLASSES`` attribute round-trips through checkpoint meta,
    both for a bare model and through a registered wrapper."""
    import os
    import tempfile

    from mmcv.runner import load_checkpoint, save_checkpoint
    checkpoint_path = os.path.join(tempfile.gettempdir(), 'checkpoint.pth')

    # Bare model: no CLASSES attribute -> no CLASSES in meta.
    model = Model()
    save_checkpoint(model, checkpoint_path)
    checkpoint = load_checkpoint(model, checkpoint_path)
    assert 'meta' in checkpoint
    assert 'CLASSES' not in checkpoint['meta']

    # With CLASSES set, it is stored in and recovered from meta.
    model.CLASSES = ('class1', 'class2')
    save_checkpoint(model, checkpoint_path)
    checkpoint = load_checkpoint(model, checkpoint_path)
    assert 'meta' in checkpoint
    assert 'CLASSES' in checkpoint['meta']
    assert checkpoint['meta']['CLASSES'] == ('class1', 'class2')

    # Same two checks through a registered wrapper: CLASSES lives on the
    # wrapped module.
    model = Model()
    wrapped_model = DDPWrapper(model)
    save_checkpoint(wrapped_model, checkpoint_path)
    checkpoint = load_checkpoint(wrapped_model, checkpoint_path)
    assert 'meta' in checkpoint
    assert 'CLASSES' not in checkpoint['meta']

    wrapped_model.module.CLASSES = ('class1', 'class2')
    save_checkpoint(wrapped_model, checkpoint_path)
    checkpoint = load_checkpoint(wrapped_model, checkpoint_path)
    assert 'meta' in checkpoint
    assert 'CLASSES' in checkpoint['meta']
    assert checkpoint['meta']['CLASSES'] == ('class1', 'class2')
    os.remove(checkpoint_path)
def test_checkpoint_loader():
    """Scheme-prefix dispatch of ``CheckpointLoader``: prefix matching,
    duplicate registration, ``force`` override, and longest-prefix
    priority."""
    import os
    import tempfile

    from mmcv.runner import CheckpointLoader, _load_checkpoint, save_checkpoint
    checkpoint_path = os.path.join(tempfile.gettempdir(), 'checkpoint.pth')
    model = Model()
    save_checkpoint(model, checkpoint_path)
    checkpoint = _load_checkpoint(checkpoint_path)
    assert (('meta' in checkpoint) and ('CLASSES' not in checkpoint['meta']))
    os.remove(checkpoint_path)

    # Each path prefix must resolve to the expected loader function.
    # Note the near-miss cases: 'ss3://', a leading space, and
    # 'openmmlabs3://' all fall back to load_from_local.
    filenames = [
        'http://xx.xx/xx.pth', 'https://xx.xx/xx.pth',
        'modelzoo://xx.xx/xx.pth', 'torchvision://xx.xx/xx.pth',
        'open-mmlab://xx.xx/xx.pth', 'openmmlab://xx.xx/xx.pth',
        'mmcls://xx.xx/xx.pth', 'pavi://xx.xx/xx.pth', 's3://xx.xx/xx.pth',
        'ss3://xx.xx/xx.pth', ' s3://xx.xx/xx.pth',
        'open-mmlab:s3://xx.xx/xx.pth', 'openmmlab:s3://xx.xx/xx.pth',
        'openmmlabs3://xx.xx/xx.pth', ':s3://xx.xx/xx.path'
    ]
    fn_names = [
        'load_from_http', 'load_from_http', 'load_from_torchvision',
        'load_from_torchvision', 'load_from_openmmlab', 'load_from_openmmlab',
        'load_from_mmcls', 'load_from_pavi', 'load_from_ceph',
        'load_from_local', 'load_from_local', 'load_from_ceph',
        'load_from_ceph', 'load_from_local', 'load_from_local'
    ]
    for (filename, fn_name) in zip(filenames, fn_names):
        loader = CheckpointLoader._get_checkpoint_loader(filename)
        assert (loader.__name__ == fn_name)

    # A new scheme can be registered via the decorator form.
    @CheckpointLoader.register_scheme(prefixes='ftp://')
    def load_from_ftp(filename, map_location):
        return dict(filename=filename)

    filename = 'ftp://xx.xx/xx.pth'
    loader = CheckpointLoader._get_checkpoint_loader(filename)
    assert (loader.__name__ == 'load_from_ftp')

    def load_from_ftp1(filename, map_location):
        return dict(filename=filename)

    # Re-registering an existing prefix without ``force`` is rejected.
    with pytest.raises(KeyError):
        CheckpointLoader.register_scheme('ftp://', load_from_ftp1)

    # ``force=True`` replaces the previously registered loader.
    CheckpointLoader.register_scheme('ftp://', load_from_ftp1, force=True)
    checkpoint = CheckpointLoader.load_checkpoint(filename)
    assert (checkpoint['filename'] == filename)
    loader = CheckpointLoader._get_checkpoint_loader(filename)
    assert (loader.__name__ == 'load_from_ftp1')

    @CheckpointLoader.register_scheme(prefixes='a/b')
    def load_from_ab(filename, map_location):
        return dict(filename=filename)

    @CheckpointLoader.register_scheme(prefixes='a/b/c')
    def load_from_abc(filename, map_location):
        return dict(filename=filename)

    # With overlapping prefixes, the longest match wins.
    filename = 'a/b/c/d'
    loader = CheckpointLoader._get_checkpoint_loader(filename)
    assert (loader.__name__ == 'load_from_abc')
def test_save_checkpoint(tmp_path):
    """``save_checkpoint`` argument handling: bad ``meta`` type, local
    saves with/without optimizer/meta/file_client_args, and petrel-backed
    ``s3://`` paths going through ``PetrelBackend.put``."""
    model = Model()
    optimizer = optim.SGD(model.parameters(), lr=0.1, momentum=0.9)

    # ``meta`` must be a dict.
    with pytest.raises(TypeError):
        save_checkpoint(model, '/path/of/your/filename', meta='invalid type')

    # Local saves: plain, with optimizer, with meta, with explicit backend.
    save_checkpoint(model, str(tmp_path / 'checkpoint1.pth'))
    save_checkpoint(model, str(tmp_path / 'checkpoint2.pth'), optimizer)
    save_checkpoint(model, str(tmp_path / 'checkpoint3.pth'),
                    meta={'test': 'test'})
    save_checkpoint(model, str(tmp_path / 'checkpoint4.pth'),
                    file_client_args={'backend': 'disk'})

    # s3:// paths are routed to PetrelBackend.put (backend inferred).
    with patch.object(PetrelBackend, 'put') as mock_method:
        save_checkpoint(model, 's3://path/of/your/checkpoint1.pth')
    mock_method.assert_called()

    # ... and also when the petrel backend is requested explicitly.
    with patch.object(PetrelBackend, 'put') as mock_method:
        save_checkpoint(
            model,
            's3://path//of/your/checkpoint2.pth',
            file_client_args={'backend': 'petrel'})
    mock_method.assert_called()
def test_load_from_local():
    """``load_from_local`` must expand ``~`` in the given path."""
    import os
    home_path = os.path.expanduser('~')
    checkpoint_path = os.path.join(
        home_path, 'dummy_checkpoint_used_to_test_load_from_local.pth')
    model = Model()
    save_checkpoint(model, checkpoint_path)
    # Load via the un-expanded '~/...' form of the same path.
    checkpoint = load_from_local(
        '~/dummy_checkpoint_used_to_test_load_from_local.pth',
        map_location=None)
    assert_tensor_equal(checkpoint['state_dict']['block.conv.weight'],
                        model.block.conv.weight)
    os.remove(checkpoint_path)