# Tests for mmcv ops: RoIAlign(Rotated), RoIPool, RoIAwarePool3d,
# points-in-boxes, SyncBatchNorm, sparse conv modules and ONNX NMS export.
def _test_roialign_allclose(device, dtype):
    """Compare RoIAlign forward and backward results against the
    precomputed ``inputs``/``outputs`` fixtures on the given device/dtype."""
    if device == 'cuda' and not torch.cuda.is_available():
        pytest.skip('test requires GPU')
    try:
        from mmcv.ops import roi_align
    except ModuleNotFoundError:
        pytest.skip('test requires compilation')
    # Pooling configuration shared by every fixture case.
    pool_h = 2
    pool_w = 2
    spatial_scale = 1.0
    sampling_ratio = 2
    for case, expected in zip(inputs, outputs):
        np_input = np.array(case[0])
        np_rois = np.array(case[1])
        np_output = np.array(expected[0])
        np_grad = np.array(expected[1])
        x = torch.tensor(
            np_input, dtype=dtype, device=device, requires_grad=True)
        rois = torch.tensor(np_rois, dtype=dtype, device=device)
        result = roi_align(x, rois, (pool_h, pool_w), spatial_scale,
                           sampling_ratio, 'avg', True)
        result.backward(torch.ones_like(result))
        # Cast to float before comparing so fp16/fp64 runs share tolerances.
        assert np.allclose(
            result.data.type(torch.float).cpu().numpy(), np_output,
            atol=0.001)
        assert np.allclose(
            x.grad.data.type(torch.float).cpu().numpy(), np_grad, atol=0.001)
@pytest.mark.parametrize('device', ['cuda', 'cpu'])
@pytest.mark.parametrize('dtype', [torch.float, torch.double, torch.half])
def test_roialign(device, dtype):
    """Run RoIAlign checks; gradcheck only in fp64 where it is meaningful."""
    if dtype is torch.double:
        _test_roialign_gradcheck(device=device, dtype=dtype)
    _test_roialign_allclose(device=device, dtype=dtype)
def _test_roialign_rotated_gradcheck(device, dtype):
    """Gradient-check RoIAlignRotated on the shared ``inputs`` fixtures.

    Skips when CUDA is unavailable (for the 'cuda' device), when the
    compiled op is missing, or in fp16 (gradcheck needs high precision).
    """
    if (not torch.cuda.is_available()) and (device == 'cuda'):
        pytest.skip('unittest does not support GPU yet.')
    try:
        from mmcv.ops import RoIAlignRotated
    except ModuleNotFoundError:
        pytest.skip('RoIAlignRotated op is not successfully compiled')
    if dtype is torch.half:
        pytest.skip('grad check does not support fp16')
    # Bugfix: these four values were referenced below but never defined in
    # this function (NameError unless leaked globals existed). Use the same
    # pooling configuration as the sibling *_allclose tests.
    pool_h = 2
    pool_w = 2
    spatial_scale = 1.0
    sampling_ratio = 2
    for case in inputs:
        np_input = np.array(case[0])
        np_rois = np.array(case[1])
        x = torch.tensor(
            np_input, dtype=dtype, device=device, requires_grad=True)
        rois = torch.tensor(np_rois, dtype=dtype, device=device)
        froipool = RoIAlignRotated((pool_h, pool_w), spatial_scale,
                                   sampling_ratio)
        if torch.__version__ == 'parrots':
            # parrots exposes a different gradcheck signature
            gradcheck(
                froipool, (x, rois), no_grads=[rois], delta=1e-05,
                pt_atol=1e-05)
        else:
            gradcheck(froipool, (x, rois), eps=1e-05, atol=1e-05)
def _test_roialign_rotated_allclose(device, dtype):
    """Check RoIAlignRotated forward/backward against fixtures and verify
    the deprecated module kwargs behave like the current ones."""
    if device == 'cuda' and not torch.cuda.is_available():
        pytest.skip('unittest does not support GPU yet.')
    try:
        from mmcv.ops import RoIAlignRotated, roi_align_rotated
    except ModuleNotFoundError:
        pytest.skip('test requires compilation')
    # Pooling configuration shared by every fixture case.
    pool_h = 2
    pool_w = 2
    spatial_scale = 1.0
    sampling_ratio = 2
    for case, expected in zip(inputs, outputs):
        np_input = np.array(case[0])
        np_rois = np.array(case[1])
        np_output = np.array(expected[0])
        np_grad = np.array(expected[1])
        x = torch.tensor(
            np_input, dtype=dtype, device=device, requires_grad=True)
        rois = torch.tensor(np_rois, dtype=dtype, device=device)
        result = roi_align_rotated(x, rois, (pool_h, pool_w), spatial_scale,
                                   sampling_ratio, True)
        result.backward(torch.ones_like(result))
        assert np.allclose(
            result.data.type(torch.float).cpu().numpy(), np_output,
            atol=0.001)
        assert np.allclose(
            x.grad.data.type(torch.float).cpu().numpy(), np_grad, atol=0.001)
        # The deprecated kwargs (out_size/sample_num) must produce the same
        # result as the current ones (output_size/sampling_ratio).
        roi_align_rotated_module_deprecated = RoIAlignRotated(
            out_size=(pool_h, pool_w),
            spatial_scale=spatial_scale,
            sample_num=sampling_ratio)
        output_1 = roi_align_rotated_module_deprecated(x, rois)
        roi_align_rotated_module_new = RoIAlignRotated(
            output_size=(pool_h, pool_w),
            spatial_scale=spatial_scale,
            sampling_ratio=sampling_ratio)
        output_2 = roi_align_rotated_module_new(x, rois)
        assert np.allclose(
            output_1.data.type(torch.float).cpu().numpy(),
            output_2.data.type(torch.float).cpu().numpy())
@pytest.mark.parametrize('device', ['cuda', 'cpu'])
@pytest.mark.parametrize('dtype', [torch.float, torch.double, torch.half])
def test_roialign_rotated(device, dtype):
    """Run RoIAlignRotated checks; gradcheck only in fp64."""
    if dtype is torch.double:
        _test_roialign_rotated_gradcheck(device=device, dtype=dtype)
    _test_roialign_rotated_allclose(device=device, dtype=dtype)
class TestRoiPool(object):
    """CUDA-only checks for mmcv's RoIPool op."""

    def test_roipool_gradcheck(self):
        """Gradient-check RoIPool on the shared fixtures (no-op w/o CUDA)."""
        if not torch.cuda.is_available():
            return
        from mmcv.ops import RoIPool
        pool_h = 2
        pool_w = 2
        spatial_scale = 1.0
        for case in inputs:
            np_input = np.array(case[0])
            np_rois = np.array(case[1])
            x = torch.tensor(np_input, device='cuda', requires_grad=True)
            rois = torch.tensor(np_rois, device='cuda')
            froipool = RoIPool((pool_h, pool_w), spatial_scale)
            # gradcheck is not supported under the parrots backend
            if not _USING_PARROTS:
                gradcheck(froipool, (x, rois), eps=0.01, atol=0.01)

    def _test_roipool_allclose(self, dtype=torch.float):
        """Check RoIPool forward/backward against the fixtures (no-op w/o CUDA)."""
        if not torch.cuda.is_available():
            return
        from mmcv.ops import roi_pool
        pool_h = 2
        pool_w = 2
        spatial_scale = 1.0
        for case, expected in zip(inputs, outputs):
            np_input = np.array(case[0])
            np_rois = np.array(case[1])
            np_output = np.array(expected[0])
            np_grad = np.array(expected[1])
            x = torch.tensor(
                np_input, dtype=dtype, device='cuda', requires_grad=True)
            rois = torch.tensor(np_rois, dtype=dtype, device='cuda')
            result = roi_pool(x, rois, (pool_h, pool_w), spatial_scale)
            result.backward(torch.ones_like(result))
            assert np.allclose(result.data.cpu().numpy(), np_output, 0.001)
            assert np.allclose(x.grad.data.cpu().numpy(), np_grad, 0.001)

    def test_roipool_allclose(self):
        """Run the allclose check for every supported dtype."""
        for dtype in (torch.double, torch.float, torch.half):
            self._test_roipool_allclose(dtype)
@pytest.mark.skipif((not torch.cuda.is_available()), reason='requires CUDA support')
def test_RoIAwarePool3d():
    """Check output shapes and pooled sums of RoIAwarePool3d in both
    'max' and 'avg' modes on a small hand-written point cloud."""
    roiaware_pool3d_max = RoIAwarePool3d(out_size=4, max_pts_per_voxel=128, mode='max')
    roiaware_pool3d_avg = RoIAwarePool3d(out_size=4, max_pts_per_voxel=128, mode='avg')
    # Two boxes with 7 values each; presumably (x, y, z, dx, dy, dz, yaw)
    # with yaw offset by -pi/2 -- TODO confirm against RoIAwarePool3d docs.
    rois = torch.tensor([[1.0, 2.0, 3.0, 5.0, 4.0, 6.0, ((- 0.3) - (np.pi / 2))], [(- 10.0), 23.0, 16.0, 20.0, 10.0, 20.0, ((- 0.5) - (np.pi / 2))]], dtype=torch.float32).cuda()
    # 15 probe points; features are simply the coordinates themselves.
    pts = torch.tensor([[1, 2, 3.3], [1.2, 2.5, 3.0], [0.8, 2.1, 3.5], [1.6, 2.6, 3.6], [0.8, 1.2, 3.9], [(- 9.2), 21.0, 18.2], [3.8, 7.9, 6.3], [4.7, 3.5, (- 12.2)], [3.8, 7.6, (- 2)], [(- 10.6), (- 12.9), (- 20)], [(- 16), (- 18), 9], [(- 21.3), (- 52), (- 5)], [0, 0, 0], [6, 7, 8], [(- 2), (- 3), (- 4)]], dtype=torch.float32).cuda()
    pts_feature = pts.clone()
    pooled_features_max = roiaware_pool3d_max(rois=rois, pts=pts, pts_feature=pts_feature)
    # Output layout: (num_rois, out, out, out, C).
    assert (pooled_features_max.shape == torch.Size([2, 4, 4, 4, 3]))
    assert torch.allclose(pooled_features_max.sum(), torch.tensor(51.1).cuda(), 0.001)
    pooled_features_avg = roiaware_pool3d_avg(rois=rois, pts=pts, pts_feature=pts_feature)
    assert (pooled_features_avg.shape == torch.Size([2, 4, 4, 4, 3]))
    assert torch.allclose(pooled_features_avg.sum(), torch.tensor(49.75).cuda(), 0.001)
@pytest.mark.skipif((not torch.cuda.is_available()), reason='requires CUDA support')
def test_points_in_boxes_part():
    """Check points_in_boxes_part: index of the (first) containing box per
    point, or -1 when the point is in no box."""
    # boxes: (batch, num_boxes, 7); presumably (x, y, z, dx, dy, dz, yaw)
    # -- TODO confirm against the op's documentation.
    boxes = torch.tensor([[[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 0.3]], [[(- 10.0), 23.0, 16.0, 10, 20, 20, 0.5]]], dtype=torch.float32).cuda()
    pts = torch.tensor([[[1, 2, 3.3], [1.2, 2.5, 3.0], [0.8, 2.1, 3.5], [1.6, 2.6, 3.6], [0.8, 1.2, 3.9], [(- 9.2), 21.0, 18.2], [3.8, 7.9, 6.3], [4.7, 3.5, (- 12.2)]], [[3.8, 7.6, (- 2)], [(- 10.6), (- 12.9), (- 20)], [(- 16), (- 18), 9], [(- 21.3), (- 52), (- 5)], [0, 0, 0], [6, 7, 8], [(- 2), (- 3), (- 4)], [6, 4, 9]]], dtype=torch.float32).cuda()
    point_indices = points_in_boxes_part(points=pts, boxes=boxes)
    expected_point_indices = torch.tensor([[0, 0, 0, 0, 0, (- 1), (- 1), (- 1)], [(- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1)]], dtype=torch.int32).cuda()
    assert (point_indices.shape == torch.Size([2, 8]))
    assert (point_indices == expected_point_indices).all()
    # A single rotated box; only two of the eight probe points fall inside.
    boxes = torch.tensor([[[0.0, 0.0, 0.0, 1.0, 20.0, 1.0, 0.523598]]], dtype=torch.float32).cuda()
    pts = torch.tensor([[[4, 6.928, 0], [6.928, 4, 0], [4, (- 6.928), 0], [6.928, (- 4), 0], [(- 4), 6.928, 0], [(- 6.928), 4, 0], [(- 4), (- 6.928), 0], [(- 6.928), (- 4), 0]]], dtype=torch.float32).cuda()
    point_indices = points_in_boxes_part(points=pts, boxes=boxes)
    expected_point_indices = torch.tensor([[(- 1), (- 1), 0, (- 1), 0, (- 1), (- 1), (- 1)]], dtype=torch.int32).cuda()
    assert (point_indices == expected_point_indices).all()
def test_points_in_boxes_cpu():
    """Check the CPU op points_in_boxes_cpu: 0/1 membership flag per
    (point, box) pair. Runs without CUDA (no skipif)."""
    # boxes: (batch, num_boxes, 7); presumably (x, y, z, dx, dy, dz, yaw)
    # -- TODO confirm against the op's documentation.
    boxes = torch.tensor([[[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 0.3], [(- 10.0), 23.0, 16.0, 10, 20, 20, 0.5]]], dtype=torch.float32)
    pts = torch.tensor([[[1, 2, 3.3], [1.2, 2.5, 3.0], [0.8, 2.1, 3.5], [1.6, 2.6, 3.6], [0.8, 1.2, 3.9], [(- 9.2), 21.0, 18.2], [3.8, 7.9, 6.3], [4.7, 3.5, (- 12.2)], [3.8, 7.6, (- 2)], [(- 10.6), (- 12.9), (- 20)], [(- 16), (- 18), 9], [(- 21.3), (- 52), (- 5)], [0, 0, 0], [6, 7, 8], [(- 2), (- 3), (- 4)]]], dtype=torch.float32)
    point_indices = points_in_boxes_cpu(points=pts, boxes=boxes)
    expected_point_indices = torch.tensor([[[1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [0, 1], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0]]], dtype=torch.int32)
    # Shape: (batch, num_points, num_boxes).
    assert (point_indices.shape == torch.Size([1, 15, 2]))
    assert (point_indices == expected_point_indices).all()
    # A single rotated box; only two of the eight probe points fall inside.
    boxes = torch.tensor([[[0.0, 0.0, 0.0, 1.0, 20.0, 1.0, 0.523598]]], dtype=torch.float32)
    pts = torch.tensor([[[4, 6.928, 0], [6.928, 4, 0], [4, (- 6.928), 0], [6.928, (- 4), 0], [(- 4), 6.928, 0], [(- 6.928), 4, 0], [(- 4), (- 6.928), 0], [(- 6.928), (- 4), 0]]], dtype=torch.float32)
    point_indices = points_in_boxes_cpu(points=pts, boxes=boxes)
    expected_point_indices = torch.tensor([[[0], [0], [1], [0], [1], [0], [0], [0]]], dtype=torch.int32)
    assert (point_indices == expected_point_indices).all()
@pytest.mark.skipif((not torch.cuda.is_available()), reason='requires CUDA support')
def test_points_in_boxes_all():
    """Check points_in_boxes_all (CUDA): 0/1 membership flag per
    (point, box) pair, same fixtures as the CPU variant."""
    boxes = torch.tensor([[[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 0.3], [(- 10.0), 23.0, 16.0, 10, 20, 20, 0.5]]], dtype=torch.float32).cuda()
    pts = torch.tensor([[[1, 2, 3.3], [1.2, 2.5, 3.0], [0.8, 2.1, 3.5], [1.6, 2.6, 3.6], [0.8, 1.2, 3.9], [(- 9.2), 21.0, 18.2], [3.8, 7.9, 6.3], [4.7, 3.5, (- 12.2)], [3.8, 7.6, (- 2)], [(- 10.6), (- 12.9), (- 20)], [(- 16), (- 18), 9], [(- 21.3), (- 52), (- 5)], [0, 0, 0], [6, 7, 8], [(- 2), (- 3), (- 4)]]], dtype=torch.float32).cuda()
    point_indices = points_in_boxes_all(points=pts, boxes=boxes)
    expected_point_indices = torch.tensor([[[1, 0], [1, 0], [1, 0], [1, 0], [1, 0], [0, 1], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0]]], dtype=torch.int32).cuda()
    # Shape: (batch, num_points, num_boxes).
    assert (point_indices.shape == torch.Size([1, 15, 2]))
    assert (point_indices == expected_point_indices).all()
@pytest.mark.skipif((not torch.cuda.is_available()), reason='requires CUDA support')
def test_gather_points():
    """Check RoIPointPool3d: pooled per-box point features and empty flags.

    NOTE(review): despite its name this test exercises RoIPointPool3d, not
    gather_points -- consider renaming (would change the collected test id).
    """
    feats = torch.tensor([[1, 2, 3.3], [1.2, 2.5, 3.0], [0.8, 2.1, 3.5], [1.6, 2.6, 3.6], [0.8, 1.2, 3.9], [(- 9.2), 21.0, 18.2], [3.8, 7.9, 6.3], [4.7, 3.5, (- 12.2)], [3.8, 7.6, (- 2)], [(- 10.6), (- 12.9), (- 20)], [(- 16), (- 18), 9], [(- 21.3), (- 52), (- 5)], [0, 0, 0], [6, 7, 8], [(- 2), (- 3), (- 4)]], dtype=torch.float32).unsqueeze(0).cuda()
    # Point coordinates equal the features, so pooled rows repeat them twice.
    points = feats.clone()
    rois = torch.tensor([[[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 0.3], [(- 10.0), 23.0, 16.0, 10, 20, 20, 0.5]]], dtype=torch.float32).cuda()
    roipoint_pool3d = RoIPointPool3d(num_sampled_points=4)
    (roi_feat, empty_flag) = roipoint_pool3d(feats, points, rois)
    expected_roi_feat = torch.tensor([[[[1, 2, 3.3, 1, 2, 3.3], [1.2, 2.5, 3, 1.2, 2.5, 3], [0.8, 2.1, 3.5, 0.8, 2.1, 3.5], [1.6, 2.6, 3.6, 1.6, 2.6, 3.6]], [[(- 9.2), 21, 18.2, (- 9.2), 21, 18.2], [(- 9.2), 21, 18.2, (- 9.2), 21, 18.2], [(- 9.2), 21, 18.2, (- 9.2), 21, 18.2], [(- 9.2), 21, 18.2, (- 9.2), 21, 18.2]]]]).cuda()
    expected_empty_flag = torch.tensor([[0, 0]]).int().cuda()
    assert torch.allclose(roi_feat, expected_roi_feat)
    assert torch.allclose(empty_flag, expected_empty_flag)
@pytest.mark.skipif((not torch.cuda.is_available()), reason='requires CUDA support')
def test_rotated_feature_align():
    """Check rotated_feature_align forward output and input gradient
    against precomputed values (fixed feature map and rotated bboxes)."""
    # feature: (N=2, C=3, H=4, W=4) map with requires_grad for backward check.
    feature = torch.tensor([[[[1.2924, (- 0.2172), (- 0.5222), 0.1172], [0.9144, 1.2248, 1.3115, (- 0.969)], [(- 0.8949), (- 1.1797), (- 0.9093), (- 0.3961)], [(- 0.4586), 0.5062, (- 0.7947), (- 0.7397)]], [[(- 1.0943), (- 0.7495), 1.3461, (- 1.1652)], [0.2034, 0.6763, (- 1.2357), 0.5231], [(- 1.0062), 1.2592, 1.4225, (- 0.3951)], [(- 0.1242), (- 1.624), 0.1932, 2.7181]], [[(- 1.6271), (- 1.0276), 0.0578, (- 0.2997)], [(- 0.9684), (- 1.6946), (- 1.3188), (- 1.1938)], [(- 1.6744), (- 0.8917), (- 0.6556), 1.0073], [(- 0.1205), 0.3671, (- 0.3731), (- 0.5347)]]], [[[0.7035, 0.2089, (- 0.1774), 3.467], [(- 0.8505), (- 0.9278), 1.4714, 0.1644], [0.0898, 0.3531, (- 0.4007), 0.1927], [1.2569, (- 0.2636), (- 0.5223), 0.0616]], [[0.176, (- 0.7639), (- 0.46), (- 1.326)], [(- 0.9921), (- 0.297), (- 0.8955), 1.0508], [1.3515, (- 0.1641), 1.9679, 1.1986], [(- 0.3616), 0.6287, 0.4933, 0.336]], [[(- 0.586), 0.2124, (- 0.87), 2.42], [(- 0.0551), (- 1.5103), (- 1.6779), 0.8399], [0.8431, 1.2414, (- 1.1243), (- 0.3887)], [(- 2.1254), 0.6047, (- 0.3515), 0.7254]]]], device='cuda', requires_grad=True)
    # bbox: one 5-value box per spatial location; presumably
    # (cx, cy, w, h, angle) -- TODO confirm against the op's documentation.
    bbox = torch.tensor([[[[13.08, 12.688, 11.214, 93.944, (- 0.91905)], [38.104, 10.134, 146.59, 90.306, (- 0.98211)], [(- 53.213), 49.508, 51.513, 32.055, (- 0.31954)], [26.974, 25.248, 54.495, 3.1083, (- 0.62127)]], [[(- 15.604), (- 51.908), 239.98, 15.008, (- 1.2546)], [31.354, (- 7.3635), 67.879, 35.081, (- 0.33851)], [(- 5.3292), 9.1946, 12.834, 10.485, (- 1.3039)], [(- 23.925), 36.623, 39.875, 72.009, (- 0.65934)]], [[72.114, (- 23.781), 29.106, 84.501, (- 1.134)], [26.258, (- 7.7034), 176.29, 106.15, (- 1.2156)], [38.057, 46.016, 12.965, 6.9384, (- 1.0855)], [24.428, (- 16.189), 205.72, 31.622, (- 0.15719)]], [[3.8226, 29.608, 14.457, 68.179, (- 0.91997)], [25.003, (- 42.49), 96.007, 49.086, (- 1.4786)], [85.983, 54.98, 78.08, 100.03, (- 1.0926)], [9.9065, 41.457, 5.9799, 17.973, (- 0.56313)]]], [[[(- 18.244), 4.6309, 53.01, 24.31, (- 0.70345)], [19.419, 36.704, 52.39, 54.133, (- 0.3773)], [56.387, 23.752, 9.0441, 17.792, (- 1.5583)], [36.303, 16.396, 20.283, 19.148, (- 0.83419)]], [[32.169, 30.521, 26.283, 196.8, (- 0.30454)], [25.788, (- 32.189), 88.882, 102.07, (- 1.5328)], [8.4676, (- 16.668), 24.657, 112.75, (- 0.40388)], [(- 10.799), 6.0422, 9.5807, 33.677, (- 0.35438)]], [[69.363, 10.85, 25.968, 22.311, (- 0.16408)], [2.814, 4.6843, 3.1289, 21.48, (- 0.67583)], [26.661, 45.29, 6.1679, 30.005, (- 0.89806)], [5.0871, 13.234, 92.087, 49.622, (- 0.2802)]], [[(- 12.643), 25.176, 50.488, 54.246, (- 0.4484)], [(- 34.521), 0.98435, 52.413, 9.7996, (- 0.84218)], [49.829, (- 10.808), 29.848, 73.579, (- 0.62672)], [80.446, 28.064, 45.273, 53.809, (- 1.2359)]]]], device='cuda', requires_grad=True)
    # Precomputed forward result for spatial_scale=1/8, points=1.
    expected_output = torch.tensor([[[[1.1095, (- 0.2172), (- 0.5222), (- 0.6225)], [0.9144, 0.7662, 1.0487, (- 0.969)], [(- 0.8949), (- 1.6384), (- 0.9093), (- 0.3961)], [(- 0.8604), 0.5062, (- 0.7947), (- 0.7397)]], [[(- 0.3961), (- 0.7495), 1.3461, 1.5528], [0.2034, 0.5522, (- 1.6722), 0.5231], [(- 1.0062), 1.135, 1.4225, (- 0.3951)], [(- 0.4826), (- 1.624), 0.1932, 2.7181]], [[(- 2.6436), (- 1.0276), 0.0578, (- 0.8344)], [(- 0.9684), (- 1.8151), (- 2.1843), (- 1.1938)], [(- 1.6744), (- 1.0121), (- 0.6556), 1.0073], [(- 0.8474), 0.3671, (- 0.3731), (- 0.5347)]]], [[[0.7035, 0.2089, (- 0.1774), 3.467], [(- 0.8505), (- 0.9278), 1.4714, 0.1644], [0.0898, 0.3064, (- 0.4007), 0.5849], [1.2569, (- 0.2636), (- 0.5223), 0.0616]], [[0.176, (- 0.7639), (- 0.46), (- 1.326)], [(- 0.9921), (- 0.297), (- 0.8955), 1.0508], [1.3515, (- 0.6125), 1.9679, 0.555], [(- 0.3616), 0.6287, 0.4933, 0.336]], [[(- 0.586), 0.2124, (- 0.87), 2.42], [(- 0.0551), (- 1.5103), (- 1.6779), 0.8399], [0.8431, 0.8455, (- 1.1243), (- 1.5994)], [(- 2.1254), 0.6047, (- 0.3515), 0.7254]]]]).cuda()
    # Precomputed gradient of the feature map for an all-ones upstream grad.
    expected_grad = torch.tensor([[[[1.0, 1.8507, 1.1493, 1.5222], [1.0, 1.1511, 1.2139, 1.4778], [1.0, 1.2629, 1.3721, 1.0], [3.0, 1.0, 1.0, 2.0]], [[1.0, 1.8507, 1.1493, 1.5222], [1.0, 1.1511, 1.2139, 1.4778], [1.0, 1.2629, 1.3721, 1.0], [3.0, 1.0, 1.0, 2.0]], [[1.0, 1.8507, 1.1493, 1.5222], [1.0, 1.1511, 1.2139, 1.4778], [1.0, 1.2629, 1.3721, 1.0], [3.0, 1.0, 1.0, 2.0]]], [[[1.2687, 1.5055, 1.2382, 1.0], [1.1458, 1.4258, 1.416, 1.0], [1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]], [[1.2687, 1.5055, 1.2382, 1.0], [1.1458, 1.4258, 1.416, 1.0], [1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]], [[1.2687, 1.5055, 1.2382, 1.0], [1.1458, 1.4258, 1.416, 1.0], [1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]]]]).cuda()
    output = rotated_feature_align(feature, bbox, spatial_scale=(1 / 8), points=1)
    output.backward(torch.ones_like(output))
    assert torch.allclose(output, expected_output, 0.01)
    assert torch.allclose(feature.grad, expected_grad, 0.01)
def test_sacconv():
    """SAConv2d must keep the output shape of an equivalent plain Conv2d
    for plain, dilated, deformable and grouped configurations."""
    x = torch.rand(1, 3, 256, 256)

    # plain SAC
    saconv = SAConv2d(3, 5, kernel_size=3, padding=1)
    sac_out = saconv(x)
    reference = nn.Conv2d(3, 5, kernel_size=3, padding=1)
    assert sac_out.shape == reference(x).shape

    # dilated SAC
    dilated_saconv = SAConv2d(3, 5, kernel_size=3, padding=2, dilation=2)
    dilated_sac_out = dilated_saconv(x)
    reference = nn.Conv2d(3, 5, kernel_size=3, padding=2, dilation=2)
    assert dilated_sac_out.shape == reference(x).shape

    # deformable SAC (run on GPU when available)
    deform_saconv = SAConv2d(3, 5, kernel_size=3, padding=1, use_deform=True)
    if torch.cuda.is_available():
        x = torch.rand(1, 3, 256, 256).cuda()
        deform_saconv = SAConv2d(
            3, 5, kernel_size=3, padding=1, use_deform=True).cuda()
        deform_sac_out = deform_saconv(x).cuda()
        reference = nn.Conv2d(3, 5, kernel_size=3, padding=1).cuda()
        assert deform_sac_out.shape == reference(x).shape
    else:
        deform_sac_out = deform_saconv(x)
        reference = nn.Conv2d(3, 5, kernel_size=3, padding=1)
        assert deform_sac_out.shape == reference(x).shape

    # grouped SAC
    x = torch.rand(1, 4, 256, 256)
    group_saconv = SAConv2d(4, 4, kernel_size=3, padding=1, groups=2)
    group_sac_out = group_saconv(x)
    reference = nn.Conv2d(4, 4, kernel_size=3, padding=1, groups=2)
    assert group_sac_out.shape == reference(x).shape
def make_sparse_convmodule(in_channels, out_channels, kernel_size, indice_key, stride=1, padding=0, conv_type='SubMConv3d', norm_cfg=None, order=('conv', 'norm', 'act')):
    """Make sparse convolution module.

    Args:
        in_channels (int): the number of input channels
        out_channels (int): the number of out channels
        kernel_size (int | tuple(int)): kernel size of convolution
        indice_key (str): the indice key used for sparse tensor
        stride (int | tuple(int)): the stride of convolution
        padding (int or list[int]): the padding number of input
        conv_type (str): sparse conv type in spconv
        norm_cfg (dict[str]): config of normalization layer
        order (tuple[str]): The order of conv/norm/activation layers. It is a
            sequence of "conv", "norm" and "act". Common examples are
            ("conv", "norm", "act") and ("act", "conv", "norm").

    Returns:
        spconv.SparseSequential: sparse convolution module.
    """
    assert isinstance(order, tuple) and len(order) <= 3
    # every element of `order` must be one of conv/norm/act
    assert (set(order) | {'conv', 'norm', 'act'}) == {'conv', 'norm', 'act'}
    conv_cfg = dict(type=conv_type, indice_key=indice_key)
    inverse_conv_types = (
        'SparseInverseConv3d', 'SparseInverseConv2d', 'SparseInverseConv1d')
    modules = []
    for name in order:
        if name == 'conv':
            if conv_type in inverse_conv_types:
                # inverse sparse convs accept neither stride nor padding
                modules.append(
                    build_conv_layer(conv_cfg, in_channels, out_channels,
                                     kernel_size, bias=False))
            else:
                modules.append(
                    build_conv_layer(conv_cfg, in_channels, out_channels,
                                     kernel_size, stride=stride,
                                     padding=padding, bias=False))
        elif name == 'norm':
            modules.append(build_norm_layer(norm_cfg, out_channels)[1])
        elif name == 'act':
            modules.append(nn.ReLU(inplace=True))
    return SparseSequential(*modules)
@pytest.mark.skipif((not torch.cuda.is_available()), reason='requires CUDA support')
def test_make_sparse_convmodule():
    """Check layer types/hyper-parameters produced by make_sparse_convmodule
    for both ('conv','norm','act') and ('norm','act','conv') orders."""
    voxel_features = torch.tensor([[6.56126, 0.9648336, (- 1.7339306), 0.315], [6.8162713, (- 2.480431), (- 1.3616394), 0.36], [11.643568, (- 4.744306), (- 1.3580885), 0.16], [23.482342, 6.5036807, 0.5806964, 0.35]], dtype=torch.float32, device='cuda')
    coordinates = torch.tensor([[0, 12, 819, 131], [0, 16, 750, 136], [1, 16, 705, 232], [1, 35, 930, 469]], dtype=torch.int32, device='cuda')
    # Sparse tensor over spatial shape [41, 1600, 1408] with batch size 2.
    input_sp_tensor = SparseConvTensor(voxel_features, coordinates, [41, 1600, 1408], 2)
    sparse_block0 = make_sparse_convmodule(4, 16, 3, 'test0', stride=1, padding=0, conv_type='SubMConv3d', norm_cfg=dict(type='BN1d', eps=0.001, momentum=0.01), order=('conv', 'norm', 'act')).cuda()
    # conv -> norm -> act layout with the requested hyper-parameters.
    assert isinstance(sparse_block0[0], SubMConv3d)
    assert (sparse_block0[0].in_channels == 4)
    assert (sparse_block0[0].out_channels == 16)
    assert isinstance(sparse_block0[1], torch.nn.BatchNorm1d)
    assert (sparse_block0[1].eps == 0.001)
    assert (sparse_block0[1].momentum == 0.01)
    assert isinstance(sparse_block0[2], torch.nn.ReLU)
    out_features = sparse_block0(input_sp_tensor)
    assert (out_features.features.shape == torch.Size([4, 16]))
    # Pre-activation order with an inverse sparse conv type.
    sparse_block1 = make_sparse_convmodule(4, 16, 3, 'test1', stride=1, padding=0, conv_type='SparseInverseConv3d', norm_cfg=dict(type='BN1d', eps=0.001, momentum=0.01), order=('norm', 'act', 'conv')).cuda()
    assert isinstance(sparse_block1[0], torch.nn.BatchNorm1d)
    assert isinstance(sparse_block1[1], torch.nn.ReLU)
    assert isinstance(sparse_block1[2], SparseInverseConv3d)
class TestSyncBN(object):
    """Multi-process tests comparing mmcv's SyncBatchNorm with a plain
    nn.BatchNorm2d run over the equivalent concatenated batch.

    The heavy tests only execute under a 4-task SLURM allocation
    (``srun -p test --gres=gpu:4 -n4``); otherwise they return early.
    """

    def dist_init(self):
        """Initialize the NCCL process group from SLURM environment vars."""
        rank = int(os.environ['SLURM_PROCID'])
        world_size = int(os.environ['SLURM_NTASKS'])
        local_rank = int(os.environ['SLURM_LOCALID'])
        node_list = str(os.environ['SLURM_NODELIST'])
        node_parts = re.findall('[0-9]+', node_list)
        # Rebuild the master address from the numeric pieces of the node list.
        os.environ['MASTER_ADDR'] = (f'{node_parts[1]}.{node_parts[2]}' + f'.{node_parts[3]}.{node_parts[4]}')
        os.environ['MASTER_PORT'] = '12341'
        os.environ['WORLD_SIZE'] = str(world_size)
        os.environ['RANK'] = str(rank)
        dist.init_process_group('nccl')
        torch.cuda.set_device(local_rank)

    def _test_syncbn_train(self, size=1, half=False):
        """Compare SyncBatchNorm over sync groups of ``size`` ranks against
        BatchNorm2d on the matching slice of a shared 16-sample batch.

        Args:
            size (int): ranks per sync group; one of 1, 2, 4.
            half (bool): run in fp16 instead of fp32.
        """
        if (('SLURM_NTASKS' not in os.environ) or (int(os.environ['SLURM_NTASKS']) != 4)):
            print('must run with slurm has 4 processes!\nsrun -p test --gres=gpu:4 -n4')
            return
        else:
            print('Running syncbn test')
        from mmcv.ops import SyncBatchNorm
        assert (size in (1, 2, 4))
        if (not dist.is_initialized()):
            self.dist_init()
        rank = dist.get_rank()
        torch.manual_seed(9)
        torch.cuda.manual_seed(9)
        # Same data on all ranks: rank 0 broadcasts its random tensors.
        self.x = torch.rand(16, 3, 2, 3).cuda()
        self.y_bp = torch.rand(16, 3, 2, 3).cuda()
        if half:
            self.x = self.x.half()
            self.y_bp = self.y_bp.half()
        dist.broadcast(self.x, src=0)
        dist.broadcast(self.y_bp, src=0)
        torch.cuda.synchronize()
        # Build the process group for the requested sync-group size.
        if (size == 1):
            groups = [None, None, None, None]
            groups[0] = dist.new_group([0])
            groups[1] = dist.new_group([1])
            groups[2] = dist.new_group([2])
            groups[3] = dist.new_group([3])
            group = groups[rank]
        elif (size == 2):
            groups = [None, None, None, None]
            groups[0] = groups[1] = dist.new_group([0, 1])
            groups[2] = groups[3] = dist.new_group([2, 3])
            group = groups[rank]
        elif (size == 4):
            group = dist.group.WORLD
        syncbn = SyncBatchNorm(3, group=group).cuda()
        syncbn.weight.data[0] = 0.2
        syncbn.weight.data[1] = 0.5
        syncbn.weight.data[2] = 0.7
        syncbn.train()
        # Reference BN initialized with identical affine parameters.
        bn = nn.BatchNorm2d(3).cuda()
        bn.weight.data[0] = 0.2
        bn.weight.data[1] = 0.5
        bn.weight.data[2] = 0.7
        bn.train()
        # Each rank pushes its own 4-sample slice through syncbn.
        sx = self.x[(rank * 4):((rank * 4) + 4)]
        sx.requires_grad_()
        sy = syncbn(sx)
        sy.backward(self.y_bp[(rank * 4):((rank * 4) + 4)])
        smean = syncbn.running_mean
        svar = syncbn.running_var
        sx_grad = sx.grad
        sw_grad = syncbn.weight.grad
        sb_grad = syncbn.bias.grad
        # The reference BN processes the whole sync-group batch at once.
        if (size == 1):
            x = self.x[(rank * 4):((rank * 4) + 4)]
            y_bp = self.y_bp[(rank * 4):((rank * 4) + 4)]
        elif (size == 2):
            x = self.x[((rank // 2) * 8):(((rank // 2) * 8) + 8)]
            y_bp = self.y_bp[((rank // 2) * 8):(((rank // 2) * 8) + 8)]
        elif (size == 4):
            x = self.x
            y_bp = self.y_bp
        x.requires_grad_()
        y = bn(x)
        y.backward(y_bp)
        # Slice the reference output back down to this rank's samples.
        if (size == 2):
            y = y[((rank % 2) * 4):(((rank % 2) * 4) + 4)]
        elif (size == 4):
            y = y[(rank * 4):((rank * 4) + 4)]
        mean = bn.running_mean
        var = bn.running_var
        # Reference weight/bias grads accumulate over the whole group batch,
        # so they are divided by the group size before comparison.
        if (size == 1):
            x_grad = x.grad
            w_grad = bn.weight.grad
            b_grad = bn.bias.grad
        elif (size == 2):
            x_grad = x.grad[((rank % 2) * 4):(((rank % 2) * 4) + 4)]
            w_grad = (bn.weight.grad / 2)
            b_grad = (bn.bias.grad / 2)
        elif (size == 4):
            x_grad = x.grad[(rank * 4):((rank * 4) + 4)]
            w_grad = (bn.weight.grad / 4)
            b_grad = (bn.bias.grad / 4)
        assert np.allclose(mean.data.cpu().numpy(), smean.data.cpu().numpy(), 0.001)
        assert np.allclose(var.data.cpu().numpy(), svar.data.cpu().numpy(), 0.001)
        assert np.allclose(y.data.cpu().numpy(), sy.data.cpu().numpy(), 0.001)
        assert np.allclose(w_grad.data.cpu().numpy(), sw_grad.data.cpu().numpy(), 0.001)
        assert np.allclose(b_grad.data.cpu().numpy(), sb_grad.data.cpu().numpy(), 0.001)
        assert np.allclose(x_grad.data.cpu().numpy(), sx_grad.data.cpu().numpy(), 0.01)

    def _test_syncbn_empty_train(self, size=1, half=False):
        """Same comparison as ``_test_syncbn_train`` but with an empty
        (batch=0) input and ``stats_mode='N'``; finally checks that an
        invalid ``stats_mode`` raises AssertionError."""
        if (('SLURM_NTASKS' not in os.environ) or (int(os.environ['SLURM_NTASKS']) != 4)):
            print('must run with slurm has 4 processes!\nsrun -p test --gres=gpu:4 -n4')
            return
        else:
            print('Running syncbn test')
        from mmcv.ops import SyncBatchNorm
        assert (size in (1, 2, 4))
        if (not dist.is_initialized()):
            self.dist_init()
        rank = dist.get_rank()
        torch.manual_seed(9)
        torch.cuda.manual_seed(9)
        # Empty batch: every slice below is also empty.
        self.x = torch.rand(0, 3, 2, 3).cuda()
        self.y_bp = torch.rand(0, 3, 2, 3).cuda()
        if half:
            self.x = self.x.half()
            self.y_bp = self.y_bp.half()
        dist.broadcast(self.x, src=0)
        dist.broadcast(self.y_bp, src=0)
        torch.cuda.synchronize()
        # Build the process group for the requested sync-group size.
        if (size == 1):
            groups = [None, None, None, None]
            groups[0] = dist.new_group([0])
            groups[1] = dist.new_group([1])
            groups[2] = dist.new_group([2])
            groups[3] = dist.new_group([3])
            group = groups[rank]
        elif (size == 2):
            groups = [None, None, None, None]
            groups[0] = groups[1] = dist.new_group([0, 1])
            groups[2] = groups[3] = dist.new_group([2, 3])
            group = groups[rank]
        elif (size == 4):
            group = dist.group.WORLD
        # stats_mode='N' weights statistics by per-rank sample counts.
        syncbn = SyncBatchNorm(3, group=group, stats_mode='N').cuda()
        syncbn.weight.data[0] = 0.2
        syncbn.weight.data[1] = 0.5
        syncbn.weight.data[2] = 0.7
        syncbn.train()
        bn = nn.BatchNorm2d(3).cuda()
        bn.weight.data[0] = 0.2
        bn.weight.data[1] = 0.5
        bn.weight.data[2] = 0.7
        bn.train()
        sx = self.x[(rank * 4):((rank * 4) + 4)]
        sx.requires_grad_()
        sy = syncbn(sx)
        sy.backward(self.y_bp[(rank * 4):((rank * 4) + 4)])
        smean = syncbn.running_mean
        svar = syncbn.running_var
        sx_grad = sx.grad
        sw_grad = syncbn.weight.grad
        sb_grad = syncbn.bias.grad
        if (size == 1):
            x = self.x[(rank * 4):((rank * 4) + 4)]
            y_bp = self.y_bp[(rank * 4):((rank * 4) + 4)]
        elif (size == 2):
            x = self.x[((rank // 2) * 8):(((rank // 2) * 8) + 8)]
            y_bp = self.y_bp[((rank // 2) * 8):(((rank // 2) * 8) + 8)]
        elif (size == 4):
            x = self.x
            y_bp = self.y_bp
        x.requires_grad_()
        y = bn(x)
        y.backward(y_bp)
        if (size == 2):
            y = y[((rank % 2) * 4):(((rank % 2) * 4) + 4)]
        elif (size == 4):
            y = y[(rank * 4):((rank * 4) + 4)]
        mean = bn.running_mean
        var = bn.running_var
        if (size == 1):
            x_grad = x.grad
            w_grad = bn.weight.grad
            b_grad = bn.bias.grad
        elif (size == 2):
            x_grad = x.grad[((rank % 2) * 4):(((rank % 2) * 4) + 4)]
            w_grad = (bn.weight.grad / 2)
            b_grad = (bn.bias.grad / 2)
        elif (size == 4):
            x_grad = x.grad[(rank * 4):((rank * 4) + 4)]
            w_grad = (bn.weight.grad / 4)
            b_grad = (bn.bias.grad / 4)
        assert np.allclose(mean.data.cpu().numpy(), smean.data.cpu().numpy(), 0.001)
        assert np.allclose(var.data.cpu().numpy(), svar.data.cpu().numpy(), 0.001)
        assert np.allclose(y.data.cpu().numpy(), sy.data.cpu().numpy(), 0.001)
        assert np.allclose(w_grad.data.cpu().numpy(), sw_grad.data.cpu().numpy(), 0.001)
        assert np.allclose(b_grad.data.cpu().numpy(), sb_grad.data.cpu().numpy(), 0.001)
        assert np.allclose(x_grad.data.cpu().numpy(), sx_grad.data.cpu().numpy(), 0.01)
        # An unknown stats_mode must be rejected at construction time.
        with pytest.raises(AssertionError):
            SyncBatchNorm(3, group=group, stats_mode='X')

    def test_syncbn_1(self):
        self._test_syncbn_train(size=1)

    def test_syncbn_2(self):
        self._test_syncbn_train(size=2)

    def test_syncbn_4(self):
        self._test_syncbn_train(size=4)

    def test_syncbn_1_half(self):
        self._test_syncbn_train(size=1, half=True)

    def test_syncbn_2_half(self):
        self._test_syncbn_train(size=2, half=True)

    def test_syncbn_4_half(self):
        self._test_syncbn_train(size=4, half=True)

    def test_syncbn_empty_1(self):
        self._test_syncbn_empty_train(size=1)

    def test_syncbn_empty_2(self):
        self._test_syncbn_empty_train(size=2)

    def test_syncbn_empty_4(self):
        self._test_syncbn_empty_train(size=4)

    def test_syncbn_empty_1_half(self):
        self._test_syncbn_empty_train(size=1, half=True)

    def test_syncbn_empty_2_half(self):
        self._test_syncbn_empty_train(size=2, half=True)

    def test_syncbn_empty_4_half(self):
        self._test_syncbn_empty_train(size=4, half=True)
def remove_tmp_file(func):
    """Decorator that injects ``onnx_file='tmp.onnx'`` into the wrapped
    function's kwargs and always deletes that file afterwards, even when
    the wrapped function raises."""

    @wraps(func)
    def wrapper(*args, **kwargs):
        tmp_path = 'tmp.onnx'
        kwargs['onnx_file'] = tmp_path
        try:
            return func(*args, **kwargs)
        finally:
            # cleanup runs on both success and failure paths
            if os.path.exists(tmp_path):
                os.remove(tmp_path)

    return wrapper
@remove_tmp_file
def export_nms_module_to_onnx(module, onnx_file):
    """Instantiate ``module``, export it to ONNX (opset 11) and return the
    loaded ONNX model; the temp file is removed by ``remove_tmp_file``."""
    torch_model = module()
    torch_model.eval()
    # Dummy boxes/scores matching the expected NMS input signature.
    dummy_input = (torch.rand([100, 4], dtype=torch.float32),
                   torch.rand([100], dtype=torch.float32))
    torch.onnx.export(
        torch_model,
        dummy_input,
        onnx_file,
        opset_version=11,
        input_names=['boxes', 'scores'],
        output_names=['output'])
    return onnx.load(onnx_file)
def test_can_handle_nms_with_constant_maxnum():
    """After preprocess_onnx, every NonMaxSuppression node must carry all
    5 attributes when ``max_num`` is a constant."""

    class ModuleNMS(torch.nn.Module):

        def forward(self, boxes, scores):
            return nms(boxes, scores, iou_threshold=0.4, max_num=10)

    onnx_model = export_nms_module_to_onnx(ModuleNMS)
    preprocess_onnx_model = preprocess_onnx(onnx_model)
    nms_nodes = [
        node for node in preprocess_onnx_model.graph.node
        if 'NonMaxSuppression' in node.name
    ]
    for node in nms_nodes:
        assert (len(node.attribute) == 5), 'The NMS must have 5 attributes.'
def test_can_handle_nms_with_undefined_maxnum():
    """After preprocess_onnx, NonMaxSuppression nodes must carry all 5
    attributes and a positive max_output_boxes_per_class even when
    ``max_num`` was not given."""

    class ModuleNMS(torch.nn.Module):

        def forward(self, boxes, scores):
            return nms(boxes, scores, iou_threshold=0.4)

    onnx_model = export_nms_module_to_onnx(ModuleNMS)
    preprocess_onnx_model = preprocess_onnx(onnx_model)
    nms_nodes = [
        node for node in preprocess_onnx_model.graph.node
        if 'NonMaxSuppression' in node.name
    ]
    for node in nms_nodes:
        assert (len(node.attribute) == 5), 'The NMS must have 5 attributes.'
        assert (node.attribute[2].i > 0), 'The max_output_boxes_per_class is not defined correctly.'
@pytest.mark.skipif((not torch.cuda.is_available()), reason='requires CUDA support')
def test_three_interpolate():
    """Check three_interpolate: weighted interpolation of features from the
    three neighbor indices produced by three_nn."""
    features = torch.tensor([[[2.435, 4.7516, 4.4995, 2.435, 2.435, 2.435], [3.1236, 2.6278, 3.0447, 3.1236, 3.1236, 3.1236], [2.6732, 2.8677, 2.6436, 2.6732, 2.6732, 2.6732], [0.0124, 7.015, 7.0199, 0.0124, 0.0124, 0.0124], [0.3207, 0.0, 0.3411, 0.3207, 0.3207, 0.3207]], [[0.0, 0.9544, 2.4532, 0.0, 0.0, 0.0], [0.5346, 1.9176, 1.4715, 0.5346, 0.5346, 0.5346], [0.0, 0.2744, 2.0842, 0.0, 0.0, 0.0], [0.3414, 1.5063, 1.6209, 0.3414, 0.3414, 0.3414], [0.5814, 0.0103, 0.0, 0.5814, 0.5814, 0.5814]]]).cuda()
    # Three source indices per target point, with matching weights below.
    idx = torch.tensor([[[0, 1, 2], [2, 3, 4], [2, 3, 4], [0, 1, 2], [0, 1, 2], [0, 1, 3]], [[0, 2, 3], [1, 3, 4], [2, 1, 4], [0, 2, 4], [0, 2, 4], [0, 1, 2]]]).int().cuda()
    weight = torch.tensor([[[0.33333, 0.33333, 0.33333], [1.0, 5.8155e-08, 2.2373e-08], [1.0, 1.7737e-08, 1.7356e-08], [0.33333, 0.33333, 0.33333], [0.33333, 0.33333, 0.33333], [0.33333, 0.33333, 0.33333]], [[0.33333, 0.33333, 0.33333], [1.0, 1.3651e-08, 7.7312e-09], [1.0, 1.7148e-08, 1.407e-08], [0.33333, 0.33333, 0.33333], [0.33333, 0.33333, 0.33333], [0.33333, 0.33333, 0.33333]]]).cuda()
    output = three_interpolate(features, idx, weight)
    expected_output = torch.tensor([[[3.8953, 4.4995, 4.4995, 3.8953, 3.8953, 3.2072], [2.932, 3.0447, 3.0447, 2.932, 2.932, 2.9583], [2.7281, 2.6436, 2.6436, 2.7281, 2.7281, 2.738], [4.6824, 7.0199, 7.0199, 4.6824, 4.6824, 2.3466], [0.2206, 0.3411, 0.3411, 0.2206, 0.2206, 0.2138]], [[0.81773, 0.9544, 2.4532, 0.81773, 0.81773, 1.1359], [0.84689, 1.9176, 1.4715, 0.84689, 0.84689, 1.3079], [0.69473, 0.2744, 2.0842, 0.69473, 0.69473, 0.78619], [0.76789, 1.5063, 1.6209, 0.76789, 0.76789, 1.1562], [0.3876, 0.0103, 8.3569e-09, 0.3876, 0.3876, 0.19723]]]).cuda()
    assert torch.allclose(output, expected_output, 0.0001)
@pytest.mark.skipif((not torch.cuda.is_available()), reason='requires CUDA support')
def test_three_nn():
    """Check three_nn: for each unknown point, distances and indices of its
    three nearest neighbors among the known points."""
    known = torch.tensor([[[(- 1.8373), 3.5605, (- 0.7867)], [0.7615, 2.942, 0.2314], [(- 0.6503), 3.6637, (- 1.0622)], [(- 1.8373), 3.5605, (- 0.7867)], [(- 1.8373), 3.5605, (- 0.7867)]], [[(- 1.3399), 1.9991, (- 0.3698)], [(- 0.0799), 0.9698, (- 0.8457)], [0.0858, 2.4721, (- 0.1928)], [(- 1.3399), 1.9991, (- 0.3698)], [(- 1.3399), 1.9991, (- 0.3698)]]]).cuda()
    unknown = torch.tensor([[[(- 1.8373), 3.5605, (- 0.7867)], [0.7615, 2.942, 0.2314], [(- 0.6503), 3.6637, (- 1.0622)], [(- 1.5237), 2.3976, (- 0.8097)], [(- 0.0722), 3.4017, (- 0.288)], [0.5198, 3.0661, (- 0.4605)], [(- 2.0185), 3.5019, (- 0.3236)], [0.5098, 3.102, 0.5799], [(- 1.6137), 3.8443, (- 0.5269)], [0.7341, 2.9626, (- 0.3189)]], [[(- 1.3399), 1.9991, (- 0.3698)], [(- 0.0799), 0.9698, (- 0.8457)], [0.0858, 2.4721, (- 0.1928)], [(- 0.9022), 1.656, (- 1.309)], [0.1156, 1.6901, (- 0.4366)], [(- 0.6477), 2.3576, (- 0.1563)], [(- 0.8482), 1.1466, (- 1.2704)], [(- 0.8753), 2.0845, (- 0.346)], [(- 0.5621), 1.4233, (- 1.2858)], [(- 0.5883), 1.3114, (- 1.2899)]]]).cuda()
    (dist, idx) = three_nn(unknown, known)
    # Distances are sorted ascending; duplicated known points yield ties.
    expected_dist = torch.tensor([[[0.0, 0.0, 0.0], [0.0, 2.0463, 2.8588], [0.0, 1.2229, 1.2229], [1.2047, 1.2047, 1.2047], [1.0011, 1.0845, 1.8411], [0.7433, 1.4451, 2.4304], [0.5007, 0.5007, 0.5007], [0.4587, 2.0875, 2.7544], [0.445, 0.445, 0.445], [0.5514, 1.7206, 2.6811]], [[0.0, 0.0, 0.0], [0.0, 1.6464, 1.6952], [0.0, 1.5125, 1.5125], [1.0915, 1.0915, 1.0915], [0.8197, 0.8511, 1.4894], [0.7433, 0.8082, 0.8082], [0.8955, 1.334, 1.334], [0.473, 0.473, 0.473], [0.7949, 1.3325, 1.3325], [0.7566, 1.3727, 1.3727]]]).cuda()
    expected_idx = torch.tensor([[[0, 3, 4], [1, 2, 0], [2, 0, 3], [0, 3, 4], [2, 1, 0], [1, 2, 0], [0, 3, 4], [1, 2, 0], [0, 3, 4], [1, 2, 0]], [[0, 3, 4], [1, 2, 0], [2, 0, 3], [0, 3, 4], [2, 1, 0], [2, 0, 3], [1, 0, 3], [0, 3, 4], [1, 0, 3], [1, 0, 3]]]).cuda()
    assert torch.allclose(dist, expected_dist, 0.0001)
    assert torch.all((idx == expected_idx))
|
def _test_tinshift_gradcheck(dtype):
    """Run autograd gradcheck on the TINShift op for every shift fixture.

    Uses the module-level ``inputs``/``shifts`` fixtures and requires CUDA.
    """
    try:
        from mmcv.ops import tin_shift
    except ModuleNotFoundError:
        pytest.skip('TINShift op is not successfully compiled')
    if dtype == torch.half:
        # Gradcheck needs CPU add/sub kernels that are missing for half.
        pytest.skip('"add_cpu/sub_cpu" not implemented for Half')
    on_parrots = torch.__version__ == 'parrots'
    for shift_case in shifts:
        feats = torch.tensor(
            np.array(inputs), dtype=dtype, device='cuda', requires_grad=True)
        shift_tensor = torch.tensor(np.array(shift_case), device='cuda').int()
        if on_parrots:
            gradcheck(tin_shift, (feats, shift_tensor))
        else:
            # Loose tolerances: the fixtures are small integer-like grids.
            gradcheck(tin_shift, (feats, shift_tensor), atol=1, rtol=0.1)
|
def _test_tinshift_allclose(dtype):
    """Compare TINShift forward output and input gradient with fixtures.

    Iterates the module-level ``shifts``/``outputs``/``grads`` fixtures in
    lockstep and requires CUDA.
    """
    try:
        from mmcv.ops import tin_shift
    except ModuleNotFoundError:
        pytest.skip('TINShift op is not successfully compiled')
    for shift_case, expected_out, expected_grad in zip(shifts, outputs, grads):
        feats = torch.tensor(
            np.array(inputs), dtype=dtype, device='cuda', requires_grad=True)
        shift_tensor = torch.tensor(np.array(shift_case), device='cuda').int()
        result = tin_shift(feats, shift_tensor)
        # Backprop a tensor of ones to obtain the input gradient.
        result.backward(torch.ones_like(result))
        result_np = result.data.type(torch.float).cpu().numpy()
        grad_np = feats.grad.data.type(torch.float).cpu().numpy()
        # 0.001 is the positional rtol argument of np.allclose.
        assert np.allclose(result_np, np.array(expected_out), 0.001)
        assert np.allclose(grad_np, np.array(expected_grad), 0.001)
|
def _test_tinshift_assert(dtype):
    """TINShift must raise ValueError for shape-mismatched shift tensors."""
    try:
        from mmcv.ops import tin_shift
    except ModuleNotFoundError:
        pytest.skip('TINShift op is not successfully compiled')
    # Local fixtures; deliberately distinct names so the module-level
    # ``inputs``/``shifts`` fixtures are not shadowed.
    feature_cases = [torch.rand(2, 3, 4, 2), torch.rand(2, 3, 4, 2)]
    shift_cases = [torch.rand(2, 3), torch.rand(2, 5)]
    for feats, shift in zip(feature_cases, shift_cases):
        with pytest.raises(ValueError):
            tin_shift(feats.cuda(), shift.cuda())
|
@pytest.mark.skipif((not torch.cuda.is_available()), reason='requires CUDA support')
@pytest.mark.parametrize('dtype', [torch.float, torch.double, torch.half])
def test_tinshift(dtype):
    """Exercise TINShift: forward/backward values, gradcheck and validation."""
    for check in (_test_tinshift_allclose, _test_tinshift_gradcheck,
                  _test_tinshift_assert):
        check(dtype=dtype)
|
def mock(*args, **kwargs):
    """No-op stand-in that swallows any arguments and returns ``None``.

    Used to patch away distributed-initialization calls in the tests below.
    """
    return None
|
@patch('torch.distributed._broadcast_coalesced', mock)
@patch('torch.distributed.broadcast', mock)
@patch('torch.nn.parallel.DistributedDataParallel._ddp_init_helper', mock)
def test_is_module_wrapper():
    """is_module_wrapper must recognize (MM)DataParallel/DDP wrappers and any
    class registered in MODULE_WRAPPERS, but not a plain nn.Module.

    The patch decorators replace distributed primitives with the no-op
    ``mock`` so DDP can be constructed without a real process group.
    """
    class Model(nn.Module):
        def __init__(self):
            super().__init__()
            self.conv = nn.Conv2d(2, 2, 1)
        def forward(self, x):
            return self.conv(x)
    # Newer torch versions verify model consistency across ranks during DDP
    # construction; neutralize whichever hook this torch version exposes.
    if hasattr(torch.distributed, '_verify_model_across_ranks'):
        torch.distributed._verify_model_across_ranks = mock
    if hasattr(torch.distributed, '_verify_params_across_processes'):
        torch.distributed._verify_params_across_processes = mock
    model = Model()
    assert (not hasattr(model, '_params_init_info')) if False else (not is_module_wrapper(model))
    dp = DataParallel(model)
    assert is_module_wrapper(dp)
    mmdp = MMDataParallel(model)
    assert is_module_wrapper(mmdp)
    ddp = DistributedDataParallel(model, process_group=MagicMock())
    assert is_module_wrapper(ddp)
    mmddp = MMDistributedDataParallel(model, process_group=MagicMock())
    assert is_module_wrapper(mmddp)
    deprecated_mmddp = DeprecatedMMDDP(model)
    assert is_module_wrapper(deprecated_mmddp)
    # Any user class registered in MODULE_WRAPPERS also counts as a wrapper.
    @MODULE_WRAPPERS.register_module()
    class ModuleWrapper(object):
        def __init__(self, module):
            self.module = module
        def forward(self, *args, **kwargs):
            return self.module(*args, **kwargs)
    module_wraper = ModuleWrapper(model)
    assert is_module_wrapper(module_wraper)
|
def test_get_input_device():
    """get_input_device returns -1 for CPU tensors (or lists of them), the
    GPU index for CUDA tensors, and raises for unsupported input types."""
    cpu_tensor = torch.zeros([1, 3, 3, 3])
    assert get_input_device(cpu_tensor) == -1
    cpu_tensors = [torch.zeros([1, 3, 3, 3]), torch.zeros([1, 4, 4, 4])]
    assert get_input_device(cpu_tensors) == -1
    if torch.cuda.is_available():
        gpu_tensor = torch.zeros([1, 3, 3, 3]).cuda()
        assert get_input_device(gpu_tensor) == 0
        gpu_tensors = [
            torch.zeros([1, 3, 3, 3]).cuda(),
            torch.zeros([1, 4, 4, 4]).cuda()
        ]
        assert get_input_device(gpu_tensors) == 0
    # Non-tensor input is rejected.
    with pytest.raises(Exception):
        get_input_device(5)
|
def test_scatter():
    """scatter keeps CPU tensors unchanged for device -1, copies them to
    GPU 0 when requested, and raises for non-tensor input."""
    cpu_tensor = torch.zeros([1, 3, 3, 3])
    scattered = scatter(input=cpu_tensor, devices=[-1])
    assert torch.allclose(cpu_tensor, scattered)
    cpu_tensors = [torch.zeros([1, 3, 3, 3]), torch.zeros([1, 4, 4, 4])]
    scattered_list = scatter(input=cpu_tensors, devices=[-1])
    for source, result in zip(cpu_tensors, scattered_list):
        assert torch.allclose(source, result)
    if torch.cuda.is_available():
        gpu_input = torch.zeros([1, 3, 3, 3])
        gpu_result = scatter(input=gpu_input, devices=[0])
        assert torch.allclose(gpu_input.cuda(), gpu_result)
        gpu_inputs = [torch.zeros([1, 3, 3, 3]), torch.zeros([1, 4, 4, 4])]
        gpu_results = scatter(input=gpu_inputs, devices=[0])
        for source, result in zip(gpu_inputs, gpu_results):
            assert torch.allclose(source.cuda(), result)
    with pytest.raises(Exception):
        scatter(5, [-1])
|
def test_Scatter():
    """Scatter.forward returns a tuple; device -1 leaves tensors on CPU,
    device 0 moves them onto the GPU."""
    cpu_input = torch.zeros([1, 3, 3, 3])
    results = Scatter.forward([-1], cpu_input)
    assert isinstance(results, tuple)
    assert torch.allclose(cpu_input, results[0])
    cpu_inputs = [torch.zeros([1, 3, 3, 3]), torch.zeros([1, 4, 4, 4])]
    results = Scatter.forward([-1], cpu_inputs)
    assert isinstance(results, tuple)
    for source, result in zip(cpu_inputs, results):
        assert torch.allclose(source, result)
    if torch.cuda.is_available():
        gpu_input = torch.zeros([1, 3, 3, 3])
        results = Scatter.forward([0], gpu_input)
        assert isinstance(results, tuple)
        assert torch.allclose(gpu_input.cuda(), results[0])
        gpu_inputs = [torch.zeros([1, 3, 3, 3]), torch.zeros([1, 4, 4, 4])]
        results = Scatter.forward([0], gpu_inputs)
        assert isinstance(results, tuple)
        for source, result in zip(gpu_inputs, results):
            # NOTE(review): unlike the CPU branch, each result is indexed
            # with [0] here — presumably GPU scatter yields a per-device
            # list per input; confirm against Scatter's implementation.
            assert torch.allclose(source.cuda(), result[0])
|
@COMPONENTS.register_module()
class FooConv1d(BaseModule):
    """Minimal Conv1d component registered in COMPONENTS as a test fixture.

    The ``conv1d`` attribute name is part of the fixture contract: the
    weight-init tests below inspect ``.conv1d.weight`` / ``.conv1d.bias``.
    """
    def __init__(self, init_cfg=None):
        super().__init__(init_cfg)
        # Weights are (re)initialized later via BaseModule.init_weights.
        self.conv1d = nn.Conv1d(4, 1, 4)
    def forward(self, x):
        return self.conv1d(x)
|
@COMPONENTS.register_module()
class FooConv2d(BaseModule):
    """Minimal Conv2d component registered in COMPONENTS as a test fixture.

    The ``conv2d`` attribute name is part of the fixture contract used by
    the weight-init tests below.
    """
    def __init__(self, init_cfg=None):
        super().__init__(init_cfg)
        # Weights are (re)initialized later via BaseModule.init_weights.
        self.conv2d = nn.Conv2d(3, 1, 3)
    def forward(self, x):
        return self.conv2d(x)
|
@COMPONENTS.register_module()
class FooLinear(BaseModule):
    """Minimal Linear component registered in COMPONENTS as a test fixture.

    The ``linear`` attribute name is part of the fixture contract used by
    the weight-init tests below.
    """
    def __init__(self, init_cfg=None):
        super().__init__(init_cfg)
        # Weights are (re)initialized later via BaseModule.init_weights.
        self.linear = nn.Linear(3, 4)
    def forward(self, x):
        return self.linear(x)
|
@COMPONENTS.register_module()
class FooLinearConv1d(BaseModule):
    """Composite fixture: an optional linear followed by an optional conv1d.

    Each sub-component is built from its config via COMPONENTS; an omitted
    config leaves the corresponding attribute unset.
    """
    def __init__(self, linear=None, conv1d=None, init_cfg=None):
        super().__init__(init_cfg)
        # setattr goes through nn.Module.__setattr__, so submodules are
        # registered exactly as with direct attribute assignment.
        for attr_name, cfg in (('linear', linear), ('conv1d', conv1d)):
            if cfg is not None:
                setattr(self, attr_name, build_from_cfg(cfg, COMPONENTS))
    def forward(self, x):
        return self.conv1d(self.linear(x))
|
@FOOMODELS.register_module()
class FooModel(BaseModule):
    """Top-level fixture model with up to four optional sub-components and a
    plain ``reg`` linear head, all initialized through ``init_cfg``."""
    def __init__(self, component1=None, component2=None, component3=None, component4=None, init_cfg=None) -> None:
        super().__init__(init_cfg)
        component_cfgs = (
            ('component1', component1),
            ('component2', component2),
            ('component3', component3),
            ('component4', component4),
        )
        # setattr registers submodules just like direct assignment would;
        # missing configs simply leave the attribute unset.
        for attr_name, cfg in component_cfgs:
            if cfg is not None:
                setattr(self, attr_name, build_from_cfg(cfg, COMPONENTS))
        # Plain regression head, covered by the model-level init_cfg.
        self.reg = nn.Linear(3, 4)
|
def test_initilization_info_logger():
    """Weight-init logging: init_weights must write per-parameter init info
    to the logger's file and clean up the temporary tracking dict.

    NOTE(review): the function name has a typo ('initilization'); kept as-is
    so test collection and any references remain stable.
    """
    import os
    import torch.nn as nn
    from mmcv.utils.logging import get_logger
    # A layer whose custom init_weights overrides the config-driven init.
    class OverloadInitConv(nn.Conv2d, BaseModule):
        def init_weights(self):
            for p in self.parameters():
                with torch.no_grad():
                    p.fill_(1)
    class CheckLoggerModel(BaseModule):
        def __init__(self, init_cfg=None):
            super(CheckLoggerModel, self).__init__(init_cfg)
            self.conv1 = nn.Conv2d(1, 1, 1, 1)
            self.conv2 = OverloadInitConv(1, 1, 1, 1)
            self.conv3 = nn.Conv2d(1, 1, 1, 1)
            self.fc1 = nn.Linear(1, 1)
    init_cfg = [dict(type='Normal', layer='Conv2d', std=0.01, override=dict(type='Normal', name='conv3', std=0.01, bias_prob=0.01)), dict(type='Constant', layer='Linear', val=0.0, bias=1.0)]
    model = CheckLoggerModel(init_cfg=init_cfg)
    train_log = '20210720_132454.log'
    workdir = tempfile.mkdtemp()
    log_file = os.path.join(workdir, train_log)
    # Creating the logger first makes init_weights log into this file.
    get_logger('init_logger', log_file=log_file)
    # The tracking dict must only exist *during* init_weights.
    assert (not hasattr(model, '_params_init_info'))
    model.init_weights()
    assert (not hasattr(model, '_params_init_info'))
    assert os.path.exists(log_file)
    lines = mmcv.list_from_file(log_file)
    # Each parameter line is followed by the initializer that produced it.
    for (i, line) in enumerate(lines):
        if ('conv1.weight' in line):
            assert ('NormalInit' in lines[(i + 1)])
        if ('conv2.weight' in line):
            assert ('OverloadInitConv' in lines[(i + 1)])
        if ('fc1.weight' in line):
            assert ('ConstantInit' in lines[(i + 1)])
    # Variant with a custom-init layer that itself owns a submodule, nested
    # inside a top-level module, to check duplicate-init reporting.
    class OverloadInitConvFc(nn.Conv2d, BaseModule):
        def __init__(self, *args, **kwargs):
            super(OverloadInitConvFc, self).__init__(*args, **kwargs)
            self.conv1 = nn.Linear(1, 1)
        def init_weights(self):
            for p in self.parameters():
                with torch.no_grad():
                    p.fill_(1)
    class CheckLoggerModel(BaseModule):
        def __init__(self, init_cfg=None):
            super(CheckLoggerModel, self).__init__(init_cfg)
            self.conv1 = nn.Conv2d(1, 1, 1, 1)
            self.conv2 = OverloadInitConvFc(1, 1, 1, 1)
            self.conv3 = nn.Conv2d(1, 1, 1, 1)
            self.fc1 = nn.Linear(1, 1)
    class TopLevelModule(BaseModule):
        def __init__(self, init_cfg=None, checklog_init_cfg=None):
            super(TopLevelModule, self).__init__(init_cfg)
            self.module1 = CheckLoggerModel(checklog_init_cfg)
            self.module2 = OverloadInitConvFc(1, 1, 1, 1)
    checklog_init_cfg = [dict(type='Normal', layer='Conv2d', std=0.01, override=dict(type='Normal', name='conv3', std=0.01, bias_prob=0.01)), dict(type='Constant', layer='Linear', val=0.0, bias=1.0)]
    top_level_init_cfg = [dict(type='Normal', layer='Conv2d', std=0.01, override=dict(type='Normal', name='module2', std=0.01, bias_prob=0.01))]
    model = TopLevelModule(init_cfg=top_level_init_cfg, checklog_init_cfg=checklog_init_cfg)
    # Repeated/overlapping init calls must stay safe and leave no tracking
    # state behind.
    model.module1.init_weights()
    model.module2.init_weights()
    model.init_weights()
    model.module1.init_weights()
    model.module2.init_weights()
    assert (not hasattr(model, '_params_init_info'))
    model.init_weights()
    assert (not hasattr(model, '_params_init_info'))
    assert os.path.exists(log_file)
    lines = mmcv.list_from_file(log_file)
    # The summary line for the top-level module should note parameters whose
    # values stayed 'the same' across repeated initialization.
    for (i, line) in enumerate(lines):
        if (('TopLevelModule' in line) and ('init_cfg' not in line)):
            assert ('the same' in line)
|
def test_update_init_info():
    """update_init_info must refresh init info and cached mean values for all
    tracked parameters, and assert when a tracked parameter was replaced."""
    class DummyModel(BaseModule):
        def __init__(self, init_cfg=None):
            super().__init__(init_cfg)
            self.conv1 = nn.Conv2d(1, 1, 1, 1)
            self.conv3 = nn.Conv2d(1, 1, 1, 1)
            self.fc1 = nn.Linear(1, 1)
    from collections import defaultdict
    model = DummyModel()
    # Seed the tracking dict the way BaseModule.init_weights would.
    model._params_init_info = defaultdict(dict)
    for _, param in model.named_parameters():
        record = model._params_init_info[param]
        record['init_info'] = 'init'
        record['tmp_mean_value'] = param.data.mean()
    with torch.no_grad():
        for param in model.parameters():
            param.fill_(1)
    update_init_info(model, init_info='fill_1')
    for record in model._params_init_info.values():
        assert record['init_info'] == 'fill_1'
        assert record['tmp_mean_value'] == 1
    # Replacing a Parameter object invalidates the dict keyed on the old
    # object, which update_init_info must detect.
    model.conv1.bias = nn.Parameter(torch.ones_like(model.conv1.bias))
    with pytest.raises(AssertionError):
        update_init_info(model, init_info=' ')
|
def test_model_weight_init():
    """Layer-keyed init_cfg on FooModel must reach every nested component.

    Config: Linear -> weight=1/bias=2, Conv1d -> weight=3/bias=4,
    Conv2d -> weight=5/bias=6. After init_weights each component's layer
    must be filled with the constants matching its layer type, including
    the components nested inside component4 and the plain ``reg`` head.
    """
    def expect_filled(tensor, value):
        # Every element of `tensor` must equal `value`.
        assert torch.equal(tensor, torch.full(tensor.shape, value))
    model_cfg = dict(type='FooModel', init_cfg=[dict(type='Constant', val=1, bias=2, layer='Linear'), dict(type='Constant', val=3, bias=4, layer='Conv1d'), dict(type='Constant', val=5, bias=6, layer='Conv2d')], component1=dict(type='FooConv1d'), component2=dict(type='FooConv2d'), component3=dict(type='FooLinear'), component4=dict(type='FooLinearConv1d', linear=dict(type='FooLinear'), conv1d=dict(type='FooConv1d')))
    model = build_from_cfg(model_cfg, FOOMODELS)
    model.init_weights()
    expect_filled(model.component1.conv1d.weight, 3.0)
    expect_filled(model.component1.conv1d.bias, 4.0)
    expect_filled(model.component2.conv2d.weight, 5.0)
    expect_filled(model.component2.conv2d.bias, 6.0)
    expect_filled(model.component3.linear.weight, 1.0)
    expect_filled(model.component3.linear.bias, 2.0)
    expect_filled(model.component4.linear.linear.weight, 1.0)
    expect_filled(model.component4.linear.linear.bias, 2.0)
    expect_filled(model.component4.conv1d.conv1d.weight, 3.0)
    expect_filled(model.component4.conv1d.conv1d.bias, 4.0)
    expect_filled(model.reg.weight, 1.0)
    expect_filled(model.reg.bias, 2.0)
|
def test_nest_components_weight_init():
    """Component-level init_cfg must override the parent model's init_cfg.

    component1/component2 carry their own Constant configs (7/8 and 9/10),
    the parent's ``override`` pins ``reg`` to 13/14, and everything else
    falls back to the parent's layer-keyed constants (Linear 1/2,
    Conv1d 3/4).
    """
    def expect_filled(tensor, value):
        # Every element of `tensor` must equal `value`.
        assert torch.equal(tensor, torch.full(tensor.shape, value))
    model_cfg = dict(type='FooModel', init_cfg=[dict(type='Constant', val=1, bias=2, layer='Linear', override=dict(type='Constant', name='reg', val=13, bias=14)), dict(type='Constant', val=3, bias=4, layer='Conv1d'), dict(type='Constant', val=5, bias=6, layer='Conv2d')], component1=dict(type='FooConv1d', init_cfg=dict(type='Constant', layer='Conv1d', val=7, bias=8)), component2=dict(type='FooConv2d', init_cfg=dict(type='Constant', layer='Conv2d', val=9, bias=10)), component3=dict(type='FooLinear'), component4=dict(type='FooLinearConv1d', linear=dict(type='FooLinear'), conv1d=dict(type='FooConv1d')))
    model = build_from_cfg(model_cfg, FOOMODELS)
    model.init_weights()
    expect_filled(model.component1.conv1d.weight, 7.0)
    expect_filled(model.component1.conv1d.bias, 8.0)
    expect_filled(model.component2.conv2d.weight, 9.0)
    expect_filled(model.component2.conv2d.bias, 10.0)
    expect_filled(model.component3.linear.weight, 1.0)
    expect_filled(model.component3.linear.bias, 2.0)
    expect_filled(model.component4.linear.linear.weight, 1.0)
    expect_filled(model.component4.linear.linear.bias, 2.0)
    expect_filled(model.component4.conv1d.conv1d.weight, 3.0)
    expect_filled(model.component4.conv1d.conv1d.bias, 4.0)
    expect_filled(model.reg.weight, 13.0)
    expect_filled(model.reg.bias, 14.0)
|
def test_without_layer_weight_init():
    """A component init_cfg without a ``layer`` key must not take effect;
    the parent's layer-keyed constants apply instead."""
    def expect_filled(tensor, value):
        # Every element of `tensor` must equal `value`.
        assert torch.equal(tensor, torch.full(tensor.shape, value))
    model_cfg = dict(type='FooModel', init_cfg=[dict(type='Constant', val=1, bias=2, layer='Linear'), dict(type='Constant', val=3, bias=4, layer='Conv1d'), dict(type='Constant', val=5, bias=6, layer='Conv2d')], component1=dict(type='FooConv1d', init_cfg=dict(type='Constant', val=7, bias=8)), component2=dict(type='FooConv2d'), component3=dict(type='FooLinear'))
    model = build_from_cfg(model_cfg, FOOMODELS)
    model.init_weights()
    # component1's layer-less init_cfg (val=7/bias=8) is ignored; the
    # parent's Conv1d constants 3/4 win.
    expect_filled(model.component1.conv1d.weight, 3.0)
    expect_filled(model.component1.conv1d.bias, 4.0)
    expect_filled(model.component2.conv2d.weight, 5.0)
    expect_filled(model.component2.conv2d.bias, 6.0)
    expect_filled(model.component3.linear.weight, 1.0)
    expect_filled(model.component3.linear.bias, 2.0)
    expect_filled(model.reg.weight, 1.0)
    expect_filled(model.reg.bias, 2.0)
|
def test_override_weight_init():
    """``override`` must restrict initialization to the named submodule, and
    an override with its own type/values must replace the outer ones."""
    def expect_filled(tensor, value):
        assert torch.equal(tensor, torch.full(tensor.shape, value))
    def expect_not_filled(tensor, value):
        assert not torch.equal(tensor, torch.full(tensor.shape, value))
    # Case 1: override names 'reg' only — the other components keep their
    # default (non-constant) initialization.
    model_cfg = dict(type='FooModel', init_cfg=[dict(type='Constant', val=10, bias=20, override=dict(name='reg'))], component1=dict(type='FooConv1d'), component3=dict(type='FooLinear'))
    model = build_from_cfg(model_cfg, FOOMODELS)
    model.init_weights()
    expect_filled(model.reg.weight, 10.0)
    expect_filled(model.reg.bias, 20.0)
    expect_not_filled(model.component1.conv1d.weight, 10.0)
    expect_not_filled(model.component1.conv1d.bias, 20.0)
    expect_not_filled(model.component3.linear.weight, 10.0)
    expect_not_filled(model.component3.linear.bias, 20.0)
    # Case 2: the override carries its own type/val/bias, which take
    # precedence over the outer Constant(1, 2) for 'reg'.
    model_cfg = dict(type='FooModel', init_cfg=[dict(type='Constant', val=1, bias=2, override=dict(name='reg', type='Constant', val=30, bias=40))], component1=dict(type='FooConv1d'), component2=dict(type='FooConv2d'), component3=dict(type='FooLinear'))
    model = build_from_cfg(model_cfg, FOOMODELS)
    model.init_weights()
    expect_filled(model.reg.weight, 30.0)
    expect_filled(model.reg.bias, 40.0)
|
def test_sequential_model_weight_init():
    """Sequential.init_weights must apply each child's own init_cfg, and the
    children's configs must take precedence over a Sequential-level one."""
    def expect_filled(tensor, value):
        assert torch.equal(tensor, torch.full(tensor.shape, value))
    seq_model_cfg = [dict(type='FooConv1d', init_cfg=dict(type='Constant', layer='Conv1d', val=0.0, bias=1.0)), dict(type='FooConv2d', init_cfg=dict(type='Constant', layer='Conv2d', val=2.0, bias=3.0))]
    layers = [build_from_cfg(cfg, COMPONENTS) for cfg in seq_model_cfg]
    seq_model = Sequential(*layers)
    seq_model.init_weights()
    expect_filled(seq_model[0].conv1d.weight, 0.0)
    expect_filled(seq_model[0].conv1d.bias, 1.0)
    expect_filled(seq_model[1].conv2d.weight, 2.0)
    expect_filled(seq_model[1].conv2d.bias, 3.0)
    # A Sequential-level init_cfg (4.0/5.0) must NOT override the
    # per-child configs.
    layers = [build_from_cfg(cfg, COMPONENTS) for cfg in seq_model_cfg]
    seq_model = Sequential(*layers, init_cfg=dict(type='Constant', layer=['Conv1d', 'Conv2d'], val=4.0, bias=5.0))
    seq_model.init_weights()
    expect_filled(seq_model[0].conv1d.weight, 0.0)
    expect_filled(seq_model[0].conv1d.bias, 1.0)
    expect_filled(seq_model[1].conv2d.weight, 2.0)
    expect_filled(seq_model[1].conv2d.bias, 3.0)
|
def test_modulelist_weight_init():
    """ModuleList.init_weights must apply each child's own init_cfg, and the
    children's configs must take precedence over a list-level one."""
    def expect_filled(tensor, value):
        assert torch.equal(tensor, torch.full(tensor.shape, value))
    models_cfg = [dict(type='FooConv1d', init_cfg=dict(type='Constant', layer='Conv1d', val=0.0, bias=1.0)), dict(type='FooConv2d', init_cfg=dict(type='Constant', layer='Conv2d', val=2.0, bias=3.0))]
    layers = [build_from_cfg(cfg, COMPONENTS) for cfg in models_cfg]
    modellist = ModuleList(layers)
    modellist.init_weights()
    expect_filled(modellist[0].conv1d.weight, 0.0)
    expect_filled(modellist[0].conv1d.bias, 1.0)
    expect_filled(modellist[1].conv2d.weight, 2.0)
    expect_filled(modellist[1].conv2d.bias, 3.0)
    # A list-level init_cfg (4.0/5.0) must NOT override per-child configs.
    layers = [build_from_cfg(cfg, COMPONENTS) for cfg in models_cfg]
    modellist = ModuleList(layers, init_cfg=dict(type='Constant', layer=['Conv1d', 'Conv2d'], val=4.0, bias=5.0))
    modellist.init_weights()
    expect_filled(modellist[0].conv1d.weight, 0.0)
    expect_filled(modellist[0].conv1d.bias, 1.0)
    expect_filled(modellist[1].conv2d.weight, 2.0)
    expect_filled(modellist[1].conv2d.bias, 3.0)
|
def test_moduledict_weight_init():
    """ModuleDict.init_weights must apply each child's own init_cfg, and the
    children's configs must take precedence over a dict-level one."""
    def expect_filled(tensor, value):
        assert torch.equal(tensor, torch.full(tensor.shape, value))
    models_cfg = dict(foo_conv_1d=dict(type='FooConv1d', init_cfg=dict(type='Constant', layer='Conv1d', val=0.0, bias=1.0)), foo_conv_2d=dict(type='FooConv2d', init_cfg=dict(type='Constant', layer='Conv2d', val=2.0, bias=3.0)))
    layers = {name: build_from_cfg(cfg, COMPONENTS) for (name, cfg) in models_cfg.items()}
    modeldict = ModuleDict(layers)
    modeldict.init_weights()
    expect_filled(modeldict['foo_conv_1d'].conv1d.weight, 0.0)
    expect_filled(modeldict['foo_conv_1d'].conv1d.bias, 1.0)
    expect_filled(modeldict['foo_conv_2d'].conv2d.weight, 2.0)
    expect_filled(modeldict['foo_conv_2d'].conv2d.bias, 3.0)
    # A dict-level init_cfg (4.0/5.0) must NOT override per-child configs.
    layers = {name: build_from_cfg(cfg, COMPONENTS) for (name, cfg) in models_cfg.items()}
    modeldict = ModuleDict(layers, init_cfg=dict(type='Constant', layer=['Conv1d', 'Conv2d'], val=4.0, bias=5.0))
    modeldict.init_weights()
    expect_filled(modeldict['foo_conv_1d'].conv1d.weight, 0.0)
    expect_filled(modeldict['foo_conv_1d'].conv1d.bias, 1.0)
    expect_filled(modeldict['foo_conv_2d'].conv2d.weight, 2.0)
    expect_filled(modeldict['foo_conv_2d'].conv2d.bias, 3.0)
|
@MODULE_WRAPPERS.register_module()
class DDPWrapper:
    """Registered stand-in wrapper used by the state-dict tests.

    Mirrors the attribute layout of real wrapper classes by exposing the
    wrapped module as ``self.module``. Registering it in MODULE_WRAPPERS
    makes ``is_module_wrapper``/``get_state_dict`` treat it as a wrapper.
    (Dropped the redundant ``(object)`` base — this file is Python-3 only.)
    """
    def __init__(self, module):
        self.module = module
|
class Block(nn.Module):
    """Small conv+batchnorm building block used by the checkpoint tests.

    Attribute order (conv, then norm) is kept as-is: it fixes the order of
    state-dict keys that the tests below rely on.
    """
    def __init__(self):
        super().__init__()
        self.conv = nn.Conv2d(3, 3, 1)
        self.norm = nn.BatchNorm2d(3)
|
class Model(nn.Module):
    """Two-level test model (a Block plus a top-level conv) whose state-dict
    layout the checkpoint save/load tests assert against."""
    def __init__(self):
        super().__init__()
        self.block = Block()
        self.conv = nn.Conv2d(3, 3, 1)
|
class Mockpavimodel:
    """Minimal stub of a pavi model object for the checkpoint-loading tests.

    (Dropped the redundant ``(object)`` base — this file is Python-3 only.)
    """
    def __init__(self, name='fakename'):
        # Name reported to the pavi loader; the default mimics a real model.
        self.name = name
    def download(self, file):
        """Pretend to download ``file``; the tests only need a no-op."""
        pass
|
def assert_tensor_equal(tensor_a, tensor_b):
    """Assert that two tensors are element-wise identical."""
    elementwise_equal = tensor_a.eq(tensor_b)
    assert elementwise_equal.all()
|
def test_get_state_dict():
    """get_state_dict must strip every wrapper level ('module.' prefixes)
    from the returned keys while the tensors still alias the originals.

    Covers a bare model, a registered DDPWrapper around it, and nested
    DataParallel wrappers around each submodule.
    """
    # parrots has no BatchNorm num_batches_tracked buffer.
    if (torch.__version__ == 'parrots'):
        state_dict_keys = set(['block.conv.weight', 'block.conv.bias', 'block.norm.weight', 'block.norm.bias', 'block.norm.running_mean', 'block.norm.running_var', 'conv.weight', 'conv.bias'])
    else:
        state_dict_keys = set(['block.conv.weight', 'block.conv.bias', 'block.norm.weight', 'block.norm.bias', 'block.norm.running_mean', 'block.norm.running_var', 'block.norm.num_batches_tracked', 'conv.weight', 'conv.bias'])
    # Case 1: plain model — keys and tensors pass through unchanged.
    model = Model()
    state_dict = get_state_dict(model)
    assert isinstance(state_dict, OrderedDict)
    assert (set(state_dict.keys()) == state_dict_keys)
    assert_tensor_equal(state_dict['block.conv.weight'], model.block.conv.weight)
    assert_tensor_equal(state_dict['block.conv.bias'], model.block.conv.bias)
    assert_tensor_equal(state_dict['block.norm.weight'], model.block.norm.weight)
    assert_tensor_equal(state_dict['block.norm.bias'], model.block.norm.bias)
    assert_tensor_equal(state_dict['block.norm.running_mean'], model.block.norm.running_mean)
    assert_tensor_equal(state_dict['block.norm.running_var'], model.block.norm.running_var)
    if (torch.__version__ != 'parrots'):
        assert_tensor_equal(state_dict['block.norm.num_batches_tracked'], model.block.norm.num_batches_tracked)
    assert_tensor_equal(state_dict['conv.weight'], model.conv.weight)
    assert_tensor_equal(state_dict['conv.bias'], model.conv.bias)
    # Case 2: registered wrapper — the outer 'module.' level is stripped.
    wrapped_model = DDPWrapper(model)
    state_dict = get_state_dict(wrapped_model)
    assert isinstance(state_dict, OrderedDict)
    assert (set(state_dict.keys()) == state_dict_keys)
    assert_tensor_equal(state_dict['block.conv.weight'], wrapped_model.module.block.conv.weight)
    assert_tensor_equal(state_dict['block.conv.bias'], wrapped_model.module.block.conv.bias)
    assert_tensor_equal(state_dict['block.norm.weight'], wrapped_model.module.block.norm.weight)
    assert_tensor_equal(state_dict['block.norm.bias'], wrapped_model.module.block.norm.bias)
    assert_tensor_equal(state_dict['block.norm.running_mean'], wrapped_model.module.block.norm.running_mean)
    assert_tensor_equal(state_dict['block.norm.running_var'], wrapped_model.module.block.norm.running_var)
    if (torch.__version__ != 'parrots'):
        assert_tensor_equal(state_dict['block.norm.num_batches_tracked'], wrapped_model.module.block.norm.num_batches_tracked)
    assert_tensor_equal(state_dict['conv.weight'], wrapped_model.module.conv.weight)
    assert_tensor_equal(state_dict['conv.bias'], wrapped_model.module.conv.bias)
    # Case 3: additionally wrap every submodule in DataParallel — the inner
    # 'module.' levels must be stripped too.
    for (name, module) in wrapped_model.module._modules.items():
        module = DataParallel(module)
        wrapped_model.module._modules[name] = module
    state_dict = get_state_dict(wrapped_model)
    assert isinstance(state_dict, OrderedDict)
    assert (set(state_dict.keys()) == state_dict_keys)
    assert_tensor_equal(state_dict['block.conv.weight'], wrapped_model.module.block.module.conv.weight)
    assert_tensor_equal(state_dict['block.conv.bias'], wrapped_model.module.block.module.conv.bias)
    assert_tensor_equal(state_dict['block.norm.weight'], wrapped_model.module.block.module.norm.weight)
    assert_tensor_equal(state_dict['block.norm.bias'], wrapped_model.module.block.module.norm.bias)
    assert_tensor_equal(state_dict['block.norm.running_mean'], wrapped_model.module.block.module.norm.running_mean)
    assert_tensor_equal(state_dict['block.norm.running_var'], wrapped_model.module.block.module.norm.running_var)
    if (torch.__version__ != 'parrots'):
        assert_tensor_equal(state_dict['block.norm.num_batches_tracked'], wrapped_model.module.block.module.norm.num_batches_tracked)
    assert_tensor_equal(state_dict['conv.weight'], wrapped_model.module.conv.module.weight)
    assert_tensor_equal(state_dict['conv.bias'], wrapped_model.module.conv.module.bias)
|
def test_load_pavimodel_dist():
    """Loading from pavi must assert on a non-'pavi://' path and raise
    FileNotFoundError for a missing checkpoint.

    The pavi package is mocked via sys.modules BEFORE it is imported, so
    the order of these statements matters.
    """
    sys.modules['pavi'] = MagicMock()
    sys.modules['pavi.modelcloud'] = MagicMock()
    pavimodel = Mockpavimodel()
    import pavi
    # modelcloud.get returns our stub whose download() is a no-op, so the
    # downloaded file never exists on disk.
    pavi.modelcloud.get = MagicMock(return_value=pavimodel)
    with pytest.raises(AssertionError):
        # Path without the required 'pavi://' scheme.
        _ = load_from_pavi('MyPaviFolder/checkpoint.pth')
    with pytest.raises(FileNotFoundError):
        # Valid scheme, but the stub download produces no file.
        _ = load_from_pavi('pavi://checkpoint.pth')
|
def test_load_checkpoint_with_prefix():
    """_load_checkpoint_with_prefix must return only the sub-state-dict of
    the exact prefix (so 'conv2d' must not match 'conv2d_2') with the
    prefix stripped, and assert when the prefix matches nothing.

    Bug fix: the checkpoint used to be saved to the current working
    directory ('model.pth') because ``TemporaryDirectory()`` was entered
    without using its path, leaving a stray file behind. The checkpoint is
    now written inside the temporary directory and cleaned up with it.
    """
    import os

    class FooModule(nn.Module):
        def __init__(self):
            super().__init__()
            self.linear = nn.Linear(1, 2)
            self.conv2d = nn.Conv2d(3, 1, 3)
            self.conv2d_2 = nn.Conv2d(3, 2, 3)
    model = FooModule()
    # Distinct constants per layer so mixed-up prefixes would be detected.
    nn.init.constant_(model.linear.weight, 1)
    nn.init.constant_(model.linear.bias, 2)
    nn.init.constant_(model.conv2d.weight, 3)
    nn.init.constant_(model.conv2d.bias, 4)
    nn.init.constant_(model.conv2d_2.weight, 5)
    nn.init.constant_(model.conv2d_2.bias, 6)
    with TemporaryDirectory() as tmp_dir:
        checkpoint_path = os.path.join(tmp_dir, 'model.pth')
        torch.save(model.state_dict(), checkpoint_path)
        prefix = 'conv2d'
        state_dict = _load_checkpoint_with_prefix(prefix, checkpoint_path)
        assert torch.equal(model.conv2d.state_dict()['weight'], state_dict['weight'])
        assert torch.equal(model.conv2d.state_dict()['bias'], state_dict['bias'])
        # A prefix that matches no key must trigger an assertion.
        with pytest.raises(AssertionError):
            prefix = 'back'
            _load_checkpoint_with_prefix(prefix, checkpoint_path)
|
def test_load_checkpoint():
    """load_checkpoint's revise_keys must remap state-dict keys both ways:
    adding a 'backbone.' prefix and stripping it again."""
    import os
    import re
    import tempfile

    class PrefixModel(nn.Module):
        def __init__(self):
            super().__init__()
            self.backbone = Model()
    prefix_model = PrefixModel()
    plain_model = Model()
    checkpoint_path = os.path.join(tempfile.gettempdir(), 'checkpoint.pth')
    # Plain -> prefixed: prepend 'backbone.' to every checkpoint key.
    torch.save(plain_model.state_dict(), checkpoint_path)
    state_dict = load_checkpoint(prefix_model, checkpoint_path, revise_keys=[('^', 'backbone.')])
    for key, value in prefix_model.backbone.state_dict().items():
        assert torch.equal(value, state_dict[key])
    # Prefixed -> plain: strip the 'backbone.' prefix again.
    torch.save(prefix_model.state_dict(), checkpoint_path)
    state_dict = load_checkpoint(plain_model, checkpoint_path, revise_keys=[('^backbone\\.', '')])
    for key in state_dict.keys():
        stripped_key = re.sub('^backbone\\.', '', key)
        assert torch.equal(plain_model.state_dict()[stripped_key], state_dict[key])
    os.remove(checkpoint_path)
|
def test_load_checkpoint_metadata():
    """Checkpoint metadata versioning: a v2 model's _load_from_state_dict
    must remap keys from a v1 checkpoint (conv1->conv0, conv2->conv1) and
    load a v2 checkpoint unchanged."""
    import os
    import tempfile
    from mmcv.runner import load_checkpoint, save_checkpoint
    # Old layout: parameters named conv1/conv2.
    class ModelV1(nn.Module):
        def __init__(self):
            super().__init__()
            self.block = Block()
            self.conv1 = nn.Conv2d(3, 3, 1)
            self.conv2 = nn.Conv2d(3, 3, 1)
            nn.init.normal_(self.conv1.weight)
            nn.init.normal_(self.conv2.weight)
    # New layout: same layers renamed conv0/conv1, with a version bump and
    # a compatibility shim for old checkpoints.
    class ModelV2(nn.Module):
        _version = 2
        def __init__(self):
            super().__init__()
            self.block = Block()
            self.conv0 = nn.Conv2d(3, 3, 1)
            self.conv1 = nn.Conv2d(3, 3, 1)
            nn.init.normal_(self.conv0.weight)
            nn.init.normal_(self.conv1.weight)
        def _load_from_state_dict(self, state_dict, prefix, local_metadata, *args, **kwargs):
            'load checkpoints.'
            # Checkpoints without a version (or version < 2) use the old
            # names; rewrite them in place before delegating to torch.
            version = local_metadata.get('version', None)
            if ((version is None) or (version < 2)):
                state_dict_keys = list(state_dict.keys())
                convert_map = {'conv1': 'conv0', 'conv2': 'conv1'}
                for k in state_dict_keys:
                    for (ori_str, new_str) in convert_map.items():
                        if k.startswith((prefix + ori_str)):
                            new_key = k.replace(ori_str, new_str)
                            state_dict[new_key] = state_dict[k]
                            del state_dict[k]
            super()._load_from_state_dict(state_dict, prefix, local_metadata, *args, **kwargs)
    model_v1 = ModelV1()
    # Detached snapshots of the randomly initialized weights, taken before
    # any load so we can verify exactly what ended up in model_v2.
    model_v1_conv0_weight = model_v1.conv1.weight.detach()
    model_v1_conv1_weight = model_v1.conv2.weight.detach()
    model_v2 = ModelV2()
    model_v2_conv0_weight = model_v2.conv0.weight.detach()
    model_v2_conv1_weight = model_v2.conv1.weight.detach()
    ckpt_v1_path = os.path.join(tempfile.gettempdir(), 'checkpoint_v1.pth')
    ckpt_v2_path = os.path.join(tempfile.gettempdir(), 'checkpoint_v2.pth')
    save_checkpoint(model_v1, ckpt_v1_path)
    save_checkpoint(model_v2, ckpt_v2_path)
    # Loading the v1 checkpoint must route conv1->conv0 and conv2->conv1.
    load_checkpoint(model_v2, ckpt_v1_path)
    assert torch.allclose(model_v2.conv0.weight, model_v1_conv0_weight)
    assert torch.allclose(model_v2.conv1.weight, model_v1_conv1_weight)
    # Loading the v2 checkpoint must restore the weights untouched.
    load_checkpoint(model_v2, ckpt_v2_path)
    assert torch.allclose(model_v2.conv0.weight, model_v2_conv0_weight)
    assert torch.allclose(model_v2.conv1.weight, model_v2_conv1_weight)
|
def test_load_classes_name():
    """``CLASSES`` set on a model (or its wrapped module) round-trips
    through the checkpoint ``meta`` dict."""
    import os
    import tempfile
    from mmcv.runner import load_checkpoint, save_checkpoint

    ckpt_file = os.path.join(tempfile.gettempdir(), 'checkpoint.pth')
    net = Model()
    # Without a CLASSES attribute, meta exists but has no CLASSES entry.
    save_checkpoint(net, ckpt_file)
    ckpt = load_checkpoint(net, ckpt_file)
    assert 'meta' in ckpt
    assert 'CLASSES' not in ckpt['meta']
    # Once CLASSES is set on the model it is recorded in meta.
    net.CLASSES = ('class1', 'class2')
    save_checkpoint(net, ckpt_file)
    ckpt = load_checkpoint(net, ckpt_file)
    assert 'meta' in ckpt
    assert 'CLASSES' in ckpt['meta']
    assert ckpt['meta']['CLASSES'] == ('class1', 'class2')
    # The same holds for a wrapped model: CLASSES is looked up on the
    # underlying ``module``.
    net = Model()
    wrapped = DDPWrapper(net)
    save_checkpoint(wrapped, ckpt_file)
    ckpt = load_checkpoint(wrapped, ckpt_file)
    assert 'meta' in ckpt
    assert 'CLASSES' not in ckpt['meta']
    wrapped.module.CLASSES = ('class1', 'class2')
    save_checkpoint(wrapped, ckpt_file)
    ckpt = load_checkpoint(wrapped, ckpt_file)
    assert 'meta' in ckpt
    assert 'CLASSES' in ckpt['meta']
    assert ckpt['meta']['CLASSES'] == ('class1', 'class2')
    os.remove(ckpt_file)
|
def test_checkpoint_loader():
    """Check scheme-based dispatch of ``CheckpointLoader``: built-in
    prefixes, registering new schemes, forced override and longest-prefix
    matching.  NOTE: registrations below mutate the global loader registry
    for the rest of the test session."""
    import os
    import tempfile
    from mmcv.runner import CheckpointLoader, _load_checkpoint, save_checkpoint
    checkpoint_path = os.path.join(tempfile.gettempdir(), 'checkpoint.pth')
    model = Model()
    save_checkpoint(model, checkpoint_path)
    checkpoint = _load_checkpoint(checkpoint_path)
    assert (('meta' in checkpoint) and ('CLASSES' not in checkpoint['meta']))
    os.remove(checkpoint_path)
    # Each filename prefix must be routed to its dedicated loader function.
    filenames = ['http://xx.xx/xx.pth', 'https://xx.xx/xx.pth', 'modelzoo://xx.xx/xx.pth', 'torchvision://xx.xx/xx.pth', 'open-mmlab://xx.xx/xx.pth', 'openmmlab://xx.xx/xx.pth', 'mmcls://xx.xx/xx.pth', 'pavi://xx.xx/xx.pth', 's3://xx.xx/xx.pth', 'ss3://xx.xx/xx.pth', ' s3://xx.xx/xx.pth', 'open-mmlab:s3://xx.xx/xx.pth', 'openmmlab:s3://xx.xx/xx.pth', 'openmmlabs3://xx.xx/xx.pth', ':s3://xx.xx/xx.path']
    fn_names = ['load_from_http', 'load_from_http', 'load_from_torchvision', 'load_from_torchvision', 'load_from_openmmlab', 'load_from_openmmlab', 'load_from_mmcls', 'load_from_pavi', 'load_from_ceph', 'load_from_local', 'load_from_local', 'load_from_ceph', 'load_from_ceph', 'load_from_local', 'load_from_local']
    for (filename, fn_name) in zip(filenames, fn_names):
        loader = CheckpointLoader._get_checkpoint_loader(filename)
        assert (loader.__name__ == fn_name)
    # A new scheme can be registered via the decorator form.
    @CheckpointLoader.register_scheme(prefixes='ftp://')
    def load_from_ftp(filename, map_location):
        return dict(filename=filename)
    filename = 'ftp://xx.xx/xx.pth'
    loader = CheckpointLoader._get_checkpoint_loader(filename)
    assert (loader.__name__ == 'load_from_ftp')
    def load_from_ftp1(filename, map_location):
        return dict(filename=filename)
    # Re-registering an existing prefix must fail unless force=True.
    with pytest.raises(KeyError):
        CheckpointLoader.register_scheme('ftp://', load_from_ftp1)
    CheckpointLoader.register_scheme('ftp://', load_from_ftp1, force=True)
    checkpoint = CheckpointLoader.load_checkpoint(filename)
    assert (checkpoint['filename'] == filename)
    loader = CheckpointLoader._get_checkpoint_loader(filename)
    assert (loader.__name__ == 'load_from_ftp1')
    # When several registered prefixes match, the longest one wins.
    @CheckpointLoader.register_scheme(prefixes='a/b')
    def load_from_ab(filename, map_location):
        return dict(filename=filename)
    @CheckpointLoader.register_scheme(prefixes='a/b/c')
    def load_from_abc(filename, map_location):
        return dict(filename=filename)
    filename = 'a/b/c/d'
    loader = CheckpointLoader._get_checkpoint_loader(filename)
    assert (loader.__name__ == 'load_from_abc')
|
def test_save_checkpoint(tmp_path):
    """Exercise every supported calling convention of ``save_checkpoint``."""
    net = Model()
    sgd = optim.SGD(net.parameters(), lr=0.1, momentum=0.9)
    # meta must be a dict (or None).
    with pytest.raises(TypeError):
        save_checkpoint(net, '/path/of/your/filename', meta='invalid type')
    # Plain save, save with optimizer, save with meta, save with an
    # explicit disk backend.
    save_checkpoint(net, str(tmp_path / 'checkpoint1.pth'))
    save_checkpoint(net, str(tmp_path / 'checkpoint2.pth'), sgd)
    save_checkpoint(net, str(tmp_path / 'checkpoint3.pth'), meta={'test': 'test'})
    save_checkpoint(net, str(tmp_path / 'checkpoint4.pth'), file_client_args={'backend': 'disk'})
    # Saving to an s3:// path must go through PetrelBackend.put, both with
    # the inferred backend and with an explicit one.
    with patch.object(PetrelBackend, 'put') as mock_method:
        save_checkpoint(net, 's3://path/of/your/checkpoint1.pth')
        mock_method.assert_called()
    with patch.object(PetrelBackend, 'put') as mock_method:
        save_checkpoint(net, 's3://path//of/your/checkpoint2.pth', file_client_args={'backend': 'petrel'})
        mock_method.assert_called()
|
def test_load_from_local():
    """``load_from_local`` must expand ``~`` in the checkpoint path."""
    import os
    dummy_name = 'dummy_checkpoint_used_to_test_load_from_local.pth'
    ckpt_file = os.path.join(os.path.expanduser('~'), dummy_name)
    net = Model()
    save_checkpoint(net, ckpt_file)
    # Load via the tilde-prefixed path rather than the expanded one.
    loaded = load_from_local('~/dummy_checkpoint_used_to_test_load_from_local.pth', map_location=None)
    assert_tensor_equal(loaded['state_dict']['block.conv.weight'], net.block.conv.weight)
    os.remove(ckpt_file)
|
@patch('torch.cuda.device_count', return_value=1)
@patch('torch.cuda.set_device')
@patch('torch.distributed.init_process_group')
@patch('subprocess.getoutput', return_value='127.0.0.1')
def test_init_dist(mock_getoutput, mock_dist_init, mock_set_device, mock_device_count):
    """Check slurm-launcher env setup in ``init_dist`` with CUDA and the
    process group mocked out.

    Mock arguments are bound innermost-decorator-first, i.e.
    ``mock_getoutput`` corresponds to ``subprocess.getoutput``."""
    # Unknown launcher names are rejected.
    with pytest.raises(ValueError):
        init_dist('invaliad_launcher')
    # Fake a single-task slurm allocation.
    os.environ['SLURM_PROCID'] = '0'
    os.environ['SLURM_NTASKS'] = '1'
    os.environ['SLURM_NODELIST'] = '[0]'
    # Default master port (29500) is used when no port is passed.
    init_dist('slurm')
    assert (os.environ['MASTER_PORT'] == '29500')
    assert (os.environ['MASTER_ADDR'] == '127.0.0.1')
    assert (os.environ['WORLD_SIZE'] == '1')
    assert (os.environ['RANK'] == '0')
    mock_set_device.assert_called_with(0)
    mock_getoutput.assert_called_with('scontrol show hostname [0] | head -n1')
    mock_dist_init.assert_called_with(backend='nccl')
    # An explicit port overrides the default.
    init_dist('slurm', port=29505)
    assert (os.environ['MASTER_PORT'] == '29505')
    assert (os.environ['MASTER_ADDR'] == '127.0.0.1')
    assert (os.environ['WORLD_SIZE'] == '1')
    assert (os.environ['RANK'] == '0')
    mock_set_device.assert_called_with(0)
    mock_getoutput.assert_called_with('scontrol show hostname [0] | head -n1')
    mock_dist_init.assert_called_with(backend='nccl')
    # Calling again without a port keeps the previously exported
    # MASTER_PORT (29505) rather than resetting to the default.
    init_dist('slurm')
    assert (os.environ['MASTER_PORT'] == '29505')
    assert (os.environ['MASTER_ADDR'] == '127.0.0.1')
    assert (os.environ['WORLD_SIZE'] == '1')
    assert (os.environ['RANK'] == '0')
    mock_set_device.assert_called_with(0)
    mock_getoutput.assert_called_with('scontrol show hostname [0] | head -n1')
    mock_dist_init.assert_called_with(backend='nccl')
|
class ExampleDataset(Dataset):
    """Single-item dataset with a mocked ``evaluate`` so eval-hook tests
    can assert on how ``evaluate`` is called."""
    def __init__(self):
        # Running index into the canned score list; only consumed by
        # subclasses that implement a real ``evaluate`` (see EvalDataset).
        self.index = 0
        self.eval_result = [1, 4, 3, 7, 2, (- 3), 4, 6]
    def __getitem__(self, idx):
        results = dict(x=torch.tensor([1]))
        return results
    def __len__(self):
        return 1
    # create_autospec replaces ``evaluate`` with a signature-preserving
    # mock, so tests can use ``evaluate.assert_called_with(...)``.
    @mock.create_autospec
    def evaluate(self, results, logger=None):
        pass
|
class EvalDataset(ExampleDataset):
    """Dataset whose ``evaluate`` walks through the canned score list,
    returning the next entry on every call."""

    def evaluate(self, results, logger=None):
        current = self.index
        score = self.eval_result[current]
        self.index = current + 1
        return OrderedDict(acc=score, index=current, score=score, loss_top=score)
|
class Model(nn.Module):
    """Tiny one-parameter model for runner/hook tests: ``forward`` scales
    its input by a single learnable parameter (initially 1.0)."""

    def __init__(self):
        super().__init__()
        self.param = nn.Parameter(torch.tensor([1.0]))

    def forward(self, x, **kwargs):
        return self.param * x

    def train_step(self, data_batch, optimizer, **kwargs):
        # Loss is simply the summed scaled input, keeping training
        # deterministic and trivially checkable.
        loss = torch.sum(self(data_batch['x']))
        return {'loss': loss}

    def val_step(self, data_batch, optimizer, **kwargs):
        loss = torch.sum(self(data_batch['x']))
        return {'loss': loss}
|
def _build_epoch_runner():
    """Create an ``EpochBasedRunner`` over a fresh ``Model`` in a new
    temporary work dir."""
    work_dir = tempfile.mkdtemp()
    return EpochBasedRunner(model=Model(), work_dir=work_dir, logger=get_logger('demo'))
|
def _build_iter_runner():
    """Create an ``IterBasedRunner`` over a fresh ``Model`` in a new
    temporary work dir."""
    work_dir = tempfile.mkdtemp()
    return IterBasedRunner(model=Model(), work_dir=work_dir, logger=get_logger('demo'))
|
class EvalHook(BaseEvalHook):
    """Eval hook with custom default greater/less key lists, used to test
    automatic best-checkpoint rule inference.

    The original pass-through ``__init__`` (it only forwarded to
    ``super().__init__``) was redundant boilerplate and has been removed;
    construction semantics are unchanged.
    """

    # Keys for which a larger value means a better checkpoint.
    _default_greater_keys = ['acc', 'top']
    # Keys for which a smaller value means a better checkpoint.
    _default_less_keys = ['loss', 'loss_top']
|
class DistEvalHook(BaseDistEvalHook):
    """Distributed eval hook with custom greater/less key lists.

    The original pass-through ``__init__`` (it only forwarded to
    ``super().__init__``) was redundant boilerplate and has been removed;
    construction semantics are unchanged.
    """

    # Keys for which a larger value means a better checkpoint.
    greater_keys = ['acc', 'top']
    # Keys for which a smaller value means a better checkpoint.
    less_keys = ['loss', 'loss_top']
|
def test_eval_hook():
    """End-to-end checks of ``EvalHook``: constructor validation, best
    checkpoint bookkeeping under different keys/rules, resuming, custom
    greater/less keys and saving to a remote ``out_dir``."""
    # The dataloader must wrap a Dataset.
    with pytest.raises(AssertionError):
        test_dataset = Model()
        data_loader = DataLoader(test_dataset)
        EvalHook(data_loader, save_best=True)
    # The dataloader argument must be a DataLoader, not a list of them.
    with pytest.raises(TypeError):
        test_dataset = Model()
        data_loader = [DataLoader(test_dataset)]
        EvalHook(data_loader)
    # save_best must be a valid key name or 'auto'.
    with pytest.raises(ValueError):
        test_dataset = ExampleDataset()
        data_loader = DataLoader(test_dataset)
        EvalHook(data_loader, save_best='unsupport')
    # rule must be one of the supported comparison rules.
    with pytest.raises(KeyError):
        test_dataset = ExampleDataset()
        data_loader = DataLoader(test_dataset)
        EvalHook(data_loader, save_best='auto', rule='unsupport')
    # An empty evaluation result must only warn, not crash.
    with pytest.warns(UserWarning) as record_warnings:
        class _EvalDataset(ExampleDataset):
            def evaluate(self, results, logger=None):
                return {}
        test_dataset = _EvalDataset()
        data_loader = DataLoader(test_dataset)
        eval_hook = EvalHook(data_loader, save_best='auto')
        runner = _build_epoch_runner()
        runner.register_hook(eval_hook)
        runner.run([data_loader], [('train', 1)], 1)
    expected_message = 'Since `eval_res` is an empty dict, the behavior to save the best checkpoint will be skipped in this evaluation.'
    for warning in record_warnings:
        if (str(warning.message) == expected_message):
            break
    else:
        assert False
    # save_best=None: evaluate is still called but no best ckpt is tracked.
    test_dataset = ExampleDataset()
    loader = DataLoader(test_dataset)
    model = Model()
    data_loader = DataLoader(test_dataset)
    eval_hook = EvalHook(data_loader, save_best=None)
    with tempfile.TemporaryDirectory() as tmpdir:
        logger = get_logger('test_eval')
        runner = EpochBasedRunner(model=model, work_dir=tmpdir, logger=logger)
        runner.register_hook(eval_hook)
        runner.run([loader], [('train', 1)], 1)
        test_dataset.evaluate.assert_called_with(test_dataset, [torch.tensor([1])], logger=runner.logger)
        assert ((runner.meta is None) or ('best_score' not in runner.meta['hook_msgs']))
        assert ((runner.meta is None) or ('best_ckpt' not in runner.meta['hook_msgs']))
    # save_best='auto' resolves to the 'acc' key (see the best_acc_* ckpt
    # name); the canned scores peak at 7 in epoch 4.
    loader = DataLoader(EvalDataset())
    model = Model()
    data_loader = DataLoader(EvalDataset())
    eval_hook = EvalHook(data_loader, interval=1, save_best='auto')
    with tempfile.TemporaryDirectory() as tmpdir:
        logger = get_logger('test_eval')
        runner = EpochBasedRunner(model=model, work_dir=tmpdir, logger=logger)
        runner.register_checkpoint_hook(dict(interval=1))
        runner.register_hook(eval_hook)
        runner.run([loader], [('train', 1)], 8)
        ckpt_path = osp.join(tmpdir, 'best_acc_epoch_4.pth')
        assert (runner.meta['hook_msgs']['best_ckpt'] == ckpt_path)
        assert osp.exists(ckpt_path)
        assert (runner.meta['hook_msgs']['best_score'] == 7)
    # Explicit save_best='acc' behaves the same as 'auto' here.
    loader = DataLoader(EvalDataset())
    model = Model()
    data_loader = DataLoader(EvalDataset())
    eval_hook = EvalHook(data_loader, interval=1, save_best='acc')
    with tempfile.TemporaryDirectory() as tmpdir:
        logger = get_logger('test_eval')
        runner = EpochBasedRunner(model=model, work_dir=tmpdir, logger=logger)
        runner.register_checkpoint_hook(dict(interval=1))
        runner.register_hook(eval_hook)
        runner.run([loader], [('train', 1)], 8)
        ckpt_path = osp.join(tmpdir, 'best_acc_epoch_4.pth')
        assert (runner.meta['hook_msgs']['best_ckpt'] == ckpt_path)
        assert osp.exists(ckpt_path)
        assert (runner.meta['hook_msgs']['best_score'] == 7)
    # 'loss_top' is a default less-key (see EvalHook above), so the minimum
    # score (-3, epoch 6) wins.
    loader = DataLoader(EvalDataset())
    model = Model()
    data_loader = DataLoader(EvalDataset())
    eval_hook = EvalHook(data_loader, interval=1, save_best='loss_top')
    with tempfile.TemporaryDirectory() as tmpdir:
        logger = get_logger('test_eval')
        runner = EpochBasedRunner(model=model, work_dir=tmpdir, logger=logger)
        runner.register_checkpoint_hook(dict(interval=1))
        runner.register_hook(eval_hook)
        runner.run([loader], [('train', 1)], 8)
        ckpt_path = osp.join(tmpdir, 'best_loss_top_epoch_6.pth')
        assert (runner.meta['hook_msgs']['best_ckpt'] == ckpt_path)
        assert osp.exists(ckpt_path)
        assert (runner.meta['hook_msgs']['best_score'] == (- 3))
    # An explicit rule='greater' is honoured for an arbitrary key.
    data_loader = DataLoader(EvalDataset())
    eval_hook = EvalHook(data_loader, interval=1, save_best='score', rule='greater')
    with tempfile.TemporaryDirectory() as tmpdir:
        logger = get_logger('test_eval')
        runner = EpochBasedRunner(model=model, work_dir=tmpdir, logger=logger)
        runner.register_checkpoint_hook(dict(interval=1))
        runner.register_hook(eval_hook)
        runner.run([loader], [('train', 1)], 8)
        ckpt_path = osp.join(tmpdir, 'best_score_epoch_4.pth')
        assert (runner.meta['hook_msgs']['best_ckpt'] == ckpt_path)
        assert osp.exists(ckpt_path)
        assert (runner.meta['hook_msgs']['best_score'] == 7)
    # rule='less' overrides key-based inference even for 'acc'.
    data_loader = DataLoader(EvalDataset())
    eval_hook = EvalHook(data_loader, save_best='acc', rule='less')
    with tempfile.TemporaryDirectory() as tmpdir:
        logger = get_logger('test_eval')
        runner = EpochBasedRunner(model=model, work_dir=tmpdir, logger=logger)
        runner.register_checkpoint_hook(dict(interval=1))
        runner.register_hook(eval_hook)
        runner.run([loader], [('train', 1)], 8)
        ckpt_path = osp.join(tmpdir, 'best_acc_epoch_6.pth')
        assert (runner.meta['hook_msgs']['best_ckpt'] == ckpt_path)
        assert osp.exists(ckpt_path)
        assert (runner.meta['hook_msgs']['best_score'] == (- 3))
    # Resuming restores best_ckpt/best_score; the stale best checkpoint is
    # removed once a better one is saved.
    data_loader = DataLoader(EvalDataset())
    eval_hook = EvalHook(data_loader, save_best='acc')
    with tempfile.TemporaryDirectory() as tmpdir:
        logger = get_logger('test_eval')
        runner = EpochBasedRunner(model=model, work_dir=tmpdir, logger=logger)
        runner.register_checkpoint_hook(dict(interval=1))
        runner.register_hook(eval_hook)
        runner.run([loader], [('train', 1)], 2)
        old_ckpt_path = osp.join(tmpdir, 'best_acc_epoch_2.pth')
        assert (runner.meta['hook_msgs']['best_ckpt'] == old_ckpt_path)
        assert osp.exists(old_ckpt_path)
        assert (runner.meta['hook_msgs']['best_score'] == 4)
        resume_from = old_ckpt_path
        loader = DataLoader(ExampleDataset())
        eval_hook = EvalHook(data_loader, save_best='acc')
        runner = EpochBasedRunner(model=model, work_dir=tmpdir, logger=logger)
        runner.register_checkpoint_hook(dict(interval=1))
        runner.register_hook(eval_hook)
        runner.resume(resume_from)
        assert (runner.meta['hook_msgs']['best_ckpt'] == old_ckpt_path)
        assert osp.exists(old_ckpt_path)
        assert (runner.meta['hook_msgs']['best_score'] == 4)
        runner.run([loader], [('train', 1)], 8)
        ckpt_path = osp.join(tmpdir, 'best_acc_epoch_4.pth')
        assert (runner.meta['hook_msgs']['best_ckpt'] == ckpt_path)
        assert osp.exists(ckpt_path)
        assert (runner.meta['hook_msgs']['best_score'] == 7)
        assert (not osp.exists(old_ckpt_path))
    # User-supplied greater_keys/less_keys override the defaults: 'acc'
    # becomes a less-is-better key here.
    loader = DataLoader(EvalDataset())
    model = Model()
    data_loader = DataLoader(EvalDataset())
    eval_hook = EvalHook(data_loader, save_best='acc', test_fn=mock.MagicMock(return_value={}), greater_keys=[], less_keys=['acc'])
    with tempfile.TemporaryDirectory() as tmpdir:
        logger = get_logger('test_eval')
        runner = EpochBasedRunner(model=model, work_dir=tmpdir, logger=logger)
        runner.register_checkpoint_hook(dict(interval=1))
        runner.register_hook(eval_hook)
        runner.run([loader], [('train', 1)], 8)
        ckpt_path = osp.join(tmpdir, 'best_acc_epoch_6.pth')
        assert (runner.meta['hook_msgs']['best_ckpt'] == ckpt_path)
        assert osp.exists(ckpt_path)
        assert (runner.meta['hook_msgs']['best_score'] == (- 3))
    # An s3:// out_dir routes checkpoint I/O through PetrelBackend.
    loader = DataLoader(EvalDataset())
    model = Model()
    data_loader = DataLoader(EvalDataset())
    out_dir = 's3://user/data'
    eval_hook = EvalHook(data_loader, interval=1, save_best='auto', out_dir=out_dir)
    with patch.object(PetrelBackend, 'put') as mock_put, patch.object(PetrelBackend, 'remove') as mock_remove, patch.object(PetrelBackend, 'isfile') as mock_isfile, tempfile.TemporaryDirectory() as tmpdir:
        logger = get_logger('test_eval')
        runner = EpochBasedRunner(model=model, work_dir=tmpdir, logger=logger)
        runner.register_checkpoint_hook(dict(interval=1))
        runner.register_hook(eval_hook)
        runner.run([loader], [('train', 1)], 8)
        basename = osp.basename(runner.work_dir.rstrip(osp.sep))
        ckpt_path = f'{out_dir}/{basename}/best_acc_epoch_4.pth'
        assert (runner.meta['hook_msgs']['best_ckpt'] == ckpt_path)
        assert (runner.meta['hook_msgs']['best_score'] == 7)
        assert (mock_put.call_count == 3)
        assert (mock_remove.call_count == 2)
        assert (mock_isfile.call_count == 2)
|
@patch('mmcv.engine.single_gpu_test', MagicMock)
@patch('mmcv.engine.multi_gpu_test', MagicMock)
@pytest.mark.parametrize('EvalHookParam', [EvalHook, DistEvalHook])
@pytest.mark.parametrize('_build_demo_runner,by_epoch', [(_build_epoch_runner, True), (_build_iter_runner, False)])
def test_start_param(EvalHookParam, _build_demo_runner, by_epoch):
    """Check the interaction of ``start`` and ``interval`` for both eval
    hooks on epoch- and iter-based runners (evaluation call counts)."""
    dataloader = DataLoader(EvalDataset())
    # dataloader must be a real DataLoader instance.
    with pytest.raises(TypeError):
        EvalHookParam(dataloader=MagicMock(), interval=(- 1))
    # interval and start must be non-negative.
    with pytest.raises(ValueError):
        EvalHookParam(dataloader, interval=(- 1))
    with pytest.raises(ValueError):
        EvalHookParam(dataloader, start=(- 1))
    # 1. interval=1, no start: evaluate after each of the 2 epochs/iters.
    runner = _build_demo_runner()
    evalhook = EvalHookParam(dataloader, interval=1, by_epoch=by_epoch)
    evalhook.evaluate = MagicMock()
    runner.register_hook(evalhook)
    runner.run([dataloader], [('train', 1)], 2)
    assert (evalhook.evaluate.call_count == 2)
    # 2. start=1 with interval=1 gives the same two evaluations.
    runner = _build_demo_runner()
    evalhook = EvalHookParam(dataloader, start=1, interval=1, by_epoch=by_epoch)
    evalhook.evaluate = MagicMock()
    runner.register_hook(evalhook)
    runner.run([dataloader], [('train', 1)], 2)
    assert (evalhook.evaluate.call_count == 2)
    # 3. interval=2: only the second epoch/iter is evaluated.
    runner = _build_demo_runner()
    evalhook = EvalHookParam(dataloader, interval=2, by_epoch=by_epoch)
    evalhook.evaluate = MagicMock()
    runner.register_hook(evalhook)
    runner.run([dataloader], [('train', 1)], 2)
    assert (evalhook.evaluate.call_count == 1)
    # 4. start=1, interval=2 over 3 units: two evaluations.
    runner = _build_demo_runner()
    evalhook = EvalHookParam(dataloader, start=1, interval=2, by_epoch=by_epoch)
    evalhook.evaluate = MagicMock()
    runner.register_hook(evalhook)
    runner.run([dataloader], [('train', 1)], 3)
    assert (evalhook.evaluate.call_count == 2)
    # 5. start=0: one extra evaluation before training begins.
    runner = _build_demo_runner()
    evalhook = EvalHookParam(dataloader, start=0, by_epoch=by_epoch)
    evalhook.evaluate = MagicMock()
    runner.register_hook(evalhook)
    runner.run([dataloader], [('train', 1)], 2)
    assert (evalhook.evaluate.call_count == 3)
    # 6. resuming from epoch/iter 2 with start=1: only the remaining two
    # units are evaluated.
    runner = _build_demo_runner()
    evalhook = EvalHookParam(dataloader, start=1, by_epoch=by_epoch)
    evalhook.evaluate = MagicMock()
    runner.register_hook(evalhook)
    if by_epoch:
        runner._epoch = 2
    else:
        runner._iter = 2
    runner.run([dataloader], [('train', 1)], 3)
    assert (evalhook.evaluate.call_count == 2)
    # 7. resuming from epoch/iter 1 with start=2: evaluation begins at 2.
    runner = _build_demo_runner()
    evalhook = EvalHookParam(dataloader, start=2, by_epoch=by_epoch)
    evalhook.evaluate = MagicMock()
    runner.register_hook(evalhook)
    if by_epoch:
        runner._epoch = 1
    else:
        runner._iter = 1
    runner.run([dataloader], [('train', 1)], 3)
    assert (evalhook.evaluate.call_count == 2)
|
@pytest.mark.parametrize('runner,by_epoch,eval_hook_priority', [(EpochBasedRunner, True, 'NORMAL'), (EpochBasedRunner, True, 'LOW'), (IterBasedRunner, False, 'LOW')])
def test_logger(runner, by_epoch, eval_hook_priority):
    """Check that EvalHook produces a separate 'val' record in the json
    log and that 'time' is logged only for train iterations.

    Fix: the parametrized ``runner`` class was previously ignored — the
    body always constructed ``EpochBasedRunner``, so the
    ``(IterBasedRunner, False, 'LOW')`` case never exercised an
    iter-based runner. The runner is now built from the parametrized
    class.
    """
    loader = DataLoader(EvalDataset())
    model = Model()
    data_loader = DataLoader(EvalDataset())
    eval_hook = EvalHook(data_loader, interval=1, by_epoch=by_epoch, save_best='acc')
    with tempfile.TemporaryDirectory() as tmpdir:
        logger = get_logger('test_logger')
        optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
        # Build the runner class under test (epoch- or iter-based).
        runner = runner(model=model, optimizer=optimizer, work_dir=tmpdir, logger=logger)
        runner.register_logger_hooks(dict(interval=1, hooks=[dict(type='TextLoggerHook', by_epoch=by_epoch)]))
        runner.register_timer_hook(dict(type='IterTimerHook'))
        runner.register_hook(eval_hook, priority=eval_hook_priority)
        runner.run([loader], [('train', 1)], 1)
        path = osp.join(tmpdir, next(scandir(tmpdir, '.json')))
        with open(path) as fr:
            # First line is the env/meta record; skip it.
            fr.readline()
            train_log = json.loads(fr.readline())
            assert ((train_log['mode'] == 'train') and ('time' in train_log))
            val_log = json.loads(fr.readline())
            assert ((val_log['mode'] == 'val') and ('time' not in val_log))
|
def test_cast_tensor_type():
    """Check ``cast_tensor_type`` on tensors, non-tensors and containers."""
    # A tensor whose dtype matches src_type is converted to dst_type.
    converted = cast_tensor_type(torch.FloatTensor([5.0]), torch.float32, torch.int32)
    assert isinstance(converted, torch.Tensor)
    assert converted.dtype == torch.int32
    converted = cast_tensor_type(torch.FloatTensor([5.0]), torch.float, torch.half)
    assert isinstance(converted, torch.Tensor)
    assert converted.dtype == torch.half
    # A tensor whose dtype differs from src_type is left untouched.
    int_input = torch.IntTensor([5])
    converted = cast_tensor_type(int_input, torch.float, torch.half)
    assert isinstance(converted, torch.Tensor)
    assert converted.dtype == int_input.dtype
    # Non-tensor values pass through with their type preserved.
    assert isinstance(cast_tensor_type('tensor', str, str), str)
    assert isinstance(cast_tensor_type(np.array([5.0]), np.ndarray, np.ndarray), np.ndarray)
    # Dicts are converted recursively, value by value.
    mapping = dict(tensor_a=torch.FloatTensor([1.0]), tensor_b=torch.FloatTensor([2.0]))
    converted = cast_tensor_type(mapping, torch.float32, torch.int32)
    assert isinstance(converted, dict)
    assert converted['tensor_a'].dtype == torch.int32
    assert converted['tensor_b'].dtype == torch.int32
    # Lists are converted recursively, element by element.
    seq = [torch.FloatTensor([1.0]), torch.FloatTensor([2.0])]
    converted = cast_tensor_type(seq, torch.float32, torch.int32)
    assert isinstance(converted, list)
    assert converted[0].dtype == torch.int32
    assert converted[1].dtype == torch.int32
    # Unrecognized scalar types are returned unchanged.
    assert isinstance(cast_tensor_type(5, None, None), int)
|
def test_auto_fp16():
    """Check the ``auto_fp16`` decorator: it only applies to nn.Module
    methods, only converts once ``fp16_enabled`` is set, honours
    ``apply_to``, and can cast outputs back via ``out_fp32``."""
    # auto_fp16 refuses to decorate methods of plain (non-nn.Module)
    # objects.
    with pytest.raises(TypeError):
        class ExampleObject(object):
            @auto_fp16()
            def __call__(self, x):
                return x
        model = ExampleObject()
        input_x = torch.ones(1, dtype=torch.float32)
        model(input_x)
    # Conversion is a no-op until fp16_enabled is switched on.
    class ExampleModule(nn.Module):
        @auto_fp16()
        def forward(self, x, y):
            return (x, y)
    model = ExampleModule()
    input_x = torch.ones(1, dtype=torch.float32)
    input_y = torch.ones(1, dtype=torch.float32)
    (output_x, output_y) = model(input_x, input_y)
    assert (output_x.dtype == torch.float32)
    assert (output_y.dtype == torch.float32)
    model.fp16_enabled = True
    (output_x, output_y) = model(input_x, input_y)
    assert (output_x.dtype == torch.half)
    assert (output_y.dtype == torch.half)
    if torch.cuda.is_available():
        model.cuda()
        (output_x, output_y) = model(input_x.cuda(), input_y.cuda())
        assert (output_x.dtype == torch.half)
        assert (output_y.dtype == torch.half)
    # apply_to restricts conversion to the named arguments only.
    class ExampleModule(nn.Module):
        @auto_fp16(apply_to=('x',))
        def forward(self, x, y):
            return (x, y)
    model = ExampleModule()
    input_x = torch.ones(1, dtype=torch.float32)
    input_y = torch.ones(1, dtype=torch.float32)
    (output_x, output_y) = model(input_x, input_y)
    assert (output_x.dtype == torch.float32)
    assert (output_y.dtype == torch.float32)
    model.fp16_enabled = True
    (output_x, output_y) = model(input_x, input_y)
    assert (output_x.dtype == torch.half)
    assert (output_y.dtype == torch.float32)
    if torch.cuda.is_available():
        model.cuda()
        (output_x, output_y) = model(input_x.cuda(), input_y.cuda())
        assert (output_x.dtype == torch.half)
        assert (output_y.dtype == torch.float32)
    # Keyword arguments listed in apply_to are converted as well; z is
    # left untouched.
    class ExampleModule(nn.Module):
        @auto_fp16(apply_to=('x', 'y'))
        def forward(self, x, y=None, z=None):
            return (x, y, z)
    model = ExampleModule()
    input_x = torch.ones(1, dtype=torch.float32)
    input_y = torch.ones(1, dtype=torch.float32)
    input_z = torch.ones(1, dtype=torch.float32)
    (output_x, output_y, output_z) = model(input_x, y=input_y, z=input_z)
    assert (output_x.dtype == torch.float32)
    assert (output_y.dtype == torch.float32)
    assert (output_z.dtype == torch.float32)
    model.fp16_enabled = True
    (output_x, output_y, output_z) = model(input_x, y=input_y, z=input_z)
    assert (output_x.dtype == torch.half)
    assert (output_y.dtype == torch.half)
    assert (output_z.dtype == torch.float32)
    if torch.cuda.is_available():
        model.cuda()
        (output_x, output_y, output_z) = model(input_x.cuda(), y=input_y.cuda(), z=input_z.cuda())
        assert (output_x.dtype == torch.half)
        assert (output_y.dtype == torch.half)
        assert (output_z.dtype == torch.float32)
    # out_fp32=True converts all outputs to fp32 after the call.
    class ExampleModule(nn.Module):
        @auto_fp16(apply_to=('x', 'y'), out_fp32=True)
        def forward(self, x, y=None, z=None):
            return (x, y, z)
    model = ExampleModule()
    input_x = torch.ones(1, dtype=torch.half)
    input_y = torch.ones(1, dtype=torch.float32)
    input_z = torch.ones(1, dtype=torch.float32)
    (output_x, output_y, output_z) = model(input_x, y=input_y, z=input_z)
    assert (output_x.dtype == torch.half)
    assert (output_y.dtype == torch.float32)
    assert (output_z.dtype == torch.float32)
    model.fp16_enabled = True
    (output_x, output_y, output_z) = model(input_x, y=input_y, z=input_z)
    assert (output_x.dtype == torch.float32)
    assert (output_y.dtype == torch.float32)
    assert (output_z.dtype == torch.float32)
    if torch.cuda.is_available():
        model.cuda()
        (output_x, output_y, output_z) = model(input_x.cuda(), y=input_y.cuda(), z=input_z.cuda())
        assert (output_x.dtype == torch.float32)
        assert (output_y.dtype == torch.float32)
        assert (output_z.dtype == torch.float32)
|
def test_force_fp32():
    """Check the ``force_fp32`` decorator — the mirror image of
    ``auto_fp16``: it casts selected fp16 inputs back to fp32 once
    ``fp16_enabled`` is set, and can force fp16 outputs via ``out_fp16``."""
    # force_fp32 refuses to decorate methods of plain (non-nn.Module)
    # objects.
    with pytest.raises(TypeError):
        class ExampleObject(object):
            @force_fp32()
            def __call__(self, x):
                return x
        model = ExampleObject()
        input_x = torch.ones(1, dtype=torch.float32)
        model(input_x)
    # Conversion is a no-op until fp16_enabled is switched on.
    class ExampleModule(nn.Module):
        @force_fp32()
        def forward(self, x, y):
            return (x, y)
    model = ExampleModule()
    input_x = torch.ones(1, dtype=torch.half)
    input_y = torch.ones(1, dtype=torch.half)
    (output_x, output_y) = model(input_x, input_y)
    assert (output_x.dtype == torch.half)
    assert (output_y.dtype == torch.half)
    model.fp16_enabled = True
    (output_x, output_y) = model(input_x, input_y)
    assert (output_x.dtype == torch.float32)
    assert (output_y.dtype == torch.float32)
    if torch.cuda.is_available():
        model.cuda()
        (output_x, output_y) = model(input_x.cuda(), input_y.cuda())
        assert (output_x.dtype == torch.float32)
        assert (output_y.dtype == torch.float32)
    # apply_to restricts conversion to the named arguments only.
    class ExampleModule(nn.Module):
        @force_fp32(apply_to=('x',))
        def forward(self, x, y):
            return (x, y)
    model = ExampleModule()
    input_x = torch.ones(1, dtype=torch.half)
    input_y = torch.ones(1, dtype=torch.half)
    (output_x, output_y) = model(input_x, input_y)
    assert (output_x.dtype == torch.half)
    assert (output_y.dtype == torch.half)
    model.fp16_enabled = True
    (output_x, output_y) = model(input_x, input_y)
    assert (output_x.dtype == torch.float32)
    assert (output_y.dtype == torch.half)
    if torch.cuda.is_available():
        model.cuda()
        (output_x, output_y) = model(input_x.cuda(), input_y.cuda())
        assert (output_x.dtype == torch.float32)
        assert (output_y.dtype == torch.half)
    # Keyword arguments listed in apply_to are converted as well; z is
    # left untouched.
    class ExampleModule(nn.Module):
        @force_fp32(apply_to=('x', 'y'))
        def forward(self, x, y=None, z=None):
            return (x, y, z)
    model = ExampleModule()
    input_x = torch.ones(1, dtype=torch.half)
    input_y = torch.ones(1, dtype=torch.half)
    input_z = torch.ones(1, dtype=torch.half)
    (output_x, output_y, output_z) = model(input_x, y=input_y, z=input_z)
    assert (output_x.dtype == torch.half)
    assert (output_y.dtype == torch.half)
    assert (output_z.dtype == torch.half)
    model.fp16_enabled = True
    (output_x, output_y, output_z) = model(input_x, y=input_y, z=input_z)
    assert (output_x.dtype == torch.float32)
    assert (output_y.dtype == torch.float32)
    assert (output_z.dtype == torch.half)
    if torch.cuda.is_available():
        model.cuda()
        (output_x, output_y, output_z) = model(input_x.cuda(), y=input_y.cuda(), z=input_z.cuda())
        assert (output_x.dtype == torch.float32)
        assert (output_y.dtype == torch.float32)
        assert (output_z.dtype == torch.half)
    # out_fp16=True converts all outputs to fp16 after the call.
    class ExampleModule(nn.Module):
        @force_fp32(apply_to=('x', 'y'), out_fp16=True)
        def forward(self, x, y=None, z=None):
            return (x, y, z)
    model = ExampleModule()
    input_x = torch.ones(1, dtype=torch.float32)
    input_y = torch.ones(1, dtype=torch.half)
    input_z = torch.ones(1, dtype=torch.half)
    (output_x, output_y, output_z) = model(input_x, y=input_y, z=input_z)
    assert (output_x.dtype == torch.float32)
    assert (output_y.dtype == torch.half)
    assert (output_z.dtype == torch.half)
    model.fp16_enabled = True
    (output_x, output_y, output_z) = model(input_x, y=input_y, z=input_z)
    assert (output_x.dtype == torch.half)
    assert (output_y.dtype == torch.half)
    assert (output_z.dtype == torch.half)
    if torch.cuda.is_available():
        model.cuda()
        (output_x, output_y, output_z) = model(input_x.cuda(), y=input_y.cuda(), z=input_z.cuda())
        assert (output_x.dtype == torch.half)
        assert (output_y.dtype == torch.half)
        assert (output_z.dtype == torch.half)
|
def test_optimizerhook():
    """Check ``OptimizerHook(detect_anomalous_params=True)``: parameters
    that did not contribute to the loss must be reported via the logger."""
    class Model(nn.Module):
        # conv1 -> conv2 form a chain; conv3 is never used in forward, so
        # it is anomalous for every loss below.
        def __init__(self):
            super().__init__()
            self.conv1 = nn.Conv2d(in_channels=1, out_channels=2, kernel_size=3, stride=1, padding=1, dilation=1)
            self.conv2 = nn.Conv2d(in_channels=2, out_channels=2, kernel_size=3, stride=1, padding=1, dilation=1)
            self.conv3 = nn.Conv2d(in_channels=1, out_channels=2, kernel_size=3, stride=1, padding=1, dilation=1)
        def forward(self, x):
            x1 = self.conv1(x)
            x2 = self.conv2(x1)
            return (x1, x2)
    model = Model()
    x = torch.rand(1, 1, 3, 3)
    # Fake only the pieces of a runner that OptimizerHook touches.
    dummy_runner = Mock()
    dummy_runner.optimizer.zero_grad = Mock(return_value=None)
    dummy_runner.optimizer.step = Mock(return_value=None)
    dummy_runner.model = model
    dummy_runner.outputs = dict()
    dummy_runner.outputs['num_samples'] = 0
    class DummyLogger():
        # Collects log messages so their content can be asserted on.
        def __init__(self):
            self.msg = ''
        def log(self, msg=None, **kwargs):
            self.msg += msg
    dummy_runner.logger = DummyLogger()
    optimizer_hook = OptimizerHook(dict(max_norm=2), detect_anomalous_params=True)
    # Loss from x1 only: conv2 (not in this loss's graph) and conv3 are
    # flagged; conv1 contributed and must not be.
    dummy_runner.outputs['loss'] = model(x)[0].sum()
    optimizer_hook.after_train_iter(dummy_runner)
    assert ('conv2.weight' in dummy_runner.logger.msg)
    assert ('conv2.bias' in dummy_runner.logger.msg)
    assert ('conv3.weight' in dummy_runner.logger.msg)
    assert ('conv3.bias' in dummy_runner.logger.msg)
    assert ('conv1.weight' not in dummy_runner.logger.msg)
    assert ('conv1.bias' not in dummy_runner.logger.msg)
    # Loss from x2: conv1 and conv2 both contribute; only conv3 is flagged.
    dummy_runner.outputs['loss'] = model(x)[1].sum()
    dummy_runner.logger.msg = ''
    optimizer_hook.after_train_iter(dummy_runner)
    assert ('conv3.weight' in dummy_runner.logger.msg)
    assert ('conv3.bias' in dummy_runner.logger.msg)
    assert ('conv2.weight' not in dummy_runner.logger.msg)
    assert ('conv2.bias' not in dummy_runner.logger.msg)
    assert ('conv1.weight' not in dummy_runner.logger.msg)
    assert ('conv1.bias' not in dummy_runner.logger.msg)
|
def test_checkpoint_hook(tmp_path):
    """Test CheckpointHook for epoch- and iter-based runners, saving both
    to the local work_dir and to a (mocked) PetrelBackend remote out_dir.

    xdoctest -m tests/test_runner/test_hooks.py test_checkpoint_hook.
    """
    loader = DataLoader(torch.ones((5, 2)))
    # Epoch-based runner, local save: last_ckpt should be epoch_1.pth.
    runner = _build_demo_runner('EpochBasedRunner', max_epochs=1)
    runner.meta = dict()
    checkpointhook = CheckpointHook(interval=1, by_epoch=True)
    runner.register_hook(checkpointhook)
    runner.run([loader], [('train', 1)])
    assert (runner.meta['hook_msgs']['last_ckpt'] == osp.join(runner.work_dir, 'epoch_1.pth'))
    shutil.rmtree(runner.work_dir)
    # Epoch-based runner, remote out_dir: with max_keep_ckpts=2 old
    # checkpoints should be removed through the backend (mock_remove).
    runner = _build_demo_runner('EpochBasedRunner', max_epochs=4)
    runner.meta = dict()
    out_dir = 's3://user/data'
    with patch.object(PetrelBackend, 'put') as mock_put, patch.object(PetrelBackend, 'remove') as mock_remove, patch.object(PetrelBackend, 'isfile') as mock_isfile:
        checkpointhook = CheckpointHook(interval=1, out_dir=out_dir, by_epoch=True, max_keep_ckpts=2)
        runner.register_hook(checkpointhook)
        runner.run([loader], [('train', 1)])
        basename = osp.basename(runner.work_dir.rstrip(osp.sep))
        # Remote paths are joined with '/' regardless of the local os.sep.
        assert (runner.meta['hook_msgs']['last_ckpt'] == '/'.join([out_dir, basename, 'epoch_4.pth']))
        mock_put.assert_called()
        mock_remove.assert_called()
        mock_isfile.assert_called()
    shutil.rmtree(runner.work_dir)
    # Iter-based runner, local save: last_ckpt should be iter_1.pth.
    runner = _build_demo_runner('IterBasedRunner', max_iters=1, max_epochs=None)
    runner.meta = dict()
    checkpointhook = CheckpointHook(interval=1, by_epoch=False)
    runner.register_hook(checkpointhook)
    runner.run([loader], [('train', 1)])
    assert (runner.meta['hook_msgs']['last_ckpt'] == osp.join(runner.work_dir, 'iter_1.pth'))
    shutil.rmtree(runner.work_dir)
    # Iter-based runner, remote out_dir with checkpoint rotation.
    runner = _build_demo_runner('IterBasedRunner', max_iters=4, max_epochs=None)
    runner.meta = dict()
    out_dir = 's3://user/data'
    with patch.object(PetrelBackend, 'put') as mock_put, patch.object(PetrelBackend, 'remove') as mock_remove, patch.object(PetrelBackend, 'isfile') as mock_isfile:
        checkpointhook = CheckpointHook(interval=1, out_dir=out_dir, by_epoch=False, max_keep_ckpts=2)
        runner.register_hook(checkpointhook)
        runner.run([loader], [('train', 1)])
        basename = osp.basename(runner.work_dir.rstrip(osp.sep))
        assert (runner.meta['hook_msgs']['last_ckpt'] == '/'.join([out_dir, basename, 'iter_4.pth']))
        mock_put.assert_called()
        mock_remove.assert_called()
        mock_isfile.assert_called()
    shutil.rmtree(runner.work_dir)
|
def test_ema_hook():
    """Test EMAHook: EMA buffers must be written into checkpoints, and a
    run resumed with ``resume_from`` must restore and update them.

    xdoctest -m tests/test_hooks.py test_ema_hook.
    """
    class DemoModel(nn.Module):
        def __init__(self):
            super().__init__()
            self.conv = nn.Conv2d(in_channels=1, out_channels=2, kernel_size=1, padding=1, bias=True)
            self._init_weight()
        def _init_weight(self):
            # Zero-init so the parameter sums asserted below are predictable.
            constant_(self.conv.weight, 0)
            constant_(self.conv.bias, 0)
        def forward(self, x):
            return self.conv(x).sum()
        def train_step(self, x, optimizer, **kwargs):
            return dict(loss=self(x))
        def val_step(self, x, optimizer, **kwargs):
            return dict(loss=self(x))
    loader = DataLoader(torch.ones((1, 1, 1, 1)))
    runner = _build_demo_runner()
    demo_model = DemoModel()
    runner.model = demo_model
    # HIGHEST priority so the EMA hook runs before the checkpoint hook.
    emahook = EMAHook(momentum=0.1, interval=2, warm_up=100, resume_from=None)
    checkpointhook = CheckpointHook(interval=1, by_epoch=True)
    runner.register_hook(emahook, priority='HIGHEST')
    runner.register_hook(checkpointhook)
    runner.run([loader, loader], [('train', 1), ('val', 1)])
    checkpoint = torch.load(f'{runner.work_dir}/epoch_1.pth')
    contain_ema_buffer = False
    for (name, value) in checkpoint['state_dict'].items():
        if ('ema' in name):
            contain_ema_buffer = True
            assert (value.sum() == 0)
            # Overwrite EMA buffers with ones so the resumed run below can be
            # told apart from a fresh run.
            value.fill_(1)
        else:
            assert (value.sum() == 0)
    assert contain_ema_buffer
    torch.save(checkpoint, f'{runner.work_dir}/epoch_1.pth')
    work_dir = runner.work_dir
    # Resume from the modified checkpoint; the restored EMA buffers should
    # reflect the values written above.
    resume_ema_hook = EMAHook(momentum=0.5, warm_up=0, resume_from=f'{work_dir}/epoch_1.pth')
    runner = _build_demo_runner(max_epochs=2)
    runner.model = demo_model
    runner.register_hook(resume_ema_hook, priority='HIGHEST')
    checkpointhook = CheckpointHook(interval=1, by_epoch=True)
    runner.register_hook(checkpointhook)
    runner.run([loader, loader], [('train', 1), ('val', 1)])
    checkpoint = torch.load(f'{runner.work_dir}/epoch_2.pth')
    contain_ema_buffer = False
    for (name, value) in checkpoint['state_dict'].items():
        if ('ema' in name):
            contain_ema_buffer = True
            assert (value.sum() == 2)
        else:
            assert (value.sum() == 1)
    assert contain_ema_buffer
    shutil.rmtree(runner.work_dir)
    shutil.rmtree(work_dir)
|
def test_custom_hook():
    """Check registration and priority-based ordering of custom hooks."""
    @HOOKS.register_module()
    class ToyHook(Hook):
        def __init__(self, info, *args, **kwargs):
            super().__init__()
            self.info = info

    # Registering ``None`` is a no-op.
    runner = _build_demo_runner_without_hook('EpochBasedRunner', max_epochs=1)
    runner.register_custom_hooks(None)
    assert not runner.hooks

    # Numeric priorities: a lower value means an earlier position.
    hook_cfgs = [dict(type='ToyHook', priority=51, info=51), dict(type='ToyHook', priority=49, info=49)]
    runner.register_custom_hooks(hook_cfgs)
    assert [h.info for h in runner.hooks] == [49, 51]

    # A bare Hook instance is registered with the default (NORMAL) priority,
    # landing between priorities 49 and 51.
    runner.register_custom_hooks(ToyHook(info='default'))
    assert len(runner.hooks) == 3 and runner.hooks[1].info == 'default'
    shutil.rmtree(runner.work_dir)

    # String priorities are sorted according to the Priority enum order,
    # no matter the registration order.
    runner = _build_demo_runner_without_hook('EpochBasedRunner', max_epochs=1)
    priority_ranks = ['HIGHEST', 'VERY_HIGH', 'HIGH', 'ABOVE_NORMAL', 'NORMAL', 'BELOW_NORMAL', 'LOW', 'VERY_LOW', 'LOWEST']
    shuffled_ranks = priority_ranks.copy()
    random.shuffle(shuffled_ranks)
    runner.register_custom_hooks([dict(type='ToyHook', priority=rank, info=rank) for rank in shuffled_ranks])
    assert [h.info for h in runner.hooks] == priority_ranks
    shutil.rmtree(runner.work_dir)

    # Custom hooks interleave correctly with the standard training hooks.
    runner = _build_demo_runner_without_hook('EpochBasedRunner', max_epochs=1)
    hook_cfgs = [dict(type='ToyHook', priority=1, info='custom 1'), dict(type='ToyHook', priority='NORMAL', info='custom normal'), dict(type='ToyHook', priority=89, info='custom 89')]
    runner.register_training_hooks(lr_config=ToyHook('lr'), optimizer_config=ToyHook('optimizer'), checkpoint_config=ToyHook('checkpoint'), log_config=dict(interval=1, hooks=[dict(type='ToyHook', info='log')]), momentum_config=ToyHook('momentum'), timer_config=ToyHook('timer'), custom_hooks_config=hook_cfgs)
    expected_order = ['custom 1', 'lr', 'momentum', 'optimizer', 'checkpoint', 'custom normal', 'timer', 'custom 89', 'log']
    assert [h.info for h in runner.hooks] == expected_order
    shutil.rmtree(runner.work_dir)
|
def test_pavi_hook():
    """PaviLoggerHook should report scalars and upload the final checkpoint."""
    sys.modules['pavi'] = MagicMock()
    data_loader = DataLoader(torch.ones((5, 2)))
    runner = _build_demo_runner()
    runner.meta = dict(config_dict=dict(lr=0.02, gpu_ids=range(1)))
    pavi_hook = PaviLoggerHook(add_graph=False, add_last_ckpt=True)
    runner.register_hook(pavi_hook)
    runner.run([data_loader, data_loader], [('train', 1), ('val', 1)])
    shutil.rmtree(runner.work_dir)
    assert hasattr(pavi_hook, 'writer')
    pavi_hook.writer.add_scalars.assert_called_with('val', {'learning_rate': 0.02, 'momentum': 0.95}, 1)
    # On Windows the checkpoint hook cannot create the 'latest.pth' symlink,
    # so the real file name differs per platform.
    ckpt_name = 'latest.pth' if (platform.system() == 'Windows') else 'epoch_1.pth'
    snapshot_file_path = osp.join(runner.work_dir, ckpt_name)
    pavi_hook.writer.add_snapshot_file.assert_called_with(tag=runner.work_dir.split('/')[-1], snapshot_file_path=snapshot_file_path, iteration=1)
|
def test_sync_buffers_hook():
    """Smoke test: SyncBuffersHook registers and runs without error."""
    data_loader = DataLoader(torch.ones((5, 2)))
    runner = _build_demo_runner()
    runner.register_hook_from_cfg(dict(type='SyncBuffersHook'))
    runner.run([data_loader, data_loader], [('train', 1), ('val', 1)])
    shutil.rmtree(runner.work_dir)
|
@pytest.mark.parametrize('multi_optimizers, max_iters, gamma, cyclic_times', [(True, 8, 1, 1), (False, 8, 0.5, 2)])
def test_momentum_runner_hook(multi_optimizers, max_iters, gamma, cyclic_times):
    """Test momentum updater hooks (cyclic and step with constant / linear /
    exp warmup) by checking the scalars reported to a mocked Pavi writer.

    xdoctest -m tests/test_hooks.py test_momentum_runner_hook.
    """
    sys.modules['pavi'] = MagicMock()
    loader = DataLoader(torch.ones((10, 2)))
    # 1) Cyclic momentum updater combined with a cyclic LR updater.
    runner = _build_demo_runner(multi_optimizers=multi_optimizers)
    hook_cfg = dict(type='CyclicMomentumUpdaterHook', by_epoch=False, target_ratio=((0.85 / 0.95), 1), cyclic_times=cyclic_times, step_ratio_up=0.4, gamma=gamma)
    runner.register_hook_from_cfg(hook_cfg)
    hook_cfg = dict(type='CyclicLrUpdaterHook', by_epoch=False, target_ratio=(10, 1), cyclic_times=1, step_ratio_up=0.4)
    runner.register_hook_from_cfg(hook_cfg)
    runner.register_hook_from_cfg(dict(type='IterTimerHook'))
    hook = PaviLoggerHook(interval=1, add_graph=False, add_last_ckpt=True)
    runner.register_hook(hook)
    runner.run([loader], [('train', 1)])
    shutil.rmtree(runner.work_dir)
    assert hasattr(hook, 'writer')
    if multi_optimizers:
        calls = [call('train', {'learning_rate/model1': 0.01999999999999999, 'learning_rate/model2': 0.009999999999999995, 'momentum/model1': 0.95, 'momentum/model2': 0.9}, 1), call('train', {'learning_rate/model1': 0.2, 'learning_rate/model2': 0.1, 'momentum/model1': 0.85, 'momentum/model2': 0.8052631578947369}, 5), call('train', {'learning_rate/model1': 0.155, 'learning_rate/model2': 0.0775, 'momentum/model1': 0.875, 'momentum/model2': 0.8289473684210527}, 7)]
    else:
        calls = [call('train', {'learning_rate': 0.01999999999999999, 'momentum': 0.95}, 1), call('train', {'learning_rate': 0.11, 'momentum': 0.85}, 3), call('train', {'learning_rate': 0.1879422863405995, 'momentum': 0.95}, 6), call('train', {'learning_rate': 0.11000000000000001, 'momentum': 0.9}, 8)]
    hook.writer.add_scalars.assert_has_calls(calls, any_order=True)
    # 2) Step momentum updater with constant warmup for 5 iters.
    sys.modules['pavi'] = MagicMock()
    runner = _build_demo_runner(multi_optimizers=multi_optimizers)
    hook_cfg = dict(type='StepMomentumUpdaterHook', by_epoch=False, warmup='constant', warmup_iters=5, warmup_ratio=0.5, step=[10])
    runner.register_hook_from_cfg(hook_cfg)
    runner.register_hook_from_cfg(dict(type='IterTimerHook'))
    hook = PaviLoggerHook(interval=1, add_graph=False, add_last_ckpt=True)
    runner.register_hook(hook)
    runner.run([loader], [('train', 1)])
    shutil.rmtree(runner.work_dir)
    assert hasattr(hook, 'writer')
    if multi_optimizers:
        calls = [call('train', {'learning_rate/model1': 0.02, 'learning_rate/model2': 0.01, 'momentum/model1': 1.9, 'momentum/model2': 1.8}, 1), call('train', {'learning_rate/model1': 0.02, 'learning_rate/model2': 0.01, 'momentum/model1': 1.9, 'momentum/model2': 1.8}, 5), call('train', {'learning_rate/model1': 0.02, 'learning_rate/model2': 0.01, 'momentum/model1': 0.95, 'momentum/model2': 0.9}, 10)]
    else:
        calls = [call('train', {'learning_rate': 0.02, 'momentum': 1.9}, 1), call('train', {'learning_rate': 0.02, 'momentum': 1.9}, 5), call('train', {'learning_rate': 0.02, 'momentum': 0.95}, 10)]
    hook.writer.add_scalars.assert_has_calls(calls, any_order=True)
    # 3) Step momentum updater with linear warmup.
    sys.modules['pavi'] = MagicMock()
    runner = _build_demo_runner(multi_optimizers=multi_optimizers)
    hook_cfg = dict(type='StepMomentumUpdaterHook', by_epoch=False, warmup='linear', warmup_iters=5, warmup_ratio=0.5, step=[10])
    runner.register_hook_from_cfg(hook_cfg)
    runner.register_hook_from_cfg(dict(type='IterTimerHook'))
    hook = PaviLoggerHook(interval=1, add_graph=False, add_last_ckpt=True)
    runner.register_hook(hook)
    runner.run([loader], [('train', 1)])
    shutil.rmtree(runner.work_dir)
    assert hasattr(hook, 'writer')
    if multi_optimizers:
        calls = [call('train', {'learning_rate/model1': 0.02, 'learning_rate/model2': 0.01, 'momentum/model1': 1.9, 'momentum/model2': 1.8}, 1), call('train', {'learning_rate/model1': 0.02, 'learning_rate/model2': 0.01, 'momentum/model1': 1.3571428571428572, 'momentum/model2': 1.2857142857142858}, 3), call('train', {'learning_rate/model1': 0.02, 'learning_rate/model2': 0.01, 'momentum/model1': 0.95, 'momentum/model2': 0.9}, 10)]
    else:
        calls = [call('train', {'learning_rate': 0.02, 'momentum': 1.9}, 1), call('train', {'learning_rate': 0.02, 'momentum': 1.3571428571428572}, 3), call('train', {'learning_rate': 0.02, 'momentum': 0.95}, 10)]
    hook.writer.add_scalars.assert_has_calls(calls, any_order=True)
    # 4) Step momentum updater with exponential warmup.
    sys.modules['pavi'] = MagicMock()
    runner = _build_demo_runner(multi_optimizers=multi_optimizers)
    hook_cfg = dict(type='StepMomentumUpdaterHook', by_epoch=False, warmup='exp', warmup_iters=5, warmup_ratio=0.5, step=[10])
    runner.register_hook_from_cfg(hook_cfg)
    runner.register_hook_from_cfg(dict(type='IterTimerHook'))
    hook = PaviLoggerHook(interval=1, add_graph=False, add_last_ckpt=True)
    runner.register_hook(hook)
    runner.run([loader], [('train', 1)])
    shutil.rmtree(runner.work_dir)
    assert hasattr(hook, 'writer')
    if multi_optimizers:
        calls = [call('train', {'learning_rate/model1': 0.02, 'learning_rate/model2': 0.01, 'momentum/model1': 1.9, 'momentum/model2': 1.8}, 1), call('train', {'learning_rate/model1': 0.02, 'learning_rate/model2': 0.01, 'momentum/model1': 1.4399307381848783, 'momentum/model2': 1.3641449098593583}, 3), call('train', {'learning_rate/model1': 0.02, 'learning_rate/model2': 0.01, 'momentum/model1': 0.95, 'momentum/model2': 0.9}, 10)]
    else:
        calls = [call('train', {'learning_rate': 0.02, 'momentum': 1.9}, 1), call('train', {'learning_rate': 0.02, 'momentum': 1.4399307381848783}, 3), call('train', {'learning_rate': 0.02, 'momentum': 0.95}, 10)]
    hook.writer.add_scalars.assert_has_calls(calls, any_order=True)
|
@pytest.mark.parametrize('multi_optimizers', (True, False))
def test_cosine_runner_hook(multi_optimizers):
    """Test CosineAnnealingMomentumUpdaterHook and CosineAnnealingLrUpdaterHook
    via the scalars reported to a mocked Pavi writer.

    xdoctest -m tests/test_hooks.py test_cosine_runner_hook.
    """
    sys.modules['pavi'] = MagicMock()
    loader = DataLoader(torch.ones((10, 2)))
    runner = _build_demo_runner(multi_optimizers=multi_optimizers)
    hook_cfg = dict(type='CosineAnnealingMomentumUpdaterHook', min_momentum_ratio=(0.99 / 0.95), by_epoch=False, warmup_iters=2, warmup_ratio=(0.9 / 0.95))
    runner.register_hook_from_cfg(hook_cfg)
    hook_cfg = dict(type='CosineAnnealingLrUpdaterHook', by_epoch=False, min_lr_ratio=0, warmup_iters=2, warmup_ratio=0.9)
    runner.register_hook_from_cfg(hook_cfg)
    runner.register_hook_from_cfg(dict(type='IterTimerHook'))
    runner.register_hook(IterTimerHook())
    hook = PaviLoggerHook(interval=1, add_graph=False, add_last_ckpt=True)
    runner.register_hook(hook)
    runner.run([loader], [('train', 1)])
    shutil.rmtree(runner.work_dir)
    assert hasattr(hook, 'writer')
    # Expected scalar values at iterations 1, 6 and 10.
    if multi_optimizers:
        calls = [call('train', {'learning_rate/model1': 0.02, 'learning_rate/model2': 0.01, 'momentum/model1': 0.95, 'momentum/model2': 0.9}, 1), call('train', {'learning_rate/model1': 0.01, 'learning_rate/model2': 0.005, 'momentum/model1': 0.97, 'momentum/model2': 0.9189473684210527}, 6), call('train', {'learning_rate/model1': 0.0004894348370484647, 'learning_rate/model2': 0.00024471741852423234, 'momentum/model1': 0.9890211303259032, 'momentum/model2': 0.9369673866245399}, 10)]
    else:
        calls = [call('train', {'learning_rate': 0.02, 'momentum': 0.95}, 1), call('train', {'learning_rate': 0.01, 'momentum': 0.97}, 6), call('train', {'learning_rate': 0.0004894348370484647, 'momentum': 0.9890211303259032}, 10)]
    hook.writer.add_scalars.assert_has_calls(calls, any_order=True)
|
@pytest.mark.parametrize('multi_optimizers, by_epoch', [(False, False), (True, False), (False, True), (True, True)])
def test_flat_cosine_runner_hook(multi_optimizers, by_epoch):
    """Test FlatCosineAnnealingLrUpdaterHook in both epoch and iter mode.

    xdoctest -m tests/test_hooks.py test_flat_cosine_runner_hook.
    """
    sys.modules['pavi'] = MagicMock()
    loader = DataLoader(torch.ones((10, 2)))
    max_epochs = (10 if by_epoch else 1)
    runner = _build_demo_runner(multi_optimizers=multi_optimizers, max_epochs=max_epochs)
    # start_percent must not be negative.
    with pytest.raises(ValueError):
        FlatCosineAnnealingLrUpdaterHook(start_percent=(- 0.1), min_lr_ratio=0)
    hook_cfg = dict(type='FlatCosineAnnealingLrUpdaterHook', by_epoch=by_epoch, min_lr_ratio=0, warmup='linear', warmup_iters=(10 if by_epoch else 2), warmup_ratio=0.9, start_percent=0.5)
    runner.register_hook_from_cfg(hook_cfg)
    runner.register_hook_from_cfg(dict(type='IterTimerHook'))
    runner.register_hook(IterTimerHook())
    hook = PaviLoggerHook(interval=1, add_graph=False, add_last_ckpt=True)
    runner.register_hook(hook)
    runner.run([loader], [('train', 1)])
    shutil.rmtree(runner.work_dir)
    assert hasattr(hook, 'writer')
    # The LR stays flat for the first half of training and then follows a
    # cosine decay; expected values checked at warmup, flat and decay phases.
    if multi_optimizers:
        if by_epoch:
            calls = [call('train', {'learning_rate/model1': 0.018000000000000002, 'learning_rate/model2': 0.009000000000000001, 'momentum/model1': 0.95, 'momentum/model2': 0.9}, 1), call('train', {'learning_rate/model1': 0.02, 'learning_rate/model2': 0.01, 'momentum/model1': 0.95, 'momentum/model2': 0.9}, 11), call('train', {'learning_rate/model1': 0.018090169943749474, 'learning_rate/model2': 0.009045084971874737, 'momentum/model1': 0.95, 'momentum/model2': 0.9}, 61), call('train', {'learning_rate/model1': 0.0019098300562505265, 'learning_rate/model2': 0.0009549150281252633, 'momentum/model1': 0.95, 'momentum/model2': 0.9}, 100)]
        else:
            calls = [call('train', {'learning_rate/model1': 0.018000000000000002, 'learning_rate/model2': 0.009000000000000001, 'momentum/model1': 0.95, 'momentum/model2': 0.9}, 1), call('train', {'learning_rate/model1': 0.02, 'learning_rate/model2': 0.01, 'momentum/model1': 0.95, 'momentum/model2': 0.9}, 6), call('train', {'learning_rate/model1': 0.018090169943749474, 'learning_rate/model2': 0.009045084971874737, 'momentum/model1': 0.95, 'momentum/model2': 0.9}, 7), call('train', {'learning_rate/model1': 0.0019098300562505265, 'learning_rate/model2': 0.0009549150281252633, 'momentum/model1': 0.95, 'momentum/model2': 0.9}, 10)]
    elif by_epoch:
        calls = [call('train', {'learning_rate': 0.018000000000000002, 'momentum': 0.95}, 1), call('train', {'learning_rate': 0.02, 'momentum': 0.95}, 11), call('train', {'learning_rate': 0.018090169943749474, 'momentum': 0.95}, 61), call('train', {'learning_rate': 0.0019098300562505265, 'momentum': 0.95}, 100)]
    else:
        calls = [call('train', {'learning_rate': 0.018000000000000002, 'momentum': 0.95}, 1), call('train', {'learning_rate': 0.02, 'momentum': 0.95}, 6), call('train', {'learning_rate': 0.018090169943749474, 'momentum': 0.95}, 7), call('train', {'learning_rate': 0.0019098300562505265, 'momentum': 0.95}, 10)]
    hook.writer.add_scalars.assert_has_calls(calls, any_order=True)
|
@pytest.mark.parametrize('multi_optimizers, max_iters', [(True, 10), (True, 2), (False, 10), (False, 2)])
def test_one_cycle_runner_hook(multi_optimizers, max_iters):
    """Test OneCycleLrUpdaterHook and OneCycleMomentumUpdaterHook.

    The second part cross-checks the hook's LR schedule against
    ``torch.optim.lr_scheduler.OneCycleLR`` with the same arguments.
    """
    # Invalid constructor arguments must be rejected.
    with pytest.raises(AssertionError):
        OneCycleLrUpdaterHook(max_lr=0.1, by_epoch=True)
    with pytest.raises(ValueError):
        OneCycleLrUpdaterHook(max_lr=0.1, pct_start=(- 0.1))
    with pytest.raises(ValueError):
        OneCycleLrUpdaterHook(max_lr=0.1, anneal_strategy='sin')
    sys.modules['pavi'] = MagicMock()
    loader = DataLoader(torch.ones((10, 2)))
    runner = _build_demo_runner(multi_optimizers=multi_optimizers)
    hook_cfg = dict(type='OneCycleMomentumUpdaterHook', base_momentum=0.85, max_momentum=0.95, pct_start=0.5, anneal_strategy='cos', three_phase=False)
    runner.register_hook_from_cfg(hook_cfg)
    hook_cfg = dict(type='OneCycleLrUpdaterHook', max_lr=0.01, pct_start=0.5, anneal_strategy='cos', div_factor=25, final_div_factor=10000.0, three_phase=False)
    runner.register_hook_from_cfg(hook_cfg)
    runner.register_hook_from_cfg(dict(type='IterTimerHook'))
    runner.register_hook(IterTimerHook())
    hook = PaviLoggerHook(interval=1, add_graph=False, add_last_ckpt=True)
    runner.register_hook(hook)
    runner.run([loader], [('train', 1)])
    shutil.rmtree(runner.work_dir)
    assert hasattr(hook, 'writer')
    if multi_optimizers:
        calls = [call('train', {'learning_rate/model1': 0.0003999999999999993, 'learning_rate/model2': 0.0003999999999999993, 'momentum/model1': 0.95, 'momentum/model2': 0.95}, 1), call('train', {'learning_rate/model1': 0.00904508879153485, 'learning_rate/model2': 0.00904508879153485, 'momentum/model1': 0.8595491502812526, 'momentum/model2': 0.8595491502812526}, 6), call('train', {'learning_rate/model1': 4e-08, 'learning_rate/model2': 4e-08, 'momentum/model1': 0.95, 'momentum/model2': 0.95}, 10)]
    else:
        calls = [call('train', {'learning_rate': 0.0003999999999999993, 'momentum': 0.95}, 1), call('train', {'learning_rate': 0.00904508879153485, 'momentum': 0.8595491502812526}, 6), call('train', {'learning_rate': 4e-08, 'momentum': 0.95}, 10)]
    hook.writer.add_scalars.assert_has_calls(calls, any_order=True)
    # Compare against PyTorch's own OneCycleLR with total_steps=5.
    sys.modules['pavi'] = MagicMock()
    loader = DataLoader(torch.ones((10, 2)))
    runner = _build_demo_runner(runner_type='IterBasedRunner', max_epochs=None, max_iters=max_iters)
    args = dict(max_lr=0.01, total_steps=5, pct_start=0.5, anneal_strategy='linear', div_factor=25, final_div_factor=10000.0)
    hook = OneCycleLrUpdaterHook(**args)
    runner.register_hook(hook)
    if (max_iters == 10):
        # max_iters (10) exceeds total_steps (5): the run must fail.
        with pytest.raises(ValueError):
            runner.run([loader], [('train', 1)])
    else:
        runner.run([loader], [('train', 1)])
        lr_last = runner.current_lr()
        t = torch.tensor([0.0], requires_grad=True)
        optim = torch.optim.SGD([t], lr=0.01)
        lr_scheduler = torch.optim.lr_scheduler.OneCycleLR(optim, **args)
        lr_target = []
        for _ in range(max_iters):
            optim.step()
            lr_target.append(optim.param_groups[0]['lr'])
            lr_scheduler.step()
        # The hook's final LR must match torch's reference schedule.
        assert (lr_target[(- 1)] == lr_last[0])
|
@pytest.mark.parametrize('multi_optimizers', (True, False))
def test_cosine_restart_lr_update_hook(multi_optimizers):
    """Test CosineRestartLrUpdaterHook.

    Covers invalid constructor arguments, a run whose periods do not cover
    all iterations (expected to raise), and a valid two-period restart
    schedule checked via a mocked Pavi writer.
    """
    # min_lr and min_lr_ratio are mutually exclusive.
    with pytest.raises(AssertionError):
        CosineRestartLrUpdaterHook(by_epoch=False, periods=[2, 10], restart_weights=[0.5, 0.5], min_lr=0.1, min_lr_ratio=0)
    # periods and restart_weights must have the same length.
    with pytest.raises(AssertionError):
        CosineRestartLrUpdaterHook(by_epoch=False, periods=[2, 10], restart_weights=[0.5], min_lr_ratio=0)
    # periods sum to 7 < 10 iterations, so running must raise ValueError.
    with pytest.raises(ValueError):
        sys.modules['pavi'] = MagicMock()
        loader = DataLoader(torch.ones((10, 2)))
        runner = _build_demo_runner()
        hook = CosineRestartLrUpdaterHook(by_epoch=False, periods=[5, 2], restart_weights=[0.5, 0.5], min_lr=0.0001)
        runner.register_hook(hook)
        runner.register_hook(IterTimerHook())
        hook = PaviLoggerHook(interval=1, add_graph=False, add_last_ckpt=True)
        runner.register_hook(hook)
        runner.run([loader], [('train', 1)])
    shutil.rmtree(runner.work_dir)
    # Valid schedule: two periods of 5 iterations with weight 0.5 each.
    sys.modules['pavi'] = MagicMock()
    loader = DataLoader(torch.ones((10, 2)))
    runner = _build_demo_runner(multi_optimizers=multi_optimizers)
    hook = CosineRestartLrUpdaterHook(by_epoch=False, periods=[5, 5], restart_weights=[0.5, 0.5], min_lr_ratio=0)
    runner.register_hook(hook)
    runner.register_hook(IterTimerHook())
    hook = PaviLoggerHook(interval=1, add_graph=False, add_last_ckpt=True)
    runner.register_hook(hook)
    runner.run([loader], [('train', 1)])
    shutil.rmtree(runner.work_dir)
    assert hasattr(hook, 'writer')
    if multi_optimizers:
        calls = [call('train', {'learning_rate/model1': 0.01, 'learning_rate/model2': 0.005, 'momentum/model1': 0.95, 'momentum/model2': 0.9}, 1), call('train', {'learning_rate/model1': 0.01, 'learning_rate/model2': 0.005, 'momentum/model1': 0.95, 'momentum/model2': 0.9}, 6), call('train', {'learning_rate/model1': 0.0009549150281252633, 'learning_rate/model2': 0.00047745751406263163, 'momentum/model1': 0.95, 'momentum/model2': 0.9}, 10)]
    else:
        calls = [call('train', {'learning_rate': 0.01, 'momentum': 0.95}, 1), call('train', {'learning_rate': 0.01, 'momentum': 0.95}, 6), call('train', {'learning_rate': 0.0009549150281252633, 'momentum': 0.95}, 10)]
    hook.writer.add_scalars.assert_has_calls(calls, any_order=True)
|
@pytest.mark.parametrize('multi_optimizers', (True, False))
def test_step_runner_hook(multi_optimizers):
    """Test StepLrUpdaterHook (and StepMomentumUpdaterHook) with both an
    integer step and a list of steps, including min_lr / min_momentum caps.
    """
    # Invalid constructor arguments must be rejected.
    with pytest.raises(TypeError):
        StepLrUpdaterHook()
    with pytest.raises(AssertionError):
        StepLrUpdaterHook((- 10))
    with pytest.raises(AssertionError):
        StepLrUpdaterHook([10, 16, (- 20)])
    # 1) Integer step: decay every 5 iters, clamped at min_lr/min_momentum.
    sys.modules['pavi'] = MagicMock()
    loader = DataLoader(torch.ones((30, 2)))
    runner = _build_demo_runner(multi_optimizers=multi_optimizers)
    hook_cfg = dict(type='StepMomentumUpdaterHook', by_epoch=False, step=5, gamma=0.5, min_momentum=0.05)
    runner.register_hook_from_cfg(hook_cfg)
    hook = StepLrUpdaterHook(by_epoch=False, step=5, gamma=0.5, min_lr=0.001)
    runner.register_hook(hook)
    runner.register_hook(IterTimerHook())
    hook = PaviLoggerHook(interval=1, add_graph=False, add_last_ckpt=True)
    runner.register_hook(hook)
    runner.run([loader], [('train', 1)])
    shutil.rmtree(runner.work_dir)
    assert hasattr(hook, 'writer')
    if multi_optimizers:
        calls = [call('train', {'learning_rate/model1': 0.02, 'learning_rate/model2': 0.01, 'momentum/model1': 0.95, 'momentum/model2': 0.9}, 1), call('train', {'learning_rate/model1': 0.01, 'learning_rate/model2': 0.005, 'momentum/model1': 0.475, 'momentum/model2': 0.45}, 6), call('train', {'learning_rate/model1': 0.0025, 'learning_rate/model2': 0.00125, 'momentum/model1': 0.11875, 'momentum/model2': 0.1125}, 16), call('train', {'learning_rate/model1': 0.00125, 'learning_rate/model2': 0.001, 'momentum/model1': 0.059375, 'momentum/model2': 0.05625}, 21), call('train', {'learning_rate/model1': 0.001, 'learning_rate/model2': 0.001, 'momentum/model1': 0.05, 'momentum/model2': 0.05}, 26), call('train', {'learning_rate/model1': 0.001, 'learning_rate/model2': 0.001, 'momentum/model1': 0.05, 'momentum/model2': 0.05}, 30)]
    else:
        calls = [call('train', {'learning_rate': 0.02, 'momentum': 0.95}, 1), call('train', {'learning_rate': 0.01, 'momentum': 0.475}, 6), call('train', {'learning_rate': 0.0025, 'momentum': 0.11875}, 16), call('train', {'learning_rate': 0.00125, 'momentum': 0.059375}, 21), call('train', {'learning_rate': 0.001, 'momentum': 0.05}, 26), call('train', {'learning_rate': 0.001, 'momentum': 0.05}, 30)]
    hook.writer.add_scalars.assert_has_calls(calls, any_order=True)
    # 2) Explicit milestone list: decay at iterations 4, 6 and 8.
    sys.modules['pavi'] = MagicMock()
    loader = DataLoader(torch.ones((10, 2)))
    runner = _build_demo_runner(multi_optimizers=multi_optimizers)
    hook_cfg = dict(type='StepMomentumUpdaterHook', by_epoch=False, step=[4, 6, 8], gamma=0.1)
    runner.register_hook_from_cfg(hook_cfg)
    hook = StepLrUpdaterHook(by_epoch=False, step=[4, 6, 8], gamma=0.1)
    runner.register_hook(hook)
    runner.register_hook(IterTimerHook())
    hook = PaviLoggerHook(interval=1, add_graph=False, add_last_ckpt=True)
    runner.register_hook(hook)
    runner.run([loader], [('train', 1)])
    shutil.rmtree(runner.work_dir)
    assert hasattr(hook, 'writer')
    if multi_optimizers:
        calls = [call('train', {'learning_rate/model1': 0.02, 'learning_rate/model2': 0.01, 'momentum/model1': 0.95, 'momentum/model2': 0.9}, 1), call('train', {'learning_rate/model1': 0.002, 'learning_rate/model2': 0.001, 'momentum/model1': 0.095, 'momentum/model2': 0.09000000000000001}, 5), call('train', {'learning_rate/model1': 0.00020000000000000004, 'learning_rate/model2': 0.00010000000000000002, 'momentum/model1': 0.009500000000000001, 'momentum/model2': 0.009000000000000003}, 7), call('train', {'learning_rate/model1': 2.0000000000000005e-05, 'learning_rate/model2': 1.0000000000000003e-05, 'momentum/model1': 0.0009500000000000002, 'momentum/model2': 0.0009000000000000002}, 9)]
    else:
        calls = [call('train', {'learning_rate': 0.02, 'momentum': 0.95}, 1), call('train', {'learning_rate': 0.002, 'momentum': 0.095}, 5), call('train', {'learning_rate': 0.00020000000000000004, 'momentum': 0.009500000000000001}, 7), call('train', {'learning_rate': 2.0000000000000005e-05, 'momentum': 0.0009500000000000002}, 9)]
    hook.writer.add_scalars.assert_has_calls(calls, any_order=True)
|
@pytest.mark.parametrize('multi_optimizers, max_iters, gamma, cyclic_times', [(True, 8, 1, 1), (False, 8, 0.5, 2)])
def test_cyclic_lr_update_hook(multi_optimizers, max_iters, gamma, cyclic_times):
    """Test CyclicLrUpdaterHook: argument validation plus a short run with
    a linear anneal strategy, verified via a mocked Pavi writer."""
    # Invalid constructor arguments must be rejected.
    with pytest.raises(AssertionError):
        CyclicLrUpdaterHook(by_epoch=True)
    with pytest.raises(AssertionError):
        CyclicLrUpdaterHook(by_epoch=False, target_ratio=(10.0, 0.1, 0.2))
    with pytest.raises(AssertionError):
        CyclicLrUpdaterHook(by_epoch=False, step_ratio_up=1.4)
    with pytest.raises(ValueError):
        CyclicLrUpdaterHook(by_epoch=False, anneal_strategy='sin')
    with pytest.raises(AssertionError):
        CyclicLrUpdaterHook(by_epoch=False, gamma=0)
    sys.modules['pavi'] = MagicMock()
    loader = DataLoader(torch.ones((10, 2)))
    runner = _build_demo_runner(runner_type='IterBasedRunner', max_epochs=None, max_iters=max_iters, multi_optimizers=multi_optimizers)
    schedule_hook = CyclicLrUpdaterHook(by_epoch=False, target_ratio=(10.0, 1.0), cyclic_times=cyclic_times, step_ratio_up=0.5, anneal_strategy='linear', gamma=gamma)
    runner.register_hook(schedule_hook)
    runner.register_hook_from_cfg(dict(type='IterTimerHook'))
    runner.register_hook(IterTimerHook())
    hook = PaviLoggerHook(interval=1, add_graph=False, add_last_ckpt=True)
    runner.register_hook(hook)
    runner.run([loader], [('train', 1)])
    shutil.rmtree(runner.work_dir)
    assert hasattr(hook, 'writer')
    # Expected LR values for one cycle (multi-optimizer) or two decaying
    # cycles (single-optimizer, gamma=0.5).
    if multi_optimizers:
        calls = [call('train', {'learning_rate/model1': 0.02, 'learning_rate/model2': 0.01, 'momentum/model1': 0.95, 'momentum/model2': 0.9}, 1), call('train', {'learning_rate/model1': 0.155, 'learning_rate/model2': 0.0775, 'momentum/model1': 0.95, 'momentum/model2': 0.9}, 4), call('train', {'learning_rate/model1': 0.155, 'learning_rate/model2': 0.0775, 'momentum/model1': 0.95, 'momentum/model2': 0.9}, 6)]
    else:
        calls = [call('train', {'learning_rate': 0.02, 'momentum': 0.95}, 1), call('train', {'learning_rate': 0.11, 'momentum': 0.95}, 4), call('train', {'learning_rate': 0.065, 'momentum': 0.95}, 6), call('train', {'learning_rate': 0.11, 'momentum': 0.95}, 7)]
    hook.writer.add_scalars.assert_has_calls(calls, any_order=True)
|
@pytest.mark.parametrize('log_model', (True, False))
def test_mlflow_hook(log_model):
    """MlflowLoggerHook should set the experiment, log metrics, and log the
    trained model only when ``log_model`` is enabled."""
    sys.modules['mlflow'] = MagicMock()
    sys.modules['mlflow.pytorch'] = MagicMock()
    runner = _build_demo_runner()
    data_loader = DataLoader(torch.ones((5, 2)))
    mlflow_hook = MlflowLoggerHook(exp_name='test', log_model=log_model)
    runner.register_hook(mlflow_hook)
    runner.run([data_loader, data_loader], [('train', 1), ('val', 1)])
    shutil.rmtree(runner.work_dir)
    mlflow_hook.mlflow.set_experiment.assert_called_with('test')
    mlflow_hook.mlflow.log_metrics.assert_called_with({'learning_rate': 0.02, 'momentum': 0.95}, step=6)
    if not log_model:
        assert not mlflow_hook.mlflow_pytorch.log_model.called
    else:
        mlflow_hook.mlflow_pytorch.log_model.assert_called_with(runner.model, 'models', pip_requirements=[f'torch=={TORCH_VERSION}'])
|
def test_segmind_hook():
    """SegmindLoggerHook should forward the final metrics to segmind."""
    sys.modules['segmind'] = MagicMock()
    runner = _build_demo_runner()
    segmind_hook = SegmindLoggerHook()
    data_loader = DataLoader(torch.ones((5, 2)))
    runner.register_hook(segmind_hook)
    runner.run([data_loader, data_loader], [('train', 1), ('val', 1)])
    shutil.rmtree(runner.work_dir)
    segmind_hook.mlflow_log.assert_called_with(segmind_hook.log_metrics, {'learning_rate': 0.02, 'momentum': 0.95}, step=runner.epoch, epoch=runner.epoch)
|
def test_wandb_hook():
    """WandbLoggerHook should init wandb, log metrics and the artifact,
    then join at the end of the run."""
    sys.modules['wandb'] = MagicMock()
    runner = _build_demo_runner()
    wandb_hook = WandbLoggerHook(log_artifact=True)
    data_loader = DataLoader(torch.ones((5, 2)))
    runner.register_hook(wandb_hook)
    runner.run([data_loader, data_loader], [('train', 1), ('val', 1)])
    shutil.rmtree(runner.work_dir)
    wandb_hook.wandb.init.assert_called_with()
    wandb_hook.wandb.log.assert_called_with({'learning_rate': 0.02, 'momentum': 0.95}, step=6, commit=True)
    wandb_hook.wandb.log_artifact.assert_called()
    wandb_hook.wandb.join.assert_called_with()
|
def test_neptune_hook():
    """NeptuneLoggerHook should init a run, log metrics, and stop the run."""
    sys.modules['neptune'] = MagicMock()
    sys.modules['neptune.new'] = MagicMock()
    runner = _build_demo_runner()
    neptune_hook = NeptuneLoggerHook()
    data_loader = DataLoader(torch.ones((5, 2)))
    runner.register_hook(neptune_hook)
    runner.run([data_loader, data_loader], [('train', 1), ('val', 1)])
    shutil.rmtree(runner.work_dir)
    neptune_hook.neptune.init.assert_called_with()
    neptune_hook.run['momentum'].log.assert_called_with(0.95, step=6)
    neptune_hook.run.stop.assert_called_with()
|
def test_dvclive_hook():
    """DvcliveLoggerHook should advance the step and log the final metrics."""
    sys.modules['dvclive'] = MagicMock()
    runner = _build_demo_runner()
    dvclive_hook = DvcliveLoggerHook()
    dvclive_mock = dvclive_hook.dvclive
    data_loader = DataLoader(torch.ones((5, 2)))
    runner.register_hook(dvclive_hook)
    runner.run([data_loader, data_loader], [('train', 1), ('val', 1)])
    shutil.rmtree(runner.work_dir)
    dvclive_mock.set_step.assert_called_with(6)
    dvclive_mock.log.assert_called_with('momentum', 0.95)
|
def test_dvclive_hook_model_file(tmp_path):
    """DvcliveLoggerHook with ``model_file`` set should write the model file
    into the runner's work_dir by the end of the run."""
    sys.modules['dvclive'] = MagicMock()
    runner = _build_demo_runner()
    hook = DvcliveLoggerHook(model_file=osp.join(runner.work_dir, 'model.pth'))
    runner.register_hook(hook)
    # NOTE: the original built an identical DataLoader twice in a row;
    # the dead first assignment was removed.
    loader = DataLoader(torch.ones((5, 2)))
    runner.run([loader, loader], [('train', 1), ('val', 1)])
    assert osp.exists(osp.join(runner.work_dir, 'model.pth'))
    shutil.rmtree(runner.work_dir)
|
def _build_demo_runner_without_hook(runner_type='EpochBasedRunner', max_epochs=1, max_iters=None, multi_optimizers=False):
    """Build a minimal runner with no hooks registered, for hook tests.

    Args:
        runner_type (str): Registry name of the runner class.
        max_epochs (int | None): Epoch budget for epoch-based runners.
        max_iters (int | None): Iteration budget for iter-based runners.
        multi_optimizers (bool): If True, use a dict of two SGD optimizers:
            'model1' (linear layer, lr=0.02) and 'model2' (conv layer,
            lr=0.01); otherwise a single SGD over all parameters.

    Returns:
        The runner built via ``build_runner`` with a temporary work_dir.
    """
    class Model(nn.Module):
        def __init__(self):
            super().__init__()
            self.linear = nn.Linear(2, 1)
            self.conv = nn.Conv2d(3, 3, 3)
        def forward(self, x):
            # Only the linear branch is used in the forward pass; the conv
            # layer exists to give the second optimizer its own parameters.
            return self.linear(x)
        def train_step(self, x, optimizer, **kwargs):
            return dict(loss=self(x))
        def val_step(self, x, optimizer, **kwargs):
            return dict(loss=self(x))

    demo_model = Model()
    if multi_optimizers:
        optimizer = {
            'model1': torch.optim.SGD(demo_model.linear.parameters(), lr=0.02, momentum=0.95),
            'model2': torch.optim.SGD(demo_model.conv.parameters(), lr=0.01, momentum=0.9),
        }
    else:
        optimizer = torch.optim.SGD(demo_model.parameters(), lr=0.02, momentum=0.95)
    work_dir = tempfile.mkdtemp()
    return build_runner(
        dict(type=runner_type),
        default_args=dict(model=demo_model, work_dir=work_dir, optimizer=optimizer, logger=logging.getLogger(), max_epochs=max_epochs, max_iters=max_iters))
|
def _build_demo_runner(runner_type='EpochBasedRunner', max_epochs=1, max_iters=None, multi_optimizers=False):
    """Build a demo runner with checkpoint and text-logger hooks registered."""
    runner = _build_demo_runner_without_hook(runner_type, max_epochs,
                                             max_iters, multi_optimizers)
    runner.register_checkpoint_hook(dict(interval=1))
    # Log every iteration through a plain text logger.
    runner.register_logger_hooks(
        dict(interval=1, hooks=[dict(type='TextLoggerHook')]))
    return runner
|
def test_runner_with_revise_keys():
    """``load_checkpoint`` with ``revise_keys`` should add/strip key prefixes."""
    import os

    class Model(nn.Module):

        def __init__(self):
            super().__init__()
            self.conv = nn.Conv2d(3, 3, 1)

    class PrefixModel(nn.Module):

        def __init__(self):
            super().__init__()
            self.backbone = Model()

    prefixed_model = PrefixModel()
    plain_model = Model()
    checkpoint_path = os.path.join(tempfile.gettempdir(), 'checkpoint.pth')

    # Case 1: load a plain checkpoint into a prefixed model by prepending
    # 'backbone.' to every key.
    torch.save(plain_model.state_dict(), checkpoint_path)
    runner = _build_demo_runner(runner_type='EpochBasedRunner')
    runner.model = prefixed_model
    state_dict = runner.load_checkpoint(
        checkpoint_path, revise_keys=[('^', 'backbone.')])
    for key, value in prefixed_model.backbone.state_dict().items():
        assert torch.equal(value, state_dict[key])

    # Case 2: load a prefixed checkpoint into a plain model by stripping
    # the 'backbone.' prefix again.
    torch.save(prefixed_model.state_dict(), checkpoint_path)
    runner.model = plain_model
    state_dict = runner.load_checkpoint(
        checkpoint_path, revise_keys=[('^backbone\\.', '')])
    plain_state = plain_model.state_dict()
    for key in state_dict.keys():
        key_stripped = re.sub('^backbone\\.', '', key)
        assert torch.equal(plain_state[key_stripped], state_dict[key])
    os.remove(checkpoint_path)
|
def test_get_triggered_stages():
    """``get_triggered_stages`` should expand generic stages into concrete ones.

    Overriding ``after_epoch`` triggers both ``after_train_epoch`` and
    ``after_val_epoch``.
    """

    class ToyHook(Hook):
        # Fix: the original defined these methods without ``self``; they
        # would raise TypeError if ever invoked on an instance.
        def before_run(self):
            pass

        def after_epoch(self):
            pass

    hook = ToyHook()
    expected_stages = ['before_run', 'after_train_epoch', 'after_val_epoch']
    assert (hook.get_triggered_stages() == expected_stages)
|
def test_gradient_cumulative_optimizer_hook():
    """GradientCumulativeOptimizerHook accumulating N small batches should
    produce the same parameters as OptimizerHook on the equivalent large
    batches."""
    class ToyModel(nn.Module):
        # Minimal 3->2 linear model; weights/bias fixed at 1.0 so every
        # runner built below starts from identical parameters.
        def __init__(self, with_norm=False):
            super().__init__()
            self.fp16_enabled = False
            self.fc = nn.Linear(3, 2)
            nn.init.constant_(self.fc.weight, 1.0)
            nn.init.constant_(self.fc.bias, 1.0)
            self.with_norm = with_norm
            if with_norm:
                self.norm = nn.BatchNorm1d(2)
        def forward(self, x):
            x = self.fc(x)
            if self.with_norm:
                x = self.norm(x)
            return x
        def train_step(self, x, optimizer, **kwargs):
            return dict(loss=self(x).mean(), num_samples=x.shape[0])
        def val_step(self, x, optimizer, **kwargs):
            return dict(loss=self(x).mean(), num_samples=x.shape[0])
    def build_toy_runner(config=dict(type='EpochBasedRunner', max_epochs=3)):
        # Fresh model + SGD runner in a temp work dir for each scenario.
        model = ToyModel()
        optimizer = torch.optim.SGD(model.parameters(), lr=0.02)
        tmp_dir = tempfile.mkdtemp()
        runner = build_runner(config, default_args=dict(model=model, work_dir=tmp_dir, optimizer=optimizer, logger=logging.getLogger(), meta=dict()))
        return runner
    # Invalid cumulative_iters values (non-int, negative) must be rejected.
    with pytest.raises(AssertionError):
        GradientCumulativeOptimizerHook(cumulative_iters='str')
    with pytest.raises(AssertionError):
        GradientCumulativeOptimizerHook(cumulative_iters=(- 1))
    # Case 1 (epoch-based): batch_size=1 with cumulative_iters=3 should
    # match batch_size=3 with a plain OptimizerHook on the same data.
    data = torch.rand((6, 3))
    loader_1 = DataLoader(data, batch_size=1)
    runner_1 = build_toy_runner()
    optimizer_hook = GradientCumulativeOptimizerHook(grad_clip=dict(max_norm=0.2), cumulative_iters=3)
    runner_1.register_hook(optimizer_hook)
    runner_1.run([loader_1], [('train', 1)])
    loader_2 = DataLoader(data, batch_size=3)
    runner_2 = build_toy_runner()
    optimizer_hook = OptimizerHook(grad_clip=dict(max_norm=0.2))
    runner_2.register_hook(optimizer_hook)
    runner_2.run([loader_2], [('train', 1)])
    # Training must have moved the parameters below their 1.0 init, and
    # both runners must end up with the same values.
    assert (runner_1.model.fc.weight < 1).all()
    assert (runner_1.model.fc.bias < 1).all()
    assert torch.allclose(runner_1.model.fc.weight, runner_2.model.fc.weight)
    assert torch.allclose(runner_1.model.fc.bias, runner_2.model.fc.bias)
    shutil.rmtree(runner_1.work_dir)
    shutil.rmtree(runner_2.work_dir)
    # Case 2 (iter-based): 8 samples do not divide evenly by
    # cumulative_iters=3; the reference runner consumes the same data as
    # two batches of 3 followed by one remainder batch of 2.
    data = torch.rand((8, 3))
    loader_1 = DataLoader(data, batch_size=1)
    runner_1 = build_toy_runner(dict(type='IterBasedRunner', max_iters=8))
    optimizer_hook = GradientCumulativeOptimizerHook(grad_clip=dict(max_norm=0.2), cumulative_iters=3)
    runner_1.register_hook(optimizer_hook)
    runner_1.run([loader_1], [('train', 1)])
    loader_2_divisible = DataLoader(data[:6], batch_size=3)
    loader_2_remainder = DataLoader(data[6:], batch_size=2)
    runner_2 = build_toy_runner(dict(type='IterBasedRunner', max_iters=3))
    optimizer_hook = OptimizerHook(grad_clip=dict(max_norm=0.2))
    runner_2.register_hook(optimizer_hook)
    runner_2.run([loader_2_divisible, loader_2_remainder], [('train', 2), ('train', 1)])
    assert (runner_1.model.fc.weight < 1).all()
    assert (runner_1.model.fc.bias < 1).all()
    assert torch.allclose(runner_1.model.fc.weight, runner_2.model.fc.weight)
    assert torch.allclose(runner_1.model.fc.bias, runner_2.model.fc.bias)
    shutil.rmtree(runner_1.work_dir)
    shutil.rmtree(runner_2.work_dir)
    # has_batch_norm should detect the BatchNorm submodule.
    model = ToyModel(with_norm=True)
    optimizer_hook = GradientCumulativeOptimizerHook(grad_clip=dict(max_norm=0.2), cumulative_iters=3)
    assert optimizer_hook.has_batch_norm(model)
|
@pytest.mark.skipif((not torch.cuda.is_available()), reason='requires CUDA support')
def test_gradient_cumulative_fp16_optimizer_hook():
    """GradientCumulativeFp16OptimizerHook accumulating small batches should
    match Fp16OptimizerHook on the equivalent large batches (CUDA only)."""
    class ToyModel(nn.Module):
        # Minimal 3->2 linear model; parameters fixed at 1.0 so every
        # runner starts identically. forward inputs are cast by auto_fp16.
        def __init__(self):
            super().__init__()
            self.fp16_enabled = False
            self.fc = nn.Linear(3, 2)
            nn.init.constant_(self.fc.weight, 1.0)
            nn.init.constant_(self.fc.bias, 1.0)
        @auto_fp16(apply_to=('x',))
        def forward(self, x):
            x = self.fc(x)
            return x
        def train_step(self, x, optimizer, **kwargs):
            return dict(loss=self(x).mean(), num_samples=x.shape[0])
        def val_step(self, x, optimizer, **kwargs):
            return dict(loss=self(x).mean(), num_samples=x.shape[0])
    def build_toy_runner(config=dict(type='EpochBasedRunner', max_epochs=3)):
        # Fresh CUDA model + SGD runner in a temp work dir per scenario.
        model = ToyModel().cuda()
        optimizer = torch.optim.SGD(model.parameters(), lr=0.02)
        tmp_dir = tempfile.mkdtemp()
        runner = build_runner(config, default_args=dict(model=model, work_dir=tmp_dir, optimizer=optimizer, logger=logging.getLogger(), meta=dict()))
        return runner
    # Case 1 (epoch-based): batch_size=1 with cumulative_iters=3 should
    # match batch_size=3 with a plain Fp16OptimizerHook on the same data.
    data = torch.rand((6, 3)).cuda()
    loader_1 = DataLoader(data, batch_size=1)
    runner_1 = build_toy_runner()
    optimizer_hook = GradientCumulativeFp16OptimizerHook(grad_clip=dict(max_norm=0.2), cumulative_iters=3)
    runner_1.register_hook(optimizer_hook)
    runner_1.run([loader_1], [('train', 1)])
    loader_2 = DataLoader(data, batch_size=3)
    runner_2 = build_toy_runner()
    optimizer_hook = Fp16OptimizerHook(grad_clip=dict(max_norm=0.2))
    runner_2.register_hook(optimizer_hook)
    runner_2.run([loader_2], [('train', 1)])
    # Training must have moved the parameters below their 1.0 init, and
    # both runners must end up with the same values.
    assert (runner_1.model.fc.weight < 1).all()
    assert (runner_1.model.fc.bias < 1).all()
    assert torch.allclose(runner_1.model.fc.weight, runner_2.model.fc.weight)
    assert torch.allclose(runner_1.model.fc.bias, runner_2.model.fc.bias)
    shutil.rmtree(runner_1.work_dir)
    shutil.rmtree(runner_2.work_dir)
    # Case 2 (iter-based): 8 samples do not divide evenly by
    # cumulative_iters=3; the reference runner consumes the same data as
    # two batches of 3 followed by one remainder batch of 2.
    data = torch.rand((8, 3)).cuda()
    loader_1 = DataLoader(data, batch_size=1)
    runner_1 = build_toy_runner(dict(type='IterBasedRunner', max_iters=8))
    optimizer_hook = GradientCumulativeFp16OptimizerHook(grad_clip=dict(max_norm=0.2), cumulative_iters=3)
    runner_1.register_hook(optimizer_hook)
    runner_1.run([loader_1], [('train', 1)])
    loader_2_divisible = DataLoader(data[:6], batch_size=3)
    loader_2_remainder = DataLoader(data[6:], batch_size=2)
    runner_2 = build_toy_runner(dict(type='IterBasedRunner', max_iters=3))
    optimizer_hook = Fp16OptimizerHook(grad_clip=dict(max_norm=0.2))
    runner_2.register_hook(optimizer_hook)
    runner_2.run([loader_2_divisible, loader_2_remainder], [('train', 2), ('train', 1)])
    assert (runner_1.model.fc.weight < 1).all()
    assert (runner_1.model.fc.bias < 1).all()
    assert torch.allclose(runner_1.model.fc.weight, runner_2.model.fc.weight)
    assert torch.allclose(runner_1.model.fc.bias, runner_2.model.fc.bias)
    shutil.rmtree(runner_1.work_dir)
    shutil.rmtree(runner_2.work_dir)
|
class SubModel(nn.Module):
    """Small fixture submodule with a grouped conv, a GroupNorm and a bare
    parameter.

    The forward pass is an identity; only the registered parameters and
    their registration order matter to the tests that use this fixture
    (order is preserved here deliberately).
    """
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(2, 2, kernel_size=1, groups=2)
        self.gn = nn.GroupNorm(2, 2)
        self.param1 = nn.Parameter(torch.ones(1))
    def forward(self, x):
        # Identity: input is returned unchanged.
        return x
|
class ExampleModel(nn.Module):
    """Fixture model mixing a bare parameter, convs, a BatchNorm and a
    nested ``SubModel``.

    When the compiled mmcv ops are available (``OPS_AVAILABLE``), a
    ``DeformConv2dPack`` layer is added as well. The forward pass is an
    identity; only the parameter tree matters to the tests using this
    fixture.
    """
    def __init__(self):
        super().__init__()
        self.param1 = nn.Parameter(torch.ones(1))
        self.conv1 = nn.Conv2d(3, 4, kernel_size=1, bias=False)
        self.conv2 = nn.Conv2d(4, 2, kernel_size=1)
        self.bn = nn.BatchNorm2d(2)
        self.sub = SubModel()
        if OPS_AVAILABLE:
            from mmcv.ops import DeformConv2dPack
            self.dcn = DeformConv2dPack(3, 4, kernel_size=3, deformable_groups=1)
    def forward(self, x):
        # Identity: input is returned unchanged.
        return x
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.