# Tests for runner hooks, eval hooks, dist utilities and fp16 utilities.
@patch('torch.cuda.device_count', return_value=1)
@patch('torch.cuda.set_device')
@patch('torch.distributed.init_process_group')
@patch('subprocess.getoutput', return_value='127.0.0.1')
def test_init_dist(mock_getoutput, mock_dist_init, mock_set_device, mock_device_count):
    """Test ``init_dist`` with the slurm launcher.

    CUDA device selection, ``torch.distributed`` initialization and the
    ``scontrol`` subprocess call are all mocked, so no GPU or cluster is
    required.
    """
    # An unknown launcher name must be rejected.
    with pytest.raises(ValueError):
        init_dist('invaliad_launcher')
    # Fake a single-task slurm allocation through the environment.
    os.environ['SLURM_PROCID'] = '0'
    os.environ['SLURM_NTASKS'] = '1'
    os.environ['SLURM_NODELIST'] = '[0]'
    init_dist('slurm')
    # Default master port is 29500; the master address comes from the
    # mocked `subprocess.getoutput` (returns '127.0.0.1').
    assert (os.environ['MASTER_PORT'] == '29500')
    assert (os.environ['MASTER_ADDR'] == '127.0.0.1')
    assert (os.environ['WORLD_SIZE'] == '1')
    assert (os.environ['RANK'] == '0')
    mock_set_device.assert_called_with(0)
    mock_getoutput.assert_called_with('scontrol show hostname [0] | head -n1')
    mock_dist_init.assert_called_with(backend='nccl')
    # An explicit `port` argument overrides the default.
    init_dist('slurm', port=29505)
    assert (os.environ['MASTER_PORT'] == '29505')
    assert (os.environ['MASTER_ADDR'] == '127.0.0.1')
    assert (os.environ['WORLD_SIZE'] == '1')
    assert (os.environ['RANK'] == '0')
    mock_set_device.assert_called_with(0)
    mock_getoutput.assert_called_with('scontrol show hostname [0] | head -n1')
    mock_dist_init.assert_called_with(backend='nccl')
    # Re-initialising without `port` keeps the MASTER_PORT already present
    # in the environment (29505) rather than resetting it to 29500.
    init_dist('slurm')
    assert (os.environ['MASTER_PORT'] == '29505')
    assert (os.environ['MASTER_ADDR'] == '127.0.0.1')
    assert (os.environ['WORLD_SIZE'] == '1')
    assert (os.environ['RANK'] == '0')
    mock_set_device.assert_called_with(0)
    mock_getoutput.assert_called_with('scontrol show hostname [0] | head -n1')
    mock_dist_init.assert_called_with(backend='nccl')
|
class ExampleDataset(Dataset):
    """Minimal single-sample dataset used by the eval-hook tests.

    Every item is ``{'x': tensor([1])}``.  ``evaluate`` is replaced by an
    autospec mock so tests can assert how the eval hook calls it, and
    ``eval_result`` holds the per-call scores consumed by subclasses.
    """

    def __init__(self):
        # Position of the next score handed out by EvalDataset.evaluate.
        self.index = 0
        self.eval_result = [1, 4, 3, 7, 2, -3, 4, 6]

    def __getitem__(self, idx):
        return {'x': torch.tensor([1])}

    def __len__(self):
        return 1

    @mock.create_autospec
    def evaluate(self, results, logger=None):
        pass
|
class EvalDataset(ExampleDataset):
    """Dataset whose ``evaluate`` emits one score from ``eval_result`` per call.

    All four metric keys share the same value so both greater- and
    less-is-better rules can be exercised on the same sequence.
    """

    def evaluate(self, results, logger=None):
        score = self.eval_result[self.index]
        metrics = OrderedDict(acc=score, index=self.index, score=score, loss_top=score)
        self.index += 1
        return metrics
|
class Model(nn.Module):
    """Toy model: scales its input by one learnable scalar (initialised to 1)."""

    def __init__(self):
        super().__init__()
        self.param = nn.Parameter(torch.tensor([1.0]))

    def forward(self, x, **kwargs):
        return self.param * x

    def train_step(self, data_batch, optimizer, **kwargs):
        loss = self(data_batch['x']).sum()
        return {'loss': loss}

    def val_step(self, data_batch, optimizer, **kwargs):
        loss = self(data_batch['x']).sum()
        return {'loss': loss}
|
def _build_epoch_runner():
    """Return an ``EpochBasedRunner`` over a fresh toy ``Model`` in a temp dir."""
    work_dir = tempfile.mkdtemp()
    return EpochBasedRunner(
        model=Model(), work_dir=work_dir, logger=get_logger('demo'))
|
def _build_iter_runner():
    """Return an ``IterBasedRunner`` over a fresh toy ``Model`` in a temp dir."""
    work_dir = tempfile.mkdtemp()
    return IterBasedRunner(
        model=Model(), work_dir=work_dir, logger=get_logger('demo'))
|
class EvalHook(BaseEvalHook):
    """Eval hook with test-specific key prefixes for automatic rule inference."""

    # Keys treated as greater-is-better / less-is-better when `rule` is
    # not given explicitly.
    _default_greater_keys = ['acc', 'top']
    _default_less_keys = ['loss', 'loss_top']

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
|
class DistEvalHook(BaseDistEvalHook):
    """Distributed eval hook with test-specific rule-inference key lists."""

    # NOTE: these are set as `greater_keys`/`less_keys` (no underscore
    # prefix), unlike EvalHook above which overrides the `_default_*` names.
    greater_keys = ['acc', 'top']
    less_keys = ['loss', 'loss_top']

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
|
def test_eval_hook():
    """End-to-end test of ``EvalHook``.

    Covers argument validation, the empty-eval-result warning, best-checkpoint
    saving under inferred and explicit rules, resuming with a preserved best
    score, custom greater/less key lists, and saving to a remote (Petrel)
    ``out_dir``.  The score sequence comes from ``ExampleDataset.eval_result``
    = [1, 4, 3, 7, 2, -3, 4, 6] (one entry consumed per evaluation).
    """
    # `save_best` must be a string (or None), not a bool.
    with pytest.raises(AssertionError):
        test_dataset = Model()
        data_loader = DataLoader(test_dataset)
        EvalHook(data_loader, save_best=True)
    # `dataloader` must be a DataLoader, not a list of loaders.
    with pytest.raises(TypeError):
        test_dataset = Model()
        data_loader = [DataLoader(test_dataset)]
        EvalHook(data_loader)
    # A `save_best` key with no inferable rule is rejected.
    with pytest.raises(ValueError):
        test_dataset = ExampleDataset()
        data_loader = DataLoader(test_dataset)
        EvalHook(data_loader, save_best='unsupport')
    # An unknown explicit `rule` is rejected.
    with pytest.raises(KeyError):
        test_dataset = ExampleDataset()
        data_loader = DataLoader(test_dataset)
        EvalHook(data_loader, save_best='auto', rule='unsupport')
    # When evaluate() returns an empty dict, best-ckpt saving is skipped
    # with a warning instead of failing.
    with pytest.warns(UserWarning) as record_warnings:
        class _EvalDataset(ExampleDataset):
            def evaluate(self, results, logger=None):
                return {}
        test_dataset = _EvalDataset()
        data_loader = DataLoader(test_dataset)
        eval_hook = EvalHook(data_loader, save_best='auto')
        runner = _build_epoch_runner()
        runner.register_hook(eval_hook)
        runner.run([data_loader], [('train', 1)], 1)
    expected_message = 'Since `eval_res` is an empty dict, the behavior to save the best checkpoint will be skipped in this evaluation.'
    # The expected warning must appear somewhere among the recorded ones.
    for warning in record_warnings:
        if (str(warning.message) == expected_message):
            break
    else:
        assert False
    # save_best=None: evaluation runs but no best checkpoint is tracked.
    test_dataset = ExampleDataset()
    loader = DataLoader(test_dataset)
    model = Model()
    data_loader = DataLoader(test_dataset)
    eval_hook = EvalHook(data_loader, save_best=None)
    with tempfile.TemporaryDirectory() as tmpdir:
        logger = get_logger('test_eval')
        runner = EpochBasedRunner(model=model, work_dir=tmpdir, logger=logger)
        runner.register_hook(eval_hook)
        runner.run([loader], [('train', 1)], 1)
        # The autospec'd evaluate receives the collected results and logger.
        test_dataset.evaluate.assert_called_with(test_dataset, [torch.tensor([1])], logger=runner.logger)
        assert ((runner.meta is None) or ('best_score' not in runner.meta['hook_msgs']))
        assert ((runner.meta is None) or ('best_ckpt' not in runner.meta['hook_msgs']))
    # save_best='auto': first metric key ('acc') is picked; max value 7
    # occurs at index 3, i.e. epoch 4.
    loader = DataLoader(EvalDataset())
    model = Model()
    data_loader = DataLoader(EvalDataset())
    eval_hook = EvalHook(data_loader, interval=1, save_best='auto')
    with tempfile.TemporaryDirectory() as tmpdir:
        logger = get_logger('test_eval')
        runner = EpochBasedRunner(model=model, work_dir=tmpdir, logger=logger)
        runner.register_checkpoint_hook(dict(interval=1))
        runner.register_hook(eval_hook)
        runner.run([loader], [('train', 1)], 8)
        ckpt_path = osp.join(tmpdir, 'best_acc_epoch_4.pth')
        assert (runner.meta['hook_msgs']['best_ckpt'] == ckpt_path)
        assert osp.exists(ckpt_path)
        assert (runner.meta['hook_msgs']['best_score'] == 7)
    # save_best='acc' explicitly: same outcome as 'auto'.
    loader = DataLoader(EvalDataset())
    model = Model()
    data_loader = DataLoader(EvalDataset())
    eval_hook = EvalHook(data_loader, interval=1, save_best='acc')
    with tempfile.TemporaryDirectory() as tmpdir:
        logger = get_logger('test_eval')
        runner = EpochBasedRunner(model=model, work_dir=tmpdir, logger=logger)
        runner.register_checkpoint_hook(dict(interval=1))
        runner.register_hook(eval_hook)
        runner.run([loader], [('train', 1)], 8)
        ckpt_path = osp.join(tmpdir, 'best_acc_epoch_4.pth')
        assert (runner.meta['hook_msgs']['best_ckpt'] == ckpt_path)
        assert osp.exists(ckpt_path)
        assert (runner.meta['hook_msgs']['best_score'] == 7)
    # 'loss_top' is in the less-is-better list: min value -3 at epoch 6.
    loader = DataLoader(EvalDataset())
    model = Model()
    data_loader = DataLoader(EvalDataset())
    eval_hook = EvalHook(data_loader, interval=1, save_best='loss_top')
    with tempfile.TemporaryDirectory() as tmpdir:
        logger = get_logger('test_eval')
        runner = EpochBasedRunner(model=model, work_dir=tmpdir, logger=logger)
        runner.register_checkpoint_hook(dict(interval=1))
        runner.register_hook(eval_hook)
        runner.run([loader], [('train', 1)], 8)
        ckpt_path = osp.join(tmpdir, 'best_loss_top_epoch_6.pth')
        assert (runner.meta['hook_msgs']['best_ckpt'] == ckpt_path)
        assert osp.exists(ckpt_path)
        assert (runner.meta['hook_msgs']['best_score'] == (- 3))
    # Explicit rule='greater' on a custom key ('score').
    data_loader = DataLoader(EvalDataset())
    eval_hook = EvalHook(data_loader, interval=1, save_best='score', rule='greater')
    with tempfile.TemporaryDirectory() as tmpdir:
        logger = get_logger('test_eval')
        runner = EpochBasedRunner(model=model, work_dir=tmpdir, logger=logger)
        runner.register_checkpoint_hook(dict(interval=1))
        runner.register_hook(eval_hook)
        runner.run([loader], [('train', 1)], 8)
        ckpt_path = osp.join(tmpdir, 'best_score_epoch_4.pth')
        assert (runner.meta['hook_msgs']['best_ckpt'] == ckpt_path)
        assert osp.exists(ckpt_path)
        assert (runner.meta['hook_msgs']['best_score'] == 7)
    # Explicit rule='less' overrides the default greater rule for 'acc'.
    data_loader = DataLoader(EvalDataset())
    eval_hook = EvalHook(data_loader, save_best='acc', rule='less')
    with tempfile.TemporaryDirectory() as tmpdir:
        logger = get_logger('test_eval')
        runner = EpochBasedRunner(model=model, work_dir=tmpdir, logger=logger)
        runner.register_checkpoint_hook(dict(interval=1))
        runner.register_hook(eval_hook)
        runner.run([loader], [('train', 1)], 8)
        ckpt_path = osp.join(tmpdir, 'best_acc_epoch_6.pth')
        assert (runner.meta['hook_msgs']['best_ckpt'] == ckpt_path)
        assert osp.exists(ckpt_path)
        assert (runner.meta['hook_msgs']['best_score'] == (- 3))
    # Resume: best score/ckpt survive a resume, and a new best checkpoint
    # replaces (deletes) the old one.
    data_loader = DataLoader(EvalDataset())
    eval_hook = EvalHook(data_loader, save_best='acc')
    with tempfile.TemporaryDirectory() as tmpdir:
        logger = get_logger('test_eval')
        runner = EpochBasedRunner(model=model, work_dir=tmpdir, logger=logger)
        runner.register_checkpoint_hook(dict(interval=1))
        runner.register_hook(eval_hook)
        runner.run([loader], [('train', 1)], 2)
        # After 2 epochs the best acc is 4 (epoch 2).
        old_ckpt_path = osp.join(tmpdir, 'best_acc_epoch_2.pth')
        assert (runner.meta['hook_msgs']['best_ckpt'] == old_ckpt_path)
        assert osp.exists(old_ckpt_path)
        assert (runner.meta['hook_msgs']['best_score'] == 4)
        resume_from = old_ckpt_path
        loader = DataLoader(ExampleDataset())
        eval_hook = EvalHook(data_loader, save_best='acc')
        runner = EpochBasedRunner(model=model, work_dir=tmpdir, logger=logger)
        runner.register_checkpoint_hook(dict(interval=1))
        runner.register_hook(eval_hook)
        runner.resume(resume_from)
        assert (runner.meta['hook_msgs']['best_ckpt'] == old_ckpt_path)
        assert osp.exists(old_ckpt_path)
        assert (runner.meta['hook_msgs']['best_score'] == 4)
        runner.run([loader], [('train', 1)], 8)
        # Continuing to epoch 8 finds acc=7 at epoch 4; the old best ckpt
        # must have been removed.
        ckpt_path = osp.join(tmpdir, 'best_acc_epoch_4.pth')
        assert (runner.meta['hook_msgs']['best_ckpt'] == ckpt_path)
        assert osp.exists(ckpt_path)
        assert (runner.meta['hook_msgs']['best_score'] == 7)
        assert (not osp.exists(old_ckpt_path))
    # User-supplied greater_keys/less_keys override the class defaults:
    # here 'acc' is declared less-is-better, so the minimum (-3) wins.
    loader = DataLoader(EvalDataset())
    model = Model()
    data_loader = DataLoader(EvalDataset())
    eval_hook = EvalHook(data_loader, save_best='acc', test_fn=mock.MagicMock(return_value={}), greater_keys=[], less_keys=['acc'])
    with tempfile.TemporaryDirectory() as tmpdir:
        logger = get_logger('test_eval')
        runner = EpochBasedRunner(model=model, work_dir=tmpdir, logger=logger)
        runner.register_checkpoint_hook(dict(interval=1))
        runner.register_hook(eval_hook)
        runner.run([loader], [('train', 1)], 8)
        ckpt_path = osp.join(tmpdir, 'best_acc_epoch_6.pth')
        assert (runner.meta['hook_msgs']['best_ckpt'] == ckpt_path)
        assert osp.exists(ckpt_path)
        assert (runner.meta['hook_msgs']['best_score'] == (- 3))
    # Saving the best checkpoint to a remote (Petrel) out_dir: file ops go
    # through the mocked backend instead of the local filesystem.
    loader = DataLoader(EvalDataset())
    model = Model()
    data_loader = DataLoader(EvalDataset())
    out_dir = 's3://user/data'
    eval_hook = EvalHook(data_loader, interval=1, save_best='auto', out_dir=out_dir)
    with patch.object(PetrelBackend, 'put') as mock_put, patch.object(PetrelBackend, 'remove') as mock_remove, patch.object(PetrelBackend, 'isfile') as mock_isfile, tempfile.TemporaryDirectory() as tmpdir:
        logger = get_logger('test_eval')
        runner = EpochBasedRunner(model=model, work_dir=tmpdir, logger=logger)
        runner.register_checkpoint_hook(dict(interval=1))
        runner.register_hook(eval_hook)
        runner.run([loader], [('train', 1)], 8)
        basename = osp.basename(runner.work_dir.rstrip(osp.sep))
        ckpt_path = f'{out_dir}/{basename}/best_acc_epoch_4.pth'
        assert (runner.meta['hook_msgs']['best_ckpt'] == ckpt_path)
        assert (runner.meta['hook_msgs']['best_score'] == 7)
        assert (mock_put.call_count == 3)
        assert (mock_remove.call_count == 2)
        assert (mock_isfile.call_count == 2)
|
@patch('mmcv.engine.single_gpu_test', MagicMock)
@patch('mmcv.engine.multi_gpu_test', MagicMock)
@pytest.mark.parametrize('EvalHookParam', [EvalHook, DistEvalHook])
@pytest.mark.parametrize('_build_demo_runner,by_epoch', [(_build_epoch_runner, True), (_build_iter_runner, False)])
def test_start_param(EvalHookParam, _build_demo_runner, by_epoch):
    """Check how `start` and `interval` control when evaluation fires, for
    both eval hooks and both epoch-based and iter-based runners."""
    dataloader = DataLoader(EvalDataset())
    # `dataloader` must be a real DataLoader.
    with pytest.raises(TypeError):
        EvalHookParam(dataloader=MagicMock(), interval=(- 1))
    # `interval` must be positive.
    with pytest.raises(ValueError):
        EvalHookParam(dataloader, interval=(- 1))
    # `start` must be non-negative.
    with pytest.raises(ValueError):
        EvalHookParam(dataloader, start=(- 1))
    # 1. start=None, interval=1: evaluate after each of the 2 units.
    runner = _build_demo_runner()
    evalhook = EvalHookParam(dataloader, interval=1, by_epoch=by_epoch)
    evalhook.evaluate = MagicMock()
    runner.register_hook(evalhook)
    runner.run([dataloader], [('train', 1)], 2)
    assert (evalhook.evaluate.call_count == 2)
    # 2. start=1, interval=1: same two evaluations.
    runner = _build_demo_runner()
    evalhook = EvalHookParam(dataloader, start=1, interval=1, by_epoch=by_epoch)
    evalhook.evaluate = MagicMock()
    runner.register_hook(evalhook)
    runner.run([dataloader], [('train', 1)], 2)
    assert (evalhook.evaluate.call_count == 2)
    # 3. start=None, interval=2: only one evaluation in 2 units.
    runner = _build_demo_runner()
    evalhook = EvalHookParam(dataloader, interval=2, by_epoch=by_epoch)
    evalhook.evaluate = MagicMock()
    runner.register_hook(evalhook)
    runner.run([dataloader], [('train', 1)], 2)
    assert (evalhook.evaluate.call_count == 1)
    # 4. start=1, interval=2: evaluations at units 1 and 3.
    runner = _build_demo_runner()
    evalhook = EvalHookParam(dataloader, start=1, interval=2, by_epoch=by_epoch)
    evalhook.evaluate = MagicMock()
    runner.register_hook(evalhook)
    runner.run([dataloader], [('train', 1)], 3)
    assert (evalhook.evaluate.call_count == 2)
    # 5. start=0: an additional evaluation runs at unit 0, so 3 in total.
    runner = _build_demo_runner()
    evalhook = EvalHookParam(dataloader, start=0, by_epoch=by_epoch)
    evalhook.evaluate = MagicMock()
    runner.register_hook(evalhook)
    runner.run([dataloader], [('train', 1)], 2)
    assert (evalhook.evaluate.call_count == 3)
    # 6. Resuming from unit 2 with start=1: 2 evaluations are recorded.
    runner = _build_demo_runner()
    evalhook = EvalHookParam(dataloader, start=1, by_epoch=by_epoch)
    evalhook.evaluate = MagicMock()
    runner.register_hook(evalhook)
    if by_epoch:
        runner._epoch = 2
    else:
        runner._iter = 2
    runner.run([dataloader], [('train', 1)], 3)
    assert (evalhook.evaluate.call_count == 2)
    # 7. Resuming from unit 1 with start=2: evaluation begins at unit 2,
    # giving 2 evaluations by unit 3.
    runner = _build_demo_runner()
    evalhook = EvalHookParam(dataloader, start=2, by_epoch=by_epoch)
    evalhook.evaluate = MagicMock()
    runner.register_hook(evalhook)
    if by_epoch:
        runner._epoch = 1
    else:
        runner._iter = 1
    runner.run([dataloader], [('train', 1)], 3)
    assert (evalhook.evaluate.call_count == 2)
|
@pytest.mark.parametrize('runner,by_epoch,eval_hook_priority', [(EpochBasedRunner, True, 'NORMAL'), (EpochBasedRunner, True, 'LOW'), (IterBasedRunner, False, 'LOW')])
def test_logger(runner, by_epoch, eval_hook_priority):
    """Check train and val records are logged separately: the val json record
    must not carry the iter-timer `time` field from training."""
    loader = DataLoader(EvalDataset())
    model = Model()
    data_loader = DataLoader(EvalDataset())
    eval_hook = EvalHook(data_loader, interval=1, by_epoch=by_epoch, save_best='acc')
    with tempfile.TemporaryDirectory() as tmpdir:
        logger = get_logger('test_logger')
        optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
        # NOTE(review): the parametrized `runner` class is shadowed here —
        # an EpochBasedRunner is always constructed regardless of the
        # parameter. TODO confirm whether `runner(...)` was intended.
        runner = EpochBasedRunner(model=model, optimizer=optimizer, work_dir=tmpdir, logger=logger)
        runner.register_logger_hooks(dict(interval=1, hooks=[dict(type='TextLoggerHook', by_epoch=by_epoch)]))
        runner.register_timer_hook(dict(type='IterTimerHook'))
        runner.register_hook(eval_hook, priority=eval_hook_priority)
        runner.run([loader], [('train', 1)], 1)
        # TextLoggerHook writes one json record per line into the work dir.
        path = osp.join(tmpdir, next(scandir(tmpdir, '.json')))
        with open(path) as fr:
            fr.readline()  # discard the first line before the log records
            train_log = json.loads(fr.readline())
            assert ((train_log['mode'] == 'train') and ('time' in train_log))
            val_log = json.loads(fr.readline())
            assert ((val_log['mode'] == 'val') and ('time' not in val_log))
|
def test_cast_tensor_type():
    """Check ``cast_tensor_type`` across tensors, containers and pass-throughs."""
    # A tensor whose dtype matches `src_type` is converted to `dst_type`.
    out = cast_tensor_type(torch.FloatTensor([5.0]), torch.float32, torch.int32)
    assert isinstance(out, torch.Tensor)
    assert out.dtype == torch.int32

    out = cast_tensor_type(torch.FloatTensor([5.0]), torch.float, torch.half)
    assert isinstance(out, torch.Tensor)
    assert out.dtype == torch.half

    # A tensor of a different dtype is returned unchanged.
    int_tensor = torch.IntTensor([5])
    out = cast_tensor_type(int_tensor, torch.float, torch.half)
    assert isinstance(out, torch.Tensor)
    assert out.dtype == int_tensor.dtype

    # Non-tensor inputs keep their original type.
    assert isinstance(cast_tensor_type('tensor', str, str), str)
    assert isinstance(
        cast_tensor_type(np.array([5.0]), np.ndarray, np.ndarray), np.ndarray)

    # Dicts are converted value-wise.
    mapping = dict(
        tensor_a=torch.FloatTensor([1.0]), tensor_b=torch.FloatTensor([2.0]))
    out = cast_tensor_type(mapping, torch.float32, torch.int32)
    assert isinstance(out, dict)
    assert out['tensor_a'].dtype == torch.int32
    assert out['tensor_b'].dtype == torch.int32

    # Lists are converted element-wise.
    seq = [torch.FloatTensor([1.0]), torch.FloatTensor([2.0])]
    out = cast_tensor_type(seq, torch.float32, torch.int32)
    assert isinstance(out, list)
    assert out[0].dtype == torch.int32
    assert out[1].dtype == torch.int32

    # Unrecognised scalar types pass straight through.
    assert isinstance(cast_tensor_type(5, None, None), int)
|
def test_auto_fp16():
    """Test the ``auto_fp16`` decorator: no-op until ``fp16_enabled`` is set,
    selective casting via ``apply_to``, and output recasting via ``out_fp32``."""
    # Applying auto_fp16 outside an nn.Module raises TypeError when called.
    with pytest.raises(TypeError):
        class ExampleObject(object):
            @auto_fp16()
            def __call__(self, x):
                return x
        model = ExampleObject()
        input_x = torch.ones(1, dtype=torch.float32)
        model(input_x)
    # Default: all args are cast to half, but only once fp16_enabled is True.
    class ExampleModule(nn.Module):
        @auto_fp16()
        def forward(self, x, y):
            return (x, y)
    model = ExampleModule()
    input_x = torch.ones(1, dtype=torch.float32)
    input_y = torch.ones(1, dtype=torch.float32)
    (output_x, output_y) = model(input_x, input_y)
    assert (output_x.dtype == torch.float32)
    assert (output_y.dtype == torch.float32)
    model.fp16_enabled = True
    (output_x, output_y) = model(input_x, input_y)
    assert (output_x.dtype == torch.half)
    assert (output_y.dtype == torch.half)
    if torch.cuda.is_available():
        model.cuda()
        (output_x, output_y) = model(input_x.cuda(), input_y.cuda())
        assert (output_x.dtype == torch.half)
        assert (output_y.dtype == torch.half)
    # apply_to=('x',): only `x` is cast; `y` keeps fp32.
    class ExampleModule(nn.Module):
        @auto_fp16(apply_to=('x',))
        def forward(self, x, y):
            return (x, y)
    model = ExampleModule()
    input_x = torch.ones(1, dtype=torch.float32)
    input_y = torch.ones(1, dtype=torch.float32)
    (output_x, output_y) = model(input_x, input_y)
    assert (output_x.dtype == torch.float32)
    assert (output_y.dtype == torch.float32)
    model.fp16_enabled = True
    (output_x, output_y) = model(input_x, input_y)
    assert (output_x.dtype == torch.half)
    assert (output_y.dtype == torch.float32)
    if torch.cuda.is_available():
        model.cuda()
        (output_x, output_y) = model(input_x.cuda(), input_y.cuda())
        assert (output_x.dtype == torch.half)
        assert (output_y.dtype == torch.float32)
    # apply_to also covers keyword arguments; `z` is untouched.
    class ExampleModule(nn.Module):
        @auto_fp16(apply_to=('x', 'y'))
        def forward(self, x, y=None, z=None):
            return (x, y, z)
    model = ExampleModule()
    input_x = torch.ones(1, dtype=torch.float32)
    input_y = torch.ones(1, dtype=torch.float32)
    input_z = torch.ones(1, dtype=torch.float32)
    (output_x, output_y, output_z) = model(input_x, y=input_y, z=input_z)
    assert (output_x.dtype == torch.float32)
    assert (output_y.dtype == torch.float32)
    assert (output_z.dtype == torch.float32)
    model.fp16_enabled = True
    (output_x, output_y, output_z) = model(input_x, y=input_y, z=input_z)
    assert (output_x.dtype == torch.half)
    assert (output_y.dtype == torch.half)
    assert (output_z.dtype == torch.float32)
    if torch.cuda.is_available():
        model.cuda()
        (output_x, output_y, output_z) = model(input_x.cuda(), y=input_y.cuda(), z=input_z.cuda())
        assert (output_x.dtype == torch.half)
        assert (output_y.dtype == torch.half)
        assert (output_z.dtype == torch.float32)
    # out_fp32=True: returned tensors are cast back to fp32.
    class ExampleModule(nn.Module):
        @auto_fp16(apply_to=('x', 'y'), out_fp32=True)
        def forward(self, x, y=None, z=None):
            return (x, y, z)
    model = ExampleModule()
    input_x = torch.ones(1, dtype=torch.half)
    input_y = torch.ones(1, dtype=torch.float32)
    input_z = torch.ones(1, dtype=torch.float32)
    (output_x, output_y, output_z) = model(input_x, y=input_y, z=input_z)
    assert (output_x.dtype == torch.half)
    assert (output_y.dtype == torch.float32)
    assert (output_z.dtype == torch.float32)
    model.fp16_enabled = True
    (output_x, output_y, output_z) = model(input_x, y=input_y, z=input_z)
    assert (output_x.dtype == torch.float32)
    assert (output_y.dtype == torch.float32)
    assert (output_z.dtype == torch.float32)
    if torch.cuda.is_available():
        model.cuda()
        (output_x, output_y, output_z) = model(input_x.cuda(), y=input_y.cuda(), z=input_z.cuda())
        assert (output_x.dtype == torch.float32)
        assert (output_y.dtype == torch.float32)
        assert (output_z.dtype == torch.float32)
|
def test_force_fp32():
    """Test the ``force_fp32`` decorator: no-op until ``fp16_enabled`` is set,
    selective casting via ``apply_to``, and output recasting via ``out_fp16``."""
    # Applying force_fp32 outside an nn.Module raises TypeError when called.
    with pytest.raises(TypeError):
        class ExampleObject(object):
            @force_fp32()
            def __call__(self, x):
                return x
        model = ExampleObject()
        input_x = torch.ones(1, dtype=torch.float32)
        model(input_x)
    # Default: all half args are cast to fp32 once fp16_enabled is True.
    class ExampleModule(nn.Module):
        @force_fp32()
        def forward(self, x, y):
            return (x, y)
    model = ExampleModule()
    input_x = torch.ones(1, dtype=torch.half)
    input_y = torch.ones(1, dtype=torch.half)
    (output_x, output_y) = model(input_x, input_y)
    assert (output_x.dtype == torch.half)
    assert (output_y.dtype == torch.half)
    model.fp16_enabled = True
    (output_x, output_y) = model(input_x, input_y)
    assert (output_x.dtype == torch.float32)
    assert (output_y.dtype == torch.float32)
    if torch.cuda.is_available():
        model.cuda()
        (output_x, output_y) = model(input_x.cuda(), input_y.cuda())
        assert (output_x.dtype == torch.float32)
        assert (output_y.dtype == torch.float32)
    # apply_to=('x',): only `x` is cast to fp32; `y` stays half.
    class ExampleModule(nn.Module):
        @force_fp32(apply_to=('x',))
        def forward(self, x, y):
            return (x, y)
    model = ExampleModule()
    input_x = torch.ones(1, dtype=torch.half)
    input_y = torch.ones(1, dtype=torch.half)
    (output_x, output_y) = model(input_x, input_y)
    assert (output_x.dtype == torch.half)
    assert (output_y.dtype == torch.half)
    model.fp16_enabled = True
    (output_x, output_y) = model(input_x, input_y)
    assert (output_x.dtype == torch.float32)
    assert (output_y.dtype == torch.half)
    if torch.cuda.is_available():
        model.cuda()
        (output_x, output_y) = model(input_x.cuda(), input_y.cuda())
        assert (output_x.dtype == torch.float32)
        assert (output_y.dtype == torch.half)
    # apply_to also covers keyword arguments; `z` is untouched.
    class ExampleModule(nn.Module):
        @force_fp32(apply_to=('x', 'y'))
        def forward(self, x, y=None, z=None):
            return (x, y, z)
    model = ExampleModule()
    input_x = torch.ones(1, dtype=torch.half)
    input_y = torch.ones(1, dtype=torch.half)
    input_z = torch.ones(1, dtype=torch.half)
    (output_x, output_y, output_z) = model(input_x, y=input_y, z=input_z)
    assert (output_x.dtype == torch.half)
    assert (output_y.dtype == torch.half)
    assert (output_z.dtype == torch.half)
    model.fp16_enabled = True
    (output_x, output_y, output_z) = model(input_x, y=input_y, z=input_z)
    assert (output_x.dtype == torch.float32)
    assert (output_y.dtype == torch.float32)
    assert (output_z.dtype == torch.half)
    if torch.cuda.is_available():
        model.cuda()
        (output_x, output_y, output_z) = model(input_x.cuda(), y=input_y.cuda(), z=input_z.cuda())
        assert (output_x.dtype == torch.float32)
        assert (output_y.dtype == torch.float32)
        assert (output_z.dtype == torch.half)
    # out_fp16=True: returned tensors are cast back to half.
    class ExampleModule(nn.Module):
        @force_fp32(apply_to=('x', 'y'), out_fp16=True)
        def forward(self, x, y=None, z=None):
            return (x, y, z)
    model = ExampleModule()
    input_x = torch.ones(1, dtype=torch.float32)
    input_y = torch.ones(1, dtype=torch.half)
    input_z = torch.ones(1, dtype=torch.half)
    (output_x, output_y, output_z) = model(input_x, y=input_y, z=input_z)
    assert (output_x.dtype == torch.float32)
    assert (output_y.dtype == torch.half)
    assert (output_z.dtype == torch.half)
    model.fp16_enabled = True
    (output_x, output_y, output_z) = model(input_x, y=input_y, z=input_z)
    assert (output_x.dtype == torch.half)
    assert (output_y.dtype == torch.half)
    assert (output_z.dtype == torch.half)
    if torch.cuda.is_available():
        model.cuda()
        (output_x, output_y, output_z) = model(input_x.cuda(), y=input_y.cuda(), z=input_z.cuda())
        assert (output_x.dtype == torch.half)
        assert (output_y.dtype == torch.half)
        assert (output_z.dtype == torch.half)
|
def test_optimizerhook():
    """Test ``OptimizerHook`` with ``detect_anomalous_params``: parameters that
    did not contribute to the loss must be reported through the logger."""
    class Model(nn.Module):
        def __init__(self):
            super().__init__()
            self.conv1 = nn.Conv2d(in_channels=1, out_channels=2, kernel_size=3, stride=1, padding=1, dilation=1)
            self.conv2 = nn.Conv2d(in_channels=2, out_channels=2, kernel_size=3, stride=1, padding=1, dilation=1)
            # conv3 is deliberately never used in forward(), so it is always
            # anomalous (its params never receive gradients).
            self.conv3 = nn.Conv2d(in_channels=1, out_channels=2, kernel_size=3, stride=1, padding=1, dilation=1)
        def forward(self, x):
            x1 = self.conv1(x)
            x2 = self.conv2(x1)
            return (x1, x2)
    model = Model()
    x = torch.rand(1, 1, 3, 3)
    # A mocked runner: only the pieces OptimizerHook touches are provided.
    dummy_runner = Mock()
    dummy_runner.optimizer.zero_grad = Mock(return_value=None)
    dummy_runner.optimizer.step = Mock(return_value=None)
    dummy_runner.model = model
    dummy_runner.outputs = dict()
    dummy_runner.outputs['num_samples'] = 0
    class DummyLogger():
        # Accumulates logged messages into a single string for assertions.
        def __init__(self):
            self.msg = ''
        def log(self, msg=None, **kwargs):
            self.msg += msg
    dummy_runner.logger = DummyLogger()
    optimizer_hook = OptimizerHook(dict(max_norm=2), detect_anomalous_params=True)
    # Loss from x1 only: conv2 (unused in this loss) and conv3 are anomalous.
    dummy_runner.outputs['loss'] = model(x)[0].sum()
    optimizer_hook.after_train_iter(dummy_runner)
    assert ('conv2.weight' in dummy_runner.logger.msg)
    assert ('conv2.bias' in dummy_runner.logger.msg)
    assert ('conv3.weight' in dummy_runner.logger.msg)
    assert ('conv3.bias' in dummy_runner.logger.msg)
    assert ('conv1.weight' not in dummy_runner.logger.msg)
    assert ('conv1.bias' not in dummy_runner.logger.msg)
    # Loss from x2: conv1 and conv2 both contribute, only conv3 is anomalous.
    dummy_runner.outputs['loss'] = model(x)[1].sum()
    dummy_runner.logger.msg = ''
    optimizer_hook.after_train_iter(dummy_runner)
    assert ('conv3.weight' in dummy_runner.logger.msg)
    assert ('conv3.bias' in dummy_runner.logger.msg)
    assert ('conv2.weight' not in dummy_runner.logger.msg)
    assert ('conv2.bias' not in dummy_runner.logger.msg)
    assert ('conv1.weight' not in dummy_runner.logger.msg)
    assert ('conv1.bias' not in dummy_runner.logger.msg)
|
def test_checkpoint_hook(tmp_path):
    """Test ``CheckpointHook`` for epoch- and iter-based runners, saving both
    locally and to a remote (Petrel/s3) ``out_dir``.

    xdoctest -m tests/test_runner/test_hooks.py test_checkpoint_hook
    """
    loader = DataLoader(torch.ones((5, 2)))
    # Epoch-based, local work_dir: last_ckpt points at epoch_1.pth.
    runner = _build_demo_runner('EpochBasedRunner', max_epochs=1)
    runner.meta = dict()
    checkpointhook = CheckpointHook(interval=1, by_epoch=True)
    runner.register_hook(checkpointhook)
    runner.run([loader], [('train', 1)])
    assert (runner.meta['hook_msgs']['last_ckpt'] == osp.join(runner.work_dir, 'epoch_1.pth'))
    shutil.rmtree(runner.work_dir)
    # Epoch-based, remote out_dir with max_keep_ckpts: file ops must go
    # through the mocked Petrel backend.
    runner = _build_demo_runner('EpochBasedRunner', max_epochs=4)
    runner.meta = dict()
    out_dir = 's3://user/data'
    with patch.object(PetrelBackend, 'put') as mock_put, patch.object(PetrelBackend, 'remove') as mock_remove, patch.object(PetrelBackend, 'isfile') as mock_isfile:
        checkpointhook = CheckpointHook(interval=1, out_dir=out_dir, by_epoch=True, max_keep_ckpts=2)
        runner.register_hook(checkpointhook)
        runner.run([loader], [('train', 1)])
        basename = osp.basename(runner.work_dir.rstrip(osp.sep))
        assert (runner.meta['hook_msgs']['last_ckpt'] == '/'.join([out_dir, basename, 'epoch_4.pth']))
        mock_put.assert_called()
        mock_remove.assert_called()
        mock_isfile.assert_called()
    shutil.rmtree(runner.work_dir)
    # Iter-based, local work_dir: last_ckpt points at iter_1.pth.
    runner = _build_demo_runner('IterBasedRunner', max_iters=1, max_epochs=None)
    runner.meta = dict()
    checkpointhook = CheckpointHook(interval=1, by_epoch=False)
    runner.register_hook(checkpointhook)
    runner.run([loader], [('train', 1)])
    assert (runner.meta['hook_msgs']['last_ckpt'] == osp.join(runner.work_dir, 'iter_1.pth'))
    shutil.rmtree(runner.work_dir)
    # Iter-based, remote out_dir with max_keep_ckpts.
    runner = _build_demo_runner('IterBasedRunner', max_iters=4, max_epochs=None)
    runner.meta = dict()
    out_dir = 's3://user/data'
    with patch.object(PetrelBackend, 'put') as mock_put, patch.object(PetrelBackend, 'remove') as mock_remove, patch.object(PetrelBackend, 'isfile') as mock_isfile:
        checkpointhook = CheckpointHook(interval=1, out_dir=out_dir, by_epoch=False, max_keep_ckpts=2)
        runner.register_hook(checkpointhook)
        runner.run([loader], [('train', 1)])
        basename = osp.basename(runner.work_dir.rstrip(osp.sep))
        assert (runner.meta['hook_msgs']['last_ckpt'] == '/'.join([out_dir, basename, 'iter_4.pth']))
        mock_put.assert_called()
        mock_remove.assert_called()
        mock_isfile.assert_called()
    shutil.rmtree(runner.work_dir)
|
def test_ema_hook():
    """Test ``EMAHook``: EMA buffers are written into checkpoints and restored
    via ``resume_from``.

    xdoctest -m tests/test_hooks.py test_ema_hook
    """
    class DemoModel(nn.Module):
        # Zero-initialised conv so every state-dict entry has a predictable sum.
        def __init__(self):
            super().__init__()
            self.conv = nn.Conv2d(in_channels=1, out_channels=2, kernel_size=1, padding=1, bias=True)
            self._init_weight()
        def _init_weight(self):
            constant_(self.conv.weight, 0)
            constant_(self.conv.bias, 0)
        def forward(self, x):
            return self.conv(x).sum()
        def train_step(self, x, optimizer, **kwargs):
            return dict(loss=self(x))
        def val_step(self, x, optimizer, **kwargs):
            return dict(loss=self(x))
    loader = DataLoader(torch.ones((1, 1, 1, 1)))
    runner = _build_demo_runner()
    demo_model = DemoModel()
    runner.model = demo_model
    emahook = EMAHook(momentum=0.1, interval=2, warm_up=100, resume_from=None)
    checkpointhook = CheckpointHook(interval=1, by_epoch=True)
    # EMA hook is registered with the highest priority.
    runner.register_hook(emahook, priority='HIGHEST')
    runner.register_hook(checkpointhook)
    runner.run([loader, loader], [('train', 1), ('val', 1)])
    checkpoint = torch.load(f'{runner.work_dir}/epoch_1.pth')
    contain_ema_buffer = False
    for (name, value) in checkpoint['state_dict'].items():
        if ('ema' in name):
            contain_ema_buffer = True
            assert (value.sum() == 0)
            # Overwrite the ema buffers with ones so the resumed run below
            # can be distinguished from a fresh start.
            value.fill_(1)
        else:
            assert (value.sum() == 0)
    assert contain_ema_buffer
    torch.save(checkpoint, f'{runner.work_dir}/epoch_1.pth')
    work_dir = runner.work_dir
    # Resume from the modified checkpoint; with momentum=0.5 and zero-sum
    # params the resumed ema buffers end up summing to 2, params to 1.
    resume_ema_hook = EMAHook(momentum=0.5, warm_up=0, resume_from=f'{work_dir}/epoch_1.pth')
    runner = _build_demo_runner(max_epochs=2)
    runner.model = demo_model
    runner.register_hook(resume_ema_hook, priority='HIGHEST')
    checkpointhook = CheckpointHook(interval=1, by_epoch=True)
    runner.register_hook(checkpointhook)
    runner.run([loader, loader], [('train', 1), ('val', 1)])
    checkpoint = torch.load(f'{runner.work_dir}/epoch_2.pth')
    contain_ema_buffer = False
    for (name, value) in checkpoint['state_dict'].items():
        if ('ema' in name):
            contain_ema_buffer = True
            assert (value.sum() == 2)
        else:
            assert (value.sum() == 1)
    assert contain_ema_buffer
    shutil.rmtree(runner.work_dir)
    shutil.rmtree(work_dir)
|
def test_custom_hook():
    """Test registration and priority ordering of custom hooks."""
    @HOOKS.register_module()
    class ToyHook(Hook):
        # A dummy hook carrying an `info` payload used to verify ordering.
        def __init__(self, info, *args, **kwargs):
            super().__init__()
            self.info = info
    # Registering None leaves the hook list empty.
    runner = _build_demo_runner_without_hook('EpochBasedRunner', max_epochs=1)
    runner.register_custom_hooks(None)
    assert (len(runner.hooks) == 0)
    # Lower numeric priority sorts first (49 before 51).
    custom_hooks_cfg = [dict(type='ToyHook', priority=51, info=51), dict(type='ToyHook', priority=49, info=49)]
    runner.register_custom_hooks(custom_hooks_cfg)
    assert ([hook.info for hook in runner.hooks] == [49, 51])
    # A Hook instance registered without an explicit priority slots between
    # 49 and 51 (presumably the default NORMAL priority — confirm in Hook).
    runner.register_custom_hooks(ToyHook(info='default'))
    assert ((len(runner.hooks) == 3) and (runner.hooks[1].info == 'default'))
    shutil.rmtree(runner.work_dir)
    # String priority ranks: hooks come back sorted HIGHEST..LOWEST
    # regardless of the (shuffled) registration order.
    runner = _build_demo_runner_without_hook('EpochBasedRunner', max_epochs=1)
    priority_ranks = ['HIGHEST', 'VERY_HIGH', 'HIGH', 'ABOVE_NORMAL', 'NORMAL', 'BELOW_NORMAL', 'LOW', 'VERY_LOW', 'LOWEST']
    random_priority_ranks = priority_ranks.copy()
    random.shuffle(random_priority_ranks)
    custom_hooks_cfg = [dict(type='ToyHook', priority=rank, info=rank) for rank in random_priority_ranks]
    runner.register_custom_hooks(custom_hooks_cfg)
    assert ([hook.info for hook in runner.hooks] == priority_ranks)
    shutil.rmtree(runner.work_dir)
    # Custom hooks interleave with the standard training hooks according to
    # their numeric/string priorities.
    runner = _build_demo_runner_without_hook('EpochBasedRunner', max_epochs=1)
    custom_hooks_cfg = [dict(type='ToyHook', priority=1, info='custom 1'), dict(type='ToyHook', priority='NORMAL', info='custom normal'), dict(type='ToyHook', priority=89, info='custom 89')]
    runner.register_training_hooks(lr_config=ToyHook('lr'), optimizer_config=ToyHook('optimizer'), checkpoint_config=ToyHook('checkpoint'), log_config=dict(interval=1, hooks=[dict(type='ToyHook', info='log')]), momentum_config=ToyHook('momentum'), timer_config=ToyHook('timer'), custom_hooks_config=custom_hooks_cfg)
    hooks_order = ['custom 1', 'lr', 'momentum', 'optimizer', 'checkpoint', 'custom normal', 'timer', 'custom 89', 'log']
    assert ([hook.info for hook in runner.hooks] == hooks_order)
    shutil.rmtree(runner.work_dir)
|
def test_pavi_hook():
    """Test ``PaviLoggerHook`` with the `pavi` package fully mocked: scalar
    logging and last-checkpoint upload calls are asserted on the mock writer."""
    # Make `import pavi` succeed without the real package installed.
    sys.modules['pavi'] = MagicMock()
    loader = DataLoader(torch.ones((5, 2)))
    runner = _build_demo_runner()
    runner.meta = dict(config_dict=dict(lr=0.02, gpu_ids=range(1)))
    hook = PaviLoggerHook(add_graph=False, add_last_ckpt=True)
    runner.register_hook(hook)
    runner.run([loader, loader], [('train', 1), ('val', 1)])
    shutil.rmtree(runner.work_dir)
    assert hasattr(hook, 'writer')
    hook.writer.add_scalars.assert_called_with('val', {'learning_rate': 0.02, 'momentum': 0.95}, 1)
    # The snapshot filename differs per platform (latest.pth on Windows,
    # epoch_1.pth elsewhere).
    if (platform.system() == 'Windows'):
        snapshot_file_path = osp.join(runner.work_dir, 'latest.pth')
    else:
        snapshot_file_path = osp.join(runner.work_dir, 'epoch_1.pth')
    hook.writer.add_snapshot_file.assert_called_with(tag=runner.work_dir.split('/')[(- 1)], snapshot_file_path=snapshot_file_path, iteration=1)
|
def test_sync_buffers_hook():
    """SyncBuffersHook should run through a train/val cycle without error."""
    runner = _build_demo_runner()
    sync_cfg = dict(type='SyncBuffersHook')
    runner.register_hook_from_cfg(sync_cfg)
    data_loader = DataLoader(torch.ones((5, 2)))
    runner.run([data_loader, data_loader], [('train', 1), ('val', 1)])
    shutil.rmtree(runner.work_dir)
|
@pytest.mark.parametrize('multi_optimizers, max_iters, gamma, cyclic_times', [(True, 8, 1, 1), (False, 8, 0.5, 2)])
def test_momentum_runner_hook(multi_optimizers, max_iters, gamma, cyclic_times):
    """Test momentum updater hooks via the values PaviLoggerHook records.

    Covers CyclicMomentumUpdaterHook and StepMomentumUpdaterHook with
    'constant', 'linear' and 'exp' warmup, for single- and multi-optimizer
    runners.

    xdoctest -m tests/test_hooks.py test_momentum_runner_hook.
    """
    # Scenario 1: cyclic momentum schedule together with a cyclic lr schedule.
    sys.modules['pavi'] = MagicMock()  # stub pavi so PaviLoggerHook can import it
    loader = DataLoader(torch.ones((10, 2)))
    runner = _build_demo_runner(multi_optimizers=multi_optimizers)
    hook_cfg = dict(type='CyclicMomentumUpdaterHook', by_epoch=False, target_ratio=((0.85 / 0.95), 1), cyclic_times=cyclic_times, step_ratio_up=0.4, gamma=gamma)
    runner.register_hook_from_cfg(hook_cfg)
    hook_cfg = dict(type='CyclicLrUpdaterHook', by_epoch=False, target_ratio=(10, 1), cyclic_times=1, step_ratio_up=0.4)
    runner.register_hook_from_cfg(hook_cfg)
    runner.register_hook_from_cfg(dict(type='IterTimerHook'))
    hook = PaviLoggerHook(interval=1, add_graph=False, add_last_ckpt=True)
    runner.register_hook(hook)
    runner.run([loader], [('train', 1)])
    shutil.rmtree(runner.work_dir)
    assert hasattr(hook, 'writer')
    # Expected lr/momentum values recorded at selected iterations.
    if multi_optimizers:
        calls = [call('train', {'learning_rate/model1': 0.01999999999999999, 'learning_rate/model2': 0.009999999999999995, 'momentum/model1': 0.95, 'momentum/model2': 0.9}, 1), call('train', {'learning_rate/model1': 0.2, 'learning_rate/model2': 0.1, 'momentum/model1': 0.85, 'momentum/model2': 0.8052631578947369}, 5), call('train', {'learning_rate/model1': 0.155, 'learning_rate/model2': 0.0775, 'momentum/model1': 0.875, 'momentum/model2': 0.8289473684210527}, 7)]
    else:
        calls = [call('train', {'learning_rate': 0.01999999999999999, 'momentum': 0.95}, 1), call('train', {'learning_rate': 0.11, 'momentum': 0.85}, 3), call('train', {'learning_rate': 0.1879422863405995, 'momentum': 0.95}, 6), call('train', {'learning_rate': 0.11000000000000001, 'momentum': 0.9}, 8)]
    hook.writer.add_scalars.assert_has_calls(calls, any_order=True)
    # Scenario 2: step momentum schedule with 'constant' warmup.
    sys.modules['pavi'] = MagicMock()
    runner = _build_demo_runner(multi_optimizers=multi_optimizers)
    hook_cfg = dict(type='StepMomentumUpdaterHook', by_epoch=False, warmup='constant', warmup_iters=5, warmup_ratio=0.5, step=[10])
    runner.register_hook_from_cfg(hook_cfg)
    runner.register_hook_from_cfg(dict(type='IterTimerHook'))
    hook = PaviLoggerHook(interval=1, add_graph=False, add_last_ckpt=True)
    runner.register_hook(hook)
    runner.run([loader], [('train', 1)])
    shutil.rmtree(runner.work_dir)
    assert hasattr(hook, 'writer')
    if multi_optimizers:
        calls = [call('train', {'learning_rate/model1': 0.02, 'learning_rate/model2': 0.01, 'momentum/model1': 1.9, 'momentum/model2': 1.8}, 1), call('train', {'learning_rate/model1': 0.02, 'learning_rate/model2': 0.01, 'momentum/model1': 1.9, 'momentum/model2': 1.8}, 5), call('train', {'learning_rate/model1': 0.02, 'learning_rate/model2': 0.01, 'momentum/model1': 0.95, 'momentum/model2': 0.9}, 10)]
    else:
        calls = [call('train', {'learning_rate': 0.02, 'momentum': 1.9}, 1), call('train', {'learning_rate': 0.02, 'momentum': 1.9}, 5), call('train', {'learning_rate': 0.02, 'momentum': 0.95}, 10)]
    hook.writer.add_scalars.assert_has_calls(calls, any_order=True)
    # Scenario 3: step momentum schedule with 'linear' warmup.
    sys.modules['pavi'] = MagicMock()
    runner = _build_demo_runner(multi_optimizers=multi_optimizers)
    hook_cfg = dict(type='StepMomentumUpdaterHook', by_epoch=False, warmup='linear', warmup_iters=5, warmup_ratio=0.5, step=[10])
    runner.register_hook_from_cfg(hook_cfg)
    runner.register_hook_from_cfg(dict(type='IterTimerHook'))
    hook = PaviLoggerHook(interval=1, add_graph=False, add_last_ckpt=True)
    runner.register_hook(hook)
    runner.run([loader], [('train', 1)])
    shutil.rmtree(runner.work_dir)
    assert hasattr(hook, 'writer')
    if multi_optimizers:
        calls = [call('train', {'learning_rate/model1': 0.02, 'learning_rate/model2': 0.01, 'momentum/model1': 1.9, 'momentum/model2': 1.8}, 1), call('train', {'learning_rate/model1': 0.02, 'learning_rate/model2': 0.01, 'momentum/model1': 1.3571428571428572, 'momentum/model2': 1.2857142857142858}, 3), call('train', {'learning_rate/model1': 0.02, 'learning_rate/model2': 0.01, 'momentum/model1': 0.95, 'momentum/model2': 0.9}, 10)]
    else:
        calls = [call('train', {'learning_rate': 0.02, 'momentum': 1.9}, 1), call('train', {'learning_rate': 0.02, 'momentum': 1.3571428571428572}, 3), call('train', {'learning_rate': 0.02, 'momentum': 0.95}, 10)]
    hook.writer.add_scalars.assert_has_calls(calls, any_order=True)
    # Scenario 4: step momentum schedule with 'exp' warmup.
    sys.modules['pavi'] = MagicMock()
    runner = _build_demo_runner(multi_optimizers=multi_optimizers)
    hook_cfg = dict(type='StepMomentumUpdaterHook', by_epoch=False, warmup='exp', warmup_iters=5, warmup_ratio=0.5, step=[10])
    runner.register_hook_from_cfg(hook_cfg)
    runner.register_hook_from_cfg(dict(type='IterTimerHook'))
    hook = PaviLoggerHook(interval=1, add_graph=False, add_last_ckpt=True)
    runner.register_hook(hook)
    runner.run([loader], [('train', 1)])
    shutil.rmtree(runner.work_dir)
    assert hasattr(hook, 'writer')
    if multi_optimizers:
        calls = [call('train', {'learning_rate/model1': 0.02, 'learning_rate/model2': 0.01, 'momentum/model1': 1.9, 'momentum/model2': 1.8}, 1), call('train', {'learning_rate/model1': 0.02, 'learning_rate/model2': 0.01, 'momentum/model1': 1.4399307381848783, 'momentum/model2': 1.3641449098593583}, 3), call('train', {'learning_rate/model1': 0.02, 'learning_rate/model2': 0.01, 'momentum/model1': 0.95, 'momentum/model2': 0.9}, 10)]
    else:
        calls = [call('train', {'learning_rate': 0.02, 'momentum': 1.9}, 1), call('train', {'learning_rate': 0.02, 'momentum': 1.4399307381848783}, 3), call('train', {'learning_rate': 0.02, 'momentum': 0.95}, 10)]
    hook.writer.add_scalars.assert_has_calls(calls, any_order=True)
|
@pytest.mark.parametrize('multi_optimizers', (True, False))
def test_cosine_runner_hook(multi_optimizers):
    """Test CosineAnnealing lr/momentum updater hooks with linear warmup.

    xdoctest -m tests/test_hooks.py test_cosine_runner_hook.
    """
    sys.modules['pavi'] = MagicMock()  # stub pavi so PaviLoggerHook can import it
    loader = DataLoader(torch.ones((10, 2)))
    runner = _build_demo_runner(multi_optimizers=multi_optimizers)
    # Momentum anneals towards base * (0.99 / 0.95) with a 2-iter warmup
    # starting from base * (0.9 / 0.95).
    hook_cfg = dict(type='CosineAnnealingMomentumUpdaterHook', min_momentum_ratio=(0.99 / 0.95), by_epoch=False, warmup_iters=2, warmup_ratio=(0.9 / 0.95))
    runner.register_hook_from_cfg(hook_cfg)
    # Learning rate anneals down to 0 with the same warmup length.
    hook_cfg = dict(type='CosineAnnealingLrUpdaterHook', by_epoch=False, min_lr_ratio=0, warmup_iters=2, warmup_ratio=0.9)
    runner.register_hook_from_cfg(hook_cfg)
    runner.register_hook_from_cfg(dict(type='IterTimerHook'))
    runner.register_hook(IterTimerHook())
    hook = PaviLoggerHook(interval=1, add_graph=False, add_last_ckpt=True)
    runner.register_hook(hook)
    runner.run([loader], [('train', 1)])
    shutil.rmtree(runner.work_dir)
    assert hasattr(hook, 'writer')
    # Expected lr/momentum values at iterations 1, 6 and 10.
    if multi_optimizers:
        calls = [call('train', {'learning_rate/model1': 0.02, 'learning_rate/model2': 0.01, 'momentum/model1': 0.95, 'momentum/model2': 0.9}, 1), call('train', {'learning_rate/model1': 0.01, 'learning_rate/model2': 0.005, 'momentum/model1': 0.97, 'momentum/model2': 0.9189473684210527}, 6), call('train', {'learning_rate/model1': 0.0004894348370484647, 'learning_rate/model2': 0.00024471741852423234, 'momentum/model1': 0.9890211303259032, 'momentum/model2': 0.9369673866245399}, 10)]
    else:
        calls = [call('train', {'learning_rate': 0.02, 'momentum': 0.95}, 1), call('train', {'learning_rate': 0.01, 'momentum': 0.97}, 6), call('train', {'learning_rate': 0.0004894348370484647, 'momentum': 0.9890211303259032}, 10)]
    hook.writer.add_scalars.assert_has_calls(calls, any_order=True)
|
@pytest.mark.parametrize('multi_optimizers, by_epoch', [(False, False), (True, False), (False, True), (True, True)])
def test_flat_cosine_runner_hook(multi_optimizers, by_epoch):
    """Test FlatCosineAnnealingLrUpdaterHook.

    The lr stays flat for the first half of training (start_percent=0.5) and
    then follows cosine annealing; checked in both epoch- and iter-based
    modes, with single and multiple optimizers.

    xdoctest -m tests/test_hooks.py test_flat_cosine_runner_hook.
    """
    sys.modules['pavi'] = MagicMock()  # stub pavi so PaviLoggerHook can import it
    loader = DataLoader(torch.ones((10, 2)))
    max_epochs = (10 if by_epoch else 1)
    runner = _build_demo_runner(multi_optimizers=multi_optimizers, max_epochs=max_epochs)
    with pytest.raises(ValueError):
        # A negative start_percent is rejected.
        FlatCosineAnnealingLrUpdaterHook(start_percent=(- 0.1), min_lr_ratio=0)
    hook_cfg = dict(type='FlatCosineAnnealingLrUpdaterHook', by_epoch=by_epoch, min_lr_ratio=0, warmup='linear', warmup_iters=(10 if by_epoch else 2), warmup_ratio=0.9, start_percent=0.5)
    runner.register_hook_from_cfg(hook_cfg)
    runner.register_hook_from_cfg(dict(type='IterTimerHook'))
    runner.register_hook(IterTimerHook())
    hook = PaviLoggerHook(interval=1, add_graph=False, add_last_ckpt=True)
    runner.register_hook(hook)
    runner.run([loader], [('train', 1)])
    shutil.rmtree(runner.work_dir)
    assert hasattr(hook, 'writer')
    # Expected lr/momentum values at selected iterations; the run has 100
    # iterations when by_epoch (10 epochs x 10 batches), otherwise 10.
    if multi_optimizers:
        if by_epoch:
            calls = [call('train', {'learning_rate/model1': 0.018000000000000002, 'learning_rate/model2': 0.009000000000000001, 'momentum/model1': 0.95, 'momentum/model2': 0.9}, 1), call('train', {'learning_rate/model1': 0.02, 'learning_rate/model2': 0.01, 'momentum/model1': 0.95, 'momentum/model2': 0.9}, 11), call('train', {'learning_rate/model1': 0.018090169943749474, 'learning_rate/model2': 0.009045084971874737, 'momentum/model1': 0.95, 'momentum/model2': 0.9}, 61), call('train', {'learning_rate/model1': 0.0019098300562505265, 'learning_rate/model2': 0.0009549150281252633, 'momentum/model1': 0.95, 'momentum/model2': 0.9}, 100)]
        else:
            calls = [call('train', {'learning_rate/model1': 0.018000000000000002, 'learning_rate/model2': 0.009000000000000001, 'momentum/model1': 0.95, 'momentum/model2': 0.9}, 1), call('train', {'learning_rate/model1': 0.02, 'learning_rate/model2': 0.01, 'momentum/model1': 0.95, 'momentum/model2': 0.9}, 6), call('train', {'learning_rate/model1': 0.018090169943749474, 'learning_rate/model2': 0.009045084971874737, 'momentum/model1': 0.95, 'momentum/model2': 0.9}, 7), call('train', {'learning_rate/model1': 0.0019098300562505265, 'learning_rate/model2': 0.0009549150281252633, 'momentum/model1': 0.95, 'momentum/model2': 0.9}, 10)]
    elif by_epoch:
        calls = [call('train', {'learning_rate': 0.018000000000000002, 'momentum': 0.95}, 1), call('train', {'learning_rate': 0.02, 'momentum': 0.95}, 11), call('train', {'learning_rate': 0.018090169943749474, 'momentum': 0.95}, 61), call('train', {'learning_rate': 0.0019098300562505265, 'momentum': 0.95}, 100)]
    else:
        calls = [call('train', {'learning_rate': 0.018000000000000002, 'momentum': 0.95}, 1), call('train', {'learning_rate': 0.02, 'momentum': 0.95}, 6), call('train', {'learning_rate': 0.018090169943749474, 'momentum': 0.95}, 7), call('train', {'learning_rate': 0.0019098300562505265, 'momentum': 0.95}, 10)]
    hook.writer.add_scalars.assert_has_calls(calls, any_order=True)
|
@pytest.mark.parametrize('multi_optimizers, max_iters', [(True, 10), (True, 2), (False, 10), (False, 2)])
def test_one_cycle_runner_hook(multi_optimizers, max_iters):
    """Test OneCycleLrUpdaterHook and OneCycleMomentumUpdaterHook.

    Also cross-checks the lr schedule against
    torch.optim.lr_scheduler.OneCycleLR built with the same arguments.
    """
    # Constructor validation.
    with pytest.raises(AssertionError):
        # OneCycle schedules are iteration-based only.
        OneCycleLrUpdaterHook(max_lr=0.1, by_epoch=True)
    with pytest.raises(ValueError):
        # A negative pct_start is rejected.
        OneCycleLrUpdaterHook(max_lr=0.1, pct_start=(- 0.1))
    with pytest.raises(ValueError):
        # 'sin' is not a supported anneal strategy.
        OneCycleLrUpdaterHook(max_lr=0.1, anneal_strategy='sin')
    sys.modules['pavi'] = MagicMock()  # stub pavi so PaviLoggerHook can import it
    loader = DataLoader(torch.ones((10, 2)))
    runner = _build_demo_runner(multi_optimizers=multi_optimizers)
    hook_cfg = dict(type='OneCycleMomentumUpdaterHook', base_momentum=0.85, max_momentum=0.95, pct_start=0.5, anneal_strategy='cos', three_phase=False)
    runner.register_hook_from_cfg(hook_cfg)
    hook_cfg = dict(type='OneCycleLrUpdaterHook', max_lr=0.01, pct_start=0.5, anneal_strategy='cos', div_factor=25, final_div_factor=10000.0, three_phase=False)
    runner.register_hook_from_cfg(hook_cfg)
    runner.register_hook_from_cfg(dict(type='IterTimerHook'))
    runner.register_hook(IterTimerHook())
    hook = PaviLoggerHook(interval=1, add_graph=False, add_last_ckpt=True)
    runner.register_hook(hook)
    runner.run([loader], [('train', 1)])
    shutil.rmtree(runner.work_dir)
    assert hasattr(hook, 'writer')
    # Expected lr/momentum values at iterations 1, 6 and 10.
    if multi_optimizers:
        calls = [call('train', {'learning_rate/model1': 0.0003999999999999993, 'learning_rate/model2': 0.0003999999999999993, 'momentum/model1': 0.95, 'momentum/model2': 0.95}, 1), call('train', {'learning_rate/model1': 0.00904508879153485, 'learning_rate/model2': 0.00904508879153485, 'momentum/model1': 0.8595491502812526, 'momentum/model2': 0.8595491502812526}, 6), call('train', {'learning_rate/model1': 4e-08, 'learning_rate/model2': 4e-08, 'momentum/model1': 0.95, 'momentum/model2': 0.95}, 10)]
    else:
        calls = [call('train', {'learning_rate': 0.0003999999999999993, 'momentum': 0.95}, 1), call('train', {'learning_rate': 0.00904508879153485, 'momentum': 0.8595491502812526}, 6), call('train', {'learning_rate': 4e-08, 'momentum': 0.95}, 10)]
    hook.writer.add_scalars.assert_has_calls(calls, any_order=True)
    # Second part: compare against PyTorch's OneCycleLR scheduler.
    sys.modules['pavi'] = MagicMock()
    loader = DataLoader(torch.ones((10, 2)))
    runner = _build_demo_runner(runner_type='IterBasedRunner', max_epochs=None, max_iters=max_iters)
    args = dict(max_lr=0.01, total_steps=5, pct_start=0.5, anneal_strategy='linear', div_factor=25, final_div_factor=10000.0)
    hook = OneCycleLrUpdaterHook(**args)
    runner.register_hook(hook)
    if (max_iters == 10):
        # total_steps (5) < max_iters (10): running past the schedule raises.
        with pytest.raises(ValueError):
            runner.run([loader], [('train', 1)])
    else:
        runner.run([loader], [('train', 1)])
        lr_last = runner.current_lr()
        t = torch.tensor([0.0], requires_grad=True)
        optim = torch.optim.SGD([t], lr=0.01)
        lr_scheduler = torch.optim.lr_scheduler.OneCycleLR(optim, **args)
        lr_target = []
        # Step the reference scheduler for the same number of iterations and
        # compare the final learning rate.
        for _ in range(max_iters):
            optim.step()
            lr_target.append(optim.param_groups[0]['lr'])
            lr_scheduler.step()
        assert (lr_target[(- 1)] == lr_last[0])
|
@pytest.mark.parametrize('multi_optimizers', (True, False))
def test_cosine_restart_lr_update_hook(multi_optimizers):
    """Test CosineRestartLrUpdaterHook.

    Checks constructor validation, the ValueError raised when the restart
    periods do not cover all training iterations, and the logged lr/momentum
    values for a valid restart schedule.
    """
    with pytest.raises(AssertionError):
        # Both min_lr and min_lr_ratio given — presumably mutually exclusive;
        # the constructor asserts.
        CosineRestartLrUpdaterHook(by_epoch=False, periods=[2, 10], restart_weights=[0.5, 0.5], min_lr=0.1, min_lr_ratio=0)
    with pytest.raises(AssertionError):
        # periods and restart_weights differ in length.
        CosineRestartLrUpdaterHook(by_epoch=False, periods=[2, 10], restart_weights=[0.5], min_lr_ratio=0)
    sys.modules['pavi'] = MagicMock()  # stub pavi so PaviLoggerHook can import it
    loader = DataLoader(torch.ones((10, 2)))
    runner = _build_demo_runner()
    hook = CosineRestartLrUpdaterHook(by_epoch=False, periods=[5, 2], restart_weights=[0.5, 0.5], min_lr=0.0001)
    runner.register_hook(hook)
    runner.register_hook(IterTimerHook())
    hook = PaviLoggerHook(interval=1, add_graph=False, add_last_ckpt=True)
    runner.register_hook(hook)
    # The periods sum to 7 but training runs 10 iterations, so the hook
    # raises ValueError during runner.run().
    with pytest.raises(ValueError):
        runner.run([loader], [('train', 1)])
    # Bug fix: clean up the work dir even though run() raised. The rmtree
    # used to sit inside the pytest.raises block after the raising call and
    # therefore never executed, leaking a temp directory per test run.
    shutil.rmtree(runner.work_dir)
    # Valid schedule: two periods of 5 covering all 10 iterations.
    sys.modules['pavi'] = MagicMock()
    loader = DataLoader(torch.ones((10, 2)))
    runner = _build_demo_runner(multi_optimizers=multi_optimizers)
    hook = CosineRestartLrUpdaterHook(by_epoch=False, periods=[5, 5], restart_weights=[0.5, 0.5], min_lr_ratio=0)
    runner.register_hook(hook)
    runner.register_hook(IterTimerHook())
    hook = PaviLoggerHook(interval=1, add_graph=False, add_last_ckpt=True)
    runner.register_hook(hook)
    runner.run([loader], [('train', 1)])
    shutil.rmtree(runner.work_dir)
    assert hasattr(hook, 'writer')
    # Expected lr/momentum values at iterations 1, 6 (after the restart) and 10.
    if multi_optimizers:
        calls = [call('train', {'learning_rate/model1': 0.01, 'learning_rate/model2': 0.005, 'momentum/model1': 0.95, 'momentum/model2': 0.9}, 1), call('train', {'learning_rate/model1': 0.01, 'learning_rate/model2': 0.005, 'momentum/model1': 0.95, 'momentum/model2': 0.9}, 6), call('train', {'learning_rate/model1': 0.0009549150281252633, 'learning_rate/model2': 0.00047745751406263163, 'momentum/model1': 0.95, 'momentum/model2': 0.9}, 10)]
    else:
        calls = [call('train', {'learning_rate': 0.01, 'momentum': 0.95}, 1), call('train', {'learning_rate': 0.01, 'momentum': 0.95}, 6), call('train', {'learning_rate': 0.0009549150281252633, 'momentum': 0.95}, 10)]
    hook.writer.add_scalars.assert_has_calls(calls, any_order=True)
|
@pytest.mark.parametrize('multi_optimizers', (True, False))
def test_step_runner_hook(multi_optimizers):
    """Test StepLrUpdaterHook and StepMomentumUpdaterHook.

    Covers a fixed step interval with min_lr/min_momentum floors and an
    explicit list of step milestones.
    """
    with pytest.raises(TypeError):
        # `step` is a required argument.
        StepLrUpdaterHook()
    with pytest.raises(AssertionError):
        # A negative step interval is rejected.
        StepLrUpdaterHook((- 10))
    with pytest.raises(AssertionError):
        # Negative milestones are rejected.
        StepLrUpdaterHook([10, 16, (- 20)])
    # Scenario 1: decay by gamma=0.5 every 5 iters, clamped by min_lr/min_momentum.
    sys.modules['pavi'] = MagicMock()  # stub pavi so PaviLoggerHook can import it
    loader = DataLoader(torch.ones((30, 2)))
    runner = _build_demo_runner(multi_optimizers=multi_optimizers)
    hook_cfg = dict(type='StepMomentumUpdaterHook', by_epoch=False, step=5, gamma=0.5, min_momentum=0.05)
    runner.register_hook_from_cfg(hook_cfg)
    hook = StepLrUpdaterHook(by_epoch=False, step=5, gamma=0.5, min_lr=0.001)
    runner.register_hook(hook)
    runner.register_hook(IterTimerHook())
    hook = PaviLoggerHook(interval=1, add_graph=False, add_last_ckpt=True)
    runner.register_hook(hook)
    runner.run([loader], [('train', 1)])
    shutil.rmtree(runner.work_dir)
    assert hasattr(hook, 'writer')
    # Expected lr/momentum values; the floors take effect near iteration 21-26.
    if multi_optimizers:
        calls = [call('train', {'learning_rate/model1': 0.02, 'learning_rate/model2': 0.01, 'momentum/model1': 0.95, 'momentum/model2': 0.9}, 1), call('train', {'learning_rate/model1': 0.01, 'learning_rate/model2': 0.005, 'momentum/model1': 0.475, 'momentum/model2': 0.45}, 6), call('train', {'learning_rate/model1': 0.0025, 'learning_rate/model2': 0.00125, 'momentum/model1': 0.11875, 'momentum/model2': 0.1125}, 16), call('train', {'learning_rate/model1': 0.00125, 'learning_rate/model2': 0.001, 'momentum/model1': 0.059375, 'momentum/model2': 0.05625}, 21), call('train', {'learning_rate/model1': 0.001, 'learning_rate/model2': 0.001, 'momentum/model1': 0.05, 'momentum/model2': 0.05}, 26), call('train', {'learning_rate/model1': 0.001, 'learning_rate/model2': 0.001, 'momentum/model1': 0.05, 'momentum/model2': 0.05}, 30)]
    else:
        calls = [call('train', {'learning_rate': 0.02, 'momentum': 0.95}, 1), call('train', {'learning_rate': 0.01, 'momentum': 0.475}, 6), call('train', {'learning_rate': 0.0025, 'momentum': 0.11875}, 16), call('train', {'learning_rate': 0.00125, 'momentum': 0.059375}, 21), call('train', {'learning_rate': 0.001, 'momentum': 0.05}, 26), call('train', {'learning_rate': 0.001, 'momentum': 0.05}, 30)]
    hook.writer.add_scalars.assert_has_calls(calls, any_order=True)
    # Scenario 2: decay by gamma=0.1 at explicit milestones [4, 6, 8].
    sys.modules['pavi'] = MagicMock()
    loader = DataLoader(torch.ones((10, 2)))
    runner = _build_demo_runner(multi_optimizers=multi_optimizers)
    hook_cfg = dict(type='StepMomentumUpdaterHook', by_epoch=False, step=[4, 6, 8], gamma=0.1)
    runner.register_hook_from_cfg(hook_cfg)
    hook = StepLrUpdaterHook(by_epoch=False, step=[4, 6, 8], gamma=0.1)
    runner.register_hook(hook)
    runner.register_hook(IterTimerHook())
    hook = PaviLoggerHook(interval=1, add_graph=False, add_last_ckpt=True)
    runner.register_hook(hook)
    runner.run([loader], [('train', 1)])
    shutil.rmtree(runner.work_dir)
    assert hasattr(hook, 'writer')
    if multi_optimizers:
        calls = [call('train', {'learning_rate/model1': 0.02, 'learning_rate/model2': 0.01, 'momentum/model1': 0.95, 'momentum/model2': 0.9}, 1), call('train', {'learning_rate/model1': 0.002, 'learning_rate/model2': 0.001, 'momentum/model1': 0.095, 'momentum/model2': 0.09000000000000001}, 5), call('train', {'learning_rate/model1': 0.00020000000000000004, 'learning_rate/model2': 0.00010000000000000002, 'momentum/model1': 0.009500000000000001, 'momentum/model2': 0.009000000000000003}, 7), call('train', {'learning_rate/model1': 2.0000000000000005e-05, 'learning_rate/model2': 1.0000000000000003e-05, 'momentum/model1': 0.0009500000000000002, 'momentum/model2': 0.0009000000000000002}, 9)]
    else:
        calls = [call('train', {'learning_rate': 0.02, 'momentum': 0.95}, 1), call('train', {'learning_rate': 0.002, 'momentum': 0.095}, 5), call('train', {'learning_rate': 0.00020000000000000004, 'momentum': 0.009500000000000001}, 7), call('train', {'learning_rate': 2.0000000000000005e-05, 'momentum': 0.0009500000000000002}, 9)]
    hook.writer.add_scalars.assert_has_calls(calls, any_order=True)
|
@pytest.mark.parametrize('multi_optimizers, max_iters, gamma, cyclic_times', [(True, 8, 1, 1), (False, 8, 0.5, 2)])
def test_cyclic_lr_update_hook(multi_optimizers, max_iters, gamma, cyclic_times):
    """Test CyclicLrUpdaterHook.

    Checks constructor validation and the logged lr/momentum values for a
    linear cyclic schedule on an iter-based runner.
    """
    with pytest.raises(AssertionError):
        # Cyclic schedules are iteration-based only.
        CyclicLrUpdaterHook(by_epoch=True)
    with pytest.raises(AssertionError):
        # target_ratio must be a 2-tuple; a 3-tuple is rejected.
        CyclicLrUpdaterHook(by_epoch=False, target_ratio=(10.0, 0.1, 0.2))
    with pytest.raises(AssertionError):
        # step_ratio_up >= 1 is rejected.
        CyclicLrUpdaterHook(by_epoch=False, step_ratio_up=1.4)
    with pytest.raises(ValueError):
        # 'sin' is not a supported anneal strategy.
        CyclicLrUpdaterHook(by_epoch=False, anneal_strategy='sin')
    with pytest.raises(AssertionError):
        # gamma=0 is rejected.
        CyclicLrUpdaterHook(by_epoch=False, gamma=0)
    sys.modules['pavi'] = MagicMock()  # stub pavi so PaviLoggerHook can import it
    loader = DataLoader(torch.ones((10, 2)))
    runner = _build_demo_runner(runner_type='IterBasedRunner', max_epochs=None, max_iters=max_iters, multi_optimizers=multi_optimizers)
    schedule_hook = CyclicLrUpdaterHook(by_epoch=False, target_ratio=(10.0, 1.0), cyclic_times=cyclic_times, step_ratio_up=0.5, anneal_strategy='linear', gamma=gamma)
    runner.register_hook(schedule_hook)
    runner.register_hook_from_cfg(dict(type='IterTimerHook'))
    runner.register_hook(IterTimerHook())
    hook = PaviLoggerHook(interval=1, add_graph=False, add_last_ckpt=True)
    runner.register_hook(hook)
    runner.run([loader], [('train', 1)])
    shutil.rmtree(runner.work_dir)
    assert hasattr(hook, 'writer')
    # Expected lr/momentum values at selected iterations; only the lr cycles,
    # momentum stays at its base value.
    if multi_optimizers:
        calls = [call('train', {'learning_rate/model1': 0.02, 'learning_rate/model2': 0.01, 'momentum/model1': 0.95, 'momentum/model2': 0.9}, 1), call('train', {'learning_rate/model1': 0.155, 'learning_rate/model2': 0.0775, 'momentum/model1': 0.95, 'momentum/model2': 0.9}, 4), call('train', {'learning_rate/model1': 0.155, 'learning_rate/model2': 0.0775, 'momentum/model1': 0.95, 'momentum/model2': 0.9}, 6)]
    else:
        calls = [call('train', {'learning_rate': 0.02, 'momentum': 0.95}, 1), call('train', {'learning_rate': 0.11, 'momentum': 0.95}, 4), call('train', {'learning_rate': 0.065, 'momentum': 0.95}, 6), call('train', {'learning_rate': 0.11, 'momentum': 0.95}, 7)]
    hook.writer.add_scalars.assert_has_calls(calls, any_order=True)
|
@pytest.mark.parametrize('log_model', (True, False))
def test_mlflow_hook(log_model):
    """MlflowLoggerHook should log metrics and, when enabled, the model."""
    # Stub the mlflow packages so the hook can import them.
    sys.modules['mlflow'] = MagicMock()
    sys.modules['mlflow.pytorch'] = MagicMock()
    runner = _build_demo_runner()
    data_loader = DataLoader(torch.ones((5, 2)))
    mlflow_hook = MlflowLoggerHook(exp_name='test', log_model=log_model)
    runner.register_hook(mlflow_hook)
    runner.run([data_loader, data_loader], [('train', 1), ('val', 1)])
    shutil.rmtree(runner.work_dir)

    mlflow_hook.mlflow.set_experiment.assert_called_with('test')
    mlflow_hook.mlflow.log_metrics.assert_called_with({'learning_rate': 0.02, 'momentum': 0.95}, step=6)
    if log_model:
        mlflow_hook.mlflow_pytorch.log_model.assert_called_with(runner.model, 'models', pip_requirements=[f'torch=={TORCH_VERSION}'])
    else:
        assert not mlflow_hook.mlflow_pytorch.log_model.called
|
def test_segmind_hook():
    """SegmindLoggerHook should forward metrics through mlflow_log."""
    sys.modules['segmind'] = MagicMock()  # stub the segmind package
    runner = _build_demo_runner()
    segmind_hook = SegmindLoggerHook()
    runner.register_hook(segmind_hook)
    data_loader = DataLoader(torch.ones((5, 2)))
    runner.run([data_loader, data_loader], [('train', 1), ('val', 1)])
    shutil.rmtree(runner.work_dir)

    segmind_hook.mlflow_log.assert_called_with(segmind_hook.log_metrics, {'learning_rate': 0.02, 'momentum': 0.95}, step=runner.epoch, epoch=runner.epoch)
|
def test_wandb_hook():
    """WandbLoggerHook should init wandb, log metrics/artifacts and join."""
    sys.modules['wandb'] = MagicMock()  # stub the wandb package
    runner = _build_demo_runner()
    wandb_hook = WandbLoggerHook(log_artifact=True)
    runner.register_hook(wandb_hook)
    data_loader = DataLoader(torch.ones((5, 2)))
    runner.run([data_loader, data_loader], [('train', 1), ('val', 1)])
    shutil.rmtree(runner.work_dir)

    wandb_hook.wandb.init.assert_called_with()
    wandb_hook.wandb.log.assert_called_with({'learning_rate': 0.02, 'momentum': 0.95}, step=6, commit=True)
    wandb_hook.wandb.log_artifact.assert_called()
    wandb_hook.wandb.join.assert_called_with()
|
def test_neptune_hook():
    """NeptuneLoggerHook should init a run, log metrics and stop the run."""
    # Stub both neptune namespaces the hook may import.
    sys.modules['neptune'] = MagicMock()
    sys.modules['neptune.new'] = MagicMock()
    runner = _build_demo_runner()
    neptune_hook = NeptuneLoggerHook()
    runner.register_hook(neptune_hook)
    data_loader = DataLoader(torch.ones((5, 2)))
    runner.run([data_loader, data_loader], [('train', 1), ('val', 1)])
    shutil.rmtree(runner.work_dir)

    neptune_hook.neptune.init.assert_called_with()
    neptune_hook.run['momentum'].log.assert_called_with(0.95, step=6)
    neptune_hook.run.stop.assert_called_with()
|
def test_dvclive_hook():
    """DvcliveLoggerHook should set the step and log each metric."""
    sys.modules['dvclive'] = MagicMock()  # stub the dvclive package
    runner = _build_demo_runner()
    dvclive_hook = DvcliveLoggerHook()
    dvclive_mock = dvclive_hook.dvclive
    runner.register_hook(dvclive_hook)
    data_loader = DataLoader(torch.ones((5, 2)))
    runner.run([data_loader, data_loader], [('train', 1), ('val', 1)])
    shutil.rmtree(runner.work_dir)

    dvclive_mock.set_step.assert_called_with(6)
    dvclive_mock.log.assert_called_with('momentum', 0.95)
|
def test_dvclive_hook_model_file(tmp_path):
    """DvcliveLoggerHook with `model_file` set should save the checkpoint."""
    sys.modules['dvclive'] = MagicMock()  # stub the dvclive package
    runner = _build_demo_runner()
    hook = DvcliveLoggerHook(model_file=osp.join(runner.work_dir, 'model.pth'))
    runner.register_hook(hook)
    # Fix: the original constructed a throwaway DataLoader and immediately
    # rebound the name to a second, identical one; build it once.
    loader = DataLoader(torch.ones((5, 2)))
    runner.run([loader, loader], [('train', 1), ('val', 1)])
    assert osp.exists(osp.join(runner.work_dir, 'model.pth'))
    shutil.rmtree(runner.work_dir)
|
def _build_demo_runner_without_hook(runner_type='EpochBasedRunner', max_epochs=1, max_iters=None, multi_optimizers=False):
    """Build a bare runner (no hooks registered) around a tiny toy model.

    Args:
        runner_type (str): Registered runner class name.
        max_epochs (int | None): Epoch budget for epoch-based runners.
        max_iters (int | None): Iteration budget for iter-based runners.
        multi_optimizers (bool): If True, use a dict of two SGD optimizers
            (one per sub-module) instead of a single optimizer.

    Returns:
        The runner built by ``build_runner`` with a fresh temporary work dir.
    """

    class Model(nn.Module):
        # Minimal model: only the linear layer is used in forward; the conv
        # layer exists so the multi-optimizer case has a second param group.

        def __init__(self):
            super().__init__()
            self.linear = nn.Linear(2, 1)
            self.conv = nn.Conv2d(3, 3, 3)

        def forward(self, x):
            return self.linear(x)

        def train_step(self, x, optimizer, **kwargs):
            return dict(loss=self(x))

        def val_step(self, x, optimizer, **kwargs):
            return dict(loss=self(x))

    model = Model()
    if multi_optimizers:
        optimizer = dict(
            model1=torch.optim.SGD(model.linear.parameters(), lr=0.02, momentum=0.95),
            model2=torch.optim.SGD(model.conv.parameters(), lr=0.01, momentum=0.9))
    else:
        optimizer = torch.optim.SGD(model.parameters(), lr=0.02, momentum=0.95)
    work_dir = tempfile.mkdtemp()
    default_args = dict(
        model=model,
        work_dir=work_dir,
        optimizer=optimizer,
        logger=logging.getLogger(),
        max_epochs=max_epochs,
        max_iters=max_iters)
    return build_runner(dict(type=runner_type), default_args=default_args)
|
def _build_demo_runner(runner_type='EpochBasedRunner', max_epochs=1, max_iters=None, multi_optimizers=False):
    """Build a demo runner with checkpoint and text-logger hooks registered."""
    runner = _build_demo_runner_without_hook(runner_type, max_epochs, max_iters, multi_optimizers)
    runner.register_checkpoint_hook(dict(interval=1))
    runner.register_logger_hooks(dict(interval=1, hooks=[dict(type='TextLoggerHook')]))
    return runner
|
def test_runner_with_revise_keys():
    """load_checkpoint(revise_keys=...) should add or strip key prefixes."""
    import os

    class Model(nn.Module):

        def __init__(self):
            super().__init__()
            self.conv = nn.Conv2d(3, 3, 1)

    class PrefixModel(nn.Module):

        def __init__(self):
            super().__init__()
            self.backbone = Model()

    prefix_model = PrefixModel()
    plain_model = Model()
    checkpoint_path = os.path.join(tempfile.gettempdir(), 'checkpoint.pth')

    # Case 1: load a plain checkpoint into the wrapped model by prepending
    # 'backbone.' to every key.
    torch.save(plain_model.state_dict(), checkpoint_path)
    runner = _build_demo_runner(runner_type='EpochBasedRunner')
    runner.model = prefix_model
    state_dict = runner.load_checkpoint(checkpoint_path, revise_keys=[('^', 'backbone.')])
    for key in prefix_model.backbone.state_dict().keys():
        assert torch.equal(prefix_model.backbone.state_dict()[key], state_dict[key])

    # Case 2: load a prefixed checkpoint into the plain model by stripping
    # the 'backbone.' prefix from every key.
    torch.save(prefix_model.state_dict(), checkpoint_path)
    runner.model = plain_model
    state_dict = runner.load_checkpoint(checkpoint_path, revise_keys=[('^backbone\\.', '')])
    for key in state_dict.keys():
        key_stripped = re.sub('^backbone\\.', '', key)
        assert torch.equal(plain_model.state_dict()[key_stripped], state_dict[key])
    os.remove(checkpoint_path)
|
def test_get_triggered_stages():
    """get_triggered_stages should expand generic stages into concrete ones."""

    class ToyHook(Hook):
        # Fix: the overridden methods were missing `self` and would raise
        # TypeError if the runner ever invoked them; get_triggered_stages
        # only inspects which methods are overridden, so the result is the same.

        def before_run(self):
            pass

        def after_epoch(self):
            pass

    hook = ToyHook()
    # The generic 'after_epoch' stage maps to both train and val epoch stages.
    expected_stages = ['before_run', 'after_train_epoch', 'after_val_epoch']
    assert hook.get_triggered_stages() == expected_stages
|
def test_gradient_cumulative_optimizer_hook():
    """GradientCumulativeOptimizerHook with cumulative_iters=k and batch size
    b should yield the same weights as a plain OptimizerHook with batch size
    k * b, for both epoch-based and iter-based runners.
    """

    class ToyModel(nn.Module):
        # Tiny model with constant-initialized weights so that the two
        # runners under comparison start from identical parameters.

        def __init__(self, with_norm=False):
            super().__init__()
            self.fp16_enabled = False
            self.fc = nn.Linear(3, 2)
            nn.init.constant_(self.fc.weight, 1.0)
            nn.init.constant_(self.fc.bias, 1.0)
            self.with_norm = with_norm
            if with_norm:
                # Optional BatchNorm used only by the has_batch_norm() check.
                self.norm = nn.BatchNorm1d(2)

        def forward(self, x):
            x = self.fc(x)
            if self.with_norm:
                x = self.norm(x)
            return x

        def train_step(self, x, optimizer, **kwargs):
            return dict(loss=self(x).mean(), num_samples=x.shape[0])

        def val_step(self, x, optimizer, **kwargs):
            return dict(loss=self(x).mean(), num_samples=x.shape[0])

    def build_toy_runner(config=dict(type='EpochBasedRunner', max_epochs=3)):
        # NOTE(review): the dict default argument is shared across calls but
        # is never mutated here, so this is safe in practice.
        model = ToyModel()
        optimizer = torch.optim.SGD(model.parameters(), lr=0.02)
        tmp_dir = tempfile.mkdtemp()
        runner = build_runner(config, default_args=dict(model=model, work_dir=tmp_dir, optimizer=optimizer, logger=logging.getLogger(), meta=dict()))
        return runner

    with pytest.raises(AssertionError):
        # cumulative_iters must be an int, not a string.
        GradientCumulativeOptimizerHook(cumulative_iters='str')
    with pytest.raises(AssertionError):
        # cumulative_iters must be positive.
        GradientCumulativeOptimizerHook(cumulative_iters=(- 1))
    # Epoch-based: accumulating 3 batches of size 1 should equal one batch of
    # size 3 under the plain OptimizerHook.
    data = torch.rand((6, 3))
    loader_1 = DataLoader(data, batch_size=1)
    runner_1 = build_toy_runner()
    optimizer_hook = GradientCumulativeOptimizerHook(grad_clip=dict(max_norm=0.2), cumulative_iters=3)
    runner_1.register_hook(optimizer_hook)
    runner_1.run([loader_1], [('train', 1)])
    loader_2 = DataLoader(data, batch_size=3)
    runner_2 = build_toy_runner()
    optimizer_hook = OptimizerHook(grad_clip=dict(max_norm=0.2))
    runner_2.register_hook(optimizer_hook)
    runner_2.run([loader_2], [('train', 1)])
    # Training reduced the weights, and both runners agree.
    assert (runner_1.model.fc.weight < 1).all()
    assert (runner_1.model.fc.bias < 1).all()
    assert torch.allclose(runner_1.model.fc.weight, runner_2.model.fc.weight)
    assert torch.allclose(runner_1.model.fc.bias, runner_2.model.fc.bias)
    shutil.rmtree(runner_1.work_dir)
    shutil.rmtree(runner_2.work_dir)
    # Iter-based: 8 iters at batch size 1 (accumulated in 3s, with a
    # remainder) vs 2 iters of batch size 3 plus 1 iter of batch size 2.
    data = torch.rand((8, 3))
    loader_1 = DataLoader(data, batch_size=1)
    runner_1 = build_toy_runner(dict(type='IterBasedRunner', max_iters=8))
    optimizer_hook = GradientCumulativeOptimizerHook(grad_clip=dict(max_norm=0.2), cumulative_iters=3)
    runner_1.register_hook(optimizer_hook)
    runner_1.run([loader_1], [('train', 1)])
    loader_2_divisible = DataLoader(data[:6], batch_size=3)
    loader_2_remainder = DataLoader(data[6:], batch_size=2)
    runner_2 = build_toy_runner(dict(type='IterBasedRunner', max_iters=3))
    optimizer_hook = OptimizerHook(grad_clip=dict(max_norm=0.2))
    runner_2.register_hook(optimizer_hook)
    runner_2.run([loader_2_divisible, loader_2_remainder], [('train', 2), ('train', 1)])
    assert (runner_1.model.fc.weight < 1).all()
    assert (runner_1.model.fc.bias < 1).all()
    assert torch.allclose(runner_1.model.fc.weight, runner_2.model.fc.weight)
    assert torch.allclose(runner_1.model.fc.bias, runner_2.model.fc.bias)
    shutil.rmtree(runner_1.work_dir)
    shutil.rmtree(runner_2.work_dir)
    # has_batch_norm() should detect the BatchNorm layer in the model.
    model = ToyModel(with_norm=True)
    optimizer_hook = GradientCumulativeOptimizerHook(grad_clip=dict(max_norm=0.2), cumulative_iters=3)
    assert optimizer_hook.has_batch_norm(model)
|
@pytest.mark.skipif((not torch.cuda.is_available()), reason='requires CUDA support')
def test_gradient_cumulative_fp16_optimizer_hook():
    """GradientCumulativeFp16OptimizerHook(cumulative_iters=k) with batch
    size b must yield the same weights as Fp16OptimizerHook with batch size
    k*b, for both EpochBasedRunner and IterBasedRunner (including a final
    partial accumulation window in the iter-based case).
    """
    class ToyModel(nn.Module):
        # Minimal fp16-capable model; weights/bias start at 1.0 so the
        # "< 1" assertions below prove training actually updated them.
        def __init__(self):
            super().__init__()
            self.fp16_enabled = False
            self.fc = nn.Linear(3, 2)
            nn.init.constant_(self.fc.weight, 1.0)
            nn.init.constant_(self.fc.bias, 1.0)
        @auto_fp16(apply_to=('x',))
        def forward(self, x):
            x = self.fc(x)
            return x
        def train_step(self, x, optimizer, **kwargs):
            return dict(loss=self(x).mean(), num_samples=x.shape[0])
        def val_step(self, x, optimizer, **kwargs):
            return dict(loss=self(x).mean(), num_samples=x.shape[0])
    def build_toy_runner(config=dict(type='EpochBasedRunner', max_epochs=3)):
        # NOTE(review): mutable default argument is shared across calls;
        # harmless here because the config dict is never mutated.
        model = ToyModel().cuda()
        optimizer = torch.optim.SGD(model.parameters(), lr=0.02)
        tmp_dir = tempfile.mkdtemp()
        runner = build_runner(config, default_args=dict(model=model, work_dir=tmp_dir, optimizer=optimizer, logger=logging.getLogger(), meta=dict()))
        return runner
    # Case 1 (epoch-based): accumulating 3 batches of size 1 must equal
    # one optimizer step over a single batch of size 3.
    data = torch.rand((6, 3)).cuda()
    loader_1 = DataLoader(data, batch_size=1)
    runner_1 = build_toy_runner()
    optimizer_hook = GradientCumulativeFp16OptimizerHook(grad_clip=dict(max_norm=0.2), cumulative_iters=3)
    runner_1.register_hook(optimizer_hook)
    runner_1.run([loader_1], [('train', 1)])
    loader_2 = DataLoader(data, batch_size=3)
    runner_2 = build_toy_runner()
    optimizer_hook = Fp16OptimizerHook(grad_clip=dict(max_norm=0.2))
    runner_2.register_hook(optimizer_hook)
    runner_2.run([loader_2], [('train', 1)])
    assert (runner_1.model.fc.weight < 1).all()
    assert (runner_1.model.fc.bias < 1).all()
    assert torch.allclose(runner_1.model.fc.weight, runner_2.model.fc.weight)
    assert torch.allclose(runner_1.model.fc.bias, runner_2.model.fc.bias)
    shutil.rmtree(runner_1.work_dir)
    shutil.rmtree(runner_2.work_dir)
    # Case 2 (iter-based): 8 samples with cumulative_iters=3 leave a
    # trailing partial window of 2, matched by an explicit remainder
    # loader of batch size 2 on the reference runner.
    data = torch.rand((8, 3)).cuda()
    loader_1 = DataLoader(data, batch_size=1)
    runner_1 = build_toy_runner(dict(type='IterBasedRunner', max_iters=8))
    optimizer_hook = GradientCumulativeFp16OptimizerHook(grad_clip=dict(max_norm=0.2), cumulative_iters=3)
    runner_1.register_hook(optimizer_hook)
    runner_1.run([loader_1], [('train', 1)])
    loader_2_divisible = DataLoader(data[:6], batch_size=3)
    loader_2_remainder = DataLoader(data[6:], batch_size=2)
    runner_2 = build_toy_runner(dict(type='IterBasedRunner', max_iters=3))
    optimizer_hook = Fp16OptimizerHook(grad_clip=dict(max_norm=0.2))
    runner_2.register_hook(optimizer_hook)
    runner_2.run([loader_2_divisible, loader_2_remainder], [('train', 2), ('train', 1)])
    assert (runner_1.model.fc.weight < 1).all()
    assert (runner_1.model.fc.bias < 1).all()
    assert torch.allclose(runner_1.model.fc.weight, runner_2.model.fc.weight)
    assert torch.allclose(runner_1.model.fc.bias, runner_2.model.fc.bias)
    shutil.rmtree(runner_1.work_dir)
    shutil.rmtree(runner_2.work_dir)
|
class SubModel(nn.Module):
    """Toy submodule combining a depthwise conv, a GroupNorm layer and a
    bare Parameter, used to exercise the paramwise options of the
    optimizer constructors.
    """
    def __init__(self):
        super().__init__()
        # groups == channels makes this a depthwise conv, which the
        # dwconv_decay_mult option applies to (see check_sgd_optimizer).
        self.conv1 = nn.Conv2d(2, 2, kernel_size=1, groups=2)
        self.gn = nn.GroupNorm(2, 2)
        self.param1 = nn.Parameter(torch.ones(1))
    def forward(self, x):
        # Identity: these tests only inspect parameters, never outputs.
        return x
|
class ExampleModel(nn.Module):
    """Model whose parameter order matches the ``param_names`` lists in
    ``check_default_optimizer``; the order of attribute assignments below
    therefore must not change.
    """
    def __init__(self):
        super().__init__()
        self.param1 = nn.Parameter(torch.ones(1))
        self.conv1 = nn.Conv2d(3, 4, kernel_size=1, bias=False)
        self.conv2 = nn.Conv2d(4, 2, kernel_size=1)
        self.bn = nn.BatchNorm2d(2)
        self.sub = SubModel()
        # The deform conv contributes three extra parameters (weight,
        # conv_offset.weight, conv_offset.bias) only when the compiled
        # mmcv ops are available.
        if OPS_AVAILABLE:
            from mmcv.ops import DeformConv2dPack
            self.dcn = DeformConv2dPack(3, 4, kernel_size=3, deformable_groups=1)
    def forward(self, x):
        # Identity: only the parameters matter for these tests.
        return x
|
class ExampleDuplicateModel(nn.Module):
    """Like ExampleModel but ``conv3`` aliases ``conv1``'s inner conv, so
    the same parameters are visited twice during traversal — used to test
    duplicate-parameter detection and ``bypass_duplicate``.
    """
    def __init__(self):
        super().__init__()
        self.param1 = nn.Parameter(torch.ones(1))
        self.conv1 = nn.Sequential(nn.Conv2d(3, 4, kernel_size=1, bias=False))
        self.conv2 = nn.Sequential(nn.Conv2d(4, 2, kernel_size=1))
        self.bn = nn.BatchNorm2d(2)
        self.sub = SubModel()
        self.conv3 = nn.Sequential(nn.Conv2d(3, 4, kernel_size=1, bias=False))
        # Alias conv1's conv into conv3: the duplicated parameters.
        self.conv3[0] = self.conv1[0]
        if OPS_AVAILABLE:
            from mmcv.ops import DeformConv2dPack
            self.dcn = DeformConv2dPack(3, 4, kernel_size=3, deformable_groups=1)
    def forward(self, x):
        # Identity: outputs are never used in these tests.
        return x
|
class PseudoDataParallel(nn.Module):
    """Mimics DataParallel by exposing a ``.module`` attribute, so the
    parameter names gain the ``'module.'`` prefix without needing CUDA.
    """
    def __init__(self):
        super().__init__()
        self.module = ExampleModel()
    def forward(self, x):
        # Identity: only the parameter layout matters here.
        return x
|
def check_default_optimizer(optimizer, model, prefix=''):
    """Verify that ``optimizer`` is a plain SGD over all of ``model``'s
    parameters, with the module-level ``base_lr``/``momentum``/``base_wd``
    defaults and a single param group ordered like ``named_parameters()``.

    Args:
        optimizer (torch.optim.Optimizer): Optimizer under test.
        model (nn.Module): The model the optimizer was built from.
        prefix (str): Prefix of parameter names, e.g. ``'module.'`` for
            (Pseudo)DataParallel-wrapped models.
    """
    assert isinstance(optimizer, torch.optim.SGD)
    assert (optimizer.defaults['lr'] == base_lr)
    assert (optimizer.defaults['momentum'] == momentum)
    assert (optimizer.defaults['weight_decay'] == base_wd)
    param_groups = optimizer.param_groups[0]
    # The deform conv adds three extra parameters when mmcv ops are compiled.
    if OPS_AVAILABLE:
        param_names = ['param1', 'conv1.weight', 'conv2.weight', 'conv2.bias', 'bn.weight', 'bn.bias', 'sub.param1', 'sub.conv1.weight', 'sub.conv1.bias', 'sub.gn.weight', 'sub.gn.bias', 'dcn.weight', 'dcn.conv_offset.weight', 'dcn.conv_offset.bias']
    else:
        param_names = ['param1', 'conv1.weight', 'conv2.weight', 'conv2.bias', 'bn.weight', 'bn.bias', 'sub.param1', 'sub.conv1.weight', 'sub.conv1.bias', 'sub.gn.weight', 'sub.gn.bias']
    param_dict = dict(model.named_parameters())
    assert (len(param_groups['params']) == len(param_names))
    # Parameters must appear in the same order as named_parameters().
    for param, name in zip(param_groups['params'], param_names):
        assert torch.equal(param, param_dict[prefix + name])
|
def check_sgd_optimizer(optimizer, model, prefix='', bias_lr_mult=1, bias_decay_mult=1, norm_decay_mult=1, dwconv_decay_mult=1, dcn_offset_lr_mult=1, bypass_duplicate=False):
    """Verify the per-parameter lr/weight-decay produced by the paramwise
    options of ``DefaultOptimizerConstructor`` against ExampleModel's
    layout: one param group per parameter, in ``named_parameters()``
    order. The keyword arguments mirror the ``paramwise_cfg`` keys.
    """
    param_groups = optimizer.param_groups
    assert isinstance(optimizer, torch.optim.SGD)
    assert (optimizer.defaults['lr'] == base_lr)
    assert (optimizer.defaults['momentum'] == momentum)
    assert (optimizer.defaults['weight_decay'] == base_wd)
    model_parameters = list(model.parameters())
    assert (len(param_groups) == len(model_parameters))
    for (i, param) in enumerate(model_parameters):
        param_group = param_groups[i]
        assert torch.equal(param_group['params'][0], param)
        assert (param_group['momentum'] == momentum)
    # Group indices follow ExampleModel's registration order.
    param1 = param_groups[0]
    assert (param1['lr'] == base_lr)
    assert (param1['weight_decay'] == base_wd)
    conv1_weight = param_groups[1]
    assert (conv1_weight['lr'] == base_lr)
    assert (conv1_weight['weight_decay'] == base_wd)
    conv2_weight = param_groups[2]
    assert (conv2_weight['lr'] == base_lr)
    assert (conv2_weight['weight_decay'] == base_wd)
    conv2_bias = param_groups[3]
    assert (conv2_bias['lr'] == (base_lr * bias_lr_mult))
    assert (conv2_bias['weight_decay'] == (base_wd * bias_decay_mult))
    bn_weight = param_groups[4]
    assert (bn_weight['lr'] == base_lr)
    assert (bn_weight['weight_decay'] == (base_wd * norm_decay_mult))
    bn_bias = param_groups[5]
    assert (bn_bias['lr'] == base_lr)
    assert (bn_bias['weight_decay'] == (base_wd * norm_decay_mult))
    sub_param1 = param_groups[6]
    assert (sub_param1['lr'] == base_lr)
    assert (sub_param1['weight_decay'] == base_wd)
    sub_conv1_weight = param_groups[7]
    assert (sub_conv1_weight['lr'] == base_lr)
    assert (sub_conv1_weight['weight_decay'] == (base_wd * dwconv_decay_mult))
    sub_conv1_bias = param_groups[8]
    assert (sub_conv1_bias['lr'] == (base_lr * bias_lr_mult))
    assert (sub_conv1_bias['weight_decay'] == (base_wd * dwconv_decay_mult))
    sub_gn_weight = param_groups[9]
    assert (sub_gn_weight['lr'] == base_lr)
    assert (sub_gn_weight['weight_decay'] == (base_wd * norm_decay_mult))
    sub_gn_bias = param_groups[10]
    assert (sub_gn_bias['lr'] == base_lr)
    assert (sub_gn_bias['weight_decay'] == (base_wd * norm_decay_mult))
    # BUGFIX: the three DCN groups exist whenever the compiled mmcv ops
    # are present, regardless of CUDA availability, so gate on
    # OPS_AVAILABLE (matching ExampleModel and check_default_optimizer)
    # instead of torch.cuda.is_available(): the old guard raised
    # IndexError with CUDA but no ops, and skipped the checks with ops
    # but no CUDA.
    if OPS_AVAILABLE:
        dcn_conv_weight = param_groups[11]
        assert (dcn_conv_weight['lr'] == base_lr)
        assert (dcn_conv_weight['weight_decay'] == base_wd)
        dcn_offset_weight = param_groups[12]
        assert (dcn_offset_weight['lr'] == (base_lr * dcn_offset_lr_mult))
        assert (dcn_offset_weight['weight_decay'] == base_wd)
        dcn_offset_bias = param_groups[13]
        assert (dcn_offset_bias['lr'] == (base_lr * dcn_offset_lr_mult))
        assert (dcn_offset_bias['weight_decay'] == base_wd)
|
def test_default_optimizer_constructor():
    """Checks for DefaultOptimizerConstructor: config validation, plain
    and paramwise construction, (Pseudo)DataParallel prefixes, frozen
    parameters, duplicated parameters, and ``custom_keys``.
    """
    model = ExampleModel()
    # optimizer_cfg must be a dict.
    with pytest.raises(TypeError):
        optimizer_cfg = []
        optim_constructor = DefaultOptimizerConstructor(optimizer_cfg)
        optim_constructor(model)
    # paramwise_cfg must be a dict or None.
    with pytest.raises(TypeError):
        optimizer_cfg = dict(lr=0.0001)
        paramwise_cfg = ['error']
        optim_constructor = DefaultOptimizerConstructor(optimizer_cfg, paramwise_cfg)
        optim_constructor(model)
    # Decay multipliers require a concrete weight_decay.
    with pytest.raises(ValueError):
        optimizer_cfg = dict(lr=0.0001, weight_decay=None)
        paramwise_cfg = dict(bias_decay_mult=1, norm_decay_mult=1)
        optim_constructor = DefaultOptimizerConstructor(optimizer_cfg, paramwise_cfg)
        optim_constructor(model)
    # Plain construction without paramwise_cfg.
    optimizer_cfg = dict(type='SGD', lr=base_lr, weight_decay=base_wd, momentum=momentum)
    optim_constructor = DefaultOptimizerConstructor(optimizer_cfg)
    optimizer = optim_constructor(model)
    check_default_optimizer(optimizer, model)
    # Models exposing a .module attribute get the 'module.' prefix.
    model = PseudoDataParallel()
    optimizer_cfg = dict(type='SGD', lr=base_lr, weight_decay=base_wd, momentum=momentum)
    paramwise_cfg = None
    optim_constructor = DefaultOptimizerConstructor(optimizer_cfg)
    optimizer = optim_constructor(model)
    check_default_optimizer(optimizer, model, prefix='module.')
    if torch.cuda.is_available():
        model = torch.nn.DataParallel(ExampleModel())
        optimizer_cfg = dict(type='SGD', lr=base_lr, weight_decay=base_wd, momentum=momentum)
        paramwise_cfg = None
        optim_constructor = DefaultOptimizerConstructor(optimizer_cfg)
        optimizer = optim_constructor(model)
        check_default_optimizer(optimizer, model, prefix='module.')
    # An empty paramwise_cfg behaves like no paramwise_cfg at all.
    model = ExampleModel()
    optimizer_cfg = dict(type='SGD', lr=base_lr, weight_decay=base_wd, momentum=momentum)
    paramwise_cfg = dict()
    optim_constructor = DefaultOptimizerConstructor(optimizer_cfg, paramwise_cfg)
    optimizer = optim_constructor(model)
    check_default_optimizer(optimizer, model)
    # Frozen parameters still all land in the single default group.
    model = ExampleModel()
    for param in model.parameters():
        param.requires_grad = False
    optimizer_cfg = dict(type='SGD', lr=base_lr, weight_decay=base_wd, momentum=momentum)
    paramwise_cfg = dict()
    optim_constructor = DefaultOptimizerConstructor(optimizer_cfg)
    optimizer = optim_constructor(model)
    check_default_optimizer(optimizer, model)
    # Full paramwise_cfg: per-parameter lr/decay multipliers.
    model = ExampleModel()
    optimizer_cfg = dict(type='SGD', lr=base_lr, weight_decay=base_wd, momentum=momentum)
    paramwise_cfg = dict(bias_lr_mult=2, bias_decay_mult=0.5, norm_decay_mult=0, dwconv_decay_mult=0.1, dcn_offset_lr_mult=0.1)
    optim_constructor = DefaultOptimizerConstructor(optimizer_cfg, paramwise_cfg)
    optimizer = optim_constructor(model)
    check_sgd_optimizer(optimizer, model, **paramwise_cfg)
    # Optimizers without weight_decay support (Rprop) only honour lr mults.
    model = ExampleModel()
    optimizer_cfg = dict(type='Rprop', lr=base_lr)
    paramwise_cfg = dict(bias_lr_mult=2)
    optim_constructor = DefaultOptimizerConstructor(optimizer_cfg, paramwise_cfg)
    optimizer = optim_constructor(model)
    param_groups = optimizer.param_groups
    assert isinstance(optimizer, torch.optim.Rprop)
    assert (optimizer.defaults['lr'] == base_lr)
    model_parameters = list(model.parameters())
    assert (len(param_groups) == len(model_parameters))
    for (i, param) in enumerate(model_parameters):
        param_group = param_groups[i]
        assert torch.equal(param_group['params'][0], param)
    # Only the two bias parameters (indices 3 and 8) get bias_lr_mult.
    assert (param_groups[0]['lr'] == base_lr)
    assert (param_groups[1]['lr'] == base_lr)
    assert (param_groups[2]['lr'] == base_lr)
    assert (param_groups[3]['lr'] == (base_lr * paramwise_cfg['bias_lr_mult']))
    assert (param_groups[4]['lr'] == base_lr)
    assert (param_groups[5]['lr'] == base_lr)
    assert (param_groups[6]['lr'] == base_lr)
    assert (param_groups[7]['lr'] == base_lr)
    assert (param_groups[8]['lr'] == (base_lr * paramwise_cfg['bias_lr_mult']))
    assert (param_groups[9]['lr'] == base_lr)
    assert (param_groups[10]['lr'] == base_lr)
    if OPS_AVAILABLE:
        assert (param_groups[11]['lr'] == base_lr)
        assert (param_groups[12]['lr'] == base_lr)
        assert (param_groups[13]['lr'] == base_lr)
    # paramwise_cfg together with the 'module.' prefix.
    model = PseudoDataParallel()
    optimizer_cfg = dict(type='SGD', lr=base_lr, weight_decay=base_wd, momentum=momentum)
    paramwise_cfg = dict(bias_lr_mult=2, bias_decay_mult=0.5, norm_decay_mult=0, dwconv_decay_mult=0.1, dcn_offset_lr_mult=0.1)
    optim_constructor = DefaultOptimizerConstructor(optimizer_cfg, paramwise_cfg)
    optimizer = optim_constructor(model)
    check_sgd_optimizer(optimizer, model, prefix='module.', **paramwise_cfg)
    if torch.cuda.is_available():
        model = torch.nn.DataParallel(ExampleModel())
        optimizer_cfg = dict(type='SGD', lr=base_lr, weight_decay=base_wd, momentum=momentum)
        paramwise_cfg = dict(bias_lr_mult=2, bias_decay_mult=0.5, norm_decay_mult=0, dwconv_decay_mult=0.1, dcn_offset_lr_mult=0.1)
        optim_constructor = DefaultOptimizerConstructor(optimizer_cfg, paramwise_cfg)
        optimizer = optim_constructor(model)
        check_sgd_optimizer(optimizer, model, prefix='module.', **paramwise_cfg)
        # With every parameter frozen, paramwise multipliers are skipped
        # and all groups fall back to the defaults.
        for param in model.parameters():
            param.requires_grad = False
        optim_constructor = DefaultOptimizerConstructor(optimizer_cfg, paramwise_cfg)
        optimizer = optim_constructor(model)
        param_groups = optimizer.param_groups
        assert isinstance(optimizer, torch.optim.SGD)
        assert (optimizer.defaults['lr'] == base_lr)
        assert (optimizer.defaults['momentum'] == momentum)
        assert (optimizer.defaults['weight_decay'] == base_wd)
        for (i, (name, param)) in enumerate(model.named_parameters()):
            param_group = param_groups[i]
            assert torch.equal(param_group['params'][0], param)
            assert (param_group['momentum'] == momentum)
            assert (param_group['lr'] == base_lr)
            assert (param_group['weight_decay'] == base_wd)
    # Duplicated parameters raise unless bypass_duplicate is set.
    model = ExampleDuplicateModel()
    optimizer_cfg = dict(type='SGD', lr=base_lr, weight_decay=base_wd, momentum=momentum)
    paramwise_cfg = dict(bias_lr_mult=2, bias_decay_mult=0.5, norm_decay_mult=0, dwconv_decay_mult=0.1)
    with pytest.raises(ValueError) as excinfo:
        optim_constructor = DefaultOptimizerConstructor(optimizer_cfg, paramwise_cfg)
        optim_constructor(model)
    # BUGFIX: compare against the exception *message*; the original
    # compared a str to the ExceptionInfo's exception object, which is
    # always False.
    assert ('some parameters appear in more than one parameter group' in str(excinfo.value))
    paramwise_cfg = dict(bias_lr_mult=2, bias_decay_mult=0.5, norm_decay_mult=0, dwconv_decay_mult=0.1, dcn_offset_lr_mult=0.1, bypass_duplicate=True)
    optim_constructor = DefaultOptimizerConstructor(optimizer_cfg, paramwise_cfg)
    with warnings.catch_warnings(record=True) as w:
        # BUGFIX: install the filter *before* the call that is expected
        # to warn, otherwise the warning may be deduplicated away.
        warnings.simplefilter('always')
        optimizer = optim_constructor(model)
        assert (len(w) == 1)
        assert (str(w[0].message) == 'conv3.0 is duplicate. It is skipped since bypass_duplicate=True')
    model_parameters = list(model.parameters())
    num_params = (14 if OPS_AVAILABLE else 11)
    assert (len(optimizer.param_groups) == len(model_parameters) == num_params)
    check_sgd_optimizer(optimizer, model, **paramwise_cfg)
    # custom_keys: invalid types raise; the longest matching key wins.
    model = ExampleModel()
    optimizer_cfg = dict(type='SGD', lr=base_lr, weight_decay=base_wd, momentum=momentum)
    paramwise_cfg = dict(custom_keys={'param1': dict(lr_mult=10), 'sub': dict(lr_mult=0.1, decay_mult=0), 'sub.gn': dict(lr_mult=0.01), 'non_exist_key': dict(lr_mult=0.0)}, norm_decay_mult=0.5)
    with pytest.raises(TypeError):
        paramwise_cfg_ = dict(custom_keys=[0.1, 0.0001])
        optim_constructor = DefaultOptimizerConstructor(optimizer_cfg, paramwise_cfg_)
        optimizer = optim_constructor(model)
    with pytest.raises(ValueError):
        optimizer_cfg_ = dict(type='SGD', lr=0.01)
        paramwise_cfg_ = dict(custom_keys={'.backbone': dict(decay_mult=0.5)})
        optim_constructor = DefaultOptimizerConstructor(optimizer_cfg_, paramwise_cfg_)
        optimizer = optim_constructor(model)
    optim_constructor = DefaultOptimizerConstructor(optimizer_cfg, paramwise_cfg)
    optimizer = optim_constructor(model)
    assert isinstance(optimizer, torch.optim.SGD)
    assert (optimizer.defaults['lr'] == base_lr)
    assert (optimizer.defaults['momentum'] == momentum)
    assert (optimizer.defaults['weight_decay'] == base_wd)
    param_groups = optimizer.param_groups
    # Expected settings per named group of parameters.
    groups = []
    group_settings = []
    groups.append(['param1', 'sub.param1'])
    group_settings.append({'lr': (base_lr * 10), 'momentum': momentum, 'weight_decay': base_wd})
    groups.append(['sub.gn.weight', 'sub.gn.bias'])
    group_settings.append({'lr': (base_lr * 0.01), 'momentum': momentum, 'weight_decay': base_wd})
    groups.append(['sub.conv1.weight', 'sub.conv1.bias'])
    group_settings.append({'lr': (base_lr * 0.1), 'momentum': momentum, 'weight_decay': 0})
    groups.append(['bn.weight', 'bn.bias'])
    group_settings.append({'lr': base_lr, 'momentum': momentum, 'weight_decay': (base_wd * 0.5)})
    groups.append(['conv1.weight', 'conv2.weight', 'conv2.bias'])
    group_settings.append({'lr': base_lr, 'momentum': momentum, 'weight_decay': base_wd})
    num_params = (14 if OPS_AVAILABLE else 11)
    assert (len(param_groups) == num_params)
    for (i, (name, param)) in enumerate(model.named_parameters()):
        assert torch.equal(param_groups[i]['params'][0], param)
        for (group, settings) in zip(groups, group_settings):
            if (name in group):
                for setting in settings:
                    assert (param_groups[i][setting] == settings[setting]), f'{name} {setting}'
    # custom_keys without a global weight_decay: decay defaults to 0.
    model = ExampleModel()
    optimizer_cfg = dict(type='SGD', lr=base_lr, momentum=momentum)
    paramwise_cfg = dict(custom_keys={'param1': dict(lr_mult=10)})
    optim_constructor = DefaultOptimizerConstructor(optimizer_cfg, paramwise_cfg)
    optimizer = optim_constructor(model)
    assert isinstance(optimizer, torch.optim.SGD)
    assert (optimizer.defaults['lr'] == base_lr)
    assert (optimizer.defaults['momentum'] == momentum)
    assert (optimizer.defaults['weight_decay'] == 0)
    param_groups = optimizer.param_groups
    groups = []
    group_settings = []
    groups.append(['param1', 'sub.param1'])
    group_settings.append({'lr': (base_lr * 10), 'momentum': momentum, 'weight_decay': 0})
    groups.append(['sub.conv1.weight', 'sub.conv1.bias', 'sub.gn.weight', 'sub.gn.bias', 'conv1.weight', 'conv2.weight', 'conv2.bias', 'bn.weight', 'bn.bias'])
    group_settings.append({'lr': base_lr, 'momentum': momentum, 'weight_decay': 0})
    num_params = (14 if OPS_AVAILABLE else 11)
    assert (len(param_groups) == num_params)
    for (i, (name, param)) in enumerate(model.named_parameters()):
        assert torch.equal(param_groups[i]['params'][0], param)
        for (group, settings) in zip(groups, group_settings):
            if (name in group):
                for setting in settings:
                    assert (param_groups[i][setting] == settings[setting]), f'{name} {setting}'
|
def test_torch_optimizers():
    """TORCH_OPTIMIZERS must expose at least the common torch optimizers."""
    expected = {'ASGD', 'Adadelta', 'Adagrad', 'Adam', 'AdamW', 'Adamax', 'LBFGS', 'Optimizer', 'RMSprop', 'Rprop', 'SGD', 'SparseAdam'}
    assert expected <= set(TORCH_OPTIMIZERS)
|
def test_build_optimizer_constructor():
    """build_optimizer_constructor for both the default constructor and a
    custom one registered on OPTIMIZER_BUILDERS."""
    model = ExampleModel()
    optimizer_cfg = dict(type='SGD', lr=base_lr, weight_decay=base_wd, momentum=momentum)
    paramwise_cfg = dict(bias_lr_mult=2, bias_decay_mult=0.5, norm_decay_mult=0, dwconv_decay_mult=0.1, dcn_offset_lr_mult=0.1)
    optim_constructor_cfg = dict(type='DefaultOptimizerConstructor', optimizer_cfg=optimizer_cfg, paramwise_cfg=paramwise_cfg)
    optim_constructor = build_optimizer_constructor(optim_constructor_cfg)
    optimizer = optim_constructor(model)
    check_sgd_optimizer(optimizer, model, **paramwise_cfg)
    from mmcv.runner import OPTIMIZERS
    from mmcv.utils import build_from_cfg
    # NOTE: registration mutates the global OPTIMIZER_BUILDERS registry;
    # running this function twice would fail on the duplicate name.
    @OPTIMIZER_BUILDERS.register_module()
    class MyOptimizerConstructor(DefaultOptimizerConstructor):
        def __call__(self, model):
            if hasattr(model, 'module'):
                model = model.module
            conv1_lr_mult = self.paramwise_cfg.get('conv1_lr_mult', 1.0)
            params = []
            for (name, param) in model.named_parameters():
                param_group = {'params': [param]}
                # Custom rule: scale lr only for conv1's trainable params.
                if (name.startswith('conv1') and param.requires_grad):
                    param_group['lr'] = (self.base_lr * conv1_lr_mult)
                params.append(param_group)
            # Closes over (and mutates) the outer optimizer_cfg.
            optimizer_cfg['params'] = params
            return build_from_cfg(optimizer_cfg, OPTIMIZERS)
    paramwise_cfg = dict(conv1_lr_mult=5)
    optim_constructor_cfg = dict(type='MyOptimizerConstructor', optimizer_cfg=optimizer_cfg, paramwise_cfg=paramwise_cfg)
    optim_constructor = build_optimizer_constructor(optim_constructor_cfg)
    optimizer = optim_constructor(model)
    param_groups = optimizer.param_groups
    assert isinstance(optimizer, torch.optim.SGD)
    assert (optimizer.defaults['lr'] == base_lr)
    assert (optimizer.defaults['momentum'] == momentum)
    assert (optimizer.defaults['weight_decay'] == base_wd)
    for (i, param) in enumerate(model.parameters()):
        param_group = param_groups[i]
        assert torch.equal(param_group['params'][0], param)
        assert (param_group['momentum'] == momentum)
    # conv1.weight is parameter index 1 and gets the custom lr multiplier.
    assert (param_groups[1]['lr'] == (base_lr * paramwise_cfg['conv1_lr_mult']))
    assert (param_groups[1]['weight_decay'] == base_wd)
|
def test_build_optimizer():
    """build_optimizer with and without an embedded paramwise_cfg."""
    # Plain config: every parameter lands in one default group.
    cfg = dict(type='SGD', lr=base_lr, weight_decay=base_wd, momentum=momentum)
    model = ExampleModel()
    check_default_optimizer(build_optimizer(model, cfg), model)
    # Config carrying paramwise_cfg: per-parameter lr/decay multipliers.
    paramwise = dict(bias_lr_mult=2, bias_decay_mult=0.5, norm_decay_mult=0, dwconv_decay_mult=0.1, dcn_offset_lr_mult=0.1)
    cfg = dict(type='SGD', lr=base_lr, weight_decay=base_wd, momentum=momentum, paramwise_cfg=paramwise)
    model = ExampleModel()
    check_sgd_optimizer(build_optimizer(model, cfg), model, **paramwise)
|
class OldStyleModel(nn.Module):
    """Model without train_step/val_step; runners require an external
    batch_processor to use it."""
    def __init__(self):
        super().__init__()
        self.conv = nn.Conv2d(3, 3, 1)
|
class Model(OldStyleModel):
    """New-style model exposing (no-op) train_step/val_step methods."""
    def train_step(self):
        pass
    def val_step(self):
        pass
|
def test_build_runner():
    """build_runner dispatches on cfg['type'] and rejects ambiguous configs."""
    work_dir = osp.join(tempfile.gettempdir(), ''.join(random.choice(string.ascii_letters) for _ in range(10)))
    default_args = dict(model=Model(), work_dir=work_dir, logger=logging.getLogger())
    epoch_runner = build_runner(dict(type='EpochBasedRunner', max_epochs=1), default_args=default_args)
    assert epoch_runner._max_epochs == 1
    iter_runner = build_runner(dict(type='IterBasedRunner', max_iters=1), default_args=default_args)
    assert iter_runner._max_iters == 1
    # Giving both max_epochs and max_iters is ambiguous and must raise.
    with pytest.raises(ValueError, match='Only one of'):
        build_runner(dict(type='IterBasedRunner', max_epochs=1, max_iters=1), default_args=default_args)
|
@pytest.mark.parametrize('runner_class', RUNNERS.module_dict.values())
def test_epoch_based_runner(runner_class):
    """Constructor validation shared by all registered runner classes."""
    # batch_processor is deprecated but still accepted (with a warning).
    with pytest.warns(DeprecationWarning):
        model = OldStyleModel()
        def batch_processor():
            pass
        _ = runner_class(model, batch_processor, logger=logging.getLogger())
    # batch_processor must be callable.
    with pytest.raises(TypeError):
        model = OldStyleModel()
        _ = runner_class(model, batch_processor=0, logger=logging.getLogger())
    # optimizer must be a torch Optimizer ...
    with pytest.raises(TypeError):
        model = Model()
        optimizer = 'NotAOptimizer'
        _ = runner_class(model, optimizer=optimizer, logger=logging.getLogger())
    # ... including when given as a dict of optimizers.
    # NOTE(review): torch.optim.Adam() without params itself raises
    # TypeError, which this context also accepts — presumably intended.
    with pytest.raises(TypeError):
        model = Model()
        optimizers = dict(optim1=torch.optim.Adam(), optim2='NotAOptimizer')
        _ = runner_class(model, optimizer=optimizers, logger=logging.getLogger())
    # logger is mandatory.
    with pytest.raises(TypeError):
        model = Model()
        _ = runner_class(model, logger=None)
    # meta must be a dict or None.
    with pytest.raises(TypeError):
        model = Model()
        _ = runner_class(model, logger=logging.getLogger(), meta=['list'])
    # A model without train_step requires a batch_processor.
    with pytest.raises(AssertionError):
        model = OldStyleModel()
        _ = runner_class(model, logger=logging.getLogger())
    # work_dir must be a str or None.
    with pytest.raises(TypeError):
        model = Model()
        _ = runner_class(model, work_dir=1, logger=logging.getLogger())
    # Providing both train_step and batch_processor is ambiguous.
    with pytest.raises(RuntimeError):
        def batch_processor():
            pass
        model = Model()
        _ = runner_class(model, batch_processor, logger=logging.getLogger())
    # work_dir is created on demand and may already exist.
    model = Model()
    temp_root = tempfile.gettempdir()
    dir_name = ''.join([random.choice(string.ascii_letters) for _ in range(10)])
    work_dir = osp.join(temp_root, dir_name)
    _ = runner_class(model, work_dir=work_dir, logger=logging.getLogger())
    assert osp.isdir(work_dir)
    _ = runner_class(model, work_dir=work_dir, logger=logging.getLogger())
    assert osp.isdir(work_dir)
    os.removedirs(work_dir)
|
@pytest.mark.parametrize('runner_class', RUNNERS.module_dict.values())
def test_runner_with_parallel(runner_class):
    """Runners accept MMDataParallel-wrapped models."""
    def batch_processor():
        pass
    # Old-style model (no train_step) plus a batch_processor is allowed.
    model = MMDataParallel(OldStyleModel())
    _ = runner_class(model, batch_processor, logger=logging.getLogger())
    # A new-style model with train_step needs no batch_processor.
    model = MMDataParallel(Model())
    _ = runner_class(model, logger=logging.getLogger())
    # Supplying both train_step and a batch_processor is ambiguous.
    with pytest.raises(RuntimeError):
        def batch_processor():
            pass
        model = MMDataParallel(Model())
        _ = runner_class(model, batch_processor, logger=logging.getLogger())
|
@pytest.mark.parametrize('runner_class', RUNNERS.module_dict.values())
def test_save_checkpoint(runner_class):
    """save_checkpoint writes <epoch|iter>_1.pth plus a 'latest.pth' alias."""
    model = Model()
    runner = runner_class(model=model, logger=logging.getLogger())
    # meta must be a dict.
    with pytest.raises(TypeError):
        runner.save_checkpoint('.', meta=list())
    with tempfile.TemporaryDirectory() as root:
        runner.save_checkpoint(root)
        latest_path = osp.join(root, 'latest.pth')
        assert osp.exists(latest_path)
        if isinstance(runner, EpochBasedRunner):
            first_ckp_path = osp.join(root, 'epoch_1.pth')
        elif isinstance(runner, IterBasedRunner):
            first_ckp_path = osp.join(root, 'iter_1.pth')
        else:
            # BUGFIX: fail loudly instead of hitting a NameError below
            # when a new runner type is registered but not handled here.
            raise AssertionError(f'unsupported runner type: {type(runner).__name__}')
        assert osp.exists(first_ckp_path)
        if (platform.system() != 'Windows'):
            # On POSIX, 'latest.pth' resolves to the real checkpoint file.
            assert (osp.realpath(latest_path) == osp.realpath(first_ckp_path))
        # On Windows only existence is checked (no symlink); the file
        # must be loadable either way.
        torch.load(latest_path)
|
@pytest.mark.parametrize('runner_class', RUNNERS.module_dict.values())
def test_build_lr_momentum_hook(runner_class):
    """Every registered lr/momentum config must add exactly one hook."""
    runner = runner_class(model=Model(), logger=logging.getLogger())
    # LR hook configs accepted by register_lr_hook (policy names are
    # case-insensitive).
    lr_configs = [
        dict(policy='CosineAnnealing', by_epoch=False, min_lr_ratio=0, warmup_iters=2, warmup_ratio=0.9),
        dict(policy='Cyclic', by_epoch=False, target_ratio=(10, 1), cyclic_times=1, step_ratio_up=0.4),
        dict(policy='cyclic', by_epoch=False, target_ratio=((0.85 / 0.95), 1), cyclic_times=1, step_ratio_up=0.4),
        dict(policy='Step', warmup='linear', warmup_iters=500, warmup_ratio=(1.0 / 3), step=[8, 11]),
        dict(policy='step', warmup='linear', warmup_iters=500, warmup_ratio=(1.0 / 3), step=[8, 11]),
    ]
    registered = 0
    for cfg in lr_configs:
        runner.register_lr_hook(cfg)
        registered += 1
        assert len(runner.hooks) == registered
    # Momentum hook configs accepted by register_momentum_hook.
    momentum_configs = [
        dict(policy='CosineAnnealing', min_momentum_ratio=(0.99 / 0.95), by_epoch=False, warmup_iters=2, warmup_ratio=(0.9 / 0.95)),
        dict(policy='Cyclic', by_epoch=False, target_ratio=((0.85 / 0.95), 1), cyclic_times=1, step_ratio_up=0.4),
        dict(policy='cyclic', by_epoch=False, target_ratio=((0.85 / 0.95), 1), cyclic_times=1, step_ratio_up=0.4),
    ]
    for cfg in momentum_configs:
        runner.register_momentum_hook(cfg)
        registered += 1
        assert len(runner.hooks) == registered
|
@pytest.mark.parametrize('runner_class', RUNNERS.module_dict.values())
def test_register_timer_hook(runner_class):
    """register_timer_hook accepts None, a config dict or a hook instance."""
    runner = runner_class(model=Model(), logger=logging.getLogger())
    # None is a no-op.
    runner.register_timer_hook(None)
    assert len(runner.hooks) == 0
    # A config dict is built into an IterTimerHook.
    runner.register_timer_hook(dict(type='IterTimerHook'))
    assert len(runner.hooks) == 1
    assert isinstance(runner.hooks[0], IterTimerHook)
    # An already-constructed hook is registered as-is.
    runner.register_timer_hook(IterTimerHook())
    assert len(runner.hooks) == 2
    assert isinstance(runner.hooks[1], IterTimerHook)
|
def test_set_random_seed():
    """set_random_seed must make random/numpy/torch draws reproducible
    and, with deterministic=True, toggle the cudnn flags."""
    set_random_seed(0)
    a_random = random.randint(0, 10)
    a_np_random = np.random.rand(2, 2)
    a_torch_random = torch.rand(2, 2)
    # Without deterministic=True the cudnn flags stay off.
    assert (torch.backends.cudnn.deterministic is False)
    assert (torch.backends.cudnn.benchmark is False)
    assert (os.environ['PYTHONHASHSEED'] == str(0))
    # Re-seeding with deterministic=True must reproduce the same draws.
    set_random_seed(0, True)
    b_random = random.randint(0, 10)
    b_np_random = np.random.rand(2, 2)
    b_torch_random = torch.rand(2, 2)
    assert (torch.backends.cudnn.deterministic is True)
    if is_rocm_pytorch:
        # The test expects benchmark to stay enabled on ROCm builds.
        assert (torch.backends.cudnn.benchmark is True)
    else:
        assert (torch.backends.cudnn.benchmark is False)
    assert (a_random == b_random)
    assert np.equal(a_np_random, b_np_random).all()
    assert torch.equal(a_torch_random, b_torch_random)
|
def test_construct():
    """Config(dict, filename=...) for py/json/yaml files, dump()
    round-trips, and {{ predefined }} variable substitution."""
    def read_text(path):
        # Helper that closes the handle; the original leaked open files.
        with open(path, 'r') as f:
            return f.read()
    cfg = Config()
    assert (cfg.filename is None)
    assert (cfg.text == '')
    assert (len(cfg) == 0)
    assert (cfg._cfg_dict == {})
    # Only dicts are accepted.
    with pytest.raises(TypeError):
        Config([0, 1])
    cfg_dict = dict(item1=[1, 2], item2=dict(a=0), item3=True, item4='test')
    # .py config file.
    cfg_file = osp.join(data_path, 'config/a.py')
    cfg = Config(cfg_dict, filename=cfg_file)
    assert isinstance(cfg, Config)
    assert (cfg.filename == cfg_file)
    assert (cfg.text == read_text(cfg_file))
    assert (cfg.dump() == cfg.pretty_text)
    with tempfile.TemporaryDirectory() as temp_config_dir:
        dump_file = osp.join(temp_config_dir, 'a.py')
        cfg.dump(dump_file)
        assert (cfg.dump() == read_text(dump_file))
        assert Config.fromfile(dump_file)
    # .json config file.
    cfg_file = osp.join(data_path, 'config/b.json')
    cfg = Config(cfg_dict, filename=cfg_file)
    assert isinstance(cfg, Config)
    assert (cfg.filename == cfg_file)
    assert (cfg.text == read_text(cfg_file))
    assert (cfg.dump() == json.dumps(cfg_dict))
    with tempfile.TemporaryDirectory() as temp_config_dir:
        dump_file = osp.join(temp_config_dir, 'b.json')
        cfg.dump(dump_file)
        assert (cfg.dump() == read_text(dump_file))
        assert Config.fromfile(dump_file)
    # .yaml config file.
    cfg_file = osp.join(data_path, 'config/c.yaml')
    cfg = Config(cfg_dict, filename=cfg_file)
    assert isinstance(cfg, Config)
    assert (cfg.filename == cfg_file)
    assert (cfg.text == read_text(cfg_file))
    assert (cfg.dump() == yaml.dump(cfg_dict))
    with tempfile.TemporaryDirectory() as temp_config_dir:
        dump_file = osp.join(temp_config_dir, 'c.yaml')
        cfg.dump(dump_file)
        assert (cfg.dump() == read_text(dump_file))
        assert Config.fromfile(dump_file)
    # Predefined variables ({{ fileBasename }} etc.) in a .py config.
    cfg_file = osp.join(data_path, 'config/h.py')
    path = osp.join(osp.dirname(__file__), 'data', 'config')
    path = Path(path).as_posix()
    cfg_dict = dict(item1='h.py', item2=path, item3='abc_h')
    cfg = Config(cfg_dict, filename=cfg_file)
    assert isinstance(cfg, Config)
    assert (cfg.filename == cfg_file)
    assert (cfg.text == read_text(cfg_file))
    assert (cfg.dump() == cfg.pretty_text)
    with tempfile.TemporaryDirectory() as temp_config_dir:
        dump_file = osp.join(temp_config_dir, 'h.py')
        cfg.dump(dump_file)
        assert (cfg.dump() == read_text(dump_file))
        assert Config.fromfile(dump_file)
        assert (Config.fromfile(dump_file)['item1'] == cfg_dict['item1'])
        assert (Config.fromfile(dump_file)['item2'] == cfg_dict['item2'])
        assert (Config.fromfile(dump_file)['item3'] == cfg_dict['item3'])
    # With substitution disabled the raw templates are preserved.
    cfg_dict = dict(item1='{{fileBasename}}', item2='{{ fileDirname}}', item3='abc_{{ fileBasenameNoExtension }}')
    assert Config.fromfile(cfg_file, False)
    assert (Config.fromfile(cfg_file, False)['item1'] == cfg_dict['item1'])
    assert (Config.fromfile(cfg_file, False)['item2'] == cfg_dict['item2'])
    assert (Config.fromfile(cfg_file, False)['item3'] == cfg_dict['item3'])
    # Predefined variables in a .yaml config.
    cfg_file = osp.join(data_path, 'config/p.yaml')
    cfg_dict = dict(item1=osp.join(osp.dirname(__file__), 'data', 'config'))
    cfg = Config(cfg_dict, filename=cfg_file)
    assert isinstance(cfg, Config)
    assert (cfg.filename == cfg_file)
    assert (cfg.text == read_text(cfg_file))
    assert (cfg.dump() == yaml.dump(cfg_dict))
    with tempfile.TemporaryDirectory() as temp_config_dir:
        dump_file = osp.join(temp_config_dir, 'p.yaml')
        cfg.dump(dump_file)
        assert (cfg.dump() == read_text(dump_file))
        assert Config.fromfile(dump_file)
        assert (Config.fromfile(dump_file)['item1'] == cfg_dict['item1'])
    assert Config.fromfile(cfg_file, False)
    assert (Config.fromfile(cfg_file, False)['item1'] == '{{ fileDirname }}')
    # Predefined variables in a .json config.
    cfg_file = osp.join(data_path, 'config/o.json')
    cfg_dict = dict(item1=osp.join(osp.dirname(__file__), 'data', 'config'))
    cfg = Config(cfg_dict, filename=cfg_file)
    assert isinstance(cfg, Config)
    assert (cfg.filename == cfg_file)
    assert (cfg.text == read_text(cfg_file))
    assert (cfg.dump() == json.dumps(cfg_dict))
    with tempfile.TemporaryDirectory() as temp_config_dir:
        dump_file = osp.join(temp_config_dir, 'o.json')
        cfg.dump(dump_file)
        assert (cfg.dump() == read_text(dump_file))
        assert Config.fromfile(dump_file)
        assert (Config.fromfile(dump_file)['item1'] == cfg_dict['item1'])
    assert Config.fromfile(cfg_file, False)
    assert (Config.fromfile(cfg_file, False)['item1'] == '{{ fileDirname }}')
|
def test_fromfile():
    """Config.fromfile for each supported format, custom module imports,
    and error cases."""
    for filename in ['a.py', 'a.b.py', 'b.json', 'c.yaml']:
        cfg_file = osp.join(data_path, 'config', filename)
        cfg = Config.fromfile(cfg_file)
        assert isinstance(cfg, Config)
        assert (cfg.filename == cfg_file)
        # cfg.text is the absolute path plus the raw file contents.
        with open(cfg_file, 'r') as f:
            assert (cfg.text == ((osp.abspath(osp.expanduser(cfg_file)) + '\n') + f.read()))
    # import_custom_modules executes r.py, which sets TEST_VALUE; the
    # module must live next to this test file to be importable.
    cfg_file = osp.join(data_path, 'config', 'q.py')
    imported_file = osp.join(data_path, 'config', 'r.py')
    target_pkg = osp.join(osp.dirname(__file__), 'r.py')
    shutil.copy(imported_file, target_pkg)
    try:
        Config.fromfile(cfg_file, import_custom_modules=True)
        assert (os.environ.pop('TEST_VALUE') == 'test')
    finally:
        # BUGFIX: remove the copied module even when the import or the
        # assertion fails, so later tests are not polluted.
        os.remove(target_pkg)
    with pytest.raises(FileNotFoundError):
        Config.fromfile('no_such_file.py')
    with pytest.raises(IOError):
        Config.fromfile(osp.join(data_path, 'color.jpg'))
|
def test_fromstring():
    """Config.fromstring round-trips config text for every supported format.

    Checks that both ``pretty_text`` (always Python syntax) and the raw file
    content reconstruct the same ``_cfg_dict``, and that a mismatched format
    suffix raises.
    """
    for filename in ['a.py', 'a.b.py', 'b.json', 'c.yaml']:
        cfg_file = osp.join(data_path, 'config', filename)
        file_format = osp.splitext(filename)[(- 1)]
        in_cfg = Config.fromfile(cfg_file)
        # pretty_text is rendered as Python source regardless of the
        # original file format, so it must be parsed with the '.py' suffix.
        out_cfg = Config.fromstring(in_cfg.pretty_text, '.py')
        assert (in_cfg._cfg_dict == out_cfg._cfg_dict)
        # Use a context manager: the original leaked the open file handle.
        with open(cfg_file, 'r') as f:
            cfg_str = f.read()
        out_cfg = Config.fromstring(cfg_str, file_format)
        assert (in_cfg._cfg_dict == out_cfg._cfg_dict)
    # Parsing text with the wrong format suffix must fail.
    cfg_file = osp.join(data_path, 'config', 'b.json')
    in_cfg = Config.fromfile(cfg_file)
    with pytest.raises(Exception):
        Config.fromstring(in_cfg.pretty_text, '.json')
    with open(cfg_file, 'r') as f:
        cfg_str = f.read()
    with pytest.raises(Exception):
        Config.fromstring(cfg_str, '.py')
|
def test_merge_from_base():
    """A config with a ``_base_`` file merges values and concatenates text."""
    cfg_file = osp.join(data_path, 'config/d.py')
    cfg = Config.fromfile(cfg_file)
    assert isinstance(cfg, Config)
    assert (cfg.filename == cfg_file)
    base_cfg_file = osp.join(data_path, 'config/base.py')
    # Merged text is base (path + content) followed by the child config.
    merge_text = ((osp.abspath(osp.expanduser(base_cfg_file)) + '\n') + open(base_cfg_file, 'r').read())
    merge_text += ((('\n' + osp.abspath(osp.expanduser(cfg_file))) + '\n') + open(cfg_file, 'r').read())
    assert (cfg.text == merge_text)
    # Child values override the base's.
    assert (cfg.item1 == [2, 3])
    assert (cfg.item2.a == 1)
    assert (cfg.item3 is False)
    assert (cfg.item4 == 'test_base')
    with pytest.raises(TypeError):
        # e.py merges incompatible types, which must be rejected.
        Config.fromfile(osp.join(data_path, 'config/e.py'))
|
def test_merge_from_multiple_bases():
    """A config may list several ``_base_`` files; all are merged in order."""
    cfg_file = osp.join(data_path, 'config/l.py')
    cfg = Config.fromfile(cfg_file)
    assert isinstance(cfg, Config)
    assert (cfg.filename == cfg_file)
    assert (cfg.item1 == [1, 2])
    assert (cfg.item2.a == 0)
    assert (cfg.item3 is False)
    assert (cfg.item4 == 'test')
    assert (cfg.item5 == dict(a=0, b=1))
    assert (cfg.item6 == [dict(a=0), dict(b=1)])
    assert (cfg.item7 == dict(a=[0, 1, 2], b=dict(c=[3.1, 4.2, 5.3])))
    with pytest.raises(KeyError):
        # m.py's bases define duplicate keys, which is ambiguous and rejected.
        Config.fromfile(osp.join(data_path, 'config/m.py'))
|
def test_base_variables():
    """``{{ _base_.xxx }}`` variables resolve against base configs.

    t.* check direct substitution, u.* check nested/recursive substitution,
    and v.py checks substitution through multiple levels of bases.
    """
    # Direct base-variable references in every supported file format.
    for file in ['t.py', 't.json', 't.yaml']:
        cfg_file = osp.join(data_path, f'config/{file}')
        cfg = Config.fromfile(cfg_file)
        assert isinstance(cfg, Config)
        assert (cfg.filename == cfg_file)
        assert (cfg.item1 == [1, 2])
        assert (cfg.item2.a == 0)
        assert (cfg.item3 is False)
        assert (cfg.item4 == 'test')
        assert (cfg.item5 == dict(a=0, b=1))
        assert (cfg.item6 == [dict(a=0), dict(b=1)])
        assert (cfg.item7 == dict(a=[0, 1, 2], b=dict(c=[3.1, 4.2, 5.3])))
        assert (cfg.item8 == file)
        assert (cfg.item9 == dict(a=0))
        assert (cfg.item10 == [3.1, 4.2, 5.3])
    # Nested references: u.* configs reference variables that themselves
    # came from a base.  The literal string '_base_.item8' (cfg.base) must
    # NOT be substituted because it is a plain value, not a template.
    for file in ['u.py', 'u.json', 'u.yaml']:
        cfg_file = osp.join(data_path, f'config/{file}')
        cfg = Config.fromfile(cfg_file)
        assert isinstance(cfg, Config)
        assert (cfg.filename == cfg_file)
        assert (cfg.base == '_base_.item8')
        assert (cfg.item1 == [1, 2])
        assert (cfg.item2.a == 0)
        assert (cfg.item3 is False)
        assert (cfg.item4 == 'test')
        assert (cfg.item5 == dict(a=0, b=1))
        assert (cfg.item6 == [dict(a=0), dict(b=1)])
        assert (cfg.item7 == dict(a=[0, 1, 2], b=dict(c=[3.1, 4.2, 5.3])))
        assert (cfg.item8 == 't.py')
        assert (cfg.item9 == dict(a=0))
        assert (cfg.item10 == [3.1, 4.2, 5.3])
        assert (cfg.item11 == 't.py')
        assert (cfg.item12 == dict(a=0))
        assert (cfg.item13 == [3.1, 4.2, 5.3])
        assert (cfg.item14 == [1, 2])
        assert (cfg.item15 == dict(a=dict(b=dict(a=0)), b=[False], c=['test'], d=[[{'e': 0}], [{'a': 0}, {'b': 1}]], e=[1, 2]))
    # Multi-level resolution: v.py's variables reach through u.* into t.*.
    cfg_file = osp.join(data_path, 'config/v.py')
    cfg = Config.fromfile(cfg_file)
    assert isinstance(cfg, Config)
    assert (cfg.filename == cfg_file)
    assert (cfg.item21 == 't.py')
    assert (cfg.item22 == 't.py')
    assert (cfg.item23 == [3.1, 4.2, 5.3])
    assert (cfg.item24 == [3.1, 4.2, 5.3])
    assert (cfg.item25 == dict(a=dict(b=[3.1, 4.2, 5.3]), b=[[3.1, 4.2, 5.3]], c=[[{'e': 't.py'}], [{'a': 0}, {'b': 1}]], e='t.py'))
|
def test_merge_recursive_bases():
    """Base configs may themselves have bases; merging is recursive."""
    cfg_file = osp.join(data_path, 'config/f.py')
    cfg = Config.fromfile(cfg_file)
    assert isinstance(cfg, Config)
    assert (cfg.filename == cfg_file)
    assert (cfg.item1 == [2, 3])
    assert (cfg.item2.a == 1)
    assert (cfg.item3 is False)
    assert (cfg.item4 == 'test_recursive_bases')
|
def test_merge_from_dict():
    """merge_from_dict supports dotted keys and (optionally) list indices."""
    cfg_file = osp.join(data_path, 'config/a.py')
    cfg = Config.fromfile(cfg_file)
    input_options = {'item2.a': 1, 'item2.b': 0.1, 'item3': False}
    cfg.merge_from_dict(input_options)
    assert (cfg.item2 == dict(a=1, b=0.1))
    assert (cfg.item3 is False)
    # Numeric path segments index into lists when allow_list_keys=True.
    cfg_file = osp.join(data_path, 'config/s.py')
    cfg = Config.fromfile(cfg_file)
    input_options = {'item.0.a': 1, 'item.1.b': 1}
    cfg.merge_from_dict(input_options, allow_list_keys=True)
    assert (cfg.item == [{'a': 1}, {'b': 1, 'c': 0}])
    input_options = {'item.0.a': 1, 'item.1.b': 1}
    with pytest.raises(TypeError):
        # Without allow_list_keys, numeric segments on a list are a type error.
        cfg.merge_from_dict(input_options, allow_list_keys=False)
    input_options = {'item.2.a': 1}
    with pytest.raises(KeyError):
        # Index 2 is out of range for the two-element list.
        cfg.merge_from_dict(input_options, allow_list_keys=True)
|
def test_merge_delete():
    """``_delete_=True`` replaces a base dict instead of merging into it."""
    cfg_file = osp.join(data_path, 'config/delete.py')
    cfg = Config.fromfile(cfg_file)
    assert (cfg.item1 == dict(a=0))
    assert (cfg.item2 == dict(a=0, b=0))
    assert (cfg.item3 is True)
    assert (cfg.item4 == 'test')
    # The _delete_ marker itself must not survive the merge.
    assert ('_delete_' not in cfg.item2)
    # Merged mappings are ConfigDict, not plain dict.
    assert (type(cfg.item1) == ConfigDict)
    assert (type(cfg.item2) == ConfigDict)
|
def test_merge_intermediate_variable():
    """Intermediate variables in a child override the parent's at use sites."""
    cfg_file = osp.join(data_path, 'config/i_child.py')
    cfg = Config.fromfile(cfg_file)
    assert (cfg.item1 == [1, 2])
    assert (cfg.item2 == dict(a=0))
    assert (cfg.item3 is True)
    assert (cfg.item4 == 'test')
    # The child redefines item_cfg; item5 keeps the base's binding (b=1)
    # while item6 picks up the child's (b=2).
    assert (cfg.item_cfg == dict(b=2))
    assert (cfg.item5 == dict(cfg=dict(b=1)))
    assert (cfg.item6 == dict(cfg=dict(b=2)))
|
def test_fromfile_in_config():
    """A config file may itself call Config.fromfile for a nested config."""
    cfg_file = osp.join(data_path, 'config/code.py')
    cfg = Config.fromfile(cfg_file)
    # cfg.cfg is the nested config loaded inside code.py.
    assert (cfg.cfg.item1 == [1, 2])
    assert (cfg.cfg.item2 == dict(a=0))
    assert (cfg.cfg.item3 is True)
    assert (cfg.cfg.item4 == 'test')
    assert (cfg.item5 == 1)
|
def test_dict():
    """Config exposes the full dict protocol: len/keys/values/items/get/in."""
    cfg_dict = dict(item1=[1, 2], item2=dict(a=0), item3=True, item4='test')
    for filename in ['a.py', 'b.json', 'c.yaml']:
        cfg_file = osp.join(data_path, 'config', filename)
        cfg = Config.fromfile(cfg_file)
        # len / keys / values / items mirror the underlying dict.
        assert (len(cfg) == 4)
        assert (set(cfg.keys()) == set(cfg_dict.keys()))
        assert (set(cfg._cfg_dict.keys()) == set(cfg_dict.keys()))
        for value in cfg.values():
            assert (value in cfg_dict.values())
        for (name, value) in cfg.items():
            assert (name in cfg_dict)
            assert (value in cfg_dict.values())
        # Attribute access, including nested attributes on ConfigDict values.
        assert (cfg.item1 == cfg_dict['item1'])
        assert (cfg.item2 == cfg_dict['item2'])
        assert (cfg.item2.a == 0)
        assert (cfg.item3 == cfg_dict['item3'])
        assert (cfg.item4 == cfg_dict['item4'])
        with pytest.raises(AttributeError):
            cfg.not_exist
        # Item access and get() with and without a default.
        for name in ['item1', 'item2', 'item3', 'item4']:
            assert (name in cfg)
            assert (cfg[name] == cfg_dict[name])
            assert (cfg.get(name) == cfg_dict[name])
            assert (cfg.get('not_exist') is None)
            assert (cfg.get('not_exist', 0) == 0)
            with pytest.raises(KeyError):
                cfg['not_exist']
        assert ('item1' in cfg)
        assert ('not_exist' not in cfg)
        # update() merges in-place like dict.update.
        cfg.update(dict(item1=0))
        assert (cfg.item1 == 0)
        cfg.update(dict(item2=dict(a=1)))
        assert (cfg.item2.a == 1)
|
def test_setattr():
    """Attribute- and item-style writes on Config must land in ``_cfg_dict``."""
    cfg = Config()
    cfg.item1 = [1, 2]
    cfg.item2 = {'a': 0}
    cfg['item5'] = {'a': {'b': None}}
    # Every write is visible through the raw backing dict ...
    expected = {
        'item1': [1, 2],
        'item2': {'a': 0},
        'item5': {'a': {'b': None}},
    }
    for key, value in expected.items():
        assert cfg._cfg_dict[key] == value
    # ... and through (nested) attribute access.
    assert cfg.item1 == [1, 2]
    assert cfg.item2.a == 0
    assert cfg.item5.a.b is None
|
def test_pretty_text():
    """pretty_text must be valid Python that reloads to the same config."""
    cfg_file = osp.join(data_path, 'config/l.py')
    cfg = Config.fromfile(cfg_file)
    with tempfile.TemporaryDirectory() as temp_config_dir:
        text_cfg_filename = osp.join(temp_config_dir, '_text_config.py')
        # Round-trip: write pretty_text to disk, reload, compare dicts.
        with open(text_cfg_filename, 'w') as f:
            f.write(cfg.pretty_text)
        text_cfg = Config.fromfile(text_cfg_filename)
    assert (text_cfg._cfg_dict == cfg._cfg_dict)
|
def test_dict_action():
    """DictAction parses KEY=VALUE args with type inference and nesting.

    Values are parsed into int/float/bool/str, and comma/bracket syntax
    produces tuples and (nested) lists.
    """
    parser = argparse.ArgumentParser(description='Train a detector')
    parser.add_argument('--options', nargs='+', action=DictAction, help='custom options')
    # Comma-separated values become a list; (a,b) becomes a tuple; 'false'
    # is parsed as the boolean False.
    args = parser.parse_args(['--options', 'item2.a=a,b', 'item2.b=[(a,b), [1,2], false]'])
    out_dict = {'item2.a': ['a', 'b'], 'item2.b': [('a', 'b'), [1, 2], False]}
    assert (args.options == out_dict)
    args = parser.parse_args(['--options', 'item2.a=[[1]]'])
    out_dict = {'item2.a': [[1]]}
    assert (args.options == out_dict)
    with pytest.raises(AssertionError):
        # Unbalanced brackets are rejected.
        parser.parse_args(['--options', 'item2.a=[(a,b), [1,2], false'])
    # Scalar type inference: int, float, str, bool.
    args = parser.parse_args(['--options', 'item2.a=1', 'item2.b=0.1', 'item2.c=x', 'item3=false'])
    out_dict = {'item2.a': 1, 'item2.b': 0.1, 'item2.c': 'x', 'item3': False}
    assert (args.options == out_dict)
    # The parsed options feed directly into Config.merge_from_dict.
    cfg_file = osp.join(data_path, 'config/a.py')
    cfg = Config.fromfile(cfg_file)
    cfg.merge_from_dict(args.options)
    assert (cfg.item2 == dict(a=1, b=0.1, c='x'))
    assert (cfg.item3 is False)
|
def test_dump_mapping():
    """Configs containing mapping values dump to a reloadable .py file."""
    cfg_file = osp.join(data_path, 'config/n.py')
    cfg = Config.fromfile(cfg_file)
    with tempfile.TemporaryDirectory() as temp_config_dir:
        text_cfg_filename = osp.join(temp_config_dir, '_text_config.py')
        cfg.dump(text_cfg_filename)
        text_cfg = Config.fromfile(text_cfg_filename)
    assert (text_cfg._cfg_dict == cfg._cfg_dict)
|
def test_reserved_key():
    """A config file defining a reserved key must be rejected with KeyError."""
    cfg_file = osp.join(data_path, 'config/g.py')
    with pytest.raises(KeyError):
        Config.fromfile(cfg_file)
|
def test_syntax_error():
    """Invalid Python in a config file raises a descriptive SyntaxError."""
    temp_cfg_file = tempfile.NamedTemporaryFile(suffix='.py', delete=False)
    temp_cfg_path = temp_cfg_file.name
    # Deliberately malformed source: two statements fused on one line.
    with open(temp_cfg_path, 'w') as f:
        f.write('a=0b=dict(c=1)')
    with pytest.raises(SyntaxError, match='There are syntax errors in config file'):
        Config.fromfile(temp_cfg_path)
    # delete=False above, so clean up manually.
    temp_cfg_file.close()
    os.remove(temp_cfg_path)
|
def test_pickle_support():
    """Config objects survive a pickle round-trip via mmcv dump/load."""
    cfg_file = osp.join(data_path, 'config/n.py')
    cfg = Config.fromfile(cfg_file)
    with tempfile.TemporaryDirectory() as temp_config_dir:
        pkl_cfg_filename = osp.join(temp_config_dir, '_pickle.pkl')
        dump(cfg, pkl_cfg_filename)
        pkl_cfg = load(pkl_cfg_filename)
    assert (pkl_cfg._cfg_dict == cfg._cfg_dict)
|
def test_deprecation():
    """Loading a deprecated config (directly or as a base) warns but works."""
    deprecated_cfg_files = [osp.join(data_path, 'config/deprecated.py'), osp.join(data_path, 'config/deprecated_as_base.py')]
    for cfg_file in deprecated_cfg_files:
        with pytest.warns(DeprecationWarning):
            cfg = Config.fromfile(cfg_file)
        # Despite the warning, the redirected config must load correctly.
        assert (cfg.item1 == 'expected')
|
def test_deepcopy():
    """copy.deepcopy yields an equal Config with an independent _cfg_dict."""
    cfg_file = osp.join(data_path, 'config/n.py')
    cfg = Config.fromfile(cfg_file)
    new_cfg = copy.deepcopy(cfg)
    assert isinstance(new_cfg, Config)
    # Equal contents but a distinct underlying dict object.
    assert (new_cfg._cfg_dict == cfg._cfg_dict)
    assert (new_cfg._cfg_dict is not cfg._cfg_dict)
    assert (new_cfg._filename == cfg._filename)
    assert (new_cfg._text == cfg._text)
|
def test_copy():
    """copy.copy yields a new Config object sharing the same _cfg_dict."""
    cfg_file = osp.join(data_path, 'config/n.py')
    cfg = Config.fromfile(cfg_file)
    new_cfg = copy.copy(cfg)
    assert isinstance(new_cfg, Config)
    assert (new_cfg is not cfg)
    # Shallow copy: the backing dict is the same object.
    assert (new_cfg._cfg_dict is cfg._cfg_dict)
    assert (new_cfg._filename == cfg._filename)
    assert (new_cfg._text == cfg._text)
|
def test_collect_env():
    """collect_env reports the expected environment keys and values."""
    try:
        import torch
    except ModuleNotFoundError:
        pytest.skip('skipping tests that require PyTorch')
    from mmcv.utils import collect_env
    env_info = collect_env()
    expected_keys = ['sys.platform', 'Python', 'CUDA available', 'PyTorch', 'PyTorch compiling details', 'OpenCV', 'MMCV', 'MMCV Compiler', 'MMCV CUDA Compiler']
    for key in expected_keys:
        assert (key in env_info)
    # CUDA-specific keys only appear when a CUDA runtime is detected.
    if env_info['CUDA available']:
        for key in ['CUDA_HOME', 'NVCC']:
            assert (key in env_info)
    # GCC info is not reported on Windows.
    if (sys.platform != 'win32'):
        assert ('GCC' in env_info)
    assert (env_info['sys.platform'] == sys.platform)
    assert (env_info['Python'] == sys.version.replace('\n', ''))
    assert (env_info['MMCV'] == mmcv.__version__)
|
def test_load_url():
    """load_url handles checkpoints saved by both old and new torch formats.

    NOTE: downloads real files from download.openmmlab.com, so this test
    requires network access.
    """
    url1 = 'https://download.openmmlab.com/mmcv/test_data/saved_in_pt1.5.pth'
    url2 = 'https://download.openmmlab.com/mmcv/test_data/saved_in_pt1.6.pth'
    # torch < 1.7 cannot read the zipfile format introduced in 1.6.
    if (digit_version(TORCH_VERSION) < digit_version('1.7.0')):
        model_zoo.load_url(url1)
        with pytest.raises(RuntimeError):
            model_zoo.load_url(url2)
    else:
        model_zoo.load_url(url1)
        model_zoo.load_url(url2)
    # mmcv's load_url falls back gracefully, but torch < 1.5 still cannot
    # read the newer format.
    load_url(url1)
    if (digit_version(TORCH_VERSION) < digit_version('1.5.0')):
        with pytest.raises(RuntimeError):
            load_url(url2)
    else:
        load_url(url2)
|
@patch('torch.distributed.get_rank', (lambda : 0))
@patch('torch.distributed.is_initialized', (lambda : True))
@patch('torch.distributed.is_available', (lambda : True))
def test_get_logger_rank0():
    """On (mocked) rank 0, get_logger attaches stream and file handlers."""
    logger = get_logger('rank0.pkg1')
    assert isinstance(logger, logging.Logger)
    assert (len(logger.handlers) == 1)
    assert isinstance(logger.handlers[0], logging.StreamHandler)
    assert (logger.handlers[0].level == logging.INFO)
    # log_level is applied to the handler.
    logger = get_logger('rank0.pkg2', log_level=logging.DEBUG)
    assert isinstance(logger, logging.Logger)
    assert (len(logger.handlers) == 1)
    assert (logger.handlers[0].level == logging.DEBUG)
    # Passing log_file adds a FileHandler after the StreamHandler.
    with tempfile.NamedTemporaryFile(delete=False) as f:
        logger = get_logger('rank0.pkg3', log_file=f.name)
    assert isinstance(logger, logging.Logger)
    assert (len(logger.handlers) == 2)
    assert isinstance(logger.handlers[0], logging.StreamHandler)
    assert isinstance(logger.handlers[1], logging.FileHandler)
    # Requesting the same name returns the cached logger object.
    logger_pkg3 = get_logger('rank0.pkg3')
    assert (id(logger_pkg3) == id(logger))
    logging.shutdown()
    os.remove(f.name)
    logger_pkg3 = get_logger('rank0.pkg3.subpkg')
    # NOTE(review): this assertion is tautological (it compares the list with
    # itself) and can never fail; it presumably meant to compare the child
    # logger's handlers against its parent's — confirm the intended check.
    assert (logger_pkg3.handlers == logger_pkg3.handlers)
|
@patch('torch.distributed.get_rank', (lambda : 1))
@patch('torch.distributed.is_initialized', (lambda : True))
@patch('torch.distributed.is_available', (lambda : True))
def test_get_logger_rank1():
    """On (mocked) rank 1, non-zero ranks get no FileHandler."""
    logger = get_logger('rank1.pkg1')
    assert isinstance(logger, logging.Logger)
    assert (len(logger.handlers) == 1)
    assert isinstance(logger.handlers[0], logging.StreamHandler)
    assert (logger.handlers[0].level == logging.INFO)
    with tempfile.NamedTemporaryFile(delete=False) as f:
        logger = get_logger('rank1.pkg2', log_file=f.name)
    assert isinstance(logger, logging.Logger)
    # Even with log_file given, only the stream handler is attached on rank 1.
    assert (len(logger.handlers) == 1)
    assert (logger.handlers[0].level == logging.INFO)
    logging.shutdown()
    os.remove(f.name)
|
def test_print_log_print(capsys):
    """With ``logger=None`` the message is printed straight to stdout."""
    print_log('welcome', logger=None)
    captured = capsys.readouterr()
    assert captured.out == 'welcome\n'
|
def test_print_log_silent(capsys, caplog):
    """logger='silent' suppresses both stdout output and log records."""
    print_log('welcome', logger='silent')
    (out, _) = capsys.readouterr()
    assert (out == '')
    assert (len(caplog.records) == 0)
|
def test_print_log_logger(caplog):
    """print_log routes to a named or explicit logger, honoring the level."""
    print_log('welcome', logger='mmcv')
    assert (caplog.record_tuples[(- 1)] == ('mmcv', logging.INFO, 'welcome'))
    print_log('welcome', logger='mmcv', level=logging.ERROR)
    assert (caplog.record_tuples[(- 1)] == ('mmcv', logging.ERROR, 'welcome'))
    # A logger object (with a file handler) works as well; the file should
    # contain a single correctly formatted line.
    with tempfile.NamedTemporaryFile(delete=False) as f:
        logger = get_logger('abc', log_file=f.name)
        print_log('welcome', logger=logger)
        assert (caplog.record_tuples[(- 1)] == ('abc', logging.INFO, 'welcome'))
        with open(f.name, 'r') as fin:
            log_text = fin.read()
            # Match the default '%(asctime)s - %(name)s - %(levelname)s' format.
            regex_time = '\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2},\\d{3}'
            match = re.fullmatch((regex_time + ' - abc - INFO - welcome\\n'), log_text)
            assert (match is not None)
    logging.shutdown()
    os.remove(f.name)
|
def test_print_log_exception():
    """An unsupported ``logger`` argument type raises TypeError."""
    with pytest.raises(TypeError):
        print_log('welcome', logger=0)
|
def test_to_ntuple():
    """to_Ntuple helpers repeat a scalar into a tuple of the given arity."""
    single_number = 2
    assert (mmcv.utils.to_1tuple(single_number) == (single_number,))
    assert (mmcv.utils.to_2tuple(single_number) == (single_number, single_number))
    assert (mmcv.utils.to_3tuple(single_number) == (single_number, single_number, single_number))
    assert (mmcv.utils.to_4tuple(single_number) == (single_number, single_number, single_number, single_number))
    # to_ntuple(n) builds the generic n-ary variant.
    assert (mmcv.utils.to_ntuple(5)(single_number) == (single_number, single_number, single_number, single_number, single_number))
    assert (mmcv.utils.to_ntuple(6)(single_number) == (single_number, single_number, single_number, single_number, single_number, single_number))
|
def test_iter_cast():
    """list_cast/tuple_cast/iter_cast convert element types lazily or eagerly."""
    assert (mmcv.list_cast([1, 2, 3], int) == [1, 2, 3])
    assert (mmcv.list_cast(['1.1', 2, '3'], float) == [1.1, 2.0, 3.0])
    assert (mmcv.list_cast([1, 2, 3], str) == ['1', '2', '3'])
    assert (mmcv.tuple_cast((1, 2, 3), str) == ('1', '2', '3'))
    # iter_cast is lazy: it yields converted items one at a time.
    assert (next(mmcv.iter_cast([1, 2, 3], str)) == '1')
    with pytest.raises(TypeError):
        # dst_type must be a type, not an arbitrary value.
        mmcv.iter_cast([1, 2, 3], '')
    with pytest.raises(TypeError):
        # The input must be iterable.
        mmcv.iter_cast(1, str)
|
def test_is_seq_of():
    """is_seq_of checks element type, optionally constraining the seq type."""
    assert mmcv.is_seq_of([1.0, 2.0, 3.0], float)
    assert mmcv.is_seq_of([(1,), (2,), (3,)], tuple)
    assert mmcv.is_seq_of((1.0, 2.0, 3.0), float)
    assert mmcv.is_list_of([1.0, 2.0, 3.0], float)
    # seq_type narrows which container classes are accepted.
    assert (not mmcv.is_seq_of((1.0, 2.0, 3.0), float, seq_type=list))
    assert (not mmcv.is_tuple_of([1.0, 2.0, 3.0], float))
    # Every element must match the expected type, not just some.
    assert (not mmcv.is_seq_of([1.0, 2, 3], int))
    assert (not mmcv.is_seq_of((1.0, 2, 3), int))
|
def test_slice_list():
    """slice_list partitions a flat list by a list of segment lengths."""
    in_list = [1, 2, 3, 4, 5, 6]
    assert (mmcv.slice_list(in_list, [1, 2, 3]) == [[1], [2, 3], [4, 5, 6]])
    assert (mmcv.slice_list(in_list, [len(in_list)]) == [in_list])
    with pytest.raises(TypeError):
        # Lengths must be an int list, not a float.
        mmcv.slice_list(in_list, 2.0)
    with pytest.raises(ValueError):
        # The lengths must sum to len(in_list).
        mmcv.slice_list(in_list, [1, 2])
|
def test_concat_list():
    """concat_list flattens exactly one level of list nesting."""
    cases = [
        ([[1, 2]], [1, 2]),
        ([[1, 2], [3, 4, 5], [6]], [1, 2, 3, 4, 5, 6]),
    ]
    for nested, flat in cases:
        assert mmcv.concat_list(nested) == flat
|
def test_requires_package(capsys):
    """@requires_package raises and prints when a Python package is missing."""
    @mmcv.requires_package('nnn')
    def func_a():
        pass
    @mmcv.requires_package(['numpy', 'n1', 'n2'])
    def func_b():
        pass
    @mmcv.requires_package('numpy')
    def func_c():
        return 1
    # 'nnn' does not exist -> RuntimeError plus an explanatory message.
    with pytest.raises(RuntimeError):
        func_a()
    (out, _) = capsys.readouterr()
    assert (out == 'Prerequisites "nnn" are required in method "func_a" but not found, please install them first.\n')
    # Only the missing packages ('n1', 'n2') are listed; numpy is present.
    with pytest.raises(RuntimeError):
        func_b()
    (out, _) = capsys.readouterr()
    assert (out == 'Prerequisites "n1, n2" are required in method "func_b" but not found, please install them first.\n')
    # All prerequisites satisfied -> the wrapped function runs normally.
    assert (func_c() == 1)
|
def test_requires_executable(capsys):
    """@requires_executable raises and prints when a CLI binary is missing."""
    @mmcv.requires_executable('nnn')
    def func_a():
        pass
    @mmcv.requires_executable(['ls', 'n1', 'n2'])
    def func_b():
        pass
    @mmcv.requires_executable('mv')
    def func_c():
        return 1
    # 'nnn' is not on PATH -> RuntimeError plus an explanatory message.
    with pytest.raises(RuntimeError):
        func_a()
    (out, _) = capsys.readouterr()
    assert (out == 'Prerequisites "nnn" are required in method "func_a" but not found, please install them first.\n')
    # Only the missing executables ('n1', 'n2') are listed; 'ls' exists.
    with pytest.raises(RuntimeError):
        func_b()
    (out, _) = capsys.readouterr()
    assert (out == 'Prerequisites "n1, n2" are required in method "func_b" but not found, please install them first.\n')
    # All prerequisites satisfied -> the wrapped function runs normally.
    assert (func_c() == 1)
|
def test_import_modules_from_strings():
    """import_modules_from_strings imports by name, with optional tolerance."""
    import os.path as osp_
    import sys as sys_
    # A list of names returns the modules in order.
    (osp, sys) = mmcv.import_modules_from_strings(['os.path', 'sys'])
    assert (osp == osp_)
    assert (sys == sys_)
    # A single string returns a single module.
    osp = mmcv.import_modules_from_strings('os.path')
    assert (osp == osp_)
    # Empty-ish inputs are no-ops returning None.
    assert (mmcv.import_modules_from_strings(None) is None)
    assert (mmcv.import_modules_from_strings([]) is None)
    assert (mmcv.import_modules_from_strings('') is None)
    with pytest.raises(TypeError):
        mmcv.import_modules_from_strings(1)
    with pytest.raises(TypeError):
        mmcv.import_modules_from_strings([1])
    with pytest.raises(ImportError):
        mmcv.import_modules_from_strings('_not_implemented_module')
    # allow_failed_imports turns hard failures into warnings + None entries.
    with pytest.warns(UserWarning):
        imported = mmcv.import_modules_from_strings('_not_implemented_module', allow_failed_imports=True)
        assert (imported is None)
    with pytest.warns(UserWarning):
        imported = mmcv.import_modules_from_strings(['os.path', '_not_implemented'], allow_failed_imports=True)
        assert (imported[0] == osp)
        assert (imported[1] is None)
|
def test_is_method_overridden():
    """is_method_overridden detects subclass overrides on classes/instances."""
    class Base():
        def foo1():
            pass
        def foo2():
            pass
    class Sub(Base):
        def foo1():
            pass
    # foo1 is redefined in Sub; foo2 is inherited unchanged.
    assert mmcv.is_method_overridden('foo1', Base, Sub)
    assert (not mmcv.is_method_overridden('foo2', Base, Sub))
    # Instances are accepted in place of the derived class.
    sub_instance = Sub()
    assert mmcv.is_method_overridden('foo1', Base, sub_instance)
    assert (not mmcv.is_method_overridden('foo2', Base, sub_instance))
    # The base argument must be a class, not an instance.
    base_instance = Base()
    with pytest.raises(AssertionError):
        mmcv.is_method_overridden('foo1', base_instance, sub_instance)
|
def test_has_method():
    """has_method is True for callable attributes and False for plain data."""
    class Dummy():
        def __init__(self, name):
            self.name = name

        def print_name(self):
            print(self.name)

    obj = Dummy('foo')
    assert has_method(obj, 'print_name')
    # 'name' is a data attribute, not a bound method.
    assert not has_method(obj, 'name')
|
def test_deprecated_api_warning():
    """@deprecated_api_warning forwards old kwarg names to the new ones."""
    @deprecated_api_warning(name_dict=dict(old_key='new_key'))
    def dummy_func(new_key=1):
        return new_key
    # The old keyword is transparently mapped to new_key.
    assert (dummy_func(old_key=2) == 2)
    with pytest.raises(AssertionError):
        # Supplying both the old and the new name is ambiguous.
        dummy_func(old_key=1, new_key=2)
|
class TestJit(object):
    """Tests for the ``mmcv.jit`` decorator.

    Each test compares a jitted function against an identical plain-Python
    version on random tensors.  Tests marked ``@skip_no_parrots`` exercise
    parrots-specific behavior such as the per-signature compile cache
    (``func._cache._cache``); under plain PyTorch the decorator is a no-op.
    """
    def test_add_dict(self):
        """A jitted function may take and return a dict of tensors."""
        @mmcv.jit
        def add_dict(oper):
            rets = (oper['x'] + oper['y'])
            return {'result': rets}
        def add_dict_pyfunc(oper):
            rets = (oper['x'] + oper['y'])
            return {'result': rets}
        a = torch.rand((3, 4))
        b = torch.rand((3, 4))
        oper = {'x': a, 'y': b}
        rets_t = add_dict(oper)
        rets = add_dict_pyfunc(oper)
        assert ('result' in rets)
        assert (rets_t['result'] == rets['result']).all()
    def test_add_list(self):
        """A jitted function may iterate a list of dicts plus extra kwargs."""
        @mmcv.jit
        def add_list(oper, x, y):
            rets = {}
            for (idx, pair) in enumerate(oper):
                rets[f'k{idx}'] = (pair['x'] + pair['y'])
            rets[f'k{len(oper)}'] = (x + y)
            return rets
        def add_list_pyfunc(oper, x, y):
            rets = {}
            for (idx, pair) in enumerate(oper):
                rets[f'k{idx}'] = (pair['x'] + pair['y'])
            rets[f'k{len(oper)}'] = (x + y)
            return rets
        pair_num = 3
        oper = []
        for _ in range(pair_num):
            oper.append({'x': torch.rand((3, 4)), 'y': torch.rand((3, 4))})
        a = torch.rand((3, 4))
        b = torch.rand((3, 4))
        rets = add_list_pyfunc(oper, x=a, y=b)
        rets_t = add_list(oper, x=a, y=b)
        for idx in range((pair_num + 1)):
            assert (f'k{idx}' in rets_t)
            assert (rets[f'k{idx}'] == rets_t[f'k{idx}']).all()
    @skip_no_parrots
    def test_jit_cache(self):
        """Each distinct non-tensor argument value compiles a new cache entry."""
        @mmcv.jit
        def func(oper):
            if (oper['const'] > 1):
                return ((oper['x'] * 2) + oper['y'])
            else:
                return ((oper['x'] * 2) - oper['y'])
        def pyfunc(oper):
            if (oper['const'] > 1):
                return ((oper['x'] * 2) + oper['y'])
            else:
                return ((oper['x'] * 2) - oper['y'])
        assert (len(func._cache._cache) == 0)
        oper = {'const': 2, 'x': torch.rand((3, 4)), 'y': torch.rand((3, 4))}
        rets_plus = pyfunc(oper)
        rets_plus_t = func(oper)
        assert (rets_plus == rets_plus_t).all()
        assert (len(func._cache._cache) == 1)
        # Changing 'const' flips the branch, forcing a second compilation.
        oper['const'] = 0.5
        rets_minus = pyfunc(oper)
        rets_minus_t = func(oper)
        assert (rets_minus == rets_minus_t).all()
        assert (len(func._cache._cache) == 2)
        # (x*2+y + x*2-y) / 4 == x: cross-check both cached variants at once.
        rets_a = ((rets_minus_t + rets_plus_t) / 4)
        assert torch.allclose(oper['x'], rets_a)
    @skip_no_parrots
    def test_jit_shape(self):
        """A new input shape triggers a new compilation cache entry."""
        @mmcv.jit
        def func(a):
            return (a + 1)
        assert (len(func._cache._cache) == 0)
        a = torch.ones((3, 4))
        r = func(a)
        assert (r.shape == (3, 4))
        assert (r == 2).all()
        assert (len(func._cache._cache) == 1)
        a = torch.ones((2, 3, 4))
        r = func(a)
        assert (r.shape == (2, 3, 4))
        assert (r == 2).all()
        assert (len(func._cache._cache) == 2)
    @skip_no_parrots
    def test_jit_kwargs(self):
        """Positional vs keyword argument passing shares one cache entry."""
        @mmcv.jit
        def func(a, b):
            return torch.mean(((a - b) * (a - b)))
        assert (len(func._cache._cache) == 0)
        x = torch.rand((16, 32))
        y = torch.rand((16, 32))
        func(x, y)
        assert (len(func._cache._cache) == 1)
        func(x, b=y)
        assert (len(func._cache._cache) == 1)
        func(b=y, a=x)
        assert (len(func._cache._cache) == 1)
    def test_jit_derivate(self):
        """With derivate=True, gradients flow through the jitted function."""
        @mmcv.jit(derivate=True)
        def func(x, y):
            return ((x + 2) * (y - 2))
        a = torch.rand((3, 4))
        b = torch.rand((3, 4))
        a.requires_grad = True
        c = func(a, b)
        assert c.requires_grad
        d = torch.empty_like(c)
        d.fill_(1.0)
        c.backward(d)
        # d/da[(a+2)*(b-2)] = b-2; b has requires_grad=False, so no grad.
        assert torch.allclose(a.grad, (b - 2))
        assert (b.grad is None)
        # A second backward pass scales the gradient by the upstream value.
        a.grad = None
        c = func(a, b)
        assert c.requires_grad
        d = torch.empty_like(c)
        d.fill_(2.7)
        c.backward(d)
        assert torch.allclose(a.grad, (2.7 * (b - 2)))
        assert (b.grad is None)
    def test_jit_optimize(self):
        """optimize=True must not change numerical results."""
        @mmcv.jit(optimize=True)
        def func(a, b):
            return torch.mean(((a - b) * (a - b)))
        def pyfunc(a, b):
            return torch.mean(((a - b) * (a - b)))
        a = torch.rand((16, 32))
        b = torch.rand((16, 32))
        c = func(a, b)
        d = pyfunc(a, b)
        assert torch.allclose(c, d)
    @mmcv.skip_no_elena
    def test_jit_coderize(self):
        """coderize=True must not change numerical results (CUDA only)."""
        if (not torch.cuda.is_available()):
            return
        @mmcv.jit(coderize=True)
        def func(a, b):
            return ((a + b) * (a - b))
        def pyfunc(a, b):
            return ((a + b) * (a - b))
        a = torch.rand((16, 32), device='cuda')
        b = torch.rand((16, 32), device='cuda')
        c = func(a, b)
        d = pyfunc(a, b)
        assert torch.allclose(c, d)
    def test_jit_value_dependent(self):
        """Value-dependent ops (torch.nonzero) still compile and run correctly."""
        @mmcv.jit
        def func(a, b):
            torch.nonzero(a)
            return torch.mean(((a - b) * (a - b)))
        def pyfunc(a, b):
            torch.nonzero(a)
            return torch.mean(((a - b) * (a - b)))
        a = torch.rand((16, 32))
        b = torch.rand((16, 32))
        c = func(a, b)
        d = pyfunc(a, b)
        assert torch.allclose(c, d)
    @skip_no_parrots
    def test_jit_check_input(self):
        """check_input rejects sample inputs that require grad tracking state."""
        def func(x):
            y = torch.rand_like(x)
            return (x + y)
        a = torch.ones((3, 4))
        with pytest.raises(AssertionError):
            func = mmcv.jit(func, check_input=(a,))
    @skip_no_parrots
    def test_jit_partial_shape(self):
        """full_shape=False caches by rank: same-ndim inputs share an entry."""
        @mmcv.jit(full_shape=False)
        def func(a, b):
            return torch.mean(((a - b) * (a - b)))
        def pyfunc(a, b):
            return torch.mean(((a - b) * (a - b)))
        # Two different 2-D shapes -> one cache entry.
        a = torch.rand((3, 4))
        b = torch.rand((3, 4))
        assert torch.allclose(func(a, b), pyfunc(a, b))
        assert (len(func._cache._cache) == 1)
        a = torch.rand((6, 5))
        b = torch.rand((6, 5))
        assert torch.allclose(func(a, b), pyfunc(a, b))
        assert (len(func._cache._cache) == 1)
        # Moving to 3-D inputs adds a second entry; more 3-D shapes do not.
        a = torch.rand((3, 4, 5))
        b = torch.rand((3, 4, 5))
        assert torch.allclose(func(a, b), pyfunc(a, b))
        assert (len(func._cache._cache) == 2)
        a = torch.rand((1, 9, 8))
        b = torch.rand((1, 9, 8))
        assert torch.allclose(func(a, b), pyfunc(a, b))
        assert (len(func._cache._cache) == 2)
    def test_instance_method(self):
        """mmcv.jit works on instance methods reading per-instance state."""
        class T(object):
            def __init__(self, shape):
                self._c = torch.rand(shape)
            @mmcv.jit
            def test_method(self, x, y):
                return ((x * self._c) + y)
        shape = (16, 32)
        t = T(shape)
        a = torch.rand(shape)
        b = torch.rand(shape)
        res = ((a * t._c) + b)
        jit_res = t.test_method(a, b)
        assert torch.allclose(res, jit_res)
        # A fresh instance has fresh state; the jitted method must not reuse
        # the previous instance's _c.
        t = T(shape)
        res = ((a * t._c) + b)
        jit_res = t.test_method(a, b)
        assert torch.allclose(res, jit_res)
|
# NOTE: removed dataset-viewer page residue ("Subsets and Splits ...") that
# was accidentally appended to this Python source and broke its syntax.