code
stringlengths
17
6.64M
@COMPONENTS.register_module()
class FooLinearConv1d(BaseModule):
    """Registered component chaining a linear sub-component into a conv1d one.

    Both children are themselves built from the COMPONENTS registry and are
    only attached when a config is supplied.
    """

    def __init__(self, linear=None, conv1d=None, init_cfg=None):
        super().__init__(init_cfg)
        if linear is not None:
            self.linear = build_from_cfg(linear, COMPONENTS)
        if conv1d is not None:
            self.conv1d = build_from_cfg(conv1d, COMPONENTS)

    def forward(self, x):
        # linear first, then conv1d.
        return self.conv1d(self.linear(x))
@FOOMODELS.register_module()
class FooModel(BaseModule):
    """Top-level registered model assembling up to four optional components.

    Each ``componentN`` config is built from the COMPONENTS registry and only
    attached when provided; ``reg`` is always present as a plain Linear head.
    """

    def __init__(self, component1=None, component2=None, component3=None,
                 component4=None, init_cfg=None) -> None:
        super().__init__(init_cfg)
        component_cfgs = dict(
            component1=component1,
            component2=component2,
            component3=component3,
            component4=component4)
        for attr_name, cfg in component_cfgs.items():
            if cfg is not None:
                setattr(self, attr_name, build_from_cfg(cfg, COMPONENTS))
        self.reg = nn.Linear(3, 4)
def test_initilization_info_logger():
    """Weight-init logging: per-parameter init records must reach the logger.

    First half: a model with both configured and overloaded ``init_weights``
    should log which initializer touched each parameter, and the bookkeeping
    attribute ``_params_init_info`` must not survive on the model.
    Second half: repeated ``init_weights`` calls on nested modules should be
    detected and reported as leaving parameters 'the same'.
    """
    import os

    import torch.nn as nn

    from mmcv.utils.logging import get_logger

    class OverloadInitConv(nn.Conv2d, BaseModule):

        def init_weights(self):
            # Custom initializer: fill every parameter with ones.
            for p in self.parameters():
                with torch.no_grad():
                    p.fill_(1)

    class CheckLoggerModel(BaseModule):

        def __init__(self, init_cfg=None):
            super(CheckLoggerModel, self).__init__(init_cfg)
            self.conv1 = nn.Conv2d(1, 1, 1, 1)
            self.conv2 = OverloadInitConv(1, 1, 1, 1)
            self.conv3 = nn.Conv2d(1, 1, 1, 1)
            self.fc1 = nn.Linear(1, 1)

    init_cfg = [
        dict(
            type='Normal',
            layer='Conv2d',
            std=0.01,
            override=dict(
                type='Normal', name='conv3', std=0.01, bias_prob=0.01)),
        dict(type='Constant', layer='Linear', val=0.0, bias=1.0)
    ]
    model = CheckLoggerModel(init_cfg=init_cfg)
    train_log = '20210720_132454.log'
    workdir = tempfile.mkdtemp()
    log_file = os.path.join(workdir, train_log)
    get_logger('init_logger', log_file=log_file)
    # The tracking dict must only exist while init_weights() is running.
    assert not hasattr(model, '_params_init_info')
    model.init_weights()
    assert not hasattr(model, '_params_init_info')
    assert os.path.exists(log_file)
    lines = mmcv.list_from_file(log_file)
    for i, line in enumerate(lines):
        if 'conv1.weight' in line:
            assert 'NormalInit' in lines[i + 1]
        if 'conv2.weight' in line:
            assert 'OverloadInitConv' in lines[i + 1]
        if 'fc1.weight' in line:
            assert 'ConstantInit' in lines[i + 1]

    class OverloadInitConvFc(nn.Conv2d, BaseModule):

        def __init__(self, *args, **kwargs):
            super(OverloadInitConvFc, self).__init__(*args, **kwargs)
            self.conv1 = nn.Linear(1, 1)

        def init_weights(self):
            for p in self.parameters():
                with torch.no_grad():
                    p.fill_(1)

    # Redefined on purpose: conv2 now uses the conv+fc overload above.
    class CheckLoggerModel(BaseModule):

        def __init__(self, init_cfg=None):
            super(CheckLoggerModel, self).__init__(init_cfg)
            self.conv1 = nn.Conv2d(1, 1, 1, 1)
            self.conv2 = OverloadInitConvFc(1, 1, 1, 1)
            self.conv3 = nn.Conv2d(1, 1, 1, 1)
            self.fc1 = nn.Linear(1, 1)

    class TopLevelModule(BaseModule):

        def __init__(self, init_cfg=None, checklog_init_cfg=None):
            super(TopLevelModule, self).__init__(init_cfg)
            self.module1 = CheckLoggerModel(checklog_init_cfg)
            self.module2 = OverloadInitConvFc(1, 1, 1, 1)

    checklog_init_cfg = [
        dict(
            type='Normal',
            layer='Conv2d',
            std=0.01,
            override=dict(
                type='Normal', name='conv3', std=0.01, bias_prob=0.01)),
        dict(type='Constant', layer='Linear', val=0.0, bias=1.0)
    ]
    top_level_init_cfg = [
        dict(
            type='Normal',
            layer='Conv2d',
            std=0.01,
            override=dict(
                type='Normal', name='module2', std=0.01, bias_prob=0.01))
    ]
    model = TopLevelModule(
        init_cfg=top_level_init_cfg, checklog_init_cfg=checklog_init_cfg)
    # Initialize children first, then the parent, then children again, so
    # the final top-level pass sees parameters that did not change.
    model.module1.init_weights()
    model.module2.init_weights()
    model.init_weights()
    model.module1.init_weights()
    model.module2.init_weights()
    assert not hasattr(model, '_params_init_info')
    model.init_weights()
    assert not hasattr(model, '_params_init_info')
    assert os.path.exists(log_file)
    lines = mmcv.list_from_file(log_file)
    for i, line in enumerate(lines):
        if 'TopLevelModule' in line and 'init_cfg' not in line:
            assert 'the same' in line
def test_update_init_info():
    """``update_init_info`` refreshes per-parameter records and must reject
    parameters that are absent from ``_params_init_info``."""

    class DummyModel(BaseModule):

        def __init__(self, init_cfg=None):
            super().__init__(init_cfg)
            self.conv1 = nn.Conv2d(1, 1, 1, 1)
            self.conv3 = nn.Conv2d(1, 1, 1, 1)
            self.fc1 = nn.Linear(1, 1)

    model = DummyModel()
    from collections import defaultdict
    model._params_init_info = defaultdict(dict)
    for name, param in model.named_parameters():
        model._params_init_info[param]['init_info'] = 'init'
        model._params_init_info[param]['tmp_mean_value'] = param.data.mean()

    with torch.no_grad():
        for p in model.parameters():
            p.fill_(1)
    update_init_info(model, init_info='fill_1')

    # Every record must reflect the new init string and the new mean of 1.
    for item in model._params_init_info.values():
        assert item['init_info'] == 'fill_1'
        assert item['tmp_mean_value'] == 1

    # A freshly created Parameter is unknown to the bookkeeping dict, so
    # updating must fail loudly.
    model.conv1.bias = nn.Parameter(torch.ones_like(model.conv1.bias))
    with pytest.raises(AssertionError):
        update_init_info(model, init_info=' ')
def test_model_weight_init():
    """A parent's layer-wise init_cfg initializes every child component.

    FooModel declares Constant inits per layer type (Linear: 1/2,
    Conv1d: 3/4, Conv2d: 5/6).  No component carries its own init_cfg,
    so all of them — including the nested FooLinearConv1d and the plain
    ``reg`` Linear — inherit these values.
    """

    def _assert_full(param, value):
        # All elements of `param` must equal `value`.
        assert torch.equal(param, torch.full(param.shape, value))

    model_cfg = dict(
        type='FooModel',
        init_cfg=[
            dict(type='Constant', val=1, bias=2, layer='Linear'),
            dict(type='Constant', val=3, bias=4, layer='Conv1d'),
            dict(type='Constant', val=5, bias=6, layer='Conv2d')
        ],
        component1=dict(type='FooConv1d'),
        component2=dict(type='FooConv2d'),
        component3=dict(type='FooLinear'),
        component4=dict(
            type='FooLinearConv1d',
            linear=dict(type='FooLinear'),
            conv1d=dict(type='FooConv1d')))
    model = build_from_cfg(model_cfg, FOOMODELS)
    model.init_weights()

    _assert_full(model.component1.conv1d.weight, 3.0)
    _assert_full(model.component1.conv1d.bias, 4.0)
    _assert_full(model.component2.conv2d.weight, 5.0)
    _assert_full(model.component2.conv2d.bias, 6.0)
    _assert_full(model.component3.linear.weight, 1.0)
    _assert_full(model.component3.linear.bias, 2.0)
    _assert_full(model.component4.linear.linear.weight, 1.0)
    _assert_full(model.component4.linear.linear.bias, 2.0)
    _assert_full(model.component4.conv1d.conv1d.weight, 3.0)
    _assert_full(model.component4.conv1d.conv1d.bias, 4.0)
    _assert_full(model.reg.weight, 1.0)
    _assert_full(model.reg.bias, 2.0)
def test_nest_components_weight_init():
    """A nested component's own init_cfg overrides the parent's layer cfgs.

    Expected values after ``init_weights()``:
      component1 Conv1d -> 7/8    (its own cfg)
      component2 Conv2d -> 9/10   (its own cfg)
      component3 Linear -> 1/2    (inherited from parent)
      component4 linear -> 1/2, conv1d -> 3/4 (inherited)
      reg               -> 13/14  (parent-level `override`)
    """

    def _assert_full(param, value):
        assert torch.equal(param, torch.full(param.shape, value))

    model_cfg = dict(
        type='FooModel',
        init_cfg=[
            dict(
                type='Constant',
                val=1,
                bias=2,
                layer='Linear',
                override=dict(type='Constant', name='reg', val=13, bias=14)),
            dict(type='Constant', val=3, bias=4, layer='Conv1d'),
            dict(type='Constant', val=5, bias=6, layer='Conv2d')
        ],
        component1=dict(
            type='FooConv1d',
            init_cfg=dict(type='Constant', layer='Conv1d', val=7, bias=8)),
        component2=dict(
            type='FooConv2d',
            init_cfg=dict(type='Constant', layer='Conv2d', val=9, bias=10)),
        component3=dict(type='FooLinear'),
        component4=dict(
            type='FooLinearConv1d',
            linear=dict(type='FooLinear'),
            conv1d=dict(type='FooConv1d')))
    model = build_from_cfg(model_cfg, FOOMODELS)
    model.init_weights()

    _assert_full(model.component1.conv1d.weight, 7.0)
    _assert_full(model.component1.conv1d.bias, 8.0)
    _assert_full(model.component2.conv2d.weight, 9.0)
    _assert_full(model.component2.conv2d.bias, 10.0)
    _assert_full(model.component3.linear.weight, 1.0)
    _assert_full(model.component3.linear.bias, 2.0)
    _assert_full(model.component4.linear.linear.weight, 1.0)
    _assert_full(model.component4.linear.linear.bias, 2.0)
    _assert_full(model.component4.conv1d.conv1d.weight, 3.0)
    _assert_full(model.component4.conv1d.conv1d.bias, 4.0)
    _assert_full(model.reg.weight, 13.0)
    _assert_full(model.reg.bias, 14.0)
def test_without_layer_weight_init():
    """A child init_cfg without a `layer` key must not override the parent.

    component1 carries ``init_cfg=dict(type='Constant', val=7, bias=8)``
    with no layer specified, so that cfg applies to nothing and the
    parent's Conv1d values (3/4) win.
    """

    def _assert_full(param, value):
        assert torch.equal(param, torch.full(param.shape, value))

    model_cfg = dict(
        type='FooModel',
        init_cfg=[
            dict(type='Constant', val=1, bias=2, layer='Linear'),
            dict(type='Constant', val=3, bias=4, layer='Conv1d'),
            dict(type='Constant', val=5, bias=6, layer='Conv2d')
        ],
        component1=dict(
            type='FooConv1d', init_cfg=dict(type='Constant', val=7, bias=8)),
        component2=dict(type='FooConv2d'),
        component3=dict(type='FooLinear'))
    model = build_from_cfg(model_cfg, FOOMODELS)
    model.init_weights()

    _assert_full(model.component1.conv1d.weight, 3.0)
    _assert_full(model.component1.conv1d.bias, 4.0)
    _assert_full(model.component2.conv2d.weight, 5.0)
    _assert_full(model.component2.conv2d.bias, 6.0)
    _assert_full(model.component3.linear.weight, 1.0)
    _assert_full(model.component3.linear.bias, 2.0)
    _assert_full(model.reg.weight, 1.0)
    _assert_full(model.reg.bias, 2.0)
def test_override_weight_init():
    """`override` in init_cfg must target only the named submodule."""

    def _full(param, value):
        return torch.full(param.shape, value)

    # An override naming 'reg' without its own type reuses the outer
    # Constant init — and applies it to `reg` only.
    model_cfg = dict(
        type='FooModel',
        init_cfg=[
            dict(type='Constant', val=10, bias=20, override=dict(name='reg'))
        ],
        component1=dict(type='FooConv1d'),
        component3=dict(type='FooLinear'))
    model = build_from_cfg(model_cfg, FOOMODELS)
    model.init_weights()
    assert torch.equal(model.reg.weight, _full(model.reg.weight, 10.0))
    assert torch.equal(model.reg.bias, _full(model.reg.bias, 20.0))
    # Components not named in the override stay untouched.
    assert not torch.equal(model.component1.conv1d.weight,
                           _full(model.component1.conv1d.weight, 10.0))
    assert not torch.equal(model.component1.conv1d.bias,
                           _full(model.component1.conv1d.bias, 20.0))
    assert not torch.equal(model.component3.linear.weight,
                           _full(model.component3.linear.weight, 10.0))
    assert not torch.equal(model.component3.linear.bias,
                           _full(model.component3.linear.bias, 20.0))

    # An override carrying its own type/val/bias wins over the outer cfg.
    model_cfg = dict(
        type='FooModel',
        init_cfg=[
            dict(
                type='Constant',
                val=1,
                bias=2,
                override=dict(name='reg', type='Constant', val=30, bias=40))
        ],
        component1=dict(type='FooConv1d'),
        component2=dict(type='FooConv2d'),
        component3=dict(type='FooLinear'))
    model = build_from_cfg(model_cfg, FOOMODELS)
    model.init_weights()
    assert torch.equal(model.reg.weight, _full(model.reg.weight, 30.0))
    assert torch.equal(model.reg.bias, _full(model.reg.bias, 40.0))
def test_sequential_model_weight_init():
    """Inside a Sequential, each child's own init_cfg takes priority."""
    seq_model_cfg = [
        dict(
            type='FooConv1d',
            init_cfg=dict(type='Constant', layer='Conv1d', val=0.0, bias=1.0)),
        dict(
            type='FooConv2d',
            init_cfg=dict(type='Constant', layer='Conv2d', val=2.0, bias=3.0))
    ]

    def _check(model):
        # Per-child configs win, so values stay 0/1 and 2/3.
        assert torch.equal(model[0].conv1d.weight,
                           torch.full(model[0].conv1d.weight.shape, 0.0))
        assert torch.equal(model[0].conv1d.bias,
                           torch.full(model[0].conv1d.bias.shape, 1.0))
        assert torch.equal(model[1].conv2d.weight,
                           torch.full(model[1].conv2d.weight.shape, 2.0))
        assert torch.equal(model[1].conv2d.bias,
                           torch.full(model[1].conv2d.bias.shape, 3.0))

    layers = [build_from_cfg(cfg, COMPONENTS) for cfg in seq_model_cfg]
    seq_model = Sequential(*layers)
    seq_model.init_weights()
    _check(seq_model)

    # Even with a Sequential-level init_cfg (4/5), the children's own
    # configs still take precedence.
    layers = [build_from_cfg(cfg, COMPONENTS) for cfg in seq_model_cfg]
    seq_model = Sequential(
        *layers,
        init_cfg=dict(
            type='Constant', layer=['Conv1d', 'Conv2d'], val=4.0, bias=5.0))
    seq_model.init_weights()
    _check(seq_model)
def test_modulelist_weight_init():
    """Inside a ModuleList, each child's own init_cfg takes priority."""
    models_cfg = [
        dict(
            type='FooConv1d',
            init_cfg=dict(type='Constant', layer='Conv1d', val=0.0, bias=1.0)),
        dict(
            type='FooConv2d',
            init_cfg=dict(type='Constant', layer='Conv2d', val=2.0, bias=3.0))
    ]

    def _check(mlist):
        # Per-child configs win, so values stay 0/1 and 2/3.
        assert torch.equal(mlist[0].conv1d.weight,
                           torch.full(mlist[0].conv1d.weight.shape, 0.0))
        assert torch.equal(mlist[0].conv1d.bias,
                           torch.full(mlist[0].conv1d.bias.shape, 1.0))
        assert torch.equal(mlist[1].conv2d.weight,
                           torch.full(mlist[1].conv2d.weight.shape, 2.0))
        assert torch.equal(mlist[1].conv2d.bias,
                           torch.full(mlist[1].conv2d.bias.shape, 3.0))

    layers = [build_from_cfg(cfg, COMPONENTS) for cfg in models_cfg]
    modellist = ModuleList(layers)
    modellist.init_weights()
    _check(modellist)

    # A ModuleList-level init_cfg (4/5) must not override the children.
    layers = [build_from_cfg(cfg, COMPONENTS) for cfg in models_cfg]
    modellist = ModuleList(
        layers,
        init_cfg=dict(
            type='Constant', layer=['Conv1d', 'Conv2d'], val=4.0, bias=5.0))
    modellist.init_weights()
    _check(modellist)
def test_moduledict_weight_init():
    """Inside a ModuleDict, each child's own init_cfg takes priority."""
    models_cfg = dict(
        foo_conv_1d=dict(
            type='FooConv1d',
            init_cfg=dict(type='Constant', layer='Conv1d', val=0.0, bias=1.0)),
        foo_conv_2d=dict(
            type='FooConv2d',
            init_cfg=dict(type='Constant', layer='Conv2d', val=2.0, bias=3.0)))

    def _check(mdict):
        # Per-child configs win, so values stay 0/1 and 2/3.
        assert torch.equal(
            mdict['foo_conv_1d'].conv1d.weight,
            torch.full(mdict['foo_conv_1d'].conv1d.weight.shape, 0.0))
        assert torch.equal(
            mdict['foo_conv_1d'].conv1d.bias,
            torch.full(mdict['foo_conv_1d'].conv1d.bias.shape, 1.0))
        assert torch.equal(
            mdict['foo_conv_2d'].conv2d.weight,
            torch.full(mdict['foo_conv_2d'].conv2d.weight.shape, 2.0))
        assert torch.equal(
            mdict['foo_conv_2d'].conv2d.bias,
            torch.full(mdict['foo_conv_2d'].conv2d.bias.shape, 3.0))

    layers = {
        name: build_from_cfg(cfg, COMPONENTS)
        for name, cfg in models_cfg.items()
    }
    modeldict = ModuleDict(layers)
    modeldict.init_weights()
    _check(modeldict)

    # A ModuleDict-level init_cfg (4/5) must not override the children.
    layers = {
        name: build_from_cfg(cfg, COMPONENTS)
        for name, cfg in models_cfg.items()
    }
    modeldict = ModuleDict(
        layers,
        init_cfg=dict(
            type='Constant', layer=['Conv1d', 'Conv2d'], val=4.0, bias=5.0))
    modeldict.init_weights()
    _check(modeldict)
@MODULE_WRAPPERS.register_module()
class DDPWrapper:
    """Minimal stand-in for a distributed wrapper.

    It only stores the wrapped module under ``.module`` so that code which
    unwraps registered MODULE_WRAPPERS can be exercised without real DDP.
    """

    def __init__(self, module):
        self.module = module
class Block(nn.Module):
    """Conv + BatchNorm pair used as a fixture for state-dict tests."""

    def __init__(self):
        super().__init__()
        self.conv = nn.Conv2d(3, 3, 1)
        self.norm = nn.BatchNorm2d(3)
class Model(nn.Module):
    """A Block plus an extra conv; fixture for checkpoint round-trips."""

    def __init__(self):
        super().__init__()
        self.block = Block()
        self.conv = nn.Conv2d(3, 3, 1)
class Mockpavimodel:
    """Fake pavi model object; ``download`` deliberately does nothing."""

    def __init__(self, name='fakename'):
        self.name = name

    def download(self, file):
        # No-op: a real pavi model would fetch `file` from the model cloud.
        pass
def assert_tensor_equal(tensor_a, tensor_b):
    """Assert elementwise equality of two tensors."""
    assert (tensor_a == tensor_b).all()
def test_get_state_dict():
    """``get_state_dict`` must unwrap wrapper modules and keep plain keys."""
    if torch.__version__ == 'parrots':
        # parrots BatchNorm has no num_batches_tracked buffer.
        state_dict_keys = set([
            'block.conv.weight', 'block.conv.bias', 'block.norm.weight',
            'block.norm.bias', 'block.norm.running_mean',
            'block.norm.running_var', 'conv.weight', 'conv.bias'
        ])
    else:
        state_dict_keys = set([
            'block.conv.weight', 'block.conv.bias', 'block.norm.weight',
            'block.norm.bias', 'block.norm.running_mean',
            'block.norm.running_var', 'block.norm.num_batches_tracked',
            'conv.weight', 'conv.bias'
        ])

    def _compare(state_dict, expected):
        # `expected` maps state-dict keys to the live tensors they mirror.
        assert isinstance(state_dict, OrderedDict)
        assert set(state_dict.keys()) == state_dict_keys
        for key, tensor in expected.items():
            assert_tensor_equal(state_dict[key], tensor)

    # Plain model: keys map straight onto attributes.
    model = Model()
    state_dict = get_state_dict(model)
    expected = {
        'block.conv.weight': model.block.conv.weight,
        'block.conv.bias': model.block.conv.bias,
        'block.norm.weight': model.block.norm.weight,
        'block.norm.bias': model.block.norm.bias,
        'block.norm.running_mean': model.block.norm.running_mean,
        'block.norm.running_var': model.block.norm.running_var,
        'conv.weight': model.conv.weight,
        'conv.bias': model.conv.bias,
    }
    if torch.__version__ != 'parrots':
        expected['block.norm.num_batches_tracked'] = \
            model.block.norm.num_batches_tracked
    _compare(state_dict, expected)

    # A registered MODULE_WRAPPERS wrapper must be transparently unwrapped.
    wrapped_model = DDPWrapper(model)
    state_dict = get_state_dict(wrapped_model)
    inner = wrapped_model.module
    expected = {
        'block.conv.weight': inner.block.conv.weight,
        'block.conv.bias': inner.block.conv.bias,
        'block.norm.weight': inner.block.norm.weight,
        'block.norm.bias': inner.block.norm.bias,
        'block.norm.running_mean': inner.block.norm.running_mean,
        'block.norm.running_var': inner.block.norm.running_var,
        'conv.weight': inner.conv.weight,
        'conv.bias': inner.conv.bias,
    }
    if torch.__version__ != 'parrots':
        expected['block.norm.num_batches_tracked'] = \
            inner.block.norm.num_batches_tracked
    _compare(state_dict, expected)

    # Wrap every child in DataParallel too; the extra `.module` level must
    # not leak into the state-dict keys.
    for name, module in wrapped_model.module._modules.items():
        module = DataParallel(module)
        wrapped_model.module._modules[name] = module
    state_dict = get_state_dict(wrapped_model)
    inner = wrapped_model.module
    expected = {
        'block.conv.weight': inner.block.module.conv.weight,
        'block.conv.bias': inner.block.module.conv.bias,
        'block.norm.weight': inner.block.module.norm.weight,
        'block.norm.bias': inner.block.module.norm.bias,
        'block.norm.running_mean': inner.block.module.norm.running_mean,
        'block.norm.running_var': inner.block.module.norm.running_var,
        'conv.weight': inner.conv.module.weight,
        'conv.bias': inner.conv.module.bias,
    }
    if torch.__version__ != 'parrots':
        expected['block.norm.num_batches_tracked'] = \
            inner.block.module.norm.num_batches_tracked
    _compare(state_dict, expected)
def test_load_pavimodel_dist():
    """pavi loading: path must carry the scheme, missing files must raise."""
    sys.modules['pavi'] = MagicMock()
    sys.modules['pavi.modelcloud'] = MagicMock()
    pavimodel = Mockpavimodel()
    import pavi
    pavi.modelcloud.get = MagicMock(return_value=pavimodel)

    # Path without the 'pavi://' prefix is rejected up front.
    with pytest.raises(AssertionError):
        _ = load_from_pavi('MyPaviFolder/checkpoint.pth')

    # The mocked `download` writes nothing, so the checkpoint file
    # never materializes on disk.
    with pytest.raises(FileNotFoundError):
        _ = load_from_pavi('pavi://checkpoint.pth')
def test_load_checkpoint_with_prefix():
    """``_load_checkpoint_with_prefix`` keeps only keys under the prefix.

    Fix: the original entered ``TemporaryDirectory()`` without binding it
    and saved to a relative ``'model.pth'``, leaking the checkpoint into
    the current working directory.  The path is now joined onto the
    temporary directory so it is cleaned up automatically.
    """
    import os.path as osp

    class FooModule(nn.Module):

        def __init__(self):
            super().__init__()
            self.linear = nn.Linear(1, 2)
            self.conv2d = nn.Conv2d(3, 1, 3)
            self.conv2d_2 = nn.Conv2d(3, 2, 3)

    model = FooModule()
    nn.init.constant_(model.linear.weight, 1)
    nn.init.constant_(model.linear.bias, 2)
    nn.init.constant_(model.conv2d.weight, 3)
    nn.init.constant_(model.conv2d.bias, 4)
    nn.init.constant_(model.conv2d_2.weight, 5)
    nn.init.constant_(model.conv2d_2.bias, 6)

    with TemporaryDirectory() as tmp_dir:
        ckpt_path = osp.join(tmp_dir, 'model.pth')
        torch.save(model.state_dict(), ckpt_path)
        prefix = 'conv2d'
        state_dict = _load_checkpoint_with_prefix(prefix, ckpt_path)
        # Only the conv2d.* entries survive, with the prefix stripped.
        assert torch.equal(model.conv2d.state_dict()['weight'],
                           state_dict['weight'])
        assert torch.equal(model.conv2d.state_dict()['bias'],
                           state_dict['bias'])
        # A prefix matching no keys must raise.
        with pytest.raises(AssertionError):
            prefix = 'back'
            _load_checkpoint_with_prefix(prefix, ckpt_path)
def test_load_checkpoint():
    """``revise_keys`` should add or strip prefixes while loading."""
    import os
    import re
    import tempfile

    class PrefixModel(nn.Module):

        def __init__(self):
            super().__init__()
            self.backbone = Model()

    pmodel = PrefixModel()
    model = Model()
    checkpoint_path = os.path.join(tempfile.gettempdir(), 'checkpoint.pth')

    # Add a 'backbone.' prefix while loading a plain Model into PrefixModel.
    torch.save(model.state_dict(), checkpoint_path)
    state_dict = load_checkpoint(
        pmodel, checkpoint_path, revise_keys=[('^', 'backbone.')])
    for key in pmodel.backbone.state_dict().keys():
        assert torch.equal(pmodel.backbone.state_dict()[key], state_dict[key])

    # Strip the 'backbone.' prefix going the other way.
    torch.save(pmodel.state_dict(), checkpoint_path)
    state_dict = load_checkpoint(
        model, checkpoint_path, revise_keys=[('^backbone\\.', '')])
    for key in state_dict.keys():
        key_stripped = re.sub('^backbone\\.', '', key)
        assert torch.equal(model.state_dict()[key_stripped], state_dict[key])
    os.remove(checkpoint_path)
def test_load_checkpoint_metadata():
    """A ``_load_from_state_dict`` hook should migrate v1 keys to v2."""
    import os
    import tempfile

    from mmcv.runner import load_checkpoint, save_checkpoint

    class ModelV1(nn.Module):

        def __init__(self):
            super().__init__()
            self.block = Block()
            self.conv1 = nn.Conv2d(3, 3, 1)
            self.conv2 = nn.Conv2d(3, 3, 1)
            nn.init.normal_(self.conv1.weight)
            nn.init.normal_(self.conv2.weight)

    class ModelV2(nn.Module):
        # Stamped into checkpoint metadata so loaders can detect format.
        _version = 2

        def __init__(self):
            super().__init__()
            self.block = Block()
            self.conv0 = nn.Conv2d(3, 3, 1)
            self.conv1 = nn.Conv2d(3, 3, 1)
            nn.init.normal_(self.conv0.weight)
            nn.init.normal_(self.conv1.weight)

        def _load_from_state_dict(self, state_dict, prefix, local_metadata,
                                  *args, **kwargs):
            """Remap legacy conv1/conv2 keys, then defer to the default."""
            version = local_metadata.get('version', None)
            if version is None or version < 2:
                state_dict_keys = list(state_dict.keys())
                convert_map = {'conv1': 'conv0', 'conv2': 'conv1'}
                for k in state_dict_keys:
                    for ori_str, new_str in convert_map.items():
                        if k.startswith(prefix + ori_str):
                            new_key = k.replace(ori_str, new_str)
                            state_dict[new_key] = state_dict[k]
                            del state_dict[k]
            super()._load_from_state_dict(state_dict, prefix, local_metadata,
                                          *args, **kwargs)

    model_v1 = ModelV1()
    model_v1_conv0_weight = model_v1.conv1.weight.detach()
    model_v1_conv1_weight = model_v1.conv2.weight.detach()
    model_v2 = ModelV2()
    model_v2_conv0_weight = model_v2.conv0.weight.detach()
    model_v2_conv1_weight = model_v2.conv1.weight.detach()
    ckpt_v1_path = os.path.join(tempfile.gettempdir(), 'checkpoint_v1.pth')
    ckpt_v2_path = os.path.join(tempfile.gettempdir(), 'checkpoint_v2.pth')
    save_checkpoint(model_v1, ckpt_v1_path)
    save_checkpoint(model_v2, ckpt_v2_path)

    # Loading a v1 checkpoint into a v2 model triggers the remapping.
    load_checkpoint(model_v2, ckpt_v1_path)
    assert torch.allclose(model_v2.conv0.weight, model_v1_conv0_weight)
    assert torch.allclose(model_v2.conv1.weight, model_v1_conv1_weight)

    # A v2 checkpoint loads as-is.
    load_checkpoint(model_v2, ckpt_v2_path)
    assert torch.allclose(model_v2.conv0.weight, model_v2_conv0_weight)
    assert torch.allclose(model_v2.conv1.weight, model_v2_conv1_weight)
def test_load_classes_name():
    """CLASSES metadata must round-trip through save/load, incl. wrappers."""
    import os
    import tempfile

    from mmcv.runner import load_checkpoint, save_checkpoint
    checkpoint_path = os.path.join(tempfile.gettempdir(), 'checkpoint.pth')
    model = Model()
    save_checkpoint(model, checkpoint_path)
    checkpoint = load_checkpoint(model, checkpoint_path)
    assert 'meta' in checkpoint and 'CLASSES' not in checkpoint['meta']

    model.CLASSES = ('class1', 'class2')
    save_checkpoint(model, checkpoint_path)
    checkpoint = load_checkpoint(model, checkpoint_path)
    assert 'meta' in checkpoint and 'CLASSES' in checkpoint['meta']
    assert checkpoint['meta']['CLASSES'] == ('class1', 'class2')

    # Same checks through a registered wrapper: CLASSES lives on .module.
    model = Model()
    wrapped_model = DDPWrapper(model)
    save_checkpoint(wrapped_model, checkpoint_path)
    checkpoint = load_checkpoint(wrapped_model, checkpoint_path)
    assert 'meta' in checkpoint and 'CLASSES' not in checkpoint['meta']

    wrapped_model.module.CLASSES = ('class1', 'class2')
    save_checkpoint(wrapped_model, checkpoint_path)
    checkpoint = load_checkpoint(wrapped_model, checkpoint_path)
    assert 'meta' in checkpoint and 'CLASSES' in checkpoint['meta']
    assert checkpoint['meta']['CLASSES'] == ('class1', 'class2')
    os.remove(checkpoint_path)
def test_checkpoint_loader():
    """URI schemes route to the right loader; registration honors force
    and longest-prefix matching."""
    import os
    import tempfile

    from mmcv.runner import CheckpointLoader, _load_checkpoint, save_checkpoint
    checkpoint_path = os.path.join(tempfile.gettempdir(), 'checkpoint.pth')
    model = Model()
    save_checkpoint(model, checkpoint_path)
    checkpoint = _load_checkpoint(checkpoint_path)
    assert 'meta' in checkpoint and 'CLASSES' not in checkpoint['meta']
    os.remove(checkpoint_path)

    # Each URI scheme maps to a dedicated loader function.
    filenames = [
        'http://xx.xx/xx.pth', 'https://xx.xx/xx.pth',
        'modelzoo://xx.xx/xx.pth', 'torchvision://xx.xx/xx.pth',
        'open-mmlab://xx.xx/xx.pth', 'openmmlab://xx.xx/xx.pth',
        'mmcls://xx.xx/xx.pth', 'pavi://xx.xx/xx.pth', 's3://xx.xx/xx.pth',
        'ss3://xx.xx/xx.pth', ' s3://xx.xx/xx.pth',
        'open-mmlab:s3://xx.xx/xx.pth', 'openmmlab:s3://xx.xx/xx.pth',
        'openmmlabs3://xx.xx/xx.pth', ':s3://xx.xx/xx.path'
    ]
    fn_names = [
        'load_from_http', 'load_from_http', 'load_from_torchvision',
        'load_from_torchvision', 'load_from_openmmlab', 'load_from_openmmlab',
        'load_from_mmcls', 'load_from_pavi', 'load_from_ceph',
        'load_from_local', 'load_from_local', 'load_from_ceph',
        'load_from_ceph', 'load_from_local', 'load_from_local'
    ]
    for filename, fn_name in zip(filenames, fn_names):
        loader = CheckpointLoader._get_checkpoint_loader(filename)
        assert loader.__name__ == fn_name

    # New schemes can be registered via the decorator form...
    @CheckpointLoader.register_scheme(prefixes='ftp://')
    def load_from_ftp(filename, map_location):
        return dict(filename=filename)

    filename = 'ftp://xx.xx/xx.pth'
    loader = CheckpointLoader._get_checkpoint_loader(filename)
    assert loader.__name__ == 'load_from_ftp'

    def load_from_ftp1(filename, map_location):
        return dict(filename=filename)

    # ...but re-registering an existing prefix requires force=True.
    with pytest.raises(KeyError):
        CheckpointLoader.register_scheme('ftp://', load_from_ftp1)
    CheckpointLoader.register_scheme('ftp://', load_from_ftp1, force=True)
    checkpoint = CheckpointLoader.load_checkpoint(filename)
    assert checkpoint['filename'] == filename
    loader = CheckpointLoader._get_checkpoint_loader(filename)
    assert loader.__name__ == 'load_from_ftp1'

    # The longest matching prefix wins.
    @CheckpointLoader.register_scheme(prefixes='a/b')
    def load_from_ab(filename, map_location):
        return dict(filename=filename)

    @CheckpointLoader.register_scheme(prefixes='a/b/c')
    def load_from_abc(filename, map_location):
        return dict(filename=filename)

    filename = 'a/b/c/d'
    loader = CheckpointLoader._get_checkpoint_loader(filename)
    assert loader.__name__ == 'load_from_abc'
def test_save_checkpoint(tmp_path):
    """save_checkpoint accepts optimizer/meta/file-client args; s3 paths
    go through the Petrel backend."""
    model = Model()
    optimizer = optim.SGD(model.parameters(), lr=0.1, momentum=0.9)

    # meta must be a dict.
    with pytest.raises(TypeError):
        save_checkpoint(model, '/path/of/your/filename', meta='invalid type')

    filename = str(tmp_path / 'checkpoint1.pth')
    save_checkpoint(model, filename)
    filename = str(tmp_path / 'checkpoint2.pth')
    save_checkpoint(model, filename, optimizer)
    filename = str(tmp_path / 'checkpoint3.pth')
    save_checkpoint(model, filename, meta={'test': 'test'})
    filename = str(tmp_path / 'checkpoint4.pth')
    save_checkpoint(model, filename, file_client_args={'backend': 'disk'})

    # s3:// paths must be written through PetrelBackend.put.
    with patch.object(PetrelBackend, 'put') as mock_method:
        filename = 's3://path/of/your/checkpoint1.pth'
        save_checkpoint(model, filename)
    mock_method.assert_called()

    with patch.object(PetrelBackend, 'put') as mock_method:
        filename = 's3://path//of/your/checkpoint2.pth'
        save_checkpoint(
            model, filename, file_client_args={'backend': 'petrel'})
    mock_method.assert_called()
def test_load_from_local():
    """A leading '~' in a local path must expand to the user's home dir."""
    import os
    home_path = os.path.expanduser('~')
    # The file must live in $HOME because the point of this test is
    # the tilde expansion below.
    checkpoint_path = os.path.join(
        home_path, 'dummy_checkpoint_used_to_test_load_from_local.pth')
    model = Model()
    save_checkpoint(model, checkpoint_path)
    checkpoint = load_from_local(
        '~/dummy_checkpoint_used_to_test_load_from_local.pth',
        map_location=None)
    assert_tensor_equal(checkpoint['state_dict']['block.conv.weight'],
                        model.block.conv.weight)
    os.remove(checkpoint_path)
@patch('torch.cuda.device_count', return_value=1)
@patch('torch.cuda.set_device')
@patch('torch.distributed.init_process_group')
@patch('subprocess.getoutput', return_value='127.0.0.1')
def test_init_dist(mock_getoutput, mock_dist_init, mock_set_device,
                   mock_device_count):
    """The slurm launcher derives master addr/port/rank from SLURM_* env."""
    with pytest.raises(ValueError):
        init_dist('invaliad_launcher')

    os.environ['SLURM_PROCID'] = '0'
    os.environ['SLURM_NTASKS'] = '1'
    os.environ['SLURM_NODELIST'] = '[0]'

    def _verify(expected_port):
        # Shared assertions for every init_dist('slurm') call below.
        assert os.environ['MASTER_PORT'] == expected_port
        assert os.environ['MASTER_ADDR'] == '127.0.0.1'
        assert os.environ['WORLD_SIZE'] == '1'
        assert os.environ['RANK'] == '0'
        mock_set_device.assert_called_with(0)
        mock_getoutput.assert_called_with(
            'scontrol show hostname [0] | head -n1')
        mock_dist_init.assert_called_with(backend='nccl')

    # Default port.
    init_dist('slurm')
    _verify('29500')
    # An explicit port wins.
    init_dist('slurm', port=29505)
    _verify('29505')
    # Without an explicit port, the already-exported MASTER_PORT sticks.
    init_dist('slurm')
    _verify('29505')
class ExampleDataset(Dataset):
    """Single-item dataset whose ``evaluate`` is an autospec'd mock.

    `eval_result` and `index` exist so subclasses can script deterministic
    evaluation outputs.
    """

    def __init__(self):
        self.index = 0
        self.eval_result = [1, 4, 3, 7, 2, (- 3), 4, 6]

    def __getitem__(self, idx):
        return dict(x=torch.tensor([1]))

    def __len__(self):
        return 1

    @mock.create_autospec
    def evaluate(self, results, logger=None):
        pass
class EvalDataset(ExampleDataset):
    """Dataset whose evaluation walks the scripted `eval_result` list."""

    def evaluate(self, results, logger=None):
        # Emit the next scripted value for every tracked metric, then
        # advance so each call yields the following entry.
        acc = self.eval_result[self.index]
        output = OrderedDict(
            acc=acc, index=self.index, score=acc, loss_top=acc)
        self.index += 1
        return output
class Model(nn.Module):
    """Single-parameter model: forward scales the input by ``param``."""

    def __init__(self):
        super().__init__()
        self.param = nn.Parameter(torch.tensor([1.0]))

    def forward(self, x, **kwargs):
        return self.param * x

    def train_step(self, data_batch, optimizer, **kwargs):
        # The loss is just the summed output, which keeps runner tests trivial.
        return {'loss': torch.sum(self(data_batch['x']))}

    def val_step(self, data_batch, optimizer, **kwargs):
        return {'loss': torch.sum(self(data_batch['x']))}
def _build_epoch_runner():
    """Return an ``EpochBasedRunner`` driving a toy ``Model``.

    The runner writes into a freshly created temporary work directory.
    """
    toy_model = Model()
    work_dir = tempfile.mkdtemp()
    return EpochBasedRunner(
        model=toy_model, work_dir=work_dir, logger=get_logger('demo'))
def _build_iter_runner():
    """Return an ``IterBasedRunner`` driving a toy ``Model``.

    The runner writes into a freshly created temporary work directory.
    """
    toy_model = Model()
    work_dir = tempfile.mkdtemp()
    return IterBasedRunner(
        model=toy_model, work_dir=work_dir, logger=get_logger('demo'))
class EvalHook(BaseEvalHook):
    # Test-local subclass that widens the default metric-key lists so the
    # metrics emitted by the datasets in this file ('acc', 'loss_top', ...)
    # are covered — presumably these lists drive the base hook's
    # greater/less rule inference for `save_best` (confirm against
    # BaseEvalHook).
    _default_greater_keys = ['acc', 'top']
    _default_less_keys = ['loss', 'loss_top']

    def __init__(self, *args, **kwargs):
        # Pure pass-through; kept so this subclass mirrors DistEvalHook.
        super().__init__(*args, **kwargs)
class DistEvalHook(BaseDistEvalHook):
    # Distributed counterpart of the test-local EvalHook above. Note this
    # one overrides the public `greater_keys`/`less_keys` attributes rather
    # than the `_default_*` ones used by EvalHook.
    greater_keys = ['acc', 'top']
    less_keys = ['loss', 'loss_top']

    def __init__(self, *args, **kwargs):
        # Pure pass-through constructor.
        super().__init__(*args, **kwargs)
def test_eval_hook():
    """End-to-end tests of ``EvalHook``: argument validation, best-checkpoint
    saving under different metrics/rules, resume behavior, and saving to a
    remote ``out_dir`` via the (mocked) Petrel backend."""
    # --- argument validation -------------------------------------------------
    # save_best must be a str or None, not a bool.
    with pytest.raises(AssertionError):
        test_dataset = Model()
        data_loader = DataLoader(test_dataset)
        EvalHook(data_loader, save_best=True)
    # dataloader must be a DataLoader, not a list of them.
    with pytest.raises(TypeError):
        test_dataset = Model()
        data_loader = [DataLoader(test_dataset)]
        EvalHook(data_loader)
    # unknown save_best key is rejected.
    with pytest.raises(ValueError):
        test_dataset = ExampleDataset()
        data_loader = DataLoader(test_dataset)
        EvalHook(data_loader, save_best='unsupport')
    # unknown comparison rule is rejected.
    with pytest.raises(KeyError):
        test_dataset = ExampleDataset()
        data_loader = DataLoader(test_dataset)
        EvalHook(data_loader, save_best='auto', rule='unsupport')
    # --- empty eval result only warns, does not crash ------------------------
    with pytest.warns(UserWarning) as record_warnings:

        class _EvalDataset(ExampleDataset):

            def evaluate(self, results, logger=None):
                return {}

        test_dataset = _EvalDataset()
        data_loader = DataLoader(test_dataset)
        eval_hook = EvalHook(data_loader, save_best='auto')
        runner = _build_epoch_runner()
        runner.register_hook(eval_hook)
        runner.run([data_loader], [('train', 1)], 1)
    expected_message = 'Since `eval_res` is an empty dict, the behavior to save the best checkpoint will be skipped in this evaluation.'
    # The expected warning must have been emitted at least once.
    for warning in record_warnings:
        if (str(warning.message) == expected_message):
            break
    else:
        assert False
    # --- save_best=None: evaluate is called but no best ckpt is tracked ------
    test_dataset = ExampleDataset()
    loader = DataLoader(test_dataset)
    model = Model()
    data_loader = DataLoader(test_dataset)
    eval_hook = EvalHook(data_loader, save_best=None)
    with tempfile.TemporaryDirectory() as tmpdir:
        logger = get_logger('test_eval')
        runner = EpochBasedRunner(model=model, work_dir=tmpdir, logger=logger)
        runner.register_hook(eval_hook)
        runner.run([loader], [('train', 1)], 1)
        # evaluate() is the autospecced mock from ExampleDataset.
        test_dataset.evaluate.assert_called_with(
            test_dataset, [torch.tensor([1])], logger=runner.logger)
        assert ((runner.meta is None) or ('best_score' not in runner.meta['hook_msgs']))
        assert ((runner.meta is None) or ('best_ckpt' not in runner.meta['hook_msgs']))
    # --- save_best='auto': picks 'acc' (first key); max value 7 at epoch 4 ---
    loader = DataLoader(EvalDataset())
    model = Model()
    data_loader = DataLoader(EvalDataset())
    eval_hook = EvalHook(data_loader, interval=1, save_best='auto')
    with tempfile.TemporaryDirectory() as tmpdir:
        logger = get_logger('test_eval')
        runner = EpochBasedRunner(model=model, work_dir=tmpdir, logger=logger)
        runner.register_checkpoint_hook(dict(interval=1))
        runner.register_hook(eval_hook)
        runner.run([loader], [('train', 1)], 8)
        ckpt_path = osp.join(tmpdir, 'best_acc_epoch_4.pth')
        assert (runner.meta['hook_msgs']['best_ckpt'] == ckpt_path)
        assert osp.exists(ckpt_path)
        assert (runner.meta['hook_msgs']['best_score'] == 7)
    # --- explicit save_best='acc' behaves like 'auto' here -------------------
    loader = DataLoader(EvalDataset())
    model = Model()
    data_loader = DataLoader(EvalDataset())
    eval_hook = EvalHook(data_loader, interval=1, save_best='acc')
    with tempfile.TemporaryDirectory() as tmpdir:
        logger = get_logger('test_eval')
        runner = EpochBasedRunner(model=model, work_dir=tmpdir, logger=logger)
        runner.register_checkpoint_hook(dict(interval=1))
        runner.register_hook(eval_hook)
        runner.run([loader], [('train', 1)], 8)
        ckpt_path = osp.join(tmpdir, 'best_acc_epoch_4.pth')
        assert (runner.meta['hook_msgs']['best_ckpt'] == ckpt_path)
        assert osp.exists(ckpt_path)
        assert (runner.meta['hook_msgs']['best_score'] == 7)
    # --- 'loss_top' is a less-is-better key: min value -3 at epoch 6 ---------
    loader = DataLoader(EvalDataset())
    model = Model()
    data_loader = DataLoader(EvalDataset())
    eval_hook = EvalHook(data_loader, interval=1, save_best='loss_top')
    with tempfile.TemporaryDirectory() as tmpdir:
        logger = get_logger('test_eval')
        runner = EpochBasedRunner(model=model, work_dir=tmpdir, logger=logger)
        runner.register_checkpoint_hook(dict(interval=1))
        runner.register_hook(eval_hook)
        runner.run([loader], [('train', 1)], 8)
        ckpt_path = osp.join(tmpdir, 'best_loss_top_epoch_6.pth')
        assert (runner.meta['hook_msgs']['best_ckpt'] == ckpt_path)
        assert osp.exists(ckpt_path)
        assert (runner.meta['hook_msgs']['best_score'] == (- 3))
    # --- explicit rule='greater' on the 'score' key --------------------------
    data_loader = DataLoader(EvalDataset())
    eval_hook = EvalHook(data_loader, interval=1, save_best='score', rule='greater')
    with tempfile.TemporaryDirectory() as tmpdir:
        logger = get_logger('test_eval')
        runner = EpochBasedRunner(model=model, work_dir=tmpdir, logger=logger)
        runner.register_checkpoint_hook(dict(interval=1))
        runner.register_hook(eval_hook)
        runner.run([loader], [('train', 1)], 8)
        ckpt_path = osp.join(tmpdir, 'best_score_epoch_4.pth')
        assert (runner.meta['hook_msgs']['best_ckpt'] == ckpt_path)
        assert osp.exists(ckpt_path)
        assert (runner.meta['hook_msgs']['best_score'] == 7)
    # --- rule='less' overrides the default greater rule for 'acc' ------------
    data_loader = DataLoader(EvalDataset())
    eval_hook = EvalHook(data_loader, save_best='acc', rule='less')
    with tempfile.TemporaryDirectory() as tmpdir:
        logger = get_logger('test_eval')
        runner = EpochBasedRunner(model=model, work_dir=tmpdir, logger=logger)
        runner.register_checkpoint_hook(dict(interval=1))
        runner.register_hook(eval_hook)
        runner.run([loader], [('train', 1)], 8)
        ckpt_path = osp.join(tmpdir, 'best_acc_epoch_6.pth')
        assert (runner.meta['hook_msgs']['best_ckpt'] == ckpt_path)
        assert osp.exists(ckpt_path)
        assert (runner.meta['hook_msgs']['best_score'] == (- 3))
    # --- resume: the best ckpt/score survive resume, and the stale best ------
    # checkpoint is removed once a better one appears.
    data_loader = DataLoader(EvalDataset())
    eval_hook = EvalHook(data_loader, save_best='acc')
    with tempfile.TemporaryDirectory() as tmpdir:
        logger = get_logger('test_eval')
        runner = EpochBasedRunner(model=model, work_dir=tmpdir, logger=logger)
        runner.register_checkpoint_hook(dict(interval=1))
        runner.register_hook(eval_hook)
        runner.run([loader], [('train', 1)], 2)
        old_ckpt_path = osp.join(tmpdir, 'best_acc_epoch_2.pth')
        assert (runner.meta['hook_msgs']['best_ckpt'] == old_ckpt_path)
        assert osp.exists(old_ckpt_path)
        assert (runner.meta['hook_msgs']['best_score'] == 4)
        resume_from = old_ckpt_path
        loader = DataLoader(ExampleDataset())
        eval_hook = EvalHook(data_loader, save_best='acc')
        runner = EpochBasedRunner(model=model, work_dir=tmpdir, logger=logger)
        runner.register_checkpoint_hook(dict(interval=1))
        runner.register_hook(eval_hook)
        runner.resume(resume_from)
        # Best bookkeeping is restored from the resumed checkpoint meta.
        assert (runner.meta['hook_msgs']['best_ckpt'] == old_ckpt_path)
        assert osp.exists(old_ckpt_path)
        assert (runner.meta['hook_msgs']['best_score'] == 4)
        runner.run([loader], [('train', 1)], 8)
        ckpt_path = osp.join(tmpdir, 'best_acc_epoch_4.pth')
        assert (runner.meta['hook_msgs']['best_ckpt'] == ckpt_path)
        assert osp.exists(ckpt_path)
        assert (runner.meta['hook_msgs']['best_score'] == 7)
        # The superseded best checkpoint has been deleted.
        assert (not osp.exists(old_ckpt_path))
    # --- user-supplied greater/less key lists override the defaults ----------
    loader = DataLoader(EvalDataset())
    model = Model()
    data_loader = DataLoader(EvalDataset())
    eval_hook = EvalHook(
        data_loader, save_best='acc', test_fn=mock.MagicMock(return_value={}),
        greater_keys=[], less_keys=['acc'])
    with tempfile.TemporaryDirectory() as tmpdir:
        logger = get_logger('test_eval')
        runner = EpochBasedRunner(model=model, work_dir=tmpdir, logger=logger)
        runner.register_checkpoint_hook(dict(interval=1))
        runner.register_hook(eval_hook)
        runner.run([loader], [('train', 1)], 8)
        ckpt_path = osp.join(tmpdir, 'best_acc_epoch_6.pth')
        assert (runner.meta['hook_msgs']['best_ckpt'] == ckpt_path)
        assert osp.exists(ckpt_path)
        assert (runner.meta['hook_msgs']['best_score'] == (- 3))
    # --- out_dir on a remote backend: uploads/removals go through Petrel -----
    loader = DataLoader(EvalDataset())
    model = Model()
    data_loader = DataLoader(EvalDataset())
    out_dir = 's3://user/data'
    eval_hook = EvalHook(data_loader, interval=1, save_best='auto',
                         out_dir=out_dir)
    with patch.object(PetrelBackend, 'put') as mock_put, patch.object(PetrelBackend, 'remove') as mock_remove, patch.object(PetrelBackend, 'isfile') as mock_isfile, tempfile.TemporaryDirectory() as tmpdir:
        logger = get_logger('test_eval')
        runner = EpochBasedRunner(model=model, work_dir=tmpdir, logger=logger)
        runner.register_checkpoint_hook(dict(interval=1))
        runner.register_hook(eval_hook)
        runner.run([loader], [('train', 1)], 8)
        basename = osp.basename(runner.work_dir.rstrip(osp.sep))
        ckpt_path = f'{out_dir}/{basename}/best_acc_epoch_4.pth'
        assert (runner.meta['hook_msgs']['best_ckpt'] == ckpt_path)
        assert (runner.meta['hook_msgs']['best_score'] == 7)
        # Three improvements (epochs 1, 2, 4) -> three uploads; the two
        # superseded best checkpoints are checked for and removed remotely.
        assert (mock_put.call_count == 3)
        assert (mock_remove.call_count == 2)
        assert (mock_isfile.call_count == 2)
@patch('mmcv.engine.single_gpu_test', MagicMock)
@patch('mmcv.engine.multi_gpu_test', MagicMock)
@pytest.mark.parametrize('EvalHookParam', [EvalHook, DistEvalHook])
@pytest.mark.parametrize('_build_demo_runner,by_epoch', [(_build_epoch_runner, True), (_build_iter_runner, False)])
def test_start_param(EvalHookParam, _build_demo_runner, by_epoch):
    """Check how ``start`` and ``interval`` control evaluation counts for both
    the single-GPU and the distributed eval hook, epoch- and iter-based."""
    dataloader = DataLoader(EvalDataset())
    # interval must be an int ...
    with pytest.raises(TypeError):
        EvalHookParam(dataloader=MagicMock(), interval=(- 1))
    # ... and both interval and start must be non-negative.
    with pytest.raises(ValueError):
        EvalHookParam(dataloader, interval=(- 1))
    with pytest.raises(ValueError):
        EvalHookParam(dataloader, start=(- 1))
    # interval=1, no start: evaluate after each of the 2 units.
    runner = _build_demo_runner()
    evalhook = EvalHookParam(dataloader, interval=1, by_epoch=by_epoch)
    evalhook.evaluate = MagicMock()
    runner.register_hook(evalhook)
    runner.run([dataloader], [('train', 1)], 2)
    assert (evalhook.evaluate.call_count == 2)
    # start=1, interval=1: still 2 evaluations over 2 units.
    runner = _build_demo_runner()
    evalhook = EvalHookParam(dataloader, start=1, interval=1, by_epoch=by_epoch)
    evalhook.evaluate = MagicMock()
    runner.register_hook(evalhook)
    runner.run([dataloader], [('train', 1)], 2)
    assert (evalhook.evaluate.call_count == 2)
    # interval=2: only one evaluation in 2 units.
    runner = _build_demo_runner()
    evalhook = EvalHookParam(dataloader, interval=2, by_epoch=by_epoch)
    evalhook.evaluate = MagicMock()
    runner.register_hook(evalhook)
    runner.run([dataloader], [('train', 1)], 2)
    assert (evalhook.evaluate.call_count == 1)
    # start=1, interval=2 over 3 units: evaluations at units 1 and 3.
    runner = _build_demo_runner()
    evalhook = EvalHookParam(dataloader, start=1, interval=2, by_epoch=by_epoch)
    evalhook.evaluate = MagicMock()
    runner.register_hook(evalhook)
    runner.run([dataloader], [('train', 1)], 3)
    assert (evalhook.evaluate.call_count == 2)
    # start=0: one extra evaluation before training begins.
    runner = _build_demo_runner()
    evalhook = EvalHookParam(dataloader, start=0, by_epoch=by_epoch)
    evalhook.evaluate = MagicMock()
    runner.register_hook(evalhook)
    runner.run([dataloader], [('train', 1)], 2)
    assert (evalhook.evaluate.call_count == 3)
    # Resuming from unit 2 with start=1: evaluation resumes immediately.
    runner = _build_demo_runner()
    evalhook = EvalHookParam(dataloader, start=1, by_epoch=by_epoch)
    evalhook.evaluate = MagicMock()
    runner.register_hook(evalhook)
    if by_epoch:
        runner._epoch = 2
    else:
        runner._iter = 2
    runner.run([dataloader], [('train', 1)], 3)
    assert (evalhook.evaluate.call_count == 2)
    # Resuming from unit 1 with start=2: evaluation starts once unit 2 is hit.
    runner = _build_demo_runner()
    evalhook = EvalHookParam(dataloader, start=2, by_epoch=by_epoch)
    evalhook.evaluate = MagicMock()
    runner.register_hook(evalhook)
    if by_epoch:
        runner._epoch = 1
    else:
        runner._iter = 1
    runner.run([dataloader], [('train', 1)], 3)
    assert (evalhook.evaluate.call_count == 2)
@pytest.mark.parametrize('runner,by_epoch,eval_hook_priority', [(EpochBasedRunner, True, 'NORMAL'), (EpochBasedRunner, True, 'LOW'), (IterBasedRunner, False, 'LOW')])
def test_logger(runner, by_epoch, eval_hook_priority):
    """Check the JSON text log produced when an EvalHook runs alongside the
    text logger: the train record carries timing info, the val record does
    not. NOTE(review): the ``runner`` parameter is immediately shadowed by
    an ``EpochBasedRunner`` below, so the IterBasedRunner case never actually
    builds an iter-based runner — confirm whether that is intended.
    """
    loader = DataLoader(EvalDataset())
    model = Model()
    data_loader = DataLoader(EvalDataset())
    eval_hook = EvalHook(data_loader, interval=1, by_epoch=by_epoch, save_best='acc')
    with tempfile.TemporaryDirectory() as tmpdir:
        logger = get_logger('test_logger')
        optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
        runner = EpochBasedRunner(model=model, optimizer=optimizer, work_dir=tmpdir, logger=logger)
        runner.register_logger_hooks(dict(interval=1, hooks=[dict(type='TextLoggerHook', by_epoch=by_epoch)]))
        runner.register_timer_hook(dict(type='IterTimerHook'))
        runner.register_hook(eval_hook, priority=eval_hook_priority)
        runner.run([loader], [('train', 1)], 1)
        # The text logger writes one JSON record per line; line 1 is metadata.
        path = osp.join(tmpdir, next(scandir(tmpdir, '.json')))
        with open(path) as fr:
            fr.readline()
            train_log = json.loads(fr.readline())
            # Training record includes iteration timing.
            assert ((train_log['mode'] == 'train') and ('time' in train_log))
            val_log = json.loads(fr.readline())
            # Validation record has no timing info.
            assert ((val_log['mode'] == 'val') and ('time' not in val_log))
def test_cast_tensor_type():
    """Exercise ``cast_tensor_type`` on tensors, containers and scalars."""
    # A float32 tensor matching src_type is cast to dst_type.
    converted = cast_tensor_type(
        torch.FloatTensor([5.0]), torch.float32, torch.int32)
    assert isinstance(converted, torch.Tensor)
    assert (converted.dtype == torch.int32)
    # float -> half conversion.
    converted = cast_tensor_type(
        torch.FloatTensor([5.0]), torch.float, torch.half)
    assert isinstance(converted, torch.Tensor)
    assert (converted.dtype == torch.half)
    # A tensor whose dtype differs from src_type is returned unchanged.
    int_tensor = torch.IntTensor([5])
    converted = cast_tensor_type(int_tensor, torch.float, torch.half)
    assert isinstance(converted, torch.Tensor)
    assert (converted.dtype == int_tensor.dtype)
    # Non-tensor inputs pass through with their type preserved.
    assert isinstance(cast_tensor_type('tensor', str, str), str)
    assert isinstance(
        cast_tensor_type(np.array([5.0]), np.ndarray, np.ndarray), np.ndarray)
    # Dict values are converted entry by entry.
    mapping = dict(
        tensor_a=torch.FloatTensor([1.0]), tensor_b=torch.FloatTensor([2.0]))
    converted = cast_tensor_type(mapping, torch.float32, torch.int32)
    assert isinstance(converted, dict)
    assert (converted['tensor_a'].dtype == torch.int32)
    assert (converted['tensor_b'].dtype == torch.int32)
    # List elements are converted element by element.
    converted = cast_tensor_type(
        [torch.FloatTensor([1.0]), torch.FloatTensor([2.0])],
        torch.float32, torch.int32)
    assert isinstance(converted, list)
    assert (converted[0].dtype == torch.int32)
    assert (converted[1].dtype == torch.int32)
    # Plain numbers are returned as-is.
    assert isinstance(cast_tensor_type(5, None, None), int)
def test_auto_fp16():
    """Check the ``@auto_fp16`` decorator: only nn.Module methods are
    supported, casting is gated by ``fp16_enabled``, restricted by
    ``apply_to``, and reversible with ``out_fp32``."""
    # auto_fp16 is only usable on nn.Module methods.
    with pytest.raises(TypeError):

        class ExampleObject(object):

            @auto_fp16()
            def __call__(self, x):
                return x

        model = ExampleObject()
        input_x = torch.ones(1, dtype=torch.float32)
        model(input_x)

    # Default: all float args are cast to half once fp16_enabled is set.
    class ExampleModule(nn.Module):

        @auto_fp16()
        def forward(self, x, y):
            return (x, y)

    model = ExampleModule()
    input_x = torch.ones(1, dtype=torch.float32)
    input_y = torch.ones(1, dtype=torch.float32)
    # Without fp16_enabled the decorator is a no-op.
    (output_x, output_y) = model(input_x, input_y)
    assert (output_x.dtype == torch.float32)
    assert (output_y.dtype == torch.float32)
    model.fp16_enabled = True
    (output_x, output_y) = model(input_x, input_y)
    assert (output_x.dtype == torch.half)
    assert (output_y.dtype == torch.half)
    if torch.cuda.is_available():
        model.cuda()
        (output_x, output_y) = model(input_x.cuda(), input_y.cuda())
        assert (output_x.dtype == torch.half)
        assert (output_y.dtype == torch.half)

    # apply_to limits casting to the named arguments only.
    class ExampleModule(nn.Module):

        @auto_fp16(apply_to=('x',))
        def forward(self, x, y):
            return (x, y)

    model = ExampleModule()
    input_x = torch.ones(1, dtype=torch.float32)
    input_y = torch.ones(1, dtype=torch.float32)
    (output_x, output_y) = model(input_x, input_y)
    assert (output_x.dtype == torch.float32)
    assert (output_y.dtype == torch.float32)
    model.fp16_enabled = True
    (output_x, output_y) = model(input_x, input_y)
    assert (output_x.dtype == torch.half)
    assert (output_y.dtype == torch.float32)
    if torch.cuda.is_available():
        model.cuda()
        (output_x, output_y) = model(input_x.cuda(), input_y.cuda())
        assert (output_x.dtype == torch.half)
        assert (output_y.dtype == torch.float32)

    # apply_to also covers keyword arguments; unnamed ones ('z') are skipped.
    class ExampleModule(nn.Module):

        @auto_fp16(apply_to=('x', 'y'))
        def forward(self, x, y=None, z=None):
            return (x, y, z)

    model = ExampleModule()
    input_x = torch.ones(1, dtype=torch.float32)
    input_y = torch.ones(1, dtype=torch.float32)
    input_z = torch.ones(1, dtype=torch.float32)
    (output_x, output_y, output_z) = model(input_x, y=input_y, z=input_z)
    assert (output_x.dtype == torch.float32)
    assert (output_y.dtype == torch.float32)
    assert (output_z.dtype == torch.float32)
    model.fp16_enabled = True
    (output_x, output_y, output_z) = model(input_x, y=input_y, z=input_z)
    assert (output_x.dtype == torch.half)
    assert (output_y.dtype == torch.half)
    assert (output_z.dtype == torch.float32)
    if torch.cuda.is_available():
        model.cuda()
        (output_x, output_y, output_z) = model(input_x.cuda(), y=input_y.cuda(), z=input_z.cuda())
        assert (output_x.dtype == torch.half)
        assert (output_y.dtype == torch.half)
        assert (output_z.dtype == torch.float32)

    # out_fp32=True converts the covered outputs back to float32.
    class ExampleModule(nn.Module):

        @auto_fp16(apply_to=('x', 'y'), out_fp32=True)
        def forward(self, x, y=None, z=None):
            return (x, y, z)

    model = ExampleModule()
    input_x = torch.ones(1, dtype=torch.half)
    input_y = torch.ones(1, dtype=torch.float32)
    input_z = torch.ones(1, dtype=torch.float32)
    (output_x, output_y, output_z) = model(input_x, y=input_y, z=input_z)
    assert (output_x.dtype == torch.half)
    assert (output_y.dtype == torch.float32)
    assert (output_z.dtype == torch.float32)
    model.fp16_enabled = True
    (output_x, output_y, output_z) = model(input_x, y=input_y, z=input_z)
    assert (output_x.dtype == torch.float32)
    assert (output_y.dtype == torch.float32)
    assert (output_z.dtype == torch.float32)
    if torch.cuda.is_available():
        model.cuda()
        (output_x, output_y, output_z) = model(input_x.cuda(), y=input_y.cuda(), z=input_z.cuda())
        assert (output_x.dtype == torch.float32)
        assert (output_y.dtype == torch.float32)
        assert (output_z.dtype == torch.float32)
def test_force_fp32():
    """Check the ``@force_fp32`` decorator (mirror image of ``auto_fp16``):
    half inputs are promoted to float32 when ``fp16_enabled`` is set,
    restricted by ``apply_to``, and reversible with ``out_fp16``."""
    # force_fp32 is only usable on nn.Module methods.
    with pytest.raises(TypeError):

        class ExampleObject(object):

            @force_fp32()
            def __call__(self, x):
                return x

        model = ExampleObject()
        input_x = torch.ones(1, dtype=torch.float32)
        model(input_x)

    # Default: all half args are promoted once fp16_enabled is set.
    class ExampleModule(nn.Module):

        @force_fp32()
        def forward(self, x, y):
            return (x, y)

    model = ExampleModule()
    input_x = torch.ones(1, dtype=torch.half)
    input_y = torch.ones(1, dtype=torch.half)
    # Without fp16_enabled the decorator is a no-op.
    (output_x, output_y) = model(input_x, input_y)
    assert (output_x.dtype == torch.half)
    assert (output_y.dtype == torch.half)
    model.fp16_enabled = True
    (output_x, output_y) = model(input_x, input_y)
    assert (output_x.dtype == torch.float32)
    assert (output_y.dtype == torch.float32)
    if torch.cuda.is_available():
        model.cuda()
        (output_x, output_y) = model(input_x.cuda(), input_y.cuda())
        assert (output_x.dtype == torch.float32)
        assert (output_y.dtype == torch.float32)

    # apply_to limits promotion to the named arguments only.
    class ExampleModule(nn.Module):

        @force_fp32(apply_to=('x',))
        def forward(self, x, y):
            return (x, y)

    model = ExampleModule()
    input_x = torch.ones(1, dtype=torch.half)
    input_y = torch.ones(1, dtype=torch.half)
    (output_x, output_y) = model(input_x, input_y)
    assert (output_x.dtype == torch.half)
    assert (output_y.dtype == torch.half)
    model.fp16_enabled = True
    (output_x, output_y) = model(input_x, input_y)
    assert (output_x.dtype == torch.float32)
    assert (output_y.dtype == torch.half)
    if torch.cuda.is_available():
        model.cuda()
        (output_x, output_y) = model(input_x.cuda(), input_y.cuda())
        assert (output_x.dtype == torch.float32)
        assert (output_y.dtype == torch.half)

    # apply_to also covers keyword arguments; unnamed ones ('z') are skipped.
    class ExampleModule(nn.Module):

        @force_fp32(apply_to=('x', 'y'))
        def forward(self, x, y=None, z=None):
            return (x, y, z)

    model = ExampleModule()
    input_x = torch.ones(1, dtype=torch.half)
    input_y = torch.ones(1, dtype=torch.half)
    input_z = torch.ones(1, dtype=torch.half)
    (output_x, output_y, output_z) = model(input_x, y=input_y, z=input_z)
    assert (output_x.dtype == torch.half)
    assert (output_y.dtype == torch.half)
    assert (output_z.dtype == torch.half)
    model.fp16_enabled = True
    (output_x, output_y, output_z) = model(input_x, y=input_y, z=input_z)
    assert (output_x.dtype == torch.float32)
    assert (output_y.dtype == torch.float32)
    assert (output_z.dtype == torch.half)
    if torch.cuda.is_available():
        model.cuda()
        (output_x, output_y, output_z) = model(input_x.cuda(), y=input_y.cuda(), z=input_z.cuda())
        assert (output_x.dtype == torch.float32)
        assert (output_y.dtype == torch.float32)
        assert (output_z.dtype == torch.half)

    # out_fp16=True converts the covered outputs back to half precision.
    class ExampleModule(nn.Module):

        @force_fp32(apply_to=('x', 'y'), out_fp16=True)
        def forward(self, x, y=None, z=None):
            return (x, y, z)

    model = ExampleModule()
    input_x = torch.ones(1, dtype=torch.float32)
    input_y = torch.ones(1, dtype=torch.half)
    input_z = torch.ones(1, dtype=torch.half)
    (output_x, output_y, output_z) = model(input_x, y=input_y, z=input_z)
    assert (output_x.dtype == torch.float32)
    assert (output_y.dtype == torch.half)
    assert (output_z.dtype == torch.half)
    model.fp16_enabled = True
    (output_x, output_y, output_z) = model(input_x, y=input_y, z=input_z)
    assert (output_x.dtype == torch.half)
    assert (output_y.dtype == torch.half)
    assert (output_z.dtype == torch.half)
    if torch.cuda.is_available():
        model.cuda()
        (output_x, output_y, output_z) = model(input_x.cuda(), y=input_y.cuda(), z=input_z.cuda())
        assert (output_x.dtype == torch.half)
        assert (output_y.dtype == torch.half)
        assert (output_z.dtype == torch.half)
def test_optimizerhook():
    """Check ``OptimizerHook``'s anomalous-parameter detection: parameters
    that do not contribute to the current loss are reported via the logger."""

    class Model(nn.Module):

        def __init__(self):
            super().__init__()
            # conv1 -> conv2 form a chain; conv3 is never used in forward().
            self.conv1 = nn.Conv2d(
                in_channels=1, out_channels=2, kernel_size=3, stride=1,
                padding=1, dilation=1)
            self.conv2 = nn.Conv2d(
                in_channels=2, out_channels=2, kernel_size=3, stride=1,
                padding=1, dilation=1)
            self.conv3 = nn.Conv2d(
                in_channels=1, out_channels=2, kernel_size=3, stride=1,
                padding=1, dilation=1)

        def forward(self, x):
            x1 = self.conv1(x)
            x2 = self.conv2(x1)
            return (x1, x2)

    model = Model()
    x = torch.rand(1, 1, 3, 3)
    # Stand-in runner: the hook only needs optimizer, model, outputs, logger.
    dummy_runner = Mock()
    dummy_runner.optimizer.zero_grad = Mock(return_value=None)
    dummy_runner.optimizer.step = Mock(return_value=None)
    dummy_runner.model = model
    dummy_runner.outputs = dict()
    dummy_runner.outputs['num_samples'] = 0

    class DummyLogger():

        def __init__(self):
            self.msg = ''

        def log(self, msg=None, **kwargs):
            # Accumulate everything logged so we can string-match below.
            self.msg += msg

    dummy_runner.logger = DummyLogger()
    optimizer_hook = OptimizerHook(
        dict(max_norm=2), detect_anomalous_params=True)
    # Loss from x1 only: conv2 and conv3 take no part -> both are anomalous.
    dummy_runner.outputs['loss'] = model(x)[0].sum()
    optimizer_hook.after_train_iter(dummy_runner)
    assert ('conv2.weight' in dummy_runner.logger.msg)
    assert ('conv2.bias' in dummy_runner.logger.msg)
    assert ('conv3.weight' in dummy_runner.logger.msg)
    assert ('conv3.bias' in dummy_runner.logger.msg)
    assert ('conv1.weight' not in dummy_runner.logger.msg)
    assert ('conv1.bias' not in dummy_runner.logger.msg)
    # Loss from x2: conv1 and conv2 both contribute; only conv3 is anomalous.
    dummy_runner.outputs['loss'] = model(x)[1].sum()
    dummy_runner.logger.msg = ''
    optimizer_hook.after_train_iter(dummy_runner)
    assert ('conv3.weight' in dummy_runner.logger.msg)
    assert ('conv3.bias' in dummy_runner.logger.msg)
    assert ('conv2.weight' not in dummy_runner.logger.msg)
    assert ('conv2.bias' not in dummy_runner.logger.msg)
    assert ('conv1.weight' not in dummy_runner.logger.msg)
    assert ('conv1.bias' not in dummy_runner.logger.msg)
def test_checkpoint_hook(tmp_path):
    """xdoctest -m tests/test_runner/test_hooks.py test_checkpoint_hook.

    Checks that ``CheckpointHook`` records the last checkpoint path in
    ``runner.meta`` for epoch- and iter-based runners, both on local disk
    and on a remote ``out_dir`` served by the (mocked) Petrel backend.
    """
    loader = DataLoader(torch.ones((5, 2)))
    # Epoch-based, local work_dir: last ckpt is epoch_1.pth.
    runner = _build_demo_runner('EpochBasedRunner', max_epochs=1)
    runner.meta = dict()
    checkpointhook = CheckpointHook(interval=1, by_epoch=True)
    runner.register_hook(checkpointhook)
    runner.run([loader], [('train', 1)])
    assert (runner.meta['hook_msgs']['last_ckpt'] == osp.join(runner.work_dir, 'epoch_1.pth'))
    shutil.rmtree(runner.work_dir)
    # Epoch-based, remote out_dir with max_keep_ckpts: uploads and removals
    # go through the Petrel backend.
    runner = _build_demo_runner('EpochBasedRunner', max_epochs=4)
    runner.meta = dict()
    out_dir = 's3://user/data'
    with patch.object(PetrelBackend, 'put') as mock_put, patch.object(PetrelBackend, 'remove') as mock_remove, patch.object(PetrelBackend, 'isfile') as mock_isfile:
        checkpointhook = CheckpointHook(interval=1, out_dir=out_dir, by_epoch=True, max_keep_ckpts=2)
        runner.register_hook(checkpointhook)
        runner.run([loader], [('train', 1)])
        basename = osp.basename(runner.work_dir.rstrip(osp.sep))
        assert (runner.meta['hook_msgs']['last_ckpt'] == '/'.join([out_dir, basename, 'epoch_4.pth']))
        mock_put.assert_called()
        mock_remove.assert_called()
        mock_isfile.assert_called()
    shutil.rmtree(runner.work_dir)
    # Iter-based, local work_dir: last ckpt is iter_1.pth.
    runner = _build_demo_runner('IterBasedRunner', max_iters=1, max_epochs=None)
    runner.meta = dict()
    checkpointhook = CheckpointHook(interval=1, by_epoch=False)
    runner.register_hook(checkpointhook)
    runner.run([loader], [('train', 1)])
    assert (runner.meta['hook_msgs']['last_ckpt'] == osp.join(runner.work_dir, 'iter_1.pth'))
    shutil.rmtree(runner.work_dir)
    # Iter-based, remote out_dir with max_keep_ckpts.
    runner = _build_demo_runner('IterBasedRunner', max_iters=4, max_epochs=None)
    runner.meta = dict()
    out_dir = 's3://user/data'
    with patch.object(PetrelBackend, 'put') as mock_put, patch.object(PetrelBackend, 'remove') as mock_remove, patch.object(PetrelBackend, 'isfile') as mock_isfile:
        checkpointhook = CheckpointHook(interval=1, out_dir=out_dir, by_epoch=False, max_keep_ckpts=2)
        runner.register_hook(checkpointhook)
        runner.run([loader], [('train', 1)])
        basename = osp.basename(runner.work_dir.rstrip(osp.sep))
        assert (runner.meta['hook_msgs']['last_ckpt'] == '/'.join([out_dir, basename, 'iter_4.pth']))
        mock_put.assert_called()
        mock_remove.assert_called()
        mock_isfile.assert_called()
    shutil.rmtree(runner.work_dir)
def test_ema_hook():
    """xdoctest -m tests/test_hooks.py test_ema_hook.

    Checks that ``EMAHook`` stores EMA shadow buffers in checkpoints and
    that ``resume_from`` restores them on a new runner.
    """

    class DemoModel(nn.Module):

        def __init__(self):
            super().__init__()
            self.conv = nn.Conv2d(
                in_channels=1, out_channels=2, kernel_size=1, padding=1,
                bias=True)
            self._init_weight()

        def _init_weight(self):
            # Zero init so checkpoint sums below are predictable.
            constant_(self.conv.weight, 0)
            constant_(self.conv.bias, 0)

        def forward(self, x):
            return self.conv(x).sum()

        def train_step(self, x, optimizer, **kwargs):
            return dict(loss=self(x))

        def val_step(self, x, optimizer, **kwargs):
            return dict(loss=self(x))

    loader = DataLoader(torch.ones((1, 1, 1, 1)))
    runner = _build_demo_runner()
    demo_model = DemoModel()
    runner.model = demo_model
    emahook = EMAHook(momentum=0.1, interval=2, warm_up=100, resume_from=None)
    checkpointhook = CheckpointHook(interval=1, by_epoch=True)
    runner.register_hook(emahook, priority='HIGHEST')
    runner.register_hook(checkpointhook)
    runner.run([loader, loader], [('train', 1), ('val', 1)])
    checkpoint = torch.load(f'{runner.work_dir}/epoch_1.pth')
    contain_ema_buffer = False
    for (name, value) in checkpoint['state_dict'].items():
        if ('ema' in name):
            contain_ema_buffer = True
            # EMA buffers mirror the zero-initialized weights; overwrite
            # them with ones so the resumed run below can be distinguished.
            assert (value.sum() == 0)
            value.fill_(1)
        else:
            assert (value.sum() == 0)
    assert contain_ema_buffer
    torch.save(checkpoint, f'{runner.work_dir}/epoch_1.pth')
    work_dir = runner.work_dir
    # Resume: the doctored EMA buffers (ones) must be restored and then
    # updated by the new momentum during epoch 2.
    resume_ema_hook = EMAHook(
        momentum=0.5, warm_up=0, resume_from=f'{work_dir}/epoch_1.pth')
    runner = _build_demo_runner(max_epochs=2)
    runner.model = demo_model
    runner.register_hook(resume_ema_hook, priority='HIGHEST')
    checkpointhook = CheckpointHook(interval=1, by_epoch=True)
    runner.register_hook(checkpointhook)
    runner.run([loader, loader], [('train', 1), ('val', 1)])
    checkpoint = torch.load(f'{runner.work_dir}/epoch_2.pth')
    contain_ema_buffer = False
    for (name, value) in checkpoint['state_dict'].items():
        if ('ema' in name):
            contain_ema_buffer = True
            assert (value.sum() == 2)
        else:
            assert (value.sum() == 1)
    assert contain_ema_buffer
    shutil.rmtree(runner.work_dir)
    shutil.rmtree(work_dir)
def test_custom_hook():
    """Check custom-hook registration: priority ordering (numeric and named)
    and the interleaving of custom hooks with the standard training hooks."""

    @HOOKS.register_module()
    class ToyHook(Hook):

        def __init__(self, info, *args, **kwargs):
            super().__init__()
            # Marker used to identify each hook instance in the assertions.
            self.info = info

    runner = _build_demo_runner_without_hook('EpochBasedRunner', max_epochs=1)
    # Registering None is a no-op.
    runner.register_custom_hooks(None)
    assert (len(runner.hooks) == 0)
    # Lower priority number means earlier position.
    custom_hooks_cfg = [dict(type='ToyHook', priority=51, info=51), dict(type='ToyHook', priority=49, info=49)]
    runner.register_custom_hooks(custom_hooks_cfg)
    assert ([hook.info for hook in runner.hooks] == [49, 51])
    # A hook instance (default priority) lands between 49 and 51.
    runner.register_custom_hooks(ToyHook(info='default'))
    assert ((len(runner.hooks) == 3) and (runner.hooks[1].info == 'default'))
    shutil.rmtree(runner.work_dir)
    # Named priority ranks are ordered regardless of registration order.
    runner = _build_demo_runner_without_hook('EpochBasedRunner', max_epochs=1)
    priority_ranks = ['HIGHEST', 'VERY_HIGH', 'HIGH', 'ABOVE_NORMAL', 'NORMAL', 'BELOW_NORMAL', 'LOW', 'VERY_LOW', 'LOWEST']
    random_priority_ranks = priority_ranks.copy()
    random.shuffle(random_priority_ranks)
    custom_hooks_cfg = [dict(type='ToyHook', priority=rank, info=rank) for rank in random_priority_ranks]
    runner.register_custom_hooks(custom_hooks_cfg)
    assert ([hook.info for hook in runner.hooks] == priority_ranks)
    shutil.rmtree(runner.work_dir)
    # Custom hooks interleave with the built-in training hooks by priority.
    runner = _build_demo_runner_without_hook('EpochBasedRunner', max_epochs=1)
    custom_hooks_cfg = [dict(type='ToyHook', priority=1, info='custom 1'), dict(type='ToyHook', priority='NORMAL', info='custom normal'), dict(type='ToyHook', priority=89, info='custom 89')]
    runner.register_training_hooks(
        lr_config=ToyHook('lr'),
        optimizer_config=ToyHook('optimizer'),
        checkpoint_config=ToyHook('checkpoint'),
        log_config=dict(interval=1, hooks=[dict(type='ToyHook', info='log')]),
        momentum_config=ToyHook('momentum'),
        timer_config=ToyHook('timer'),
        custom_hooks_config=custom_hooks_cfg)
    hooks_order = ['custom 1', 'lr', 'momentum', 'optimizer', 'checkpoint', 'custom normal', 'timer', 'custom 89', 'log']
    assert ([hook.info for hook in runner.hooks] == hooks_order)
    shutil.rmtree(runner.work_dir)
def test_pavi_hook():
    """Check ``PaviLoggerHook`` with the pavi module replaced by a mock:
    scalar logging and the final snapshot upload."""
    # Replace the (optional) pavi dependency before the hook imports it.
    sys.modules['pavi'] = MagicMock()
    loader = DataLoader(torch.ones((5, 2)))
    runner = _build_demo_runner()
    runner.meta = dict(config_dict=dict(lr=0.02, gpu_ids=range(1)))
    hook = PaviLoggerHook(add_graph=False, add_last_ckpt=True)
    runner.register_hook(hook)
    runner.run([loader, loader], [('train', 1), ('val', 1)])
    shutil.rmtree(runner.work_dir)
    assert hasattr(hook, 'writer')
    # Last logged scalars are from the val phase at iteration 1.
    hook.writer.add_scalars.assert_called_with(
        'val', {'learning_rate': 0.02, 'momentum': 0.95}, 1)
    # On Windows the snapshot is the 'latest.pth' copy rather than a symlink.
    if (platform.system() == 'Windows'):
        snapshot_file_path = osp.join(runner.work_dir, 'latest.pth')
    else:
        snapshot_file_path = osp.join(runner.work_dir, 'epoch_1.pth')
    hook.writer.add_snapshot_file.assert_called_with(
        tag=runner.work_dir.split('/')[(- 1)],
        snapshot_file_path=snapshot_file_path, iteration=1)
def test_sync_buffers_hook():
    """Smoke-test that ``SyncBuffersHook`` can be registered and a short
    train/val run completes without error."""
    data_loader = DataLoader(torch.ones((5, 2)))
    demo_runner = _build_demo_runner()
    demo_runner.register_hook_from_cfg(dict(type='SyncBuffersHook'))
    demo_runner.run([data_loader, data_loader], [('train', 1), ('val', 1)])
    # Clean up the temporary work directory created by the runner.
    shutil.rmtree(demo_runner.work_dir)
@pytest.mark.parametrize('multi_optimizers, max_iters, gamma, cyclic_times', [(True, 8, 1, 1), (False, 8, 0.5, 2)])
def test_momentum_runner_hook(multi_optimizers, max_iters, gamma, cyclic_times):
    """xdoctest -m tests/test_hooks.py test_momentum_runner_hook.

    Checks the momentum updater hooks (cyclic, then step with constant /
    linear / exp warmup) by asserting the exact scalar values forwarded to
    the (mocked) Pavi logger at selected iterations.
    """
    # --- cyclic momentum + cyclic LR ----------------------------------------
    sys.modules['pavi'] = MagicMock()
    loader = DataLoader(torch.ones((10, 2)))
    runner = _build_demo_runner(multi_optimizers=multi_optimizers)
    hook_cfg = dict(
        type='CyclicMomentumUpdaterHook', by_epoch=False,
        target_ratio=((0.85 / 0.95), 1), cyclic_times=cyclic_times,
        step_ratio_up=0.4, gamma=gamma)
    runner.register_hook_from_cfg(hook_cfg)
    hook_cfg = dict(
        type='CyclicLrUpdaterHook', by_epoch=False, target_ratio=(10, 1),
        cyclic_times=1, step_ratio_up=0.4)
    runner.register_hook_from_cfg(hook_cfg)
    runner.register_hook_from_cfg(dict(type='IterTimerHook'))
    hook = PaviLoggerHook(interval=1, add_graph=False, add_last_ckpt=True)
    runner.register_hook(hook)
    runner.run([loader], [('train', 1)])
    shutil.rmtree(runner.work_dir)
    assert hasattr(hook, 'writer')
    if multi_optimizers:
        calls = [call('train', {'learning_rate/model1': 0.01999999999999999, 'learning_rate/model2': 0.009999999999999995, 'momentum/model1': 0.95, 'momentum/model2': 0.9}, 1), call('train', {'learning_rate/model1': 0.2, 'learning_rate/model2': 0.1, 'momentum/model1': 0.85, 'momentum/model2': 0.8052631578947369}, 5), call('train', {'learning_rate/model1': 0.155, 'learning_rate/model2': 0.0775, 'momentum/model1': 0.875, 'momentum/model2': 0.8289473684210527}, 7)]
    else:
        calls = [call('train', {'learning_rate': 0.01999999999999999, 'momentum': 0.95}, 1), call('train', {'learning_rate': 0.11, 'momentum': 0.85}, 3), call('train', {'learning_rate': 0.1879422863405995, 'momentum': 0.95}, 6), call('train', {'learning_rate': 0.11000000000000001, 'momentum': 0.9}, 8)]
    hook.writer.add_scalars.assert_has_calls(calls, any_order=True)
    # --- step momentum with constant warmup ---------------------------------
    sys.modules['pavi'] = MagicMock()
    runner = _build_demo_runner(multi_optimizers=multi_optimizers)
    hook_cfg = dict(
        type='StepMomentumUpdaterHook', by_epoch=False, warmup='constant',
        warmup_iters=5, warmup_ratio=0.5, step=[10])
    runner.register_hook_from_cfg(hook_cfg)
    runner.register_hook_from_cfg(dict(type='IterTimerHook'))
    hook = PaviLoggerHook(interval=1, add_graph=False, add_last_ckpt=True)
    runner.register_hook(hook)
    runner.run([loader], [('train', 1)])
    shutil.rmtree(runner.work_dir)
    assert hasattr(hook, 'writer')
    if multi_optimizers:
        calls = [call('train', {'learning_rate/model1': 0.02, 'learning_rate/model2': 0.01, 'momentum/model1': 1.9, 'momentum/model2': 1.8}, 1), call('train', {'learning_rate/model1': 0.02, 'learning_rate/model2': 0.01, 'momentum/model1': 1.9, 'momentum/model2': 1.8}, 5), call('train', {'learning_rate/model1': 0.02, 'learning_rate/model2': 0.01, 'momentum/model1': 0.95, 'momentum/model2': 0.9}, 10)]
    else:
        calls = [call('train', {'learning_rate': 0.02, 'momentum': 1.9}, 1), call('train', {'learning_rate': 0.02, 'momentum': 1.9}, 5), call('train', {'learning_rate': 0.02, 'momentum': 0.95}, 10)]
    hook.writer.add_scalars.assert_has_calls(calls, any_order=True)
    # --- step momentum with linear warmup -----------------------------------
    sys.modules['pavi'] = MagicMock()
    runner = _build_demo_runner(multi_optimizers=multi_optimizers)
    hook_cfg = dict(
        type='StepMomentumUpdaterHook', by_epoch=False, warmup='linear',
        warmup_iters=5, warmup_ratio=0.5, step=[10])
    runner.register_hook_from_cfg(hook_cfg)
    runner.register_hook_from_cfg(dict(type='IterTimerHook'))
    hook = PaviLoggerHook(interval=1, add_graph=False, add_last_ckpt=True)
    runner.register_hook(hook)
    runner.run([loader], [('train', 1)])
    shutil.rmtree(runner.work_dir)
    assert hasattr(hook, 'writer')
    if multi_optimizers:
        calls = [call('train', {'learning_rate/model1': 0.02, 'learning_rate/model2': 0.01, 'momentum/model1': 1.9, 'momentum/model2': 1.8}, 1), call('train', {'learning_rate/model1': 0.02, 'learning_rate/model2': 0.01, 'momentum/model1': 1.3571428571428572, 'momentum/model2': 1.2857142857142858}, 3), call('train', {'learning_rate/model1': 0.02, 'learning_rate/model2': 0.01, 'momentum/model1': 0.95, 'momentum/model2': 0.9}, 10)]
    else:
        calls = [call('train', {'learning_rate': 0.02, 'momentum': 1.9}, 1), call('train', {'learning_rate': 0.02, 'momentum': 1.3571428571428572}, 3), call('train', {'learning_rate': 0.02, 'momentum': 0.95}, 10)]
    hook.writer.add_scalars.assert_has_calls(calls, any_order=True)
    # --- step momentum with exp warmup --------------------------------------
    sys.modules['pavi'] = MagicMock()
    runner = _build_demo_runner(multi_optimizers=multi_optimizers)
    hook_cfg = dict(
        type='StepMomentumUpdaterHook', by_epoch=False, warmup='exp',
        warmup_iters=5, warmup_ratio=0.5, step=[10])
    runner.register_hook_from_cfg(hook_cfg)
    runner.register_hook_from_cfg(dict(type='IterTimerHook'))
    hook = PaviLoggerHook(interval=1, add_graph=False, add_last_ckpt=True)
    runner.register_hook(hook)
    runner.run([loader], [('train', 1)])
    shutil.rmtree(runner.work_dir)
    assert hasattr(hook, 'writer')
    if multi_optimizers:
        calls = [call('train', {'learning_rate/model1': 0.02, 'learning_rate/model2': 0.01, 'momentum/model1': 1.9, 'momentum/model2': 1.8}, 1), call('train', {'learning_rate/model1': 0.02, 'learning_rate/model2': 0.01, 'momentum/model1': 1.4399307381848783, 'momentum/model2': 1.3641449098593583}, 3), call('train', {'learning_rate/model1': 0.02, 'learning_rate/model2': 0.01, 'momentum/model1': 0.95, 'momentum/model2': 0.9}, 10)]
    else:
        calls = [call('train', {'learning_rate': 0.02, 'momentum': 1.9}, 1), call('train', {'learning_rate': 0.02, 'momentum': 1.4399307381848783}, 3), call('train', {'learning_rate': 0.02, 'momentum': 0.95}, 10)]
    hook.writer.add_scalars.assert_has_calls(calls, any_order=True)
@pytest.mark.parametrize('multi_optimizers', (True, False))
def test_cosine_runner_hook(multi_optimizers):
    'xdoctest -m tests/test_hooks.py test_cosine_runner_hook.'
    # Replace pavi with a mock so PaviLoggerHook records calls locally
    # instead of contacting the real service.
    sys.modules['pavi'] = MagicMock()
    loader = DataLoader(torch.ones((10, 2)))
    runner = _build_demo_runner(multi_optimizers=multi_optimizers)

    # Cosine-annealed momentum with a 2-iteration warmup.
    hook_cfg = dict(
        type='CosineAnnealingMomentumUpdaterHook',
        min_momentum_ratio=(0.99 / 0.95),
        by_epoch=False,
        warmup_iters=2,
        warmup_ratio=(0.9 / 0.95))
    runner.register_hook_from_cfg(hook_cfg)

    # Cosine-annealed LR decaying towards 0 with the same warmup length.
    hook_cfg = dict(
        type='CosineAnnealingLrUpdaterHook',
        by_epoch=False,
        min_lr_ratio=0,
        warmup_iters=2,
        warmup_ratio=0.9)
    runner.register_hook_from_cfg(hook_cfg)
    runner.register_hook_from_cfg(dict(type='IterTimerHook'))
    runner.register_hook(IterTimerHook())

    # Pavi hook records lr/momentum every iteration via writer.add_scalars.
    hook = PaviLoggerHook(interval=1, add_graph=False, add_last_ckpt=True)
    runner.register_hook(hook)
    runner.run([loader], [('train', 1)])
    shutil.rmtree(runner.work_dir)

    # Verify the scheduled values at iterations 1 (warmup), 6 and 10.
    assert hasattr(hook, 'writer')
    if multi_optimizers:
        calls = [
            call(
                'train', {
                    'learning_rate/model1': 0.02,
                    'learning_rate/model2': 0.01,
                    'momentum/model1': 0.95,
                    'momentum/model2': 0.9
                }, 1),
            call(
                'train', {
                    'learning_rate/model1': 0.01,
                    'learning_rate/model2': 0.005,
                    'momentum/model1': 0.97,
                    'momentum/model2': 0.9189473684210527
                }, 6),
            call(
                'train', {
                    'learning_rate/model1': 0.0004894348370484647,
                    'learning_rate/model2': 0.00024471741852423234,
                    'momentum/model1': 0.9890211303259032,
                    'momentum/model2': 0.9369673866245399
                }, 10)
        ]
    else:
        calls = [
            call('train', {
                'learning_rate': 0.02,
                'momentum': 0.95
            }, 1),
            call('train', {
                'learning_rate': 0.01,
                'momentum': 0.97
            }, 6),
            call(
                'train', {
                    'learning_rate': 0.0004894348370484647,
                    'momentum': 0.9890211303259032
                }, 10)
        ]
    hook.writer.add_scalars.assert_has_calls(calls, any_order=True)
@pytest.mark.parametrize('multi_optimizers, by_epoch', [(False, False),
                                                        (True, False),
                                                        (False, True),
                                                        (True, True)])
def test_flat_cosine_runner_hook(multi_optimizers, by_epoch):
    'xdoctest -m tests/test_hooks.py test_flat_cosine_runner_hook.'
    sys.modules['pavi'] = MagicMock()
    loader = DataLoader(torch.ones((10, 2)))
    # 10 epochs x 10 iters when scheduling by epoch, otherwise a single
    # epoch of 10 iterations.
    max_epochs = (10 if by_epoch else 1)
    runner = _build_demo_runner(
        multi_optimizers=multi_optimizers, max_epochs=max_epochs)

    # start_percent must be non-negative.
    with pytest.raises(ValueError):
        FlatCosineAnnealingLrUpdaterHook(start_percent=(- 0.1), min_lr_ratio=0)

    # Flat LR for the first half of training, cosine decay afterwards.
    hook_cfg = dict(
        type='FlatCosineAnnealingLrUpdaterHook',
        by_epoch=by_epoch,
        min_lr_ratio=0,
        warmup='linear',
        warmup_iters=(10 if by_epoch else 2),
        warmup_ratio=0.9,
        start_percent=0.5)
    runner.register_hook_from_cfg(hook_cfg)
    runner.register_hook_from_cfg(dict(type='IterTimerHook'))
    runner.register_hook(IterTimerHook())
    hook = PaviLoggerHook(interval=1, add_graph=False, add_last_ckpt=True)
    runner.register_hook(hook)
    runner.run([loader], [('train', 1)])
    shutil.rmtree(runner.work_dir)

    # Checkpoints: during warmup, on the flat plateau, just after the
    # cosine decay starts, and near the end of training.
    assert hasattr(hook, 'writer')
    if multi_optimizers:
        if by_epoch:
            calls = [
                call(
                    'train', {
                        'learning_rate/model1': 0.018000000000000002,
                        'learning_rate/model2': 0.009000000000000001,
                        'momentum/model1': 0.95,
                        'momentum/model2': 0.9
                    }, 1),
                call(
                    'train', {
                        'learning_rate/model1': 0.02,
                        'learning_rate/model2': 0.01,
                        'momentum/model1': 0.95,
                        'momentum/model2': 0.9
                    }, 11),
                call(
                    'train', {
                        'learning_rate/model1': 0.018090169943749474,
                        'learning_rate/model2': 0.009045084971874737,
                        'momentum/model1': 0.95,
                        'momentum/model2': 0.9
                    }, 61),
                call(
                    'train', {
                        'learning_rate/model1': 0.0019098300562505265,
                        'learning_rate/model2': 0.0009549150281252633,
                        'momentum/model1': 0.95,
                        'momentum/model2': 0.9
                    }, 100)
            ]
        else:
            calls = [
                call(
                    'train', {
                        'learning_rate/model1': 0.018000000000000002,
                        'learning_rate/model2': 0.009000000000000001,
                        'momentum/model1': 0.95,
                        'momentum/model2': 0.9
                    }, 1),
                call(
                    'train', {
                        'learning_rate/model1': 0.02,
                        'learning_rate/model2': 0.01,
                        'momentum/model1': 0.95,
                        'momentum/model2': 0.9
                    }, 6),
                call(
                    'train', {
                        'learning_rate/model1': 0.018090169943749474,
                        'learning_rate/model2': 0.009045084971874737,
                        'momentum/model1': 0.95,
                        'momentum/model2': 0.9
                    }, 7),
                call(
                    'train', {
                        'learning_rate/model1': 0.0019098300562505265,
                        'learning_rate/model2': 0.0009549150281252633,
                        'momentum/model1': 0.95,
                        'momentum/model2': 0.9
                    }, 10)
            ]
    elif by_epoch:
        calls = [
            call('train', {
                'learning_rate': 0.018000000000000002,
                'momentum': 0.95
            }, 1),
            call('train', {
                'learning_rate': 0.02,
                'momentum': 0.95
            }, 11),
            call('train', {
                'learning_rate': 0.018090169943749474,
                'momentum': 0.95
            }, 61),
            call('train', {
                'learning_rate': 0.0019098300562505265,
                'momentum': 0.95
            }, 100)
        ]
    else:
        calls = [
            call('train', {
                'learning_rate': 0.018000000000000002,
                'momentum': 0.95
            }, 1),
            call('train', {
                'learning_rate': 0.02,
                'momentum': 0.95
            }, 6),
            call('train', {
                'learning_rate': 0.018090169943749474,
                'momentum': 0.95
            }, 7),
            call('train', {
                'learning_rate': 0.0019098300562505265,
                'momentum': 0.95
            }, 10)
        ]
    hook.writer.add_scalars.assert_has_calls(calls, any_order=True)
@pytest.mark.parametrize('multi_optimizers, max_iters', [(True, 10), (True, 2),
                                                         (False, 10),
                                                         (False, 2)])
def test_one_cycle_runner_hook(multi_optimizers, max_iters):
    'Test OneCycleLrUpdaterHook and OneCycleMomentumUpdaterHook.'
    # One-cycle scheduling is iteration-based only.
    with pytest.raises(AssertionError):
        OneCycleLrUpdaterHook(max_lr=0.1, by_epoch=True)

    # pct_start must lie in [0, 1).
    with pytest.raises(ValueError):
        OneCycleLrUpdaterHook(max_lr=0.1, pct_start=(- 0.1))

    # Only 'cos' and 'linear' annealing strategies are valid.
    with pytest.raises(ValueError):
        OneCycleLrUpdaterHook(max_lr=0.1, anneal_strategy='sin')

    sys.modules['pavi'] = MagicMock()
    loader = DataLoader(torch.ones((10, 2)))
    runner = _build_demo_runner(multi_optimizers=multi_optimizers)

    # Momentum cycles opposite to the learning rate.
    hook_cfg = dict(
        type='OneCycleMomentumUpdaterHook',
        base_momentum=0.85,
        max_momentum=0.95,
        pct_start=0.5,
        anneal_strategy='cos',
        three_phase=False)
    runner.register_hook_from_cfg(hook_cfg)
    hook_cfg = dict(
        type='OneCycleLrUpdaterHook',
        max_lr=0.01,
        pct_start=0.5,
        anneal_strategy='cos',
        div_factor=25,
        final_div_factor=10000.0,
        three_phase=False)
    runner.register_hook_from_cfg(hook_cfg)
    runner.register_hook_from_cfg(dict(type='IterTimerHook'))
    runner.register_hook(IterTimerHook())
    hook = PaviLoggerHook(interval=1, add_graph=False, add_last_ckpt=True)
    runner.register_hook(hook)
    runner.run([loader], [('train', 1)])
    shutil.rmtree(runner.work_dir)

    # Expected values at the start, the cycle peak region and the end.
    assert hasattr(hook, 'writer')
    if multi_optimizers:
        calls = [
            call(
                'train', {
                    'learning_rate/model1': 0.0003999999999999993,
                    'learning_rate/model2': 0.0003999999999999993,
                    'momentum/model1': 0.95,
                    'momentum/model2': 0.95
                }, 1),
            call(
                'train', {
                    'learning_rate/model1': 0.00904508879153485,
                    'learning_rate/model2': 0.00904508879153485,
                    'momentum/model1': 0.8595491502812526,
                    'momentum/model2': 0.8595491502812526
                }, 6),
            call(
                'train', {
                    'learning_rate/model1': 4e-08,
                    'learning_rate/model2': 4e-08,
                    'momentum/model1': 0.95,
                    'momentum/model2': 0.95
                }, 10)
        ]
    else:
        calls = [
            call('train', {
                'learning_rate': 0.0003999999999999993,
                'momentum': 0.95
            }, 1),
            call(
                'train', {
                    'learning_rate': 0.00904508879153485,
                    'momentum': 0.8595491502812526
                }, 6),
            call('train', {
                'learning_rate': 4e-08,
                'momentum': 0.95
            }, 10)
        ]
    hook.writer.add_scalars.assert_has_calls(calls, any_order=True)

    # Cross-check against torch's own OneCycleLR implementation: the last
    # lr produced by the hook must match the torch scheduler's.
    sys.modules['pavi'] = MagicMock()
    loader = DataLoader(torch.ones((10, 2)))
    runner = _build_demo_runner(
        runner_type='IterBasedRunner', max_epochs=None, max_iters=max_iters)
    args = dict(
        max_lr=0.01,
        total_steps=5,
        pct_start=0.5,
        anneal_strategy='linear',
        div_factor=25,
        final_div_factor=10000.0)
    hook = OneCycleLrUpdaterHook(**args)
    runner.register_hook(hook)
    if (max_iters == 10):
        # total_steps (5) < max_iters (10): the schedule runs out and the
        # hook must raise.
        with pytest.raises(ValueError):
            runner.run([loader], [('train', 1)])
    else:
        runner.run([loader], [('train', 1)])
        lr_last = runner.current_lr()
        t = torch.tensor([0.0], requires_grad=True)
        optim = torch.optim.SGD([t], lr=0.01)
        lr_scheduler = torch.optim.lr_scheduler.OneCycleLR(optim, **args)
        lr_target = []
        for _ in range(max_iters):
            optim.step()
            lr_target.append(optim.param_groups[0]['lr'])
            lr_scheduler.step()
        assert (lr_target[(- 1)] == lr_last[0])
@pytest.mark.parametrize('multi_optimizers', (True, False))
def test_cosine_restart_lr_update_hook(multi_optimizers):
    'Test CosineRestartLrUpdaterHook.'
    # min_lr and min_lr_ratio are mutually exclusive.
    with pytest.raises(AssertionError):
        CosineRestartLrUpdaterHook(
            by_epoch=False,
            periods=[2, 10],
            restart_weights=[0.5, 0.5],
            min_lr=0.1,
            min_lr_ratio=0)

    # periods and restart_weights must have the same length.
    with pytest.raises(AssertionError):
        CosineRestartLrUpdaterHook(
            by_epoch=False,
            periods=[2, 10],
            restart_weights=[0.5],
            min_lr_ratio=0)

    # periods=[5, 2] gives cumulative periods [5, 7], which do not cover
    # the 10 training iterations, so running must raise ValueError.
    with pytest.raises(ValueError):
        sys.modules['pavi'] = MagicMock()
        loader = DataLoader(torch.ones((10, 2)))
        runner = _build_demo_runner()
        hook = CosineRestartLrUpdaterHook(
            by_epoch=False,
            periods=[5, 2],
            restart_weights=[0.5, 0.5],
            min_lr=0.0001)
        runner.register_hook(hook)
        runner.register_hook(IterTimerHook())
        hook = PaviLoggerHook(interval=1, add_graph=False, add_last_ckpt=True)
        runner.register_hook(hook)
        runner.run([loader], [('train', 1)])
        shutil.rmtree(runner.work_dir)

    # Valid configuration: two restart cycles of 5 iterations each.
    sys.modules['pavi'] = MagicMock()
    loader = DataLoader(torch.ones((10, 2)))
    runner = _build_demo_runner(multi_optimizers=multi_optimizers)
    hook = CosineRestartLrUpdaterHook(
        by_epoch=False,
        periods=[5, 5],
        restart_weights=[0.5, 0.5],
        min_lr_ratio=0)
    runner.register_hook(hook)
    runner.register_hook(IterTimerHook())
    hook = PaviLoggerHook(interval=1, add_graph=False, add_last_ckpt=True)
    runner.register_hook(hook)
    runner.run([loader], [('train', 1)])
    shutil.rmtree(runner.work_dir)

    # Both cycles start at half the base lr (restart weight 0.5) and decay
    # towards 0 within each period.
    assert hasattr(hook, 'writer')
    if multi_optimizers:
        calls = [
            call(
                'train', {
                    'learning_rate/model1': 0.01,
                    'learning_rate/model2': 0.005,
                    'momentum/model1': 0.95,
                    'momentum/model2': 0.9
                }, 1),
            call(
                'train', {
                    'learning_rate/model1': 0.01,
                    'learning_rate/model2': 0.005,
                    'momentum/model1': 0.95,
                    'momentum/model2': 0.9
                }, 6),
            call(
                'train', {
                    'learning_rate/model1': 0.0009549150281252633,
                    'learning_rate/model2': 0.00047745751406263163,
                    'momentum/model1': 0.95,
                    'momentum/model2': 0.9
                }, 10)
        ]
    else:
        calls = [
            call('train', {
                'learning_rate': 0.01,
                'momentum': 0.95
            }, 1),
            call('train', {
                'learning_rate': 0.01,
                'momentum': 0.95
            }, 6),
            call(
                'train', {
                    'learning_rate': 0.0009549150281252633,
                    'momentum': 0.95
                }, 10)
        ]
    hook.writer.add_scalars.assert_has_calls(calls, any_order=True)
@pytest.mark.parametrize('multi_optimizers', (True, False))
def test_step_runner_hook(multi_optimizers):
    'Test StepLrUpdaterHook.'
    # step is mandatory.
    with pytest.raises(TypeError):
        StepLrUpdaterHook()

    # A scalar step must be positive.
    with pytest.raises(AssertionError):
        StepLrUpdaterHook((- 10))

    # Every entry of a step list must be positive.
    with pytest.raises(AssertionError):
        StepLrUpdaterHook([10, 16, (- 20)])

    # Scalar step: halve lr/momentum every 5 iterations, clamped at
    # min_lr / min_momentum.
    sys.modules['pavi'] = MagicMock()
    loader = DataLoader(torch.ones((30, 2)))
    runner = _build_demo_runner(multi_optimizers=multi_optimizers)
    hook_cfg = dict(
        type='StepMomentumUpdaterHook',
        by_epoch=False,
        step=5,
        gamma=0.5,
        min_momentum=0.05)
    runner.register_hook_from_cfg(hook_cfg)
    hook = StepLrUpdaterHook(by_epoch=False, step=5, gamma=0.5, min_lr=0.001)
    runner.register_hook(hook)
    runner.register_hook(IterTimerHook())
    hook = PaviLoggerHook(interval=1, add_graph=False, add_last_ckpt=True)
    runner.register_hook(hook)
    runner.run([loader], [('train', 1)])
    shutil.rmtree(runner.work_dir)

    # The last two checkpoints verify that the min_lr/min_momentum floors
    # hold once reached.
    assert hasattr(hook, 'writer')
    if multi_optimizers:
        calls = [
            call(
                'train', {
                    'learning_rate/model1': 0.02,
                    'learning_rate/model2': 0.01,
                    'momentum/model1': 0.95,
                    'momentum/model2': 0.9
                }, 1),
            call(
                'train', {
                    'learning_rate/model1': 0.01,
                    'learning_rate/model2': 0.005,
                    'momentum/model1': 0.475,
                    'momentum/model2': 0.45
                }, 6),
            call(
                'train', {
                    'learning_rate/model1': 0.0025,
                    'learning_rate/model2': 0.00125,
                    'momentum/model1': 0.11875,
                    'momentum/model2': 0.1125
                }, 16),
            call(
                'train', {
                    'learning_rate/model1': 0.00125,
                    'learning_rate/model2': 0.001,
                    'momentum/model1': 0.059375,
                    'momentum/model2': 0.05625
                }, 21),
            call(
                'train', {
                    'learning_rate/model1': 0.001,
                    'learning_rate/model2': 0.001,
                    'momentum/model1': 0.05,
                    'momentum/model2': 0.05
                }, 26),
            call(
                'train', {
                    'learning_rate/model1': 0.001,
                    'learning_rate/model2': 0.001,
                    'momentum/model1': 0.05,
                    'momentum/model2': 0.05
                }, 30)
        ]
    else:
        calls = [
            call('train', {
                'learning_rate': 0.02,
                'momentum': 0.95
            }, 1),
            call('train', {
                'learning_rate': 0.01,
                'momentum': 0.475
            }, 6),
            call('train', {
                'learning_rate': 0.0025,
                'momentum': 0.11875
            }, 16),
            call('train', {
                'learning_rate': 0.00125,
                'momentum': 0.059375
            }, 21),
            call('train', {
                'learning_rate': 0.001,
                'momentum': 0.05
            }, 26),
            call('train', {
                'learning_rate': 0.001,
                'momentum': 0.05
            }, 30)
        ]
    hook.writer.add_scalars.assert_has_calls(calls, any_order=True)

    # List step: decay by gamma=0.1 at iterations 4, 6 and 8.
    sys.modules['pavi'] = MagicMock()
    loader = DataLoader(torch.ones((10, 2)))
    runner = _build_demo_runner(multi_optimizers=multi_optimizers)
    hook_cfg = dict(
        type='StepMomentumUpdaterHook',
        by_epoch=False,
        step=[4, 6, 8],
        gamma=0.1)
    runner.register_hook_from_cfg(hook_cfg)
    hook = StepLrUpdaterHook(by_epoch=False, step=[4, 6, 8], gamma=0.1)
    runner.register_hook(hook)
    runner.register_hook(IterTimerHook())
    hook = PaviLoggerHook(interval=1, add_graph=False, add_last_ckpt=True)
    runner.register_hook(hook)
    runner.run([loader], [('train', 1)])
    shutil.rmtree(runner.work_dir)

    assert hasattr(hook, 'writer')
    if multi_optimizers:
        calls = [
            call(
                'train', {
                    'learning_rate/model1': 0.02,
                    'learning_rate/model2': 0.01,
                    'momentum/model1': 0.95,
                    'momentum/model2': 0.9
                }, 1),
            call(
                'train', {
                    'learning_rate/model1': 0.002,
                    'learning_rate/model2': 0.001,
                    'momentum/model1': 0.095,
                    'momentum/model2': 0.09000000000000001
                }, 5),
            call(
                'train', {
                    'learning_rate/model1': 0.00020000000000000004,
                    'learning_rate/model2': 0.00010000000000000002,
                    'momentum/model1': 0.009500000000000001,
                    'momentum/model2': 0.009000000000000003
                }, 7),
            call(
                'train', {
                    'learning_rate/model1': 2.0000000000000005e-05,
                    'learning_rate/model2': 1.0000000000000003e-05,
                    'momentum/model1': 0.0009500000000000002,
                    'momentum/model2': 0.0009000000000000002
                }, 9)
        ]
    else:
        calls = [
            call('train', {
                'learning_rate': 0.02,
                'momentum': 0.95
            }, 1),
            call('train', {
                'learning_rate': 0.002,
                'momentum': 0.095
            }, 5),
            call(
                'train', {
                    'learning_rate': 0.00020000000000000004,
                    'momentum': 0.009500000000000001
                }, 7),
            call(
                'train', {
                    'learning_rate': 2.0000000000000005e-05,
                    'momentum': 0.0009500000000000002
                }, 9)
        ]
    hook.writer.add_scalars.assert_has_calls(calls, any_order=True)
@pytest.mark.parametrize('multi_optimizers, max_iters, gamma, cyclic_times',
                         [(True, 8, 1, 1), (False, 8, 0.5, 2)])
def test_cyclic_lr_update_hook(multi_optimizers, max_iters, gamma,
                               cyclic_times):
    'Test CyclicLrUpdateHook.'
    # Cyclic scheduling is iteration-based only.
    with pytest.raises(AssertionError):
        CyclicLrUpdaterHook(by_epoch=True)

    # target_ratio must have exactly two elements.
    with pytest.raises(AssertionError):
        CyclicLrUpdaterHook(by_epoch=False, target_ratio=(10.0, 0.1, 0.2))

    # step_ratio_up must lie in [0, 1).
    with pytest.raises(AssertionError):
        CyclicLrUpdaterHook(by_epoch=False, step_ratio_up=1.4)

    # Only 'cos' and 'linear' annealing strategies are valid.
    with pytest.raises(ValueError):
        CyclicLrUpdaterHook(by_epoch=False, anneal_strategy='sin')

    # gamma must lie in (0, 1].
    with pytest.raises(AssertionError):
        CyclicLrUpdaterHook(by_epoch=False, gamma=0)

    sys.modules['pavi'] = MagicMock()
    loader = DataLoader(torch.ones((10, 2)))
    runner = _build_demo_runner(
        runner_type='IterBasedRunner',
        max_epochs=None,
        max_iters=max_iters,
        multi_optimizers=multi_optimizers)

    # Linear ramp to 10x the base lr in the first half of each cycle,
    # decayed across cycles by `gamma`.
    schedule_hook = CyclicLrUpdaterHook(
        by_epoch=False,
        target_ratio=(10.0, 1.0),
        cyclic_times=cyclic_times,
        step_ratio_up=0.5,
        anneal_strategy='linear',
        gamma=gamma)
    runner.register_hook(schedule_hook)
    runner.register_hook_from_cfg(dict(type='IterTimerHook'))
    runner.register_hook(IterTimerHook())
    hook = PaviLoggerHook(interval=1, add_graph=False, add_last_ckpt=True)
    runner.register_hook(hook)
    runner.run([loader], [('train', 1)])
    shutil.rmtree(runner.work_dir)

    assert hasattr(hook, 'writer')
    if multi_optimizers:
        # Single cycle (cyclic_times=1, gamma=1): lr rises towards 10x base.
        calls = [
            call(
                'train', {
                    'learning_rate/model1': 0.02,
                    'learning_rate/model2': 0.01,
                    'momentum/model1': 0.95,
                    'momentum/model2': 0.9
                }, 1),
            call(
                'train', {
                    'learning_rate/model1': 0.155,
                    'learning_rate/model2': 0.0775,
                    'momentum/model1': 0.95,
                    'momentum/model2': 0.9
                }, 4),
            call(
                'train', {
                    'learning_rate/model1': 0.155,
                    'learning_rate/model2': 0.0775,
                    'momentum/model1': 0.95,
                    'momentum/model2': 0.9
                }, 6)
        ]
    else:
        # Two cycles with gamma=0.5: the second cycle's peak is halved.
        calls = [
            call('train', {
                'learning_rate': 0.02,
                'momentum': 0.95
            }, 1),
            call('train', {
                'learning_rate': 0.11,
                'momentum': 0.95
            }, 4),
            call('train', {
                'learning_rate': 0.065,
                'momentum': 0.95
            }, 6),
            call('train', {
                'learning_rate': 0.11,
                'momentum': 0.95
            }, 7)
        ]
    hook.writer.add_scalars.assert_has_calls(calls, any_order=True)
@pytest.mark.parametrize('log_model', (True, False))
def test_mlflow_hook(log_model):
    # Stub out mlflow so the hook records calls instead of hitting a server.
    sys.modules['mlflow'] = MagicMock()
    sys.modules['mlflow.pytorch'] = MagicMock()

    runner = _build_demo_runner()
    mlflow_hook = MlflowLoggerHook(exp_name='test', log_model=log_model)
    runner.register_hook(mlflow_hook)

    data = DataLoader(torch.ones((5, 2)))
    runner.run([data, data], [('train', 1), ('val', 1)])
    shutil.rmtree(runner.work_dir)

    # The experiment name and the final metrics must have been forwarded.
    mlflow_hook.mlflow.set_experiment.assert_called_with('test')
    mlflow_hook.mlflow.log_metrics.assert_called_with(
        {
            'learning_rate': 0.02,
            'momentum': 0.95
        }, step=6)
    if not log_model:
        # Model export must be skipped when disabled.
        assert (not mlflow_hook.mlflow_pytorch.log_model.called)
    else:
        mlflow_hook.mlflow_pytorch.log_model.assert_called_with(
            runner.model,
            'models',
            pip_requirements=[f'torch=={TORCH_VERSION}'])
def test_segmind_hook():
    # Stub out segmind so no real service is contacted.
    sys.modules['segmind'] = MagicMock()

    runner = _build_demo_runner()
    segmind_hook = SegmindLoggerHook()
    runner.register_hook(segmind_hook)

    batches = DataLoader(torch.ones((5, 2)))
    runner.run([batches, batches], [('train', 1), ('val', 1)])
    shutil.rmtree(runner.work_dir)

    # The hook must forward the final metrics with the runner's epoch as
    # both step and epoch.
    segmind_hook.mlflow_log.assert_called_with(
        segmind_hook.log_metrics, {
            'learning_rate': 0.02,
            'momentum': 0.95
        },
        step=runner.epoch,
        epoch=runner.epoch)
def test_wandb_hook():
    # Stub out wandb so the hook records calls locally.
    sys.modules['wandb'] = MagicMock()

    runner = _build_demo_runner()
    wandb_hook = WandbLoggerHook(log_artifact=True)
    runner.register_hook(wandb_hook)

    batches = DataLoader(torch.ones((5, 2)))
    runner.run([batches, batches], [('train', 1), ('val', 1)])
    shutil.rmtree(runner.work_dir)

    # init/log/log_artifact/join must all have been invoked with the
    # expected arguments.
    wandb_hook.wandb.init.assert_called_with()
    wandb_hook.wandb.log.assert_called_with(
        {
            'learning_rate': 0.02,
            'momentum': 0.95
        }, step=6, commit=True)
    wandb_hook.wandb.log_artifact.assert_called()
    wandb_hook.wandb.join.assert_called_with()
def test_neptune_hook():
    # Stub out both the legacy and the 'new' neptune client modules.
    sys.modules['neptune'] = MagicMock()
    sys.modules['neptune.new'] = MagicMock()

    runner = _build_demo_runner()
    neptune_hook = NeptuneLoggerHook()
    runner.register_hook(neptune_hook)

    batches = DataLoader(torch.ones((5, 2)))
    runner.run([batches, batches], [('train', 1), ('val', 1)])
    shutil.rmtree(runner.work_dir)

    # The run must be initialized, fed the final momentum and stopped.
    neptune_hook.neptune.init.assert_called_with()
    neptune_hook.run['momentum'].log.assert_called_with(0.95, step=6)
    neptune_hook.run.stop.assert_called_with()
def test_dvclive_hook():
    # Stub out dvclive so the hook records calls locally.
    sys.modules['dvclive'] = MagicMock()

    runner = _build_demo_runner()
    dvclive_hook = DvcliveLoggerHook()
    # Grab the mocked dvclive object before running to assert on it later.
    recorder = dvclive_hook.dvclive
    runner.register_hook(dvclive_hook)

    batches = DataLoader(torch.ones((5, 2)))
    runner.run([batches, batches], [('train', 1), ('val', 1)])
    shutil.rmtree(runner.work_dir)

    # The final step and momentum value must have been logged.
    recorder.set_step.assert_called_with(6)
    recorder.log.assert_called_with('momentum', 0.95)
def test_dvclive_hook_model_file(tmp_path):
    """DvcliveLoggerHook with ``model_file`` set must write the model file.

    Fix: the original built ``loader`` twice in a row
    (``torch.utils.data.DataLoader(...)`` immediately overwritten by
    ``DataLoader(...)``); the dead first assignment is removed.
    """
    sys.modules['dvclive'] = MagicMock()
    runner = _build_demo_runner()

    hook = DvcliveLoggerHook(model_file=osp.join(runner.work_dir, 'model.pth'))
    runner.register_hook(hook)

    loader = DataLoader(torch.ones((5, 2)))
    runner.run([loader, loader], [('train', 1), ('val', 1)])

    # The hook must have checkpointed the model to the configured path.
    assert osp.exists(osp.join(runner.work_dir, 'model.pth'))
    shutil.rmtree(runner.work_dir)
def _build_demo_runner_without_hook(runner_type='EpochBasedRunner',
                                    max_epochs=1,
                                    max_iters=None,
                                    multi_optimizers=False):
    """Build a demo runner with a tiny model and no hooks registered.

    Args:
        runner_type (str): Registry name of the runner class to build.
        max_epochs (int | None): Epoch budget for epoch-based runners.
        max_iters (int | None): Iteration budget for iter-based runners.
        multi_optimizers (bool): If True, use a dict of two SGD optimizers
            (one per sub-module) instead of a single optimizer.

    Returns:
        The runner built by ``build_runner`` over a fresh temp work dir.
    """

    class DemoModel(nn.Module):
        """Two-branch model: linear drives the loss, conv only supplies a
        second parameter group for the multi-optimizer case."""

        def __init__(self):
            super().__init__()
            self.linear = nn.Linear(2, 1)
            self.conv = nn.Conv2d(3, 3, 3)

        def forward(self, x):
            return self.linear(x)

        def train_step(self, x, optimizer, **kwargs):
            return dict(loss=self(x))

        def val_step(self, x, optimizer, **kwargs):
            return dict(loss=self(x))

    demo_model = DemoModel()
    if multi_optimizers:
        optimizer = {
            'model1':
            torch.optim.SGD(demo_model.linear.parameters(),
                            lr=0.02, momentum=0.95),
            'model2':
            torch.optim.SGD(demo_model.conv.parameters(),
                            lr=0.01, momentum=0.9),
        }
    else:
        optimizer = torch.optim.SGD(
            demo_model.parameters(), lr=0.02, momentum=0.95)

    work_dir = tempfile.mkdtemp()
    return build_runner(
        dict(type=runner_type),
        default_args=dict(
            model=demo_model,
            work_dir=work_dir,
            optimizer=optimizer,
            logger=logging.getLogger(),
            max_epochs=max_epochs,
            max_iters=max_iters))
def _build_demo_runner(runner_type='EpochBasedRunner',
                       max_epochs=1,
                       max_iters=None,
                       multi_optimizers=False):
    """Build a demo runner with checkpoint and text-logger hooks attached.

    Same arguments as :func:`_build_demo_runner_without_hook`.
    """
    runner = _build_demo_runner_without_hook(runner_type, max_epochs,
                                             max_iters, multi_optimizers)
    runner.register_checkpoint_hook(dict(interval=1))
    runner.register_logger_hooks(
        dict(interval=1, hooks=[dict(type='TextLoggerHook')]))
    return runner
def test_runner_with_revise_keys():
    # Checks that load_checkpoint's ``revise_keys`` regex rewriting maps
    # checkpoint keys onto the target model in both directions:
    # adding a prefix and stripping one.
    import os

    class Model(nn.Module):

        def __init__(self):
            super().__init__()
            self.conv = nn.Conv2d(3, 3, 1)

    class PrefixModel(nn.Module):

        def __init__(self):
            super().__init__()
            # Wraps Model so its keys gain a 'backbone.' prefix.
            self.backbone = Model()

    pmodel = PrefixModel()
    model = Model()
    checkpoint_path = os.path.join(tempfile.gettempdir(), 'checkpoint.pth')

    # Case 1: checkpoint has bare keys; prepend 'backbone.' so they match
    # the prefixed model.
    torch.save(model.state_dict(), checkpoint_path)
    runner = _build_demo_runner(runner_type='EpochBasedRunner')
    runner.model = pmodel
    state_dict = runner.load_checkpoint(
        checkpoint_path, revise_keys=[('^', 'backbone.')])
    for key in pmodel.backbone.state_dict().keys():
        assert torch.equal(pmodel.backbone.state_dict()[key], state_dict[key])

    # Case 2: checkpoint has prefixed keys; strip 'backbone.' so they match
    # the bare model.
    torch.save(pmodel.state_dict(), checkpoint_path)
    runner.model = model
    state_dict = runner.load_checkpoint(
        checkpoint_path, revise_keys=[('^backbone\\.', '')])
    for key in state_dict.keys():
        key_stripped = re.sub('^backbone\\.', '', key)
        assert torch.equal(model.state_dict()[key_stripped], state_dict[key])
    os.remove(checkpoint_path)
def test_get_triggered_stages():
    # `after_epoch` must expand into both the train- and val-epoch stages.

    class ToyHook(Hook):
        # These overrides deliberately omit ``self``: they are only
        # inspected by get_triggered_stages, never called.
        def before_run():
            pass

        def after_epoch():
            pass

    triggered = ToyHook().get_triggered_stages()
    assert triggered == ['before_run', 'after_train_epoch', 'after_val_epoch']
def test_gradient_cumulative_optimizer_hook():
    # Verifies that accumulating gradients over `cumulative_iters` small
    # batches is equivalent to one optimizer step on the merged batch.

    class ToyModel(nn.Module):

        def __init__(self, with_norm=False):
            super().__init__()
            self.fp16_enabled = False
            self.fc = nn.Linear(3, 2)
            # Deterministic init so both runners start identically.
            nn.init.constant_(self.fc.weight, 1.0)
            nn.init.constant_(self.fc.bias, 1.0)
            self.with_norm = with_norm
            if with_norm:
                self.norm = nn.BatchNorm1d(2)

        def forward(self, x):
            x = self.fc(x)
            if self.with_norm:
                x = self.norm(x)
            return x

        def train_step(self, x, optimizer, **kwargs):
            return dict(loss=self(x).mean(), num_samples=x.shape[0])

        def val_step(self, x, optimizer, **kwargs):
            return dict(loss=self(x).mean(), num_samples=x.shape[0])

    def build_toy_runner(config=dict(type='EpochBasedRunner', max_epochs=3)):
        model = ToyModel()
        optimizer = torch.optim.SGD(model.parameters(), lr=0.02)
        tmp_dir = tempfile.mkdtemp()
        runner = build_runner(
            config,
            default_args=dict(
                model=model,
                work_dir=tmp_dir,
                optimizer=optimizer,
                logger=logging.getLogger(),
                meta=dict()))
        return runner

    # cumulative_iters must be a positive integer.
    with pytest.raises(AssertionError):
        GradientCumulativeOptimizerHook(cumulative_iters='str')

    with pytest.raises(AssertionError):
        GradientCumulativeOptimizerHook(cumulative_iters=(- 1))

    # Epoch-based: batch_size=1 accumulated over 3 iters vs batch_size=3
    # must yield identical weights.
    data = torch.rand((6, 3))
    loader_1 = DataLoader(data, batch_size=1)
    runner_1 = build_toy_runner()
    optimizer_hook = GradientCumulativeOptimizerHook(
        grad_clip=dict(max_norm=0.2), cumulative_iters=3)
    runner_1.register_hook(optimizer_hook)
    runner_1.run([loader_1], [('train', 1)])

    loader_2 = DataLoader(data, batch_size=3)
    runner_2 = build_toy_runner()
    optimizer_hook = OptimizerHook(grad_clip=dict(max_norm=0.2))
    runner_2.register_hook(optimizer_hook)
    runner_2.run([loader_2], [('train', 1)])

    # Training must have moved the weights, and both runners must agree.
    assert (runner_1.model.fc.weight < 1).all()
    assert (runner_1.model.fc.bias < 1).all()
    assert torch.allclose(runner_1.model.fc.weight, runner_2.model.fc.weight)
    assert torch.allclose(runner_1.model.fc.bias, runner_2.model.fc.bias)
    shutil.rmtree(runner_1.work_dir)
    shutil.rmtree(runner_2.work_dir)

    # Iter-based: 8 samples accumulated in threes leaves a remainder of 2,
    # matched by a reference runner with one batch of 2 at the end.
    data = torch.rand((8, 3))
    loader_1 = DataLoader(data, batch_size=1)
    runner_1 = build_toy_runner(dict(type='IterBasedRunner', max_iters=8))
    optimizer_hook = GradientCumulativeOptimizerHook(
        grad_clip=dict(max_norm=0.2), cumulative_iters=3)
    runner_1.register_hook(optimizer_hook)
    runner_1.run([loader_1], [('train', 1)])

    loader_2_divisible = DataLoader(data[:6], batch_size=3)
    loader_2_remainder = DataLoader(data[6:], batch_size=2)
    runner_2 = build_toy_runner(dict(type='IterBasedRunner', max_iters=3))
    optimizer_hook = OptimizerHook(grad_clip=dict(max_norm=0.2))
    runner_2.register_hook(optimizer_hook)
    runner_2.run([loader_2_divisible, loader_2_remainder], [('train', 2),
                                                            ('train', 1)])

    assert (runner_1.model.fc.weight < 1).all()
    assert (runner_1.model.fc.bias < 1).all()
    assert torch.allclose(runner_1.model.fc.weight, runner_2.model.fc.weight)
    assert torch.allclose(runner_1.model.fc.bias, runner_2.model.fc.bias)
    shutil.rmtree(runner_1.work_dir)
    shutil.rmtree(runner_2.work_dir)

    # Models containing batch norm must be detected (accumulation changes
    # BN statistics, so a warning is warranted).
    model = ToyModel(with_norm=True)
    optimizer_hook = GradientCumulativeOptimizerHook(
        grad_clip=dict(max_norm=0.2), cumulative_iters=3)
    assert optimizer_hook.has_batch_norm(model)
@pytest.mark.skipif((not torch.cuda.is_available()),
                    reason='requires CUDA support')
def test_gradient_cumulative_fp16_optimizer_hook():
    # FP16 variant of the gradient-accumulation equivalence test: small
    # accumulated batches must match one large batch under mixed precision.

    class ToyModel(nn.Module):

        def __init__(self):
            super().__init__()
            self.fp16_enabled = False
            self.fc = nn.Linear(3, 2)
            nn.init.constant_(self.fc.weight, 1.0)
            nn.init.constant_(self.fc.bias, 1.0)

        @auto_fp16(apply_to=('x', ))
        def forward(self, x):
            x = self.fc(x)
            return x

        def train_step(self, x, optimizer, **kwargs):
            return dict(loss=self(x).mean(), num_samples=x.shape[0])

        def val_step(self, x, optimizer, **kwargs):
            return dict(loss=self(x).mean(), num_samples=x.shape[0])

    def build_toy_runner(config=dict(type='EpochBasedRunner', max_epochs=3)):
        model = ToyModel().cuda()
        optimizer = torch.optim.SGD(model.parameters(), lr=0.02)
        tmp_dir = tempfile.mkdtemp()
        runner = build_runner(
            config,
            default_args=dict(
                model=model,
                work_dir=tmp_dir,
                optimizer=optimizer,
                logger=logging.getLogger(),
                meta=dict()))
        return runner

    # Epoch-based: batch_size=1 accumulated over 3 iters vs batch_size=3.
    data = torch.rand((6, 3)).cuda()
    loader_1 = DataLoader(data, batch_size=1)
    runner_1 = build_toy_runner()
    optimizer_hook = GradientCumulativeFp16OptimizerHook(
        grad_clip=dict(max_norm=0.2), cumulative_iters=3)
    runner_1.register_hook(optimizer_hook)
    runner_1.run([loader_1], [('train', 1)])

    loader_2 = DataLoader(data, batch_size=3)
    runner_2 = build_toy_runner()
    optimizer_hook = Fp16OptimizerHook(grad_clip=dict(max_norm=0.2))
    runner_2.register_hook(optimizer_hook)
    runner_2.run([loader_2], [('train', 1)])

    assert (runner_1.model.fc.weight < 1).all()
    assert (runner_1.model.fc.bias < 1).all()
    assert torch.allclose(runner_1.model.fc.weight, runner_2.model.fc.weight)
    assert torch.allclose(runner_1.model.fc.bias, runner_2.model.fc.bias)
    shutil.rmtree(runner_1.work_dir)
    shutil.rmtree(runner_2.work_dir)

    # Iter-based: 8 samples accumulated in threes (remainder of 2) vs a
    # reference runner stepping on batches of 3, 3 and 2.
    data = torch.rand((8, 3)).cuda()
    loader_1 = DataLoader(data, batch_size=1)
    runner_1 = build_toy_runner(dict(type='IterBasedRunner', max_iters=8))
    optimizer_hook = GradientCumulativeFp16OptimizerHook(
        grad_clip=dict(max_norm=0.2), cumulative_iters=3)
    runner_1.register_hook(optimizer_hook)
    runner_1.run([loader_1], [('train', 1)])

    loader_2_divisible = DataLoader(data[:6], batch_size=3)
    loader_2_remainder = DataLoader(data[6:], batch_size=2)
    runner_2 = build_toy_runner(dict(type='IterBasedRunner', max_iters=3))
    optimizer_hook = Fp16OptimizerHook(grad_clip=dict(max_norm=0.2))
    runner_2.register_hook(optimizer_hook)
    runner_2.run([loader_2_divisible, loader_2_remainder], [('train', 2),
                                                            ('train', 1)])

    assert (runner_1.model.fc.weight < 1).all()
    assert (runner_1.model.fc.bias < 1).all()
    assert torch.allclose(runner_1.model.fc.weight, runner_2.model.fc.weight)
    assert torch.allclose(runner_1.model.fc.bias, runner_2.model.fc.bias)
    shutil.rmtree(runner_1.work_dir)
    shutil.rmtree(runner_2.work_dir)
class SubModel(nn.Module):
    # Nested module used by ExampleModel: a grouped (depthwise-style) conv,
    # a group norm and a bare Parameter. Registration order matters — the
    # optimizer tests below rely on the order of named_parameters().

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(2, 2, kernel_size=1, groups=2)
        self.gn = nn.GroupNorm(2, 2)
        self.param1 = nn.Parameter(torch.ones(1))

    def forward(self, x):
        # Identity: only the parameter layout is exercised by the tests.
        return x
class ExampleModel(nn.Module):
    # Toy model covering the parameter kinds the paramwise optimizer
    # constructor special-cases: a bare Parameter, conv with/without bias,
    # batch norm, a nested sub-module and — only when mmcv ops are built —
    # a deformable conv with its offset branch. Registration order matters
    # for the named_parameters()-based checks below.

    def __init__(self):
        super().__init__()
        self.param1 = nn.Parameter(torch.ones(1))
        self.conv1 = nn.Conv2d(3, 4, kernel_size=1, bias=False)
        self.conv2 = nn.Conv2d(4, 2, kernel_size=1)
        self.bn = nn.BatchNorm2d(2)
        self.sub = SubModel()
        if OPS_AVAILABLE:
            # Import lazily: mmcv.ops may not be compiled in this install.
            from mmcv.ops import DeformConv2dPack
            self.dcn = DeformConv2dPack(
                3, 4, kernel_size=3, deformable_groups=1)

    def forward(self, x):
        # Identity: only the parameter layout is exercised by the tests.
        return x
class ExampleDuplicateModel(nn.Module):
    # Variant of ExampleModel in which conv3 shares the exact same Conv2d
    # instance as conv1, producing a duplicated parameter — used to test the
    # optimizer constructor's `bypass_duplicate` handling.

    def __init__(self):
        super().__init__()
        self.param1 = nn.Parameter(torch.ones(1))
        self.conv1 = nn.Sequential(nn.Conv2d(3, 4, kernel_size=1, bias=False))
        self.conv2 = nn.Sequential(nn.Conv2d(4, 2, kernel_size=1))
        self.bn = nn.BatchNorm2d(2)
        self.sub = SubModel()
        self.conv3 = nn.Sequential(nn.Conv2d(3, 4, kernel_size=1, bias=False))
        # Alias conv1's module into conv3 to create the duplicate parameter.
        self.conv3[0] = self.conv1[0]
        if OPS_AVAILABLE:
            from mmcv.ops import DeformConv2dPack
            self.dcn = DeformConv2dPack(
                3, 4, kernel_size=3, deformable_groups=1)

    def forward(self, x):
        # Identity: only the parameter layout is exercised by the tests.
        return x
class PseudoDataParallel(nn.Module):
    # Mimics the nn.DataParallel layout (a single `module` attribute) so
    # the optimizer constructor's 'module.'-prefix handling can be tested
    # without requiring a GPU.

    def __init__(self):
        super().__init__()
        self.module = ExampleModel()

    def forward(self, x):
        # Identity: only the parameter layout is exercised by the tests.
        return x
def check_default_optimizer(optimizer, model, prefix=''):
    """Assert that ``optimizer`` is a plain SGD built from the module-level
    ``base_lr``/``momentum``/``base_wd`` settings, with a single param group
    covering every parameter of ``model`` in registration order.

    ``prefix`` is prepended to each expected parameter name (e.g.
    ``'module.'`` for (pseudo) DataParallel wrappers).
    """
    assert isinstance(optimizer, torch.optim.SGD)
    assert optimizer.defaults['lr'] == base_lr
    assert optimizer.defaults['momentum'] == momentum
    assert optimizer.defaults['weight_decay'] == base_wd

    group = optimizer.param_groups[0]
    param_names = [
        'param1', 'conv1.weight', 'conv2.weight', 'conv2.bias', 'bn.weight',
        'bn.bias', 'sub.param1', 'sub.conv1.weight', 'sub.conv1.bias',
        'sub.gn.weight', 'sub.gn.bias'
    ]
    if OPS_AVAILABLE:
        # The deformable conv contributes three extra parameters.
        param_names += [
            'dcn.weight', 'dcn.conv_offset.weight', 'dcn.conv_offset.bias'
        ]

    param_dict = dict(model.named_parameters())
    assert len(group['params']) == len(param_names)
    for actual, name in zip(group['params'], param_names):
        assert torch.equal(actual, param_dict[prefix + name])
def check_sgd_optimizer(optimizer,
                        model,
                        prefix='',
                        bias_lr_mult=1,
                        bias_decay_mult=1,
                        norm_decay_mult=1,
                        dwconv_decay_mult=1,
                        dcn_offset_lr_mult=1,
                        bypass_duplicate=False):
    """Assert per-parameter lr/weight-decay multipliers on an SGD optimizer
    built from ``model`` (an ExampleModel-shaped module) with paramwise_cfg.

    Fix: the DCN param-group checks were gated on
    ``torch.cuda.is_available()``, but the model only contains ``dcn`` when
    ``OPS_AVAILABLE`` is True (see ExampleModel / check_default_optimizer).
    On a CUDA machine without mmcv ops this would raise IndexError, and with
    ops but no CUDA the checks were silently skipped; the gate is now
    ``OPS_AVAILABLE``.
    """
    param_groups = optimizer.param_groups
    assert isinstance(optimizer, torch.optim.SGD)
    assert (optimizer.defaults['lr'] == base_lr)
    assert (optimizer.defaults['momentum'] == momentum)
    assert (optimizer.defaults['weight_decay'] == base_wd)

    # One param group per parameter, in registration order, all sharing
    # the base momentum.
    model_parameters = list(model.parameters())
    assert (len(param_groups) == len(model_parameters))
    for (i, param) in enumerate(model_parameters):
        param_group = param_groups[i]
        assert torch.equal(param_group['params'][0], param)
        assert (param_group['momentum'] == momentum)

    # param1 (bare Parameter): defaults apply.
    param1 = param_groups[0]
    assert (param1['lr'] == base_lr)
    assert (param1['weight_decay'] == base_wd)
    # conv weights: defaults apply.
    conv1_weight = param_groups[1]
    assert (conv1_weight['lr'] == base_lr)
    assert (conv1_weight['weight_decay'] == base_wd)
    conv2_weight = param_groups[2]
    assert (conv2_weight['lr'] == base_lr)
    assert (conv2_weight['weight_decay'] == base_wd)
    # conv bias: bias multipliers apply.
    conv2_bias = param_groups[3]
    assert (conv2_bias['lr'] == (base_lr * bias_lr_mult))
    assert (conv2_bias['weight_decay'] == (base_wd * bias_decay_mult))
    # batch norm: norm decay multiplier applies to weight and bias.
    bn_weight = param_groups[4]
    assert (bn_weight['lr'] == base_lr)
    assert (bn_weight['weight_decay'] == (base_wd * norm_decay_mult))
    bn_bias = param_groups[5]
    assert (bn_bias['lr'] == base_lr)
    assert (bn_bias['weight_decay'] == (base_wd * norm_decay_mult))
    # nested bare Parameter: defaults apply.
    sub_param1 = param_groups[6]
    assert (sub_param1['lr'] == base_lr)
    assert (sub_param1['weight_decay'] == base_wd)
    # depthwise conv: dwconv decay multiplier; its bias also gets the
    # bias lr multiplier.
    sub_conv1_weight = param_groups[7]
    assert (sub_conv1_weight['lr'] == base_lr)
    assert (sub_conv1_weight['weight_decay'] == (base_wd * dwconv_decay_mult))
    sub_conv1_bias = param_groups[8]
    assert (sub_conv1_bias['lr'] == (base_lr * bias_lr_mult))
    assert (sub_conv1_bias['weight_decay'] == (base_wd * dwconv_decay_mult))
    # group norm: norm decay multiplier applies.
    sub_gn_weight = param_groups[9]
    assert (sub_gn_weight['lr'] == base_lr)
    assert (sub_gn_weight['weight_decay'] == (base_wd * norm_decay_mult))
    sub_gn_bias = param_groups[10]
    assert (sub_gn_bias['lr'] == base_lr)
    assert (sub_gn_bias['weight_decay'] == (base_wd * norm_decay_mult))

    # DCN groups exist only when the model was built with mmcv ops
    # available (matches the gate used in ExampleModel).
    if OPS_AVAILABLE:
        dcn_conv_weight = param_groups[11]
        assert (dcn_conv_weight['lr'] == base_lr)
        assert (dcn_conv_weight['weight_decay'] == base_wd)
        # Offset branch gets its own lr multiplier.
        dcn_offset_weight = param_groups[12]
        assert (dcn_offset_weight['lr'] == (base_lr * dcn_offset_lr_mult))
        assert (dcn_offset_weight['weight_decay'] == base_wd)
        dcn_offset_bias = param_groups[13]
        assert (dcn_offset_bias['lr'] == (base_lr * dcn_offset_lr_mult))
        assert (dcn_offset_bias['weight_decay'] == base_wd)
def test_default_optimizer_constructor():
    """Exercise DefaultOptimizerConstructor: input validation, plain and
    paramwise configs, (pseudo) data-parallel models, frozen parameters,
    duplicated parameters and ``custom_keys``.

    Fix over the previous version: the duplicate-parameter message is now
    compared against ``str(excinfo.value)`` — comparing a str against the
    exception object itself is always False, so the old assertion could
    never hold.
    """
    model = ExampleModel()

    # optimizer_cfg must be a dict.
    with pytest.raises(TypeError):
        optimizer_cfg = []
        optim_constructor = DefaultOptimizerConstructor(optimizer_cfg)
        optim_constructor(model)

    # paramwise_cfg must be a dict or None.
    with pytest.raises(TypeError):
        optimizer_cfg = dict(lr=0.0001)
        paramwise_cfg = ['error']
        optim_constructor = DefaultOptimizerConstructor(optimizer_cfg, paramwise_cfg)
        optim_constructor(model)

    # Decay multipliers require a numeric weight_decay.
    with pytest.raises(ValueError):
        optimizer_cfg = dict(lr=0.0001, weight_decay=None)
        paramwise_cfg = dict(bias_decay_mult=1, norm_decay_mult=1)
        optim_constructor = DefaultOptimizerConstructor(optimizer_cfg, paramwise_cfg)
        optim_constructor(model)

    # Basic config with ExampleModel.
    optimizer_cfg = dict(type='SGD', lr=base_lr, weight_decay=base_wd, momentum=momentum)
    optim_constructor = DefaultOptimizerConstructor(optimizer_cfg)
    optimizer = optim_constructor(model)
    check_default_optimizer(optimizer, model)

    # Basic config with a pseudo data-parallel model.
    model = PseudoDataParallel()
    optimizer_cfg = dict(type='SGD', lr=base_lr, weight_decay=base_wd, momentum=momentum)
    paramwise_cfg = None
    optim_constructor = DefaultOptimizerConstructor(optimizer_cfg)
    optimizer = optim_constructor(model)
    check_default_optimizer(optimizer, model, prefix='module.')

    # Basic config with DataParallel (CUDA only).
    if torch.cuda.is_available():
        model = torch.nn.DataParallel(ExampleModel())
        optimizer_cfg = dict(type='SGD', lr=base_lr, weight_decay=base_wd, momentum=momentum)
        paramwise_cfg = None
        optim_constructor = DefaultOptimizerConstructor(optimizer_cfg)
        optimizer = optim_constructor(model)
        check_default_optimizer(optimizer, model, prefix='module.')

    # An empty paramwise_cfg behaves like no paramwise_cfg at all.
    model = ExampleModel()
    optimizer_cfg = dict(type='SGD', lr=base_lr, weight_decay=base_wd, momentum=momentum)
    paramwise_cfg = dict()
    optim_constructor = DefaultOptimizerConstructor(optimizer_cfg, paramwise_cfg)
    optimizer = optim_constructor(model)
    check_default_optimizer(optimizer, model)

    # Empty paramwise_cfg with all parameters frozen.
    model = ExampleModel()
    for param in model.parameters():
        param.requires_grad = False
    optimizer_cfg = dict(type='SGD', lr=base_lr, weight_decay=base_wd, momentum=momentum)
    paramwise_cfg = dict()
    optim_constructor = DefaultOptimizerConstructor(optimizer_cfg)
    optimizer = optim_constructor(model)
    check_default_optimizer(optimizer, model)

    # Full paramwise_cfg with ExampleModel.
    model = ExampleModel()
    optimizer_cfg = dict(type='SGD', lr=base_lr, weight_decay=base_wd, momentum=momentum)
    paramwise_cfg = dict(bias_lr_mult=2, bias_decay_mult=0.5, norm_decay_mult=0, dwconv_decay_mult=0.1, dcn_offset_lr_mult=0.1)
    optim_constructor = DefaultOptimizerConstructor(optimizer_cfg, paramwise_cfg)
    optimizer = optim_constructor(model)
    check_sgd_optimizer(optimizer, model, **paramwise_cfg)

    # Optimizer without weight decay (Rprop): only lr multipliers apply.
    model = ExampleModel()
    optimizer_cfg = dict(type='Rprop', lr=base_lr)
    paramwise_cfg = dict(bias_lr_mult=2)
    optim_constructor = DefaultOptimizerConstructor(optimizer_cfg, paramwise_cfg)
    optimizer = optim_constructor(model)
    param_groups = optimizer.param_groups
    assert isinstance(optimizer, torch.optim.Rprop)
    assert optimizer.defaults['lr'] == base_lr
    model_parameters = list(model.parameters())
    assert len(param_groups) == len(model_parameters)
    for i, param in enumerate(model_parameters):
        param_group = param_groups[i]
        assert torch.equal(param_group['params'][0], param)
    # Only the bias groups (indices 3 and 8) get the lr multiplier.
    assert param_groups[0]['lr'] == base_lr
    assert param_groups[1]['lr'] == base_lr
    assert param_groups[2]['lr'] == base_lr
    assert param_groups[3]['lr'] == base_lr * paramwise_cfg['bias_lr_mult']
    assert param_groups[4]['lr'] == base_lr
    assert param_groups[5]['lr'] == base_lr
    assert param_groups[6]['lr'] == base_lr
    assert param_groups[7]['lr'] == base_lr
    assert param_groups[8]['lr'] == base_lr * paramwise_cfg['bias_lr_mult']
    assert param_groups[9]['lr'] == base_lr
    assert param_groups[10]['lr'] == base_lr
    if OPS_AVAILABLE:
        assert param_groups[11]['lr'] == base_lr
        assert param_groups[12]['lr'] == base_lr
        assert param_groups[13]['lr'] == base_lr

    # paramwise_cfg with a pseudo data-parallel model.
    model = PseudoDataParallel()
    optimizer_cfg = dict(type='SGD', lr=base_lr, weight_decay=base_wd, momentum=momentum)
    paramwise_cfg = dict(bias_lr_mult=2, bias_decay_mult=0.5, norm_decay_mult=0, dwconv_decay_mult=0.1, dcn_offset_lr_mult=0.1)
    optim_constructor = DefaultOptimizerConstructor(optimizer_cfg, paramwise_cfg)
    optimizer = optim_constructor(model)
    check_sgd_optimizer(optimizer, model, prefix='module.', **paramwise_cfg)

    # paramwise_cfg with DataParallel (CUDA only).
    if torch.cuda.is_available():
        model = torch.nn.DataParallel(ExampleModel())
        optimizer_cfg = dict(type='SGD', lr=base_lr, weight_decay=base_wd, momentum=momentum)
        paramwise_cfg = dict(bias_lr_mult=2, bias_decay_mult=0.5, norm_decay_mult=0, dwconv_decay_mult=0.1, dcn_offset_lr_mult=0.1)
        optim_constructor = DefaultOptimizerConstructor(optimizer_cfg, paramwise_cfg)
        optimizer = optim_constructor(model)
        check_sgd_optimizer(optimizer, model, prefix='module.', **paramwise_cfg)

    # paramwise_cfg with all parameters frozen: multipliers are not applied.
    for param in model.parameters():
        param.requires_grad = False
    optim_constructor = DefaultOptimizerConstructor(optimizer_cfg, paramwise_cfg)
    optimizer = optim_constructor(model)
    param_groups = optimizer.param_groups
    assert isinstance(optimizer, torch.optim.SGD)
    assert optimizer.defaults['lr'] == base_lr
    assert optimizer.defaults['momentum'] == momentum
    assert optimizer.defaults['weight_decay'] == base_wd
    for i, (name, param) in enumerate(model.named_parameters()):
        param_group = param_groups[i]
        assert torch.equal(param_group['params'][0], param)
        assert param_group['momentum'] == momentum
        assert param_group['lr'] == base_lr
        assert param_group['weight_decay'] == base_wd

    # Duplicated parameters are rejected by default.
    model = ExampleDuplicateModel()
    optimizer_cfg = dict(type='SGD', lr=base_lr, weight_decay=base_wd, momentum=momentum)
    paramwise_cfg = dict(bias_lr_mult=2, bias_decay_mult=0.5, norm_decay_mult=0, dwconv_decay_mult=0.1)
    with pytest.raises(ValueError) as excinfo:
        optim_constructor = DefaultOptimizerConstructor(optimizer_cfg, paramwise_cfg)
        optim_constructor(model)
    # Fixed: compare against the stringified exception, outside the ``with``
    # block so the assertion actually executes.
    assert 'some parameters appear in more than one parameter group' == str(excinfo.value)

    # ...unless bypass_duplicate=True, which only emits a warning.
    paramwise_cfg = dict(bias_lr_mult=2, bias_decay_mult=0.5, norm_decay_mult=0, dwconv_decay_mult=0.1, dcn_offset_lr_mult=0.1, bypass_duplicate=True)
    optim_constructor = DefaultOptimizerConstructor(optimizer_cfg, paramwise_cfg)
    with warnings.catch_warnings(record=True) as w:
        optimizer = optim_constructor(model)
        warnings.simplefilter('always')
        assert len(w) == 1
        assert str(w[0].message) == 'conv3.0 is duplicate. It is skipped since bypass_duplicate=True'
    model_parameters = list(model.parameters())
    num_params = 14 if OPS_AVAILABLE else 11
    assert len(optimizer.param_groups) == len(model_parameters) == num_params
    check_sgd_optimizer(optimizer, model, **paramwise_cfg)

    # custom_keys: per-name-substring lr/decay multipliers.
    model = ExampleModel()
    optimizer_cfg = dict(type='SGD', lr=base_lr, weight_decay=base_wd, momentum=momentum)
    paramwise_cfg = dict(custom_keys={'param1': dict(lr_mult=10), 'sub': dict(lr_mult=0.1, decay_mult=0), 'sub.gn': dict(lr_mult=0.01), 'non_exist_key': dict(lr_mult=0.0)}, norm_decay_mult=0.5)

    # custom_keys must be a dict.
    with pytest.raises(TypeError):
        paramwise_cfg_ = dict(custom_keys=[0.1, 0.0001])
        optim_constructor = DefaultOptimizerConstructor(optimizer_cfg, paramwise_cfg_)
        optimizer = optim_constructor(model)

    # decay_mult in custom_keys requires weight_decay to be set.
    with pytest.raises(ValueError):
        optimizer_cfg_ = dict(type='SGD', lr=0.01)
        paramwise_cfg_ = dict(custom_keys={'.backbone': dict(decay_mult=0.5)})
        optim_constructor = DefaultOptimizerConstructor(optimizer_cfg_, paramwise_cfg_)
        optimizer = optim_constructor(model)

    optim_constructor = DefaultOptimizerConstructor(optimizer_cfg, paramwise_cfg)
    optimizer = optim_constructor(model)
    assert isinstance(optimizer, torch.optim.SGD)
    assert optimizer.defaults['lr'] == base_lr
    assert optimizer.defaults['momentum'] == momentum
    assert optimizer.defaults['weight_decay'] == base_wd
    param_groups = optimizer.param_groups
    # Expected settings per parameter name.
    groups = []
    group_settings = []
    groups.append(['param1', 'sub.param1'])
    group_settings.append({'lr': base_lr * 10, 'momentum': momentum, 'weight_decay': base_wd})
    groups.append(['sub.gn.weight', 'sub.gn.bias'])
    group_settings.append({'lr': base_lr * 0.01, 'momentum': momentum, 'weight_decay': base_wd})
    groups.append(['sub.conv1.weight', 'sub.conv1.bias'])
    group_settings.append({'lr': base_lr * 0.1, 'momentum': momentum, 'weight_decay': 0})
    groups.append(['bn.weight', 'bn.bias'])
    group_settings.append({'lr': base_lr, 'momentum': momentum, 'weight_decay': base_wd * 0.5})
    groups.append(['conv1.weight', 'conv2.weight', 'conv2.bias'])
    group_settings.append({'lr': base_lr, 'momentum': momentum, 'weight_decay': base_wd})
    num_params = 14 if OPS_AVAILABLE else 11
    assert len(param_groups) == num_params
    for i, (name, param) in enumerate(model.named_parameters()):
        assert torch.equal(param_groups[i]['params'][0], param)
        for group, settings in zip(groups, group_settings):
            if name in group:
                for setting in settings:
                    assert param_groups[i][setting] == settings[setting], f'{name} {setting}'

    # custom_keys without a global weight_decay: decay defaults to 0.
    model = ExampleModel()
    optimizer_cfg = dict(type='SGD', lr=base_lr, momentum=momentum)
    paramwise_cfg = dict(custom_keys={'param1': dict(lr_mult=10)})
    optim_constructor = DefaultOptimizerConstructor(optimizer_cfg, paramwise_cfg)
    optimizer = optim_constructor(model)
    assert isinstance(optimizer, torch.optim.SGD)
    assert optimizer.defaults['lr'] == base_lr
    assert optimizer.defaults['momentum'] == momentum
    assert optimizer.defaults['weight_decay'] == 0
    param_groups = optimizer.param_groups
    groups = []
    group_settings = []
    groups.append(['param1', 'sub.param1'])
    group_settings.append({'lr': base_lr * 10, 'momentum': momentum, 'weight_decay': 0})
    groups.append(['sub.conv1.weight', 'sub.conv1.bias', 'sub.gn.weight', 'sub.gn.bias', 'conv1.weight', 'conv2.weight', 'conv2.bias', 'bn.weight', 'bn.bias'])
    group_settings.append({'lr': base_lr, 'momentum': momentum, 'weight_decay': 0})
    num_params = 14 if OPS_AVAILABLE else 11
    assert len(param_groups) == num_params
    for i, (name, param) in enumerate(model.named_parameters()):
        assert torch.equal(param_groups[i]['params'][0], param)
        for group, settings in zip(groups, group_settings):
            if name in group:
                for setting in settings:
                    assert param_groups[i][setting] == settings[setting], f'{name} {setting}'
def test_torch_optimizers():
    """Every stock torch optimizer name should be present in TORCH_OPTIMIZERS."""
    expected = {'ASGD', 'Adadelta', 'Adagrad', 'Adam', 'AdamW', 'Adamax',
                'LBFGS', 'Optimizer', 'RMSprop', 'Rprop', 'SGD', 'SparseAdam'}
    registered = set(TORCH_OPTIMIZERS)
    assert expected <= registered
def test_build_optimizer_constructor():
    """build_optimizer_constructor should build both the default constructor
    and a custom constructor registered in OPTIMIZER_BUILDERS."""
    model = ExampleModel()
    optimizer_cfg = dict(type='SGD', lr=base_lr, weight_decay=base_wd, momentum=momentum)
    paramwise_cfg = dict(bias_lr_mult=2, bias_decay_mult=0.5, norm_decay_mult=0, dwconv_decay_mult=0.1, dcn_offset_lr_mult=0.1)
    # Default constructor built from a cfg dict.
    optim_constructor_cfg = dict(type='DefaultOptimizerConstructor', optimizer_cfg=optimizer_cfg, paramwise_cfg=paramwise_cfg)
    optim_constructor = build_optimizer_constructor(optim_constructor_cfg)
    optimizer = optim_constructor(model)
    check_sgd_optimizer(optimizer, model, **paramwise_cfg)
    from mmcv.runner import OPTIMIZERS
    from mmcv.utils import build_from_cfg

    # NOTE: registration is a module-level side effect; rerunning this test in
    # the same process relies on the registry tolerating/containing the class.
    @OPTIMIZER_BUILDERS.register_module()
    class MyOptimizerConstructor(DefaultOptimizerConstructor):
        # Custom constructor that scales conv1's lr by 'conv1_lr_mult'.

        def __call__(self, model):
            if hasattr(model, 'module'):
                model = model.module
            conv1_lr_mult = self.paramwise_cfg.get('conv1_lr_mult', 1.0)
            params = []
            for (name, param) in model.named_parameters():
                param_group = {'params': [param]}
                if (name.startswith('conv1') and param.requires_grad):
                    param_group['lr'] = (self.base_lr * conv1_lr_mult)
                params.append(param_group)
            # NOTE(review): this mutates the enclosing test's ``optimizer_cfg``
            # via closure, not ``self.optimizer_cfg`` — intentional here, but
            # easy to misread.
            optimizer_cfg['params'] = params
            return build_from_cfg(optimizer_cfg, OPTIMIZERS)
    paramwise_cfg = dict(conv1_lr_mult=5)
    optim_constructor_cfg = dict(type='MyOptimizerConstructor', optimizer_cfg=optimizer_cfg, paramwise_cfg=paramwise_cfg)
    optim_constructor = build_optimizer_constructor(optim_constructor_cfg)
    optimizer = optim_constructor(model)
    param_groups = optimizer.param_groups
    assert isinstance(optimizer, torch.optim.SGD)
    assert (optimizer.defaults['lr'] == base_lr)
    assert (optimizer.defaults['momentum'] == momentum)
    assert (optimizer.defaults['weight_decay'] == base_wd)
    for (i, param) in enumerate(model.parameters()):
        param_group = param_groups[i]
        assert torch.equal(param_group['params'][0], param)
        assert (param_group['momentum'] == momentum)
    # Group index 1 corresponds to conv1's weight, which got the custom lr.
    assert (param_groups[1]['lr'] == (base_lr * paramwise_cfg['conv1_lr_mult']))
    assert (param_groups[1]['weight_decay'] == base_wd)
def test_build_optimizer():
    """build_optimizer should honour a plain cfg and one embedding paramwise_cfg."""
    # Plain SGD config, no per-parameter options.
    net = ExampleModel()
    cfg = dict(type='SGD', lr=base_lr, weight_decay=base_wd, momentum=momentum)
    opt = build_optimizer(net, cfg)
    check_default_optimizer(opt, net)

    # Same base config, with paramwise_cfg nested inside it.
    net = ExampleModel()
    pw = dict(bias_lr_mult=2, bias_decay_mult=0.5, norm_decay_mult=0,
              dwconv_decay_mult=0.1, dcn_offset_lr_mult=0.1)
    cfg = dict(type='SGD', lr=base_lr, weight_decay=base_wd,
               momentum=momentum, paramwise_cfg=pw)
    opt = build_optimizer(net, cfg)
    check_sgd_optimizer(opt, net, **cfg['paramwise_cfg'])
class OldStyleModel(nn.Module):
    """Minimal nn.Module without train_step/val_step (pre-runner interface)."""

    def __init__(self):
        super(OldStyleModel, self).__init__()
        # Single 3->3 convolution with a 1x1 kernel.
        self.conv = nn.Conv2d(3, 3, 1)
class Model(OldStyleModel):
    """OldStyleModel extended with the runner's train_step/val_step interface."""

    def train_step(self):
        # No-op: tests only check that the method exists.
        pass

    def val_step(self):
        # No-op placeholder matching train_step.
        pass
def test_build_runner():
    """build_runner should create runners from cfg and reject ambiguous limits."""
    scratch_dir = osp.join(
        tempfile.gettempdir(),
        ''.join(random.choice(string.ascii_letters) for _ in range(10)))
    common = dict(model=Model(), work_dir=scratch_dir, logger=logging.getLogger())

    epoch_runner = build_runner(
        dict(type='EpochBasedRunner', max_epochs=1), default_args=common)
    assert epoch_runner._max_epochs == 1

    iter_runner = build_runner(
        dict(type='IterBasedRunner', max_iters=1), default_args=common)
    assert iter_runner._max_iters == 1

    # Specifying both max_epochs and max_iters is ambiguous and must fail.
    with pytest.raises(ValueError, match='Only one of'):
        build_runner(
            dict(type='IterBasedRunner', max_epochs=1, max_iters=1),
            default_args=common)
@pytest.mark.parametrize('runner_class', RUNNERS.module_dict.values())
def test_epoch_based_runner(runner_class):
    """Constructor validation for every registered runner class."""
    # Passing a batch_processor is deprecated and should warn.
    with pytest.warns(DeprecationWarning):
        model = OldStyleModel()

        def batch_processor():
            pass
        _ = runner_class(model, batch_processor, logger=logging.getLogger())
    # batch_processor must be callable.
    with pytest.raises(TypeError):
        model = OldStyleModel()
        _ = runner_class(model, batch_processor=0, logger=logging.getLogger())
    # optimizer must be an Optimizer object or a dict of Optimizers.
    with pytest.raises(TypeError):
        model = Model()
        optimizer = 'NotAOptimizer'
        _ = runner_class(model, optimizer=optimizer, logger=logging.getLogger())
    # A dict containing a non-optimizer value must fail too.
    # NOTE(review): torch.optim.Adam() without params itself raises TypeError,
    # so this may pass before the runner is even constructed — verify intent.
    with pytest.raises(TypeError):
        model = Model()
        optimizers = dict(optim1=torch.optim.Adam(), optim2='NotAOptimizer')
        _ = runner_class(model, optimizer=optimizers, logger=logging.getLogger())
    # logger is mandatory.
    with pytest.raises(TypeError):
        model = Model()
        _ = runner_class(model, logger=None)
    # meta must be a dict or None.
    with pytest.raises(TypeError):
        model = Model()
        _ = runner_class(model, logger=logging.getLogger(), meta=['list'])
    # Without a batch_processor the model must implement train_step.
    with pytest.raises(AssertionError):
        model = OldStyleModel()
        _ = runner_class(model, logger=logging.getLogger())
    # work_dir must be a str or None.
    with pytest.raises(TypeError):
        model = Model()
        _ = runner_class(model, work_dir=1, logger=logging.getLogger())
    # batch_processor and model.train_step are mutually exclusive.
    with pytest.raises(RuntimeError):

        def batch_processor():
            pass
        model = Model()
        _ = runner_class(model, batch_processor, logger=logging.getLogger())
    # A fresh random work_dir is created on demand; reusing it is fine.
    model = Model()
    temp_root = tempfile.gettempdir()
    dir_name = ''.join([random.choice(string.ascii_letters) for _ in range(10)])
    work_dir = osp.join(temp_root, dir_name)
    _ = runner_class(model, work_dir=work_dir, logger=logging.getLogger())
    assert osp.isdir(work_dir)
    _ = runner_class(model, work_dir=work_dir, logger=logging.getLogger())
    assert osp.isdir(work_dir)
    os.removedirs(work_dir)
@pytest.mark.parametrize('runner_class', RUNNERS.module_dict.values())
def test_runner_with_parallel(runner_class):
    """Runners should accept MMDataParallel-wrapped models."""

    def batch_processor():
        pass

    # Wrapped old-style model plus a batch_processor is accepted.
    wrapped = MMDataParallel(OldStyleModel())
    _ = runner_class(wrapped, batch_processor, logger=logging.getLogger())

    # Wrapped new-style model (train_step/val_step) needs no batch_processor.
    wrapped = MMDataParallel(Model())
    _ = runner_class(wrapped, logger=logging.getLogger())

    # Supplying both a batch_processor and a model defining train_step
    # is contradictory and must raise.
    with pytest.raises(RuntimeError):

        def batch_processor():
            pass

        wrapped = MMDataParallel(Model())
        _ = runner_class(wrapped, batch_processor, logger=logging.getLogger())
@pytest.mark.parametrize('runner_class', RUNNERS.module_dict.values())
def test_save_checkpoint(runner_class):
    """save_checkpoint should write epoch_1/iter_1 plus 'latest.pth' and
    validate the meta argument."""
    model = Model()
    runner = runner_class(model=model, logger=logging.getLogger())
    # meta must be a dict or None.
    with pytest.raises(TypeError):
        runner.save_checkpoint('.', meta=list())
    with tempfile.TemporaryDirectory() as root:
        runner.save_checkpoint(root)
        latest_path = osp.join(root, 'latest.pth')
        assert osp.exists(latest_path)
        # First checkpoint is named after the runner's progress unit.
        if isinstance(runner, EpochBasedRunner):
            first_ckp_path = osp.join(root, 'epoch_1.pth')
        elif isinstance(runner, IterBasedRunner):
            first_ckp_path = osp.join(root, 'iter_1.pth')
        assert osp.exists(first_ckp_path)
        if (platform.system() != 'Windows'):
            # latest.pth resolves to the real checkpoint (symlink-style).
            assert (osp.realpath(latest_path) == osp.realpath(first_ckp_path))
        else:
            # No symlinks on Windows; presumably a plain copy — nothing to check.
            pass
        # The saved file must be loadable by torch.
        torch.load(latest_path)
@pytest.mark.parametrize('runner_class', RUNNERS.module_dict.values())
def test_build_lr_momentum_hook(runner_class):
    """Each register_lr_hook/register_momentum_hook call should add exactly
    one hook, with policy names accepted in CamelCase and lowercase."""
    model = Model()
    runner = runner_class(model=model, logger=logging.getLogger())
    # Cosine-annealing lr with warmup.
    lr_config = dict(policy='CosineAnnealing', by_epoch=False, min_lr_ratio=0, warmup_iters=2, warmup_ratio=0.9)
    runner.register_lr_hook(lr_config)
    assert (len(runner.hooks) == 1)
    # Cyclic lr, CamelCase policy name.
    lr_config = dict(policy='Cyclic', by_epoch=False, target_ratio=(10, 1), cyclic_times=1, step_ratio_up=0.4)
    runner.register_lr_hook(lr_config)
    assert (len(runner.hooks) == 2)
    # Lowercase policy name also resolves.
    lr_config = dict(policy='cyclic', by_epoch=False, target_ratio=((0.85 / 0.95), 1), cyclic_times=1, step_ratio_up=0.4)
    runner.register_lr_hook(lr_config)
    assert (len(runner.hooks) == 3)
    # Step lr with linear warmup, both name casings.
    lr_config = dict(policy='Step', warmup='linear', warmup_iters=500, warmup_ratio=(1.0 / 3), step=[8, 11])
    runner.register_lr_hook(lr_config)
    assert (len(runner.hooks) == 4)
    lr_config = dict(policy='step', warmup='linear', warmup_iters=500, warmup_ratio=(1.0 / 3), step=[8, 11])
    runner.register_lr_hook(lr_config)
    assert (len(runner.hooks) == 5)
    # Momentum schedules mirror the lr ones.
    mom_config = dict(policy='CosineAnnealing', min_momentum_ratio=(0.99 / 0.95), by_epoch=False, warmup_iters=2, warmup_ratio=(0.9 / 0.95))
    runner.register_momentum_hook(mom_config)
    assert (len(runner.hooks) == 6)
    mom_config = dict(policy='Cyclic', by_epoch=False, target_ratio=((0.85 / 0.95), 1), cyclic_times=1, step_ratio_up=0.4)
    runner.register_momentum_hook(mom_config)
    assert (len(runner.hooks) == 7)
    mom_config = dict(policy='cyclic', by_epoch=False, target_ratio=((0.85 / 0.95), 1), cyclic_times=1, step_ratio_up=0.4)
    runner.register_momentum_hook(mom_config)
    assert (len(runner.hooks) == 8)
@pytest.mark.parametrize('runner_class', RUNNERS.module_dict.values())
def test_register_timer_hook(runner_class):
    """register_timer_hook should accept None, a cfg dict, or a Hook object."""
    runner = runner_class(model=Model(), logger=logging.getLogger())

    # None is a no-op: nothing gets registered.
    timer_config = None
    runner.register_timer_hook(timer_config)
    assert len(runner.hooks) == 0

    # A config dict is built into an IterTimerHook instance.
    timer_config = dict(type='IterTimerHook')
    runner.register_timer_hook(timer_config)
    assert len(runner.hooks) == 1
    assert isinstance(runner.hooks[0], IterTimerHook)

    # An already-constructed hook instance is registered directly.
    timer_config = IterTimerHook()
    runner.register_timer_hook(timer_config)
    assert len(runner.hooks) == 2
    assert isinstance(runner.hooks[1], IterTimerHook)
def test_set_random_seed():
    """set_random_seed should seed python/numpy/torch RNGs and, with
    deterministic=True, set the cudnn flags — reproducing identical draws."""
    set_random_seed(0)
    a_random = random.randint(0, 10)
    a_np_random = np.random.rand(2, 2)
    a_torch_random = torch.rand(2, 2)
    # deterministic defaults to False.
    assert (torch.backends.cudnn.deterministic is False)
    assert (torch.backends.cudnn.benchmark is False)
    assert (os.environ['PYTHONHASHSEED'] == str(0))
    # Re-seed with deterministic=True.
    set_random_seed(0, True)
    b_random = random.randint(0, 10)
    b_np_random = np.random.rand(2, 2)
    b_torch_random = torch.rand(2, 2)
    assert (torch.backends.cudnn.deterministic is True)
    # ROCm builds keep benchmark True per the is_rocm_pytorch flag.
    if is_rocm_pytorch:
        assert (torch.backends.cudnn.benchmark is True)
    else:
        assert (torch.backends.cudnn.benchmark is False)
    # Identical seeds reproduce identical draws from all three RNGs.
    assert (a_random == b_random)
    assert np.equal(a_np_random, b_np_random).all()
    assert torch.equal(a_torch_random, b_torch_random)
def test_construct():
    """Config construction and dump round-trips for .py/.json/.yaml files,
    including predefined-variable ({{ ... }}) substitution."""
    # An empty Config.
    cfg = Config()
    assert (cfg.filename is None)
    assert (cfg.text == '')
    assert (len(cfg) == 0)
    assert (cfg._cfg_dict == {})
    # cfg_dict must be a dict.
    with pytest.raises(TypeError):
        Config([0, 1])
    cfg_dict = dict(item1=[1, 2], item2=dict(a=0), item3=True, item4='test')
    # Python config file.
    cfg_file = osp.join(data_path, 'config/a.py')
    cfg = Config(cfg_dict, filename=cfg_file)
    assert isinstance(cfg, Config)
    assert (cfg.filename == cfg_file)
    assert (cfg.text == open(cfg_file, 'r').read())
    assert (cfg.dump() == cfg.pretty_text)
    with tempfile.TemporaryDirectory() as temp_config_dir:
        dump_file = osp.join(temp_config_dir, 'a.py')
        cfg.dump(dump_file)
        assert (cfg.dump() == open(dump_file, 'r').read())
        assert Config.fromfile(dump_file)
    # JSON config file.
    cfg_file = osp.join(data_path, 'config/b.json')
    cfg = Config(cfg_dict, filename=cfg_file)
    assert isinstance(cfg, Config)
    assert (cfg.filename == cfg_file)
    assert (cfg.text == open(cfg_file, 'r').read())
    assert (cfg.dump() == json.dumps(cfg_dict))
    with tempfile.TemporaryDirectory() as temp_config_dir:
        dump_file = osp.join(temp_config_dir, 'b.json')
        cfg.dump(dump_file)
        assert (cfg.dump() == open(dump_file, 'r').read())
        assert Config.fromfile(dump_file)
    # YAML config file.
    cfg_file = osp.join(data_path, 'config/c.yaml')
    cfg = Config(cfg_dict, filename=cfg_file)
    assert isinstance(cfg, Config)
    assert (cfg.filename == cfg_file)
    assert (cfg.text == open(cfg_file, 'r').read())
    assert (cfg.dump() == yaml.dump(cfg_dict))
    with tempfile.TemporaryDirectory() as temp_config_dir:
        dump_file = osp.join(temp_config_dir, 'c.yaml')
        cfg.dump(dump_file)
        assert (cfg.dump() == open(dump_file, 'r').read())
        assert Config.fromfile(dump_file)
    # Python config using predefined variables ({{fileBasename}} etc.).
    cfg_file = osp.join(data_path, 'config/h.py')
    path = osp.join(osp.dirname(__file__), 'data', 'config')
    # as_posix() normalizes separators — presumably for Windows stability.
    path = Path(path).as_posix()
    cfg_dict = dict(item1='h.py', item2=path, item3='abc_h')
    cfg = Config(cfg_dict, filename=cfg_file)
    assert isinstance(cfg, Config)
    assert (cfg.filename == cfg_file)
    assert (cfg.text == open(cfg_file, 'r').read())
    assert (cfg.dump() == cfg.pretty_text)
    with tempfile.TemporaryDirectory() as temp_config_dir:
        dump_file = osp.join(temp_config_dir, 'h.py')
        cfg.dump(dump_file)
        assert (cfg.dump() == open(dump_file, 'r').read())
        assert Config.fromfile(dump_file)
        assert (Config.fromfile(dump_file)['item1'] == cfg_dict['item1'])
        assert (Config.fromfile(dump_file)['item2'] == cfg_dict['item2'])
        assert (Config.fromfile(dump_file)['item3'] == cfg_dict['item3'])
    # With substitution disabled (second arg False) raw templates survive.
    cfg_dict = dict(item1='{{fileBasename}}', item2='{{ fileDirname}}', item3='abc_{{ fileBasenameNoExtension }}')
    assert Config.fromfile(cfg_file, False)
    assert (Config.fromfile(cfg_file, False)['item1'] == cfg_dict['item1'])
    assert (Config.fromfile(cfg_file, False)['item2'] == cfg_dict['item2'])
    assert (Config.fromfile(cfg_file, False)['item3'] == cfg_dict['item3'])
    # YAML config with a predefined variable.
    cfg_file = osp.join(data_path, 'config/p.yaml')
    cfg_dict = dict(item1=osp.join(osp.dirname(__file__), 'data', 'config'))
    cfg = Config(cfg_dict, filename=cfg_file)
    assert isinstance(cfg, Config)
    assert (cfg.filename == cfg_file)
    assert (cfg.text == open(cfg_file, 'r').read())
    assert (cfg.dump() == yaml.dump(cfg_dict))
    with tempfile.TemporaryDirectory() as temp_config_dir:
        dump_file = osp.join(temp_config_dir, 'p.yaml')
        cfg.dump(dump_file)
        assert (cfg.dump() == open(dump_file, 'r').read())
        assert Config.fromfile(dump_file)
        assert (Config.fromfile(dump_file)['item1'] == cfg_dict['item1'])
    assert Config.fromfile(cfg_file, False)
    assert (Config.fromfile(cfg_file, False)['item1'] == '{{ fileDirname }}')
    # JSON config with a predefined variable.
    cfg_file = osp.join(data_path, 'config/o.json')
    cfg_dict = dict(item1=osp.join(osp.dirname(__file__), 'data', 'config'))
    cfg = Config(cfg_dict, filename=cfg_file)
    assert isinstance(cfg, Config)
    assert (cfg.filename == cfg_file)
    assert (cfg.text == open(cfg_file, 'r').read())
    assert (cfg.dump() == json.dumps(cfg_dict))
    with tempfile.TemporaryDirectory() as temp_config_dir:
        dump_file = osp.join(temp_config_dir, 'o.json')
        cfg.dump(dump_file)
        assert (cfg.dump() == open(dump_file, 'r').read())
        assert Config.fromfile(dump_file)
        assert (Config.fromfile(dump_file)['item1'] == cfg_dict['item1'])
    assert Config.fromfile(cfg_file, False)
    assert (Config.fromfile(cfg_file, False)['item1'] == '{{ fileDirname }}')
def test_fromfile():
    """Config.fromfile should load all supported formats, honour
    import_custom_modules, and fail cleanly on bad paths/formats."""
    for filename in ['a.py', 'a.b.py', 'b.json', 'c.yaml']:
        cfg_file = osp.join(data_path, 'config', filename)
        cfg = Config.fromfile(cfg_file)
        assert isinstance(cfg, Config)
        assert (cfg.filename == cfg_file)
        # text is the absolute path followed by the raw file content.
        assert (cfg.text == ((osp.abspath(osp.expanduser(cfg_file)) + '\n') + open(cfg_file, 'r').read()))
    # custom imports: q.py triggers an import that sets TEST_VALUE in the
    # environment (presumably via the copied r.py — verify the data files).
    cfg_file = osp.join(data_path, 'config', 'q.py')
    imported_file = osp.join(data_path, 'config', 'r.py')
    target_pkg = osp.join(osp.dirname(__file__), 'r.py')
    # Copy r.py next to this test module so it is importable; removed below.
    shutil.copy(imported_file, target_pkg)
    Config.fromfile(cfg_file, import_custom_modules=True)
    # pop() also cleans the environment back up.
    assert (os.environ.pop('TEST_VALUE') == 'test')
    os.remove(target_pkg)
    with pytest.raises(FileNotFoundError):
        Config.fromfile('no_such_file.py')
    # Unsupported file extension.
    with pytest.raises(IOError):
        Config.fromfile(osp.join(data_path, 'color.jpg'))
def test_fromstring():
    """Config.fromstring should round-trip text for every supported format."""
    for name in ['a.py', 'a.b.py', 'b.json', 'c.yaml']:
        path = osp.join(data_path, 'config', name)
        ext = osp.splitext(name)[-1]
        reference = Config.fromfile(path)
        # pretty_text is always valid python config syntax.
        rebuilt = Config.fromstring(reference.pretty_text, '.py')
        assert reference._cfg_dict == rebuilt._cfg_dict
        # The raw file content parses with its own extension.
        raw = open(path, 'r').read()
        rebuilt = Config.fromstring(raw, ext)
        assert reference._cfg_dict == rebuilt._cfg_dict

    # Mismatched text/format combinations must fail.
    path = osp.join(data_path, 'config', 'b.json')
    reference = Config.fromfile(path)
    with pytest.raises(Exception):
        Config.fromstring(reference.pretty_text, '.json')
    raw = open(path, 'r').read()
    with pytest.raises(Exception):
        Config.fromstring(raw, '.py')
def test_merge_from_base():
    """A child config should override values from its single _base_ file, and
    its .text should concatenate base and child paths/contents."""
    cfg_file = osp.join(data_path, 'config/d.py')
    cfg = Config.fromfile(cfg_file)
    assert isinstance(cfg, Config)
    assert (cfg.filename == cfg_file)
    # Expected text: abs base path, base content, abs child path, child content.
    base_cfg_file = osp.join(data_path, 'config/base.py')
    merge_text = ((osp.abspath(osp.expanduser(base_cfg_file)) + '\n') + open(base_cfg_file, 'r').read())
    merge_text += ((('\n' + osp.abspath(osp.expanduser(cfg_file))) + '\n') + open(cfg_file, 'r').read())
    assert (cfg.text == merge_text)
    # Child values win over base values.
    assert (cfg.item1 == [2, 3])
    assert (cfg.item2.a == 1)
    assert (cfg.item3 is False)
    assert (cfg.item4 == 'test_base')
    # e.py contains an invalid merge and must raise.
    with pytest.raises(TypeError):
        Config.fromfile(osp.join(data_path, 'config/e.py'))
def test_merge_from_multiple_bases():
    """Values from several _base_ files should all land in the child config."""
    path = osp.join(data_path, 'config/l.py')
    cfg = Config.fromfile(path)
    assert isinstance(cfg, Config)
    assert cfg.filename == path
    # Items contributed by the different bases plus the child itself.
    assert cfg.item1 == [1, 2]
    assert cfg.item2.a == 0
    assert cfg.item3 is False
    assert cfg.item4 == 'test'
    assert cfg.item5 == dict(a=0, b=1)
    assert cfg.item6 == [dict(a=0), dict(b=1)]
    assert cfg.item7 == dict(a=[0, 1, 2], b=dict(c=[3.1, 4.2, 5.3]))
    # m.py defines a key duplicated across its bases and must be rejected.
    with pytest.raises(KeyError):
        Config.fromfile(osp.join(data_path, 'config/m.py'))
def test_base_variables():
    """{{ _base_.xxx }} references should be substituted across .py/.json/.yaml
    configs, including nested containers and chained bases."""
    # Direct references to base items.
    for file in ['t.py', 't.json', 't.yaml']:
        cfg_file = osp.join(data_path, f'config/{file}')
        cfg = Config.fromfile(cfg_file)
        assert isinstance(cfg, Config)
        assert (cfg.filename == cfg_file)
        assert (cfg.item1 == [1, 2])
        assert (cfg.item2.a == 0)
        assert (cfg.item3 is False)
        assert (cfg.item4 == 'test')
        assert (cfg.item5 == dict(a=0, b=1))
        assert (cfg.item6 == [dict(a=0), dict(b=1)])
        assert (cfg.item7 == dict(a=[0, 1, 2], b=dict(c=[3.1, 4.2, 5.3])))
        assert (cfg.item8 == file)
        assert (cfg.item9 == dict(a=0))
        assert (cfg.item10 == [3.1, 4.2, 5.3])
    # References to items that themselves came from a base.
    for file in ['u.py', 'u.json', 'u.yaml']:
        cfg_file = osp.join(data_path, f'config/{file}')
        cfg = Config.fromfile(cfg_file)
        assert isinstance(cfg, Config)
        assert (cfg.filename == cfg_file)
        # A bare '_base_.item8' string (no braces) is left untouched.
        assert (cfg.base == '_base_.item8')
        assert (cfg.item1 == [1, 2])
        assert (cfg.item2.a == 0)
        assert (cfg.item3 is False)
        assert (cfg.item4 == 'test')
        assert (cfg.item5 == dict(a=0, b=1))
        assert (cfg.item6 == [dict(a=0), dict(b=1)])
        assert (cfg.item7 == dict(a=[0, 1, 2], b=dict(c=[3.1, 4.2, 5.3])))
        assert (cfg.item8 == 't.py')
        assert (cfg.item9 == dict(a=0))
        assert (cfg.item10 == [3.1, 4.2, 5.3])
        assert (cfg.item11 == 't.py')
        assert (cfg.item12 == dict(a=0))
        assert (cfg.item13 == [3.1, 4.2, 5.3])
        assert (cfg.item14 == [1, 2])
        assert (cfg.item15 == dict(a=dict(b=dict(a=0)), b=[False], c=['test'], d=[[{'e': 0}], [{'a': 0}, {'b': 1}]], e=[1, 2]))
    # Substitutions nested inside dicts/lists.
    cfg_file = osp.join(data_path, 'config/v.py')
    cfg = Config.fromfile(cfg_file)
    assert isinstance(cfg, Config)
    assert (cfg.filename == cfg_file)
    assert (cfg.item21 == 't.py')
    assert (cfg.item22 == 't.py')
    assert (cfg.item23 == [3.1, 4.2, 5.3])
    assert (cfg.item24 == [3.1, 4.2, 5.3])
    assert (cfg.item25 == dict(a=dict(b=[3.1, 4.2, 5.3]), b=[[3.1, 4.2, 5.3]], c=[[{'e': 't.py'}], [{'a': 0}, {'b': 1}]], e='t.py'))
def test_merge_recursive_bases():
    """_base_ chains should resolve recursively before merging."""
    path = osp.join(data_path, 'config/f.py')
    cfg = Config.fromfile(path)
    assert isinstance(cfg, Config)
    assert cfg.filename == path
    # Values overridden at the deepest level win.
    assert cfg.item1 == [2, 3]
    assert cfg.item2.a == 1
    assert cfg.item3 is False
    assert cfg.item4 == 'test_recursive_bases'
def test_merge_from_dict():
    """merge_from_dict should support dotted keys, including list indices."""
    cfg = Config.fromfile(osp.join(data_path, 'config/a.py'))
    cfg.merge_from_dict({'item2.a': 1, 'item2.b': 0.1, 'item3': False})
    assert cfg.item2 == dict(a=1, b=0.1)
    assert cfg.item3 is False

    # Numeric path segments index into lists when allow_list_keys=True.
    cfg = Config.fromfile(osp.join(data_path, 'config/s.py'))
    cfg.merge_from_dict({'item.0.a': 1, 'item.1.b': 1}, allow_list_keys=True)
    assert cfg.item == [{'a': 1}, {'b': 1, 'c': 0}]

    # Without allow_list_keys, list indexing is a TypeError.
    with pytest.raises(TypeError):
        cfg.merge_from_dict({'item.0.a': 1, 'item.1.b': 1}, allow_list_keys=False)

    # An out-of-range index is a KeyError.
    with pytest.raises(KeyError):
        cfg.merge_from_dict({'item.2.a': 1}, allow_list_keys=True)
def test_merge_delete():
    """A _delete_=True marker should replace, not merge, the inherited value."""
    cfg = Config.fromfile(osp.join(data_path, 'config/delete.py'))
    assert cfg.item1 == dict(a=0)
    assert cfg.item2 == dict(a=0, b=0)
    assert cfg.item3 is True
    assert cfg.item4 == 'test'
    # The sentinel key itself must not survive merging.
    assert '_delete_' not in cfg.item2
    # Nested dicts stay ConfigDict instances after the merge.
    assert type(cfg.item1) == ConfigDict
    assert type(cfg.item2) == ConfigDict
def test_merge_intermediate_variable():
    """Intermediate variables redefined by a child config should only affect
    values that reference them in the child."""
    cfg = Config.fromfile(osp.join(data_path, 'config/i_child.py'))
    # Values inherited unchanged from the base.
    assert cfg.item1 == [1, 2]
    assert cfg.item2 == dict(a=0)
    assert cfg.item3 is True
    assert cfg.item4 == 'test'
    # The intermediate variable carries the child's value...
    assert cfg.item_cfg == dict(b=2)
    # ...while item5 keeps the base expansion and item6 sees the child's.
    assert cfg.item5 == dict(cfg=dict(b=1))
    assert cfg.item6 == dict(cfg=dict(b=2))
def test_fromfile_in_config():
    """A config file may itself call Config.fromfile to embed another config."""
    cfg = Config.fromfile(osp.join(data_path, 'config/code.py'))
    # Attributes of the nested config loaded inside code.py.
    assert cfg.cfg.item1 == [1, 2]
    assert cfg.cfg.item2 == dict(a=0)
    assert cfg.cfg.item3 is True
    assert cfg.cfg.item4 == 'test'
    # A plain value defined alongside the nested config.
    assert cfg.item5 == 1
def test_dict():
    """Config should behave like a mapping: len/keys/values/items, attribute
    and item access, get() with default, membership, and update()."""
    cfg_dict = dict(item1=[1, 2], item2=dict(a=0), item3=True, item4='test')
    for filename in ['a.py', 'b.json', 'c.yaml']:
        cfg_file = osp.join(data_path, 'config', filename)
        cfg = Config.fromfile(cfg_file)
        # len and keys match the reference dict.
        assert (len(cfg) == 4)
        assert (set(cfg.keys()) == set(cfg_dict.keys()))
        assert (set(cfg._cfg_dict.keys()) == set(cfg_dict.keys()))
        for value in cfg.values():
            assert (value in cfg_dict.values())
        for (name, value) in cfg.items():
            assert (name in cfg_dict)
            assert (value in cfg_dict.values())
        # Attribute access, including nested attribute chains.
        assert (cfg.item1 == cfg_dict['item1'])
        assert (cfg.item2 == cfg_dict['item2'])
        assert (cfg.item2.a == 0)
        assert (cfg.item3 == cfg_dict['item3'])
        assert (cfg.item4 == cfg_dict['item4'])
        with pytest.raises(AttributeError):
            cfg.not_exist
        # Item access and get().
        for name in ['item1', 'item2', 'item3', 'item4']:
            assert (name in cfg)
            assert (cfg[name] == cfg_dict[name])
            assert (cfg.get(name) == cfg_dict[name])
        assert (cfg.get('not_exist') is None)
        assert (cfg.get('not_exist', 0) == 0)
        with pytest.raises(KeyError):
            cfg['not_exist']
        assert ('item1' in cfg)
        assert ('not_exist' not in cfg)
        # update() writes in place.
        cfg.update(dict(item1=0))
        assert (cfg.item1 == 0)
        cfg.update(dict(item2=dict(a=1)))
        assert (cfg.item2.a == 1)
def test_setattr():
    """Attribute and item assignment should both write into _cfg_dict."""
    cfg = Config()
    cfg.item1 = [1, 2]               # attribute-style write
    cfg.item2 = {'a': 0}
    cfg['item5'] = {'a': {'b': None}}  # item-style write
    # item1: visible through the backing dict and as an attribute.
    assert cfg._cfg_dict['item1'] == [1, 2]
    assert cfg.item1 == [1, 2]
    # item2: nested dicts support chained attribute access.
    assert cfg._cfg_dict['item2'] == {'a': 0}
    assert cfg.item2.a == 0
    # item5: deeper nesting behaves the same, None included.
    assert cfg._cfg_dict['item5'] == {'a': {'b': None}}
    assert cfg.item5.a.b is None
def test_pretty_text():
    """pretty_text must itself be a loadable config equivalent to the original."""
    cfg = Config.fromfile(osp.join(data_path, 'config/l.py'))
    with tempfile.TemporaryDirectory() as tmp_dir:
        out_path = osp.join(tmp_dir, '_text_config.py')
        with open(out_path, 'w') as f:
            f.write(cfg.pretty_text)
        reloaded = Config.fromfile(out_path)
        assert reloaded._cfg_dict == cfg._cfg_dict
def test_dict_action():
    """DictAction should parse key=value CLI options with type inference:
    ints, floats, bools, strings, and nested lists/tuples."""
    parser = argparse.ArgumentParser(description='Train a detector')
    parser.add_argument('--options', nargs='+', action=DictAction, help='custom options')
    # Comma-separated values become lists; (..) a tuple; 'false' -> False.
    args = parser.parse_args(['--options', 'item2.a=a,b', 'item2.b=[(a,b), [1,2], false]'])
    out_dict = {'item2.a': ['a', 'b'], 'item2.b': [('a', 'b'), [1, 2], False]}
    assert (args.options == out_dict)
    # Nested single-element lists parse too.
    args = parser.parse_args(['--options', 'item2.a=[[1]]'])
    out_dict = {'item2.a': [[1]]}
    assert (args.options == out_dict)
    # Unbalanced brackets are rejected.
    with pytest.raises(AssertionError):
        parser.parse_args(['--options', 'item2.a=[(a,b), [1,2], false'])
    # Scalar type inference: int, float, str, bool.
    args = parser.parse_args(['--options', 'item2.a=1', 'item2.b=0.1', 'item2.c=x', 'item3=false'])
    out_dict = {'item2.a': 1, 'item2.b': 0.1, 'item2.c': 'x', 'item3': False}
    assert (args.options == out_dict)
    # The parsed options merge straight into a Config.
    cfg_file = osp.join(data_path, 'config/a.py')
    cfg = Config.fromfile(cfg_file)
    cfg.merge_from_dict(args.options)
    assert (cfg.item2 == dict(a=1, b=0.1, c='x'))
    assert (cfg.item3 is False)
def test_dump_mapping():
    """A dumped config containing mapping values must reload identically."""
    cfg = Config.fromfile(osp.join(data_path, 'config/n.py'))
    with tempfile.TemporaryDirectory() as tmp_dir:
        out_path = osp.join(tmp_dir, '_text_config.py')
        cfg.dump(out_path)
        reloaded = Config.fromfile(out_path)
        assert reloaded._cfg_dict == cfg._cfg_dict
def test_reserved_key():
    """Configs defining reserved keys must be rejected with a KeyError."""
    bad_cfg = osp.join(data_path, 'config/g.py')
    with pytest.raises(KeyError):
        Config.fromfile(bad_cfg)