code
stringlengths
17
6.64M
def test_build_runner():
    """build_runner constructs the right runner from cfg and rejects ambiguous cfgs."""
    temp_root = tempfile.gettempdir()
    # Random work_dir name to avoid collisions between test runs.
    dir_name = ''.join([random.choice(string.ascii_letters) for _ in range(10)])
    default_args = dict(model=Model(), work_dir=osp.join(temp_root, dir_name), logger=logging.getLogger())
    cfg = dict(type='EpochBasedRunner', max_epochs=1)
    runner = build_runner(cfg, default_args=default_args)
    assert (runner._max_epochs == 1)
    cfg = dict(type='IterBasedRunner', max_iters=1)
    runner = build_runner(cfg, default_args=default_args)
    assert (runner._max_iters == 1)
    # Specifying both max_epochs and max_iters is ambiguous and must fail.
    with pytest.raises(ValueError, match='Only one of'):
        cfg = dict(type='IterBasedRunner', max_epochs=1, max_iters=1)
        runner = build_runner(cfg, default_args=default_args)
@pytest.mark.parametrize('runner_class', RUNNERS.module_dict.values())
def test_epoch_based_runner(runner_class):
    """Constructor validation for every registered runner class."""
    # batch_processor-style usage is deprecated but still accepted.
    with pytest.warns(DeprecationWarning):
        model = OldStyleModel()

        def batch_processor():
            pass
        _ = runner_class(model, batch_processor, logger=logging.getLogger())
    # batch_processor must be callable.
    with pytest.raises(TypeError):
        model = OldStyleModel()
        _ = runner_class(model, batch_processor=0, logger=logging.getLogger())
    # optimizer must be a torch Optimizer ...
    with pytest.raises(TypeError):
        model = Model()
        optimizer = 'NotAOptimizer'
        _ = runner_class(model, optimizer=optimizer, logger=logging.getLogger())
    # ... including every value when a dict of optimizers is given.
    # NOTE(review): torch.optim.Adam() with no params itself raises TypeError,
    # so this may trip before reaching the runner's validation — confirm.
    with pytest.raises(TypeError):
        model = Model()
        optimizers = dict(optim1=torch.optim.Adam(), optim2='NotAOptimizer')
        _ = runner_class(model, optimizer=optimizers, logger=logging.getLogger())
    # A logger is mandatory ...
    with pytest.raises(TypeError):
        model = Model()
        _ = runner_class(model, logger=None)
    # ... and meta must not be a list.
    with pytest.raises(TypeError):
        model = Model()
        _ = runner_class(model, logger=logging.getLogger(), meta=['list'])
    # An OldStyleModel without a batch_processor is rejected.
    with pytest.raises(AssertionError):
        model = OldStyleModel()
        _ = runner_class(model, logger=logging.getLogger())
    # work_dir must be a string (or None), not an int.
    with pytest.raises(TypeError):
        model = Model()
        _ = runner_class(model, work_dir=1, logger=logging.getLogger())
    # Supplying both a new-style Model and a batch_processor is ambiguous.
    with pytest.raises(RuntimeError):

        def batch_processor():
            pass
        model = Model()
        _ = runner_class(model, batch_processor, logger=logging.getLogger())
    # A valid construction creates work_dir on disk; doing it twice is idempotent.
    model = Model()
    temp_root = tempfile.gettempdir()
    dir_name = ''.join([random.choice(string.ascii_letters) for _ in range(10)])
    work_dir = osp.join(temp_root, dir_name)
    _ = runner_class(model, work_dir=work_dir, logger=logging.getLogger())
    assert osp.isdir(work_dir)
    _ = runner_class(model, work_dir=work_dir, logger=logging.getLogger())
    assert osp.isdir(work_dir)
    os.removedirs(work_dir)
@pytest.mark.parametrize('runner_class', RUNNERS.module_dict.values())
def test_runner_with_parallel(runner_class):
    """Runner construction must also accept MMDataParallel-wrapped models."""

    def batch_processor():
        pass
    # Wrapped old-style model plus a batch_processor is accepted.
    model = MMDataParallel(OldStyleModel())
    _ = runner_class(model, batch_processor, logger=logging.getLogger())
    # Wrapped new-style model needs no batch_processor.
    model = MMDataParallel(Model())
    _ = runner_class(model, logger=logging.getLogger())
    # Both a train_step-capable model and a batch_processor is ambiguous.
    with pytest.raises(RuntimeError):

        def batch_processor():
            pass
        model = MMDataParallel(Model())
        _ = runner_class(model, batch_processor, logger=logging.getLogger())
@pytest.mark.parametrize('runner_class', RUNNERS.module_dict.values())
def test_save_checkpoint(runner_class):
    """save_checkpoint writes a numbered checkpoint plus a 'latest.pth' alias."""
    model = Model()
    runner = runner_class(model=model, logger=logging.getLogger())
    # meta must be a dict, not a list.
    with pytest.raises(TypeError):
        runner.save_checkpoint('.', meta=list())
    with tempfile.TemporaryDirectory() as root:
        runner.save_checkpoint(root)
        latest_path = osp.join(root, 'latest.pth')
        assert osp.exists(latest_path)
        # NOTE(review): if a registered runner is neither Epoch- nor Iter-based,
        # first_ckp_path is unbound below and the assert raises NameError —
        # confirm the registry only contains these two families.
        if isinstance(runner, EpochBasedRunner):
            first_ckp_path = osp.join(root, 'epoch_1.pth')
        elif isinstance(runner, IterBasedRunner):
            first_ckp_path = osp.join(root, 'iter_1.pth')
        assert osp.exists(first_ckp_path)
        if (platform.system() != 'Windows'):
            # On POSIX 'latest.pth' resolves to the numbered checkpoint.
            assert (osp.realpath(latest_path) == osp.realpath(first_ckp_path))
        else:
            # Windows has no symlink here; nothing to compare.
            pass
        # The saved checkpoint must be loadable by torch.
        torch.load(latest_path)
@pytest.mark.parametrize('runner_class', RUNNERS.module_dict.values())
def test_build_lr_momentum_hook(runner_class):
    """Each registered lr/momentum cfg adds exactly one hook to the runner."""
    model = Model()
    runner = runner_class(model=model, logger=logging.getLogger())
    # LR hooks; policy names are accepted in both cases ('Cyclic'/'cyclic').
    lr_config = dict(policy='CosineAnnealing', by_epoch=False, min_lr_ratio=0, warmup_iters=2, warmup_ratio=0.9)
    runner.register_lr_hook(lr_config)
    assert (len(runner.hooks) == 1)
    lr_config = dict(policy='Cyclic', by_epoch=False, target_ratio=(10, 1), cyclic_times=1, step_ratio_up=0.4)
    runner.register_lr_hook(lr_config)
    assert (len(runner.hooks) == 2)
    lr_config = dict(policy='cyclic', by_epoch=False, target_ratio=((0.85 / 0.95), 1), cyclic_times=1, step_ratio_up=0.4)
    runner.register_lr_hook(lr_config)
    assert (len(runner.hooks) == 3)
    lr_config = dict(policy='Step', warmup='linear', warmup_iters=500, warmup_ratio=(1.0 / 3), step=[8, 11])
    runner.register_lr_hook(lr_config)
    assert (len(runner.hooks) == 4)
    lr_config = dict(policy='step', warmup='linear', warmup_iters=500, warmup_ratio=(1.0 / 3), step=[8, 11])
    runner.register_lr_hook(lr_config)
    assert (len(runner.hooks) == 5)
    # Momentum hooks follow the same registration pattern.
    mom_config = dict(policy='CosineAnnealing', min_momentum_ratio=(0.99 / 0.95), by_epoch=False, warmup_iters=2, warmup_ratio=(0.9 / 0.95))
    runner.register_momentum_hook(mom_config)
    assert (len(runner.hooks) == 6)
    mom_config = dict(policy='Cyclic', by_epoch=False, target_ratio=((0.85 / 0.95), 1), cyclic_times=1, step_ratio_up=0.4)
    runner.register_momentum_hook(mom_config)
    assert (len(runner.hooks) == 7)
    mom_config = dict(policy='cyclic', by_epoch=False, target_ratio=((0.85 / 0.95), 1), cyclic_times=1, step_ratio_up=0.4)
    runner.register_momentum_hook(mom_config)
    assert (len(runner.hooks) == 8)
@pytest.mark.parametrize('runner_class', RUNNERS.module_dict.values())
def test_register_timer_hook(runner_class):
    """register_timer_hook accepts None, a cfg dict, or a Hook instance."""
    model = Model()
    runner = runner_class(model=model, logger=logging.getLogger())
    # None registers nothing.
    timer_config = None
    runner.register_timer_hook(timer_config)
    assert (len(runner.hooks) == 0)
    # A cfg dict is built into an IterTimerHook.
    timer_config = dict(type='IterTimerHook')
    runner.register_timer_hook(timer_config)
    assert (len(runner.hooks) == 1)
    assert isinstance(runner.hooks[0], IterTimerHook)
    # A ready-made Hook instance is registered as-is.
    timer_config = IterTimerHook()
    runner.register_timer_hook(timer_config)
    assert (len(runner.hooks) == 2)
    assert isinstance(runner.hooks[1], IterTimerHook)
def test_set_random_seed():
    """set_random_seed seeds python/numpy/torch and toggles the cudnn flags."""
    set_random_seed(0)
    a_random = random.randint(0, 10)
    a_np_random = np.random.rand(2, 2)
    a_torch_random = torch.rand(2, 2)
    # Without deterministic=True both cudnn flags stay off.
    assert (torch.backends.cudnn.deterministic is False)
    assert (torch.backends.cudnn.benchmark is False)
    assert (os.environ['PYTHONHASHSEED'] == str(0))
    # Re-seeding with the same value must reproduce identical draws.
    set_random_seed(0, True)
    b_random = random.randint(0, 10)
    b_np_random = np.random.rand(2, 2)
    b_torch_random = torch.rand(2, 2)
    assert (torch.backends.cudnn.deterministic is True)
    if is_rocm_pytorch:
        # On ROCm builds benchmark is expected True — presumably left on by
        # set_random_seed there; confirm against the mmcv implementation.
        assert (torch.backends.cudnn.benchmark is True)
    else:
        assert (torch.backends.cudnn.benchmark is False)
    assert (a_random == b_random)
    assert np.equal(a_np_random, b_np_random).all()
    assert torch.equal(a_torch_random, b_torch_random)
def test_construct():
    """Config from dicts/files, and dump() round-trips for py/json/yaml."""
    # An empty Config: no filename, empty text, zero items.
    cfg = Config()
    assert (cfg.filename is None)
    assert (cfg.text == '')
    assert (len(cfg) == 0)
    assert (cfg._cfg_dict == {})
    # Only dicts are accepted as the initial cfg_dict.
    with pytest.raises(TypeError):
        Config([0, 1])
    cfg_dict = dict(item1=[1, 2], item2=dict(a=0), item3=True, item4='test')
    # --- .py config: dump() equals pretty_text and round-trips via fromfile ---
    cfg_file = osp.join(data_path, 'config/a.py')
    cfg = Config(cfg_dict, filename=cfg_file)
    assert isinstance(cfg, Config)
    assert (cfg.filename == cfg_file)
    assert (cfg.text == open(cfg_file, 'r').read())
    assert (cfg.dump() == cfg.pretty_text)
    with tempfile.TemporaryDirectory() as temp_config_dir:
        dump_file = osp.join(temp_config_dir, 'a.py')
        cfg.dump(dump_file)
        assert (cfg.dump() == open(dump_file, 'r').read())
        assert Config.fromfile(dump_file)
    # --- .json config: dump() serializes with json.dumps ---
    cfg_file = osp.join(data_path, 'config/b.json')
    cfg = Config(cfg_dict, filename=cfg_file)
    assert isinstance(cfg, Config)
    assert (cfg.filename == cfg_file)
    assert (cfg.text == open(cfg_file, 'r').read())
    assert (cfg.dump() == json.dumps(cfg_dict))
    with tempfile.TemporaryDirectory() as temp_config_dir:
        dump_file = osp.join(temp_config_dir, 'b.json')
        cfg.dump(dump_file)
        assert (cfg.dump() == open(dump_file, 'r').read())
        assert Config.fromfile(dump_file)
    # --- .yaml config: dump() serializes with yaml.dump ---
    cfg_file = osp.join(data_path, 'config/c.yaml')
    cfg = Config(cfg_dict, filename=cfg_file)
    assert isinstance(cfg, Config)
    assert (cfg.filename == cfg_file)
    assert (cfg.text == open(cfg_file, 'r').read())
    assert (cfg.dump() == yaml.dump(cfg_dict))
    with tempfile.TemporaryDirectory() as temp_config_dir:
        dump_file = osp.join(temp_config_dir, 'c.yaml')
        cfg.dump(dump_file)
        assert (cfg.dump() == open(dump_file, 'r').read())
        assert Config.fromfile(dump_file)
    # --- predefined variables ({{fileBasename}} etc.) in a .py config ---
    cfg_file = osp.join(data_path, 'config/h.py')
    path = osp.join(osp.dirname(__file__), 'data', 'config')
    # POSIX form so the expected value also matches on Windows.
    path = Path(path).as_posix()
    cfg_dict = dict(item1='h.py', item2=path, item3='abc_h')
    cfg = Config(cfg_dict, filename=cfg_file)
    assert isinstance(cfg, Config)
    assert (cfg.filename == cfg_file)
    assert (cfg.text == open(cfg_file, 'r').read())
    assert (cfg.dump() == cfg.pretty_text)
    with tempfile.TemporaryDirectory() as temp_config_dir:
        dump_file = osp.join(temp_config_dir, 'h.py')
        cfg.dump(dump_file)
        assert (cfg.dump() == open(dump_file, 'r').read())
        assert Config.fromfile(dump_file)
        assert (Config.fromfile(dump_file)['item1'] == cfg_dict['item1'])
        assert (Config.fromfile(dump_file)['item2'] == cfg_dict['item2'])
        assert (Config.fromfile(dump_file)['item3'] == cfg_dict['item3'])
    # With use_predefined_variables=False the raw templates are preserved.
    cfg_dict = dict(item1='{{fileBasename}}', item2='{{ fileDirname}}', item3='abc_{{ fileBasenameNoExtension }}')
    assert Config.fromfile(cfg_file, False)
    assert (Config.fromfile(cfg_file, False)['item1'] == cfg_dict['item1'])
    assert (Config.fromfile(cfg_file, False)['item2'] == cfg_dict['item2'])
    assert (Config.fromfile(cfg_file, False)['item3'] == cfg_dict['item3'])
    # --- predefined variables in a .yaml config ---
    cfg_file = osp.join(data_path, 'config/p.yaml')
    cfg_dict = dict(item1=osp.join(osp.dirname(__file__), 'data', 'config'))
    cfg = Config(cfg_dict, filename=cfg_file)
    assert isinstance(cfg, Config)
    assert (cfg.filename == cfg_file)
    assert (cfg.text == open(cfg_file, 'r').read())
    assert (cfg.dump() == yaml.dump(cfg_dict))
    with tempfile.TemporaryDirectory() as temp_config_dir:
        dump_file = osp.join(temp_config_dir, 'p.yaml')
        cfg.dump(dump_file)
        assert (cfg.dump() == open(dump_file, 'r').read())
        assert Config.fromfile(dump_file)
        assert (Config.fromfile(dump_file)['item1'] == cfg_dict['item1'])
    assert Config.fromfile(cfg_file, False)
    assert (Config.fromfile(cfg_file, False)['item1'] == '{{ fileDirname }}')
    # --- predefined variables in a .json config ---
    cfg_file = osp.join(data_path, 'config/o.json')
    cfg_dict = dict(item1=osp.join(osp.dirname(__file__), 'data', 'config'))
    cfg = Config(cfg_dict, filename=cfg_file)
    assert isinstance(cfg, Config)
    assert (cfg.filename == cfg_file)
    assert (cfg.text == open(cfg_file, 'r').read())
    assert (cfg.dump() == json.dumps(cfg_dict))
    with tempfile.TemporaryDirectory() as temp_config_dir:
        dump_file = osp.join(temp_config_dir, 'o.json')
        cfg.dump(dump_file)
        assert (cfg.dump() == open(dump_file, 'r').read())
        assert Config.fromfile(dump_file)
        assert (Config.fromfile(dump_file)['item1'] == cfg_dict['item1'])
    assert Config.fromfile(cfg_file, False)
    assert (Config.fromfile(cfg_file, False)['item1'] == '{{ fileDirname }}')
def test_fromfile():
    """Config.fromfile for every format, custom-module imports, and bad paths."""
    for filename in ['a.py', 'a.b.py', 'b.json', 'c.yaml']:
        cfg_file = osp.join(data_path, 'config', filename)
        cfg = Config.fromfile(cfg_file)
        assert isinstance(cfg, Config)
        assert (cfg.filename == cfg_file)
        # text is the absolute path on its own line followed by the raw file.
        assert (cfg.text == ((osp.abspath(osp.expanduser(cfg_file)) + '\n') + open(cfg_file, 'r').read()))
    # import_custom_modules=True imports modules named in the config;
    # presumably r.py sets TEST_VALUE on import (verified by the pop below).
    cfg_file = osp.join(data_path, 'config', 'q.py')
    imported_file = osp.join(data_path, 'config', 'r.py')
    target_pkg = osp.join(osp.dirname(__file__), 'r.py')
    # Copy next to this test module so the import machinery can find it.
    shutil.copy(imported_file, target_pkg)
    Config.fromfile(cfg_file, import_custom_modules=True)
    assert (os.environ.pop('TEST_VALUE') == 'test')
    os.remove(target_pkg)
    with pytest.raises(FileNotFoundError):
        Config.fromfile('no_such_file.py')
    # Unsupported extensions are rejected.
    with pytest.raises(IOError):
        Config.fromfile(osp.join(data_path, 'color.jpg'))
def test_fromstring():
    """Config.fromstring round-trips text and validates the declared format."""
    for filename in ['a.py', 'a.b.py', 'b.json', 'c.yaml']:
        cfg_file = osp.join(data_path, 'config', filename)
        file_format = osp.splitext(filename)[(- 1)]
        in_cfg = Config.fromfile(cfg_file)
        # pretty_text is python syntax, so it must parse as '.py'.
        out_cfg = Config.fromstring(in_cfg.pretty_text, '.py')
        assert (in_cfg._cfg_dict == out_cfg._cfg_dict)
        # The raw file text parses under its own extension.
        cfg_str = open(cfg_file, 'r').read()
        out_cfg = Config.fromstring(cfg_str, file_format)
        assert (in_cfg._cfg_dict == out_cfg._cfg_dict)
    # Mismatched text/format pairs must fail.
    cfg_file = osp.join(data_path, 'config', 'b.json')
    in_cfg = Config.fromfile(cfg_file)
    with pytest.raises(Exception):
        Config.fromstring(in_cfg.pretty_text, '.json')
    cfg_str = open(cfg_file, 'r').read()
    with pytest.raises(Exception):
        Config.fromstring(cfg_str, '.py')
def test_merge_from_base():
    """A child config inherits from _base_ and its overrides win."""
    cfg_file = osp.join(data_path, 'config/d.py')
    cfg = Config.fromfile(cfg_file)
    assert isinstance(cfg, Config)
    assert (cfg.filename == cfg_file)
    # text concatenates base then child, each prefixed by its absolute path.
    base_cfg_file = osp.join(data_path, 'config/base.py')
    merge_text = ((osp.abspath(osp.expanduser(base_cfg_file)) + '\n') + open(base_cfg_file, 'r').read())
    merge_text += ((('\n' + osp.abspath(osp.expanduser(cfg_file))) + '\n') + open(cfg_file, 'r').read())
    assert (cfg.text == merge_text)
    assert (cfg.item1 == [2, 3])
    assert (cfg.item2.a == 1)
    assert (cfg.item3 is False)
    assert (cfg.item4 == 'test_base')
    # Loading the malformed config e.py must raise TypeError.
    with pytest.raises(TypeError):
        Config.fromfile(osp.join(data_path, 'config/e.py'))
def test_merge_from_multiple_bases():
    """_base_ may list several files whose contents merge together."""
    cfg_file = osp.join(data_path, 'config/l.py')
    cfg = Config.fromfile(cfg_file)
    assert isinstance(cfg, Config)
    assert (cfg.filename == cfg_file)
    assert (cfg.item1 == [1, 2])
    assert (cfg.item2.a == 0)
    assert (cfg.item3 is False)
    assert (cfg.item4 == 'test')
    assert (cfg.item5 == dict(a=0, b=1))
    assert (cfg.item6 == [dict(a=0), dict(b=1)])
    assert (cfg.item7 == dict(a=[0, 1, 2], b=dict(c=[3.1, 4.2, 5.3])))
    # m.py must fail with KeyError — presumably its bases define duplicate
    # keys; confirm against the fixture file.
    with pytest.raises(KeyError):
        Config.fromfile(osp.join(data_path, 'config/m.py'))
def test_base_variables():
    """Values from _base_ can be referenced in child configs (t/u/v fixtures)."""
    # Direct references in py/json/yaml children.
    for file in ['t.py', 't.json', 't.yaml']:
        cfg_file = osp.join(data_path, f'config/{file}')
        cfg = Config.fromfile(cfg_file)
        assert isinstance(cfg, Config)
        assert (cfg.filename == cfg_file)
        assert (cfg.item1 == [1, 2])
        assert (cfg.item2.a == 0)
        assert (cfg.item3 is False)
        assert (cfg.item4 == 'test')
        assert (cfg.item5 == dict(a=0, b=1))
        assert (cfg.item6 == [dict(a=0), dict(b=1)])
        assert (cfg.item7 == dict(a=[0, 1, 2], b=dict(c=[3.1, 4.2, 5.3])))
        assert (cfg.item8 == file)
        assert (cfg.item9 == dict(a=0))
        assert (cfg.item10 == [3.1, 4.2, 5.3])
    # References that chain through a second level of base files.
    for file in ['u.py', 'u.json', 'u.yaml']:
        cfg_file = osp.join(data_path, f'config/{file}')
        cfg = Config.fromfile(cfg_file)
        assert isinstance(cfg, Config)
        assert (cfg.filename == cfg_file)
        assert (cfg.base == '_base_.item8')
        assert (cfg.item1 == [1, 2])
        assert (cfg.item2.a == 0)
        assert (cfg.item3 is False)
        assert (cfg.item4 == 'test')
        assert (cfg.item5 == dict(a=0, b=1))
        assert (cfg.item6 == [dict(a=0), dict(b=1)])
        assert (cfg.item7 == dict(a=[0, 1, 2], b=dict(c=[3.1, 4.2, 5.3])))
        assert (cfg.item8 == 't.py')
        assert (cfg.item9 == dict(a=0))
        assert (cfg.item10 == [3.1, 4.2, 5.3])
        assert (cfg.item11 == 't.py')
        assert (cfg.item12 == dict(a=0))
        assert (cfg.item13 == [3.1, 4.2, 5.3])
        assert (cfg.item14 == [1, 2])
        assert (cfg.item15 == dict(a=dict(b=dict(a=0)), b=[False], c=['test'], d=[[{'e': 0}], [{'a': 0}, {'b': 1}]], e=[1, 2]))
    # References resolved inside nested containers.
    cfg_file = osp.join(data_path, 'config/v.py')
    cfg = Config.fromfile(cfg_file)
    assert isinstance(cfg, Config)
    assert (cfg.filename == cfg_file)
    assert (cfg.item21 == 't.py')
    assert (cfg.item22 == 't.py')
    assert (cfg.item23 == [3.1, 4.2, 5.3])
    assert (cfg.item24 == [3.1, 4.2, 5.3])
    assert (cfg.item25 == dict(a=dict(b=[3.1, 4.2, 5.3]), b=[[3.1, 4.2, 5.3]], c=[[{'e': 't.py'}], [{'a': 0}, {'b': 1}]], e='t.py'))
def test_merge_recursive_bases():
    """A config whose base itself has a base merges through the whole chain."""
    cfg_file = osp.join(data_path, 'config/f.py')
    cfg = Config.fromfile(cfg_file)
    assert isinstance(cfg, Config)
    assert cfg.filename == cfg_file
    # Values overridden at the deepest level of the chain win.
    assert cfg.item1 == [2, 3]
    assert cfg.item2.a == 1
    assert cfg.item3 is False
    assert cfg.item4 == 'test_recursive_bases'
def test_merge_from_dict():
    """merge_from_dict applies dotted-key options, optionally indexing lists."""
    cfg_file = osp.join(data_path, 'config/a.py')
    cfg = Config.fromfile(cfg_file)
    input_options = {'item2.a': 1, 'item2.b': 0.1, 'item3': False}
    cfg.merge_from_dict(input_options)
    assert (cfg.item2 == dict(a=1, b=0.1))
    assert (cfg.item3 is False)
    # Numeric path segments index into lists when allow_list_keys=True.
    cfg_file = osp.join(data_path, 'config/s.py')
    cfg = Config.fromfile(cfg_file)
    input_options = {'item.0.a': 1, 'item.1.b': 1}
    cfg.merge_from_dict(input_options, allow_list_keys=True)
    assert (cfg.item == [{'a': 1}, {'b': 1, 'c': 0}])
    # Without allow_list_keys, indexing a list is a TypeError.
    input_options = {'item.0.a': 1, 'item.1.b': 1}
    with pytest.raises(TypeError):
        cfg.merge_from_dict(input_options, allow_list_keys=False)
    # Out-of-range list indices raise KeyError.
    input_options = {'item.2.a': 1}
    with pytest.raises(KeyError):
        cfg.merge_from_dict(input_options, allow_list_keys=True)
def test_merge_delete():
    """_delete_ in a child config wipes inherited keys instead of merging."""
    cfg = Config.fromfile(osp.join(data_path, 'config/delete.py'))
    assert cfg.item1 == dict(a=0)
    assert cfg.item2 == dict(a=0, b=0)
    assert cfg.item3 is True
    assert cfg.item4 == 'test'
    # The _delete_ marker itself must not leak into the merged result.
    assert '_delete_' not in cfg.item2
    # Merged sub-dicts stay ConfigDict, not plain dict.
    assert type(cfg.item1) == ConfigDict
    assert type(cfg.item2) == ConfigDict
def test_merge_intermediate_variable():
    """Intermediate variables in a child config do not clobber base values."""
    cfg = Config.fromfile(osp.join(data_path, 'config/i_child.py'))
    assert cfg.item1 == [1, 2]
    assert cfg.item2 == dict(a=0)
    assert cfg.item3 is True
    assert cfg.item4 == 'test'
    # The child redefines item_cfg; item5 keeps the base's snapshot while
    # item6 uses the child's value.
    assert cfg.item_cfg == dict(b=2)
    assert cfg.item5 == dict(cfg=dict(b=1))
    assert cfg.item6 == dict(cfg=dict(b=2))
def test_fromfile_in_config():
    """A config file may itself load another config via Config.fromfile."""
    cfg = Config.fromfile(osp.join(data_path, 'config/code.py'))
    nested = cfg.cfg
    assert nested.item1 == [1, 2]
    assert nested.item2 == dict(a=0)
    assert nested.item3 is True
    assert nested.item4 == 'test'
    assert cfg.item5 == 1
def test_dict():
    """Config supports the read-side Mapping protocol plus update()."""
    cfg_dict = dict(item1=[1, 2], item2=dict(a=0), item3=True, item4='test')
    for filename in ['a.py', 'b.json', 'c.yaml']:
        cfg_file = osp.join(data_path, 'config', filename)
        cfg = Config.fromfile(cfg_file)
        # len/keys/values/items all reflect the underlying dict.
        assert (len(cfg) == 4)
        assert (set(cfg.keys()) == set(cfg_dict.keys()))
        assert (set(cfg._cfg_dict.keys()) == set(cfg_dict.keys()))
        for value in cfg.values():
            assert (value in cfg_dict.values())
        for (name, value) in cfg.items():
            assert (name in cfg_dict)
            assert (value in cfg_dict.values())
        # Attribute access mirrors item access.
        assert (cfg.item1 == cfg_dict['item1'])
        assert (cfg.item2 == cfg_dict['item2'])
        assert (cfg.item2.a == 0)
        assert (cfg.item3 == cfg_dict['item3'])
        assert (cfg.item4 == cfg_dict['item4'])
        with pytest.raises(AttributeError):
            cfg.not_exist
        for name in ['item1', 'item2', 'item3', 'item4']:
            assert (name in cfg)
            assert (cfg[name] == cfg_dict[name])
            assert (cfg.get(name) == cfg_dict[name])
            # get() falls back like dict.get; [] raises like dict.
            assert (cfg.get('not_exist') is None)
            assert (cfg.get('not_exist', 0) == 0)
            with pytest.raises(KeyError):
                cfg['not_exist']
        assert ('item1' in cfg)
        assert ('not_exist' not in cfg)
        # update() replaces top-level values and merges into nested dicts.
        cfg.update(dict(item1=0))
        assert (cfg.item1 == 0)
        cfg.update(dict(item2=dict(a=1)))
        assert (cfg.item2.a == 1)
def test_setattr():
    """Attribute and item assignment both write through to _cfg_dict."""
    cfg = Config()
    cfg.item1 = [1, 2]
    cfg.item2 = {'a': 0}
    cfg['item5'] = {'a': {'b': None}}
    # Each write is visible via the raw dict and via attribute access alike.
    assert cfg._cfg_dict['item1'] == [1, 2]
    assert cfg.item1 == [1, 2]
    assert cfg._cfg_dict['item2'] == {'a': 0}
    assert cfg.item2.a == 0
    assert cfg._cfg_dict['item5'] == {'a': {'b': None}}
    assert cfg.item5.a.b is None
def test_pretty_text():
    """pretty_text round-trips: writing it out and re-loading gives the same dict."""
    cfg = Config.fromfile(osp.join(data_path, 'config/l.py'))
    with tempfile.TemporaryDirectory() as temp_config_dir:
        out_path = osp.join(temp_config_dir, '_text_config.py')
        with open(out_path, 'w') as f:
            f.write(cfg.pretty_text)
        reloaded = Config.fromfile(out_path)
        assert reloaded._cfg_dict == cfg._cfg_dict
def test_dict_action():
    """DictAction parses 'key=value' CLI pairs into typed python values."""
    parser = argparse.ArgumentParser(description='Train a detector')
    parser.add_argument('--options', nargs='+', action=DictAction, help='custom options')
    # Commas make lists; brackets/parens nest; 'false' becomes bool.
    args = parser.parse_args(['--options', 'item2.a=a,b', 'item2.b=[(a,b), [1,2], false]'])
    out_dict = {'item2.a': ['a', 'b'], 'item2.b': [('a', 'b'), [1, 2], False]}
    assert (args.options == out_dict)
    args = parser.parse_args(['--options', 'item2.a=[[1]]'])
    out_dict = {'item2.a': [[1]]}
    assert (args.options == out_dict)
    # Unbalanced brackets are rejected.
    with pytest.raises(AssertionError):
        parser.parse_args(['--options', 'item2.a=[(a,b), [1,2], false'])
    # Scalars are coerced to int/float/str/bool as appropriate.
    args = parser.parse_args(['--options', 'item2.a=1', 'item2.b=0.1', 'item2.c=x', 'item3=false'])
    out_dict = {'item2.a': 1, 'item2.b': 0.1, 'item2.c': 'x', 'item3': False}
    assert (args.options == out_dict)
    # Parsed options can be merged into an existing config.
    cfg_file = osp.join(data_path, 'config/a.py')
    cfg = Config.fromfile(cfg_file)
    cfg.merge_from_dict(args.options)
    assert (cfg.item2 == dict(a=1, b=0.1, c='x'))
    assert (cfg.item3 is False)
def test_dump_mapping():
    """dump() output must itself be loadable and equal to the source config."""
    cfg = Config.fromfile(osp.join(data_path, 'config/n.py'))
    with tempfile.TemporaryDirectory() as temp_config_dir:
        out_path = osp.join(temp_config_dir, '_text_config.py')
        cfg.dump(out_path)
        reloaded = Config.fromfile(out_path)
        assert reloaded._cfg_dict == cfg._cfg_dict
def test_reserved_key():
    """Configs defining reserved keys (fixture g.py) are rejected with KeyError."""
    with pytest.raises(KeyError):
        Config.fromfile(osp.join(data_path, 'config/g.py'))
def test_syntax_error():
    """A config file containing invalid Python surfaces a helpful SyntaxError."""
    temp_cfg_file = tempfile.NamedTemporaryFile(suffix='.py', delete=False)
    temp_cfg_path = temp_cfg_file.name
    # Close the handle before reopening: Windows cannot open a file that is
    # still held open by NamedTemporaryFile.
    temp_cfg_file.close()
    try:
        with open(temp_cfg_path, 'w') as f:
            # Two statements fused together — guaranteed syntax error.
            f.write('a=0b=dict(c=1)')
        with pytest.raises(SyntaxError, match='There are syntax errors in config file'):
            Config.fromfile(temp_cfg_path)
    finally:
        # Always remove the temp file, even when an assertion above fails
        # (the original version leaked it on failure).
        os.remove(temp_cfg_path)
def test_pickle_support():
    """Config objects survive a dump/load round-trip via a .pkl file."""
    cfg_file = osp.join(data_path, 'config/n.py')
    cfg = Config.fromfile(cfg_file)
    with tempfile.TemporaryDirectory() as temp_config_dir:
        pkl_cfg_filename = osp.join(temp_config_dir, '_pickle.pkl')
        # dump/load are mmcv's file I/O helpers — presumably pickle-backed for
        # the .pkl suffix; confirm against mmcv.fileio.
        dump(cfg, pkl_cfg_filename)
        pkl_cfg = load(pkl_cfg_filename)
        assert (pkl_cfg._cfg_dict == cfg._cfg_dict)
def test_deprecation():
    """Loading a deprecated config (directly or as a base) warns but still works."""
    for name in ('config/deprecated.py', 'config/deprecated_as_base.py'):
        with pytest.warns(DeprecationWarning):
            cfg = Config.fromfile(osp.join(data_path, name))
        assert cfg.item1 == 'expected'
def test_deepcopy():
    """deepcopy yields an independent Config with equal contents."""
    cfg = Config.fromfile(osp.join(data_path, 'config/n.py'))
    clone = copy.deepcopy(cfg)
    assert isinstance(clone, Config)
    # Equal contents but a distinct underlying dict object.
    assert clone._cfg_dict == cfg._cfg_dict
    assert clone._cfg_dict is not cfg._cfg_dict
    assert clone._filename == cfg._filename
    assert clone._text == cfg._text
def test_copy():
    """Shallow copy yields a new Config sharing the same underlying dict."""
    cfg = Config.fromfile(osp.join(data_path, 'config/n.py'))
    clone = copy.copy(cfg)
    assert isinstance(clone, Config)
    assert clone is not cfg
    # Shallow: the wrapper is new but the dict object is shared.
    assert clone._cfg_dict is cfg._cfg_dict
    assert clone._filename == cfg._filename
    assert clone._text == cfg._text
def test_collect_env():
    """collect_env reports the expected keys and matches live values."""
    try:
        import torch
    except ModuleNotFoundError:
        pytest.skip('skipping tests that require PyTorch')
    from mmcv.utils import collect_env
    env_info = collect_env()
    expected_keys = ['sys.platform', 'Python', 'CUDA available', 'PyTorch', 'PyTorch compiling details', 'OpenCV', 'MMCV', 'MMCV Compiler', 'MMCV CUDA Compiler']
    for key in expected_keys:
        assert (key in env_info)
    # CUDA-specific keys only appear on CUDA machines.
    if env_info['CUDA available']:
        for key in ['CUDA_HOME', 'NVCC']:
            assert (key in env_info)
    # GCC is only reported on non-Windows platforms.
    if (sys.platform != 'win32'):
        assert ('GCC' in env_info)
    # Spot-check values against live interpreter/library state.
    assert (env_info['sys.platform'] == sys.platform)
    assert (env_info['Python'] == sys.version.replace('\n', ''))
    assert (env_info['MMCV'] == mmcv.__version__)
def test_load_url():
    """Loading zip-serialized (PyTorch>=1.6) checkpoints on older torch.

    Requires network access to download.openmmlab.com.
    """
    url1 = 'https://download.openmmlab.com/mmcv/test_data/saved_in_pt1.5.pth'
    url2 = 'https://download.openmmlab.com/mmcv/test_data/saved_in_pt1.6.pth'
    # torch.hub's own loader cannot read the new zip format before 1.7.
    if (digit_version(TORCH_VERSION) < digit_version('1.7.0')):
        model_zoo.load_url(url1)
        with pytest.raises(RuntimeError):
            model_zoo.load_url(url2)
    else:
        model_zoo.load_url(url1)
        model_zoo.load_url(url2)
    # mmcv's load_url shim handles the zip format from torch 1.5 onwards.
    load_url(url1)
    if (digit_version(TORCH_VERSION) < digit_version('1.5.0')):
        with pytest.raises(RuntimeError):
            load_url(url2)
    else:
        load_url(url2)
@patch('torch.distributed.get_rank', (lambda : 0))
@patch('torch.distributed.is_initialized', (lambda : True))
@patch('torch.distributed.is_available', (lambda : True))
def test_get_logger_rank0():
    """On rank 0, get_logger attaches a stream handler and, optionally, a file handler."""
    logger = get_logger('rank0.pkg1')
    assert isinstance(logger, logging.Logger)
    assert (len(logger.handlers) == 1)
    assert isinstance(logger.handlers[0], logging.StreamHandler)
    assert (logger.handlers[0].level == logging.INFO)
    # log_level is applied to the handler.
    logger = get_logger('rank0.pkg2', log_level=logging.DEBUG)
    assert isinstance(logger, logging.Logger)
    assert (len(logger.handlers) == 1)
    assert (logger.handlers[0].level == logging.DEBUG)
    # With log_file, a FileHandler is added after the StreamHandler.
    with tempfile.NamedTemporaryFile(delete=False) as f:
        logger = get_logger('rank0.pkg3', log_file=f.name)
    assert isinstance(logger, logging.Logger)
    assert (len(logger.handlers) == 2)
    assert isinstance(logger.handlers[0], logging.StreamHandler)
    assert isinstance(logger.handlers[1], logging.FileHandler)
    # Requesting the same name returns the cached logger object.
    logger_pkg3 = get_logger('rank0.pkg3')
    assert (id(logger_pkg3) == id(logger))
    logging.shutdown()
    os.remove(f.name)
    logger_pkg3 = get_logger('rank0.pkg3.subpkg')
    # NOTE(review): this compares the object with itself and is trivially
    # true; it presumably meant to compare the child's handlers against
    # logger.handlers — confirm the intended assertion.
    assert (logger_pkg3.handlers == logger_pkg3.handlers)
@patch('torch.distributed.get_rank', (lambda : 1))
@patch('torch.distributed.is_initialized', (lambda : True))
@patch('torch.distributed.is_available', (lambda : True))
def test_get_logger_rank1():
    """On non-zero ranks, get_logger never adds a FileHandler."""
    logger = get_logger('rank1.pkg1')
    assert isinstance(logger, logging.Logger)
    assert (len(logger.handlers) == 1)
    assert isinstance(logger.handlers[0], logging.StreamHandler)
    assert (logger.handlers[0].level == logging.INFO)
    with tempfile.NamedTemporaryFile(delete=False) as f:
        # log_file is ignored on rank 1: still a single StreamHandler.
        logger = get_logger('rank1.pkg2', log_file=f.name)
    assert isinstance(logger, logging.Logger)
    assert (len(logger.handlers) == 1)
    assert (logger.handlers[0].level == logging.INFO)
    logging.shutdown()
    os.remove(f.name)
def test_print_log_print(capsys):
    """With logger=None, print_log falls back to plain print()."""
    print_log('welcome', logger=None)
    captured = capsys.readouterr()
    assert captured.out == 'welcome\n'
def test_print_log_silent(capsys, caplog):
    """logger='silent' suppresses both stdout and log records."""
    print_log('welcome', logger='silent')
    captured = capsys.readouterr()
    assert captured.out == ''
    assert not caplog.records
def test_print_log_logger(caplog):
    """print_log routes through a named or explicit logger at the given level."""
    print_log('welcome', logger='mmcv')
    assert (caplog.record_tuples[(- 1)] == ('mmcv', logging.INFO, 'welcome'))
    # level= overrides the default INFO.
    print_log('welcome', logger='mmcv', level=logging.ERROR)
    assert (caplog.record_tuples[(- 1)] == ('mmcv', logging.ERROR, 'welcome'))
    # An explicit Logger with a file handler also writes to the file.
    with tempfile.NamedTemporaryFile(delete=False) as f:
        logger = get_logger('abc', log_file=f.name)
        print_log('welcome', logger=logger)
        assert (caplog.record_tuples[(- 1)] == ('abc', logging.INFO, 'welcome'))
        with open(f.name, 'r') as fin:
            log_text = fin.read()
            # Each line is '<timestamp> - <name> - <LEVEL> - <message>'.
            regex_time = '\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2},\\d{3}'
            match = re.fullmatch((regex_time + ' - abc - INFO - welcome\\n'), log_text)
            assert (match is not None)
    logging.shutdown()
    os.remove(f.name)
def test_print_log_exception():
    """A logger argument that is neither str, Logger nor None raises TypeError."""
    with pytest.raises(TypeError):
        print_log('welcome', logger=0)
def test_to_ntuple():
    """to_Ntuple helpers repeat a scalar N times."""
    x = 2
    assert mmcv.utils.to_1tuple(x) == (x,) * 1
    assert mmcv.utils.to_2tuple(x) == (x,) * 2
    assert mmcv.utils.to_3tuple(x) == (x,) * 3
    assert mmcv.utils.to_4tuple(x) == (x,) * 4
    # to_ntuple builds the equivalent helper for arbitrary N.
    assert mmcv.utils.to_ntuple(5)(x) == (x,) * 5
    assert mmcv.utils.to_ntuple(6)(x) == (x,) * 6
def test_iter_cast():
    """iter_cast and its list/tuple wrappers cast every element."""
    assert mmcv.list_cast([1, 2, 3], int) == [1, 2, 3]
    assert mmcv.list_cast(['1.1', 2, '3'], float) == [1.1, 2.0, 3.0]
    assert mmcv.list_cast([1, 2, 3], str) == ['1', '2', '3']
    assert mmcv.tuple_cast((1, 2, 3), str) == ('1', '2', '3')
    # iter_cast itself is lazy: it yields cast items one at a time.
    assert next(mmcv.iter_cast([1, 2, 3], str)) == '1'
    # The destination must be a type ...
    with pytest.raises(TypeError):
        mmcv.iter_cast([1, 2, 3], '')
    # ... and the input must be iterable.
    with pytest.raises(TypeError):
        mmcv.iter_cast(1, str)
def test_is_seq_of():
    """is_seq_of checks element types, optionally pinning the sequence type."""
    # Any sequence type is accepted by default.
    assert mmcv.is_seq_of([1.0, 2.0, 3.0], float)
    assert mmcv.is_seq_of([(1,), (2,), (3,)], tuple)
    assert mmcv.is_seq_of((1.0, 2.0, 3.0), float)
    assert mmcv.is_list_of([1.0, 2.0, 3.0], float)
    # seq_type (and the list/tuple wrappers) constrain the container itself.
    assert not mmcv.is_seq_of((1.0, 2.0, 3.0), float, seq_type=list)
    assert not mmcv.is_tuple_of([1.0, 2.0, 3.0], float)
    # Every element must match the expected type.
    assert not mmcv.is_seq_of([1.0, 2, 3], int)
    assert not mmcv.is_seq_of((1.0, 2, 3), int)
def test_slice_list():
    """slice_list splits a flat list into chunks of the given lengths."""
    in_list = [1, 2, 3, 4, 5, 6]
    assert mmcv.slice_list(in_list, [1, 2, 3]) == [[1], [2, 3], [4, 5, 6]]
    # A single length equal to len(in_list) yields one chunk: the whole list.
    assert mmcv.slice_list(in_list, [len(in_list)]) == [in_list]
    # Lengths must be given as ints ...
    with pytest.raises(TypeError):
        mmcv.slice_list(in_list, 2.0)
    # ... and must sum to the input length.
    with pytest.raises(ValueError):
        mmcv.slice_list(in_list, [1, 2])
def test_concat_list():
    """concat_list flattens exactly one level of nesting."""
    # Single sub-list.
    assert (mmcv.concat_list([[1, 2]]) == [1, 2])
    # Multiple sub-lists of varying length.
    assert (mmcv.concat_list([[1, 2], [3, 4, 5], [6]]) == [1, 2, 3, 4, 5, 6])
    # Edge case (added): no sub-lists at all yields an empty list.
    assert (mmcv.concat_list([]) == [])
def test_requires_package(capsys):
    """@requires_package blocks calls when a listed package is missing."""

    @mmcv.requires_package('nnn')
    def func_a():
        pass

    @mmcv.requires_package(['numpy', 'n1', 'n2'])
    def func_b():
        pass

    @mmcv.requires_package('numpy')
    def func_c():
        return 1
    # Missing package: RuntimeError plus a printed hint naming the package.
    with pytest.raises(RuntimeError):
        func_a()
    (out, _) = capsys.readouterr()
    assert (out == 'Prerequisites "nnn" are required in method "func_a" but not found, please install them first.\n')
    # Only the missing subset of prerequisites is reported.
    with pytest.raises(RuntimeError):
        func_b()
    (out, _) = capsys.readouterr()
    assert (out == 'Prerequisites "n1, n2" are required in method "func_b" but not found, please install them first.\n')
    # All prerequisites present: the wrapped function runs normally.
    assert (func_c() == 1)
def test_requires_executable(capsys):
    """@requires_executable blocks calls when a listed executable is missing."""

    @mmcv.requires_executable('nnn')
    def func_a():
        pass

    @mmcv.requires_executable(['ls', 'n1', 'n2'])
    def func_b():
        pass

    @mmcv.requires_executable('mv')
    def func_c():
        return 1
    # Missing executable: RuntimeError plus a printed hint naming it.
    with pytest.raises(RuntimeError):
        func_a()
    (out, _) = capsys.readouterr()
    assert (out == 'Prerequisites "nnn" are required in method "func_a" but not found, please install them first.\n')
    # Only the missing subset is reported ('ls' exists).
    with pytest.raises(RuntimeError):
        func_b()
    (out, _) = capsys.readouterr()
    assert (out == 'Prerequisites "n1, n2" are required in method "func_b" but not found, please install them first.\n')
    # All prerequisites present: the wrapped function runs normally.
    assert (func_c() == 1)
def test_import_modules_from_strings():
    """import_modules_from_strings mirrors `import` for str/list inputs."""
    import os.path as osp_
    import sys as sys_
    (osp, sys) = mmcv.import_modules_from_strings(['os.path', 'sys'])
    assert (osp == osp_)
    assert (sys == sys_)
    # A single string returns a single module.
    osp = mmcv.import_modules_from_strings('os.path')
    assert (osp == osp_)
    # Empty-ish inputs import nothing.
    assert (mmcv.import_modules_from_strings(None) is None)
    assert (mmcv.import_modules_from_strings([]) is None)
    assert (mmcv.import_modules_from_strings('') is None)
    # Non-string inputs are rejected.
    with pytest.raises(TypeError):
        mmcv.import_modules_from_strings(1)
    with pytest.raises(TypeError):
        mmcv.import_modules_from_strings([1])
    with pytest.raises(ImportError):
        mmcv.import_modules_from_strings('_not_implemented_module')
    # allow_failed_imports downgrades the hard error to a warning + None.
    with pytest.warns(UserWarning):
        imported = mmcv.import_modules_from_strings('_not_implemented_module', allow_failed_imports=True)
        assert (imported is None)
    with pytest.warns(UserWarning):
        imported = mmcv.import_modules_from_strings(['os.path', '_not_implemented'], allow_failed_imports=True)
        assert (imported[0] == osp)
        assert (imported[1] is None)
def test_is_method_overridden():
    """is_method_overridden detects subclass overrides on classes or instances."""

    class Base():

        def foo1():
            pass

        def foo2():
            pass

    class Sub(Base):

        def foo1():
            pass
    # Class vs class.
    assert mmcv.is_method_overridden('foo1', Base, Sub)
    assert (not mmcv.is_method_overridden('foo2', Base, Sub))
    # Class vs instance.
    sub_instance = Sub()
    assert mmcv.is_method_overridden('foo1', Base, sub_instance)
    assert (not mmcv.is_method_overridden('foo2', Base, sub_instance))
    # The base must be given as a class, not an instance.
    base_instance = Base()
    with pytest.raises(AssertionError):
        mmcv.is_method_overridden('foo1', base_instance, sub_instance)
def test_has_method():
    """has_method is True only for callable attributes."""

    class Foo:

        def __init__(self, name):
            self.name = name

        def print_name(self):
            print(self.name)

    foo = Foo('foo')
    # 'name' exists but is plain data, not a method.
    assert not has_method(foo, 'name')
    assert has_method(foo, 'print_name')
def test_deprecated_api_warning():
    """deprecated_api_warning maps old kwarg names onto new ones."""

    @deprecated_api_warning(name_dict=dict(old_key='new_key'))
    def dummy_func(new_key=1):
        return new_key
    # Calling with the old name is forwarded to new_key.
    assert (dummy_func(old_key=2) == 2)
    # Supplying both the old and the new name is ambiguous and rejected.
    with pytest.raises(AssertionError):
        dummy_func(old_key=1, new_key=2)
class TestJit(object):
    """Tests for ``mmcv.jit``: container I/O, caching, gradients and options."""

    def test_add_dict(self):
        # A jitted function taking/returning dicts must match the plain
        # python version elementwise.

        @mmcv.jit
        def add_dict(oper):
            rets = (oper['x'] + oper['y'])
            return {'result': rets}

        def add_dict_pyfunc(oper):
            rets = (oper['x'] + oper['y'])
            return {'result': rets}

        a = torch.rand((3, 4))
        b = torch.rand((3, 4))
        oper = {'x': a, 'y': b}
        rets_t = add_dict(oper)
        rets = add_dict_pyfunc(oper)
        assert ('result' in rets)
        assert (rets_t['result'] == rets['result']).all()

    def test_add_list(self):
        # Lists of dicts plus extra keyword tensors must also round-trip.

        @mmcv.jit
        def add_list(oper, x, y):
            rets = {}
            for (idx, pair) in enumerate(oper):
                rets[f'k{idx}'] = (pair['x'] + pair['y'])
            rets[f'k{len(oper)}'] = (x + y)
            return rets

        def add_list_pyfunc(oper, x, y):
            rets = {}
            for (idx, pair) in enumerate(oper):
                rets[f'k{idx}'] = (pair['x'] + pair['y'])
            rets[f'k{len(oper)}'] = (x + y)
            return rets

        pair_num = 3
        oper = []
        for _ in range(pair_num):
            oper.append({'x': torch.rand((3, 4)), 'y': torch.rand((3, 4))})
        a = torch.rand((3, 4))
        b = torch.rand((3, 4))
        rets = add_list_pyfunc(oper, x=a, y=b)
        rets_t = add_list(oper, x=a, y=b)
        for idx in range((pair_num + 1)):
            assert (f'k{idx}' in rets_t)
            assert (rets[f'k{idx}'] == rets_t[f'k{idx}']).all()

    @skip_no_parrots
    def test_jit_cache(self):
        # Each distinct control-flow path through the function produces its
        # own cache entry.

        @mmcv.jit
        def func(oper):
            if (oper['const'] > 1):
                return ((oper['x'] * 2) + oper['y'])
            else:
                return ((oper['x'] * 2) - oper['y'])

        def pyfunc(oper):
            if (oper['const'] > 1):
                return ((oper['x'] * 2) + oper['y'])
            else:
                return ((oper['x'] * 2) - oper['y'])

        assert (len(func._cache._cache) == 0)
        oper = {'const': 2, 'x': torch.rand((3, 4)), 'y': torch.rand((3, 4))}
        rets_plus = pyfunc(oper)
        rets_plus_t = func(oper)
        assert (rets_plus == rets_plus_t).all()
        assert (len(func._cache._cache) == 1)
        # Switch to the 'else' branch: a second graph must be cached.
        oper['const'] = 0.5
        rets_minus = pyfunc(oper)
        rets_minus_t = func(oper)
        assert (rets_minus == rets_minus_t).all()
        assert (len(func._cache._cache) == 2)
        # (plus + minus) / 4 == x, cross-checking both cached graphs.
        rets_a = ((rets_minus_t + rets_plus_t) / 4)
        assert torch.allclose(oper['x'], rets_a)

    @skip_no_parrots
    def test_jit_shape(self):
        # A new input shape triggers a fresh trace (new cache entry).

        @mmcv.jit
        def func(a):
            return (a + 1)

        assert (len(func._cache._cache) == 0)
        a = torch.ones((3, 4))
        r = func(a)
        assert (r.shape == (3, 4))
        assert (r == 2).all()
        assert (len(func._cache._cache) == 1)
        a = torch.ones((2, 3, 4))
        r = func(a)
        assert (r.shape == (2, 3, 4))
        assert (r == 2).all()
        assert (len(func._cache._cache) == 2)

    @skip_no_parrots
    def test_jit_kwargs(self):
        # Positional vs keyword call styles must share one cache entry.

        @mmcv.jit
        def func(a, b):
            return torch.mean(((a - b) * (a - b)))

        assert (len(func._cache._cache) == 0)
        x = torch.rand((16, 32))
        y = torch.rand((16, 32))
        func(x, y)
        assert (len(func._cache._cache) == 1)
        func(x, b=y)
        assert (len(func._cache._cache) == 1)
        func(b=y, a=x)
        assert (len(func._cache._cache) == 1)

    def test_jit_derivate(self):
        # With derivate=True gradients must flow to tensors that require
        # grad, and only to those (b stays grad-free).

        @mmcv.jit(derivate=True)
        def func(x, y):
            return ((x + 2) * (y - 2))

        a = torch.rand((3, 4))
        b = torch.rand((3, 4))
        a.requires_grad = True
        c = func(a, b)
        assert c.requires_grad
        d = torch.empty_like(c)
        d.fill_(1.0)
        c.backward(d)
        # d(func)/da == (b - 2), scaled by the upstream gradient.
        assert torch.allclose(a.grad, (b - 2))
        assert (b.grad is None)
        a.grad = None
        c = func(a, b)
        assert c.requires_grad
        d = torch.empty_like(c)
        d.fill_(2.7)
        c.backward(d)
        assert torch.allclose(a.grad, (2.7 * (b - 2)))
        assert (b.grad is None)

    def test_jit_optimize(self):
        # optimize=True must not change numerical results.

        @mmcv.jit(optimize=True)
        def func(a, b):
            return torch.mean(((a - b) * (a - b)))

        def pyfunc(a, b):
            return torch.mean(((a - b) * (a - b)))

        a = torch.rand((16, 32))
        b = torch.rand((16, 32))
        c = func(a, b)
        d = pyfunc(a, b)
        assert torch.allclose(c, d)

    @mmcv.skip_no_elena
    def test_jit_coderize(self):
        # coderize=True requires CUDA; silently skip on CPU-only machines.
        if (not torch.cuda.is_available()):
            return

        @mmcv.jit(coderize=True)
        def func(a, b):
            return ((a + b) * (a - b))

        def pyfunc(a, b):
            return ((a + b) * (a - b))

        a = torch.rand((16, 32), device='cuda')
        b = torch.rand((16, 32), device='cuda')
        c = func(a, b)
        d = pyfunc(a, b)
        assert torch.allclose(c, d)

    def test_jit_value_dependent(self):
        # Value-dependent ops (torch.nonzero) must not break the jit path.

        @mmcv.jit
        def func(a, b):
            torch.nonzero(a)
            return torch.mean(((a - b) * (a - b)))

        def pyfunc(a, b):
            torch.nonzero(a)
            return torch.mean(((a - b) * (a - b)))

        a = torch.rand((16, 32))
        b = torch.rand((16, 32))
        c = func(a, b)
        d = pyfunc(a, b)
        assert torch.allclose(c, d)

    @skip_no_parrots
    def test_jit_check_input(self):
        # NOTE(review): rand_like makes the traced output nondeterministic,
        # which is presumably why check_input rejects it — confirm against
        # the mmcv.jit implementation.
        def func(x):
            y = torch.rand_like(x)
            return (x + y)

        a = torch.ones((3, 4))
        with pytest.raises(AssertionError):
            func = mmcv.jit(func, check_input=(a,))

    @skip_no_parrots
    def test_jit_partial_shape(self):
        # full_shape=False: same-rank inputs share one cache entry; a new
        # rank creates a new one.

        @mmcv.jit(full_shape=False)
        def func(a, b):
            return torch.mean(((a - b) * (a - b)))

        def pyfunc(a, b):
            return torch.mean(((a - b) * (a - b)))

        a = torch.rand((3, 4))
        b = torch.rand((3, 4))
        assert torch.allclose(func(a, b), pyfunc(a, b))
        assert (len(func._cache._cache) == 1)
        a = torch.rand((6, 5))
        b = torch.rand((6, 5))
        assert torch.allclose(func(a, b), pyfunc(a, b))
        assert (len(func._cache._cache) == 1)
        a = torch.rand((3, 4, 5))
        b = torch.rand((3, 4, 5))
        assert torch.allclose(func(a, b), pyfunc(a, b))
        assert (len(func._cache._cache) == 2)
        a = torch.rand((1, 9, 8))
        b = torch.rand((1, 9, 8))
        assert torch.allclose(func(a, b), pyfunc(a, b))
        assert (len(func._cache._cache) == 2)

    def test_instance_method(self):
        # mmcv.jit must work on instance methods, with results tracking
        # each instance's own state (self._c).

        class T(object):

            def __init__(self, shape):
                self._c = torch.rand(shape)

            @mmcv.jit
            def test_method(self, x, y):
                return ((x * self._c) + y)

        shape = (16, 32)
        t = T(shape)
        a = torch.rand(shape)
        b = torch.rand(shape)
        res = ((a * t._c) + b)
        jit_res = t.test_method(a, b)
        assert torch.allclose(res, jit_res)
        # A second instance with different state must yield its own result.
        t = T(shape)
        res = ((a * t._c) + b)
        jit_res = t.test_method(a, b)
        assert torch.allclose(res, jit_res)
def test_is_filepath():
    """Strings and ``Path`` objects count as file paths; other types do not."""
    for candidate in (__file__, 'abc', Path('/etc')):
        assert mmcv.is_filepath(candidate)
    assert not mmcv.is_filepath(0)
def test_fopen():
    """``fopen`` accepts both str and Path and yields a readable handle."""
    for path in (__file__, Path(__file__)):
        handle = mmcv.fopen(path)
        assert hasattr(handle, 'read')
def test_check_file_exist():
    """Existing files pass silently; missing files raise FileNotFoundError."""
    mmcv.check_file_exist(__file__)
    with pytest.raises(FileNotFoundError):
        mmcv.check_file_exist('no_such_file.txt')
def test_scandir():
    """Exercise ``scandir`` filtering, recursion and case sensitivity."""
    folder = osp.join(osp.dirname(osp.dirname(__file__)), 'data/for_scan')
    filenames = ['a.bin', '1.txt', '2.txt', '1.json', '2.json', '3.TXT']
    assert (set(mmcv.scandir(folder)) == set(filenames))
    assert (set(mmcv.scandir(Path(folder))) == set(filenames))
    # Suffix filtering is case-sensitive by default ('3.TXT' excluded).
    assert (set(mmcv.scandir(folder, '.txt')) == set([filename for filename in filenames if filename.endswith('.txt')]))
    # A tuple of suffixes matches any of them.
    assert (set(mmcv.scandir(folder, ('.json', '.txt'))) == set([filename for filename in filenames if filename.endswith(('.txt', '.json'))]))
    assert (set(mmcv.scandir(folder, '.png')) == set())
    # Recursive scan; hidden files ('.file') are never reported.
    filenames_recursive = ['a.bin', '1.txt', '2.txt', '1.json', '2.json', '3.TXT', osp.join('sub', '1.json'), osp.join('sub', '1.txt'), '.file']
    assert (set(mmcv.scandir(folder, recursive=True)) == set([filename for filename in filenames_recursive if (filename != '.file')]))
    assert (set(mmcv.scandir(Path(folder), recursive=True)) == set([filename for filename in filenames_recursive if (filename != '.file')]))
    assert (set(mmcv.scandir(folder, '.txt', recursive=True)) == set([filename for filename in filenames_recursive if filename.endswith('.txt')]))
    # case_sensitive=False matches both '.txt' and '.TXT'.
    assert (set(mmcv.scandir(folder, '.TXT', recursive=True, case_sensitive=False)) == set([filename for filename in filenames_recursive if filename.endswith(('.txt', '.TXT'))]))
    assert (set(mmcv.scandir(folder, ('.TXT', '.JSON'), recursive=True, case_sensitive=False)) == set([filename for filename in filenames_recursive if filename.endswith(('.txt', '.json', '.TXT'))]))
    # Invalid argument types are rejected (lazily, on iteration).
    with pytest.raises(TypeError):
        list(mmcv.scandir(123))
    with pytest.raises(TypeError):
        list(mmcv.scandir(folder, 111))
def reset_string_io(io):
    """Empty *io* and rewind it so later writes start from a clean buffer."""
    io.seek(0)
    io.truncate()
class TestProgressBar():
    """Tests for ``mmcv.ProgressBar`` rendering at start and on update."""

    def test_start(self):
        out = StringIO()
        bar_width = 20
        # Without a task count, only a completed/elapsed summary is printed.
        prog_bar = mmcv.ProgressBar(bar_width=bar_width, file=out)
        assert (out.getvalue() == 'completed: 0, elapsed: 0s')
        reset_string_io(out)
        # start=False defers all output until start() is called.
        prog_bar = mmcv.ProgressBar(bar_width=bar_width, start=False, file=out)
        assert (out.getvalue() == '')
        reset_string_io(out)
        prog_bar.start()
        assert (out.getvalue() == 'completed: 0, elapsed: 0s')
        reset_string_io(out)
        # With a task count, an empty bar and an ETA placeholder appear.
        prog_bar = mmcv.ProgressBar(10, bar_width=bar_width, file=out)
        assert (out.getvalue() == f"[{(' ' * bar_width)}] 0/10, elapsed: 0s, ETA:")
        reset_string_io(out)
        prog_bar = mmcv.ProgressBar(10, bar_width=bar_width, start=False, file=out)
        assert (out.getvalue() == '')
        reset_string_io(out)
        prog_bar.start()
        assert (out.getvalue() == f"[{(' ' * bar_width)}] 0/10, elapsed: 0s, ETA:")

    def test_update(self):
        out = StringIO()
        bar_width = 20
        # Without a total, update() reports throughput only (1 task in 1 s).
        prog_bar = mmcv.ProgressBar(bar_width=bar_width, file=out)
        time.sleep(1)
        reset_string_io(out)
        prog_bar.update()
        assert (out.getvalue() == 'completed: 1, elapsed: 1s, 1.0 tasks/s')
        reset_string_io(out)
        # With a total, update() redraws the bar with progress and ETA.
        prog_bar = mmcv.ProgressBar(10, bar_width=bar_width, file=out)
        time.sleep(1)
        reset_string_io(out)
        prog_bar.update()
        assert (out.getvalue() == f''' [{(('>' * 2) + (' ' * 18))}] 1/10, 1.0 task/s, elapsed: 1s, ETA: 9s''')

    def test_adaptive_length(self):
        # The bar width adapts to the terminal width given by COLUMNS.
        with patch.dict('os.environ', {'COLUMNS': '80'}):
            out = StringIO()
            bar_width = 20
            prog_bar = mmcv.ProgressBar(10, bar_width=bar_width, file=out)
            time.sleep(1)
            reset_string_io(out)
            prog_bar.update()
            assert (len(out.getvalue()) == 66)
            # Narrower terminal: shorter rendered line.
            os.environ['COLUMNS'] = '30'
            reset_string_io(out)
            prog_bar.update()
            assert (len(out.getvalue()) == 48)
            # Wider again: the line grows back.
            os.environ['COLUMNS'] = '60'
            reset_string_io(out)
            prog_bar.update()
            assert (len(out.getvalue()) == 60)
def sleep_1s(num):
    """Sleep one second, then return *num* unchanged.

    Used as a slow identity task so that the progress-bar tests produce
    deterministic timing output (exactly 1.0 task/s).
    """
    time.sleep(1)
    return num
def test_track_progress_list():
    """track_progress over a list renders a 3-wide bar and returns results."""
    out = StringIO()
    ret = mmcv.track_progress(sleep_1s, [1, 2, 3], bar_width=3, file=out)
    # One carriage-return refresh per completed task at 1 task/s.
    assert (out.getvalue() == '[ ] 0/3, elapsed: 0s, ETA:\r[> ] 1/3, 1.0 task/s, elapsed: 1s, ETA: 2s\r[>> ] 2/3, 1.0 task/s, elapsed: 2s, ETA: 1s\r[>>>] 3/3, 1.0 task/s, elapsed: 3s, ETA: 0s\n')
    assert (ret == [1, 2, 3])
def test_track_progress_iterator():
    """track_progress also accepts an (iterator, length) pair."""
    out = StringIO()
    ret = mmcv.track_progress(sleep_1s, ((i for i in [1, 2, 3]), 3), bar_width=3, file=out)
    assert (out.getvalue() == '[ ] 0/3, elapsed: 0s, ETA:\r[> ] 1/3, 1.0 task/s, elapsed: 1s, ETA: 2s\r[>> ] 2/3, 1.0 task/s, elapsed: 2s, ETA: 1s\r[>>>] 3/3, 1.0 task/s, elapsed: 3s, ETA: 0s\n')
    assert (ret == [1, 2, 3])
def test_track_iter_progress():
    """track_iter_progress wraps iteration, updating the bar per item."""
    out = StringIO()
    ret = []
    for num in mmcv.track_iter_progress([1, 2, 3], bar_width=3, file=out):
        ret.append(sleep_1s(num))
    assert (out.getvalue() == '[ ] 0/3, elapsed: 0s, ETA:\r[> ] 1/3, 1.0 task/s, elapsed: 1s, ETA: 2s\r[>> ] 2/3, 1.0 task/s, elapsed: 2s, ETA: 1s\r[>>>] 3/3, 1.0 task/s, elapsed: 3s, ETA: 0s\n')
    assert (ret == [1, 2, 3])
def test_track_enum_progress():
    """track_iter_progress composes with enumerate() without losing items."""
    out = StringIO()
    ret = []
    count = []
    for (i, num) in enumerate(mmcv.track_iter_progress([1, 2, 3], bar_width=3, file=out)):
        ret.append(sleep_1s(num))
        count.append(i)
    assert (out.getvalue() == '[ ] 0/3, elapsed: 0s, ETA:\r[> ] 1/3, 1.0 task/s, elapsed: 1s, ETA: 2s\r[>> ] 2/3, 1.0 task/s, elapsed: 2s, ETA: 1s\r[>>>] 3/3, 1.0 task/s, elapsed: 3s, ETA: 0s\n')
    assert (ret == [1, 2, 3])
    assert (count == [0, 1, 2])
def test_track_parallel_progress_list():
    """Parallel variant (2 workers) returns results in input order."""
    out = StringIO()
    results = mmcv.track_parallel_progress(sleep_1s, [1, 2, 3, 4], 2, bar_width=4, file=out)
    assert (results == [1, 2, 3, 4])
def test_track_parallel_progress_iterator():
    """Parallel variant also accepts an (iterator, length) pair."""
    out = StringIO()
    results = mmcv.track_parallel_progress(sleep_1s, ((i for i in [1, 2, 3, 4]), 4), 2, bar_width=4, file=out)
    assert (results == [1, 2, 3, 4])
def test_registry():
    """End-to-end behaviour of the ``mmcv.Registry`` registration APIs."""
    CATS = mmcv.Registry('cat')
    assert (CATS.name == 'cat')
    assert (CATS.module_dict == {})
    assert (len(CATS) == 0)

    # Registration via decorator.
    @CATS.register_module()
    class BritishShorthair():
        pass
    assert (len(CATS) == 1)
    assert (CATS.get('BritishShorthair') is BritishShorthair)

    # Registration via a plain call.
    class Munchkin():
        pass
    CATS.register_module(Munchkin)
    assert (len(CATS) == 2)
    assert (CATS.get('Munchkin') is Munchkin)
    assert ('Munchkin' in CATS)

    # Duplicate names are rejected unless force=True.
    with pytest.raises(KeyError):
        CATS.register_module(Munchkin)
    CATS.register_module(Munchkin, force=True)
    assert (len(CATS) == 2)
    with pytest.raises(KeyError):

        @CATS.register_module()
        class BritishShorthair():
            pass

    @CATS.register_module(force=True)
    class BritishShorthair():
        pass
    assert (len(CATS) == 2)

    # Unknown keys return None rather than raising.
    assert (CATS.get('PersianCat') is None)
    assert ('PersianCat' not in CATS)

    # One class may be registered under several alias names.
    @CATS.register_module(name=['Siamese', 'Siamese2'])
    class SiameseCat():
        pass
    assert (CATS.get('Siamese').__name__ == 'SiameseCat')
    assert (CATS.get('Siamese2').__name__ == 'SiameseCat')

    class SphynxCat():
        pass
    CATS.register_module(name='Sphynx', module=SphynxCat)
    assert (CATS.get('Sphynx') is SphynxCat)
    CATS.register_module(name=['Sphynx1', 'Sphynx2'], module=SphynxCat)
    assert (CATS.get('Sphynx2') is SphynxCat)

    # repr lists entries in registration order.
    repr_str = 'Registry(name=cat, items={'
    repr_str += "'BritishShorthair': <class 'test_registry.test_registry.<locals>.BritishShorthair'>, "
    repr_str += "'Munchkin': <class 'test_registry.test_registry.<locals>.Munchkin'>, "
    repr_str += "'Siamese': <class 'test_registry.test_registry.<locals>.SiameseCat'>, "
    repr_str += "'Siamese2': <class 'test_registry.test_registry.<locals>.SiameseCat'>, "
    repr_str += "'Sphynx': <class 'test_registry.test_registry.<locals>.SphynxCat'>, "
    repr_str += "'Sphynx1': <class 'test_registry.test_registry.<locals>.SphynxCat'>, "
    repr_str += "'Sphynx2': <class 'test_registry.test_registry.<locals>.SphynxCat'>"
    repr_str += '})'
    assert (repr(CATS) == repr_str)

    # name must be a str or a list of str.
    with pytest.raises(TypeError):
        CATS.register_module(name=7474741, module=SphynxCat)

    # Invalid positional argument types are rejected.
    with pytest.raises(TypeError):
        CATS.register_module(0)

    # Functions cannot be registered with this Registry version.
    with pytest.raises(TypeError):

        @CATS.register_module()
        def some_method():
            pass

    # The legacy positional-class APIs still work but warn of deprecation.
    with pytest.warns(DeprecationWarning):
        CATS.register_module(SphynxCat)
        assert (CATS.get('SphynxCat').__name__ == 'SphynxCat')
    with pytest.warns(DeprecationWarning):
        CATS.register_module(SphynxCat, force=True)
        assert (CATS.get('SphynxCat').__name__ == 'SphynxCat')
    with pytest.warns(DeprecationWarning):

        @CATS.register_module
        class NewCat():
            pass
        assert (CATS.get('NewCat').__name__ == 'NewCat')
    with pytest.warns(DeprecationWarning):
        CATS.deprecated_register_module(SphynxCat, force=True)
        assert (CATS.get('SphynxCat').__name__ == 'SphynxCat')
    with pytest.warns(DeprecationWarning):

        @CATS.deprecated_register_module
        class CuteCat():
            pass
        assert (CATS.get('CuteCat').__name__ == 'CuteCat')
    with pytest.warns(DeprecationWarning):

        @CATS.deprecated_register_module(force=True)
        class NewCat2():
            pass
        assert (CATS.get('NewCat2').__name__ == 'NewCat2')
def test_multi_scope_registry():
    """Parent/child registries must resolve keys across scope boundaries."""
    DOGS = mmcv.Registry('dogs')
    assert (DOGS.name == 'dogs')
    # The default scope is inferred from the defining module's name.
    assert (DOGS.scope == 'test_registry')
    assert (DOGS.module_dict == {})
    assert (len(DOGS) == 0)

    @DOGS.register_module()
    class GoldenRetriever():
        pass
    assert (len(DOGS) == 1)
    assert (DOGS.get('GoldenRetriever') is GoldenRetriever)

    # Child registry: the parent can address it via 'scope.Key'.
    HOUNDS = mmcv.Registry('dogs', parent=DOGS, scope='hound')

    @HOUNDS.register_module()
    class BloodHound():
        pass
    assert (len(HOUNDS) == 1)
    assert (HOUNDS.get('BloodHound') is BloodHound)
    assert (DOGS.get('hound.BloodHound') is BloodHound)
    assert (HOUNDS.get('hound.BloodHound') is BloodHound)

    # Grandchild scopes chain: 'hound.little_hound.Key' from the root.
    LITTLE_HOUNDS = mmcv.Registry('dogs', parent=HOUNDS, scope='little_hound')

    @LITTLE_HOUNDS.register_module()
    class Dachshund():
        pass
    assert (len(LITTLE_HOUNDS) == 1)
    assert (LITTLE_HOUNDS.get('Dachshund') is Dachshund)
    assert (LITTLE_HOUNDS.get('hound.BloodHound') is BloodHound)
    assert (HOUNDS.get('little_hound.Dachshund') is Dachshund)
    assert (DOGS.get('hound.little_hound.Dachshund') is Dachshund)

    # Sibling scopes resolve through their common ancestors.
    MID_HOUNDS = mmcv.Registry('dogs', parent=HOUNDS, scope='mid_hound')

    @MID_HOUNDS.register_module()
    class Beagle():
        pass
    assert (MID_HOUNDS.get('Beagle') is Beagle)
    assert (HOUNDS.get('mid_hound.Beagle') is Beagle)
    assert (DOGS.get('hound.mid_hound.Beagle') is Beagle)
    assert (LITTLE_HOUNDS.get('hound.mid_hound.Beagle') is Beagle)
    assert (MID_HOUNDS.get('hound.BloodHound') is BloodHound)
    # 'hound.Dachshund' does not exist: Dachshund lives in little_hound.
    assert (MID_HOUNDS.get('hound.Dachshund') is None)
def test_build_from_cfg():
    """build_from_cfg: type lookup, default_args merging and error paths."""
    BACKBONES = mmcv.Registry('backbone')

    @BACKBONES.register_module()
    class ResNet():

        def __init__(self, depth, stages=4):
            self.depth = depth
            self.stages = stages

    @BACKBONES.register_module()
    class ResNeXt():

        def __init__(self, depth, stages=4):
            self.depth = depth
            self.stages = stages

    # 'type' given as a registered string.
    cfg = dict(type='ResNet', depth=50)
    model = mmcv.build_from_cfg(cfg, BACKBONES)
    assert isinstance(model, ResNet)
    assert ((model.depth == 50) and (model.stages == 4))

    # default_args fill in missing constructor kwargs.
    cfg = dict(type='ResNet', depth=50)
    model = mmcv.build_from_cfg(cfg, BACKBONES, default_args={'stages': 3})
    assert isinstance(model, ResNet)
    assert ((model.depth == 50) and (model.stages == 3))

    cfg = dict(type='ResNeXt', depth=50, stages=3)
    model = mmcv.build_from_cfg(cfg, BACKBONES)
    assert isinstance(model, ResNeXt)
    assert ((model.depth == 50) and (model.stages == 3))

    # 'type' may also be the class object itself.
    cfg = dict(type=ResNet, depth=50)
    model = mmcv.build_from_cfg(cfg, BACKBONES)
    assert isinstance(model, ResNet)
    assert ((model.depth == 50) and (model.stages == 4))

    # 'type' may come from default_args, as a string or a class.
    cfg = dict(depth=50)
    model = mmcv.build_from_cfg(cfg, BACKBONES, default_args=dict(type='ResNet'))
    assert isinstance(model, ResNet)
    assert ((model.depth == 50) and (model.stages == 4))
    cfg = dict(depth=50)
    model = mmcv.build_from_cfg(cfg, BACKBONES, default_args=dict(type=ResNet))
    assert isinstance(model, ResNet)
    assert ((model.depth == 50) and (model.stages == 4))

    # The registry argument must be a Registry, not a string.
    with pytest.raises(TypeError):
        cfg = dict(type='VGG')
        model = mmcv.build_from_cfg(cfg, 'BACKBONES')

    # Unregistered type names raise KeyError.
    with pytest.raises(KeyError):
        cfg = dict(type='VGG')
        model = mmcv.build_from_cfg(cfg, BACKBONES)

    # default_args must be a dict (or None).
    with pytest.raises(TypeError):
        cfg = dict(type='ResNet', depth=50)
        model = mmcv.build_from_cfg(cfg, BACKBONES, default_args=1)

    # 'type' must be a str or class, not an arbitrary object.
    with pytest.raises(TypeError):
        cfg = dict(type=1000)
        model = mmcv.build_from_cfg(cfg, BACKBONES)

    # 'type' must be present in cfg or default_args.
    with pytest.raises(KeyError, match='must contain the key "type"'):
        cfg = dict(depth=50, stages=4)
        model = mmcv.build_from_cfg(cfg, BACKBONES)
    with pytest.raises(KeyError, match='must contain the key "type"'):
        cfg = dict(depth=50)
        model = mmcv.build_from_cfg(cfg, BACKBONES, default_args=dict(stages=4))

    # Argument types are validated even when cfg itself is well-formed.
    with pytest.raises(TypeError):
        cfg = dict(type='ResNet', depth=50)
        model = mmcv.build_from_cfg(cfg, 'BACKBONES')
    with pytest.raises(TypeError):
        cfg = dict(type='ResNet', depth=50)
        model = mmcv.build_from_cfg(cfg, BACKBONES, default_args=0)

    # Unexpected constructor kwargs surface as TypeError.
    with pytest.raises(TypeError):
        cfg = dict(type='ResNet', non_existing_arg=50)
        model = mmcv.build_from_cfg(cfg, BACKBONES)
def test_assert_dict_contains_subset():
    """Subset check compares values, including numpy/torch array contents."""
    dict_obj = {'a': 'test1', 'b': 2, 'c': (4, 6)}
    # An identical dict counts as a subset.
    expected_subset = {'a': 'test1', 'b': 2, 'c': (4, 6)}
    assert mmcv.assert_dict_contains_subset(dict_obj, expected_subset)
    # A differing value fails (tuple order matters).
    expected_subset = {'a': 'test1', 'b': 2, 'c': (6, 4)}
    assert (not mmcv.assert_dict_contains_subset(dict_obj, expected_subset))
    # None does not match an existing value.
    expected_subset = {'a': 'test1', 'b': 2, 'c': None}
    assert (not mmcv.assert_dict_contains_subset(dict_obj, expected_subset))
    # A key absent from dict_obj fails.
    expected_subset = {'a': 'test1', 'b': 2, 'd': (4, 6)}
    assert (not mmcv.assert_dict_contains_subset(dict_obj, expected_subset))
    # numpy arrays are compared elementwise.
    dict_obj = {'a': 'test1', 'b': 2, 'c': (4, 6), 'd': np.array([[5, 3, 5], [1, 2, 3]])}
    expected_subset = {'a': 'test1', 'b': 2, 'c': (4, 6), 'd': np.array([[5, 3, 5], [6, 2, 3]])}
    assert (not mmcv.assert_dict_contains_subset(dict_obj, expected_subset))
    dict_obj = {'a': 'test1', 'b': 2, 'c': (4, 6), 'd': np.array([[1]])}
    expected_subset = {'a': 'test1', 'b': 2, 'c': (4, 6), 'd': np.array([[1]])}
    assert mmcv.assert_dict_contains_subset(dict_obj, expected_subset)
    # torch tensors: value and shape mismatches both fail.
    if (torch is not None):
        dict_obj = {'a': 'test1', 'b': 2, 'c': (4, 6), 'd': torch.tensor([5, 3, 5])}
        expected_subset = {'d': torch.tensor([5, 5, 5])}
        assert (not mmcv.assert_dict_contains_subset(dict_obj, expected_subset))
        expected_subset = {'d': torch.tensor([[5, 3, 5], [4, 1, 2]])}
        assert (not mmcv.assert_dict_contains_subset(dict_obj, expected_subset))
def test_assert_attrs_equal():
    """assert_attrs_equal compares class attributes (and methods) by value."""

    class TestExample(object):
        (a, b, c) = (1, ('wvi', 3), [4.5, 3.14])

        def test_func(self):
            return self.b

    # Exact match of all listed attributes.
    assert mmcv.assert_attrs_equal(TestExample, {'a': 1, 'b': ('wvi', 3), 'c': [4.5, 3.14]})
    # Any differing value fails.
    assert (not mmcv.assert_attrs_equal(TestExample, {'a': 1, 'b': ('wvi', 3), 'c': [4.5, 3.14, 2]}))
    # Attribute names missing from the class fail.
    assert (not mmcv.assert_attrs_equal(TestExample, {'bc': 54, 'c': [4.5, 3.14]}))
    # Methods may be compared as attributes too.
    assert mmcv.assert_attrs_equal(TestExample, {'b': ('wvi', 3), 'test_func': TestExample.test_func})
    if (torch is not None):

        class TestExample(object):
            (a, b) = (torch.tensor([1]), torch.tensor([4, 5]))

        # Tensor-valued attributes are compared elementwise.
        assert mmcv.assert_attrs_equal(TestExample, {'a': torch.tensor([1]), 'b': torch.tensor([4, 5])})
        assert (not mmcv.assert_attrs_equal(TestExample, {'a': torch.tensor([1]), 'b': torch.tensor([4, 6])}))
@pytest.mark.parametrize('obj', assert_dict_has_keys_data_1)
@pytest.mark.parametrize('expected_keys, ret_value', assert_dict_has_keys_data_2)
def test_assert_dict_has_keys(obj, expected_keys, ret_value):
    """Cross-product of sample dicts and key sets; ret_value is the oracle."""
    assert (mmcv.assert_dict_has_keys(obj, expected_keys) == ret_value)
@pytest.mark.parametrize('result_keys', assert_keys_equal_data_1)
@pytest.mark.parametrize('target_keys, ret_value', assert_keys_equal_data_2)
def test_assert_keys_equal(result_keys, target_keys, ret_value):
    """Cross-product of key collections; ret_value is the expected verdict."""
    assert (mmcv.assert_keys_equal(result_keys, target_keys) == ret_value)
@pytest.mark.skipif((torch is None), reason='requires torch library')
def test_assert_is_norm_layer():
    """Normalisation layers are recognised; conv/activation layers are not."""
    norm_layers = (nn.BatchNorm3d(128), nn.GroupNorm(8, 64))
    non_norm_layers = (nn.Conv3d(3, 64, 3), nn.Sigmoid())
    for layer in norm_layers:
        assert mmcv.assert_is_norm_layer(layer)
    for layer in non_norm_layers:
        assert not mmcv.assert_is_norm_layer(layer)
@pytest.mark.skipif((torch is None), reason='requires torch library')
def test_assert_params_all_zeros():
    """assert_params_all_zeros checks both weight and bias (when present)."""
    demo_module = nn.Conv2d(3, 64, 3)
    nn.init.constant_(demo_module.weight, 0)
    nn.init.constant_(demo_module.bias, 0)
    assert mmcv.assert_params_all_zeros(demo_module)
    # Non-zero weights must fail even with a zero bias.
    nn.init.xavier_normal_(demo_module.weight)
    nn.init.constant_(demo_module.bias, 0)
    assert (not mmcv.assert_params_all_zeros(demo_module))
    # Modules without a bias are judged on weights alone.
    demo_module = nn.Linear(2048, 400, bias=False)
    nn.init.constant_(demo_module.weight, 0)
    assert mmcv.assert_params_all_zeros(demo_module)
    nn.init.normal_(demo_module.weight, mean=0, std=0.01)
    assert (not mmcv.assert_params_all_zeros(demo_module))
def test_check_python_script(capsys):
    """check_python_script runs a script string with its CLI arguments."""
    mmcv.utils.check_python_script('./tests/data/scripts/hello.py zz')
    captured = capsys.readouterr().out
    assert (captured == 'hello zz!\n')
    mmcv.utils.check_python_script('./tests/data/scripts/hello.py agent')
    captured = capsys.readouterr().out
    assert (captured == 'hello agent!\n')
    # Too many arguments: the script's argument parser exits.
    with pytest.raises(SystemExit):
        mmcv.utils.check_python_script('./tests/data/scripts/hello.py li zz')
def test_timer_init():
    """A timer only runs once started; the default constructor auto-starts."""
    paused_timer = mmcv.Timer(start=False)
    assert not paused_timer.is_running
    paused_timer.start()
    assert paused_timer.is_running
    default_timer = mmcv.Timer()
    assert default_timer.is_running
def test_timer_run():
    """since_start/since_last_check track wall time; unstarted timers raise."""
    timer = mmcv.Timer()
    time.sleep(1)
    # Allow 10 ms of scheduling jitter around each 1 s sleep.
    assert (abs((timer.since_start() - 1)) < 0.01)
    time.sleep(1)
    assert (abs((timer.since_last_check() - 1)) < 0.01)
    assert (abs((timer.since_start() - 2)) < 0.01)
    # A timer constructed with start=False raises until started.
    timer = mmcv.Timer(False)
    with pytest.raises(mmcv.TimerError):
        timer.since_start()
    with pytest.raises(mmcv.TimerError):
        timer.since_last_check()
def test_timer_context(capsys):
    """As a context manager, Timer prints the elapsed time on exit."""
    with mmcv.Timer():
        time.sleep(1)
    (out, _) = capsys.readouterr()
    assert (abs((float(out) - 1)) < 0.01)
    # A custom template formats the elapsed seconds.
    with mmcv.Timer(print_tmpl='time: {:.1f}s'):
        time.sleep(1)
    (out, _) = capsys.readouterr()
    assert (out == 'time: 1.0s\n')
@pytest.mark.skipif((digit_version(torch.__version__) < digit_version('1.6.0')), reason='torch.jit.is_tracing is not available before 1.6.0')
def test_is_jit_tracing():
    """is_jit_tracing is False in eager mode, True inside torch.jit.trace."""

    def foo(x):
        if is_jit_tracing():
            return x
        else:
            return x.tolist()

    x = torch.rand(3)
    # Eager call takes the non-tracing branch and returns a list.
    assert isinstance(foo(x), list)
    # Under torch.jit.trace the tracing branch runs and a tensor is returned.
    traced_foo = torch.jit.trace(foo, (torch.rand(1),))
    assert isinstance(traced_foo(x), torch.Tensor)
def test_digit_version():
    """digit_version maps version strings to comparable 6-tuples."""
    assert (digit_version('0.2.16') == (0, 2, 16, 0, 0, 0))
    assert (digit_version('1.2.3') == (1, 2, 3, 0, 0, 0))
    # Release candidates: the 5th field is -1 with the rc number last.
    assert (digit_version('1.2.3rc0') == (1, 2, 3, 0, (-1), 0))
    assert (digit_version('1.2.3rc1') == (1, 2, 3, 0, (-1), 1))
    assert (digit_version('1.0rc0') == (1, 0, 0, 0, (-1), 0))
    # Short forms and local build suffixes normalise to the same tuple.
    assert (digit_version('1.0') == digit_version('1.0.0'))
    assert (digit_version('1.5.0+cuda90_cudnn7.6.3_lms') == digit_version('1.5'))
    # Pre-release ordering: dev < a < a1 < b < rc < release < post.
    assert (digit_version('1.0.0dev') < digit_version('1.0.0a'))
    assert (digit_version('1.0.0a') < digit_version('1.0.0a1'))
    assert (digit_version('1.0.0a') < digit_version('1.0.0b'))
    assert (digit_version('1.0.0b') < digit_version('1.0.0rc'))
    assert (digit_version('1.0.0rc1') < digit_version('1.0.0'))
    assert (digit_version('1.0.0') < digit_version('1.0.0post'))
    assert (digit_version('1.0.0post') < digit_version('1.0.0post1'))
    # A leading 'v' prefix is tolerated.
    assert (digit_version('v1') == (1, 0, 0, 0, 0, 0))
    assert (digit_version('v1.1.5') == (1, 1, 5, 0, 0, 0))
    # Malformed strings are rejected.
    with pytest.raises(AssertionError):
        digit_version('a')
    with pytest.raises(AssertionError):
        digit_version('1x')
    with pytest.raises(AssertionError):
        digit_version('1.x')
def test_parse_version_info():
    """Release components are ints; pre-release parts keep their tag string."""
    cases = [
        ('0.2.16', (0, 2, 16, 0, 0, 0)),
        ('1.2.3', (1, 2, 3, 0, 0, 0)),
        ('1.2.3rc0', (1, 2, 3, 0, 'rc', 0)),
        ('1.2.3rc1', (1, 2, 3, 0, 'rc', 1)),
        ('1.0rc0', (1, 0, 0, 0, 'rc', 0)),
    ]
    for version_str, expected in cases:
        assert parse_version_info(version_str) == expected
def _mock_cmd_success(cmd): return '3b46d33e90c397869ad5103075838fdfc9812aa0'.encode('ascii')
def _mock_cmd_fail(cmd):
    """Stand-in for ``_minimal_ext_cmd`` that simulates git being unavailable.

    Ignores *cmd* and always raises OSError, as a missing binary would.
    """
    raise OSError
def test_get_git_hash():
    """get_git_hash truncates to `digits` and falls back when git fails."""
    with patch('mmcv.utils.version_utils._minimal_ext_cmd', _mock_cmd_success):
        assert (get_git_hash() == '3b46d33e90c397869ad5103075838fdfc9812aa0')
        assert (get_git_hash(digits=6) == '3b46d3')
        # Requesting more digits than exist returns the full hash.
        assert (get_git_hash(digits=100) == get_git_hash())
    with patch('mmcv.utils.version_utils._minimal_ext_cmd', _mock_cmd_fail):
        assert (get_git_hash() == 'unknown')
        assert (get_git_hash(fallback='n/a') == 'n/a')
class TestVideoEditor():
    """ffmpeg-based cut/concat/resize helpers, checked on a bundled clip."""

    @classmethod
    def setup_class(cls):
        # Bundled 168-frame test clip shared by all tests in this class.
        cls.video_path = osp.join(osp.dirname(__file__), '../data/test.mp4')
        cls.num_frames = 168

    @pytest.mark.skipif((platform.system() == 'Windows'), reason='skip windows')
    def test_cut_concat_video(self):
        part1_file = osp.join(tempfile.gettempdir(), '.mmcv_test1.mp4')
        part2_file = osp.join(tempfile.gettempdir(), '.mmcv_test2.mp4')
        # Split at the 3 s mark (25 fps -> 75 frames in part 1).
        mmcv.cut_video(self.video_path, part1_file, end=3, vcodec='h264')
        mmcv.cut_video(self.video_path, part2_file, start=3, vcodec='h264')
        v1 = mmcv.VideoReader(part1_file)
        v2 = mmcv.VideoReader(part2_file)
        assert (len(v1) == 75)
        assert (len(v2) == (self.num_frames - 75))
        # Re-concatenating the parts must restore the full frame count.
        out_file = osp.join(tempfile.gettempdir(), '.mmcv_test.mp4')
        mmcv.concat_video([part1_file, part2_file], out_file)
        v = mmcv.VideoReader(out_file)
        assert (len(v) == self.num_frames)
        os.remove(part1_file)
        os.remove(part2_file)
        os.remove(out_file)

    @pytest.mark.skipif((platform.system() == 'Windows'), reason='skip windows')
    def test_resize_video(self):
        out_file = osp.join(tempfile.gettempdir(), '.mmcv_test.mp4')
        # Absolute target resolution.
        mmcv.resize_video(self.video_path, out_file, (200, 100), log_level='panic')
        v = mmcv.VideoReader(out_file)
        assert (v.resolution == (200, 100))
        os.remove(out_file)
        # Uniform scaling factor (source is 294x240).
        mmcv.resize_video(self.video_path, out_file, ratio=2)
        v = mmcv.VideoReader(out_file)
        assert (v.resolution == ((294 * 2), (240 * 2)))
        os.remove(out_file)
        # keep_ar clamps the requested size to preserve the aspect ratio.
        mmcv.resize_video(self.video_path, out_file, (1000, 480), keep_ar=True)
        v = mmcv.VideoReader(out_file)
        assert (v.resolution == ((294 * 2), (240 * 2)))
        os.remove(out_file)
        # Per-axis scaling ratios with keep_ar.
        mmcv.resize_video(self.video_path, out_file, ratio=(2, 1.5), keep_ar=True)
        v = mmcv.VideoReader(out_file)
        assert (v.resolution == ((294 * 2), 360))
        os.remove(out_file)
class TestCache():
    """LRU-style ``mmcv.Cache``: capacity, eviction order and lookup."""

    def test_init(self):
        # Capacity must be a positive integer.
        with pytest.raises(ValueError):
            mmcv.Cache(0)
        cache = mmcv.Cache(100)
        assert (cache.capacity == 100)
        assert (cache.size == 0)

    def test_put(self):
        cache = mmcv.Cache(3)
        for i in range(1, 4):
            cache.put(f'k{i}', i)
            assert (cache.size == i)
        assert (cache._cache == OrderedDict([('k1', 1), ('k2', 2), ('k3', 3)]))
        # Inserting beyond capacity evicts the oldest entry ('k1').
        cache.put('k4', 4)
        assert (cache.size == 3)
        assert (cache._cache == OrderedDict([('k2', 2), ('k3', 3), ('k4', 4)]))
        # Re-putting an existing key neither reorders nor grows the cache.
        cache.put('k2', 2)
        assert (cache._cache == OrderedDict([('k2', 2), ('k3', 3), ('k4', 4)]))

    def test_get(self):
        cache = mmcv.Cache(3)
        # Missing keys return None, or the provided default.
        assert (cache.get('key_none') is None)
        assert (cache.get('key_none', 0) == 0)
        cache.put('k1', 1)
        assert (cache.get('k1') == 1)
class TestVideoReader():
    """``mmcv.VideoReader``: metadata, random access, slicing and conversion.

    Frame-mean values (94, 205, 0) are fingerprints of particular frames in
    the bundled test clip and verify correct frame indexing.
    """

    @classmethod
    def setup_class(cls):
        cls.video_path = osp.join(osp.dirname(__file__), '../data/test.mp4')
        cls.num_frames = 168
        # Remote sample used by test_load; requires network access.
        cls.video_url = 'https://www.learningcontainer.com/wp-content/uploads/2020/05/sample-mp4-file.mp4'

    def test_load(self):
        # Local file metadata.
        v = mmcv.VideoReader(self.video_path)
        assert (v.width == 294)
        assert (v.height == 240)
        assert (v.fps == 25)
        assert (v.frame_cnt == self.num_frames)
        assert (len(v) == self.num_frames)
        assert v.opened
        import cv2
        assert isinstance(v.vcap, type(cv2.VideoCapture()))
        # URLs are accepted as well as local paths.
        v = mmcv.VideoReader(self.video_url)
        assert (v.width == 320)
        assert (v.height == 240)
        assert (v.fps == 15)
        assert (v.frame_cnt == 1889)
        assert (len(v) == 1889)
        assert v.opened
        assert isinstance(v.vcap, type(cv2.VideoCapture()))

    def test_read(self):
        v = mmcv.VideoReader(self.video_path)
        # Sequential read starts at frame 0 (mean 94).
        img = v.read()
        assert (int(round(img.mean())) == 94)
        img = v.get_frame(63)
        assert (int(round(img.mean())) == 94)
        # Indexing supports negative positions.
        img = v[64]
        assert (int(round(img.mean())) == 205)
        img = v[(-104)]
        assert (int(round(img.mean())) == 205)
        img = v[63]
        assert (int(round(img.mean())) == 94)
        img = v[(-105)]
        assert (int(round(img.mean())) == 94)
        # read() continues from the last random-access position.
        img = v.read()
        assert (int(round(img.mean())) == 205)
        # Out-of-range access raises IndexError in both directions.
        with pytest.raises(IndexError):
            v.get_frame((self.num_frames + 1))
        with pytest.raises(IndexError):
            v[((-self.num_frames) - 1)]

    def test_slice(self):
        v = mmcv.VideoReader(self.video_path)
        # Negative-index slices.
        imgs = v[(-105):(-103)]
        assert (int(round(imgs[0].mean())) == 94)
        assert (int(round(imgs[1].mean())) == 205)
        assert (len(imgs) == 2)
        imgs = v[63:65]
        assert (int(round(imgs[0].mean())) == 94)
        assert (int(round(imgs[1].mean())) == 205)
        assert (len(imgs) == 2)
        # Reversed slice with a negative step.
        imgs = v[64:62:(-1)]
        assert (int(round(imgs[0].mean())) == 205)
        assert (int(round(imgs[1].mean())) == 94)
        assert (len(imgs) == 2)
        # Open-ended slices.
        imgs = v[:5]
        assert (len(imgs) == 5)
        for img in imgs:
            assert (int(round(img.mean())) == 94)
        imgs = v[165:]
        assert (len(imgs) == 3)
        for img in imgs:
            assert (int(round(img.mean())) == 0)
        imgs = v[(-3):]
        assert (len(imgs) == 3)
        for img in imgs:
            assert (int(round(img.mean())) == 0)

    def test_current_frame(self):
        v = mmcv.VideoReader(self.video_path)
        # Nothing has been read yet.
        assert (v.current_frame() is None)
        v.read()
        img = v.current_frame()
        assert (int(round(img.mean())) == 94)

    def test_position(self):
        v = mmcv.VideoReader(self.video_path)
        assert (v.position == 0)
        for _ in range(10):
            v.read()
        assert (v.position == 10)
        # get_frame(i) leaves the cursor just after frame i.
        v.get_frame(99)
        assert (v.position == 100)

    def test_iterator(self):
        cnt = 0
        for img in mmcv.VideoReader(self.video_path):
            cnt += 1
            assert (img.shape == (240, 294, 3))
        assert (cnt == self.num_frames)

    def test_with(self):
        # The context manager releases the capture on exit.
        with mmcv.VideoReader(self.video_path) as v:
            assert v.opened
        assert (not v.opened)

    def test_cvt2frames(self):
        v = mmcv.VideoReader(self.video_path)
        frame_dir = tempfile.mkdtemp()
        # Default: every frame dumped as {:06d}.jpg.
        v.cvt2frames(frame_dir)
        assert osp.isdir(frame_dir)
        for i in range(self.num_frames):
            filename = f'{frame_dir}/{i:06d}.jpg'
            assert osp.isfile(filename)
            os.remove(filename)
        # Same result with the progress bar disabled.
        v = mmcv.VideoReader(self.video_path)
        v.cvt2frames(frame_dir, show_progress=False)
        assert osp.isdir(frame_dir)
        for i in range(self.num_frames):
            filename = f'{frame_dir}/{i:06d}.jpg'
            assert osp.isfile(filename)
            os.remove(filename)
        # Custom filename template, start offset and frame limit.
        v = mmcv.VideoReader(self.video_path)
        v.cvt2frames(frame_dir, file_start=100, filename_tmpl='{:03d}.JPEG', start=100, max_num=20)
        assert osp.isdir(frame_dir)
        for i in range(100, 120):
            filename = f'{frame_dir}/{i:03d}.JPEG'
            assert osp.isfile(filename)
            os.remove(filename)
        shutil.rmtree(frame_dir)

    def test_frames2video(self):
        v = mmcv.VideoReader(self.video_path)
        frame_dir = tempfile.mkdtemp()
        v.cvt2frames(frame_dir)
        assert osp.isdir(frame_dir)
        for i in range(self.num_frames):
            filename = f'{frame_dir}/{i:06d}.jpg'
            assert osp.isfile(filename)
        out_filename = osp.join(tempfile.gettempdir(), 'mmcv_test.avi')
        # Default encoding is 30 fps over all frames.
        mmcv.frames2video(frame_dir, out_filename)
        v = mmcv.VideoReader(out_filename)
        assert (v.fps == 30)
        assert (len(v) == self.num_frames)
        # A [start, end) range selects 40 frames at a custom fps.
        mmcv.frames2video(frame_dir, out_filename, fps=25, start=10, end=50, show_progress=False)
        with mmcv.VideoReader(out_filename) as v:
            assert (v.fps == 25)
            assert (len(v) == 40)
        for i in range(self.num_frames):
            filename = f'{frame_dir}/{i:06d}.jpg'
            os.remove(filename)
        shutil.rmtree(frame_dir)
def test_color():
    """Check ``mmcv.color_val`` conversions and its input validation."""
    # Valid inputs paired with their expected BGR tuples.
    expectations = [
        (mmcv.Color.blue, (255, 0, 0)),
        ('green', (0, 255, 0)),
        ((1, 2, 3), (1, 2, 3)),
        (100, (100, 100, 100)),
        (np.zeros(3, dtype=int), (0, 0, 0)),
    ]
    for value, expected in expectations:
        assert mmcv.color_val(value) == expected
    # Unsupported input types are rejected with TypeError.
    for bad_value in ([255, 255, 255], 1.0):
        with pytest.raises(TypeError):
            mmcv.color_val(bad_value)
    # Channel values outside [0, 255] trip the internal assertion.
    with pytest.raises(AssertionError):
        mmcv.color_val((0, 0, 500))
def digit_version(version_str):
    """Convert a version string into a list of ints for ordered comparison.

    A component carrying ``rc`` (release candidate) is encoded as two
    entries: the numeric part minus one followed by the rc number, so that
    release candidates compare smaller than the final release.
    Components that are neither digits nor rc-tagged are ignored.

    Args:
        version_str (str): Dotted version string, e.g. ``'1.3.0rc1'``.

    Returns:
        list[int]: Comparable integer encoding of the version.
    """
    release = []
    for part in version_str.split('.'):
        if part.isdigit():
            release.append(int(part))
        elif 'rc' in part:
            pieces = part.split('rc')
            release.append(int(pieces[0]) - 1)
            release.append(int(pieces[1]))
    return release
def init_detector(config, checkpoint=None, device='cuda:0', cfg_options=None):
    """Initialize a detector from config file.

    Args:
        config (str or :obj:`mmcv.Config`): Config file path or the config
            object.
        checkpoint (str, optional): Checkpoint path. If left as None, the model
            will not load any weights.
        device (str): Device the model is moved to. Default 'cuda:0'.
        cfg_options (dict): Options to override some settings in the used
            config.

    Returns:
        nn.Module: The constructed detector.
    """
    if isinstance(config, str):
        config = mmcv.Config.fromfile(config)
    elif not isinstance(config, mmcv.Config):
        raise TypeError(
            f'config must be a filename or Config object, but got {type(config)}')
    if cfg_options is not None:
        config.merge_from_dict(cfg_options)
    # Disable pretrained-weight loading; the checkpoint (if any) supplies weights.
    if 'pretrained' in config.model:
        config.model.pretrained = None
    elif 'init_cfg' in config.model.backbone:
        config.model.backbone.init_cfg = None
    config.model.train_cfg = None
    model = build_detector(config.model, test_cfg=config.get('test_cfg'))
    if checkpoint is not None:
        checkpoint = load_checkpoint(model, checkpoint, map_location='cpu')
        meta = checkpoint.get('meta', {})
        if 'CLASSES' in meta:
            model.CLASSES = meta['CLASSES']
        else:
            warnings.simplefilter('once')
            warnings.warn("Class names are not saved in the checkpoint's "
                          'meta data, use COCO classes by default.')
            model.CLASSES = get_classes('coco')
    # Save the config in the model for convenience.
    model.cfg = config
    model.to(device)
    model.eval()
    return model
class LoadImage:
    """Deprecated.

    A simple pipeline to load image.
    """

    def __call__(self, results):
        """Call function to load images into results.

        Args:
            results (dict): A result dict contains the file name
                of the image to be read.
        Returns:
            dict: ``results`` will be returned containing loaded image.
        """
        warnings.simplefilter('once')
        warnings.warn('`LoadImage` is deprecated and will be removed in '
                      'future releases. You may use `LoadImageFromWebcam` '
                      'from `mmdet.datasets.pipelines.` instead.')
        source = results['img']
        # Record the original filename only when a path (str) was given.
        if isinstance(source, str):
            results['filename'] = source
            results['ori_filename'] = source
        else:
            results['filename'] = None
            results['ori_filename'] = None
        img = mmcv.imread(source)
        results['img'] = img
        results['img_fields'] = ['img']
        results['img_shape'] = img.shape
        results['ori_shape'] = img.shape
        return results
def inference_detector(model, imgs):
    """Inference image(s) with the detector.

    Args:
        model (nn.Module): The loaded detector.
        imgs (str/ndarray or list[str/ndarray] or tuple[str/ndarray]):
            Either image files or loaded images.

    Returns:
        If imgs is a list or tuple, the same length list type results
        will be returned, otherwise return the detection results directly.
    """
    is_batch = isinstance(imgs, (list, tuple))
    if not is_batch:
        imgs = [imgs]

    cfg = model.cfg
    device = next(model.parameters()).device

    if isinstance(imgs[0], np.ndarray):
        # Already-loaded arrays bypass file reading in the pipeline.
        cfg = cfg.copy()
        cfg.data.test.pipeline[0].type = 'LoadImageFromWebcam'

    cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline)
    test_pipeline = Compose(cfg.data.test.pipeline)

    datas = []
    for img in imgs:
        if isinstance(img, np.ndarray):
            data = dict(img=img)
        else:
            data = dict(img_info=dict(filename=img), img_prefix=None)
        datas.append(test_pipeline(data))

    data = collate(datas, samples_per_gpu=len(imgs))
    # Unwrap DataContainers into plain lists of tensors/metas.
    data['img_metas'] = [img_metas.data[0] for img_metas in data['img_metas']]
    data['img'] = [img.data[0] for img in data['img']]
    if next(model.parameters()).is_cuda:
        # Scatter to the target GPU.
        data = scatter(data, [device])[0]
    else:
        for m in model.modules():
            assert not isinstance(m, RoIPool), \
                'CPU inference with RoIPool is not supported currently.'

    with torch.no_grad():
        results = model(return_loss=False, rescale=True, **data)

    return results if is_batch else results[0]
def show_result_pyplot(model, img, result, score_thr=0.3, title='result', wait_time=0, palette=None):
    """Visualize the detection results on the image.

    Args:
        model (nn.Module): The loaded detector.
        img (str or np.ndarray): Image filename or loaded image.
        result (tuple[list] or list): The detection result, can be either
            (bbox, segm) or just bbox.
        score_thr (float): The threshold to visualize the bboxes and masks.
        title (str): Title of the pyplot figure.
        wait_time (float): Value of waitKey param. Default: 0.
        palette (str or tuple | None): Color used for bboxes and masks;
            forwarded to ``show_result`` as bbox_color/mask_color.
    """
    # Unwrap (MM)DataParallel-style wrappers to reach the real detector.
    if hasattr(model, 'module'):
        model = model.module
    model.show_result(
        img,
        result,
        score_thr=score_thr,
        show=True,
        wait_time=wait_time,
        win_name=title,
        bbox_color=palette,
        text_color=(200, 200, 200),
        mask_color=palette)
def init_random_seed(seed=None, device='cuda'):
    """Initialize random seed.

    If the seed is not set, the seed will be automatically randomized,
    and then broadcast to all processes to prevent some potential bugs.

    Args:
        seed (int, Optional): The seed. Default to None.
        device (str): The device where the seed will be put on.
            Default to 'cuda'.

    Returns:
        int: Seed to be used.
    """
    if seed is not None:
        return seed

    rank, world_size = get_dist_info()
    seed = np.random.randint(2**31)
    if world_size == 1:
        return seed

    # Rank 0 draws the seed; all other ranks receive it via broadcast so
    # every process ends up with the same value.
    payload = seed if rank == 0 else 0
    random_num = torch.tensor(payload, dtype=torch.int32, device=device)
    dist.broadcast(random_num, src=0)
    return random_num.item()
def set_random_seed(seed, deterministic=False):
    """Set random seed.

    Args:
        seed (int): Seed to be used.
        deterministic (bool): Whether to set the deterministic option for
            CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`
            to True and `torch.backends.cudnn.benchmark` to False.
            Default: False.
    """
    # Seed every RNG the training pipeline may draw from, in a fixed order.
    for seeder in (random.seed, np.random.seed, torch.manual_seed,
                   torch.cuda.manual_seed_all):
        seeder(seed)
    if deterministic:
        # Trade cuDNN autotuning speed for bitwise reproducibility.
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
def train_detector(model, dataset, cfg, distributed=False, validate=False, timestamp=None, meta=None):
    """Launch detector training with the given config.

    Builds dataloaders, wraps the model for (non-)distributed execution,
    constructs the optimizer and runner, registers training/eval hooks,
    optionally resumes or loads a checkpoint, then starts the run loop.

    Args:
        model (nn.Module): The detector to train.
        dataset (Dataset | list[Dataset]): Training dataset(s); one
            dataloader is built per dataset.
        cfg: Full training config (mmcv Config-like; attribute access used).
        distributed (bool): Whether to wrap with MMDistributedDataParallel.
        validate (bool): Whether to register an evaluation hook.
        timestamp (str, optional): Timestamp attached to the runner.
        meta (dict, optional): Extra metadata passed to the runner.
    """
    logger = get_root_logger(log_level=cfg.log_level)
    dataset = (dataset if isinstance(dataset, (list, tuple)) else [dataset])
    # Backward compatibility: translate the deprecated `imgs_per_gpu` key
    # into `samples_per_gpu` (the deprecated key wins when both are set).
    if ('imgs_per_gpu' in cfg.data):
        logger.warning('"imgs_per_gpu" is deprecated in MMDet V2.0. Please use "samples_per_gpu" instead')
        if ('samples_per_gpu' in cfg.data):
            logger.warning(f'Got "imgs_per_gpu"={cfg.data.imgs_per_gpu} and "samples_per_gpu"={cfg.data.samples_per_gpu}, "imgs_per_gpu"={cfg.data.imgs_per_gpu} is used in this experiments')
        else:
            logger.warning(f'Automatically set "samples_per_gpu"="imgs_per_gpu"={cfg.data.imgs_per_gpu} in this experiments')
        cfg.data.samples_per_gpu = cfg.data.imgs_per_gpu
    runner_type = ('EpochBasedRunner' if ('runner' not in cfg) else cfg.runner['type'])
    data_loaders = [build_dataloader(ds, cfg.data.samples_per_gpu, cfg.data.workers_per_gpu, num_gpus=len(cfg.gpu_ids), dist=distributed, seed=cfg.seed, runner_type=runner_type, persistent_workers=cfg.data.get('persistent_workers', False)) for ds in dataset]
    # Wrap the model for parallel execution.
    if distributed:
        find_unused_parameters = cfg.get('find_unused_parameters', False)
        model = MMDistributedDataParallel(model.cuda(), device_ids=[torch.cuda.current_device()], broadcast_buffers=False, find_unused_parameters=find_unused_parameters)
    else:
        model = MMDataParallel(model, device_ids=cfg.gpu_ids)
    optimizer = build_optimizer(model, cfg.optimizer)
    if ('runner' not in cfg):
        # Legacy configs only define `total_epochs`; synthesize a runner cfg.
        cfg.runner = {'type': 'EpochBasedRunner', 'max_epochs': cfg.total_epochs}
        warnings.warn('config is now expected to have a `runner` section, please set `runner` in your config.', UserWarning)
    elif ('total_epochs' in cfg):
        # If both are present they must agree.
        assert (cfg.total_epochs == cfg.runner.max_epochs)
    runner = build_runner(cfg.runner, default_args=dict(model=model, optimizer=optimizer, work_dir=cfg.work_dir, logger=logger, meta=meta))
    runner.timestamp = timestamp
    # fp16 setting: wrap the optimizer config in the fp16 hook when enabled;
    # otherwise use a plain OptimizerHook for untyped configs in DDP mode.
    fp16_cfg = cfg.get('fp16', None)
    if (fp16_cfg is not None):
        optimizer_config = Fp16OptimizerHook(**cfg.optimizer_config, **fp16_cfg, distributed=distributed)
    elif (distributed and ('type' not in cfg.optimizer_config)):
        optimizer_config = OptimizerHook(**cfg.optimizer_config)
    else:
        optimizer_config = cfg.optimizer_config
    # Register core training hooks plus any user-provided custom hooks.
    runner.register_training_hooks(cfg.lr_config, optimizer_config, cfg.checkpoint_config, cfg.log_config, cfg.get('momentum_config', None), custom_hooks_config=cfg.get('custom_hooks', None))
    if distributed:
        if isinstance(runner, EpochBasedRunner):
            # NOTE(review): DistSamplerSeedHook presumably re-seeds the
            # distributed sampler each epoch — confirm against its docs.
            runner.register_hook(DistSamplerSeedHook())
    if validate:
        # Support batch_size > 1 in validation.
        val_samples_per_gpu = cfg.data.val.pop('samples_per_gpu', 1)
        if (val_samples_per_gpu > 1):
            cfg.data.val.pipeline = replace_ImageToTensor(cfg.data.val.pipeline)
        val_dataset = build_dataset(cfg.data.val, dict(test_mode=True))
        val_dataloader = build_dataloader(val_dataset, samples_per_gpu=val_samples_per_gpu, workers_per_gpu=cfg.data.workers_per_gpu, dist=distributed, shuffle=False)
        eval_cfg = cfg.get('evaluation', {})
        # Iter-based runners evaluate by iteration rather than by epoch.
        eval_cfg['by_epoch'] = (cfg.runner['type'] != 'IterBasedRunner')
        eval_hook = (DistEvalHook if distributed else EvalHook)
        runner.register_hook(eval_hook(val_dataloader, **eval_cfg), priority='LOW')
    # Auto-resume: pick up the newest checkpoint in work_dir when requested
    # and no explicit resume path was given.
    resume_from = None
    if ((cfg.resume_from is None) and cfg.get('auto_resume')):
        resume_from = find_latest_checkpoint(cfg.work_dir)
    if (resume_from is not None):
        cfg.resume_from = resume_from
    if cfg.resume_from:
        runner.resume(cfg.resume_from)
    elif cfg.load_from:
        runner.load_checkpoint(cfg.load_from)
    runner.run(data_loaders, cfg.workflow)
def build_prior_generator(cfg, default_args=None):
    """Build a prior generator from config.

    Args:
        cfg (dict): Config dict whose ``type`` key is registered in
            ``PRIOR_GENERATORS``.
        default_args (dict, optional): Default arguments merged into ``cfg``.

    Returns:
        The instantiated prior generator.
    """
    return build_from_cfg(cfg, PRIOR_GENERATORS, default_args)
def build_anchor_generator(cfg, default_args=None):
    """Deprecated alias of :func:`build_prior_generator`."""
    warnings.warn('``build_anchor_generator`` would be deprecated soon, please use ``build_prior_generator`` ')
    return build_prior_generator(cfg, default_args=default_args)
@PRIOR_GENERATORS.register_module()
class PointGenerator:
    """Generates grid points (with stride) for a single-level feature map."""

    def _meshgrid(self, x, y, row_major=True):
        """Return the flattened cartesian product of ``x`` and ``y``."""
        xs = x.repeat(len(y))
        ys = y.view(-1, 1).repeat(1, len(x)).view(-1)
        return (xs, ys) if row_major else (ys, xs)

    def grid_points(self, featmap_size, stride=16, device='cuda'):
        """Return an (N, 3) tensor of ``(x, y, stride)`` rows covering the
        (h, w) feature map, with coordinates scaled by ``stride``."""
        feat_h, feat_w = featmap_size
        xs = torch.arange(0., feat_w, device=device) * stride
        ys = torch.arange(0., feat_h, device=device) * stride
        grid_x, grid_y = self._meshgrid(xs, ys)
        stride_col = xs.new_full((grid_x.shape[0],), stride)
        return torch.stack([grid_x, grid_y, stride_col], dim=-1).to(device)

    def valid_flags(self, featmap_size, valid_size, device='cuda'):
        """Return a bool mask marking the grid points that fall inside
        ``valid_size`` (h, w) of the (h, w) feature map."""
        feat_h, feat_w = featmap_size
        valid_h, valid_w = valid_size
        assert valid_h <= feat_h and valid_w <= feat_w
        flags_x = torch.zeros(feat_w, dtype=torch.bool, device=device)
        flags_y = torch.zeros(feat_h, dtype=torch.bool, device=device)
        flags_x[:valid_w] = 1
        flags_y[:valid_h] = 1
        grid_x, grid_y = self._meshgrid(flags_x, flags_y)
        return grid_x & grid_y
@PRIOR_GENERATORS.register_module()
class MlvlPointGenerator:
    """Standard points generator for multi-level (Mlvl) feature maps in 2D
    points-based detectors.

    Args:
        strides (list[int] | list[tuple[int, int]]): Strides of anchors
            in multiple feature levels in order (w, h).
        offset (float): The offset of points, the value is normalized with
            corresponding stride. Defaults to 0.5.
    """

    def __init__(self, strides, offset=0.5):
        self.strides = [_pair(s) for s in strides]
        self.offset = offset

    @property
    def num_levels(self):
        """int: number of feature levels that the generator will be applied"""
        return len(self.strides)

    @property
    def num_base_priors(self):
        """list[int]: number of priors (points) at a grid location, one entry
        per level (always 1 for point priors)."""
        return [1] * len(self.strides)

    def _meshgrid(self, x, y, row_major=True):
        """Flattened 2D meshgrid of ``x`` and ``y``."""
        yy, xx = torch.meshgrid(y, x)
        if row_major:
            return xx.reshape(-1), yy.reshape(-1)
        return yy.reshape(-1), xx.reshape(-1)

    def grid_priors(self, featmap_sizes, dtype=torch.float32, device='cuda', with_stride=False):
        """Generate grid points of multiple feature levels.

        Args:
            featmap_sizes (list[tuple]): Per-level feature map sizes, each
                arranged as (h, w).
            dtype (:obj:`dtype`): Dtype of priors. Default: torch.float32.
            device (str): The device where the points will be put on.
            with_stride (bool): Whether to concatenate the stride to the last
                dimension of points.

        Return:
            list[torch.Tensor]: One tensor per level, shaped (N, 2) as
            (coord_x, coord_y), or (N, 4) as
            (coord_x, coord_y, stride_w, stride_h) when ``with_stride``.
        """
        assert self.num_levels == len(featmap_sizes)
        return [
            self.single_level_grid_priors(
                featmap_sizes[lvl],
                level_idx=lvl,
                dtype=dtype,
                device=device,
                with_stride=with_stride) for lvl in range(self.num_levels)
        ]

    def single_level_grid_priors(self, featmap_size, level_idx, dtype=torch.float32, device='cuda', with_stride=False):
        """Generate grid points of a single level.

        Note:
            Usually called by :meth:`grid_priors`; see it for the meaning of
            the returned tensor shapes.

        Args:
            featmap_size (tuple[int]): Feature map size arranged as (h, w).
            level_idx (int): Index of the corresponding feature map level.
            dtype (:obj:`dtype`): Dtype of priors. Default: torch.float32.
            device (str, optional): Target device. Defaults to 'cuda'.
            with_stride (bool): Concatenate the stride to the last dimension.

        Return:
            Tensor: (N, 2) or (N, 4) points of this level.
        """
        feat_h, feat_w = featmap_size
        stride_w, stride_h = self.strides[level_idx]
        xs = ((torch.arange(0, feat_w, device=device) + self.offset) * stride_w).to(dtype)
        ys = ((torch.arange(0, feat_h, device=device) + self.offset) * stride_h).to(dtype)
        grid_x, grid_y = self._meshgrid(xs, ys)
        if with_stride:
            col_w = grid_x.new_full((grid_x.shape[0],), stride_w).to(dtype)
            col_h = grid_x.new_full((grid_y.shape[0],), stride_h).to(dtype)
            shifts = torch.stack([grid_x, grid_y, col_w, col_h], dim=-1)
        else:
            shifts = torch.stack([grid_x, grid_y], dim=-1)
        return shifts.to(device)

    def valid_flags(self, featmap_sizes, pad_shape, device='cuda'):
        """Generate valid flags of points of multiple feature levels.

        Args:
            featmap_sizes (list(tuple)): Per-level feature map sizes, each
                arranged as (h, w).
            pad_shape (tuple(int)): Padded image shape arranged as (h, w).
            device (str): The device where the flags will be put on.

        Return:
            list(torch.Tensor): Per-level bool masks of valid points.
        """
        assert self.num_levels == len(featmap_sizes)
        multi_level_flags = []
        for lvl in range(self.num_levels):
            stride_w, stride_h = self.strides[lvl]
            feat_h, feat_w = featmap_sizes[lvl]
            h, w = pad_shape[:2]
            # Clamp to the feature size in case of rounding at borders.
            valid_feat_h = min(int(np.ceil(h / stride_h)), feat_h)
            valid_feat_w = min(int(np.ceil(w / stride_w)), feat_w)
            multi_level_flags.append(
                self.single_level_valid_flags((feat_h, feat_w),
                                              (valid_feat_h, valid_feat_w),
                                              device=device))
        return multi_level_flags

    def single_level_valid_flags(self, featmap_size, valid_size, device='cuda'):
        """Bool mask marking points inside ``valid_size`` for one level.

        Args:
            featmap_size (tuple[int]): Feature map size arranged as (h, w).
            valid_size (tuple[int]): Valid region size arranged as (h, w).
            device (str, optional): Target device. Defaults to 'cuda'.

        Returns:
            torch.Tensor: Valid flags of each point in the feature map.
        """
        feat_h, feat_w = featmap_size
        valid_h, valid_w = valid_size
        assert valid_h <= feat_h and valid_w <= feat_w
        flags_x = torch.zeros(feat_w, dtype=torch.bool, device=device)
        flags_y = torch.zeros(feat_h, dtype=torch.bool, device=device)
        flags_x[:valid_w] = 1
        flags_y[:valid_h] = 1
        grid_x, grid_y = self._meshgrid(flags_x, flags_y)
        return grid_x & grid_y

    def sparse_priors(self, prior_idxs, featmap_size, level_idx, dtype=torch.float32, device='cuda'):
        """Generate sparse points according to the ``prior_idxs``.

        Args:
            prior_idxs (Tensor): Flat indices of priors in the feature map.
            featmap_size (tuple[int]): Feature map size arranged as (h, w)
                (the code unpacks height first).
            level_idx (int): Level index of the corresponding feature map.
            dtype (obj:`torch.dtype`): Dtype of points. Defaults to
                ``torch.float32``.
            device (obj:`torch.device`): Device where the points are located.

        Returns:
            Tensor: (N, 2) tensor of (coord_x, coord_y), with N equal to the
            length of ``prior_idxs``.
        """
        height, width = featmap_size
        stride_w, stride_h = self.strides[level_idx]
        x = (prior_idxs % width + self.offset) * stride_w
        y = ((prior_idxs // width) % height + self.offset) * stride_h
        return torch.stack([x, y], 1).to(dtype).to(device)
class AssignResult(util_mixins.NiceRepr):
    """Stores assignments between predicted and truth boxes.

    Attributes:
        num_gts (int): the number of truth boxes considered when computing this
            assignment
        gt_inds (LongTensor): for each predicted box indicates the 1-based
            index of the assigned truth box. 0 means unassigned and -1 means
            ignore.
        max_overlaps (FloatTensor): the iou between the predicted box and its
            assigned truth box.
        labels (None | LongTensor): If specified, for each predicted box
            indicates the category label of the assigned truth box.

    Example:
        >>> # An assign result between 4 predicted boxes and 9 true boxes
        >>> # where only two boxes were assigned.
        >>> num_gts = 9
        >>> max_overlaps = torch.LongTensor([0, .5, .9, 0])
        >>> gt_inds = torch.LongTensor([-1, 1, 2, 0])
        >>> labels = torch.LongTensor([0, 3, 4, 0])
        >>> self = AssignResult(num_gts, gt_inds, max_overlaps, labels)
        >>> print(str(self))  # xdoctest: +IGNORE_WANT
        <AssignResult(num_gts=9, gt_inds.shape=(4,), max_overlaps.shape=(4,),
        labels.shape=(4,))>
        >>> # Force addition of gt labels (when adding gt as proposals)
        >>> new_labels = torch.LongTensor([3, 4, 5])
        >>> self.add_gt_(new_labels)
        >>> print(str(self))  # xdoctest: +IGNORE_WANT
        <AssignResult(num_gts=9, gt_inds.shape=(7,), max_overlaps.shape=(7,),
        labels.shape=(7,))>
    """

    def __init__(self, num_gts, gt_inds, max_overlaps, labels=None):
        self.num_gts = num_gts
        self.gt_inds = gt_inds
        self.max_overlaps = max_overlaps
        self.labels = labels
        # Interface for possible user-defined properties.
        self._extra_properties = {}

    @property
    def num_preds(self):
        """int: the number of predictions in this assignment"""
        return len(self.gt_inds)

    def set_extra_property(self, key, value):
        """Set user-defined new property."""
        # Disallow shadowing the built-in info keys.
        assert key not in self.info
        self._extra_properties[key] = value

    def get_extra_property(self, key):
        """Get user-defined property."""
        return self._extra_properties.get(key, None)

    @property
    def info(self):
        """dict: a dictionary of info about the object"""
        basic_info = {
            'num_gts': self.num_gts,
            'num_preds': self.num_preds,
            'gt_inds': self.gt_inds,
            'max_overlaps': self.max_overlaps,
            'labels': self.labels,
        }
        basic_info.update(self._extra_properties)
        return basic_info

    def __nice__(self):
        """str: a "nice" summary string describing this assign result"""
        parts = []
        parts.append(f'num_gts={self.num_gts!r}')
        if self.gt_inds is None:
            parts.append(f'gt_inds={self.gt_inds!r}')
        else:
            parts.append(f'gt_inds.shape={tuple(self.gt_inds.shape)!r}')
        if self.max_overlaps is None:
            parts.append(f'max_overlaps={self.max_overlaps!r}')
        else:
            parts.append(f'max_overlaps.shape={tuple(self.max_overlaps.shape)!r}')
        if self.labels is None:
            parts.append(f'labels={self.labels!r}')
        else:
            parts.append(f'labels.shape={tuple(self.labels.shape)!r}')
        return ', '.join(parts)

    @classmethod
    def random(cls, **kwargs):
        """Create random AssignResult for tests or debugging.

        Args:
            num_preds: number of predicted boxes
            num_gts: number of true boxes
            p_ignore (float): probability of a predicted box assigned to an
                ignored truth
            p_assigned (float): probability of a predicted box not being
                assigned
            p_use_label (float | bool): with labels or not
            num_classes (int): number of distinct label values to draw from.
                Default 3.
            rng (None | int | numpy.random.RandomState): seed or state

        Returns:
            :obj:`AssignResult`: Randomly generated assign results.

        Example:
            >>> from mmdet.core.bbox.assigners.assign_result import *  # NOQA
            >>> self = AssignResult.random()
            >>> print(self.info)
        """
        from mmdet.core.bbox import demodata
        rng = demodata.ensure_rng(kwargs.get('rng', None))
        num_gts = kwargs.get('num_gts', None)
        num_preds = kwargs.get('num_preds', None)
        p_ignore = kwargs.get('p_ignore', 0.3)
        p_assigned = kwargs.get('p_assigned', 0.7)
        p_use_label = kwargs.get('p_use_label', 0.5)
        # BUGFIX: this previously read ``kwargs.get('p_use_label', 3)`` (a
        # copy-paste of the line above), silently ignoring a ``num_classes``
        # option and mixing a probability into a class count.
        num_classes = kwargs.get('num_classes', 3)
        if num_gts is None:
            num_gts = rng.randint(0, 8)
        if num_preds is None:
            num_preds = rng.randint(0, 16)
        if num_gts == 0:
            # Nothing to assign: everything is background.
            max_overlaps = torch.zeros(num_preds, dtype=torch.float32)
            gt_inds = torch.zeros(num_preds, dtype=torch.int64)
            if p_use_label is True or p_use_label < rng.rand():
                labels = torch.zeros(num_preds, dtype=torch.int64)
            else:
                labels = None
        else:
            import numpy as np
            max_overlaps = torch.from_numpy(rng.rand(num_preds))
            # Decide which predictions are assigned at all.
            is_assigned = torch.from_numpy(rng.rand(num_preds) < p_assigned)
            n_assigned = min(num_preds, min(num_gts, is_assigned.sum()))
            assigned_idxs = np.where(is_assigned)[0]
            rng.shuffle(assigned_idxs)
            assigned_idxs = assigned_idxs[0:n_assigned]
            assigned_idxs.sort()
            is_assigned[:] = 0
            is_assigned[assigned_idxs] = True
            is_ignore = torch.from_numpy(rng.rand(num_preds) < p_ignore) & is_assigned
            gt_inds = torch.zeros(num_preds, dtype=torch.int64)
            true_idxs = np.arange(num_gts)
            rng.shuffle(true_idxs)
            true_idxs = torch.from_numpy(true_idxs)
            gt_inds[is_assigned] = true_idxs[:n_assigned].long()
            # NOTE(review): the next line overwrites the shuffled assignment
            # built just above with fresh random indices; preserved as-is so
            # the random stream and results stay identical — confirm intent
            # before simplifying.
            gt_inds = torch.from_numpy(rng.randint(1, num_gts + 1, size=num_preds))
            gt_inds[is_ignore] = -1
            gt_inds[~is_assigned] = 0
            max_overlaps[~is_assigned] = 0
            if p_use_label is True or p_use_label < rng.rand():
                if num_classes == 0:
                    labels = torch.zeros(num_preds, dtype=torch.int64)
                else:
                    labels = torch.from_numpy(rng.randint(0, num_classes, size=num_preds))
                    labels[~is_assigned] = 0
            else:
                labels = None
        self = cls(num_gts, gt_inds, max_overlaps, labels)
        return self

    def add_gt_(self, gt_labels):
        """Add ground truth as assigned results.

        Args:
            gt_labels (torch.Tensor): Labels of gt boxes
        """
        # Prepend 1-based self-assignments (gt i is assigned to gt i).
        self_inds = torch.arange(1, len(gt_labels) + 1, dtype=torch.long, device=gt_labels.device)
        self.gt_inds = torch.cat([self_inds, self.gt_inds])
        # Ground truths overlap themselves perfectly (iou = 1).
        self.max_overlaps = torch.cat([self.max_overlaps.new_ones(len(gt_labels)), self.max_overlaps])
        if self.labels is not None:
            self.labels = torch.cat([gt_labels, self.labels])
class BaseAssigner(metaclass=ABCMeta):
    """Base assigner that assigns boxes to ground truth boxes.

    Subclasses implement :meth:`assign`, which produces an assignment of
    each candidate box to a ground-truth box, background, or ignore.
    """

    @abstractmethod
    def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None):
        """Assign boxes to either a ground truth boxes or a negative boxes."""
@BBOX_ASSIGNERS.register_module()
class HungarianAssigner(BaseAssigner):
    """Computes one-to-one matching between predictions and ground truth.

    This class computes an assignment between the targets and the predictions
    based on the costs. The costs are weighted sum of three components:
    classification cost, regression L1 cost and regression iou cost. The
    targets don't include the no_object, so generally there are more
    predictions than targets. After the one-to-one matching, the un-matched
    are treated as backgrounds. Thus each query prediction will be assigned
    with `0` or a positive integer indicating the ground truth index:

    - 0: negative sample, no assigned gt
    - positive integer: positive sample, index (1-based) of assigned gt

    Args:
        cls_cost (dict): Config of the classification match cost.
            Default dict(type='ClassificationCost', weight=1.0).
        reg_cost (dict): Config of the regression L1 match cost.
            Default dict(type='BBoxL1Cost', weight=1.0).
        iou_cost (dict): Config of the regression iou match cost.
            Default dict(type='IoUCost', iou_mode='giou', weight=1.0).
    """

    def __init__(self, cls_cost=dict(type='ClassificationCost', weight=1.0), reg_cost=dict(type='BBoxL1Cost', weight=1.0), iou_cost=dict(type='IoUCost', iou_mode='giou', weight=1.0)):
        self.cls_cost = build_match_cost(cls_cost)
        self.reg_cost = build_match_cost(reg_cost)
        self.iou_cost = build_match_cost(iou_cost)

    def assign(self, bbox_pred, cls_pred, gt_bboxes, gt_labels, img_meta, gt_bboxes_ignore=None, eps=1e-07):
        """Computes one-to-one matching based on the weighted costs.

        This method assign each query prediction to a ground truth or
        background. The `assigned_gt_inds` with -1 means don't care,
        0 means negative sample, and positive number is the index (1-based)
        of assigned gt.
        The assignment is done in the following steps, the order matters.

        1. assign every prediction to -1
        2. compute the weighted costs
        3. do Hungarian matching on CPU based on the costs
        4. assign all to 0 (background) first, then for each matched pair
           between predictions and gts, treat this prediction as foreground
           and assign the corresponding gt index (plus 1) to it.

        Args:
            bbox_pred (Tensor): Predicted boxes with normalized coordinates
                (cx, cy, w, h), which are all in range [0, 1]. Shape
                [num_query, 4].
            cls_pred (Tensor): Predicted classification logits, shape
                [num_query, num_class].
            gt_bboxes (Tensor): Ground truth boxes with unnormalized
                coordinates (x1, y1, x2, y2). Shape [num_gt, 4].
            gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,).
            img_meta (dict): Meta information for current image.
            gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are
                labelled as `ignored`. Default None.
            eps (int | float, optional): A value added to the denominator for
                numerical stability. Default 1e-7.
                NOTE(review): ``eps`` is currently unused in this body.

        Returns:
            :obj:`AssignResult`: The assigned result.
        """
        assert (gt_bboxes_ignore is None), 'Only case when gt_bboxes_ignore is None is supported.'
        (num_gts, num_bboxes) = (gt_bboxes.size(0), bbox_pred.size(0))
        # 1. assign every prediction to -1 (don't care) by default.
        assigned_gt_inds = bbox_pred.new_full((num_bboxes,), (- 1), dtype=torch.long)
        assigned_labels = bbox_pred.new_full((num_bboxes,), (- 1), dtype=torch.long)
        if ((num_gts == 0) or (num_bboxes == 0)):
            # No ground truth or no predictions: nothing to match.
            if (num_gts == 0):
                # No ground truth: assign everything to background.
                assigned_gt_inds[:] = 0
            return AssignResult(num_gts, assigned_gt_inds, None, labels=assigned_labels)
        (img_h, img_w, _) = img_meta['img_shape']
        # Scale factor to move between normalized and absolute coordinates.
        factor = gt_bboxes.new_tensor([img_w, img_h, img_w, img_h]).unsqueeze(0)
        # 2. compute the weighted costs.
        cls_cost = self.cls_cost(cls_pred, gt_labels)
        # Regression cost is computed in normalized (cx, cy, w, h) space.
        normalize_gt_bboxes = (gt_bboxes / factor)
        reg_cost = self.reg_cost(bbox_pred, normalize_gt_bboxes)
        # IoU cost is computed in absolute (x1, y1, x2, y2) space.
        bboxes = (bbox_cxcywh_to_xyxy(bbox_pred) * factor)
        iou_cost = self.iou_cost(bboxes, gt_bboxes)
        cost = ((cls_cost + reg_cost) + iou_cost)
        # 3. do Hungarian matching on CPU via scipy's linear_sum_assignment.
        cost = cost.detach().cpu()
        if (linear_sum_assignment is None):
            raise ImportError('Please run "pip install scipy" to install scipy first.')
        (matched_row_inds, matched_col_inds) = linear_sum_assignment(cost)
        matched_row_inds = torch.from_numpy(matched_row_inds).to(bbox_pred.device)
        matched_col_inds = torch.from_numpy(matched_col_inds).to(bbox_pred.device)
        # 4. assign backgrounds first, then foregrounds (1-based gt indices).
        assigned_gt_inds[:] = 0
        assigned_gt_inds[matched_row_inds] = (matched_col_inds + 1)
        assigned_labels[matched_row_inds] = gt_labels[matched_col_inds]
        return AssignResult(num_gts, assigned_gt_inds, None, labels=assigned_labels)