code
stringlengths
17
6.64M
def test_syntax_error():
    """Config.fromfile must surface invalid Python as a SyntaxError."""
    temp_file = tempfile.NamedTemporaryFile(suffix='.py', delete=False)
    temp_path = temp_file.name
    # Deliberately broken source: two statements fused onto one line.
    with open(temp_path, 'w') as f:
        f.write('a=0b=dict(c=1)')
    with pytest.raises(
            SyntaxError, match='There are syntax errors in config file'):
        Config.fromfile(temp_path)
    temp_file.close()
    os.remove(temp_path)
def test_pickle_support():
    """A loaded Config must survive a pickle dump/load round trip."""
    cfg = Config.fromfile(osp.join(data_path, 'config/n.py'))
    with tempfile.TemporaryDirectory() as tmp_dir:
        pkl_path = osp.join(tmp_dir, '_pickle.pkl')
        dump(cfg, pkl_path)
        restored = load(pkl_path)
        assert restored._cfg_dict == cfg._cfg_dict
def test_deprecation():
    """Deprecated configs warn (directly or via _base_) but still load."""
    deprecated = (
        osp.join(data_path, 'config/deprecated.py'),
        osp.join(data_path, 'config/deprecated_as_base.py'),
    )
    for cfg_file in deprecated:
        with pytest.warns(DeprecationWarning):
            cfg = Config.fromfile(cfg_file)
            assert cfg.item1 == 'expected'
def test_deepcopy():
    """deepcopy yields an independent _cfg_dict with equal content/metadata."""
    cfg = Config.fromfile(osp.join(data_path, 'config/n.py'))
    dup = copy.deepcopy(cfg)
    assert isinstance(dup, Config)
    assert dup._cfg_dict == cfg._cfg_dict
    # Deep copy: same content but a distinct dict object.
    assert dup._cfg_dict is not cfg._cfg_dict
    assert dup._filename == cfg._filename
    assert dup._text == cfg._text
def test_copy():
    """Shallow copy shares the _cfg_dict but is a distinct Config object."""
    cfg = Config.fromfile(osp.join(data_path, 'config/n.py'))
    dup = copy.copy(cfg)
    assert isinstance(dup, Config)
    assert dup is not cfg
    # Shallow copy: the internal dict object is shared.
    assert dup._cfg_dict is cfg._cfg_dict
    assert dup._filename == cfg._filename
    assert dup._text == cfg._text
def test_collect_env():
    """collect_env must report the expected keys with sane values."""
    try:
        import torch  # noqa: F401
    except ModuleNotFoundError:
        pytest.skip('skipping tests that require PyTorch')
    from mmcv.utils import collect_env

    env_info = collect_env()
    expected_keys = [
        'sys.platform', 'Python', 'CUDA available', 'PyTorch',
        'PyTorch compiling details', 'OpenCV', 'MMCV', 'MMCV Compiler',
        'MMCV CUDA Compiler',
    ]
    for key in expected_keys:
        assert key in env_info
    # CUDA-specific keys only show up when a CUDA runtime is present.
    if env_info['CUDA available']:
        for key in ('CUDA_HOME', 'NVCC'):
            assert key in env_info
    if sys.platform != 'win32':
        assert 'GCC' in env_info
    assert env_info['sys.platform'] == sys.platform
    assert env_info['Python'] == sys.version.replace('\n', '')
    assert env_info['MMCV'] == mmcv.__version__
def test_load_url():
    """load_url / model_zoo.load_url behavior across torch versions."""
    url1 = 'https://download.openmmlab.com/mmcv/test_data/saved_in_pt1.5.pth'
    url2 = 'https://download.openmmlab.com/mmcv/test_data/saved_in_pt1.6.pth'
    # Zipfile-format checkpoints (saved by torch >= 1.6) cannot be read by
    # model_zoo on torch < 1.7.
    if digit_version(TORCH_VERSION) < digit_version('1.7.0'):
        model_zoo.load_url(url1)
        with pytest.raises(RuntimeError):
            model_zoo.load_url(url2)
    else:
        model_zoo.load_url(url1)
        model_zoo.load_url(url2)
    # mmcv's load_url backports zipfile support down to torch 1.5.
    load_url(url1)
    if digit_version(TORCH_VERSION) < digit_version('1.5.0'):
        with pytest.raises(RuntimeError):
            load_url(url2)
    else:
        load_url(url2)
@patch('torch.distributed.get_rank', lambda: 0)
@patch('torch.distributed.is_initialized', lambda: True)
@patch('torch.distributed.is_available', lambda: True)
def test_get_logger_rank0():
    """Rank 0 gets a stream handler, plus a file handler when log_file is set."""
    logger = get_logger('rank0.pkg1')
    assert isinstance(logger, logging.Logger)
    assert len(logger.handlers) == 1
    assert isinstance(logger.handlers[0], logging.StreamHandler)
    assert logger.handlers[0].level == logging.INFO

    logger = get_logger('rank0.pkg2', log_level=logging.DEBUG)
    assert isinstance(logger, logging.Logger)
    assert len(logger.handlers) == 1
    assert logger.handlers[0].level == logging.DEBUG

    with tempfile.NamedTemporaryFile(delete=False) as f:
        logger = get_logger('rank0.pkg3', log_file=f.name)
    assert isinstance(logger, logging.Logger)
    assert len(logger.handlers) == 2
    assert isinstance(logger.handlers[0], logging.StreamHandler)
    assert isinstance(logger.handlers[1], logging.FileHandler)

    # Re-requesting the same name must return the exact same logger object.
    logger_pkg3 = get_logger('rank0.pkg3')
    assert id(logger_pkg3) == id(logger)

    logging.shutdown()
    os.remove(f.name)

    logger_pkg3 = get_logger('rank0.pkg3.subpkg')
    # NOTE(review): this compares the list with itself and is always true;
    # presumably it was meant to compare against the parent logger's
    # handlers — confirm the original intent before changing.
    assert logger_pkg3.handlers == logger_pkg3.handlers
@patch('torch.distributed.get_rank', lambda: 1)
@patch('torch.distributed.is_initialized', lambda: True)
@patch('torch.distributed.is_available', lambda: True)
def test_get_logger_rank1():
    """Non-zero ranks only get the stream handler, even with log_file set."""
    logger = get_logger('rank1.pkg1')
    assert isinstance(logger, logging.Logger)
    assert len(logger.handlers) == 1
    assert isinstance(logger.handlers[0], logging.StreamHandler)
    assert logger.handlers[0].level == logging.INFO

    with tempfile.NamedTemporaryFile(delete=False) as f:
        # On rank 1 no FileHandler is attached despite log_file.
        logger = get_logger('rank1.pkg2', log_file=f.name)
    assert isinstance(logger, logging.Logger)
    assert len(logger.handlers) == 1
    assert logger.handlers[0].level == logging.INFO
    logging.shutdown()
    os.remove(f.name)
def test_print_log_print(capsys):
    """With logger=None the message goes straight to stdout."""
    print_log('welcome', logger=None)
    out, _ = capsys.readouterr()
    assert out == 'welcome\n'
def test_print_log_silent(capsys, caplog):
    """logger='silent' suppresses both stdout and logging output."""
    print_log('welcome', logger='silent')
    out, _ = capsys.readouterr()
    assert out == ''
    assert len(caplog.records) == 0
def test_print_log_logger(caplog):
    """A logger name or instance routes through logging at the given level."""
    print_log('welcome', logger='mmcv')
    assert caplog.record_tuples[-1] == ('mmcv', logging.INFO, 'welcome')

    print_log('welcome', logger='mmcv', level=logging.ERROR)
    assert caplog.record_tuples[-1] == ('mmcv', logging.ERROR, 'welcome')

    with tempfile.NamedTemporaryFile(delete=False) as f:
        logger = get_logger('abc', log_file=f.name)
        print_log('welcome', logger=logger)
        assert caplog.record_tuples[-1] == ('abc', logging.INFO, 'welcome')
        with open(f.name, 'r') as fin:
            log_text = fin.read()
        # The file line is "<timestamp> - abc - INFO - welcome".
        regex_time = r'\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3}'
        match = re.fullmatch(
            regex_time + r' - abc - INFO - welcome\n', log_text)
        assert match is not None
    logging.shutdown()
    os.remove(f.name)
def test_print_log_exception():
    """An unsupported logger argument type is rejected."""
    with pytest.raises(TypeError):
        print_log('welcome', logger=0)
def test_to_ntuple():
    """to_{1..4}tuple and to_ntuple(n) repeat a scalar n times."""
    s = 2
    assert mmcv.utils.to_1tuple(s) == (s,)
    assert mmcv.utils.to_2tuple(s) == (s, s)
    assert mmcv.utils.to_3tuple(s) == (s, s, s)
    assert mmcv.utils.to_4tuple(s) == (s, s, s, s)
    # to_ntuple builds the repeater for arbitrary n.
    assert mmcv.utils.to_ntuple(5)(s) == (s, s, s, s, s)
    assert mmcv.utils.to_ntuple(6)(s) == (s, s, s, s, s, s)
def test_iter_cast():
    """list/tuple/iter_cast convert element types; bad inputs raise."""
    assert mmcv.list_cast([1, 2, 3], int) == [1, 2, 3]
    assert mmcv.list_cast(['1.1', 2, '3'], float) == [1.1, 2.0, 3.0]
    assert mmcv.list_cast([1, 2, 3], str) == ['1', '2', '3']
    assert mmcv.tuple_cast((1, 2, 3), str) == ('1', '2', '3')
    # iter_cast is lazy — pull a single element.
    assert next(mmcv.iter_cast([1, 2, 3], str)) == '1'
    with pytest.raises(TypeError):
        mmcv.iter_cast([1, 2, 3], '')
    with pytest.raises(TypeError):
        mmcv.iter_cast(1, str)
def test_is_seq_of():
    """is_seq_of / is_list_of / is_tuple_of check element and container types."""
    assert mmcv.is_seq_of([1.0, 2.0, 3.0], float)
    assert mmcv.is_seq_of([(1,), (2,), (3,)], tuple)
    assert mmcv.is_seq_of((1.0, 2.0, 3.0), float)
    assert mmcv.is_list_of([1.0, 2.0, 3.0], float)
    # seq_type constrains the container class itself.
    assert not mmcv.is_seq_of((1.0, 2.0, 3.0), float, seq_type=list)
    assert not mmcv.is_tuple_of([1.0, 2.0, 3.0], float)
    # Mixed element types must fail.
    assert not mmcv.is_seq_of([1.0, 2, 3], int)
    assert not mmcv.is_seq_of((1.0, 2, 3), int)
def test_slice_list():
    """slice_list partitions by the given lengths; invalid lengths raise."""
    in_list = [1, 2, 3, 4, 5, 6]
    assert mmcv.slice_list(in_list, [1, 2, 3]) == [[1], [2, 3], [4, 5, 6]]
    assert mmcv.slice_list(in_list, [len(in_list)]) == [in_list]
    with pytest.raises(TypeError):
        mmcv.slice_list(in_list, 2.0)
    with pytest.raises(ValueError):
        # Lengths must sum to len(in_list).
        mmcv.slice_list(in_list, [1, 2])
def test_concat_list():
    """concat_list flattens one level of nesting."""
    assert mmcv.concat_list([[1, 2]]) == [1, 2]
    assert mmcv.concat_list([[1, 2], [3, 4, 5], [6]]) == [1, 2, 3, 4, 5, 6]
def test_requires_package(capsys):
    """Callables fail with a clear message when required packages are absent."""
    @mmcv.requires_package('nnn')
    def func_a():
        pass

    @mmcv.requires_package(['numpy', 'n1', 'n2'])
    def func_b():
        pass

    @mmcv.requires_package('numpy')
    def func_c():
        return 1

    with pytest.raises(RuntimeError):
        func_a()
    out, _ = capsys.readouterr()
    assert out == ('Prerequisites "nnn" are required in method "func_a" but '
                   'not found, please install them first.\n')

    with pytest.raises(RuntimeError):
        func_b()
    out, _ = capsys.readouterr()
    # Only the genuinely missing packages are listed.
    assert out == ('Prerequisites "n1, n2" are required in method "func_b" '
                   'but not found, please install them first.\n')

    # All prerequisites satisfied: the wrapped function runs normally.
    assert func_c() == 1
def test_requires_executable(capsys):
    """Callables fail with a clear message when required executables are absent."""
    @mmcv.requires_executable('nnn')
    def func_a():
        pass

    @mmcv.requires_executable(['ls', 'n1', 'n2'])
    def func_b():
        pass

    @mmcv.requires_executable('mv')
    def func_c():
        return 1

    with pytest.raises(RuntimeError):
        func_a()
    out, _ = capsys.readouterr()
    assert out == ('Prerequisites "nnn" are required in method "func_a" but '
                   'not found, please install them first.\n')

    with pytest.raises(RuntimeError):
        func_b()
    out, _ = capsys.readouterr()
    # Only the genuinely missing executables are listed.
    assert out == ('Prerequisites "n1, n2" are required in method "func_b" '
                   'but not found, please install them first.\n')

    # All prerequisites satisfied: the wrapped function runs normally.
    assert func_c() == 1
def test_import_modules_from_strings():
    """Covers list/str input, empty input, failures and allow_failed_imports."""
    import os.path as osp_
    import sys as sys_

    osp, sys = mmcv.import_modules_from_strings(['os.path', 'sys'])
    assert osp == osp_
    assert sys == sys_

    osp = mmcv.import_modules_from_strings('os.path')
    assert osp == osp_

    # Empty-ish inputs import nothing.
    assert mmcv.import_modules_from_strings(None) is None
    assert mmcv.import_modules_from_strings([]) is None
    assert mmcv.import_modules_from_strings('') is None

    with pytest.raises(TypeError):
        mmcv.import_modules_from_strings(1)
    with pytest.raises(TypeError):
        mmcv.import_modules_from_strings([1])
    with pytest.raises(ImportError):
        mmcv.import_modules_from_strings('_not_implemented_module')

    # allow_failed_imports degrades failures to warnings + None entries.
    with pytest.warns(UserWarning):
        imported = mmcv.import_modules_from_strings(
            '_not_implemented_module', allow_failed_imports=True)
        assert imported is None
    with pytest.warns(UserWarning):
        imported = mmcv.import_modules_from_strings(
            ['os.path', '_not_implemented'], allow_failed_imports=True)
        assert imported[0] == osp
        assert imported[1] is None
def test_is_method_overridden():
    """is_method_overridden detects subclass overrides; instances as the base
    class argument are rejected."""
    class Base:
        def foo1():
            pass

        def foo2():
            pass

    class Sub(Base):
        def foo1():
            pass

    # Class arguments.
    assert mmcv.is_method_overridden('foo1', Base, Sub)
    assert not mmcv.is_method_overridden('foo2', Base, Sub)

    # Instance as the derived argument is allowed.
    sub_instance = Sub()
    assert mmcv.is_method_overridden('foo1', Base, sub_instance)
    assert not mmcv.is_method_overridden('foo2', Base, sub_instance)

    # The base argument must be a class, not an instance.
    base_instance = Base()
    with pytest.raises(AssertionError):
        mmcv.is_method_overridden('foo1', base_instance, sub_instance)
def test_has_method():
    """has_method is true for callable attributes only, not data attributes."""
    class Foo:
        def __init__(self, name):
            self.name = name

        def print_name(self):
            print(self.name)

    foo = Foo('foo')
    assert not has_method(foo, 'name')
    assert has_method(foo, 'print_name')
def test_deprecated_api_warning():
    """deprecated_api_warning remaps old kwargs and forbids passing both."""
    @deprecated_api_warning(name_dict=dict(old_key='new_key'))
    def dummy_func(new_key=1):
        return new_key

    # The deprecated name is transparently forwarded to the new one.
    assert dummy_func(old_key=2) == 2
    # Supplying both the old and the new name is an error.
    with pytest.raises(AssertionError):
        dummy_func(old_key=1, new_key=2)
class TestJit(object):
    """mmcv.jit-compiled functions must agree with their eager equivalents."""

    def test_add_dict(self):
        """Dict input/output round-trips through the jit wrapper."""
        @mmcv.jit
        def add_dict(oper):
            rets = oper['x'] + oper['y']
            return {'result': rets}

        def add_dict_pyfunc(oper):
            rets = oper['x'] + oper['y']
            return {'result': rets}

        a = torch.rand((3, 4))
        b = torch.rand((3, 4))
        oper = {'x': a, 'y': b}
        rets_t = add_dict(oper)
        rets = add_dict_pyfunc(oper)
        assert 'result' in rets
        assert (rets_t['result'] == rets['result']).all()

    def test_add_list(self):
        """A list of dicts plus keyword tensors produces matching outputs."""
        @mmcv.jit
        def add_list(oper, x, y):
            rets = {}
            for idx, pair in enumerate(oper):
                rets[f'k{idx}'] = pair['x'] + pair['y']
            rets[f'k{len(oper)}'] = x + y
            return rets

        def add_list_pyfunc(oper, x, y):
            rets = {}
            for idx, pair in enumerate(oper):
                rets[f'k{idx}'] = pair['x'] + pair['y']
            rets[f'k{len(oper)}'] = x + y
            return rets

        pair_num = 3
        oper = []
        for _ in range(pair_num):
            oper.append({'x': torch.rand((3, 4)), 'y': torch.rand((3, 4))})
        a = torch.rand((3, 4))
        b = torch.rand((3, 4))
        rets = add_list_pyfunc(oper, x=a, y=b)
        rets_t = add_list(oper, x=a, y=b)
        for idx in range(pair_num + 1):
            assert f'k{idx}' in rets_t
            assert (rets[f'k{idx}'] == rets_t[f'k{idx}']).all()

    @skip_no_parrots
    def test_jit_cache(self):
        """Each distinct control-flow path gets its own cache entry."""
        @mmcv.jit
        def func(oper):
            if oper['const'] > 1:
                return oper['x'] * 2 + oper['y']
            else:
                return oper['x'] * 2 - oper['y']

        def pyfunc(oper):
            if oper['const'] > 1:
                return oper['x'] * 2 + oper['y']
            else:
                return oper['x'] * 2 - oper['y']

        assert len(func._cache._cache) == 0
        oper = {'const': 2, 'x': torch.rand((3, 4)), 'y': torch.rand((3, 4))}
        rets_plus = pyfunc(oper)
        rets_plus_t = func(oper)
        assert (rets_plus == rets_plus_t).all()
        assert len(func._cache._cache) == 1

        # Flip the branch condition: a second graph must be cached.
        oper['const'] = 0.5
        rets_minus = pyfunc(oper)
        rets_minus_t = func(oper)
        assert (rets_minus == rets_minus_t).all()
        assert len(func._cache._cache) == 2

        # (2x+y)+(2x-y) == 4x, so the average of both results recovers x.
        rets_a = (rets_minus_t + rets_plus_t) / 4
        assert torch.allclose(oper['x'], rets_a)

    @skip_no_parrots
    def test_jit_shape(self):
        """A new input shape triggers a new cached graph."""
        @mmcv.jit
        def func(a):
            return a + 1

        assert len(func._cache._cache) == 0
        a = torch.ones((3, 4))
        r = func(a)
        assert r.shape == (3, 4)
        assert (r == 2).all()
        assert len(func._cache._cache) == 1

        a = torch.ones((2, 3, 4))
        r = func(a)
        assert r.shape == (2, 3, 4)
        assert (r == 2).all()
        assert len(func._cache._cache) == 2

    @skip_no_parrots
    def test_jit_kwargs(self):
        """Positional vs keyword argument passing shares one cache entry."""
        @mmcv.jit
        def func(a, b):
            return torch.mean((a - b) * (a - b))

        assert len(func._cache._cache) == 0
        x = torch.rand((16, 32))
        y = torch.rand((16, 32))
        func(x, y)
        assert len(func._cache._cache) == 1
        func(x, b=y)
        assert len(func._cache._cache) == 1
        func(b=y, a=x)
        assert len(func._cache._cache) == 1

    def test_jit_derivate(self):
        """derivate=True keeps gradients flowing through the jit function."""
        @mmcv.jit(derivate=True)
        def func(x, y):
            return (x + 2) * (y - 2)

        a = torch.rand((3, 4))
        b = torch.rand((3, 4))
        a.requires_grad = True

        c = func(a, b)
        assert c.requires_grad
        d = torch.empty_like(c)
        d.fill_(1.0)
        c.backward(d)
        # d(c)/d(a) == (b - 2); b has no grad since requires_grad is False.
        assert torch.allclose(a.grad, b - 2)
        assert b.grad is None

        a.grad = None
        c = func(a, b)
        assert c.requires_grad
        d = torch.empty_like(c)
        d.fill_(2.7)
        c.backward(d)
        assert torch.allclose(a.grad, 2.7 * (b - 2))
        assert b.grad is None

    def test_jit_optimize(self):
        """optimize=True must not change numerical results."""
        @mmcv.jit(optimize=True)
        def func(a, b):
            return torch.mean((a - b) * (a - b))

        def pyfunc(a, b):
            return torch.mean((a - b) * (a - b))

        a = torch.rand((16, 32))
        b = torch.rand((16, 32))
        c = func(a, b)
        d = pyfunc(a, b)
        assert torch.allclose(c, d)

    @mmcv.skip_no_elena
    def test_jit_coderize(self):
        """coderize=True (CUDA only) must not change numerical results."""
        if not torch.cuda.is_available():
            return

        @mmcv.jit(coderize=True)
        def func(a, b):
            return (a + b) * (a - b)

        def pyfunc(a, b):
            return (a + b) * (a - b)

        a = torch.rand((16, 32), device='cuda')
        b = torch.rand((16, 32), device='cuda')
        c = func(a, b)
        d = pyfunc(a, b)
        assert torch.allclose(c, d)

    def test_jit_value_dependent(self):
        """Value-dependent ops (torch.nonzero) are handled by the jit."""
        @mmcv.jit
        def func(a, b):
            torch.nonzero(a)
            return torch.mean((a - b) * (a - b))

        def pyfunc(a, b):
            torch.nonzero(a)
            return torch.mean((a - b) * (a - b))

        a = torch.rand((16, 32))
        b = torch.rand((16, 32))
        c = func(a, b)
        d = pyfunc(a, b)
        assert torch.allclose(c, d)

    @skip_no_parrots
    def test_jit_check_input(self):
        """Non-deterministic functions fail the check_input validation."""
        def func(x):
            y = torch.rand_like(x)
            return x + y

        a = torch.ones((3, 4))
        with pytest.raises(AssertionError):
            func = mmcv.jit(func, check_input=(a,))

    @skip_no_parrots
    def test_jit_partial_shape(self):
        """full_shape=False caches per rank rather than per exact shape."""
        @mmcv.jit(full_shape=False)
        def func(a, b):
            return torch.mean((a - b) * (a - b))

        def pyfunc(a, b):
            return torch.mean((a - b) * (a - b))

        a = torch.rand((3, 4))
        b = torch.rand((3, 4))
        assert torch.allclose(func(a, b), pyfunc(a, b))
        assert len(func._cache._cache) == 1

        # Same rank (2-D), different sizes: cache entry is reused.
        a = torch.rand((6, 5))
        b = torch.rand((6, 5))
        assert torch.allclose(func(a, b), pyfunc(a, b))
        assert len(func._cache._cache) == 1

        # New rank (3-D): a second entry is cached.
        a = torch.rand((3, 4, 5))
        b = torch.rand((3, 4, 5))
        assert torch.allclose(func(a, b), pyfunc(a, b))
        assert len(func._cache._cache) == 2

        a = torch.rand((1, 9, 8))
        b = torch.rand((1, 9, 8))
        assert torch.allclose(func(a, b), pyfunc(a, b))
        assert len(func._cache._cache) == 2

    def test_instance_method(self):
        """jit on an instance method re-traces per bound instance state."""
        class T(object):
            def __init__(self, shape):
                self._c = torch.rand(shape)

            @mmcv.jit
            def test_method(self, x, y):
                return x * self._c + y

        shape = (16, 32)
        t = T(shape)
        a = torch.rand(shape)
        b = torch.rand(shape)
        res = a * t._c + b
        jit_res = t.test_method(a, b)
        assert torch.allclose(res, jit_res)

        # A fresh instance has a fresh _c; results must still match.
        t = T(shape)
        res = a * t._c + b
        jit_res = t.test_method(a, b)
        assert torch.allclose(res, jit_res)
def test_is_filepath():
    """Strings and Path objects count as file paths; other types do not."""
    assert mmcv.is_filepath(__file__)
    assert mmcv.is_filepath('abc')
    assert mmcv.is_filepath(Path('/etc'))
    assert not mmcv.is_filepath(0)
def test_fopen():
    """fopen accepts both str and Path and returns a readable object."""
    assert hasattr(mmcv.fopen(__file__), 'read')
    assert hasattr(mmcv.fopen(Path(__file__)), 'read')
def test_check_file_exist():
    """check_file_exist passes for real files and raises otherwise."""
    mmcv.check_file_exist(__file__)
    with pytest.raises(FileNotFoundError):
        mmcv.check_file_exist('no_such_file.txt')
def test_scandir():
    """mmcv.scandir: suffix filtering, recursion and case sensitivity."""
    folder = osp.join(osp.dirname(osp.dirname(__file__)), 'data/for_scan')
    filenames = ['a.bin', '1.txt', '2.txt', '1.json', '2.json', '3.TXT']

    assert set(mmcv.scandir(folder)) == set(filenames)
    assert set(mmcv.scandir(Path(folder))) == set(filenames)
    assert set(mmcv.scandir(folder, '.txt')) == {
        f for f in filenames if f.endswith('.txt')}
    assert set(mmcv.scandir(folder, ('.json', '.txt'))) == {
        f for f in filenames if f.endswith(('.txt', '.json'))}
    assert set(mmcv.scandir(folder, '.png')) == set()

    # '.file' is hidden and must never be reported, even recursively.
    filenames_recursive = [
        'a.bin', '1.txt', '2.txt', '1.json', '2.json', '3.TXT',
        osp.join('sub', '1.json'), osp.join('sub', '1.txt'), '.file'
    ]
    assert set(mmcv.scandir(folder, recursive=True)) == {
        f for f in filenames_recursive if f != '.file'}
    assert set(mmcv.scandir(Path(folder), recursive=True)) == {
        f for f in filenames_recursive if f != '.file'}
    assert set(mmcv.scandir(folder, '.txt', recursive=True)) == {
        f for f in filenames_recursive if f.endswith('.txt')}
    assert set(
        mmcv.scandir(folder, '.TXT', recursive=True,
                     case_sensitive=False)) == {
        f for f in filenames_recursive if f.endswith(('.txt', '.TXT'))}
    assert set(
        mmcv.scandir(folder, ('.TXT', '.JSON'), recursive=True,
                     case_sensitive=False)) == {
        f for f in filenames_recursive
        if f.endswith(('.txt', '.json', '.TXT'))}

    with pytest.raises(TypeError):
        list(mmcv.scandir(123))
    with pytest.raises(TypeError):
        list(mmcv.scandir(folder, 111))
def reset_string_io(io):
    """Empty a StringIO in place so it can capture the next write."""
    # Drop the buffered contents, then rewind so new writes start at 0.
    io.truncate(0)
    io.seek(0)
class TestProgressBar:
    """ProgressBar rendering on start/update and terminal-width adaptation."""

    def test_start(self):
        out = StringIO()
        bar_width = 20
        # Unknown task count -> simple "completed" counter on start.
        prog_bar = mmcv.ProgressBar(bar_width=bar_width, file=out)
        assert out.getvalue() == 'completed: 0, elapsed: 0s'
        reset_string_io(out)
        # start=False defers all output until start() is called.
        prog_bar = mmcv.ProgressBar(bar_width=bar_width, start=False, file=out)
        assert out.getvalue() == ''
        reset_string_io(out)
        prog_bar.start()
        assert out.getvalue() == 'completed: 0, elapsed: 0s'
        reset_string_io(out)
        # Known task count -> full bar with ETA.
        prog_bar = mmcv.ProgressBar(10, bar_width=bar_width, file=out)
        assert out.getvalue() == f"[{(' ' * bar_width)}] 0/10, elapsed: 0s, ETA:"
        reset_string_io(out)
        prog_bar = mmcv.ProgressBar(
            10, bar_width=bar_width, start=False, file=out)
        assert out.getvalue() == ''
        reset_string_io(out)
        prog_bar.start()
        assert out.getvalue() == f"[{(' ' * bar_width)}] 0/10, elapsed: 0s, ETA:"

    def test_update(self):
        out = StringIO()
        bar_width = 20
        prog_bar = mmcv.ProgressBar(bar_width=bar_width, file=out)
        time.sleep(1)
        reset_string_io(out)
        prog_bar.update()
        assert out.getvalue() == 'completed: 1, elapsed: 1s, 1.0 tasks/s'
        reset_string_io(out)
        prog_bar = mmcv.ProgressBar(10, bar_width=bar_width, file=out)
        time.sleep(1)
        reset_string_io(out)
        prog_bar.update()
        assert out.getvalue() == f''' [{(('>' * 2) + (' ' * 18))}] 1/10, 1.0 task/s, elapsed: 1s, ETA: 9s'''

    def test_adaptive_length(self):
        # Bar width adapts to the COLUMNS environment variable.
        with patch.dict('os.environ', {'COLUMNS': '80'}):
            out = StringIO()
            bar_width = 20
            prog_bar = mmcv.ProgressBar(10, bar_width=bar_width, file=out)
            time.sleep(1)
            reset_string_io(out)
            prog_bar.update()
            assert len(out.getvalue()) == 66

            os.environ['COLUMNS'] = '30'
            reset_string_io(out)
            prog_bar.update()
            assert len(out.getvalue()) == 48

            os.environ['COLUMNS'] = '60'
            reset_string_io(out)
            prog_bar.update()
            assert len(out.getvalue()) == 60
def sleep_1s(num):
    """Dummy one-second task that echoes ``num`` back (progress-bar fixture)."""
    time.sleep(1)
    return num
def test_track_progress_list():
    """track_progress over a list renders the bar and collects results."""
    out = StringIO()
    ret = mmcv.track_progress(sleep_1s, [1, 2, 3], bar_width=3, file=out)
    assert out.getvalue() == (
        '[ ] 0/3, elapsed: 0s, ETA:'
        '\r[> ] 1/3, 1.0 task/s, elapsed: 1s, ETA: 2s'
        '\r[>> ] 2/3, 1.0 task/s, elapsed: 2s, ETA: 1s'
        '\r[>>>] 3/3, 1.0 task/s, elapsed: 3s, ETA: 0s\n')
    assert ret == [1, 2, 3]
def test_track_progress_iterator():
    """track_progress also accepts an (iterator, length) pair."""
    out = StringIO()
    ret = mmcv.track_progress(
        sleep_1s, ((i for i in [1, 2, 3]), 3), bar_width=3, file=out)
    assert out.getvalue() == (
        '[ ] 0/3, elapsed: 0s, ETA:'
        '\r[> ] 1/3, 1.0 task/s, elapsed: 1s, ETA: 2s'
        '\r[>> ] 2/3, 1.0 task/s, elapsed: 2s, ETA: 1s'
        '\r[>>>] 3/3, 1.0 task/s, elapsed: 3s, ETA: 0s\n')
    assert ret == [1, 2, 3]
def test_track_iter_progress():
    """track_iter_progress yields items while rendering the bar."""
    out = StringIO()
    ret = []
    for num in mmcv.track_iter_progress([1, 2, 3], bar_width=3, file=out):
        ret.append(sleep_1s(num))
    assert out.getvalue() == (
        '[ ] 0/3, elapsed: 0s, ETA:'
        '\r[> ] 1/3, 1.0 task/s, elapsed: 1s, ETA: 2s'
        '\r[>> ] 2/3, 1.0 task/s, elapsed: 2s, ETA: 1s'
        '\r[>>>] 3/3, 1.0 task/s, elapsed: 3s, ETA: 0s\n')
    assert ret == [1, 2, 3]
def test_track_enum_progress():
    """track_iter_progress composes with enumerate without losing items."""
    out = StringIO()
    ret = []
    count = []
    for i, num in enumerate(
            mmcv.track_iter_progress([1, 2, 3], bar_width=3, file=out)):
        ret.append(sleep_1s(num))
        count.append(i)
    assert out.getvalue() == (
        '[ ] 0/3, elapsed: 0s, ETA:'
        '\r[> ] 1/3, 1.0 task/s, elapsed: 1s, ETA: 2s'
        '\r[>> ] 2/3, 1.0 task/s, elapsed: 2s, ETA: 1s'
        '\r[>>>] 3/3, 1.0 task/s, elapsed: 3s, ETA: 0s\n')
    assert ret == [1, 2, 3]
    assert count == [0, 1, 2]
def test_track_parallel_progress_list():
    """track_parallel_progress over a list keeps results in input order."""
    out = StringIO()
    results = mmcv.track_parallel_progress(
        sleep_1s, [1, 2, 3, 4], 2, bar_width=4, file=out)
    assert results == [1, 2, 3, 4]
def test_track_parallel_progress_iterator():
    """track_parallel_progress also accepts an (iterator, length) pair."""
    out = StringIO()
    results = mmcv.track_parallel_progress(
        sleep_1s, ((i for i in [1, 2, 3, 4]), 4), 2, bar_width=4, file=out)
    assert results == [1, 2, 3, 4]
def test_registry():
    """End-to-end checks of Registry registration, lookup and deprecations."""
    CATS = mmcv.Registry('cat')
    assert CATS.name == 'cat'
    assert CATS.module_dict == {}
    assert len(CATS) == 0

    @CATS.register_module()
    class BritishShorthair:
        pass

    assert len(CATS) == 1
    assert CATS.get('BritishShorthair') is BritishShorthair

    class Munchkin:
        pass

    CATS.register_module(Munchkin)
    assert len(CATS) == 2
    assert CATS.get('Munchkin') is Munchkin
    assert 'Munchkin' in CATS

    # Duplicate names are rejected unless force=True.
    with pytest.raises(KeyError):
        CATS.register_module(Munchkin)
    CATS.register_module(Munchkin, force=True)
    assert len(CATS) == 2

    with pytest.raises(KeyError):

        @CATS.register_module()
        class BritishShorthair:
            pass

    @CATS.register_module(force=True)
    class BritishShorthair:
        pass

    assert len(CATS) == 2
    # Unknown keys resolve to None and are not "in" the registry.
    assert CATS.get('PersianCat') is None
    assert 'PersianCat' not in CATS

    # One class may be registered under several names.
    @CATS.register_module(name=['Siamese', 'Siamese2'])
    class SiameseCat:
        pass

    assert CATS.get('Siamese').__name__ == 'SiameseCat'
    assert CATS.get('Siamese2').__name__ == 'SiameseCat'

    class SphynxCat:
        pass

    CATS.register_module(name='Sphynx', module=SphynxCat)
    assert CATS.get('Sphynx') is SphynxCat
    CATS.register_module(name=['Sphynx1', 'Sphynx2'], module=SphynxCat)
    assert CATS.get('Sphynx2') is SphynxCat

    repr_str = 'Registry(name=cat, items={'
    repr_str += "'BritishShorthair': <class 'test_registry.test_registry.<locals>.BritishShorthair'>, "
    repr_str += "'Munchkin': <class 'test_registry.test_registry.<locals>.Munchkin'>, "
    repr_str += "'Siamese': <class 'test_registry.test_registry.<locals>.SiameseCat'>, "
    repr_str += "'Siamese2': <class 'test_registry.test_registry.<locals>.SiameseCat'>, "
    repr_str += "'Sphynx': <class 'test_registry.test_registry.<locals>.SphynxCat'>, "
    repr_str += "'Sphynx1': <class 'test_registry.test_registry.<locals>.SphynxCat'>, "
    repr_str += "'Sphynx2': <class 'test_registry.test_registry.<locals>.SphynxCat'>"
    repr_str += '})'
    assert repr(CATS) == repr_str

    # Invalid registration arguments.
    with pytest.raises(TypeError):
        CATS.register_module(name=7474741, module=SphynxCat)
    with pytest.raises(TypeError):
        CATS.register_module(0)
    with pytest.raises(TypeError):

        @CATS.register_module()
        def some_method():
            pass

    # Deprecated calling conventions still work but emit warnings.
    with pytest.warns(DeprecationWarning):
        CATS.register_module(SphynxCat)
    assert CATS.get('SphynxCat').__name__ == 'SphynxCat'
    with pytest.warns(DeprecationWarning):
        CATS.register_module(SphynxCat, force=True)
    assert CATS.get('SphynxCat').__name__ == 'SphynxCat'

    with pytest.warns(DeprecationWarning):

        @CATS.register_module
        class NewCat:
            pass

    assert CATS.get('NewCat').__name__ == 'NewCat'

    with pytest.warns(DeprecationWarning):
        CATS.deprecated_register_module(SphynxCat, force=True)
    assert CATS.get('SphynxCat').__name__ == 'SphynxCat'

    with pytest.warns(DeprecationWarning):

        @CATS.deprecated_register_module
        class CuteCat:
            pass

    assert CATS.get('CuteCat').__name__ == 'CuteCat'

    with pytest.warns(DeprecationWarning):

        @CATS.deprecated_register_module(force=True)
        class NewCat2:
            pass

    assert CATS.get('NewCat2').__name__ == 'NewCat2'
def test_multi_scope_registry():
    """Parent/child registries resolve scoped keys in both directions."""
    DOGS = mmcv.Registry('dogs')
    assert DOGS.name == 'dogs'
    assert DOGS.scope == 'test_registry'
    assert DOGS.module_dict == {}
    assert len(DOGS) == 0

    @DOGS.register_module()
    class GoldenRetriever:
        pass

    assert len(DOGS) == 1
    assert DOGS.get('GoldenRetriever') is GoldenRetriever

    # Child registry with its own scope.
    HOUNDS = mmcv.Registry('dogs', parent=DOGS, scope='hound')

    @HOUNDS.register_module()
    class BloodHound:
        pass

    assert len(HOUNDS) == 1
    assert HOUNDS.get('BloodHound') is BloodHound
    assert DOGS.get('hound.BloodHound') is BloodHound
    assert HOUNDS.get('hound.BloodHound') is BloodHound

    # Grandchild registry.
    LITTLE_HOUNDS = mmcv.Registry('dogs', parent=HOUNDS, scope='little_hound')

    @LITTLE_HOUNDS.register_module()
    class Dachshund:
        pass

    assert len(LITTLE_HOUNDS) == 1
    assert LITTLE_HOUNDS.get('Dachshund') is Dachshund
    assert LITTLE_HOUNDS.get('hound.BloodHound') is BloodHound
    assert HOUNDS.get('little_hound.Dachshund') is Dachshund
    assert DOGS.get('hound.little_hound.Dachshund') is Dachshund

    # Sibling registry: lookups route through the shared ancestors.
    MID_HOUNDS = mmcv.Registry('dogs', parent=HOUNDS, scope='mid_hound')

    @MID_HOUNDS.register_module()
    class Beagle:
        pass

    assert MID_HOUNDS.get('Beagle') is Beagle
    assert HOUNDS.get('mid_hound.Beagle') is Beagle
    assert DOGS.get('hound.mid_hound.Beagle') is Beagle
    assert LITTLE_HOUNDS.get('hound.mid_hound.Beagle') is Beagle
    assert MID_HOUNDS.get('hound.BloodHound') is BloodHound
    # Dachshund lives in little_hound, not directly in hound.
    assert MID_HOUNDS.get('hound.Dachshund') is None
def test_build_from_cfg():
    """build_from_cfg constructs registered classes and validates its inputs."""
    BACKBONES = mmcv.Registry('backbone')

    @BACKBONES.register_module()
    class ResNet:
        def __init__(self, depth, stages=4):
            self.depth = depth
            self.stages = stages

    @BACKBONES.register_module()
    class ResNeXt:
        def __init__(self, depth, stages=4):
            self.depth = depth
            self.stages = stages

    cfg = dict(type='ResNet', depth=50)
    model = mmcv.build_from_cfg(cfg, BACKBONES)
    assert isinstance(model, ResNet)
    assert model.depth == 50 and model.stages == 4

    # default_args fill in constructor arguments not present in cfg.
    cfg = dict(type='ResNet', depth=50)
    model = mmcv.build_from_cfg(cfg, BACKBONES, default_args={'stages': 3})
    assert isinstance(model, ResNet)
    assert model.depth == 50 and model.stages == 3

    cfg = dict(type='ResNeXt', depth=50, stages=3)
    model = mmcv.build_from_cfg(cfg, BACKBONES)
    assert isinstance(model, ResNeXt)
    assert model.depth == 50 and model.stages == 3

    # 'type' may be the class itself rather than its name.
    cfg = dict(type=ResNet, depth=50)
    model = mmcv.build_from_cfg(cfg, BACKBONES)
    assert isinstance(model, ResNet)
    assert model.depth == 50 and model.stages == 4

    # 'type' may come entirely from default_args (name or class).
    cfg = dict(depth=50)
    model = mmcv.build_from_cfg(
        cfg, BACKBONES, default_args=dict(type='ResNet'))
    assert isinstance(model, ResNet)
    assert model.depth == 50 and model.stages == 4

    cfg = dict(depth=50)
    model = mmcv.build_from_cfg(
        cfg, BACKBONES, default_args=dict(type=ResNet))
    assert isinstance(model, ResNet)
    assert model.depth == 50 and model.stages == 4

    # Invalid registry object.
    with pytest.raises(TypeError):
        cfg = dict(type='VGG')
        model = mmcv.build_from_cfg(cfg, 'BACKBONES')
    # Unregistered type name.
    with pytest.raises(KeyError):
        cfg = dict(type='VGG')
        model = mmcv.build_from_cfg(cfg, BACKBONES)
    # default_args must be a dict (or None).
    with pytest.raises(TypeError):
        cfg = dict(type='ResNet', depth=50)
        model = mmcv.build_from_cfg(cfg, BACKBONES, default_args=1)
    # 'type' must be a string or class.
    with pytest.raises(TypeError):
        cfg = dict(type=1000)
        model = mmcv.build_from_cfg(cfg, BACKBONES)

    # 'type' missing from both cfg and default_args.
    with pytest.raises(KeyError, match='must contain the key "type"'):
        cfg = dict(depth=50, stages=4)
        model = mmcv.build_from_cfg(cfg, BACKBONES)
    with pytest.raises(KeyError, match='must contain the key "type"'):
        cfg = dict(depth=50)
        model = mmcv.build_from_cfg(
            cfg, BACKBONES, default_args=dict(stages=4))

    with pytest.raises(TypeError):
        cfg = dict(type='ResNet', depth=50)
        model = mmcv.build_from_cfg(cfg, 'BACKBONES')
    with pytest.raises(TypeError):
        cfg = dict(type='ResNet', depth=50)
        model = mmcv.build_from_cfg(cfg, BACKBONES, default_args=0)
    # Unexpected constructor argument propagates as TypeError.
    with pytest.raises(TypeError):
        cfg = dict(type='ResNet', non_existing_arg=50)
        model = mmcv.build_from_cfg(cfg, BACKBONES)
def test_assert_dict_contains_subset():
    """Subset check over plain values, numpy arrays and torch tensors."""
    dict_obj = {'a': 'test1', 'b': 2, 'c': (4, 6)}

    expected_subset = {'a': 'test1', 'b': 2, 'c': (4, 6)}
    assert mmcv.assert_dict_contains_subset(dict_obj, expected_subset)

    # Same key, different value.
    expected_subset = {'a': 'test1', 'b': 2, 'c': (6, 4)}
    assert not mmcv.assert_dict_contains_subset(dict_obj, expected_subset)

    expected_subset = {'a': 'test1', 'b': 2, 'c': None}
    assert not mmcv.assert_dict_contains_subset(dict_obj, expected_subset)

    # Key absent from dict_obj.
    expected_subset = {'a': 'test1', 'b': 2, 'd': (4, 6)}
    assert not mmcv.assert_dict_contains_subset(dict_obj, expected_subset)

    # numpy array values are compared element-wise.
    dict_obj = {'a': 'test1', 'b': 2, 'c': (4, 6),
                'd': np.array([[5, 3, 5], [1, 2, 3]])}
    expected_subset = {'a': 'test1', 'b': 2, 'c': (4, 6),
                       'd': np.array([[5, 3, 5], [6, 2, 3]])}
    assert not mmcv.assert_dict_contains_subset(dict_obj, expected_subset)

    dict_obj = {'a': 'test1', 'b': 2, 'c': (4, 6), 'd': np.array([[1]])}
    expected_subset = {'a': 'test1', 'b': 2, 'c': (4, 6), 'd': np.array([[1]])}
    assert mmcv.assert_dict_contains_subset(dict_obj, expected_subset)

    # torch tensors, including a shape mismatch.
    if torch is not None:
        dict_obj = {'a': 'test1', 'b': 2, 'c': (4, 6),
                    'd': torch.tensor([5, 3, 5])}
        expected_subset = {'d': torch.tensor([5, 5, 5])}
        assert not mmcv.assert_dict_contains_subset(dict_obj, expected_subset)
        expected_subset = {'d': torch.tensor([[5, 3, 5], [4, 1, 2]])}
        assert not mmcv.assert_dict_contains_subset(dict_obj, expected_subset)
def test_assert_attrs_equal():
    """Attribute comparison, including methods and torch tensors."""
    class TestExample:
        a, b, c = 1, ('wvi', 3), [4.5, 3.14]

        def test_func(self):
            return self.b

    assert mmcv.assert_attrs_equal(
        TestExample, {'a': 1, 'b': ('wvi', 3), 'c': [4.5, 3.14]})
    # Wrong value.
    assert not mmcv.assert_attrs_equal(
        TestExample, {'a': 1, 'b': ('wvi', 3), 'c': [4.5, 3.14, 2]})
    # Missing attribute.
    assert not mmcv.assert_attrs_equal(
        TestExample, {'bc': 54, 'c': [4.5, 3.14]})
    # Methods compare by identity.
    assert mmcv.assert_attrs_equal(
        TestExample, {'b': ('wvi', 3), 'test_func': TestExample.test_func})

    if torch is not None:
        class TestExample:
            a, b = torch.tensor([1]), torch.tensor([4, 5])

        assert mmcv.assert_attrs_equal(
            TestExample,
            {'a': torch.tensor([1]), 'b': torch.tensor([4, 5])})
        assert not mmcv.assert_attrs_equal(
            TestExample,
            {'a': torch.tensor([1]), 'b': torch.tensor([4, 6])})
@pytest.mark.parametrize('obj', assert_dict_has_keys_data_1)
@pytest.mark.parametrize('expected_keys, ret_value', assert_dict_has_keys_data_2)
def test_assert_dict_has_keys(obj, expected_keys, ret_value):
    """assert_dict_has_keys returns the expected verdict for each fixture."""
    assert mmcv.assert_dict_has_keys(obj, expected_keys) == ret_value
@pytest.mark.parametrize('result_keys', assert_keys_equal_data_1)
@pytest.mark.parametrize('target_keys, ret_value', assert_keys_equal_data_2)
def test_assert_keys_equal(result_keys, target_keys, ret_value):
    """assert_keys_equal returns the expected verdict for each fixture."""
    assert mmcv.assert_keys_equal(result_keys, target_keys) == ret_value
@pytest.mark.skipif(torch is None, reason='requires torch library')
def test_assert_is_norm_layer():
    """Norm layers are recognised; conv and activation layers are not."""
    # Not norm layers.
    assert not mmcv.assert_is_norm_layer(nn.Conv3d(3, 64, 3))
    assert not mmcv.assert_is_norm_layer(nn.Sigmoid())
    # Norm layers.
    assert mmcv.assert_is_norm_layer(nn.BatchNorm3d(128))
    assert mmcv.assert_is_norm_layer(nn.GroupNorm(8, 64))
@pytest.mark.skipif((torch is None), reason='requires torch library')
def test_assert_params_all_zeros():
    """assert_params_all_zeros is True only when every parameter is zero."""
    # conv with zeroed weight and bias
    conv = nn.Conv2d(3, 64, 3)
    nn.init.constant_(conv.weight, 0)
    nn.init.constant_(conv.bias, 0)
    assert mmcv.assert_params_all_zeros(conv)
    # non-zero weight breaks the check
    nn.init.xavier_normal_(conv.weight)
    nn.init.constant_(conv.bias, 0)
    assert not mmcv.assert_params_all_zeros(conv)
    # bias-free linear layer
    fc = nn.Linear(2048, 400, bias=False)
    nn.init.constant_(fc.weight, 0)
    assert mmcv.assert_params_all_zeros(fc)
    nn.init.normal_(fc.weight, mean=0, std=0.01)
    assert not mmcv.assert_params_all_zeros(fc)
def test_check_python_script(capsys):
    """check_python_script runs a script's argv and surfaces its exit status."""
    mmcv.utils.check_python_script('./tests/data/scripts/hello.py zz')
    captured = capsys.readouterr().out
    assert captured == 'hello zz!\n'
    mmcv.utils.check_python_script('./tests/data/scripts/hello.py agent')
    captured = capsys.readouterr().out
    assert captured == 'hello agent!\n'
    # too many arguments -> the script exits non-zero
    with pytest.raises(SystemExit):
        mmcv.utils.check_python_script('./tests/data/scripts/hello.py li zz')
def test_timer_init():
    """Timer starts immediately by default, or lazily with start=False."""
    paused = mmcv.Timer(start=False)
    assert not paused.is_running
    paused.start()
    assert paused.is_running
    running = mmcv.Timer()
    assert running.is_running
def test_timer_run():
    """since_start/since_last_check track wall time; unstarted timers raise."""
    timer = mmcv.Timer()
    time.sleep(1)
    assert abs(timer.since_start() - 1) < 0.01
    time.sleep(1)
    assert abs(timer.since_last_check() - 1) < 0.01
    assert abs(timer.since_start() - 2) < 0.01
    # querying a never-started timer is an error
    timer = mmcv.Timer(False)
    with pytest.raises(mmcv.TimerError):
        timer.since_start()
    with pytest.raises(mmcv.TimerError):
        timer.since_last_check()
def test_timer_context(capsys):
    """As a context manager, Timer prints the elapsed time on exit."""
    with mmcv.Timer():
        time.sleep(1)
    out, _ = capsys.readouterr()
    assert abs(float(out) - 1) < 0.01
    # a custom template formats the printed duration
    with mmcv.Timer(print_tmpl='time: {:.1f}s'):
        time.sleep(1)
    out, _ = capsys.readouterr()
    assert out == 'time: 1.0s\n'
@pytest.mark.skipif((digit_version(torch.__version__) < digit_version('1.6.0')),
                    reason='torch.jit.is_tracing is not available before 1.6.0')
def test_is_jit_tracing():
    """is_jit_tracing distinguishes eager execution from torch.jit tracing."""

    def foo(x):
        # tracing keeps the tensor; eager mode converts to a Python list
        if is_jit_tracing():
            return x
        return x.tolist()

    x = torch.rand(3)
    assert isinstance(foo(x), list)
    traced_foo = torch.jit.trace(foo, (torch.rand(1),))
    assert isinstance(traced_foo(x), torch.Tensor)
def test_digit_version():
    """digit_version maps version strings to comparable 6-tuples."""
    # plain releases
    assert digit_version('0.2.16') == (0, 2, 16, 0, 0, 0)
    assert digit_version('1.2.3') == (1, 2, 3, 0, 0, 0)
    # release candidates sort below the release
    assert digit_version('1.2.3rc0') == (1, 2, 3, 0, -1, 0)
    assert digit_version('1.2.3rc1') == (1, 2, 3, 0, -1, 1)
    assert digit_version('1.0rc0') == (1, 0, 0, 0, -1, 0)
    # padding and local version labels
    assert digit_version('1.0') == digit_version('1.0.0')
    assert digit_version('1.5.0+cuda90_cudnn7.6.3_lms') == digit_version('1.5')
    # pre/post release ordering
    assert digit_version('1.0.0dev') < digit_version('1.0.0a')
    assert digit_version('1.0.0a') < digit_version('1.0.0a1')
    assert digit_version('1.0.0a') < digit_version('1.0.0b')
    assert digit_version('1.0.0b') < digit_version('1.0.0rc')
    assert digit_version('1.0.0rc1') < digit_version('1.0.0')
    assert digit_version('1.0.0') < digit_version('1.0.0post')
    assert digit_version('1.0.0post') < digit_version('1.0.0post1')
    # leading 'v' prefix
    assert digit_version('v1') == (1, 0, 0, 0, 0, 0)
    assert digit_version('v1.1.5') == (1, 1, 5, 0, 0, 0)
    # malformed strings are rejected
    with pytest.raises(AssertionError):
        digit_version('a')
    with pytest.raises(AssertionError):
        digit_version('1x')
    with pytest.raises(AssertionError):
        digit_version('1.x')
def test_parse_version_info():
    """parse_version_info yields 6-tuples with an 'rc' marker for candidates."""
    assert parse_version_info('0.2.16') == (0, 2, 16, 0, 0, 0)
    assert parse_version_info('1.2.3') == (1, 2, 3, 0, 0, 0)
    assert parse_version_info('1.2.3rc0') == (1, 2, 3, 0, 'rc', 0)
    assert parse_version_info('1.2.3rc1') == (1, 2, 3, 0, 'rc', 1)
    assert parse_version_info('1.0rc0') == (1, 0, 0, 0, 'rc', 0)
def _mock_cmd_success(cmd): return '3b46d33e90c397869ad5103075838fdfc9812aa0'.encode('ascii')
def _mock_cmd_fail(cmd): raise OSError
def test_get_git_hash():
    """get_git_hash truncates the hash and falls back when git is unavailable."""
    with patch('mmcv.utils.version_utils._minimal_ext_cmd', _mock_cmd_success):
        assert get_git_hash() == '3b46d33e90c397869ad5103075838fdfc9812aa0'
        assert get_git_hash(digits=6) == '3b46d3'
        # requesting more digits than exist returns the full hash
        assert get_git_hash(digits=100) == get_git_hash()
    with patch('mmcv.utils.version_utils._minimal_ext_cmd', _mock_cmd_fail):
        assert get_git_hash() == 'unknown'
        assert get_git_hash(fallback='n/a') == 'n/a'
class TestVideoEditor(object):
    """Tests for mmcv's ffmpeg-based cut/concat/resize helpers."""

    @classmethod
    def setup_class(cls):
        cls.video_path = osp.join(osp.dirname(__file__), '../data/test.mp4')
        cls.num_frames = 168

    @pytest.mark.skipif(platform.system() == 'Windows', reason='skip windows')
    def test_cut_concat_video(self):
        part1_file = osp.join(tempfile.gettempdir(), '.mmcv_test1.mp4')
        part2_file = osp.join(tempfile.gettempdir(), '.mmcv_test2.mp4')
        mmcv.cut_video(self.video_path, part1_file, end=3, vcodec='h264')
        mmcv.cut_video(self.video_path, part2_file, start=3, vcodec='h264')
        v1 = mmcv.VideoReader(part1_file)
        v2 = mmcv.VideoReader(part2_file)
        # 3 s at 25 fps == 75 frames
        assert len(v1) == 75
        assert len(v2) == self.num_frames - 75
        out_file = osp.join(tempfile.gettempdir(), '.mmcv_test.mp4')
        mmcv.concat_video([part1_file, part2_file], out_file)
        v = mmcv.VideoReader(out_file)
        assert len(v) == self.num_frames
        os.remove(part1_file)
        os.remove(part2_file)
        os.remove(out_file)

    @pytest.mark.skipif(platform.system() == 'Windows', reason='skip windows')
    def test_resize_video(self):
        out_file = osp.join(tempfile.gettempdir(), '.mmcv_test.mp4')
        # explicit target size
        mmcv.resize_video(self.video_path, out_file, (200, 100), log_level='panic')
        v = mmcv.VideoReader(out_file)
        assert v.resolution == (200, 100)
        os.remove(out_file)
        # uniform scale factor
        mmcv.resize_video(self.video_path, out_file, ratio=2)
        v = mmcv.VideoReader(out_file)
        assert v.resolution == (294 * 2, 240 * 2)
        os.remove(out_file)
        # keep aspect ratio against a bounding size
        mmcv.resize_video(self.video_path, out_file, (1000, 480), keep_ar=True)
        v = mmcv.VideoReader(out_file)
        assert v.resolution == (294 * 2, 240 * 2)
        os.remove(out_file)
        # per-axis ratios with keep_ar use the smaller scale
        mmcv.resize_video(self.video_path, out_file, ratio=(2, 1.5), keep_ar=True)
        v = mmcv.VideoReader(out_file)
        assert v.resolution == (294 * 2, 360)
        os.remove(out_file)
class TestCache(object):
    """Tests for mmcv.Cache: capacity validation, LRU eviction and lookup."""

    def test_init(self):
        # zero capacity is rejected
        with pytest.raises(ValueError):
            mmcv.Cache(0)
        cache = mmcv.Cache(100)
        assert cache.capacity == 100
        assert cache.size == 0

    def test_put(self):
        cache = mmcv.Cache(3)
        for i in range(1, 4):
            cache.put(f'k{i}', i)
            assert cache.size == i
        assert cache._cache == OrderedDict([('k1', 1), ('k2', 2), ('k3', 3)])
        # a fourth entry evicts the oldest ('k1')
        cache.put('k4', 4)
        assert cache.size == 3
        assert cache._cache == OrderedDict([('k2', 2), ('k3', 3), ('k4', 4)])
        # re-putting an existing key leaves the order unchanged
        cache.put('k2', 2)
        assert cache._cache == OrderedDict([('k2', 2), ('k3', 3), ('k4', 4)])

    def test_get(self):
        cache = mmcv.Cache(3)
        assert cache.get('key_none') is None
        assert cache.get('key_none', 0) == 0
        cache.put('k1', 1)
        assert cache.get('k1') == 1
class TestVideoReader(object):
    """Tests for mmcv.VideoReader: loading, indexing, slicing, iteration and
    frame <-> video conversion, against a local clip and a remote URL."""

    @classmethod
    def setup_class(cls):
        cls.video_path = osp.join(osp.dirname(__file__), '../data/test.mp4')
        cls.num_frames = 168
        cls.video_url = 'https://www.learningcontainer.com/wp-content/uploads/2020/05/sample-mp4-file.mp4'

    def test_load(self):
        v = mmcv.VideoReader(self.video_path)
        assert v.width == 294
        assert v.height == 240
        assert v.fps == 25
        assert v.frame_cnt == self.num_frames
        assert len(v) == self.num_frames
        assert v.opened
        import cv2
        assert isinstance(v.vcap, type(cv2.VideoCapture()))
        # streaming from a URL works the same way
        v = mmcv.VideoReader(self.video_url)
        assert v.width == 320
        assert v.height == 240
        assert v.fps == 15
        assert v.frame_cnt == 1889
        assert len(v) == 1889
        assert v.opened
        assert isinstance(v.vcap, type(cv2.VideoCapture()))

    def test_read(self):
        v = mmcv.VideoReader(self.video_path)
        img = v.read()
        assert int(round(img.mean())) == 94
        img = v.get_frame(63)
        assert int(round(img.mean())) == 94
        img = v[64]
        assert int(round(img.mean())) == 205
        img = v[-104]
        assert int(round(img.mean())) == 205
        img = v[63]
        assert int(round(img.mean())) == 94
        img = v[-105]
        assert int(round(img.mean())) == 94
        # random access does not disturb the sequential cursor
        img = v.read()
        assert int(round(img.mean())) == 205
        with pytest.raises(IndexError):
            v.get_frame(self.num_frames + 1)
        with pytest.raises(IndexError):
            v[-self.num_frames - 1]

    def test_slice(self):
        v = mmcv.VideoReader(self.video_path)
        imgs = v[-105:-103]
        assert int(round(imgs[0].mean())) == 94
        assert int(round(imgs[1].mean())) == 205
        assert len(imgs) == 2
        imgs = v[63:65]
        assert int(round(imgs[0].mean())) == 94
        assert int(round(imgs[1].mean())) == 205
        assert len(imgs) == 2
        # negative step reverses the order
        imgs = v[64:62:-1]
        assert int(round(imgs[0].mean())) == 205
        assert int(round(imgs[1].mean())) == 94
        assert len(imgs) == 2
        imgs = v[:5]
        assert len(imgs) == 5
        for img in imgs:
            assert int(round(img.mean())) == 94
        imgs = v[165:]
        assert len(imgs) == 3
        for img in imgs:
            assert int(round(img.mean())) == 0
        imgs = v[-3:]
        assert len(imgs) == 3
        for img in imgs:
            assert int(round(img.mean())) == 0

    def test_current_frame(self):
        v = mmcv.VideoReader(self.video_path)
        assert v.current_frame() is None
        v.read()
        img = v.current_frame()
        assert int(round(img.mean())) == 94

    def test_position(self):
        v = mmcv.VideoReader(self.video_path)
        assert v.position == 0
        for _ in range(10):
            v.read()
        assert v.position == 10
        v.get_frame(99)
        assert v.position == 100

    def test_iterator(self):
        cnt = 0
        for img in mmcv.VideoReader(self.video_path):
            cnt += 1
            assert img.shape == (240, 294, 3)
        assert cnt == self.num_frames

    def test_with(self):
        with mmcv.VideoReader(self.video_path) as v:
            assert v.opened
        # leaving the context releases the capture
        assert not v.opened

    def test_cvt2frames(self):
        v = mmcv.VideoReader(self.video_path)
        frame_dir = tempfile.mkdtemp()
        v.cvt2frames(frame_dir)
        assert osp.isdir(frame_dir)
        for i in range(self.num_frames):
            filename = f'{frame_dir}/{i:06d}.jpg'
            assert osp.isfile(filename)
            os.remove(filename)
        v = mmcv.VideoReader(self.video_path)
        v.cvt2frames(frame_dir, show_progress=False)
        assert osp.isdir(frame_dir)
        for i in range(self.num_frames):
            filename = f'{frame_dir}/{i:06d}.jpg'
            assert osp.isfile(filename)
            os.remove(filename)
        # custom numbering: dump 20 frames starting at frame 100
        v = mmcv.VideoReader(self.video_path)
        v.cvt2frames(frame_dir, file_start=100, filename_tmpl='{:03d}.JPEG',
                     start=100, max_num=20)
        assert osp.isdir(frame_dir)
        for i in range(100, 120):
            filename = f'{frame_dir}/{i:03d}.JPEG'
            assert osp.isfile(filename)
            os.remove(filename)
        shutil.rmtree(frame_dir)

    def test_frames2video(self):
        v = mmcv.VideoReader(self.video_path)
        frame_dir = tempfile.mkdtemp()
        v.cvt2frames(frame_dir)
        assert osp.isdir(frame_dir)
        for i in range(self.num_frames):
            filename = f'{frame_dir}/{i:06d}.jpg'
            assert osp.isfile(filename)
        out_filename = osp.join(tempfile.gettempdir(), 'mmcv_test.avi')
        mmcv.frames2video(frame_dir, out_filename)
        v = mmcv.VideoReader(out_filename)
        assert v.fps == 30
        assert len(v) == self.num_frames
        # re-encode a 40-frame sub-range at 25 fps
        mmcv.frames2video(frame_dir, out_filename, fps=25, start=10, end=50,
                          show_progress=False)
        with mmcv.VideoReader(out_filename) as v:
            assert v.fps == 25
            assert len(v) == 40
        for i in range(self.num_frames):
            filename = f'{frame_dir}/{i:06d}.jpg'
            os.remove(filename)
        shutil.rmtree(frame_dir)
def test_color():
    """color_val accepts enums, names, tuples, ints and ndarrays (BGR order)."""
    assert mmcv.color_val(mmcv.Color.blue) == (255, 0, 0)
    assert mmcv.color_val('green') == (0, 255, 0)
    assert mmcv.color_val((1, 2, 3)) == (1, 2, 3)
    assert mmcv.color_val(100) == (100, 100, 100)
    assert mmcv.color_val(np.zeros(3, dtype=int)) == (0, 0, 0)
    # lists and floats are not valid color specs
    with pytest.raises(TypeError):
        mmcv.color_val([255, 255, 255])
    with pytest.raises(TypeError):
        mmcv.color_val(1.0)
    # channel values must be within [0, 255]
    with pytest.raises(AssertionError):
        mmcv.color_val((0, 0, 500))
def digit_version(version_str):
    """Convert a version string into a list of ints for comparison.

    Release-candidate components sort below the corresponding release:
    ``'1.2.3rc1'`` becomes ``[1, 2, 2, 1]`` (patch decremented, rc number
    appended). Components that are neither digits nor contain ``'rc'`` are
    silently skipped.
    """
    parts = []
    for token in version_str.split('.'):
        if token.isdigit():
            parts.append(int(token))
        elif 'rc' in token:
            rc_split = token.split('rc')
            # decrement the patch so rc versions order before the release
            parts.append(int(rc_split[0]) - 1)
            parts.append(int(rc_split[1]))
    return parts
def init_detector(config, checkpoint=None, device='cuda:0', cfg_options=None):
    """Initialize a detector from config file.

    Args:
        config (str or :obj:`mmcv.Config`): Config file path or the config
            object.
        checkpoint (str, optional): Checkpoint path. If left as None, the model
            will not load any weights.
        device (str): Device to place the model on. Default: 'cuda:0'.
        cfg_options (dict): Options to override some settings in the used
            config.

    Returns:
        nn.Module: The constructed detector.
    """
    if isinstance(config, str):
        config = mmcv.Config.fromfile(config)
    elif not isinstance(config, mmcv.Config):
        raise TypeError(
            f'config must be a filename or Config object, but got {type(config)}')
    if cfg_options is not None:
        config.merge_from_dict(cfg_options)
    # drop any pretrained/init weights: the checkpoint (if given) supplies them
    if 'pretrained' in config.model:
        config.model.pretrained = None
    elif 'init_cfg' in config.model.backbone:
        config.model.backbone.init_cfg = None
    config.model.train_cfg = None
    model = build_detector(config.model, test_cfg=config.get('test_cfg'))
    if checkpoint is not None:
        checkpoint = load_checkpoint(model, checkpoint, map_location='cpu')
        if 'CLASSES' in checkpoint.get('meta', {}):
            model.CLASSES = checkpoint['meta']['CLASSES']
        else:
            warnings.simplefilter('once')
            warnings.warn("Class names are not saved in the checkpoint's "
                          'meta data, use COCO classes by default.')
            model.CLASSES = get_classes('coco')
    # keep the config on the model for later convenience (e.g. inference)
    model.cfg = config
    model.to(device)
    model.eval()
    return model
class LoadImage(object):
    """Deprecated.

    A simple pipeline to load image.
    """

    def __call__(self, results):
        """Call function to load images into results.

        Args:
            results (dict): A result dict contains the file name
                of the image to be read.

        Returns:
            dict: ``results`` will be returned containing loaded image.
        """
        warnings.simplefilter('once')
        warnings.warn('`LoadImage` is deprecated and will be removed in '
                      'future releases. You may use `LoadImageFromWebcam` '
                      'from `mmdet.datasets.pipelines.` instead.')
        # record the source filename when given a path, else mark as unknown
        if isinstance(results['img'], str):
            results['filename'] = results['img']
            results['ori_filename'] = results['img']
        else:
            results['filename'] = None
            results['ori_filename'] = None
        img = mmcv.imread(results['img'])
        results['img'] = img
        results['img_fields'] = ['img']
        results['img_shape'] = img.shape
        results['ori_shape'] = img.shape
        return results
def inference_detector(model, imgs):
    """Inference image(s) with the detector.

    Args:
        model (nn.Module): The loaded detector.
        imgs (str/ndarray or list[str/ndarray] or tuple[str/ndarray]):
            Either image files or loaded images.

    Returns:
        If imgs is a list or tuple, the same length list type results
        will be returned, otherwise return the detection results directly.
    """
    is_batch = isinstance(imgs, (list, tuple))
    if not is_batch:
        imgs = [imgs]

    cfg = model.cfg
    device = next(model.parameters()).device
    if isinstance(imgs[0], np.ndarray):
        # already-decoded arrays bypass file loading
        cfg = cfg.copy()
        cfg.data.test.pipeline[0].type = 'LoadImageFromWebcam'
    cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline)
    test_pipeline = Compose(cfg.data.test.pipeline)

    datas = []
    for img in imgs:
        if isinstance(img, np.ndarray):
            data = dict(img=img)
        else:
            data = dict(img_info=dict(filename=img), img_prefix=None)
        datas.append(test_pipeline(data))
    data = collate(datas, samples_per_gpu=len(imgs))
    # unwrap DataContainers into plain lists of tensors / metas
    data['img_metas'] = [img_metas.data[0] for img_metas in data['img_metas']]
    data['img'] = [img.data[0] for img in data['img']]
    if next(model.parameters()).is_cuda:
        data = scatter(data, [device])[0]
    else:
        for m in model.modules():
            assert not isinstance(m, RoIPool), \
                'CPU inference with RoIPool is not supported currently.'

    with torch.no_grad():
        results = model(return_loss=False, rescale=True, **data)
    return results if is_batch else results[0]
def show_result_pyplot(model, img, result, score_thr=0.3, title='result', wait_time=0, palette=None):
    """Visualize the detection results on the image.

    Args:
        model (nn.Module): The loaded detector.
        img (str or np.ndarray): Image filename or loaded image.
        result (tuple[list] or list): The detection result, can be either
            (bbox, segm) or just bbox.
        score_thr (float): The threshold to visualize the bboxes and masks.
        title (str): Title of the pyplot figure.
        wait_time (float): Value of waitKey param.
            Default: 0.
        palette (str or tuple(int) or :obj:`Color`, optional): Color used for
            bboxes and masks. If None, the model's default palette is used.
            Default: None.
    """
    # unwrap DataParallel-style wrappers before calling show_result
    if hasattr(model, 'module'):
        model = model.module
    model.show_result(
        img,
        result,
        score_thr=score_thr,
        show=True,
        wait_time=wait_time,
        win_name=title,
        bbox_color=palette,
        text_color=(200, 200, 200),
        mask_color=palette)
def init_random_seed(seed=None, device='cuda'):
    """Initialize random seed.

    If the seed is not set, the seed will be automatically randomized,
    and then broadcast to all processes to prevent some potential bugs.

    Args:
        seed (int, Optional): The seed. Default to None.
        device (str): The device where the seed will be put on.
            Default to 'cuda'.

    Returns:
        int: Seed to be used.
    """
    if seed is not None:
        return seed

    rank, world_size = get_dist_info()
    seed = np.random.randint(2**31)
    if world_size == 1:
        return seed

    # in distributed mode, rank 0 draws the seed and broadcasts it so that
    # every process ends up with the same value
    if rank == 0:
        random_num = torch.tensor(seed, dtype=torch.int32, device=device)
    else:
        random_num = torch.tensor(0, dtype=torch.int32, device=device)
    dist.broadcast(random_num, src=0)
    return random_num.item()
def set_random_seed(seed, deterministic=False):
    """Set random seed.

    Args:
        seed (int): Seed to be used.
        deterministic (bool): Whether to set the deterministic option for
            CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`
            to True and `torch.backends.cudnn.benchmark` to False.
            Default: False.
    """
    # seed every RNG the training loop may draw from
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    if deterministic:
        # trade speed for reproducibility in cuDNN kernels
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
def train_detector(model, dataset, cfg, distributed=False, validate=False, timestamp=None, meta=None):
    """Build dataloaders, wrap the model, construct the runner with its hooks
    and launch training (optionally with periodic validation)."""
    logger = get_root_logger(log_level=cfg.log_level)
    dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]

    # legacy key: 'imgs_per_gpu' was renamed to 'samples_per_gpu' in MMDet 2.0
    if 'imgs_per_gpu' in cfg.data:
        logger.warning('"imgs_per_gpu" is deprecated in MMDet V2.0. Please use "samples_per_gpu" instead')
        if 'samples_per_gpu' in cfg.data:
            logger.warning(
                f'Got "imgs_per_gpu"={cfg.data.imgs_per_gpu} and '
                f'"samples_per_gpu"={cfg.data.samples_per_gpu}, '
                f'"imgs_per_gpu"={cfg.data.imgs_per_gpu} is used in this experiments')
        else:
            logger.warning(
                f'Automatically set "samples_per_gpu"="imgs_per_gpu"={cfg.data.imgs_per_gpu} in this experiments')
        cfg.data.samples_per_gpu = cfg.data.imgs_per_gpu

    runner_type = 'EpochBasedRunner' if 'runner' not in cfg else cfg.runner['type']
    data_loaders = [
        build_dataloader(
            ds,
            cfg.data.samples_per_gpu,
            cfg.data.workers_per_gpu,
            num_gpus=len(cfg.gpu_ids),
            dist=distributed,
            seed=cfg.seed,
            runner_type=runner_type,
            persistent_workers=cfg.data.get('persistent_workers', False))
        for ds in dataset
    ]

    # parallel wrapper: DDP for multi-process, DP otherwise
    if distributed:
        find_unused_parameters = cfg.get('find_unused_parameters', False)
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False,
            find_unused_parameters=find_unused_parameters)
    else:
        model = MMDataParallel(model, device_ids=cfg.gpu_ids)

    optimizer = build_optimizer(model, cfg.optimizer)

    if 'runner' not in cfg:
        cfg.runner = {'type': 'EpochBasedRunner', 'max_epochs': cfg.total_epochs}
        warnings.warn(
            'config is now expected to have a `runner` section, please set `runner` in your config.',
            UserWarning)
    elif 'total_epochs' in cfg:
        assert cfg.total_epochs == cfg.runner.max_epochs

    runner = build_runner(
        cfg.runner,
        default_args=dict(
            model=model,
            optimizer=optimizer,
            work_dir=cfg.work_dir,
            logger=logger,
            meta=meta))
    # an ugly workaround to make .log and .log.json filenames the same
    runner.timestamp = timestamp

    # fp16 takes precedence; otherwise plain OptimizerHook unless a typed one
    # is configured explicitly
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        optimizer_config = Fp16OptimizerHook(**cfg.optimizer_config, **fp16_cfg, distributed=distributed)
    elif distributed and 'type' not in cfg.optimizer_config:
        optimizer_config = OptimizerHook(**cfg.optimizer_config)
    else:
        optimizer_config = cfg.optimizer_config

    runner.register_training_hooks(
        cfg.lr_config,
        optimizer_config,
        cfg.checkpoint_config,
        cfg.log_config,
        cfg.get('momentum_config', None),
        custom_hooks_config=cfg.get('custom_hooks', None))
    if distributed:
        if isinstance(runner, EpochBasedRunner):
            runner.register_hook(DistSamplerSeedHook())

    if validate:
        val_samples_per_gpu = cfg.data.val.pop('samples_per_gpu', 1)
        if val_samples_per_gpu > 1:
            # batched eval cannot keep ImageToTensor in the pipeline
            cfg.data.val.pipeline = replace_ImageToTensor(cfg.data.val.pipeline)
        val_dataset = build_dataset(cfg.data.val, dict(test_mode=True))
        val_dataloader = build_dataloader(
            val_dataset,
            samples_per_gpu=val_samples_per_gpu,
            workers_per_gpu=cfg.data.workers_per_gpu,
            dist=distributed,
            shuffle=False)
        eval_cfg = cfg.get('evaluation', {})
        eval_cfg['by_epoch'] = cfg.runner['type'] != 'IterBasedRunner'
        eval_hook = DistEvalHook if distributed else EvalHook
        runner.register_hook(eval_hook(val_dataloader, **eval_cfg), priority='LOW')

    # resolve checkpoint resumption: explicit resume_from wins, then
    # auto-resume from the latest checkpoint in work_dir, then load_from
    resume_from = None
    if cfg.resume_from is None and cfg.get('auto_resume'):
        resume_from = find_latest_checkpoint(cfg.work_dir)
    if resume_from is not None:
        cfg.resume_from = resume_from
    if cfg.resume_from:
        runner.resume(cfg.resume_from)
    elif cfg.load_from:
        runner.load_checkpoint(cfg.load_from)
    runner.run(data_loaders, cfg.workflow)
def build_prior_generator(cfg, default_args=None):
    """Build a prior (anchor/point) generator from config via the registry."""
    return build_from_cfg(cfg, PRIOR_GENERATORS, default_args)
def build_anchor_generator(cfg, default_args=None):
    """Deprecated alias of :func:`build_prior_generator`."""
    warnings.warn('``build_anchor_generator`` would be deprecated soon, please use ``build_prior_generator`` ')
    return build_prior_generator(cfg, default_args=default_args)
@PRIOR_GENERATORS.register_module()
class PointGenerator(object):
    """Single-level point generator producing one (x, y, stride) prior per
    feature-map cell."""

    def _meshgrid(self, x, y, row_major=True):
        # expand 1-D coordinate vectors into a flattened 2-D grid
        xx = x.repeat(len(y))
        yy = y.view(-1, 1).repeat(1, len(x)).view(-1)
        return (xx, yy) if row_major else (yy, xx)

    def grid_points(self, featmap_size, stride=16, device='cuda'):
        """Return (N, 3) tensor of (x, y, stride) for every grid cell."""
        feat_h, feat_w = featmap_size
        shift_x = torch.arange(0.0, feat_w, device=device) * stride
        shift_y = torch.arange(0.0, feat_h, device=device) * stride
        shift_xx, shift_yy = self._meshgrid(shift_x, shift_y)
        stride = shift_x.new_full((shift_xx.shape[0],), stride)
        shifts = torch.stack([shift_xx, shift_yy, stride], dim=-1)
        return shifts.to(device)

    def valid_flags(self, featmap_size, valid_size, device='cuda'):
        """Return a bool mask of grid cells inside the valid (unpadded) region."""
        feat_h, feat_w = featmap_size
        valid_h, valid_w = valid_size
        assert valid_h <= feat_h and valid_w <= feat_w
        valid_x = torch.zeros(feat_w, dtype=torch.bool, device=device)
        valid_y = torch.zeros(feat_h, dtype=torch.bool, device=device)
        valid_x[:valid_w] = 1
        valid_y[:valid_h] = 1
        valid_xx, valid_yy = self._meshgrid(valid_x, valid_y)
        return valid_xx & valid_yy
@PRIOR_GENERATORS.register_module()
class MlvlPointGenerator(object):
    """Standard points generator for multi-level (Mlvl) feature maps in 2D
    points-based detectors.

    Args:
        strides (list[int] | list[tuple[int, int]]): Strides of anchors
            in multiple feature levels in order (w, h).
        offset (float): The offset of points, the value is normalized with
            corresponding stride. Defaults to 0.5.
    """

    def __init__(self, strides, offset=0.5):
        # normalize each stride to a (w, h) pair
        self.strides = [_pair(stride) for stride in strides]
        self.offset = offset

    @property
    def num_levels(self):
        """int: number of feature levels that the generator will be applied"""
        return len(self.strides)

    @property
    def num_base_priors(self):
        """list[int]: The number of priors (points) at a point
        on the feature grid"""
        return [1 for _ in range(len(self.strides))]

    def _meshgrid(self, x, y, row_major=True):
        # torch.meshgrid yields (y, x) ordered grids; flatten for stacking
        yy, xx = torch.meshgrid(y, x)
        if row_major:
            return xx.reshape(-1), yy.reshape(-1)
        return yy.reshape(-1), xx.reshape(-1)

    def grid_priors(self, featmap_sizes, dtype=torch.float32, device='cuda', with_stride=False):
        """Generate grid points of multiple feature levels.

        Args:
            featmap_sizes (list[tuple]): List of feature map sizes in multiple
                feature levels, each size arrange as (h, w).
            dtype (:obj:`dtype`): Dtype of priors. Default: torch.float32.
            device (str): The device where the anchors will be put on.
            with_stride (bool): Whether to concatenate the stride to the last
                dimension of points.

        Return:
            list[torch.Tensor]: Points of multiple feature levels. Each tensor
            is (N, 2) with (coord_x, coord_y) when ``with_stride`` is False,
            otherwise (N, 4) with (coord_x, coord_y, stride_w, stride_h).
        """
        assert self.num_levels == len(featmap_sizes)
        multi_level_priors = []
        for level in range(self.num_levels):
            multi_level_priors.append(
                self.single_level_grid_priors(
                    featmap_sizes[level],
                    level_idx=level,
                    dtype=dtype,
                    device=device,
                    with_stride=with_stride))
        return multi_level_priors

    def single_level_grid_priors(self, featmap_size, level_idx, dtype=torch.float32, device='cuda', with_stride=False):
        """Generate grid Points of a single level.

        Note:
            This function is usually called by method ``self.grid_priors``.

        Args:
            featmap_size (tuple[int]): Size of the feature maps, arrange as
                (h, w).
            level_idx (int): The index of corresponding feature map level.
            dtype (:obj:`dtype`): Dtype of priors. Default: torch.float32.
            device (str, optional): The device the tensor will be put on.
                Defaults to 'cuda'.
            with_stride (bool): Concatenate the stride to the last dimension
                of points.

        Return:
            Tensor: (N, 2) points or (N, 4) points-with-strides, as in
            ``grid_priors``.
        """
        feat_h, feat_w = featmap_size
        stride_w, stride_h = self.strides[level_idx]
        # shift each integer grid coordinate by `offset` cells, then scale
        shift_x = (torch.arange(0, feat_w, device=device) + self.offset) * stride_w
        shift_x = shift_x.to(dtype)
        shift_y = (torch.arange(0, feat_h, device=device) + self.offset) * stride_h
        shift_y = shift_y.to(dtype)
        shift_xx, shift_yy = self._meshgrid(shift_x, shift_y)
        if not with_stride:
            shifts = torch.stack([shift_xx, shift_yy], dim=-1)
        else:
            stride_w = shift_xx.new_full((shift_xx.shape[0],), stride_w).to(dtype)
            stride_h = shift_xx.new_full((shift_yy.shape[0],), stride_h).to(dtype)
            shifts = torch.stack([shift_xx, shift_yy, stride_w, stride_h], dim=-1)
        return shifts.to(device)

    def valid_flags(self, featmap_sizes, pad_shape, device='cuda'):
        """Generate valid flags of points of multiple feature levels.

        Args:
            featmap_sizes (list(tuple)): List of feature map sizes in multiple
                feature levels, each size arrange as (h, w).
            pad_shape (tuple(int)): The padded shape of the image, arrange as
                (h, w).
            device (str): The device where the anchors will be put on.

        Return:
            list(torch.Tensor): Valid flags of points of multiple levels.
        """
        assert self.num_levels == len(featmap_sizes)
        multi_level_flags = []
        for level in range(self.num_levels):
            point_stride = self.strides[level]
            feat_h, feat_w = featmap_sizes[level]
            h, w = pad_shape[:2]
            # clamp the valid extent to the actual feature-map size
            valid_feat_h = min(int(np.ceil(h / point_stride[1])), feat_h)
            valid_feat_w = min(int(np.ceil(w / point_stride[0])), feat_w)
            multi_level_flags.append(
                self.single_level_valid_flags(
                    (feat_h, feat_w), (valid_feat_h, valid_feat_w), device=device))
        return multi_level_flags

    def single_level_valid_flags(self, featmap_size, valid_size, device='cuda'):
        """Generate the valid flags of points of a single feature map.

        Args:
            featmap_size (tuple[int]): The size of feature maps, arrange as
                (h, w).
            valid_size (tuple[int]): The valid size of the feature maps,
                arrange as (h, w).
            device (str, optional): The device where the flags will be put on.
                Defaults to 'cuda'.

        Returns:
            torch.Tensor: The valid flags of each point in a single level
            feature map.
        """
        feat_h, feat_w = featmap_size
        valid_h, valid_w = valid_size
        assert valid_h <= feat_h and valid_w <= feat_w
        valid_x = torch.zeros(feat_w, dtype=torch.bool, device=device)
        valid_y = torch.zeros(feat_h, dtype=torch.bool, device=device)
        valid_x[:valid_w] = 1
        valid_y[:valid_h] = 1
        valid_xx, valid_yy = self._meshgrid(valid_x, valid_y)
        return valid_xx & valid_yy

    def sparse_priors(self, prior_idxs, featmap_size, level_idx, dtype=torch.float32, device='cuda'):
        """Generate sparse points according to the ``prior_idxs``.

        Args:
            prior_idxs (Tensor): The index of corresponding anchors in the
                feature map.
            featmap_size (tuple[int]): feature map size arrange as (w, h).
            level_idx (int): The level index of corresponding feature map.
            dtype (obj:`torch.dtype`): Date type of points. Defaults to
                ``torch.float32``.
            device (obj:`torch.device`): The device where the points is
                located.

        Returns:
            Tensor: Anchor with shape (N, 2), N should be equal to the length
            of ``prior_idxs``, last dimension is (coord_x, coord_y).
        """
        height, width = featmap_size
        # recover 2-D grid coordinates from flat indices, then scale by stride
        x = (prior_idxs % width + self.offset) * self.strides[level_idx][0]
        y = ((prior_idxs // width) % height + self.offset) * self.strides[level_idx][1]
        prioris = torch.stack([x, y], 1).to(dtype)
        return prioris.to(device)
class AssignResult(util_mixins.NiceRepr):
    """Stores assignments between predicted and truth boxes.

    Attributes:
        num_gts (int): the number of truth boxes considered when computing this
            assignment

        gt_inds (LongTensor): for each predicted box indicates the 1-based
            index of the assigned truth box. 0 means unassigned and -1 means
            ignore.

        max_overlaps (FloatTensor): the iou between the predicted box and its
            assigned truth box.

        labels (None | LongTensor): If specified, for each predicted box
            indicates the category label of the assigned truth box.

    Example:
        >>> # An assign result between 4 predicted boxes and 9 true boxes
        >>> # where only two boxes were assigned.
        >>> num_gts = 9
        >>> max_overlaps = torch.LongTensor([0, .5, .9, 0])
        >>> gt_inds = torch.LongTensor([-1, 1, 2, 0])
        >>> labels = torch.LongTensor([0, 3, 4, 0])
        >>> self = AssignResult(num_gts, gt_inds, max_overlaps, labels)
        >>> print(str(self))  # xdoctest: +IGNORE_WANT
        <AssignResult(num_gts=9, gt_inds.shape=(4,), max_overlaps.shape=(4,),
                      labels.shape=(4,))>
        >>> # Force addition of gt labels (when adding gt as proposals)
        >>> new_labels = torch.LongTensor([3, 4, 5])
        >>> self.add_gt_(new_labels)
        >>> print(str(self))  # xdoctest: +IGNORE_WANT
        <AssignResult(num_gts=9, gt_inds.shape=(7,), max_overlaps.shape=(7,),
                      labels.shape=(7,))>
    """

    def __init__(self, num_gts, gt_inds, max_overlaps, labels=None):
        self.num_gts = num_gts
        self.gt_inds = gt_inds
        self.max_overlaps = max_overlaps
        self.labels = labels
        # Interface for possible user-defined properties
        self._extra_properties = {}

    @property
    def num_preds(self):
        """int: the number of predictions in this assignment"""
        return len(self.gt_inds)

    def set_extra_property(self, key, value):
        """Set user-defined new property."""
        assert key not in self.info
        self._extra_properties[key] = value

    def get_extra_property(self, key):
        """Get user-defined property."""
        return self._extra_properties.get(key, None)

    @property
    def info(self):
        """dict: a dictionary of info about the object"""
        basic_info = {
            'num_gts': self.num_gts,
            'num_preds': self.num_preds,
            'gt_inds': self.gt_inds,
            'max_overlaps': self.max_overlaps,
            'labels': self.labels,
        }
        basic_info.update(self._extra_properties)
        return basic_info

    def __nice__(self):
        """str: a "nice" summary string describing this assign result"""
        parts = []
        parts.append(f'num_gts={self.num_gts!r}')
        if self.gt_inds is None:
            parts.append(f'gt_inds={self.gt_inds!r}')
        else:
            parts.append(f'gt_inds.shape={tuple(self.gt_inds.shape)!r}')
        if self.max_overlaps is None:
            parts.append(f'max_overlaps={self.max_overlaps!r}')
        else:
            parts.append(f'max_overlaps.shape={tuple(self.max_overlaps.shape)!r}')
        if self.labels is None:
            parts.append(f'labels={self.labels!r}')
        else:
            parts.append(f'labels.shape={tuple(self.labels.shape)!r}')
        return ', '.join(parts)

    @classmethod
    def random(cls, **kwargs):
        """Create random AssignResult for tests or debugging.

        Args:
            num_preds: number of predicted boxes
            num_gts: number of true boxes
            p_ignore (float): probability of a predicted box assigned to an
                ignored truth
            p_assigned (float): probability of a predicted box not being
                assigned
            p_use_label (float | bool): with labels or not
            num_classes (int): number of possible labels. Default 3.
            rng (None | int | numpy.random.RandomState): seed or state

        Returns:
            :obj:`AssignResult`: Randomly generated assign results.

        Example:
            >>> from mmdet.core.bbox.assigners.assign_result import *  # NOQA
            >>> self = AssignResult.random()
            >>> print(self.info)
        """
        from mmdet.core.bbox import demodata
        rng = demodata.ensure_rng(kwargs.get('rng', None))
        num_gts = kwargs.get('num_gts', None)
        num_preds = kwargs.get('num_preds', None)
        p_ignore = kwargs.get('p_ignore', 0.3)
        p_assigned = kwargs.get('p_assigned', 0.7)
        p_use_label = kwargs.get('p_use_label', 0.5)
        # BUG FIX: previously read kwargs.get('p_use_label', 3), so the
        # `num_classes` kwarg was silently ignored and the label count was
        # polluted by the `p_use_label` value.
        num_classes = kwargs.get('num_classes', 3)
        if num_gts is None:
            num_gts = rng.randint(0, 8)
        if num_preds is None:
            num_preds = rng.randint(0, 16)
        if num_gts == 0:
            max_overlaps = torch.zeros(num_preds, dtype=torch.float32)
            gt_inds = torch.zeros(num_preds, dtype=torch.int64)
            if p_use_label is True or p_use_label < rng.rand():
                labels = torch.zeros(num_preds, dtype=torch.int64)
            else:
                labels = None
        else:
            import numpy as np
            # Create an overlap for each predicted box
            max_overlaps = torch.from_numpy(rng.rand(num_preds))
            # Construct a random number of assignments
            is_assigned = torch.from_numpy(rng.rand(num_preds) < p_assigned)
            # Ensure not more assignments than gt boxes exist
            n_assigned = min(num_preds, min(num_gts, is_assigned.sum()))
            assigned_idxs = np.where(is_assigned)[0]
            rng.shuffle(assigned_idxs)
            assigned_idxs = assigned_idxs[0:n_assigned]
            assigned_idxs.sort()
            is_assigned[:] = 0
            is_assigned[assigned_idxs] = True
            is_ignore = torch.from_numpy(rng.rand(num_preds) < p_ignore) & is_assigned
            gt_inds = torch.zeros(num_preds, dtype=torch.int64)
            true_idxs = np.arange(num_gts)
            rng.shuffle(true_idxs)
            true_idxs = torch.from_numpy(true_idxs)
            gt_inds[is_assigned] = true_idxs[:n_assigned].long()
            # NOTE(review): the line below overwrites the shuffled assignment
            # above with fresh random 1-based indices; kept as-is to preserve
            # existing behavior, but it looks suspicious — confirm intent.
            gt_inds = torch.from_numpy(rng.randint(1, num_gts + 1, size=num_preds))
            gt_inds[is_ignore] = -1
            gt_inds[~is_assigned] = 0
            max_overlaps[~is_assigned] = 0
            if p_use_label is True or p_use_label < rng.rand():
                if num_classes == 0:
                    labels = torch.zeros(num_preds, dtype=torch.int64)
                else:
                    labels = torch.from_numpy(rng.randint(0, num_classes, size=num_preds))
                    labels[~is_assigned] = 0
            else:
                labels = None
        self = cls(num_gts, gt_inds, max_overlaps, labels)
        return self

    def add_gt_(self, gt_labels):
        """Add ground truth as assigned results.

        Args:
            gt_labels (torch.Tensor): Labels of gt boxes
        """
        # gt boxes are prepended and assigned to themselves (1-based indices)
        self_inds = torch.arange(1, len(gt_labels) + 1, dtype=torch.long, device=gt_labels.device)
        self.gt_inds = torch.cat([self_inds, self.gt_inds])
        self.max_overlaps = torch.cat(
            [self.max_overlaps.new_ones(len(gt_labels)), self.max_overlaps])
        if self.labels is not None:
            self.labels = torch.cat([gt_labels, self.labels])
class BaseAssigner(metaclass=ABCMeta): 'Base assigner that assigns boxes to ground truth boxes.' @abstractmethod def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None): 'Assign boxes to either a ground truth boxes or a negative boxes.'
@BBOX_ASSIGNERS.register_module()
class HungarianAssigner(BaseAssigner):
    """Computes one-to-one matching between predictions and ground truth.

    This class computes an assignment between the targets and the predictions
    based on the costs. The costs are weighted sum of three components:
    classification cost, regression L1 cost and regression iou cost. The
    targets don't include the no_object, so generally there are more
    predictions than targets. After the one-to-one matching, the un-matched
    are treated as backgrounds. Thus each query prediction will be assigned
    with `0` or a positive integer indicating the ground truth index:

    - 0: negative sample, no assigned gt
    - positive integer: positive sample, index (1-based) of assigned gt

    Args:
        cls_cost (dict): Config of the classification cost module.
            Default ``dict(type='ClassificationCost', weight=1.)``.
        reg_cost (dict): Config of the regression L1 cost module.
            Default ``dict(type='BBoxL1Cost', weight=1.)``.
        iou_cost (dict): Config of the regression iou cost module.
            Default ``dict(type='IoUCost', iou_mode='giou', weight=1.)``.
    """

    def __init__(self,
                 cls_cost=dict(type='ClassificationCost', weight=1.0),
                 reg_cost=dict(type='BBoxL1Cost', weight=1.0),
                 iou_cost=dict(type='IoUCost', iou_mode='giou', weight=1.0)):
        # Each cost term is built from its config via the MATCH_COST registry.
        self.cls_cost = build_match_cost(cls_cost)
        self.reg_cost = build_match_cost(reg_cost)
        self.iou_cost = build_match_cost(iou_cost)

    def assign(self,
               bbox_pred,
               cls_pred,
               gt_bboxes,
               gt_labels,
               img_meta,
               gt_bboxes_ignore=None,
               eps=1e-07):
        """Computes one-to-one matching based on the weighted costs.

        This method assign each query prediction to a ground truth or
        background. The `assigned_gt_inds` with -1 means don't care,
        0 means negative sample, and positive number is the index (1-based)
        of assigned gt.
        The assignment is done in the following steps, the order matters.

        1. assign every prediction to -1
        2. compute the weighted costs
        3. do Hungarian matching on CPU based on the costs
        4. assign all to 0 (background) first, then for each matched pair
           between predictions and gts, treat this prediction as foreground
           and assign the corresponding gt index (plus 1) to it.

        Args:
            bbox_pred (Tensor): Predicted boxes with normalized coordinates
                (cx, cy, w, h), which are all in range [0, 1]. Shape
                [num_query, 4].
            cls_pred (Tensor): Predicted classification logits, shape
                [num_query, num_class].
            gt_bboxes (Tensor): Ground truth boxes with unnormalized
                coordinates (x1, y1, x2, y2). Shape [num_gt, 4].
            gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,).
            img_meta (dict): Meta information for current image.
            gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are
                labelled as `ignored`. Default None.
            eps (int | float, optional): A value added to the denominator for
                numerical stability. Default 1e-7.
                NOTE(review): ``eps`` is not referenced in this method body.

        Returns:
            :obj:`AssignResult`: The assigned result.
        """
        assert (gt_bboxes_ignore is None), \
            'Only case when gt_bboxes_ignore is None is supported.'
        (num_gts, num_bboxes) = (gt_bboxes.size(0), bbox_pred.size(0))
        # 1. assign -1 ("don't care") by default to every query.
        assigned_gt_inds = bbox_pred.new_full((num_bboxes,), (-1),
                                              dtype=torch.long)
        assigned_labels = bbox_pred.new_full((num_bboxes,), (-1),
                                             dtype=torch.long)
        if ((num_gts == 0) or (num_bboxes == 0)):
            # No gt boxes: every prediction is background; no predictions:
            # nothing to assign.
            if (num_gts == 0):
                assigned_gt_inds[:] = 0
            return AssignResult(
                num_gts, assigned_gt_inds, None, labels=assigned_labels)
        (img_h, img_w, _) = img_meta['img_shape']
        # Scale factor to convert between normalized cxcywh predictions and
        # absolute-pixel xyxy ground truth.
        factor = gt_bboxes.new_tensor([img_w, img_h, img_w,
                                       img_h]).unsqueeze(0)
        # 2. compute the weighted costs.
        cls_cost = self.cls_cost(cls_pred, gt_labels)
        normalize_gt_bboxes = (gt_bboxes / factor)
        reg_cost = self.reg_cost(bbox_pred, normalize_gt_bboxes)
        bboxes = (bbox_cxcywh_to_xyxy(bbox_pred) * factor)
        iou_cost = self.iou_cost(bboxes, gt_bboxes)
        cost = ((cls_cost + reg_cost) + iou_cost)
        # 3. Hungarian matching runs on CPU via scipy.
        cost = cost.detach().cpu()
        if (linear_sum_assignment is None):
            raise ImportError(
                'Please run "pip install scipy" to install scipy first.')
        (matched_row_inds, matched_col_inds) = linear_sum_assignment(cost)
        matched_row_inds = torch.from_numpy(matched_row_inds).to(
            bbox_pred.device)
        matched_col_inds = torch.from_numpy(matched_col_inds).to(
            bbox_pred.device)
        # 4. all queries default to background, matched ones get the 1-based
        # index of their gt.
        assigned_gt_inds[:] = 0
        assigned_gt_inds[matched_row_inds] = (matched_col_inds + 1)
        assigned_labels[matched_row_inds] = gt_labels[matched_col_inds]
        return AssignResult(
            num_gts, assigned_gt_inds, None, labels=assigned_labels)
@BBOX_ASSIGNERS.register_module()
class MaskHungarianAssigner(BaseAssigner):
    """One-to-one Hungarian matcher between mask predictions and ground truth.

    The matching cost is a weighted sum of three terms: classification cost,
    mask focal cost and mask dice cost. Targets do not include a no-object
    entry, so there are usually more predictions than targets; after matching,
    un-matched queries are treated as background. Each query is therefore
    assigned either:

    - 0: negative sample, no assigned gt
    - positive integer: positive sample, index (1-based) of assigned gt

    Args:
        cls_cost (:obj:`mmcv.ConfigDict` | dict): Classification cost config.
        mask_cost (:obj:`mmcv.ConfigDict` | dict): Mask cost config.
        dice_cost (:obj:`mmcv.ConfigDict` | dict): Dice cost config.
    """

    def __init__(self,
                 cls_cost=dict(type='ClassificationCost', weight=1.0),
                 mask_cost=dict(
                     type='FocalLossCost', weight=1.0, binary_input=True),
                 dice_cost=dict(type='DiceCost', weight=1.0)):
        self.cls_cost = build_match_cost(cls_cost)
        self.mask_cost = build_match_cost(mask_cost)
        self.dice_cost = build_match_cost(dice_cost)

    def assign(self,
               cls_pred,
               mask_pred,
               gt_labels,
               gt_mask,
               img_meta,
               gt_bboxes_ignore=None,
               eps=1e-07):
        """Computes one-to-one matching based on the weighted costs.

        Args:
            cls_pred (Tensor): Class prediction in shape
                (num_query, cls_out_channels).
            mask_pred (Tensor): Mask prediction in shape (num_query, H, W).
            gt_labels (Tensor): Label of 'gt_mask' in shape (num_gt, ).
            gt_mask (Tensor): Ground truth mask in shape (num_gt, H, W).
            img_meta (dict): Meta information for current image.
            gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are
                labelled as `ignored`. Default None.
            eps (int | float, optional): A value added to the denominator for
                numerical stability. Default 1e-7.

        Returns:
            :obj:`AssignResult`: The assigned result.
        """
        assert gt_bboxes_ignore is None, \
            'Only case when gt_bboxes_ignore is None is supported.'
        num_targets = gt_labels.shape[0]
        num_queries = cls_pred.shape[0]

        # Every query starts as -1 ("don't care").
        assigned_gt_inds = cls_pred.new_full((num_queries,), -1,
                                             dtype=torch.long)
        assigned_labels = cls_pred.new_full((num_queries,), -1,
                                            dtype=torch.long)

        if num_targets == 0 or num_queries == 0:
            # Without gts every query is background; without queries there is
            # simply nothing to match.
            if num_targets == 0:
                assigned_gt_inds[:] = 0
            return AssignResult(
                num_targets, assigned_gt_inds, None, labels=assigned_labels)

        # A cost term whose weight is zero contributes nothing and is skipped.
        if self.cls_cost.weight != 0 and cls_pred is not None:
            cls_cost = self.cls_cost(cls_pred, gt_labels)
        else:
            cls_cost = 0
        mask_cost = (self.mask_cost(mask_pred, gt_mask)
                     if self.mask_cost.weight != 0 else 0)
        dice_cost = (self.dice_cost(mask_pred, gt_mask)
                     if self.dice_cost.weight != 0 else 0)
        cost = (cls_cost + mask_cost + dice_cost).detach().cpu()

        # Hungarian matching runs on CPU via scipy.
        if linear_sum_assignment is None:
            raise ImportError(
                'Please run "pip install scipy" to install scipy first.')
        row_inds, col_inds = linear_sum_assignment(cost)
        row_inds = torch.from_numpy(row_inds).to(cls_pred.device)
        col_inds = torch.from_numpy(col_inds).to(cls_pred.device)

        # Background by default; matched queries receive 1-based gt indices.
        assigned_gt_inds[:] = 0
        assigned_gt_inds[row_inds] = col_inds + 1
        assigned_labels[row_inds] = gt_labels[col_inds]
        return AssignResult(
            num_targets, assigned_gt_inds, None, labels=assigned_labels)
@BBOX_ASSIGNERS.register_module()
class UniformAssigner(BaseAssigner):
    """Uniform Matching between the anchors and gt boxes, which can achieve
    balance in positive anchors, and gt_bboxes_ignore was not considered for
    now.

    Args:
        pos_ignore_thr (float): the threshold to ignore positive anchors
        neg_ignore_thr (float): the threshold to ignore negative anchors
        match_times (int): Number of positive anchors for each gt box.
            Default 4.
        iou_calculator (dict): iou_calculator config
    """

    def __init__(self,
                 pos_ignore_thr,
                 neg_ignore_thr,
                 match_times=4,
                 iou_calculator=dict(type='BboxOverlaps2D')):
        self.match_times = match_times
        self.pos_ignore_thr = pos_ignore_thr
        self.neg_ignore_thr = neg_ignore_thr
        self.iou_calculator = build_iou_calculator(iou_calculator)

    def assign(self,
               bbox_pred,
               anchor,
               gt_bboxes,
               gt_bboxes_ignore=None,
               gt_labels=None):
        """Uniformly assign predictions/anchors to ground truth boxes.

        For each gt box the ``match_times`` closest predictions AND the
        ``match_times`` closest anchors (by L1 distance in cxcywh space) are
        taken as positive candidates; candidates whose anchor IoU is below
        ``pos_ignore_thr`` are ignored, and predictions whose max IoU exceeds
        ``neg_ignore_thr`` are also ignored as negatives.

        Args:
            bbox_pred (Tensor): Predicted boxes, shape (num_pred, 4), "xyxy".
            anchor (Tensor): Anchor boxes, shape (num_pred, 4), "xyxy".
            gt_bboxes (Tensor): Ground truth boxes, shape (num_gt, 4).
            gt_bboxes_ignore (Tensor, optional): Unused here. Default None.
            gt_labels (Tensor, optional): Labels of gts. Default None.

        Returns:
            :obj:`AssignResult`: The assigned result, with extra properties
            ``pos_idx``, ``pos_predicted_boxes`` and ``target_boxes``.
        """
        (num_gts, num_bboxes) = (gt_bboxes.size(0), bbox_pred.size(0))
        # Default: 0 = negative (background); labels default to -1.
        assigned_gt_inds = bbox_pred.new_full((num_bboxes,), 0,
                                              dtype=torch.long)
        assigned_labels = bbox_pred.new_full((num_bboxes,), (-1),
                                             dtype=torch.long)
        if ((num_gts == 0) or (num_bboxes == 0)):
            # No gt or no predictions: everything stays background, and the
            # extra properties are filled with empty tensors.
            if (num_gts == 0):
                assigned_gt_inds[:] = 0
            assign_result = AssignResult(
                num_gts, assigned_gt_inds, None, labels=assigned_labels)
            assign_result.set_extra_property(
                'pos_idx', bbox_pred.new_empty(0, dtype=torch.bool))
            assign_result.set_extra_property('pos_predicted_boxes',
                                             bbox_pred.new_empty((0, 4)))
            assign_result.set_extra_property('target_boxes',
                                             bbox_pred.new_empty((0, 4)))
            return assign_result
        # L1 distance costs in cxcywh space: predictions-vs-gts and
        # anchors-vs-gts. Shape (num_pred, num_gt) each.
        cost_bbox = torch.cdist(
            bbox_xyxy_to_cxcywh(bbox_pred),
            bbox_xyxy_to_cxcywh(gt_bboxes),
            p=1)
        cost_bbox_anchors = torch.cdist(
            bbox_xyxy_to_cxcywh(anchor), bbox_xyxy_to_cxcywh(gt_bboxes), p=1)
        # topk runs on CPU copies of the cost matrices.
        C = cost_bbox.cpu()
        C1 = cost_bbox_anchors.cpu()
        # For every gt, take the match_times closest predictions and the
        # match_times closest anchors. `indexes` interleaves them per gt:
        # shape (match_times * 2 * num_gt,).
        index = torch.topk(C, k=self.match_times, dim=0, largest=False)[1]
        index1 = torch.topk(C1, k=self.match_times, dim=0, largest=False)[1]
        indexes = torch.cat((index, index1),
                            dim=1).reshape((-1)).to(bbox_pred.device)
        pred_overlaps = self.iou_calculator(bbox_pred, gt_bboxes)
        anchor_overlaps = self.iou_calculator(anchor, gt_bboxes)
        (pred_max_overlaps, _) = pred_overlaps.max(dim=1)
        (anchor_max_overlaps, _) = anchor_overlaps.max(dim=0)
        # Predictions overlapping any gt above neg_ignore_thr are not valid
        # negatives: mark them -1 ("ignore").
        ignore_idx = (pred_max_overlaps > self.neg_ignore_thr)
        assigned_gt_inds[ignore_idx] = (-1)
        # gt index (0-based) for each entry of `indexes`, aligned with the
        # cat/reshape layout above.
        pos_gt_index = torch.arange(
            0, C1.size(1),
            device=bbox_pred.device).repeat((self.match_times * 2))
        pos_ious = anchor_overlaps[(indexes, pos_gt_index)]
        # Positive candidates whose anchor IoU is too low get ignored (-1);
        # the rest get 1-based gt indices.
        pos_ignore_idx = (pos_ious < self.pos_ignore_thr)
        pos_gt_index_with_ignore = (pos_gt_index + 1)
        pos_gt_index_with_ignore[pos_ignore_idx] = (-1)
        assigned_gt_inds[indexes] = pos_gt_index_with_ignore
        if (gt_labels is not None):
            assigned_labels = assigned_gt_inds.new_full((num_bboxes,), (-1))
            pos_inds = torch.nonzero(
                (assigned_gt_inds > 0), as_tuple=False).squeeze()
            if (pos_inds.numel() > 0):
                assigned_labels[pos_inds] = gt_labels[(
                    assigned_gt_inds[pos_inds] - 1)]
        else:
            assigned_labels = None
        assign_result = AssignResult(
            num_gts,
            assigned_gt_inds,
            anchor_max_overlaps,
            labels=assigned_labels)
        # Extra properties consumed by the loss (e.g. YOLOF): which positive
        # candidates survived, their predicted boxes, and their target boxes.
        assign_result.set_extra_property('pos_idx', (~pos_ignore_idx))
        assign_result.set_extra_property('pos_predicted_boxes',
                                         bbox_pred[indexes])
        assign_result.set_extra_property('target_boxes',
                                         gt_bboxes[pos_gt_index])
        return assign_result
def build_assigner(cfg, **default_args): 'Builder of box assigner.' return build_from_cfg(cfg, BBOX_ASSIGNERS, default_args)
def build_sampler(cfg, **default_args): 'Builder of box sampler.' return build_from_cfg(cfg, BBOX_SAMPLERS, default_args)
def build_bbox_coder(cfg, **default_args): 'Builder of box coder.' return build_from_cfg(cfg, BBOX_CODERS, default_args)
class BaseBBoxCoder(metaclass=ABCMeta): 'Base bounding box coder.' def __init__(self, **kwargs): pass @abstractmethod def encode(self, bboxes, gt_bboxes): 'Encode deltas between bboxes and ground truth boxes.' @abstractmethod def decode(self, bboxes, bboxes_pred): 'Decode the predicted bboxes according to prediction and base\n boxes.'
@BBOX_CODERS.register_module() class DistancePointBBoxCoder(BaseBBoxCoder): 'Distance Point BBox coder.\n\n This coder encodes gt bboxes (x1, y1, x2, y2) into (top, bottom, left,\n right) and decode it back to the original.\n\n Args:\n clip_border (bool, optional): Whether clip the objects outside the\n border of the image. Defaults to True.\n ' def __init__(self, clip_border=True): super(BaseBBoxCoder, self).__init__() self.clip_border = clip_border def encode(self, points, gt_bboxes, max_dis=None, eps=0.1): 'Encode bounding box to distances.\n\n Args:\n points (Tensor): Shape (N, 2), The format is [x, y].\n gt_bboxes (Tensor): Shape (N, 4), The format is "xyxy"\n max_dis (float): Upper bound of the distance. Default None.\n eps (float): a small value to ensure target < max_dis, instead <=.\n Default 0.1.\n\n Returns:\n Tensor: Box transformation deltas. The shape is (N, 4).\n ' assert (points.size(0) == gt_bboxes.size(0)) assert (points.size((- 1)) == 2) assert (gt_bboxes.size((- 1)) == 4) return bbox2distance(points, gt_bboxes, max_dis, eps) def decode(self, points, pred_bboxes, max_shape=None): 'Decode distance prediction to bounding box.\n\n Args:\n points (Tensor): Shape (B, N, 2) or (N, 2).\n pred_bboxes (Tensor): Distance from the given point to 4\n boundaries (left, top, right, bottom). Shape (B, N, 4)\n or (N, 4)\n max_shape (Sequence[int] or torch.Tensor or Sequence[\n Sequence[int]],optional): Maximum bounds for boxes, specifies\n (H, W, C) or (H, W). If priors shape is (B, N, 4), then\n the max_shape should be a Sequence[Sequence[int]],\n and the length of max_shape should also be B.\n Default None.\n Returns:\n Tensor: Boxes with shape (N, 4) or (B, N, 4)\n ' assert (points.size(0) == pred_bboxes.size(0)) assert (points.size((- 1)) == 2) assert (pred_bboxes.size((- 1)) == 4) if (self.clip_border is False): max_shape = None return distance2bbox(points, pred_bboxes, max_shape)
@BBOX_CODERS.register_module() class PseudoBBoxCoder(BaseBBoxCoder): 'Pseudo bounding box coder.' def __init__(self, **kwargs): super(BaseBBoxCoder, self).__init__(**kwargs) def encode(self, bboxes, gt_bboxes): 'torch.Tensor: return the given ``bboxes``' return gt_bboxes def decode(self, bboxes, pred_bboxes): 'torch.Tensor: return the given ``pred_bboxes``' return pred_bboxes
def build_iou_calculator(cfg, default_args=None): 'Builder of IoU calculator.' return build_from_cfg(cfg, IOU_CALCULATORS, default_args)
def build_match_cost(cfg, default_args=None): 'Builder of IoU calculator.' return build_from_cfg(cfg, MATCH_COST, default_args)
@BBOX_SAMPLERS.register_module() class CombinedSampler(BaseSampler): 'A sampler that combines positive sampler and negative sampler.' def __init__(self, pos_sampler, neg_sampler, **kwargs): super(CombinedSampler, self).__init__(**kwargs) self.pos_sampler = build_sampler(pos_sampler, **kwargs) self.neg_sampler = build_sampler(neg_sampler, **kwargs) def _sample_pos(self, **kwargs): 'Sample positive samples.' raise NotImplementedError def _sample_neg(self, **kwargs): 'Sample negative samples.' raise NotImplementedError
@BBOX_SAMPLERS.register_module() class InstanceBalancedPosSampler(RandomSampler): 'Instance balanced sampler that samples equal number of positive samples\n for each instance.' def _sample_pos(self, assign_result, num_expected, **kwargs): 'Sample positive boxes.\n\n Args:\n assign_result (:obj:`AssignResult`): The assigned results of boxes.\n num_expected (int): The number of expected positive samples\n\n Returns:\n Tensor or ndarray: sampled indices.\n ' pos_inds = torch.nonzero((assign_result.gt_inds > 0), as_tuple=False) if (pos_inds.numel() != 0): pos_inds = pos_inds.squeeze(1) if (pos_inds.numel() <= num_expected): return pos_inds else: unique_gt_inds = assign_result.gt_inds[pos_inds].unique() num_gts = len(unique_gt_inds) num_per_gt = int((round((num_expected / float(num_gts))) + 1)) sampled_inds = [] for i in unique_gt_inds: inds = torch.nonzero((assign_result.gt_inds == i.item()), as_tuple=False) if (inds.numel() != 0): inds = inds.squeeze(1) else: continue if (len(inds) > num_per_gt): inds = self.random_choice(inds, num_per_gt) sampled_inds.append(inds) sampled_inds = torch.cat(sampled_inds) if (len(sampled_inds) < num_expected): num_extra = (num_expected - len(sampled_inds)) extra_inds = np.array(list((set(pos_inds.cpu()) - set(sampled_inds.cpu())))) if (len(extra_inds) > num_extra): extra_inds = self.random_choice(extra_inds, num_extra) extra_inds = torch.from_numpy(extra_inds).to(assign_result.gt_inds.device).long() sampled_inds = torch.cat([sampled_inds, extra_inds]) elif (len(sampled_inds) > num_expected): sampled_inds = self.random_choice(sampled_inds, num_expected) return sampled_inds
@BBOX_SAMPLERS.register_module()
class IoUBalancedNegSampler(RandomSampler):
    """IoU Balanced Sampling.

    arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019)

    Sampling proposals according to their IoU. `floor_fraction` of needed RoIs
    are sampled from proposals whose IoU are lower than `floor_thr` randomly.
    The others are sampled from proposals whose IoU are higher than
    `floor_thr`. These proposals are sampled from some bins evenly, which are
    split by `num_bins` via IoU evenly.

    Args:
        num (int): number of proposals.
        pos_fraction (float): fraction of positive proposals.
        floor_thr (float): threshold (minimum) IoU for IoU balanced sampling,
            set to -1 if all using IoU balanced sampling.
        floor_fraction (float): sampling fraction of proposals under
            floor_thr.
        num_bins (int): number of bins in IoU balanced sampling.
    """

    def __init__(self,
                 num,
                 pos_fraction,
                 floor_thr=-1,
                 floor_fraction=0,
                 num_bins=3,
                 **kwargs):
        super(IoUBalancedNegSampler, self).__init__(num, pos_fraction,
                                                    **kwargs)
        assert floor_thr >= 0 or floor_thr == -1
        assert 0 <= floor_fraction <= 1
        assert num_bins >= 1
        self.floor_thr = floor_thr
        self.floor_fraction = floor_fraction
        self.num_bins = num_bins

    def sample_via_interval(self, max_overlaps, full_set, num_expected):
        """Sample according to the iou interval.

        Args:
            max_overlaps (torch.Tensor): IoU between bounding boxes and
                ground truth boxes.
            full_set (set(int)): A full set of indices of boxes.
            num_expected (int): Number of expected samples.

        Returns:
            np.ndarray: Indices of samples
        """
        max_iou = max_overlaps.max()
        # Split [floor_thr, max_iou] into num_bins equal IoU intervals and
        # draw evenly from each.
        iou_interval = (max_iou - self.floor_thr) / self.num_bins
        per_num_expected = int(num_expected / self.num_bins)
        sampled_inds = []
        for i in range(self.num_bins):
            start_iou = self.floor_thr + i * iou_interval
            end_iou = self.floor_thr + (i + 1) * iou_interval
            tmp_set = set(
                np.where(
                    np.logical_and(max_overlaps >= start_iou,
                                   max_overlaps < end_iou))[0])
            tmp_inds = list(tmp_set & full_set)
            if len(tmp_inds) > per_num_expected:
                tmp_sampled_set = self.random_choice(tmp_inds,
                                                     per_num_expected)
            else:
                # BUGFIX: `np.int` (deprecated in NumPy 1.20, removed in
                # 1.24) replaced with the builtin `int`.
                tmp_sampled_set = np.array(tmp_inds, dtype=int)
            sampled_inds.append(tmp_sampled_set)
        sampled_inds = np.concatenate(sampled_inds)
        if len(sampled_inds) < num_expected:
            # Under-full bins: top up randomly from the remainder.
            num_extra = num_expected - len(sampled_inds)
            extra_inds = np.array(list(full_set - set(sampled_inds)))
            if len(extra_inds) > num_extra:
                extra_inds = self.random_choice(extra_inds, num_extra)
            sampled_inds = np.concatenate([sampled_inds, extra_inds])
        return sampled_inds

    def _sample_neg(self, assign_result, num_expected, **kwargs):
        """Sample negative boxes.

        Args:
            assign_result (:obj:`AssignResult`): The assigned results of
                boxes.
            num_expected (int): The number of expected negative samples

        Returns:
            Tensor or ndarray: sampled indices.
        """
        neg_inds = torch.nonzero(assign_result.gt_inds == 0, as_tuple=False)
        if neg_inds.numel() != 0:
            neg_inds = neg_inds.squeeze(1)
        if len(neg_inds) <= num_expected:
            return neg_inds
        else:
            max_overlaps = assign_result.max_overlaps.cpu().numpy()
            neg_set = set(neg_inds.cpu().numpy())
            # Partition negatives into a "floor" set (IoU below floor_thr)
            # and an IoU-balanced sampling set (IoU above it).
            if self.floor_thr > 0:
                floor_set = set(
                    np.where(
                        np.logical_and(max_overlaps >= 0,
                                       max_overlaps < self.floor_thr))[0])
                iou_sampling_set = set(
                    np.where(max_overlaps >= self.floor_thr)[0])
            elif self.floor_thr == 0:
                floor_set = set(np.where(max_overlaps == 0)[0])
                iou_sampling_set = set(
                    np.where(max_overlaps > self.floor_thr)[0])
            else:
                floor_set = set()
                iou_sampling_set = set(
                    np.where(max_overlaps > self.floor_thr)[0])
                # NOTE(review): mutating instance state inside a sampling
                # call — after the first call with floor_thr == -1 it
                # becomes 0 and later binning starts from 0. Preserved
                # as-is to keep behavior unchanged.
                self.floor_thr = 0
            floor_neg_inds = list(floor_set & neg_set)
            iou_sampling_neg_inds = list(iou_sampling_set & neg_set)
            num_expected_iou_sampling = int(num_expected *
                                            (1 - self.floor_fraction))
            if len(iou_sampling_neg_inds) > num_expected_iou_sampling:
                if self.num_bins >= 2:
                    iou_sampled_inds = self.sample_via_interval(
                        max_overlaps, set(iou_sampling_neg_inds),
                        num_expected_iou_sampling)
                else:
                    iou_sampled_inds = self.random_choice(
                        iou_sampling_neg_inds, num_expected_iou_sampling)
            else:
                # BUGFIX: np.int -> int (removed alias, see above).
                iou_sampled_inds = np.array(
                    iou_sampling_neg_inds, dtype=int)
            num_expected_floor = num_expected - len(iou_sampled_inds)
            if len(floor_neg_inds) > num_expected_floor:
                sampled_floor_inds = self.random_choice(
                    floor_neg_inds, num_expected_floor)
            else:
                # BUGFIX: np.int -> int (removed alias, see above).
                sampled_floor_inds = np.array(floor_neg_inds, dtype=int)
            sampled_inds = np.concatenate(
                (sampled_floor_inds, iou_sampled_inds))
            if len(sampled_inds) < num_expected:
                num_extra = num_expected - len(sampled_inds)
                extra_inds = np.array(list(neg_set - set(sampled_inds)))
                if len(extra_inds) > num_extra:
                    extra_inds = self.random_choice(extra_inds, num_extra)
                sampled_inds = np.concatenate((sampled_inds, extra_inds))
            sampled_inds = torch.from_numpy(sampled_inds).long().to(
                assign_result.gt_inds.device)
            return sampled_inds
@BBOX_SAMPLERS.register_module() class MaskPseudoSampler(BaseSampler): 'A pseudo sampler that does not do sampling actually.' def __init__(self, **kwargs): pass def _sample_pos(self, **kwargs): 'Sample positive samples.' raise NotImplementedError def _sample_neg(self, **kwargs): 'Sample negative samples.' raise NotImplementedError def sample(self, assign_result, masks, gt_masks, **kwargs): 'Directly returns the positive and negative indices of samples.\n\n Args:\n assign_result (:obj:`AssignResult`): Assigned results\n masks (torch.Tensor): Bounding boxes\n gt_masks (torch.Tensor): Ground truth boxes\n Returns:\n :obj:`SamplingResult`: sampler results\n ' pos_inds = torch.nonzero((assign_result.gt_inds > 0), as_tuple=False).squeeze((- 1)).unique() neg_inds = torch.nonzero((assign_result.gt_inds == 0), as_tuple=False).squeeze((- 1)).unique() gt_flags = masks.new_zeros(masks.shape[0], dtype=torch.uint8) sampling_result = MaskSamplingResult(pos_inds, neg_inds, masks, gt_masks, assign_result, gt_flags) return sampling_result
@BBOX_SAMPLERS.register_module()
class OHEMSampler(BaseSampler):
    """Online Hard Example Mining Sampler described in `Training Region-based
    Object Detectors with Online Hard Example Mining
    <https://arxiv.org/abs/1604.03540>`_.

    Args:
        num (int): Number of samples.
        pos_fraction (float): Fraction of positive samples.
        context: The detector head (or cascade stage container) providing
            ``bbox_head`` and ``_bbox_forward``.
        neg_pos_ub (int): Upper bound of the negative/positive ratio.
            Default -1 (no bound).
        add_gt_as_proposals (bool): Whether to add gts as proposals.
            Default True.
        loss_key (str): Key of the loss dict used to rank hardness.
            Default 'loss_cls'.
    """

    def __init__(self,
                 num,
                 pos_fraction,
                 context,
                 neg_pos_ub=(-1),
                 add_gt_as_proposals=True,
                 loss_key='loss_cls',
                 **kwargs):
        super(OHEMSampler, self).__init__(num, pos_fraction, neg_pos_ub,
                                          add_gt_as_proposals)
        self.context = context
        # Cascade heads expose `num_stages` and keep one bbox_head per
        # stage; single-stage-of-RoI heads expose bbox_head directly.
        if (not hasattr(self.context, 'num_stages')):
            self.bbox_head = self.context.bbox_head
        else:
            self.bbox_head = self.context.bbox_head[self.context.current_stage]
        self.loss_key = loss_key

    def hard_mining(self, inds, num_expected, bboxes, labels, feats):
        # Score each candidate by its (un-reduced) loss and keep the
        # `num_expected` hardest ones. No gradients are needed here.
        with torch.no_grad():
            rois = bbox2roi([bboxes])
            if (not hasattr(self.context, 'num_stages')):
                bbox_results = self.context._bbox_forward(feats, rois)
            else:
                bbox_results = self.context._bbox_forward(
                    self.context.current_stage, feats, rois)
            cls_score = bbox_results['cls_score']
            # reduction_override='none' yields a per-RoI loss vector, from
            # which the top-k losses identify the hardest examples.
            loss = self.bbox_head.loss(
                cls_score=cls_score,
                bbox_pred=None,
                rois=rois,
                labels=labels,
                label_weights=cls_score.new_ones(cls_score.size(0)),
                bbox_targets=None,
                bbox_weights=None,
                reduction_override='none')[self.loss_key]
            (_, topk_loss_inds) = loss.topk(num_expected)
            return inds[topk_loss_inds]

    def _sample_pos(self,
                    assign_result,
                    num_expected,
                    bboxes=None,
                    feats=None,
                    **kwargs):
        """Sample positive boxes.

        Args:
            assign_result (:obj:`AssignResult`): Assigned results
            num_expected (int): Number of expected positive samples
            bboxes (torch.Tensor, optional): Boxes. Defaults to None.
            feats (list[torch.Tensor], optional): Multi-level features.
                Defaults to None.

        Returns:
            torch.Tensor: Indices of positive samples
        """
        pos_inds = torch.nonzero((assign_result.gt_inds > 0), as_tuple=False)
        if (pos_inds.numel() != 0):
            pos_inds = pos_inds.squeeze(1)
        if (pos_inds.numel() <= num_expected):
            # Fewer positives than requested: take them all.
            return pos_inds
        else:
            return self.hard_mining(pos_inds, num_expected,
                                    bboxes[pos_inds],
                                    assign_result.labels[pos_inds], feats)

    def _sample_neg(self,
                    assign_result,
                    num_expected,
                    bboxes=None,
                    feats=None,
                    **kwargs):
        """Sample negative boxes.

        Args:
            assign_result (:obj:`AssignResult`): Assigned results
            num_expected (int): Number of expected negative samples
            bboxes (torch.Tensor, optional): Boxes. Defaults to None.
            feats (list[torch.Tensor], optional): Multi-level features.
                Defaults to None.

        Returns:
            torch.Tensor: Indices of negative samples
        """
        neg_inds = torch.nonzero((assign_result.gt_inds == 0), as_tuple=False)
        if (neg_inds.numel() != 0):
            neg_inds = neg_inds.squeeze(1)
        if (len(neg_inds) <= num_expected):
            return neg_inds
        else:
            # Negatives are labelled with the background class
            # (num_classes) for loss computation.
            neg_labels = assign_result.labels.new_empty(
                neg_inds.size(0)).fill_(self.bbox_head.num_classes)
            return self.hard_mining(neg_inds, num_expected,
                                    bboxes[neg_inds], neg_labels, feats)
@BBOX_SAMPLERS.register_module() class PseudoSampler(BaseSampler): 'A pseudo sampler that does not do sampling actually.' def __init__(self, **kwargs): pass def _sample_pos(self, **kwargs): 'Sample positive samples.' raise NotImplementedError def _sample_neg(self, **kwargs): 'Sample negative samples.' raise NotImplementedError def sample(self, assign_result, bboxes, gt_bboxes, *args, **kwargs): 'Directly returns the positive and negative indices of samples.\n\n Args:\n assign_result (:obj:`AssignResult`): Assigned results\n bboxes (torch.Tensor): Bounding boxes\n gt_bboxes (torch.Tensor): Ground truth boxes\n\n Returns:\n :obj:`SamplingResult`: sampler results\n ' pos_inds = torch.nonzero((assign_result.gt_inds > 0), as_tuple=False).squeeze((- 1)).unique() neg_inds = torch.nonzero((assign_result.gt_inds == 0), as_tuple=False).squeeze((- 1)).unique() gt_flags = bboxes.new_zeros(bboxes.shape[0], dtype=torch.uint8) sampling_result = SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes, assign_result, gt_flags) return sampling_result
@BBOX_SAMPLERS.register_module()
class RandomSampler(BaseSampler):
    """Random sampler.

    Args:
        num (int): Number of samples
        pos_fraction (float): Fraction of positive samples
        neg_pos_ub (int, optional): Upper bound number of negative and
            positive samples. Defaults to -1.
        add_gt_as_proposals (bool, optional): Whether to add ground truth
            boxes as proposals. Defaults to True.
    """

    def __init__(self,
                 num,
                 pos_fraction,
                 neg_pos_ub=(-1),
                 add_gt_as_proposals=True,
                 **kwargs):
        from mmdet.core.bbox import demodata
        super(RandomSampler, self).__init__(num, pos_fraction, neg_pos_ub,
                                            add_gt_as_proposals)
        # numpy RandomState seeded from the optional 'rng' kwarg.
        # NOTE(review): `self.rng` is not used by random_choice below, which
        # relies on torch's global RNG — confirm whether seeding via 'rng'
        # is expected to make sampling reproducible.
        self.rng = demodata.ensure_rng(kwargs.get('rng', None))

    def random_choice(self, gallery, num):
        """Random select some elements from the gallery.

        If `gallery` is a Tensor, the returned indices will be a Tensor;
        If `gallery` is a ndarray or list, the returned indices will be a
        ndarray.

        Args:
            gallery (Tensor | ndarray | list): indices pool.
            num (int): expected sample num.

        Returns:
            Tensor or ndarray: sampled indices.
        """
        assert (len(gallery) >= num)
        is_tensor = isinstance(gallery, torch.Tensor)
        if (not is_tensor):
            # Convert list/ndarray input to a tensor on the current device.
            if torch.cuda.is_available():
                device = torch.cuda.current_device()
            else:
                device = 'cpu'
            gallery = torch.tensor(gallery, dtype=torch.long, device=device)
        # Take the first `num` entries of a random permutation
        # (uses torch's global RNG).
        perm = torch.randperm(gallery.numel())[:num].to(device=gallery.device)
        rand_inds = gallery[perm]
        if (not is_tensor):
            # Return the same container kind the caller passed in.
            rand_inds = rand_inds.cpu().numpy()
        return rand_inds

    def _sample_pos(self, assign_result, num_expected, **kwargs):
        """Randomly sample some positive samples."""
        pos_inds = torch.nonzero((assign_result.gt_inds > 0), as_tuple=False)
        if (pos_inds.numel() != 0):
            pos_inds = pos_inds.squeeze(1)
        if (pos_inds.numel() <= num_expected):
            # Fewer positives than requested: take them all.
            return pos_inds
        else:
            return self.random_choice(pos_inds, num_expected)

    def _sample_neg(self, assign_result, num_expected, **kwargs):
        """Randomly sample some negative samples."""
        neg_inds = torch.nonzero((assign_result.gt_inds == 0), as_tuple=False)
        if (neg_inds.numel() != 0):
            neg_inds = neg_inds.squeeze(1)
        if (len(neg_inds) <= num_expected):
            return neg_inds
        else:
            return self.random_choice(neg_inds, num_expected)
class GeneralData(NiceRepr):
    """A general data structure of OpenMMLab.

    Stores two disjoint groups of attributes:

    - ``meta_info_fields``: image-level meta information (e.g. ``img_shape``,
      ``pad_shape``, ``scale_factor``). Entries are immutable once set;
      re-setting a key to a different value raises ``KeyError``. Add entries
      with :meth:`set_meta_info`; read them via :meth:`meta_info_keys`,
      :meth:`meta_info_values`, :meth:`meta_info_items`.
    - ``data_fields``: annotations or model predictions. Accessible and
      mutable through both attribute-style (``.``) and dict-style (``[]``,
      ``in``, ``del``, ``pop``, ``get``, ``keys``, ``values``, ``items``)
      operations. Tensor-style helpers (:meth:`to`, :meth:`cpu`,
      :meth:`cuda`, :meth:`detach`, :meth:`numpy`) apply the same-named
      ``torch.Tensor`` method to every tensor value and return a new
      instance.

    Args:
        meta_info (dict, optional): Meta information of a single image, such
            as ``img_shape``, ``scale_factor``, etc. Default: None.
        data (dict, optional): Annotations of a single image or model
            predictions. Default: None.
    """

    def __init__(self, meta_info=None, data=None):
        # Bookkeeping sets; __setattr__ protects them from being rebound.
        self._meta_info_fields = set()
        self._data_fields = set()

        if meta_info is not None:
            self.set_meta_info(meta_info=meta_info)
        if data is not None:
            self.set_data(data)

    def set_meta_info(self, meta_info):
        """Add meta information.

        Args:
            meta_info (dict): A dict that contains meta information of an
                image, such as ``img_shape``, ``scale_factor``, etc.

        Raises:
            KeyError: If a key already exists with a different value
                (meta info is immutable once set).
        """
        assert isinstance(meta_info, dict), f'meta should be a `dict` but get {meta_info}'
        # Deep copy so later mutation of the caller's dict cannot leak in.
        meta = copy.deepcopy(meta_info)
        for k, v in meta.items():
            if k in self._meta_info_fields:
                ori_value = getattr(self, k)
                if isinstance(ori_value, (torch.Tensor, np.ndarray)):
                    # NOTE(review): assumes ori_value and v have compatible
                    # shapes for elementwise `==` — TODO confirm.
                    if (ori_value == v).all():
                        continue
                    else:
                        raise KeyError(f'img_meta_info {k} has been set as {getattr(self, k)} before, which is immutable ')
                elif ori_value == v:
                    continue
                else:
                    raise KeyError(f'img_meta_info {k} has been set as {getattr(self, k)} before, which is immutable ')
            else:
                self._meta_info_fields.add(k)
                # Written directly to __dict__ to bypass __setattr__, which
                # would otherwise register the key as a data field.
                self.__dict__[k] = v

    def set_data(self, data):
        """Update a dict to ``data_fields`` (routed through ``__setattr__``).

        Args:
            data (dict): A dict that contains annotations of an image or
                model predictions. Default: None.
        """
        assert isinstance(data, dict), f'meta should be a `dict` but get {data}'
        for k, v in data.items():
            self.__setattr__(k, v)

    def new(self, meta_info=None, data=None):
        """Return a new instance carrying the same meta information.

        Args:
            meta_info (dict, optional): Extra meta information for the new
                instance, such as ``img_shape``, ``scale_factor``, etc.
                Default: None.
            data (dict, optional): Data of the new instance. Default: None.

        Returns:
            GeneralData: A new object of ``self.__class__``.
        """
        new_data = self.__class__()
        new_data.set_meta_info(dict(self.meta_info_items()))
        if meta_info is not None:
            new_data.set_meta_info(meta_info)
        if data is not None:
            new_data.set_data(data)
        return new_data

    def keys(self):
        """Return a list of all keys in ``data_fields``."""
        return [key for key in self._data_fields]

    def meta_info_keys(self):
        """Return a list of all keys in ``meta_info_fields``."""
        return [key for key in self._meta_info_fields]

    def values(self):
        """Return a list of all values in ``data_fields``."""
        return [getattr(self, k) for k in self.keys()]

    def meta_info_values(self):
        """Return a list of all values in ``meta_info_fields``."""
        return [getattr(self, k) for k in self.meta_info_keys()]

    def items(self):
        """Yield ``(key, value)`` pairs of ``data_fields``."""
        for k in self.keys():
            yield (k, getattr(self, k))

    def meta_info_items(self):
        """Yield ``(key, value)`` pairs of ``meta_info_fields``."""
        for k in self.meta_info_keys():
            yield (k, getattr(self, k))

    def __setattr__(self, name, val):
        # The two bookkeeping sets may be assigned exactly once (in
        # __init__); any later rebinding is rejected.
        if name in ('_meta_info_fields', '_data_fields'):
            if not hasattr(self, name):
                super().__setattr__(name, val)
            else:
                raise AttributeError(f'{name} has been used as a private attribute, which is immutable. ')
        else:
            if name in self._meta_info_fields:
                raise AttributeError(f'`{name}` is used in meta information,which is immutable')

            # Every ordinary attribute assignment registers a data field.
            self._data_fields.add(name)
            super().__setattr__(name, val)

    def __delattr__(self, item):
        if item in ('_meta_info_fields', '_data_fields'):
            raise AttributeError(f'{item} has been used as a private attribute, which is immutable. ')
        if item in self._meta_info_fields:
            raise KeyError(f'{item} is used in meta information, which is immutable.')
        super().__delattr__(item)
        if item in self._data_fields:
            self._data_fields.remove(item)

    # Dict-style item access reuses the attribute protocol above.
    __setitem__ = __setattr__
    __delitem__ = __delattr__

    def __getitem__(self, name):
        return getattr(self, name)

    def get(self, *args):
        """Same semantics as ``dict.get``: ``get(name[, default])``."""
        assert len(args) < 3, '`get` get more than 2 arguments'
        return self.__dict__.get(*args)

    def pop(self, *args):
        """Same semantics as ``dict.pop``: ``pop(name[, default])``.

        Raises:
            KeyError: If ``name`` is a meta-info key (immutable), or it is
                missing and no default was supplied.
        """
        assert len(args) < 3, '`pop` get more than 2 arguments'
        name = args[0]
        if name in self._meta_info_fields:
            raise KeyError(f'{name} is a key in meta information, which is immutable')

        if args[0] in self._data_fields:
            self._data_fields.remove(args[0])
            return self.__dict__.pop(*args)
        elif len(args) == 2:
            return args[1]
        else:
            raise KeyError(f'{args[0]}')

    def __contains__(self, item):
        return item in self._data_fields or item in self._meta_info_fields

    def to(self, *args, **kwargs):
        """Apply ``.to(*args, **kwargs)`` to every value that supports it;
        return a new instance."""
        new_data = self.new()
        for k, v in self.items():
            if hasattr(v, 'to'):
                v = v.to(*args, **kwargs)
            new_data[k] = v
        return new_data

    def cpu(self):
        """Move all tensors in ``data_fields`` to CPU; return a new
        instance."""
        new_data = self.new()
        for k, v in self.items():
            if isinstance(v, torch.Tensor):
                v = v.cpu()
            new_data[k] = v
        return new_data

    def cuda(self):
        """Move all tensors in ``data_fields`` to CUDA; return a new
        instance."""
        new_data = self.new()
        for k, v in self.items():
            if isinstance(v, torch.Tensor):
                v = v.cuda()
            new_data[k] = v
        return new_data

    def detach(self):
        """Detach all tensors in ``data_fields``; return a new instance."""
        new_data = self.new()
        for k, v in self.items():
            if isinstance(v, torch.Tensor):
                v = v.detach()
            new_data[k] = v
        return new_data

    def numpy(self):
        """Convert all tensors in ``data_fields`` to ndarrays; return a new
        instance."""
        new_data = self.new()
        for k, v in self.items():
            if isinstance(v, torch.Tensor):
                v = v.detach().cpu().numpy()
            new_data[k] = v
        return new_data

    def __nice__(self):
        # Body of the repr assembled here; NiceRepr is assumed to wrap it
        # with the class name and id — TODO confirm against NiceRepr.
        repr = '\n \n META INFORMATION \n'
        for k, v in self.meta_info_items():
            repr += f'{k}: {v} \n'
        repr += '\n DATA FIELDS \n'
        for k, v in self.items():
            if isinstance(v, (torch.Tensor, np.ndarray)):
                repr += f'shape of {k}: {v.shape} \n'
            else:
                repr += f'{k}: {v} \n'
        return repr + '\n'
class InstanceData(GeneralData):
    """Data structure for instance-level annotations or predictions.

    Subclass of :class:`GeneralData`. Every value in ``data_fields`` must be
    a ``torch.Tensor``, ``np.ndarray`` or ``list`` and all values must share
    the same first-dimension length, so the whole object can be indexed,
    sliced and filtered like a batch of instances. This design refers to
    https://github.com/facebookresearch/detectron2/blob/master/detectron2/structures/instances.py  # noqa E501
    """

    def __setattr__(self, name, value):
        # Same bookkeeping protection as GeneralData, plus type and length
        # validation so all fields stay consistent.
        if name in ('_meta_info_fields', '_data_fields'):
            if not hasattr(self, name):
                super().__setattr__(name, value)
            else:
                raise AttributeError(f'{name} has been used as a private attribute, which is immutable. ')
        else:
            assert isinstance(value, (torch.Tensor, np.ndarray, list)), f'Can set {type(value)}, only support {(torch.Tensor, np.ndarray, list)}'
            if self._data_fields:
                # Every field must keep the shared instance count.
                assert len(value) == len(self), f'the length of values {len(value)} is not consistent with the length of this :obj:`InstanceData` {len(self)} '
            super().__setattr__(name, value)

    def __getitem__(self, item):
        """Index the stored instances.

        Args:
            item (str, obj:`slice`,
                obj`torch.LongTensor`, obj:`torch.BoolTensor`):
                Field name, or an index/slice/mask along the first dimension.

        Returns:
            obj:`InstanceData`: Selected instances (or the raw field value
            when ``item`` is a ``str``).
        """
        assert len(self), ' This is a empty instance'

        assert isinstance(
            item, (str, slice, int, torch.LongTensor, torch.BoolTensor))
        if isinstance(item, str):
            return getattr(self, item)

        if type(item) == int:
            if item >= len(self) or item < -len(self):
                raise IndexError(f'Index {item} out of range!')
            else:
                # A step of len(self) makes the slice select exactly one
                # element, so the result stays a length-1 InstanceData
                # instead of a bare value.
                item = slice(item, None, len(self))

        new_data = self.new()
        if isinstance(item, torch.Tensor):
            assert item.dim() == 1, 'Only support to get the values along the first dimension.'
            if isinstance(item, torch.BoolTensor):
                assert len(item) == len(self), f'The shape of the input(BoolTensor)) {len(item)} does not match the shape of the indexed tensor in results_filed {len(self)} at first dimension. '

            for k, v in self.items():
                if isinstance(v, torch.Tensor):
                    new_data[k] = v[item]
                elif isinstance(v, np.ndarray):
                    # ndarray cannot take a torch index directly.
                    new_data[k] = v[item.cpu().numpy()]
                elif isinstance(v, list):
                    r_list = []
                    # Convert a bool mask to integer positions for lists.
                    if isinstance(item, torch.BoolTensor):
                        indexes = torch.nonzero(item).view(-1)
                    else:
                        indexes = item
                    for index in indexes:
                        r_list.append(v[index])
                    new_data[k] = r_list
        else:
            # int (converted to slice above) or slice: native indexing works
            # for tensors, ndarrays and lists alike.
            for k, v in self.items():
                new_data[k] = v[item]
        return new_data

    @staticmethod
    def cat(instances_list):
        """Concat the predictions of all :obj:`InstanceData` in the list.

        Args:
            instances_list (list[:obj:`InstanceData`]): A list
                of :obj:`InstanceData`.

        Returns:
            obj:`InstanceData`
        """
        assert all(
            isinstance(results, InstanceData) for results in instances_list)
        assert len(instances_list) > 0
        if len(instances_list) == 1:
            return instances_list[0]

        new_data = instances_list[0].new()
        # NOTE(review): assumes every InstanceData holds the same field keys
        # as the first one — TODO confirm at call sites.
        for k in instances_list[0]._data_fields:
            values = [results[k] for results in instances_list]
            v0 = values[0]
            if isinstance(v0, torch.Tensor):
                values = torch.cat(values, dim=0)
            elif isinstance(v0, np.ndarray):
                values = np.concatenate(values, axis=0)
            elif isinstance(v0, list):
                values = list(itertools.chain(*values))
            else:
                raise ValueError(f'Can not concat the {k} which is a {type(v0)}')
            new_data[k] = values
        return new_data

    def __len__(self):
        # All fields share one length, so any single value's length works.
        if len(self._data_fields):
            for v in self.values():
                return len(v)
        else:
            raise AssertionError('This is an empty `InstanceData`.')
def wider_face_classes():
    """Return the class names of the WIDER FACE dataset (one category)."""
    return ['face']
def voc_classes():
    """Return the 20 class names of the PASCAL VOC dataset."""
    names = ('aeroplane bicycle bird boat bottle bus car cat chair cow '
             'diningtable dog horse motorbike person pottedplant sheep '
             'sofa train tvmonitor')
    return names.split()
def imagenet_det_classes():
    """Return the 200 class names of the ImageNet DET dataset."""
    return [
        'accordion', 'airplane', 'ant', 'antelope', 'apple', 'armadillo',
        'artichoke', 'axe', 'baby_bed', 'backpack', 'bagel', 'balance_beam',
        'banana', 'band_aid', 'banjo', 'baseball', 'basketball',
        'bathing_cap', 'beaker', 'bear', 'bee', 'bell_pepper', 'bench',
        'bicycle', 'binder', 'bird', 'bookshelf', 'bow_tie', 'bow', 'bowl',
        'brassiere', 'burrito', 'bus', 'butterfly', 'camel', 'can_opener',
        'car', 'cart', 'cattle', 'cello', 'centipede', 'chain_saw', 'chair',
        'chime', 'cocktail_shaker', 'coffee_maker', 'computer_keyboard',
        'computer_mouse', 'corkscrew', 'cream', 'croquet_ball', 'crutch',
        'cucumber', 'cup_or_mug', 'diaper', 'digital_clock', 'dishwasher',
        'dog', 'domestic_cat', 'dragonfly', 'drum', 'dumbbell',
        'electric_fan', 'elephant', 'face_powder', 'fig', 'filing_cabinet',
        'flower_pot', 'flute', 'fox', 'french_horn', 'frog', 'frying_pan',
        'giant_panda', 'goldfish', 'golf_ball', 'golfcart', 'guacamole',
        'guitar', 'hair_dryer', 'hair_spray', 'hamburger', 'hammer',
        'hamster', 'harmonica', 'harp', 'hat_with_a_wide_brim',
        'head_cabbage', 'helmet', 'hippopotamus', 'horizontal_bar', 'horse',
        'hotdog', 'iPod', 'isopod', 'jellyfish', 'koala_bear', 'ladle',
        'ladybug', 'lamp', 'laptop', 'lemon', 'lion', 'lipstick', 'lizard',
        'lobster', 'maillot', 'maraca', 'microphone', 'microwave',
        'milk_can', 'miniskirt', 'monkey', 'motorcycle', 'mushroom', 'nail',
        'neck_brace', 'oboe', 'orange', 'otter', 'pencil_box',
        'pencil_sharpener', 'perfume', 'person', 'piano', 'pineapple',
        'ping-pong_ball', 'pitcher', 'pizza', 'plastic_bag', 'plate_rack',
        'pomegranate', 'popsicle', 'porcupine', 'power_drill', 'pretzel',
        'printer', 'puck', 'punching_bag', 'purse', 'rabbit', 'racket',
        'ray', 'red_panda', 'refrigerator', 'remote_control',
        'rubber_eraser', 'rugby_ball', 'ruler', 'salt_or_pepper_shaker',
        'saxophone', 'scorpion', 'screwdriver', 'seal', 'sheep', 'ski',
        'skunk', 'snail', 'snake', 'snowmobile', 'snowplow',
        'soap_dispenser', 'soccer_ball', 'sofa', 'spatula', 'squirrel',
        'starfish', 'stethoscope', 'stove', 'strainer', 'strawberry',
        'stretcher', 'sunglasses', 'swimming_trunks', 'swine', 'syringe',
        'table', 'tape_player', 'tennis_ball', 'tick', 'tie', 'tiger',
        'toaster', 'traffic_light', 'train', 'trombone', 'trumpet',
        'turtle', 'tv_or_monitor', 'unicycle', 'vacuum', 'violin',
        'volleyball', 'waffle_iron', 'washer', 'water_bottle', 'watercraft',
        'whale', 'wine_bottle', 'zebra'
    ]
def imagenet_vid_classes():
    """Return the 30 class names of the ImageNet VID dataset."""
    names = ('airplane antelope bear bicycle bird bus car cattle dog '
             'domestic_cat elephant fox giant_panda hamster horse lion '
             'lizard monkey motorcycle rabbit red_panda sheep snake '
             'squirrel tiger train turtle watercraft whale zebra')
    return names.split()
def coco_classes():
    """Return the 80 class names of the COCO dataset."""
    names = ('person bicycle car motorcycle airplane bus train truck boat '
             'traffic_light fire_hydrant stop_sign parking_meter bench '
             'bird cat dog horse sheep cow elephant bear zebra giraffe '
             'backpack umbrella handbag tie suitcase frisbee skis '
             'snowboard sports_ball kite baseball_bat baseball_glove '
             'skateboard surfboard tennis_racket bottle wine_glass cup '
             'fork knife spoon bowl banana apple sandwich orange broccoli '
             'carrot hot_dog pizza donut cake chair couch potted_plant bed '
             'dining_table toilet tv laptop mouse remote keyboard '
             'cell_phone microwave oven toaster sink refrigerator book '
             'clock vase scissors teddy_bear hair_drier toothbrush')
    return names.split()