code
stringlengths
17
6.64M
def test_is_filepath():
    """is_filepath accepts str and Path values and rejects other types."""
    for candidate in (__file__, 'abc', Path('/etc')):
        assert mmcv.is_filepath(candidate)
    assert not mmcv.is_filepath(0)
def test_fopen():
    """fopen returns a readable handle for both str and Path inputs."""
    for path in (__file__, Path(__file__)):
        handle = mmcv.fopen(path)
        assert hasattr(handle, 'read')
def test_check_file_exist():
    """check_file_exist is silent for existing files and raises otherwise."""
    mmcv.check_file_exist(__file__)
    missing = 'no_such_file.txt'
    with pytest.raises(FileNotFoundError):
        mmcv.check_file_exist(missing)
def test_scandir():
    """Exercise mmcv.scandir: suffix filtering, recursion, case sensitivity."""
    folder = osp.join(osp.dirname(osp.dirname(__file__)), 'data/for_scan')
    filenames = ['a.bin', '1.txt', '2.txt', '1.json', '2.json', '3.TXT']
    # str and Path inputs must behave identically
    assert (set(mmcv.scandir(folder)) == set(filenames))
    assert (set(mmcv.scandir(Path(folder))) == set(filenames))
    # single-suffix filter
    assert (set(mmcv.scandir(folder, '.txt')) == set([filename for filename in filenames if filename.endswith('.txt')]))
    # tuple of suffixes
    assert (set(mmcv.scandir(folder, ('.json', '.txt'))) == set([filename for filename in filenames if filename.endswith(('.txt', '.json'))]))
    assert (set(mmcv.scandir(folder, '.png')) == set())
    filenames_recursive = ['a.bin', '1.txt', '2.txt', '1.json', '2.json', '3.TXT', osp.join('sub', '1.json'), osp.join('sub', '1.txt'), '.file']
    # hidden files ('.file') are skipped by a recursive scan
    assert (set(mmcv.scandir(folder, recursive=True)) == set([filename for filename in filenames_recursive if (filename != '.file')]))
    assert (set(mmcv.scandir(Path(folder), recursive=True)) == set([filename for filename in filenames_recursive if (filename != '.file')]))
    assert (set(mmcv.scandir(folder, '.txt', recursive=True)) == set([filename for filename in filenames_recursive if filename.endswith('.txt')]))
    # case-insensitive matching picks up both '.txt' and '.TXT'
    assert (set(mmcv.scandir(folder, '.TXT', recursive=True, case_sensitive=False)) == set([filename for filename in filenames_recursive if filename.endswith(('.txt', '.TXT'))]))
    assert (set(mmcv.scandir(folder, ('.TXT', '.JSON'), recursive=True, case_sensitive=False)) == set([filename for filename in filenames_recursive if filename.endswith(('.txt', '.json', '.TXT'))]))
    # invalid argument types are rejected
    with pytest.raises(TypeError):
        list(mmcv.scandir(123))
    with pytest.raises(TypeError):
        list(mmcv.scandir(folder, 111))
def reset_string_io(io):
    """Empty a StringIO buffer and rewind it so it can be reused."""
    io.seek(0)
    io.truncate(0)
class TestProgressBar():
    """Tests for mmcv.ProgressBar rendering, with and without a task count."""

    def test_start(self):
        # bar output is redirected into a StringIO instead of stdout
        out = StringIO()
        bar_width = 20
        prog_bar = mmcv.ProgressBar(bar_width=bar_width, file=out)
        assert (out.getvalue() == 'completed: 0, elapsed: 0s')
        reset_string_io(out)
        # start=False defers any printing until .start() is called
        prog_bar = mmcv.ProgressBar(bar_width=bar_width, start=False, file=out)
        assert (out.getvalue() == '')
        reset_string_io(out)
        prog_bar.start()
        assert (out.getvalue() == 'completed: 0, elapsed: 0s')
        reset_string_io(out)
        # with a known task count, the bar itself is drawn
        prog_bar = mmcv.ProgressBar(10, bar_width=bar_width, file=out)
        assert (out.getvalue() == f"[{(' ' * bar_width)}] 0/10, elapsed: 0s, ETA:")
        reset_string_io(out)
        prog_bar = mmcv.ProgressBar(10, bar_width=bar_width, start=False, file=out)
        assert (out.getvalue() == '')
        reset_string_io(out)
        prog_bar.start()
        assert (out.getvalue() == f"[{(' ' * bar_width)}] 0/10, elapsed: 0s, ETA:")

    def test_update(self):
        out = StringIO()
        bar_width = 20
        prog_bar = mmcv.ProgressBar(bar_width=bar_width, file=out)
        time.sleep(1)  # make elapsed time ~1s so the rate is deterministic
        reset_string_io(out)
        prog_bar.update()
        assert (out.getvalue() == 'completed: 1, elapsed: 1s, 1.0 tasks/s')
        reset_string_io(out)
        prog_bar = mmcv.ProgressBar(10, bar_width=bar_width, file=out)
        time.sleep(1)
        reset_string_io(out)
        prog_bar.update()
        assert (out.getvalue() == f'''
[{(('>' * 2) + (' ' * 18))}] 1/10, 1.0 task/s, elapsed: 1s, ETA: 9s''')

    def test_adaptive_length(self):
        # the bar width adapts to the terminal width read from $COLUMNS
        with patch.dict('os.environ', {'COLUMNS': '80'}):
            out = StringIO()
            bar_width = 20
            prog_bar = mmcv.ProgressBar(10, bar_width=bar_width, file=out)
            time.sleep(1)
            reset_string_io(out)
            prog_bar.update()
            assert (len(out.getvalue()) == 66)
            os.environ['COLUMNS'] = '30'
            reset_string_io(out)
            prog_bar.update()
            assert (len(out.getvalue()) == 48)
            os.environ['COLUMNS'] = '60'
            reset_string_io(out)
            prog_bar.update()
            assert (len(out.getvalue()) == 60)
def sleep_1s(num):
    """Sleep one second and echo *num* back (paces the progress-bar tests)."""
    duration = 1
    time.sleep(duration)
    return num
def test_track_progress_list():
    """track_progress over a plain list renders one bar update per task."""
    out = StringIO()
    ret = mmcv.track_progress(sleep_1s, [1, 2, 3], bar_width=3, file=out)
    # each '\r' overwrites the previous in-place bar line
    assert (out.getvalue() == '[   ] 0/3, elapsed: 0s, ETA:\r[>  ] 1/3, 1.0 task/s, elapsed: 1s, ETA: 2s\r[>> ] 2/3, 1.0 task/s, elapsed: 2s, ETA: 1s\r[>>>] 3/3, 1.0 task/s, elapsed: 3s, ETA: 0s\n')
    assert (ret == [1, 2, 3])
def test_track_progress_iterator():
    """track_progress also accepts an (iterator, length) pair as the task source."""
    out = StringIO()
    ret = mmcv.track_progress(sleep_1s, ((i for i in [1, 2, 3]), 3), bar_width=3, file=out)
    assert (out.getvalue() == '[   ] 0/3, elapsed: 0s, ETA:\r[>  ] 1/3, 1.0 task/s, elapsed: 1s, ETA: 2s\r[>> ] 2/3, 1.0 task/s, elapsed: 2s, ETA: 1s\r[>>>] 3/3, 1.0 task/s, elapsed: 3s, ETA: 0s\n')
    assert (ret == [1, 2, 3])
def test_track_iter_progress():
    """track_iter_progress wraps an iterable and updates the bar per item."""
    out = StringIO()
    ret = []
    for num in mmcv.track_iter_progress([1, 2, 3], bar_width=3, file=out):
        ret.append(sleep_1s(num))
    assert (out.getvalue() == '[   ] 0/3, elapsed: 0s, ETA:\r[>  ] 1/3, 1.0 task/s, elapsed: 1s, ETA: 2s\r[>> ] 2/3, 1.0 task/s, elapsed: 2s, ETA: 1s\r[>>>] 3/3, 1.0 task/s, elapsed: 3s, ETA: 0s\n')
    assert (ret == [1, 2, 3])
def test_track_enum_progress():
    """track_iter_progress composes with enumerate without breaking the bar."""
    out = StringIO()
    ret = []
    count = []
    for (i, num) in enumerate(mmcv.track_iter_progress([1, 2, 3], bar_width=3, file=out)):
        ret.append(sleep_1s(num))
        count.append(i)
    assert (out.getvalue() == '[   ] 0/3, elapsed: 0s, ETA:\r[>  ] 1/3, 1.0 task/s, elapsed: 1s, ETA: 2s\r[>> ] 2/3, 1.0 task/s, elapsed: 2s, ETA: 1s\r[>>>] 3/3, 1.0 task/s, elapsed: 3s, ETA: 0s\n')
    assert (ret == [1, 2, 3])
    assert (count == [0, 1, 2])
def test_track_parallel_progress_list():
    """Parallel tracking over a list preserves input order in the results."""
    out = StringIO()
    tasks = [1, 2, 3, 4]
    results = mmcv.track_parallel_progress(sleep_1s, tasks, 2, bar_width=4, file=out)
    assert (results == [1, 2, 3, 4])
def test_track_parallel_progress_iterator():
    """Parallel tracking accepts an (iterator, length) pair as the task source."""
    out = StringIO()
    task_source = ((i for i in [1, 2, 3, 4]), 4)
    results = mmcv.track_parallel_progress(sleep_1s, task_source, 2, bar_width=4, file=out)
    assert (results == [1, 2, 3, 4])
def test_registry():
    """End-to-end checks of mmcv.Registry registration semantics."""
    CATS = mmcv.Registry('cat')
    assert (CATS.name == 'cat')
    assert (CATS.module_dict == {})
    assert (len(CATS) == 0)

    @CATS.register_module()
    class BritishShorthair():
        pass
    assert (len(CATS) == 1)
    assert (CATS.get('BritishShorthair') is BritishShorthair)

    class Munchkin():
        pass
    CATS.register_module(Munchkin)
    assert (len(CATS) == 2)
    assert (CATS.get('Munchkin') is Munchkin)
    assert ('Munchkin' in CATS)
    # duplicate names are rejected unless force=True is passed
    with pytest.raises(KeyError):
        CATS.register_module(Munchkin)
    CATS.register_module(Munchkin, force=True)
    assert (len(CATS) == 2)
    with pytest.raises(KeyError):
        @CATS.register_module()
        class BritishShorthair():
            pass

    @CATS.register_module(force=True)
    class BritishShorthair():
        pass
    assert (len(CATS) == 2)
    # missing keys return None rather than raising
    assert (CATS.get('PersianCat') is None)
    assert ('PersianCat' not in CATS)

    # one class may be registered under several names at once
    @CATS.register_module(name=['Siamese', 'Siamese2'])
    class SiameseCat():
        pass
    assert (CATS.get('Siamese').__name__ == 'SiameseCat')
    assert (CATS.get('Siamese2').__name__ == 'SiameseCat')

    class SphynxCat():
        pass
    CATS.register_module(name='Sphynx', module=SphynxCat)
    assert (CATS.get('Sphynx') is SphynxCat)
    CATS.register_module(name=['Sphynx1', 'Sphynx2'], module=SphynxCat)
    assert (CATS.get('Sphynx2') is SphynxCat)
    # repr lists entries in registration order
    repr_str = 'Registry(name=cat, items={'
    repr_str += "'BritishShorthair': <class 'test_registry.test_registry.<locals>.BritishShorthair'>, "
    repr_str += "'Munchkin': <class 'test_registry.test_registry.<locals>.Munchkin'>, "
    repr_str += "'Siamese': <class 'test_registry.test_registry.<locals>.SiameseCat'>, "
    repr_str += "'Siamese2': <class 'test_registry.test_registry.<locals>.SiameseCat'>, "
    repr_str += "'Sphynx': <class 'test_registry.test_registry.<locals>.SphynxCat'>, "
    repr_str += "'Sphynx1': <class 'test_registry.test_registry.<locals>.SphynxCat'>, "
    repr_str += "'Sphynx2': <class 'test_registry.test_registry.<locals>.SphynxCat'>"
    repr_str += '})'
    assert (repr(CATS) == repr_str)
    # name must be a str or a list of str
    with pytest.raises(TypeError):
        CATS.register_module(name=7474741, module=SphynxCat)
    with pytest.raises(TypeError):
        CATS.register_module(0)
    # functions cannot be registered, only classes
    with pytest.raises(TypeError):
        @CATS.register_module()
        def some_method():
            pass
    # the legacy positional-class call style still works but warns
    with pytest.warns(DeprecationWarning):
        CATS.register_module(SphynxCat)
    assert (CATS.get('SphynxCat').__name__ == 'SphynxCat')
    with pytest.warns(DeprecationWarning):
        CATS.register_module(SphynxCat, force=True)
    assert (CATS.get('SphynxCat').__name__ == 'SphynxCat')
    with pytest.warns(DeprecationWarning):
        @CATS.register_module
        class NewCat():
            pass
    assert (CATS.get('NewCat').__name__ == 'NewCat')
    with pytest.warns(DeprecationWarning):
        CATS.deprecated_register_module(SphynxCat, force=True)
    assert (CATS.get('SphynxCat').__name__ == 'SphynxCat')
    with pytest.warns(DeprecationWarning):
        @CATS.deprecated_register_module
        class CuteCat():
            pass
    assert (CATS.get('CuteCat').__name__ == 'CuteCat')
    with pytest.warns(DeprecationWarning):
        @CATS.deprecated_register_module(force=True)
        class NewCat2():
            pass
    assert (CATS.get('NewCat2').__name__ == 'NewCat2')
def test_multi_scope_registry():
    """Parent/child registries resolve scoped keys like 'hound.BloodHound'."""
    DOGS = mmcv.Registry('dogs')
    assert (DOGS.name == 'dogs')
    # the default scope is inferred from the defining module's name
    assert (DOGS.scope == 'test_registry')
    assert (DOGS.module_dict == {})
    assert (len(DOGS) == 0)

    @DOGS.register_module()
    class GoldenRetriever():
        pass
    assert (len(DOGS) == 1)
    assert (DOGS.get('GoldenRetriever') is GoldenRetriever)

    HOUNDS = mmcv.Registry('dogs', parent=DOGS, scope='hound')

    @HOUNDS.register_module()
    class BloodHound():
        pass
    assert (len(HOUNDS) == 1)
    assert (HOUNDS.get('BloodHound') is BloodHound)
    # a parent can reach a child entry through the scoped key
    assert (DOGS.get('hound.BloodHound') is BloodHound)
    assert (HOUNDS.get('hound.BloodHound') is BloodHound)

    LITTLE_HOUNDS = mmcv.Registry('dogs', parent=HOUNDS, scope='little_hound')

    @LITTLE_HOUNDS.register_module()
    class Dachshund():
        pass
    assert (len(LITTLE_HOUNDS) == 1)
    assert (LITTLE_HOUNDS.get('Dachshund') is Dachshund)
    assert (LITTLE_HOUNDS.get('hound.BloodHound') is BloodHound)
    assert (HOUNDS.get('little_hound.Dachshund') is Dachshund)
    # the root registry needs the full dotted scope chain
    assert (DOGS.get('hound.little_hound.Dachshund') is Dachshund)

    MID_HOUNDS = mmcv.Registry('dogs', parent=HOUNDS, scope='mid_hound')

    @MID_HOUNDS.register_module()
    class Beagle():
        pass
    assert (MID_HOUNDS.get('Beagle') is Beagle)
    assert (HOUNDS.get('mid_hound.Beagle') is Beagle)
    assert (DOGS.get('hound.mid_hound.Beagle') is Beagle)
    # siblings can resolve each other by going up through the shared parent
    assert (LITTLE_HOUNDS.get('hound.mid_hound.Beagle') is Beagle)
    assert (MID_HOUNDS.get('hound.BloodHound') is BloodHound)
    # 'hound.Dachshund' does not exist (Dachshund lives in little_hound)
    assert (MID_HOUNDS.get('hound.Dachshund') is None)
def test_build_from_cfg():
    """build_from_cfg: type lookup, default_args merging and error cases."""
    BACKBONES = mmcv.Registry('backbone')

    @BACKBONES.register_module()
    class ResNet():
        def __init__(self, depth, stages=4):
            self.depth = depth
            self.stages = stages

    @BACKBONES.register_module()
    class ResNeXt():
        def __init__(self, depth, stages=4):
            self.depth = depth
            self.stages = stages

    cfg = dict(type='ResNet', depth=50)
    model = mmcv.build_from_cfg(cfg, BACKBONES)
    assert isinstance(model, ResNet)
    assert ((model.depth == 50) and (model.stages == 4))
    # default_args fill in parameters missing from cfg
    cfg = dict(type='ResNet', depth=50)
    model = mmcv.build_from_cfg(cfg, BACKBONES, default_args={'stages': 3})
    assert isinstance(model, ResNet)
    assert ((model.depth == 50) and (model.stages == 3))
    cfg = dict(type='ResNeXt', depth=50, stages=3)
    model = mmcv.build_from_cfg(cfg, BACKBONES)
    assert isinstance(model, ResNeXt)
    assert ((model.depth == 50) and (model.stages == 3))
    # 'type' may also be the class object itself
    cfg = dict(type=ResNet, depth=50)
    model = mmcv.build_from_cfg(cfg, BACKBONES)
    assert isinstance(model, ResNet)
    assert ((model.depth == 50) and (model.stages == 4))
    # 'type' may come from default_args instead of cfg
    cfg = dict(depth=50)
    model = mmcv.build_from_cfg(cfg, BACKBONES, default_args=dict(type='ResNet'))
    assert isinstance(model, ResNet)
    assert ((model.depth == 50) and (model.stages == 4))
    cfg = dict(depth=50)
    model = mmcv.build_from_cfg(cfg, BACKBONES, default_args=dict(type=ResNet))
    assert isinstance(model, ResNet)
    assert ((model.depth == 50) and (model.stages == 4))
    # the registry argument must be a Registry, not a string
    with pytest.raises(TypeError):
        cfg = dict(type='VGG')
        model = mmcv.build_from_cfg(cfg, 'BACKBONES')
    # unknown type names raise KeyError
    with pytest.raises(KeyError):
        cfg = dict(type='VGG')
        model = mmcv.build_from_cfg(cfg, BACKBONES)
    # default_args must be a dict or None
    with pytest.raises(TypeError):
        cfg = dict(type='ResNet', depth=50)
        model = mmcv.build_from_cfg(cfg, BACKBONES, default_args=1)
    # 'type' must be a str or class
    with pytest.raises(TypeError):
        cfg = dict(type=1000)
        model = mmcv.build_from_cfg(cfg, BACKBONES)
    # a 'type' key is mandatory in cfg or default_args
    with pytest.raises(KeyError, match='must contain the key "type"'):
        cfg = dict(depth=50, stages=4)
        model = mmcv.build_from_cfg(cfg, BACKBONES)
    with pytest.raises(KeyError, match='must contain the key "type"'):
        cfg = dict(depth=50)
        model = mmcv.build_from_cfg(cfg, BACKBONES, default_args=dict(stages=4))
    with pytest.raises(TypeError):
        cfg = dict(type='ResNet', depth=50)
        model = mmcv.build_from_cfg(cfg, 'BACKBONES')
    with pytest.raises(TypeError):
        cfg = dict(type='ResNet', depth=50)
        model = mmcv.build_from_cfg(cfg, BACKBONES, default_args=0)
    # constructor kwargs are validated against the class signature
    with pytest.raises(TypeError):
        cfg = dict(type='ResNet', non_existing_arg=50)
        model = mmcv.build_from_cfg(cfg, BACKBONES)
def test_assert_dict_contains_subset():
    """assert_dict_contains_subset compares values including ndarray/tensor."""
    dict_obj = {'a': 'test1', 'b': 2, 'c': (4, 6)}
    expected_subset = {'a': 'test1', 'b': 2, 'c': (4, 6)}
    assert mmcv.assert_dict_contains_subset(dict_obj, expected_subset)
    # tuple order matters
    expected_subset = {'a': 'test1', 'b': 2, 'c': (6, 4)}
    assert (not mmcv.assert_dict_contains_subset(dict_obj, expected_subset))
    expected_subset = {'a': 'test1', 'b': 2, 'c': None}
    assert (not mmcv.assert_dict_contains_subset(dict_obj, expected_subset))
    # missing keys fail the check
    expected_subset = {'a': 'test1', 'b': 2, 'd': (4, 6)}
    assert (not mmcv.assert_dict_contains_subset(dict_obj, expected_subset))
    # numpy arrays are compared element-wise
    dict_obj = {'a': 'test1', 'b': 2, 'c': (4, 6), 'd': np.array([[5, 3, 5], [1, 2, 3]])}
    expected_subset = {'a': 'test1', 'b': 2, 'c': (4, 6), 'd': np.array([[5, 3, 5], [6, 2, 3]])}
    assert (not mmcv.assert_dict_contains_subset(dict_obj, expected_subset))
    dict_obj = {'a': 'test1', 'b': 2, 'c': (4, 6), 'd': np.array([[1]])}
    expected_subset = {'a': 'test1', 'b': 2, 'c': (4, 6), 'd': np.array([[1]])}
    assert mmcv.assert_dict_contains_subset(dict_obj, expected_subset)
    if (torch is not None):
        # torch tensors: mismatching values and mismatching shapes both fail
        dict_obj = {'a': 'test1', 'b': 2, 'c': (4, 6), 'd': torch.tensor([5, 3, 5])}
        expected_subset = {'d': torch.tensor([5, 5, 5])}
        assert (not mmcv.assert_dict_contains_subset(dict_obj, expected_subset))
        expected_subset = {'d': torch.tensor([[5, 3, 5], [4, 1, 2]])}
        assert (not mmcv.assert_dict_contains_subset(dict_obj, expected_subset))
def test_assert_attrs_equal():
    """assert_attrs_equal checks class attributes, including methods/tensors."""
    class TestExample(object):
        (a, b, c) = (1, ('wvi', 3), [4.5, 3.14])

        def test_func(self):
            return self.b
    assert mmcv.assert_attrs_equal(TestExample, {'a': 1, 'b': ('wvi', 3), 'c': [4.5, 3.14]})
    assert (not mmcv.assert_attrs_equal(TestExample, {'a': 1, 'b': ('wvi', 3), 'c': [4.5, 3.14, 2]}))
    # an attribute name that does not exist fails the check
    assert (not mmcv.assert_attrs_equal(TestExample, {'bc': 54, 'c': [4.5, 3.14]}))
    # functions compare by identity
    assert mmcv.assert_attrs_equal(TestExample, {'b': ('wvi', 3), 'test_func': TestExample.test_func})
    if (torch is not None):
        class TestExample(object):
            (a, b) = (torch.tensor([1]), torch.tensor([4, 5]))
        assert mmcv.assert_attrs_equal(TestExample, {'a': torch.tensor([1]), 'b': torch.tensor([4, 5])})
        assert (not mmcv.assert_attrs_equal(TestExample, {'a': torch.tensor([1]), 'b': torch.tensor([4, 6])}))
# parametrized over module-level fixtures assert_dict_has_keys_data_1/2
# (defined elsewhere in this file)
@pytest.mark.parametrize('obj', assert_dict_has_keys_data_1)
@pytest.mark.parametrize('expected_keys, ret_value', assert_dict_has_keys_data_2)
def test_assert_dict_has_keys(obj, expected_keys, ret_value):
    """assert_dict_has_keys returns the expected bool for each fixture pair."""
    assert (mmcv.assert_dict_has_keys(obj, expected_keys) == ret_value)
# parametrized over module-level fixtures assert_keys_equal_data_1/2
# (defined elsewhere in this file)
@pytest.mark.parametrize('result_keys', assert_keys_equal_data_1)
@pytest.mark.parametrize('target_keys, ret_value', assert_keys_equal_data_2)
def test_assert_keys_equal(result_keys, target_keys, ret_value):
    """assert_keys_equal returns the expected bool for each fixture pair."""
    assert (mmcv.assert_keys_equal(result_keys, target_keys) == ret_value)
@pytest.mark.skipif((torch is None), reason='requires torch library')
def test_assert_is_norm_layer():
    """Only normalization layers (BatchNorm, GroupNorm, ...) are accepted."""
    assert (not mmcv.assert_is_norm_layer(nn.Conv3d(3, 64, 3)))
    assert mmcv.assert_is_norm_layer(nn.BatchNorm3d(128))
    assert mmcv.assert_is_norm_layer(nn.GroupNorm(8, 64))
    assert (not mmcv.assert_is_norm_layer(nn.Sigmoid()))
@pytest.mark.skipif((torch is None), reason='requires torch library')
def test_assert_params_all_zeros():
    """assert_params_all_zeros detects fully zeroed weights (and biases)."""
    demo_module = nn.Conv2d(3, 64, 3)
    nn.init.constant_(demo_module.weight, 0)
    nn.init.constant_(demo_module.bias, 0)
    assert mmcv.assert_params_all_zeros(demo_module)
    # nonzero weight fails even with a zero bias
    nn.init.xavier_normal_(demo_module.weight)
    nn.init.constant_(demo_module.bias, 0)
    assert (not mmcv.assert_params_all_zeros(demo_module))
    # modules without a bias are also supported
    demo_module = nn.Linear(2048, 400, bias=False)
    nn.init.constant_(demo_module.weight, 0)
    assert mmcv.assert_params_all_zeros(demo_module)
    nn.init.normal_(demo_module.weight, mean=0, std=0.01)
    assert (not mmcv.assert_params_all_zeros(demo_module))
def test_check_python_script(capsys):
    """check_python_script runs a script with argv and propagates its exit."""
    mmcv.utils.check_python_script('./tests/data/scripts/hello.py zz')
    captured = capsys.readouterr().out
    assert (captured == 'hello zz!\n')
    mmcv.utils.check_python_script('./tests/data/scripts/hello.py agent')
    captured = capsys.readouterr().out
    assert (captured == 'hello agent!\n')
    # too many arguments makes the script exit with an error
    with pytest.raises(SystemExit):
        mmcv.utils.check_python_script('./tests/data/scripts/hello.py li zz')
def test_timer_init():
    """A Timer starts paused when start=False and running by default."""
    paused = mmcv.Timer(start=False)
    assert (not paused.is_running)
    paused.start()
    assert paused.is_running
    running = mmcv.Timer()
    assert running.is_running
def test_timer_run():
    """since_start/since_last_check measure wall time; unstarted timers raise."""
    timer = mmcv.Timer()
    time.sleep(1)
    # 10ms tolerance on wall-clock measurements
    assert (abs((timer.since_start() - 1)) < 0.01)
    time.sleep(1)
    assert (abs((timer.since_last_check() - 1)) < 0.01)
    assert (abs((timer.since_start() - 2)) < 0.01)
    # querying a timer that was never started raises TimerError
    timer = mmcv.Timer(False)
    with pytest.raises(mmcv.TimerError):
        timer.since_start()
    with pytest.raises(mmcv.TimerError):
        timer.since_last_check()
def test_timer_context(capsys):
    """As a context manager, Timer prints the elapsed time on exit."""
    with mmcv.Timer():
        time.sleep(1)
    (out, _) = capsys.readouterr()
    assert (abs((float(out) - 1)) < 0.01)
    # a custom print template formats the elapsed seconds
    with mmcv.Timer(print_tmpl='time: {:.1f}s'):
        time.sleep(1)
    (out, _) = capsys.readouterr()
    assert (out == 'time: 1.0s\n')
@pytest.mark.skipif((digit_version(torch.__version__) < digit_version('1.6.0')), reason='torch.jit.is_tracing is not available before 1.6.0')
def test_is_jit_tracing():
    """is_jit_tracing is False in eager mode and True under torch.jit.trace."""
    def foo(x):
        if is_jit_tracing():
            return x
        else:
            return x.tolist()

    x = torch.rand(3)
    # eager call takes the non-tracing branch and returns a list
    assert isinstance(foo(x), list)
    # the traced function keeps the tensor branch
    traced_foo = torch.jit.trace(foo, (torch.rand(1),))
    assert isinstance(traced_foo(x), torch.Tensor)
def test_digit_version():
    """digit_version maps version strings to comparable 6-tuples."""
    assert (digit_version('0.2.16') == (0, 2, 16, 0, 0, 0))
    assert (digit_version('1.2.3') == (1, 2, 3, 0, 0, 0))
    # release candidates sort below the final release (-1 in the rc slot)
    assert (digit_version('1.2.3rc0') == (1, 2, 3, 0, (-1), 0))
    assert (digit_version('1.2.3rc1') == (1, 2, 3, 0, (-1), 1))
    assert (digit_version('1.0rc0') == (1, 0, 0, 0, (-1), 0))
    assert (digit_version('1.0') == digit_version('1.0.0'))
    # local-version suffixes (+cuda...) are ignored
    assert (digit_version('1.5.0+cuda90_cudnn7.6.3_lms') == digit_version('1.5'))
    # pre/post release ordering: dev < a < b < rc < final < post
    assert (digit_version('1.0.0dev') < digit_version('1.0.0a'))
    assert (digit_version('1.0.0a') < digit_version('1.0.0a1'))
    assert (digit_version('1.0.0a') < digit_version('1.0.0b'))
    assert (digit_version('1.0.0b') < digit_version('1.0.0rc'))
    assert (digit_version('1.0.0rc1') < digit_version('1.0.0'))
    assert (digit_version('1.0.0') < digit_version('1.0.0post'))
    assert (digit_version('1.0.0post') < digit_version('1.0.0post1'))
    # a leading 'v' is stripped
    assert (digit_version('v1') == (1, 0, 0, 0, 0, 0))
    assert (digit_version('v1.1.5') == (1, 1, 5, 0, 0, 0))
    # malformed versions are rejected
    with pytest.raises(AssertionError):
        digit_version('a')
    with pytest.raises(AssertionError):
        digit_version('1x')
    with pytest.raises(AssertionError):
        digit_version('1.x')
def test_parse_version_info():
    """parse_version_info keeps the release tag ('rc') as a string component."""
    assert (parse_version_info('0.2.16') == (0, 2, 16, 0, 0, 0))
    assert (parse_version_info('1.2.3') == (1, 2, 3, 0, 0, 0))
    assert (parse_version_info('1.2.3rc0') == (1, 2, 3, 0, 'rc', 0))
    assert (parse_version_info('1.2.3rc1') == (1, 2, 3, 0, 'rc', 1))
    assert (parse_version_info('1.0rc0') == (1, 0, 0, 0, 'rc', 0))
def _mock_cmd_success(cmd): return '3b46d33e90c397869ad5103075838fdfc9812aa0'.encode('ascii')
def _mock_cmd_fail(cmd): raise OSError
def test_get_git_hash():
    """get_git_hash truncates to `digits` and honors `fallback` on failure."""
    with patch('mmcv.utils.version_utils._minimal_ext_cmd', _mock_cmd_success):
        assert (get_git_hash() == '3b46d33e90c397869ad5103075838fdfc9812aa0')
        assert (get_git_hash(digits=6) == '3b46d3')
        # asking for more digits than available returns the full hash
        assert (get_git_hash(digits=100) == get_git_hash())
    with patch('mmcv.utils.version_utils._minimal_ext_cmd', _mock_cmd_fail):
        assert (get_git_hash() == 'unknown')
        assert (get_git_hash(fallback='n/a') == 'n/a')
class TestVideoEditor():
    """ffmpeg-backed cut/concat/resize operations on a 168-frame sample clip."""

    @classmethod
    def setup_class(cls):
        cls.video_path = osp.join(osp.dirname(__file__), '../data/test.mp4')
        cls.num_frames = 168

    @pytest.mark.skipif((platform.system() == 'Windows'), reason='skip windows')
    def test_cut_concat_video(self):
        part1_file = osp.join(tempfile.gettempdir(), '.mmcv_test1.mp4')
        part2_file = osp.join(tempfile.gettempdir(), '.mmcv_test2.mp4')
        # split at the 3-second mark (25 fps -> 75 frames in part 1)
        mmcv.cut_video(self.video_path, part1_file, end=3, vcodec='h264')
        mmcv.cut_video(self.video_path, part2_file, start=3, vcodec='h264')
        v1 = mmcv.VideoReader(part1_file)
        v2 = mmcv.VideoReader(part2_file)
        assert (len(v1) == 75)
        assert (len(v2) == (self.num_frames - 75))
        # concatenating the parts must restore the original frame count
        out_file = osp.join(tempfile.gettempdir(), '.mmcv_test.mp4')
        mmcv.concat_video([part1_file, part2_file], out_file)
        v = mmcv.VideoReader(out_file)
        assert (len(v) == self.num_frames)
        os.remove(part1_file)
        os.remove(part2_file)
        os.remove(out_file)

    @pytest.mark.skipif((platform.system() == 'Windows'), reason='skip windows')
    def test_resize_video(self):
        out_file = osp.join(tempfile.gettempdir(), '.mmcv_test.mp4')
        # explicit target size
        mmcv.resize_video(self.video_path, out_file, (200, 100), log_level='panic')
        v = mmcv.VideoReader(out_file)
        assert (v.resolution == (200, 100))
        os.remove(out_file)
        # uniform scaling by ratio (source is 294x240)
        mmcv.resize_video(self.video_path, out_file, ratio=2)
        v = mmcv.VideoReader(out_file)
        assert (v.resolution == ((294 * 2), (240 * 2)))
        os.remove(out_file)
        # keep_ar fits inside the target box while preserving aspect ratio
        mmcv.resize_video(self.video_path, out_file, (1000, 480), keep_ar=True)
        v = mmcv.VideoReader(out_file)
        assert (v.resolution == ((294 * 2), (240 * 2)))
        os.remove(out_file)
        # with keep_ar, the smaller of the two ratios wins
        mmcv.resize_video(self.video_path, out_file, ratio=(2, 1.5), keep_ar=True)
        v = mmcv.VideoReader(out_file)
        assert (v.resolution == ((294 * 2), 360))
        os.remove(out_file)
class TestCache():
    """mmcv.Cache behaves as a capacity-bounded FIFO-evicting cache."""

    def test_init(self):
        # a non-positive capacity is invalid
        with pytest.raises(ValueError):
            mmcv.Cache(0)
        cache = mmcv.Cache(100)
        assert (cache.capacity == 100)
        assert (cache.size == 0)

    def test_put(self):
        cache = mmcv.Cache(3)
        for i in range(1, 4):
            cache.put(f'k{i}', i)
            assert (cache.size == i)
        assert (cache._cache == OrderedDict([('k1', 1), ('k2', 2), ('k3', 3)]))
        # inserting beyond capacity evicts the oldest entry ('k1')
        cache.put('k4', 4)
        assert (cache.size == 3)
        assert (cache._cache == OrderedDict([('k2', 2), ('k3', 3), ('k4', 4)]))
        # re-putting an existing key does not reorder or evict
        cache.put('k2', 2)
        assert (cache._cache == OrderedDict([('k2', 2), ('k3', 3), ('k4', 4)]))

    def test_get(self):
        cache = mmcv.Cache(3)
        # missing keys return the provided default (None by default)
        assert (cache.get('key_none') is None)
        assert (cache.get('key_none', 0) == 0)
        cache.put('k1', 1)
        assert (cache.get('k1') == 1)
class TestVideoReader():
    """mmcv.VideoReader: metadata, random access, slicing and frame export.

    The local sample clip is 294x240 @ 25 fps with 168 frames; frames 0-63
    average ~94, frames 64+ average ~205 and the tail frames are black,
    which is how random access is verified below.
    """

    @classmethod
    def setup_class(cls):
        cls.video_path = osp.join(osp.dirname(__file__), '../data/test.mp4')
        cls.num_frames = 168
        # remote clip used to check URL-based opening
        cls.video_url = 'https://www.learningcontainer.com/wp-content/uploads/2020/05/sample-mp4-file.mp4'

    def test_load(self):
        v = mmcv.VideoReader(self.video_path)
        assert (v.width == 294)
        assert (v.height == 240)
        assert (v.fps == 25)
        assert (v.frame_cnt == self.num_frames)
        assert (len(v) == self.num_frames)
        assert v.opened
        import cv2
        assert isinstance(v.vcap, type(cv2.VideoCapture()))
        # opening by URL goes through the same VideoCapture backend
        v = mmcv.VideoReader(self.video_url)
        assert (v.width == 320)
        assert (v.height == 240)
        assert (v.fps == 15)
        assert (v.frame_cnt == 1889)
        assert (len(v) == 1889)
        assert v.opened
        assert isinstance(v.vcap, type(cv2.VideoCapture()))

    def test_read(self):
        v = mmcv.VideoReader(self.video_path)
        img = v.read()
        assert (int(round(img.mean())) == 94)
        # get_frame/__getitem__ support absolute and negative indices
        img = v.get_frame(63)
        assert (int(round(img.mean())) == 94)
        img = v[64]
        assert (int(round(img.mean())) == 205)
        img = v[(-104)]
        assert (int(round(img.mean())) == 205)
        img = v[63]
        assert (int(round(img.mean())) == 94)
        img = v[(-105)]
        assert (int(round(img.mean())) == 94)
        # sequential read resumes after the last random access
        img = v.read()
        assert (int(round(img.mean())) == 205)
        with pytest.raises(IndexError):
            v.get_frame((self.num_frames + 1))
        with pytest.raises(IndexError):
            v[((-self.num_frames) - 1)]

    def test_slice(self):
        v = mmcv.VideoReader(self.video_path)
        imgs = v[(-105):(-103)]
        assert (int(round(imgs[0].mean())) == 94)
        assert (int(round(imgs[1].mean())) == 205)
        assert (len(imgs) == 2)
        imgs = v[63:65]
        assert (int(round(imgs[0].mean())) == 94)
        assert (int(round(imgs[1].mean())) == 205)
        assert (len(imgs) == 2)
        # negative step is supported
        imgs = v[64:62:(-1)]
        assert (int(round(imgs[0].mean())) == 205)
        assert (int(round(imgs[1].mean())) == 94)
        assert (len(imgs) == 2)
        imgs = v[:5]
        assert (len(imgs) == 5)
        for img in imgs:
            assert (int(round(img.mean())) == 94)
        # the last 3 frames are black
        imgs = v[165:]
        assert (len(imgs) == 3)
        for img in imgs:
            assert (int(round(img.mean())) == 0)
        imgs = v[(-3):]
        assert (len(imgs) == 3)
        for img in imgs:
            assert (int(round(img.mean())) == 0)

    def test_current_frame(self):
        v = mmcv.VideoReader(self.video_path)
        # no frame has been read yet
        assert (v.current_frame() is None)
        v.read()
        img = v.current_frame()
        assert (int(round(img.mean())) == 94)

    def test_position(self):
        v = mmcv.VideoReader(self.video_path)
        assert (v.position == 0)
        for _ in range(10):
            v.read()
        assert (v.position == 10)
        # position is the index of the NEXT frame to read
        v.get_frame(99)
        assert (v.position == 100)

    def test_iterator(self):
        cnt = 0
        for img in mmcv.VideoReader(self.video_path):
            cnt += 1
            assert (img.shape == (240, 294, 3))
        assert (cnt == self.num_frames)

    def test_with(self):
        # the context manager releases the capture on exit
        with mmcv.VideoReader(self.video_path) as v:
            assert v.opened
        assert (not v.opened)

    def test_cvt2frames(self):
        v = mmcv.VideoReader(self.video_path)
        frame_dir = tempfile.mkdtemp()
        v.cvt2frames(frame_dir)
        assert osp.isdir(frame_dir)
        for i in range(self.num_frames):
            filename = f'{frame_dir}/{i:06d}.jpg'
            assert osp.isfile(filename)
            os.remove(filename)
        # show_progress=False must produce the same files
        v = mmcv.VideoReader(self.video_path)
        v.cvt2frames(frame_dir, show_progress=False)
        assert osp.isdir(frame_dir)
        for i in range(self.num_frames):
            filename = f'{frame_dir}/{i:06d}.jpg'
            assert osp.isfile(filename)
            os.remove(filename)
        # custom start offset, numbering and filename template
        v = mmcv.VideoReader(self.video_path)
        v.cvt2frames(frame_dir, file_start=100, filename_tmpl='{:03d}.JPEG', start=100, max_num=20)
        assert osp.isdir(frame_dir)
        for i in range(100, 120):
            filename = f'{frame_dir}/{i:03d}.JPEG'
            assert osp.isfile(filename)
            os.remove(filename)
        shutil.rmtree(frame_dir)

    def test_frames2video(self):
        v = mmcv.VideoReader(self.video_path)
        frame_dir = tempfile.mkdtemp()
        v.cvt2frames(frame_dir)
        assert osp.isdir(frame_dir)
        for i in range(self.num_frames):
            filename = f'{frame_dir}/{i:06d}.jpg'
            assert osp.isfile(filename)
        out_filename = osp.join(tempfile.gettempdir(), 'mmcv_test.avi')
        # default fps is 30
        mmcv.frames2video(frame_dir, out_filename)
        v = mmcv.VideoReader(out_filename)
        assert (v.fps == 30)
        assert (len(v) == self.num_frames)
        # selecting frames [10, 50) yields a 40-frame video
        mmcv.frames2video(frame_dir, out_filename, fps=25, start=10, end=50, show_progress=False)
        with mmcv.VideoReader(out_filename) as v:
            assert (v.fps == 25)
            assert (len(v) == 40)
        for i in range(self.num_frames):
            filename = f'{frame_dir}/{i:06d}.jpg'
            os.remove(filename)
        shutil.rmtree(frame_dir)
def test_color():
    """color_val normalizes Color enums, names, tuples, ints and ndarrays to BGR."""
    # Color enum values are BGR tuples
    assert (mmcv.color_val(mmcv.Color.blue) == (255, 0, 0))
    assert (mmcv.color_val('green') == (0, 255, 0))
    assert (mmcv.color_val((1, 2, 3)) == (1, 2, 3))
    # a single int expands to a gray level
    assert (mmcv.color_val(100) == (100, 100, 100))
    assert (mmcv.color_val(np.zeros(3, dtype=int)) == (0, 0, 0))
    # lists and floats are not accepted
    with pytest.raises(TypeError):
        mmcv.color_val([255, 255, 255])
    with pytest.raises(TypeError):
        mmcv.color_val(1.0)
    # channel values must be within [0, 255]
    with pytest.raises(AssertionError):
        mmcv.color_val((0, 0, 500))
def digit_version(version_str):
    """Convert a dotted version string into a list of ints for comparison.

    An 'rc' component such as '3rc1' becomes [2, 1] (patch minus one, then
    the rc number) so release candidates sort before the final release.
    Components that are neither digits nor rc-tagged are skipped.
    """
    parts = []
    for token in version_str.split('.'):
        if token.isdigit():
            parts.append(int(token))
        elif 'rc' in token:
            patch, _, candidate = token.partition('rc')
            parts.append(int(patch) - 1)
            parts.append(int(candidate))
    return parts
def init_detector(config, checkpoint=None, device='cuda:0', cfg_options=None):
    """Initialize a detector from config file.

    Args:
        config (str or :obj:`mmcv.Config`): Config file path or the config
            object.
        checkpoint (str, optional): Checkpoint path. If left as None, the
            model will not load any weights.
        device (str): Device to place the model on. Default: 'cuda:0'.
        cfg_options (dict): Options to override some settings in the used
            config.

    Returns:
        nn.Module: The constructed detector.

    Raises:
        TypeError: If ``config`` is neither a filename nor a Config object.
    """
    if isinstance(config, str):
        config = mmcv.Config.fromfile(config)
    elif (not isinstance(config, mmcv.Config)):
        raise TypeError(f'config must be a filename or Config object, but got {type(config)}')
    if (cfg_options is not None):
        config.merge_from_dict(cfg_options)
    # drop pretrained/init_cfg so no pretrained weights are fetched at build time
    if ('pretrained' in config.model):
        config.model.pretrained = None
    elif ('init_cfg' in config.model.backbone):
        config.model.backbone.init_cfg = None
    config.model.train_cfg = None
    model = build_detector(config.model, test_cfg=config.get('test_cfg'))
    if (checkpoint is not None):
        checkpoint = load_checkpoint(model, checkpoint, map_location='cpu')
        if ('CLASSES' in checkpoint.get('meta', {})):
            model.CLASSES = checkpoint['meta']['CLASSES']
        else:
            # fall back to COCO class names when the checkpoint has none
            warnings.simplefilter('once')
            warnings.warn("Class names are not saved in the checkpoint's meta data, use COCO classes by default.")
            model.CLASSES = get_classes('coco')
    # keep the config on the model for downstream inference helpers
    model.cfg = config
    model.to(device)
    model.eval()
    return model
class LoadImage():
    """Deprecated.

    A simple pipeline to load image.
    """

    def __call__(self, results):
        """Call function to load images into results.

        Args:
            results (dict): A result dict containing the file name of the
                image to be read (under the 'img' key, as a path or an
                already-loaded array).

        Returns:
            dict: ``results`` will be returned containing the loaded image.
        """
        warnings.simplefilter('once')
        warnings.warn('`LoadImage` is deprecated and will be removed in future releases. You may use `LoadImageFromWebcam` from `mmdet.datasets.pipelines.` instead.')
        # record the source filename only when 'img' is a path
        if isinstance(results['img'], str):
            results['filename'] = results['img']
            results['ori_filename'] = results['img']
        else:
            results['filename'] = None
            results['ori_filename'] = None
        img = mmcv.imread(results['img'])
        results['img'] = img
        results['img_fields'] = ['img']
        results['img_shape'] = img.shape
        results['ori_shape'] = img.shape
        return results
def inference_detector(model, imgs):
    """Inference image(s) with the detector.

    Args:
        model (nn.Module): The loaded detector.
        imgs (str/ndarray or list[str/ndarray] or tuple[str/ndarray]):
            Either image files or loaded images.

    Returns:
        If imgs is a list or tuple, the same length list type results
        will be returned, otherwise return the detection results directly.
    """
    # normalize the input to a batch; remember whether to unwrap the output
    if isinstance(imgs, (list, tuple)):
        is_batch = True
    else:
        imgs = [imgs]
        is_batch = False
    cfg = model.cfg
    device = next(model.parameters()).device
    if isinstance(imgs[0], np.ndarray):
        # arrays bypass file loading, so swap in the webcam-style loader
        cfg = cfg.copy()
        cfg.data.test.pipeline[0].type = 'LoadImageFromWebcam'
    cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline)
    test_pipeline = Compose(cfg.data.test.pipeline)
    datas = []
    for img in imgs:
        if isinstance(img, np.ndarray):
            data = dict(img=img)
        else:
            data = dict(img_info=dict(filename=img), img_prefix=None)
        data = test_pipeline(data)
        datas.append(data)
    data = collate(datas, samples_per_gpu=len(imgs))
    # unwrap DataContainer payloads for direct model consumption
    data['img_metas'] = [img_metas.data[0] for img_metas in data['img_metas']]
    data['img'] = [img.data[0] for img in data['img']]
    if next(model.parameters()).is_cuda:
        data = scatter(data, [device])[0]
    else:
        # RoIPool has no CPU implementation, so fail early
        for m in model.modules():
            assert (not isinstance(m, RoIPool)), 'CPU inference with RoIPool is not supported currently.'
    with torch.no_grad():
        results = model(return_loss=False, rescale=True, **data)
    if (not is_batch):
        return results[0]
    else:
        return results
def show_result_pyplot(model, img, result, score_thr=0.3, title='result', wait_time=0, palette=None):
    """Visualize the detection results on the image.

    Args:
        model (nn.Module): The loaded detector.
        img (str or np.ndarray): Image filename or loaded image.
        result (tuple[list] or list): The detection result, can be either
            (bbox, segm) or just bbox.
        score_thr (float): The threshold to visualize the bboxes and masks.
        title (str): Title of the pyplot figure.
        wait_time (float): Value of waitKey param. Default: 0.
        palette (str or tuple or None): Color palette passed through to
            bbox_color and mask_color.
    """
    # unwrap DataParallel-style containers to reach show_result
    if hasattr(model, 'module'):
        model = model.module
    model.show_result(img, result, score_thr=score_thr, show=True, wait_time=wait_time, win_name=title, bbox_color=palette, text_color=(200, 200, 200), mask_color=palette)
def init_random_seed(seed=None, device='cuda'):
    """Initialize random seed.

    If the seed is not set, the seed will be automatically randomized,
    and then broadcast to all processes to prevent some potential bugs.

    Args:
        seed (int, Optional): The seed. Default to None.
        device (str): The device where the seed will be put on.
            Default to 'cuda'.

    Returns:
        int: Seed to be used.
    """
    if (seed is not None):
        return seed
    (rank, world_size) = get_dist_info()
    seed = np.random.randint((2 ** 31))
    # single-process: no need to synchronize
    if (world_size == 1):
        return seed
    # rank 0 draws the seed; everyone else receives it via broadcast so all
    # processes end up with the same value
    if (rank == 0):
        random_num = torch.tensor(seed, dtype=torch.int32, device=device)
    else:
        random_num = torch.tensor(0, dtype=torch.int32, device=device)
    dist.broadcast(random_num, src=0)
    return random_num.item()
def set_random_seed(seed, deterministic=False):
    """Set random seed.

    Args:
        seed (int): Seed to be used.
        deterministic (bool): Whether to set the deterministic option for
            CUDNN backend, i.e., set ``torch.backends.cudnn.deterministic``
            to True and ``torch.backends.cudnn.benchmark`` to False.
            Default: False.
    """
    # Seed Python, NumPy, and PyTorch (CPU + every CUDA device) RNGs.
    for seed_fn in (random.seed, np.random.seed, torch.manual_seed,
                    torch.cuda.manual_seed_all):
        seed_fn(seed)
    if deterministic:
        # Trade cuDNN autotuning speed for bitwise reproducibility.
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
def train_detector(model, dataset, cfg, distributed=False, validate=False, timestamp=None, meta=None):
    """Train a detector with the given dataset(s) and config.

    Args:
        model (nn.Module): The detector to train.
        dataset (Dataset | list[Dataset]): Training dataset(s); one data
            loader is built per dataset and consumed via ``cfg.workflow``.
        cfg (Config): Full training config (data, optimizer, runner, ...).
        distributed (bool): Whether to use distributed training.
        validate (bool): Whether to register a validation/eval hook.
        timestamp (str, optional): Timestamp attached to the runner.
        meta (dict, optional): Extra meta info passed to the runner.
    """
    logger = get_root_logger(log_level=cfg.log_level)
    # Accept a single dataset as well as a list of datasets (workflow mode).
    dataset = (dataset if isinstance(dataset, (list, tuple)) else [dataset])
    # Back-compat: "imgs_per_gpu" was renamed to "samples_per_gpu".
    if ('imgs_per_gpu' in cfg.data):
        logger.warning('"imgs_per_gpu" is deprecated in MMDet V2.0. Please use "samples_per_gpu" instead')
        if ('samples_per_gpu' in cfg.data):
            logger.warning(f'Got "imgs_per_gpu"={cfg.data.imgs_per_gpu} and "samples_per_gpu"={cfg.data.samples_per_gpu}, "imgs_per_gpu"={cfg.data.imgs_per_gpu} is used in this experiments')
        else:
            logger.warning(f'Automatically set "samples_per_gpu"="imgs_per_gpu"={cfg.data.imgs_per_gpu} in this experiments')
        cfg.data.samples_per_gpu = cfg.data.imgs_per_gpu
    # The runner type decides how data loaders are built (epoch vs iter).
    runner_type = ('EpochBasedRunner' if ('runner' not in cfg) else cfg.runner['type'])
    data_loaders = [build_dataloader(ds, cfg.data.samples_per_gpu, cfg.data.workers_per_gpu, num_gpus=len(cfg.gpu_ids), dist=distributed, seed=cfg.seed, runner_type=runner_type, persistent_workers=cfg.data.get('persistent_workers', False)) for ds in dataset]
    # Wrap the model for (distributed) data-parallel execution.
    if distributed:
        find_unused_parameters = cfg.get('find_unused_parameters', False)
        model = MMDistributedDataParallel(model.cuda(), device_ids=[torch.cuda.current_device()], broadcast_buffers=False, find_unused_parameters=find_unused_parameters)
    else:
        model = MMDataParallel(model, device_ids=cfg.gpu_ids)
    optimizer = build_optimizer(model, cfg.optimizer)
    # Back-compat: synthesize a default `runner` section from `total_epochs`.
    if ('runner' not in cfg):
        cfg.runner = {'type': 'EpochBasedRunner', 'max_epochs': cfg.total_epochs}
        warnings.warn('config is now expected to have a `runner` section, please set `runner` in your config.', UserWarning)
    elif ('total_epochs' in cfg):
        # Both present: they must agree.
        assert (cfg.total_epochs == cfg.runner.max_epochs)
    runner = build_runner(cfg.runner, default_args=dict(model=model, optimizer=optimizer, work_dir=cfg.work_dir, logger=logger, meta=meta))
    # Keep the runner's timestamp in sync with the caller's (log naming).
    runner.timestamp = timestamp
    # fp16 takes precedence; otherwise fall back to a plain OptimizerHook
    # only when distributed and no custom hook type is configured.
    fp16_cfg = cfg.get('fp16', None)
    if (fp16_cfg is not None):
        optimizer_config = Fp16OptimizerHook(**cfg.optimizer_config, **fp16_cfg, distributed=distributed)
    elif (distributed and ('type' not in cfg.optimizer_config)):
        optimizer_config = OptimizerHook(**cfg.optimizer_config)
    else:
        optimizer_config = cfg.optimizer_config
    runner.register_training_hooks(cfg.lr_config, optimizer_config, cfg.checkpoint_config, cfg.log_config, cfg.get('momentum_config', None), custom_hooks_config=cfg.get('custom_hooks', None))
    if distributed:
        # Re-seed the distributed sampler every epoch (epoch-based only).
        if isinstance(runner, EpochBasedRunner):
            runner.register_hook(DistSamplerSeedHook())
    # Register an eval hook that runs validation during training.
    if validate:
        val_samples_per_gpu = cfg.data.val.pop('samples_per_gpu', 1)
        if (val_samples_per_gpu > 1):
            # Batched validation cannot keep ImageToTensor in the pipeline.
            cfg.data.val.pipeline = replace_ImageToTensor(cfg.data.val.pipeline)
        val_dataset = build_dataset(cfg.data.val, dict(test_mode=True))
        val_dataloader = build_dataloader(val_dataset, samples_per_gpu=val_samples_per_gpu, workers_per_gpu=cfg.data.workers_per_gpu, dist=distributed, shuffle=False)
        eval_cfg = cfg.get('evaluation', {})
        eval_cfg['by_epoch'] = (cfg.runner['type'] != 'IterBasedRunner')
        eval_hook = (DistEvalHook if distributed else EvalHook)
        runner.register_hook(eval_hook(val_dataloader, **eval_cfg), priority='LOW')
    # Optionally auto-resume from the latest checkpoint in the work dir.
    resume_from = None
    if ((cfg.resume_from is None) and cfg.get('auto_resume')):
        resume_from = find_latest_checkpoint(cfg.work_dir)
    if (resume_from is not None):
        cfg.resume_from = resume_from
    if cfg.resume_from:
        runner.resume(cfg.resume_from)
    elif cfg.load_from:
        runner.load_checkpoint(cfg.load_from)
    runner.run(data_loaders, cfg.workflow)
def build_prior_generator(cfg, default_args=None):
    """Build a prior generator (anchor- or point-based) from config.

    Args:
        cfg (dict): Config dict whose ``type`` is registered in
            ``PRIOR_GENERATORS``.
        default_args (dict, optional): Default arguments merged into ``cfg``.

    Returns:
        The constructed prior generator instance.
    """
    generator = build_from_cfg(cfg, PRIOR_GENERATORS, default_args)
    return generator
def build_anchor_generator(cfg, default_args=None):
    """Deprecated wrapper around :func:`build_prior_generator`."""
    # Keep the legacy entry point alive but steer users to the new name.
    warnings.warn('``build_anchor_generator`` would be deprecated soon, please use ``build_prior_generator`` ')
    return build_prior_generator(cfg, default_args=default_args)
@PRIOR_GENERATORS.register_module()
class PointGenerator:
    """Single-level point generator for point-based detectors."""

    def _meshgrid(self, x, y, row_major=True):
        """Flatten ``x``/``y`` into the full grid of coordinate pairs."""
        xx = x.repeat(len(y))
        yy = y.view(-1, 1).repeat(1, len(x)).view(-1)
        return (xx, yy) if row_major else (yy, xx)

    def grid_points(self, featmap_size, stride=16, device='cuda'):
        """Generate (x, y, stride) points covering one feature map."""
        feat_h, feat_w = featmap_size
        shift_x = torch.arange(0., feat_w, device=device) * stride
        shift_y = torch.arange(0., feat_h, device=device) * stride
        shift_xx, shift_yy = self._meshgrid(shift_x, shift_y)
        # Broadcast the scalar stride to one entry per grid point.
        stride = shift_x.new_full((shift_xx.shape[0], ), stride)
        shifts = torch.stack([shift_xx, shift_yy, stride], dim=-1)
        return shifts.to(device)

    def valid_flags(self, featmap_size, valid_size, device='cuda'):
        """Mark grid points lying inside the valid (un-padded) region."""
        feat_h, feat_w = featmap_size
        valid_h, valid_w = valid_size
        assert valid_h <= feat_h and valid_w <= feat_w
        valid_x = torch.zeros(feat_w, dtype=torch.bool, device=device)
        valid_y = torch.zeros(feat_h, dtype=torch.bool, device=device)
        valid_x[:valid_w] = 1
        valid_y[:valid_h] = 1
        valid_xx, valid_yy = self._meshgrid(valid_x, valid_y)
        # A point is valid only if both of its coordinates are valid.
        return valid_xx & valid_yy
@PRIOR_GENERATORS.register_module()
class MlvlPointGenerator():
    """Standard points generator for multi-level (Mlvl) feature maps in 2D
    points-based detectors.

    Args:
        strides (list[int] | list[tuple[int, int]]): Strides of anchors
            in multiple feature levels in order (w, h).
        offset (float): The offset of points, the value is normalized with
            corresponding stride. Defaults to 0.5.
    """

    def __init__(self, strides, offset=0.5):
        # _pair turns a scalar stride into a (w, h) tuple.
        self.strides = [_pair(stride) for stride in strides]
        self.offset = offset

    @property
    def num_levels(self):
        """int: number of feature levels that the generator will be applied"""
        return len(self.strides)

    @property
    def num_base_priors(self):
        """list[int]: The number of priors (points) at a point
        on the feature grid."""
        # Point-based: exactly one prior per grid location at every level.
        return [1 for _ in range(len(self.strides))]

    def _meshgrid(self, x, y, row_major=True):
        # Note the (y, x) argument order: yy varies along rows, xx along
        # columns; both are flattened with reshape before returning.
        (yy, xx) = torch.meshgrid(y, x)
        if row_major:
            return (xx.reshape((- 1)), yy.reshape((- 1)))
        else:
            return (yy.reshape((- 1)), xx.reshape((- 1)))

    def grid_priors(self, featmap_sizes, dtype=torch.float32, device='cuda', with_stride=False):
        """Generate grid points of multiple feature levels.

        Args:
            featmap_sizes (list[tuple]): List of feature map sizes in
                multiple feature levels, each size arranged as (h, w).
            dtype (:obj:`dtype`): Dtype of priors. Default: torch.float32.
            device (str): The device where the anchors will be put on.
            with_stride (bool): Whether to concatenate the stride to
                the last dimension of points.

        Return:
            list[torch.Tensor]: Points of multiple feature levels.
            Each tensor is (N, 2) when ``with_stride`` is False
            (coord_x, coord_y), otherwise (N, 4)
            (coord_x, coord_y, stride_w, stride_h).
        """
        assert (self.num_levels == len(featmap_sizes))
        multi_level_priors = []
        for i in range(self.num_levels):
            priors = self.single_level_grid_priors(featmap_sizes[i], level_idx=i, dtype=dtype, device=device, with_stride=with_stride)
            multi_level_priors.append(priors)
        return multi_level_priors

    def single_level_grid_priors(self, featmap_size, level_idx, dtype=torch.float32, device='cuda', with_stride=False):
        """Generate grid Points of a single level.

        Note:
            This function is usually called by method ``self.grid_priors``.

        Args:
            featmap_size (tuple[int]): Size of the feature map, (h, w).
            level_idx (int): The index of the corresponding feature level.
            dtype (:obj:`dtype`): Dtype of priors. Default: torch.float32.
            device (str, optional): The device the tensor will be put on.
                Defaults to 'cuda'.
            with_stride (bool): Concatenate the stride to the last
                dimension of points.

        Return:
            Tensor: (N, 2) points (coord_x, coord_y), or (N, 4)
            (coord_x, coord_y, stride_w, stride_h) when ``with_stride``.
        """
        (feat_h, feat_w) = featmap_size
        (stride_w, stride_h) = self.strides[level_idx]
        # Shift grid indices by `offset` (in stride units) toward cell
        # centers, then scale to image coordinates; cast to target dtype.
        shift_x = ((torch.arange(0, feat_w, device=device) + self.offset) * stride_w)
        shift_x = shift_x.to(dtype)
        shift_y = ((torch.arange(0, feat_h, device=device) + self.offset) * stride_h)
        shift_y = shift_y.to(dtype)
        (shift_xx, shift_yy) = self._meshgrid(shift_x, shift_y)
        if (not with_stride):
            shifts = torch.stack([shift_xx, shift_yy], dim=(- 1))
        else:
            # Broadcast the per-level stride to every grid point.
            stride_w = shift_xx.new_full((shift_xx.shape[0],), stride_w).to(dtype)
            stride_h = shift_xx.new_full((shift_yy.shape[0],), stride_h).to(dtype)
            shifts = torch.stack([shift_xx, shift_yy, stride_w, stride_h], dim=(- 1))
        all_points = shifts.to(device)
        return all_points

    def valid_flags(self, featmap_sizes, pad_shape, device='cuda'):
        """Generate valid flags of points of multiple feature levels.

        Args:
            featmap_sizes (list(tuple)): List of feature map sizes in
                multiple feature levels, each size arranged as (h, w).
            pad_shape (tuple(int)): The padded shape of the image, (h, w).
            device (str): The device where the anchors will be put on.

        Return:
            list(torch.Tensor): Valid flags of points of multiple levels.
        """
        assert (self.num_levels == len(featmap_sizes))
        multi_level_flags = []
        for i in range(self.num_levels):
            point_stride = self.strides[i]
            (feat_h, feat_w) = featmap_sizes[i]
            (h, w) = pad_shape[:2]
            # Portion of the feature map that covers the un-padded image
            # (ceil of image extent divided by the level stride).
            valid_feat_h = min(int(np.ceil((h / point_stride[1]))), feat_h)
            valid_feat_w = min(int(np.ceil((w / point_stride[0]))), feat_w)
            flags = self.single_level_valid_flags((feat_h, feat_w), (valid_feat_h, valid_feat_w), device=device)
            multi_level_flags.append(flags)
        return multi_level_flags

    def single_level_valid_flags(self, featmap_size, valid_size, device='cuda'):
        """Generate the valid flags of points of a single feature map.

        Args:
            featmap_size (tuple[int]): The size of the feature map, (h, w).
            valid_size (tuple[int]): The valid size of the feature map,
                (h, w).
            device (str, optional): The device where the flags will be put
                on. Defaults to 'cuda'.

        Returns:
            torch.Tensor: The valid flags of each point in a single level
            feature map.
        """
        (feat_h, feat_w) = featmap_size
        (valid_h, valid_w) = valid_size
        assert ((valid_h <= feat_h) and (valid_w <= feat_w))
        valid_x = torch.zeros(feat_w, dtype=torch.bool, device=device)
        valid_y = torch.zeros(feat_h, dtype=torch.bool, device=device)
        valid_x[:valid_w] = 1
        valid_y[:valid_h] = 1
        (valid_xx, valid_yy) = self._meshgrid(valid_x, valid_y)
        # A point is valid only if both its x and y indices are valid.
        valid = (valid_xx & valid_yy)
        return valid

    def sparse_priors(self, prior_idxs, featmap_size, level_idx, dtype=torch.float32, device='cuda'):
        """Generate sparse points according to the ``prior_idxs``.

        Args:
            prior_idxs (Tensor): The index of corresponding anchors
                in the feature map.
            featmap_size (tuple[int]): feature map size arranged as (h, w).
            level_idx (int): The level index of the corresponding feature
                map.
            dtype (obj:`torch.dtype`): Data type of points. Defaults to
                ``torch.float32``.
            device (obj:`torch.device`): The device where the points are
                located.

        Returns:
            Tensor: Points with shape (N, 2), N equal to the length of
            ``prior_idxs``; last dimension is (coord_x, coord_y).
        """
        (height, width) = featmap_size
        # Decompose flat indices into (x, y) grid coordinates, apply the
        # center offset, then scale by the (w, h) stride of this level.
        x = (((prior_idxs % width) + self.offset) * self.strides[level_idx][0])
        y = ((((prior_idxs // width) % height) + self.offset) * self.strides[level_idx][1])
        prioris = torch.stack([x, y], 1).to(dtype)
        prioris = prioris.to(device)
        return prioris
class AssignResult(util_mixins.NiceRepr):
    """Stores assignments between predicted and truth boxes.

    Attributes:
        num_gts (int): the number of truth boxes considered when computing
            this assignment
        gt_inds (LongTensor): for each predicted box indicates the 1-based
            index of the assigned truth box. 0 means unassigned and -1 means
            ignore.
        max_overlaps (FloatTensor): the iou between the predicted box and its
            assigned truth box.
        labels (None | LongTensor): If specified, for each predicted box
            indicates the category label of the assigned truth box.

    Example:
        >>> # An assign result between 4 predicted boxes and 9 true boxes
        >>> # where only two boxes were assigned.
        >>> num_gts = 9
        >>> max_overlaps = torch.LongTensor([0, .5, .9, 0])
        >>> gt_inds = torch.LongTensor([-1, 1, 2, 0])
        >>> labels = torch.LongTensor([0, 3, 4, 0])
        >>> self = AssignResult(num_gts, gt_inds, max_overlaps, labels)
        >>> print(str(self))  # xdoctest: +IGNORE_WANT
        <AssignResult(num_gts=9, gt_inds.shape=(4,), max_overlaps.shape=(4,),
                      labels.shape=(4,))>
        >>> # Force addition of gt labels (when adding gt as proposals)
        >>> new_labels = torch.LongTensor([3, 4, 5])
        >>> self.add_gt_(new_labels)
        >>> print(str(self))  # xdoctest: +IGNORE_WANT
        <AssignResult(num_gts=9, gt_inds.shape=(7,), max_overlaps.shape=(7,),
                      labels.shape=(7,))>
    """

    def __init__(self, num_gts, gt_inds, max_overlaps, labels=None):
        self.num_gts = num_gts
        self.gt_inds = gt_inds
        self.max_overlaps = max_overlaps
        self.labels = labels
        # Container for possible user-defined properties.
        self._extra_properties = {}

    @property
    def num_preds(self):
        """int: the number of predictions in this assignment"""
        return len(self.gt_inds)

    def set_extra_property(self, key, value):
        """Set user-defined new property."""
        # Refuse to shadow any built-in info key.
        assert key not in self.info
        self._extra_properties[key] = value

    def get_extra_property(self, key):
        """Get user-defined property."""
        return self._extra_properties.get(key, None)

    @property
    def info(self):
        """dict: a dictionary of info about the object"""
        basic_info = {
            'num_gts': self.num_gts,
            'num_preds': self.num_preds,
            'gt_inds': self.gt_inds,
            'max_overlaps': self.max_overlaps,
            'labels': self.labels,
        }
        basic_info.update(self._extra_properties)
        return basic_info

    def __nice__(self):
        """str: a "nice" summary string describing this assign result"""
        parts = []
        parts.append(f'num_gts={self.num_gts!r}')
        if self.gt_inds is None:
            parts.append(f'gt_inds={self.gt_inds!r}')
        else:
            parts.append(f'gt_inds.shape={tuple(self.gt_inds.shape)!r}')
        if self.max_overlaps is None:
            parts.append(f'max_overlaps={self.max_overlaps!r}')
        else:
            parts.append(f'max_overlaps.shape={tuple(self.max_overlaps.shape)!r}')
        if self.labels is None:
            parts.append(f'labels={self.labels!r}')
        else:
            parts.append(f'labels.shape={tuple(self.labels.shape)!r}')
        return ', '.join(parts)

    @classmethod
    def random(cls, **kwargs):
        """Create random AssignResult for tests or debugging.

        Args:
            num_preds: number of predicted boxes
            num_gts: number of true boxes
            p_ignore (float): probability of a predicted box assigned to an
                ignored truth
            p_assigned (float): probability of a predicted box not being
                assigned
            p_use_label (float | bool): with labels or not
            num_classes (int): number of possible category labels
            rng (None | int | numpy.random.RandomState): seed or state

        Returns:
            :obj:`AssignResult`: Randomly generated assign results.

        Example:
            >>> from mmdet.core.bbox.assigners.assign_result import *  # NOQA
            >>> self = AssignResult.random()
            >>> print(self.info)
        """
        from mmdet.core.bbox import demodata
        rng = demodata.ensure_rng(kwargs.get('rng', None))
        num_gts = kwargs.get('num_gts', None)
        num_preds = kwargs.get('num_preds', None)
        p_ignore = kwargs.get('p_ignore', 0.3)
        p_assigned = kwargs.get('p_assigned', 0.7)
        p_use_label = kwargs.get('p_use_label', 0.5)
        # BUGFIX: this previously read ``kwargs.get('p_use_label', 3)``, so a
        # caller-supplied ``num_classes`` was silently ignored and a float
        # ``p_use_label`` could leak in as the class count.
        num_classes = kwargs.get('num_classes', 3)
        if num_gts is None:
            num_gts = rng.randint(0, 8)
        if num_preds is None:
            num_preds = rng.randint(0, 16)
        if num_gts == 0:
            # No gt boxes: everything is unassigned background.
            max_overlaps = torch.zeros(num_preds, dtype=torch.float32)
            gt_inds = torch.zeros(num_preds, dtype=torch.int64)
            if p_use_label is True or p_use_label < rng.rand():
                labels = torch.zeros(num_preds, dtype=torch.int64)
            else:
                labels = None
        else:
            import numpy as np

            # Each predicted box is assigned with probability p_assigned,
            # capped so no more boxes than gts (or preds) are assigned.
            max_overlaps = torch.from_numpy(rng.rand(num_preds))
            is_assigned = torch.from_numpy(rng.rand(num_preds) < p_assigned)
            n_assigned = min(num_preds, min(num_gts, is_assigned.sum()))
            assigned_idxs = np.where(is_assigned)[0]
            rng.shuffle(assigned_idxs)
            assigned_idxs = assigned_idxs[0:n_assigned]
            assigned_idxs.sort()
            is_assigned[:] = 0
            is_assigned[assigned_idxs] = True
            is_ignore = torch.from_numpy(rng.rand(num_preds) < p_ignore) & is_assigned
            gt_inds = torch.zeros(num_preds, dtype=torch.int64)
            true_idxs = np.arange(num_gts)
            rng.shuffle(true_idxs)
            true_idxs = torch.from_numpy(true_idxs)
            gt_inds[is_assigned] = true_idxs[:n_assigned].long()
            # NOTE(review): the assignment above is immediately overwritten
            # below; kept as-is because removing it would shift the rng
            # stream and change generated fixtures.
            gt_inds = torch.from_numpy(rng.randint(1, num_gts + 1, size=num_preds))
            gt_inds[is_ignore] = -1
            gt_inds[~is_assigned] = 0
            max_overlaps[~is_assigned] = 0
            if p_use_label is True or p_use_label < rng.rand():
                if num_classes == 0:
                    labels = torch.zeros(num_preds, dtype=torch.int64)
                else:
                    labels = torch.from_numpy(rng.randint(0, num_classes, size=num_preds))
                    # Background predictions get label 0.
                    labels[~is_assigned] = 0
            else:
                labels = None
        self = cls(num_gts, gt_inds, max_overlaps, labels)
        return self

    def add_gt_(self, gt_labels):
        """Add ground truth as assigned results.

        Args:
            gt_labels (torch.Tensor): Labels of gt boxes
        """
        # Prepend each gt as a prediction assigned to itself (1-based).
        self_inds = torch.arange(1, len(gt_labels) + 1, dtype=torch.long, device=gt_labels.device)
        self.gt_inds = torch.cat([self_inds, self.gt_inds])
        # A gt box overlaps itself perfectly.
        self.max_overlaps = torch.cat([self.max_overlaps.new_ones(len(gt_labels)), self.max_overlaps])
        if self.labels is not None:
            self.labels = torch.cat([gt_labels, self.labels])
class BaseAssigner(metaclass=ABCMeta):
    """Abstract base class for assigners mapping boxes onto gt boxes."""

    @abstractmethod
    def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None):
        """Assign boxes to either a ground truth boxes or a negative boxes."""
@BBOX_ASSIGNERS.register_module()
class HungarianAssigner(BaseAssigner):
    """Computes one-to-one matching between predictions and ground truth.

    This class computes an assignment between the targets and the predictions
    based on the costs. The costs are weighted sum of three components:
    classification cost, regression L1 cost and regression iou cost. The
    targets don't include the no_object, so generally there are more
    predictions than targets. After the one-to-one matching, the un-matched
    are treated as backgrounds. Thus each query prediction will be assigned
    with `0` or a positive integer indicating the ground truth index:

    - 0: negative sample, no assigned gt
    - positive integer: positive sample, index (1-based) of assigned gt

    Args:
        cls_cost (dict): Config of the classification cost term.
        reg_cost (dict): Config of the regression L1 cost term.
        iou_cost (dict): Config of the regression iou cost term.
    """

    def __init__(self, cls_cost=dict(type='ClassificationCost', weight=1.0), reg_cost=dict(type='BBoxL1Cost', weight=1.0), iou_cost=dict(type='IoUCost', iou_mode='giou', weight=1.0)):
        # Build the three cost terms from their configs.
        self.cls_cost = build_match_cost(cls_cost)
        self.reg_cost = build_match_cost(reg_cost)
        self.iou_cost = build_match_cost(iou_cost)

    def assign(self, bbox_pred, cls_pred, gt_bboxes, gt_labels, img_meta, gt_bboxes_ignore=None, eps=1e-07):
        """Computes one-to-one matching based on the weighted costs.

        This method assigns each query prediction to a ground truth or
        background. The `assigned_gt_inds` with -1 means don't care,
        0 means negative sample, and a positive number is the (1-based)
        index of the assigned gt. The assignment is done in these steps
        (the order matters):

        1. assign every prediction to -1
        2. compute the weighted costs
        3. do Hungarian matching on CPU based on the costs
        4. assign all to 0 (background) first, then for each matched pair
           between predictions and gts, treat this prediction as foreground
           and assign the corresponding gt index (plus 1) to it.

        Args:
            bbox_pred (Tensor): Predicted boxes with normalized coordinates
                (cx, cy, w, h), which are all in range [0, 1]. Shape
                [num_query, 4].
            cls_pred (Tensor): Predicted classification logits, shape
                [num_query, num_class].
            gt_bboxes (Tensor): Ground truth boxes with unnormalized
                coordinates (x1, y1, x2, y2). Shape [num_gt, 4].
            gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,).
            img_meta (dict): Meta information for current image.
            gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that
                are labelled as `ignored`. Default None.
            eps (int | float, optional): A value added to the denominator
                for numerical stability. Default 1e-7.

        Returns:
            :obj:`AssignResult`: The assigned result.
        """
        assert (gt_bboxes_ignore is None), 'Only case when gt_bboxes_ignore is None is supported.'
        (num_gts, num_bboxes) = (gt_bboxes.size(0), bbox_pred.size(0))
        # 1. assign -1 ("don't care") by default.
        assigned_gt_inds = bbox_pred.new_full((num_bboxes,), (- 1), dtype=torch.long)
        assigned_labels = bbox_pred.new_full((num_bboxes,), (- 1), dtype=torch.long)
        if ((num_gts == 0) or (num_bboxes == 0)):
            # No ground truth or predictions: nothing to match.
            if (num_gts == 0):
                # No gt at all: everything is background.
                assigned_gt_inds[:] = 0
            return AssignResult(num_gts, assigned_gt_inds, None, labels=assigned_labels)
        (img_h, img_w, _) = img_meta['img_shape']
        # Scale factor to move between normalized and absolute coordinates.
        factor = gt_bboxes.new_tensor([img_w, img_h, img_w, img_h]).unsqueeze(0)
        # 2. compute the weighted cost terms.
        cls_cost = self.cls_cost(cls_pred, gt_labels)
        # L1 cost is computed in normalized space, so scale gts down.
        normalize_gt_bboxes = (gt_bboxes / factor)
        reg_cost = self.reg_cost(bbox_pred, normalize_gt_bboxes)
        # IoU cost is computed in absolute xyxy space, so scale preds up.
        bboxes = (bbox_cxcywh_to_xyxy(bbox_pred) * factor)
        iou_cost = self.iou_cost(bboxes, gt_bboxes)
        cost = ((cls_cost + reg_cost) + iou_cost)
        # 3. Hungarian matching runs on CPU (scipy).
        cost = cost.detach().cpu()
        if (linear_sum_assignment is None):
            raise ImportError('Please run "pip install scipy" to install scipy first.')
        (matched_row_inds, matched_col_inds) = linear_sum_assignment(cost)
        matched_row_inds = torch.from_numpy(matched_row_inds).to(bbox_pred.device)
        matched_col_inds = torch.from_numpy(matched_col_inds).to(bbox_pred.device)
        # 4. everything becomes background, then matched predictions get
        # their gt index (1-based) and the gt's label.
        assigned_gt_inds[:] = 0
        assigned_gt_inds[matched_row_inds] = (matched_col_inds + 1)
        assigned_labels[matched_row_inds] = gt_labels[matched_col_inds]
        return AssignResult(num_gts, assigned_gt_inds, None, labels=assigned_labels)
@BBOX_ASSIGNERS.register_module()
class MaskHungarianAssigner(BaseAssigner):
    """Computes one-to-one matching between predictions and ground truth for
    mask.

    This class computes an assignment between the targets and the predictions
    based on the costs. The costs are weighted sum of three components:
    classification cost, mask focal cost and mask dice cost. The
    targets don't include the no_object, so generally there are more
    predictions than targets. After the one-to-one matching, the un-matched
    are treated as backgrounds. Thus each query prediction will be assigned
    with `0` or a positive integer indicating the ground truth index:

    - 0: negative sample, no assigned gt
    - positive integer: positive sample, index (1-based) of assigned gt

    Args:
        cls_cost (:obj:`mmcv.ConfigDict` | dict): Classification cost config.
        mask_cost (:obj:`mmcv.ConfigDict` | dict): Mask cost config.
        dice_cost (:obj:`mmcv.ConfigDict` | dict): Dice cost config.
    """

    def __init__(self, cls_cost=dict(type='ClassificationCost', weight=1.0), mask_cost=dict(type='FocalLossCost', weight=1.0, binary_input=True), dice_cost=dict(type='DiceCost', weight=1.0)):
        # Build the three cost terms from their configs.
        self.cls_cost = build_match_cost(cls_cost)
        self.mask_cost = build_match_cost(mask_cost)
        self.dice_cost = build_match_cost(dice_cost)

    def assign(self, cls_pred, mask_pred, gt_labels, gt_mask, img_meta, gt_bboxes_ignore=None, eps=1e-07):
        """Computes one-to-one matching based on the weighted costs.

        Args:
            cls_pred (Tensor): Class prediction in shape
                (num_query, cls_out_channels).
            mask_pred (Tensor): Mask prediction in shape (num_query, H, W).
            gt_labels (Tensor): Label of 'gt_mask' in shape (num_gt, ).
            gt_mask (Tensor): Ground truth mask in shape (num_gt, H, W).
            img_meta (dict): Meta information for current image.
            gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that
                are labelled as `ignored`. Default None.
            eps (int | float, optional): A value added to the denominator
                for numerical stability. Default 1e-7.

        Returns:
            :obj:`AssignResult`: The assigned result.
        """
        assert (gt_bboxes_ignore is None), 'Only case when gt_bboxes_ignore is None is supported.'
        (num_gt, num_query) = (gt_labels.shape[0], cls_pred.shape[0])
        # 1. assign -1 ("don't care") by default.
        assigned_gt_inds = cls_pred.new_full((num_query,), (- 1), dtype=torch.long)
        assigned_labels = cls_pred.new_full((num_query,), (- 1), dtype=torch.long)
        if ((num_gt == 0) or (num_query == 0)):
            # No ground truth or queries: nothing to match.
            if (num_gt == 0):
                # No gt at all: everything is background.
                assigned_gt_inds[:] = 0
            return AssignResult(num_gt, assigned_gt_inds, None, labels=assigned_labels)
        # 2. compute the weighted cost terms; zero-weighted terms are
        # skipped entirely and contribute a scalar 0 to the sum.
        if ((self.cls_cost.weight != 0) and (cls_pred is not None)):
            cls_cost = self.cls_cost(cls_pred, gt_labels)
        else:
            cls_cost = 0
        if (self.mask_cost.weight != 0):
            mask_cost = self.mask_cost(mask_pred, gt_mask)
        else:
            mask_cost = 0
        if (self.dice_cost.weight != 0):
            dice_cost = self.dice_cost(mask_pred, gt_mask)
        else:
            dice_cost = 0
        cost = ((cls_cost + mask_cost) + dice_cost)
        # 3. Hungarian matching runs on CPU (scipy).
        cost = cost.detach().cpu()
        if (linear_sum_assignment is None):
            raise ImportError('Please run "pip install scipy" to install scipy first.')
        (matched_row_inds, matched_col_inds) = linear_sum_assignment(cost)
        matched_row_inds = torch.from_numpy(matched_row_inds).to(cls_pred.device)
        matched_col_inds = torch.from_numpy(matched_col_inds).to(cls_pred.device)
        # 4. everything becomes background, then matched queries get their
        # gt index (1-based) and the gt's label.
        assigned_gt_inds[:] = 0
        assigned_gt_inds[matched_row_inds] = (matched_col_inds + 1)
        assigned_labels[matched_row_inds] = gt_labels[matched_col_inds]
        return AssignResult(num_gt, assigned_gt_inds, None, labels=assigned_labels)
@BBOX_ASSIGNERS.register_module()
class UniformAssigner(BaseAssigner):
    """Uniform Matching between the anchors and gt boxes, which can achieve
    balance in positive anchors, and gt_bboxes_ignore was not considered for
    now.

    Args:
        pos_ignore_thr (float): the threshold to ignore positive anchors
        neg_ignore_thr (float): the threshold to ignore negative anchors
        match_times (int): Number of positive anchors for each gt box.
            Default 4.
        iou_calculator (dict): iou_calculator config
    """

    def __init__(self, pos_ignore_thr, neg_ignore_thr, match_times=4, iou_calculator=dict(type='BboxOverlaps2D')):
        self.match_times = match_times
        self.pos_ignore_thr = pos_ignore_thr
        self.neg_ignore_thr = neg_ignore_thr
        self.iou_calculator = build_iou_calculator(iou_calculator)

    def assign(self, bbox_pred, anchor, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None):
        """Assign gt boxes to predictions/anchors by L1-cost top-k matching.

        Args:
            bbox_pred (Tensor): Predicted boxes in xyxy, shape (n, 4).
            anchor (Tensor): Anchor boxes in xyxy, shape (n, 4).
            gt_bboxes (Tensor): Ground truth boxes, shape (num_gt, 4).
            gt_bboxes_ignore (Tensor, optional): Unused; kept for the
                BaseAssigner interface.
            gt_labels (Tensor, optional): Labels of gt boxes.

        Returns:
            :obj:`AssignResult`: The assigned result, with extra properties
            'pos_idx', 'pos_predicted_boxes' and 'target_boxes'.
        """
        (num_gts, num_bboxes) = (gt_bboxes.size(0), bbox_pred.size(0))
        # Default: everything is background (0); labels start as -1.
        assigned_gt_inds = bbox_pred.new_full((num_bboxes,), 0, dtype=torch.long)
        assigned_labels = bbox_pred.new_full((num_bboxes,), (- 1), dtype=torch.long)
        if ((num_gts == 0) or (num_bboxes == 0)):
            # No gt or no boxes: return an all-background result with empty
            # extra properties so downstream code sees consistent shapes.
            if (num_gts == 0):
                assigned_gt_inds[:] = 0
            assign_result = AssignResult(num_gts, assigned_gt_inds, None, labels=assigned_labels)
            assign_result.set_extra_property('pos_idx', bbox_pred.new_empty(0, dtype=torch.bool))
            assign_result.set_extra_property('pos_predicted_boxes', bbox_pred.new_empty((0, 4)))
            assign_result.set_extra_property('target_boxes', bbox_pred.new_empty((0, 4)))
            return assign_result
        # L1 distance (in cxcywh space) between every box and every gt.
        cost_bbox = torch.cdist(bbox_xyxy_to_cxcywh(bbox_pred), bbox_xyxy_to_cxcywh(gt_bboxes), p=1)
        cost_bbox_anchors = torch.cdist(bbox_xyxy_to_cxcywh(anchor), bbox_xyxy_to_cxcywh(gt_bboxes), p=1)
        # Costs are moved to CPU before topk.
        C = cost_bbox.cpu()
        C1 = cost_bbox_anchors.cpu()
        # `match_times` cheapest predictions and anchors per gt column.
        index = torch.topk(C, k=self.match_times, dim=0, largest=False)[1]
        index1 = torch.topk(C1, k=self.match_times, dim=0, largest=False)[1]
        # (match_times, 2*num_gt) flattened row-major: for each of the
        # 2*match_times candidate rounds, gts appear in order 0..num_gt-1.
        indexes = torch.cat((index, index1), dim=1).reshape((- 1)).to(bbox_pred.device)
        pred_overlaps = self.iou_calculator(bbox_pred, gt_bboxes)
        anchor_overlaps = self.iou_calculator(anchor, gt_bboxes)
        (pred_max_overlaps, _) = pred_overlaps.max(dim=1)
        (anchor_max_overlaps, _) = anchor_overlaps.max(dim=0)
        # Predictions overlapping any gt above neg_ignore_thr are ignored
        # rather than treated as negatives.
        ignore_idx = (pred_max_overlaps > self.neg_ignore_thr)
        assigned_gt_inds[ignore_idx] = (- 1)
        # Matching gt index for each flattened candidate; candidates whose
        # anchor IoU falls below pos_ignore_thr are marked ignore (-1).
        pos_gt_index = torch.arange(0, C1.size(1), device=bbox_pred.device).repeat((self.match_times * 2))
        pos_ious = anchor_overlaps[(indexes, pos_gt_index)]
        pos_ignore_idx = (pos_ious < self.pos_ignore_thr)
        pos_gt_index_with_ignore = (pos_gt_index + 1)
        pos_gt_index_with_ignore[pos_ignore_idx] = (- 1)
        assigned_gt_inds[indexes] = pos_gt_index_with_ignore
        if (gt_labels is not None):
            assigned_labels = assigned_gt_inds.new_full((num_bboxes,), (- 1))
            pos_inds = torch.nonzero((assigned_gt_inds > 0), as_tuple=False).squeeze()
            if (pos_inds.numel() > 0):
                # Map the 1-based gt index back to its label.
                assigned_labels[pos_inds] = gt_labels[(assigned_gt_inds[pos_inds] - 1)]
        else:
            assigned_labels = None
        assign_result = AssignResult(num_gts, assigned_gt_inds, anchor_max_overlaps, labels=assigned_labels)
        # Extra properties consumed by the corresponding loss computation.
        assign_result.set_extra_property('pos_idx', (~ pos_ignore_idx))
        assign_result.set_extra_property('pos_predicted_boxes', bbox_pred[indexes])
        assign_result.set_extra_property('target_boxes', gt_bboxes[pos_gt_index])
        return assign_result
def build_assigner(cfg, **default_args):
    """Builder of box assigner.

    Args:
        cfg (dict): Assigner config with a ``type`` registered in
            ``BBOX_ASSIGNERS``.
        **default_args: Default arguments merged into ``cfg``.
    """
    assigner = build_from_cfg(cfg, BBOX_ASSIGNERS, default_args)
    return assigner
def build_sampler(cfg, **default_args):
    """Builder of box sampler.

    Args:
        cfg (dict): Sampler config with a ``type`` registered in
            ``BBOX_SAMPLERS``.
        **default_args: Default arguments merged into ``cfg``.
    """
    sampler = build_from_cfg(cfg, BBOX_SAMPLERS, default_args)
    return sampler
def build_bbox_coder(cfg, **default_args):
    """Builder of box coder.

    Args:
        cfg (dict): Coder config with a ``type`` registered in
            ``BBOX_CODERS``.
        **default_args: Default arguments merged into ``cfg``.
    """
    coder = build_from_cfg(cfg, BBOX_CODERS, default_args)
    return coder
class BaseBBoxCoder(metaclass=ABCMeta):
    """Abstract base class for bounding box coders."""

    def __init__(self, **kwargs):
        # Accept arbitrary kwargs so subclasses can forward freely.
        pass

    @abstractmethod
    def encode(self, bboxes, gt_bboxes):
        """Encode deltas between bboxes and ground truth boxes."""

    @abstractmethod
    def decode(self, bboxes, bboxes_pred):
        """Decode the predicted bboxes according to prediction and base
        boxes."""
@BBOX_CODERS.register_module()
class DistancePointBBoxCoder(BaseBBoxCoder):
    """Distance Point BBox coder.

    This coder encodes gt bboxes (x1, y1, x2, y2) into (top, bottom, left,
    right) and decode it back to the original.

    Args:
        clip_border (bool, optional): Whether clip the objects outside the
            border of the image. Defaults to True.
    """

    def __init__(self, clip_border=True):
        # BUGFIX: the original called ``super(BaseBBoxCoder, self).__init__()``,
        # which skips ``BaseBBoxCoder.__init__`` in the MRO; use the plain
        # zero-argument form so the direct parent initializer runs.
        super().__init__()
        self.clip_border = clip_border

    def encode(self, points, gt_bboxes, max_dis=None, eps=0.1):
        """Encode bounding box to distances.

        Args:
            points (Tensor): Shape (N, 2), The format is [x, y].
            gt_bboxes (Tensor): Shape (N, 4), The format is "xyxy".
            max_dis (float): Upper bound of the distance. Default None.
            eps (float): a small value to ensure target < max_dis,
                instead <=. Default 0.1.

        Returns:
            Tensor: Box transformation deltas. The shape is (N, 4).
        """
        assert points.size(0) == gt_bboxes.size(0)
        assert points.size(-1) == 2
        assert gt_bboxes.size(-1) == 4
        return bbox2distance(points, gt_bboxes, max_dis, eps)

    def decode(self, points, pred_bboxes, max_shape=None):
        """Decode distance prediction to bounding box.

        Args:
            points (Tensor): Shape (B, N, 2) or (N, 2).
            pred_bboxes (Tensor): Distance from the given point to 4
                boundaries (left, top, right, bottom). Shape (B, N, 4)
                or (N, 4).
            max_shape (Sequence[int] or torch.Tensor or
                Sequence[Sequence[int]], optional): Maximum bounds for
                boxes, specifies (H, W, C) or (H, W). If priors shape is
                (B, N, 4), then max_shape should be a
                Sequence[Sequence[int]] of length B. Default None.

        Returns:
            Tensor: Boxes with shape (N, 4) or (B, N, 4).
        """
        assert points.size(0) == pred_bboxes.size(0)
        assert points.size(-1) == 2
        assert pred_bboxes.size(-1) == 4
        # clip_border=False disables clamping regardless of max_shape.
        if self.clip_border is False:
            max_shape = None
        return distance2bbox(points, pred_bboxes, max_shape)
@BBOX_CODERS.register_module()
class PseudoBBoxCoder(BaseBBoxCoder):
    """Pseudo bounding box coder that passes its inputs through unchanged."""

    def __init__(self, **kwargs):
        # Bug fix: ``super(BaseBBoxCoder, self).__init__(**kwargs)`` routed
        # kwargs to ``object.__init__``, raising TypeError whenever kwargs
        # were non-empty. ``BaseBBoxCoder.__init__`` accepts **kwargs.
        super().__init__(**kwargs)

    def encode(self, bboxes, gt_bboxes):
        """torch.Tensor: return the given ``gt_bboxes`` unchanged."""
        return gt_bboxes

    def decode(self, bboxes, pred_bboxes):
        """torch.Tensor: return the given ``pred_bboxes`` unchanged."""
        return pred_bboxes
def build_iou_calculator(cfg, default_args=None):
    """Build an IoU calculator from a config dict."""
    return build_from_cfg(cfg, IOU_CALCULATORS, default_args)
def build_match_cost(cfg, default_args=None):
    """Build a matching cost from a config dict.

    The original docstring said "Builder of IoU calculator", a copy-paste
    error: this builder draws from the ``MATCH_COST`` registry.
    """
    return build_from_cfg(cfg, MATCH_COST, default_args)
@BBOX_SAMPLERS.register_module()
class CombinedSampler(BaseSampler):
    """Sampler composed of a dedicated positive and negative sampler."""

    def __init__(self, pos_sampler, neg_sampler, **kwargs):
        super(CombinedSampler, self).__init__(**kwargs)
        # Build both delegate samplers from their configs, forwarding the
        # shared keyword arguments to each.
        self.pos_sampler = build_sampler(pos_sampler, **kwargs)
        self.neg_sampler = build_sampler(neg_sampler, **kwargs)

    def _sample_pos(self, **kwargs):
        """Sample positive samples (not implemented on the combined sampler
        itself)."""
        raise NotImplementedError

    def _sample_neg(self, **kwargs):
        """Sample negative samples (not implemented on the combined sampler
        itself)."""
        raise NotImplementedError
@BBOX_SAMPLERS.register_module()
class InstanceBalancedPosSampler(RandomSampler):
    """Instance balanced sampler that samples equal number of positive
    samples for each instance (ground-truth box)."""

    def _sample_pos(self, assign_result, num_expected, **kwargs):
        """Sample positive boxes, spreading the budget evenly across gts.

        Args:
            assign_result (:obj:`AssignResult`): The assigned results of boxes.
            num_expected (int): The number of expected positive samples

        Returns:
            Tensor or ndarray: sampled indices.
        """
        pos_inds = torch.nonzero((assign_result.gt_inds > 0), as_tuple=False)
        if (pos_inds.numel() != 0):
            pos_inds = pos_inds.squeeze(1)
        if (pos_inds.numel() <= num_expected):
            # Fewer positives than requested: take them all.
            return pos_inds
        else:
            unique_gt_inds = assign_result.gt_inds[pos_inds].unique()
            num_gts = len(unique_gt_inds)
            # Per-gt quota; +1 rounds the budget up so the quotas cover
            # num_expected even after the division.
            num_per_gt = int((round((num_expected / float(num_gts))) + 1))
            sampled_inds = []
            for i in unique_gt_inds:
                inds = torch.nonzero((assign_result.gt_inds == i.item()), as_tuple=False)
                if (inds.numel() != 0):
                    inds = inds.squeeze(1)
                else:
                    continue
                if (len(inds) > num_per_gt):
                    inds = self.random_choice(inds, num_per_gt)
                sampled_inds.append(inds)
            sampled_inds = torch.cat(sampled_inds)
            if (len(sampled_inds) < num_expected):
                # Quota rounding left us short: top up from the remaining
                # positives not yet chosen.
                num_extra = (num_expected - len(sampled_inds))
                extra_inds = np.array(list((set(pos_inds.cpu()) - set(sampled_inds.cpu()))))
                if (len(extra_inds) > num_extra):
                    extra_inds = self.random_choice(extra_inds, num_extra)
                extra_inds = torch.from_numpy(extra_inds).to(assign_result.gt_inds.device).long()
                sampled_inds = torch.cat([sampled_inds, extra_inds])
            elif (len(sampled_inds) > num_expected):
                # Quota rounding overshot: randomly trim to the budget.
                sampled_inds = self.random_choice(sampled_inds, num_expected)
            return sampled_inds
@BBOX_SAMPLERS.register_module()
class IoUBalancedNegSampler(RandomSampler):
    """IoU Balanced Sampling.

    arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019)

    Sampling proposals according to their IoU. `floor_fraction` of needed
    RoIs are sampled from proposals whose IoU are lower than `floor_thr`
    randomly. The others are sampled from proposals whose IoU are higher
    than `floor_thr`. These proposals are sampled from some bins evenly,
    which are split by `num_bins` via IoU evenly.

    Args:
        num (int): number of proposals.
        pos_fraction (float): fraction of positive proposals.
        floor_thr (float): threshold (minimum) IoU for IoU balanced
            sampling, set to -1 if all using IoU balanced sampling.
        floor_fraction (float): sampling fraction of proposals under
            floor_thr.
        num_bins (int): number of bins in IoU balanced sampling.
    """

    def __init__(self,
                 num,
                 pos_fraction,
                 floor_thr=-1,
                 floor_fraction=0,
                 num_bins=3,
                 **kwargs):
        super(IoUBalancedNegSampler, self).__init__(num, pos_fraction,
                                                    **kwargs)
        # floor_thr == -1 means "no floor bucket": everything goes through
        # the IoU-binned branch.
        assert floor_thr >= 0 or floor_thr == -1
        assert 0 <= floor_fraction <= 1
        assert num_bins >= 1
        self.floor_thr = floor_thr
        self.floor_fraction = floor_fraction
        self.num_bins = num_bins

    def sample_via_interval(self, max_overlaps, full_set, num_expected):
        """Sample according to the iou interval.

        Args:
            max_overlaps (torch.Tensor): IoU between bounding boxes and
                ground truth boxes.
            full_set (set(int)): A full set of indices of boxes.
            num_expected (int): Number of expected samples.

        Returns:
            np.ndarray: Indices of samples
        """
        max_iou = max_overlaps.max()
        iou_interval = (max_iou - self.floor_thr) / self.num_bins
        per_num_expected = int(num_expected / self.num_bins)
        sampled_inds = []
        for i in range(self.num_bins):
            start_iou = self.floor_thr + i * iou_interval
            end_iou = self.floor_thr + (i + 1) * iou_interval
            tmp_set = set(
                np.where(
                    np.logical_and(max_overlaps >= start_iou,
                                   max_overlaps < end_iou))[0])
            tmp_inds = list(tmp_set & full_set)
            if len(tmp_inds) > per_num_expected:
                tmp_sampled_set = self.random_choice(tmp_inds,
                                                     per_num_expected)
            else:
                # Bug fix: ``np.int`` was a deprecated alias removed in
                # NumPy >= 1.24; the builtin ``int`` is the documented
                # replacement and yields the same default integer dtype.
                tmp_sampled_set = np.array(tmp_inds, dtype=int)
            sampled_inds.append(tmp_sampled_set)
        sampled_inds = np.concatenate(sampled_inds)
        if len(sampled_inds) < num_expected:
            # Bins were too sparse: top up with random leftovers.
            num_extra = num_expected - len(sampled_inds)
            extra_inds = np.array(list(full_set - set(sampled_inds)))
            if len(extra_inds) > num_extra:
                extra_inds = self.random_choice(extra_inds, num_extra)
            sampled_inds = np.concatenate([sampled_inds, extra_inds])
        return sampled_inds

    def _sample_neg(self, assign_result, num_expected, **kwargs):
        """Sample negative boxes.

        Args:
            assign_result (:obj:`AssignResult`): The assigned results of
                boxes.
            num_expected (int): The number of expected negative samples

        Returns:
            Tensor or ndarray: sampled indices.
        """
        neg_inds = torch.nonzero(assign_result.gt_inds == 0, as_tuple=False)
        if neg_inds.numel() != 0:
            neg_inds = neg_inds.squeeze(1)
        if len(neg_inds) <= num_expected:
            return neg_inds
        else:
            max_overlaps = assign_result.max_overlaps.cpu().numpy()
            # Balance sampling over the whole negative set.
            neg_set = set(neg_inds.cpu().numpy())
            if self.floor_thr > 0:
                floor_set = set(
                    np.where(
                        np.logical_and(max_overlaps >= 0,
                                       max_overlaps < self.floor_thr))[0])
                iou_sampling_set = set(
                    np.where(max_overlaps >= self.floor_thr)[0])
            elif self.floor_thr == 0:
                floor_set = set(np.where(max_overlaps == 0)[0])
                iou_sampling_set = set(
                    np.where(max_overlaps > self.floor_thr)[0])
            else:
                floor_set = set()
                iou_sampling_set = set(
                    np.where(max_overlaps > self.floor_thr)[0])
                # NOTE: mutates the sampler so later calls use a floor of 0;
                # preserved from the original implementation.
                self.floor_thr = 0
            floor_neg_inds = list(floor_set & neg_set)
            iou_sampling_neg_inds = list(iou_sampling_set & neg_set)
            num_expected_iou_sampling = int(num_expected *
                                            (1 - self.floor_fraction))
            if len(iou_sampling_neg_inds) > num_expected_iou_sampling:
                if self.num_bins >= 2:
                    iou_sampled_inds = self.sample_via_interval(
                        max_overlaps, set(iou_sampling_neg_inds),
                        num_expected_iou_sampling)
                else:
                    iou_sampled_inds = self.random_choice(
                        iou_sampling_neg_inds, num_expected_iou_sampling)
            else:
                # Bug fix: np.int -> int (removed in NumPy >= 1.24).
                iou_sampled_inds = np.array(iou_sampling_neg_inds, dtype=int)
            num_expected_floor = num_expected - len(iou_sampled_inds)
            if len(floor_neg_inds) > num_expected_floor:
                sampled_floor_inds = self.random_choice(
                    floor_neg_inds, num_expected_floor)
            else:
                # Bug fix: np.int -> int (removed in NumPy >= 1.24).
                sampled_floor_inds = np.array(floor_neg_inds, dtype=int)
            sampled_inds = np.concatenate(
                (sampled_floor_inds, iou_sampled_inds))
            if len(sampled_inds) < num_expected:
                # Still short: top up with random leftover negatives.
                num_extra = num_expected - len(sampled_inds)
                extra_inds = np.array(list(neg_set - set(sampled_inds)))
                if len(extra_inds) > num_extra:
                    extra_inds = self.random_choice(extra_inds, num_extra)
                sampled_inds = np.concatenate((sampled_inds, extra_inds))
            sampled_inds = torch.from_numpy(sampled_inds).long().to(
                assign_result.gt_inds.device)
            return sampled_inds
@BBOX_SAMPLERS.register_module()
class MaskPseudoSampler(BaseSampler):
    """A pseudo sampler that does not do sampling actually."""

    def __init__(self, **kwargs):
        # Intentionally no-op: this sampler needs no configuration and does
        # not call BaseSampler.__init__.
        pass

    def _sample_pos(self, **kwargs):
        """Sample positive samples."""
        raise NotImplementedError

    def _sample_neg(self, **kwargs):
        """Sample negative samples."""
        raise NotImplementedError

    def sample(self, assign_result, masks, gt_masks, **kwargs):
        """Directly return the positive and negative indices of samples.

        Args:
            assign_result (:obj:`AssignResult`): Assigned results
            masks (torch.Tensor): Bounding boxes
            gt_masks (torch.Tensor): Ground truth boxes

        Returns:
            :obj:`SamplingResult`: sampler results
        """
        gt_inds = assign_result.gt_inds
        pos_inds = torch.nonzero(
            gt_inds > 0, as_tuple=False).squeeze(-1).unique()
        neg_inds = torch.nonzero(
            gt_inds == 0, as_tuple=False).squeeze(-1).unique()
        # No gt boxes are injected as proposals, so all flags stay zero.
        gt_flags = masks.new_zeros(masks.shape[0], dtype=torch.uint8)
        return MaskSamplingResult(pos_inds, neg_inds, masks, gt_masks,
                                  assign_result, gt_flags)
@BBOX_SAMPLERS.register_module()
class OHEMSampler(BaseSampler):
    """Online Hard Example Mining Sampler described in `Training Region-based
    Object Detectors with Online Hard Example Mining
    <https://arxiv.org/abs/1604.03540>`_.
    """

    def __init__(self, num, pos_fraction, context, neg_pos_ub=(- 1),
                 add_gt_as_proposals=True, loss_key='loss_cls', **kwargs):
        # ``context`` must expose ``bbox_head`` and ``_bbox_forward`` (plus
        # ``num_stages``/``current_stage`` for cascade-style heads); it is
        # used to re-score candidates during hard mining.
        super(OHEMSampler, self).__init__(num, pos_fraction, neg_pos_ub,
                                          add_gt_as_proposals)
        self.context = context
        if (not hasattr(self.context, 'num_stages')):
            # Plain head: a single bbox_head.
            self.bbox_head = self.context.bbox_head
        else:
            # Cascade head: pick the bbox_head of the current stage.
            self.bbox_head = self.context.bbox_head[self.context.current_stage]
        # Which entry of the loss dict ranks the "hardness" of a sample.
        self.loss_key = loss_key

    def hard_mining(self, inds, num_expected, bboxes, labels, feats):
        """Return the ``num_expected`` indices whose boxes currently incur
        the highest classification loss.

        Args:
            inds (torch.Tensor): Candidate indices into the full proposal set.
            num_expected (int): Number of samples to keep.
            bboxes (torch.Tensor): Boxes of the candidates.
            labels (torch.Tensor): Classification targets of the candidates.
            feats (list[torch.Tensor]): Multi-level features.

        Returns:
            torch.Tensor: Subset of ``inds`` with the top-k losses.
        """
        with torch.no_grad():
            rois = bbox2roi([bboxes])
            if (not hasattr(self.context, 'num_stages')):
                bbox_results = self.context._bbox_forward(feats, rois)
            else:
                bbox_results = self.context._bbox_forward(
                    self.context.current_stage, feats, rois)
            cls_score = bbox_results['cls_score']
            # reduction_override='none' keeps a per-sample loss vector; only
            # the entry under ``self.loss_key`` is used for ranking.
            loss = self.bbox_head.loss(
                cls_score=cls_score,
                bbox_pred=None,
                rois=rois,
                labels=labels,
                label_weights=cls_score.new_ones(cls_score.size(0)),
                bbox_targets=None,
                bbox_weights=None,
                reduction_override='none')[self.loss_key]
            (_, topk_loss_inds) = loss.topk(num_expected)
        return inds[topk_loss_inds]

    def _sample_pos(self, assign_result, num_expected, bboxes=None,
                    feats=None, **kwargs):
        """Sample positive boxes.

        Args:
            assign_result (:obj:`AssignResult`): Assigned results
            num_expected (int): Number of expected positive samples
            bboxes (torch.Tensor, optional): Boxes. Defaults to None.
            feats (list[torch.Tensor], optional): Multi-level features.
                Defaults to None.

        Returns:
            torch.Tensor: Indices of positive samples
        """
        pos_inds = torch.nonzero((assign_result.gt_inds > 0), as_tuple=False)
        if (pos_inds.numel() != 0):
            pos_inds = pos_inds.squeeze(1)
        if (pos_inds.numel() <= num_expected):
            # Fewer positives than the budget: keep them all.
            return pos_inds
        else:
            return self.hard_mining(pos_inds, num_expected,
                                    bboxes[pos_inds],
                                    assign_result.labels[pos_inds], feats)

    def _sample_neg(self, assign_result, num_expected, bboxes=None,
                    feats=None, **kwargs):
        """Sample negative boxes.

        Args:
            assign_result (:obj:`AssignResult`): Assigned results
            num_expected (int): Number of expected negative samples
            bboxes (torch.Tensor, optional): Boxes. Defaults to None.
            feats (list[torch.Tensor], optional): Multi-level features.
                Defaults to None.

        Returns:
            torch.Tensor: Indices of negative samples
        """
        neg_inds = torch.nonzero((assign_result.gt_inds == 0), as_tuple=False)
        if (neg_inds.numel() != 0):
            neg_inds = neg_inds.squeeze(1)
        if (len(neg_inds) <= num_expected):
            return neg_inds
        else:
            # Negatives are labeled with the background class index
            # (== num_classes) before ranking by loss.
            neg_labels = assign_result.labels.new_empty(
                neg_inds.size(0)).fill_(self.bbox_head.num_classes)
            return self.hard_mining(neg_inds, num_expected,
                                    bboxes[neg_inds], neg_labels, feats)
@BBOX_SAMPLERS.register_module()
class PseudoSampler(BaseSampler):
    """A pseudo sampler that does not do sampling actually."""

    def __init__(self, **kwargs):
        # Intentionally no-op: nothing to configure and BaseSampler.__init__
        # is deliberately not called.
        pass

    def _sample_pos(self, **kwargs):
        """Sample positive samples."""
        raise NotImplementedError

    def _sample_neg(self, **kwargs):
        """Sample negative samples."""
        raise NotImplementedError

    def sample(self, assign_result, bboxes, gt_bboxes, *args, **kwargs):
        """Directly return the positive and negative indices of samples.

        Args:
            assign_result (:obj:`AssignResult`): Assigned results
            bboxes (torch.Tensor): Bounding boxes
            gt_bboxes (torch.Tensor): Ground truth boxes

        Returns:
            :obj:`SamplingResult`: sampler results
        """
        gt_inds = assign_result.gt_inds
        pos_inds = torch.nonzero(
            gt_inds > 0, as_tuple=False).squeeze(-1).unique()
        neg_inds = torch.nonzero(
            gt_inds == 0, as_tuple=False).squeeze(-1).unique()
        # No gt boxes are injected as proposals, so all flags stay zero.
        gt_flags = bboxes.new_zeros(bboxes.shape[0], dtype=torch.uint8)
        return SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes,
                              assign_result, gt_flags)
@BBOX_SAMPLERS.register_module()
class RandomSampler(BaseSampler):
    """Random sampler.

    Args:
        num (int): Number of samples
        pos_fraction (float): Fraction of positive samples
        neg_pos_ub (int, optional): Upper bound number of negative and
            positive samples. Defaults to -1.
        add_gt_as_proposals (bool, optional): Whether to add ground truth
            boxes as proposals. Defaults to True.
    """

    def __init__(self,
                 num,
                 pos_fraction,
                 neg_pos_ub=-1,
                 add_gt_as_proposals=True,
                 **kwargs):
        from mmdet.core.bbox import demodata
        super(RandomSampler, self).__init__(num, pos_fraction, neg_pos_ub,
                                            add_gt_as_proposals)
        # Optional 'rng' kwarg seeds the demo random generator.
        self.rng = demodata.ensure_rng(kwargs.get('rng', None))

    def random_choice(self, gallery, num):
        """Randomly pick ``num`` elements from the gallery.

        If ``gallery`` is a Tensor, the returned indices are a Tensor;
        if it is an ndarray or list, the returned indices are an ndarray.

        Args:
            gallery (Tensor | ndarray | list): indices pool.
            num (int): expected sample num.

        Returns:
            Tensor or ndarray: sampled indices.
        """
        assert len(gallery) >= num
        as_tensor = isinstance(gallery, torch.Tensor)
        if not as_tensor:
            # Lists/arrays are lifted to a long tensor (on GPU if present)
            # so they can be indexed by the permutation below.
            device = torch.cuda.current_device() \
                if torch.cuda.is_available() else 'cpu'
            gallery = torch.tensor(gallery, dtype=torch.long, device=device)
        perm = torch.randperm(gallery.numel())[:num].to(
            device=gallery.device)
        picked = gallery[perm]
        # Mirror the input container type on the way out.
        return picked if as_tensor else picked.cpu().numpy()

    def _sample_pos(self, assign_result, num_expected, **kwargs):
        """Randomly sample some positive samples."""
        pos_inds = torch.nonzero(assign_result.gt_inds > 0, as_tuple=False)
        if pos_inds.numel() != 0:
            pos_inds = pos_inds.squeeze(1)
        if pos_inds.numel() <= num_expected:
            return pos_inds
        return self.random_choice(pos_inds, num_expected)

    def _sample_neg(self, assign_result, num_expected, **kwargs):
        """Randomly sample some negative samples."""
        neg_inds = torch.nonzero(assign_result.gt_inds == 0, as_tuple=False)
        if neg_inds.numel() != 0:
            neg_inds = neg_inds.squeeze(1)
        if len(neg_inds) <= num_expected:
            return neg_inds
        return self.random_choice(neg_inds, num_expected)
class GeneralData(NiceRepr):
    """A general data structure of OpenMMLab.

    Stores the meta information of an image together with annotations or
    model predictions, split into two attribute groups:

    - ``meta_info_fields``: image-level meta information (``img_shape``,
      ``scale_factor``, ...). Immutable once set; extend it only through
      :meth:`set_meta_info`. Accessed via :meth:`meta_info_keys`,
      :meth:`meta_info_values`, :meth:`meta_info_items`.
    - ``data_fields``: annotations or predictions. Support dict-like and
      object-like access (``.``, ``[]``, ``in``, ``del``, ``pop(str)``,
      ``get(str)``, ``keys()``, ``values()``, ``items()``) plus tensor-like
      conversions (:meth:`to`, :meth:`cpu`, :meth:`cuda`, :meth:`detach`,
      :meth:`numpy`) applied to every ``torch.Tensor`` value.

    Args:
        meta_info (dict, optional): Meta information of a single image,
            e.g. ``img_shape``, ``scale_factor``. Default: None.
        data (dict, optional): Annotations of a single image or model
            predictions. Default: None.
    """

    def __init__(self, meta_info=None, data=None):
        # The two registries must exist before any public attribute is set,
        # because __setattr__ consults them for every assignment.
        self._meta_info_fields = set()
        self._data_fields = set()
        if (meta_info is not None):
            self.set_meta_info(meta_info=meta_info)
        if (data is not None):
            self.set_data(data)

    def set_meta_info(self, meta_info):
        """Add meta information.

        Args:
            meta_info (dict): A dict that contains the meta information of
                an image, such as ``img_shape``, ``scale_factor``.

        Raises:
            KeyError: If a key was already set with a different value
                (meta information is immutable once set).
        """
        assert isinstance(meta_info, dict), f'meta should be a `dict` but get {meta_info}'
        # Deep-copy so later mutation of the caller's dict cannot leak in.
        meta = copy.deepcopy(meta_info)
        for (k, v) in meta.items():
            if (k in self._meta_info_fields):
                ori_value = getattr(self, k)
                if isinstance(ori_value, (torch.Tensor, np.ndarray)):
                    # Array-likes need an element-wise equality check.
                    if (ori_value == v).all():
                        continue
                    else:
                        raise KeyError(f'img_meta_info {k} has been set as {getattr(self, k)} before, which is immutable ')
                elif (ori_value == v):
                    # Re-setting the identical value is a silent no-op.
                    continue
                else:
                    raise KeyError(f'img_meta_info {k} has been set as {getattr(self, k)} before, which is immutable ')
            else:
                self._meta_info_fields.add(k)
                # Write via __dict__ to bypass __setattr__, which would
                # otherwise register the key as a data field.
                self.__dict__[k] = v

    def set_data(self, data):
        """Update a dict into ``data_fields``.

        Args:
            data (dict): A dict that contains annotations of an image or
                model predictions.
        """
        assert isinstance(data, dict), f'meta should be a `dict` but get {data}'
        for (k, v) in data.items():
            # Route through __setattr__ so each key is registered as a
            # data field.
            self.__setattr__(k, v)

    def new(self, meta_info=None, data=None):
        """Return a new instance carrying the same meta information.

        Args:
            meta_info (dict, optional): Additional meta information to merge
                into the copy. Default: None.
            data (dict, optional): Data fields of the new instance.
                Default: None.
        """
        new_data = self.__class__()
        new_data.set_meta_info(dict(self.meta_info_items()))
        if (meta_info is not None):
            new_data.set_meta_info(meta_info)
        if (data is not None):
            new_data.set_data(data)
        return new_data

    def keys(self):
        """list: All keys in ``data_fields``."""
        return [key for key in self._data_fields]

    def meta_info_keys(self):
        """list: All keys in ``meta_info_fields``."""
        return [key for key in self._meta_info_fields]

    def values(self):
        """list: All values in ``data_fields``."""
        return [getattr(self, k) for k in self.keys()]

    def meta_info_values(self):
        """list: All values in ``meta_info_fields``."""
        return [getattr(self, k) for k in self.meta_info_keys()]

    def items(self):
        """Yield ``(key, value)`` pairs of the data fields."""
        for k in self.keys():
            (yield (k, getattr(self, k)))

    def meta_info_items(self):
        """Yield ``(key, value)`` pairs of the meta info fields."""
        for k in self.meta_info_keys():
            (yield (k, getattr(self, k)))

    def __setattr__(self, name, val):
        if (name in ('_meta_info_fields', '_data_fields')):
            # The registries may only be created once (in __init__) and are
            # immutable afterwards.
            if (not hasattr(self, name)):
                super().__setattr__(name, val)
            else:
                raise AttributeError(f'{name} has been used as a private attribute, which is immutable. ')
        else:
            if (name in self._meta_info_fields):
                raise AttributeError(f'`{name}` is used in meta information,which is immutable')
            # Every other attribute is tracked as a data field.
            self._data_fields.add(name)
            super().__setattr__(name, val)

    def __delattr__(self, item):
        if (item in ('_meta_info_fields', '_data_fields')):
            raise AttributeError(f'{item} has been used as a private attribute, which is immutable. ')
        if (item in self._meta_info_fields):
            raise KeyError(f'{item} is used in meta information, which is immutable.')
        super().__delattr__(item)
        if (item in self._data_fields):
            self._data_fields.remove(item)

    # Dict-style access reuses the attribute protocol above.
    __setitem__ = __setattr__
    __delitem__ = __delattr__

    def __getitem__(self, name):
        return getattr(self, name)

    def get(self, *args):
        """Same signature as ``dict.get``: ``get(key[, default])``."""
        assert (len(args) < 3), '`get` get more than 2 arguments'
        return self.__dict__.get(*args)

    def pop(self, *args):
        """Same signature as ``dict.pop``: ``pop(key[, default])``.

        Raises:
            KeyError: If the key lives in meta information (immutable) or is
                missing and no default was supplied.
        """
        assert (len(args) < 3), '`pop` get more than 2 arguments'
        name = args[0]
        if (name in self._meta_info_fields):
            raise KeyError(f'{name} is a key in meta information, which is immutable')
        if (args[0] in self._data_fields):
            self._data_fields.remove(args[0])
            return self.__dict__.pop(*args)
        elif (len(args) == 2):
            # Key absent but a default was given.
            return args[1]
        else:
            raise KeyError(f'{args[0]}')

    def __contains__(self, item):
        return ((item in self._data_fields) or (item in self._meta_info_fields))

    def to(self, *args, **kwargs):
        """Return a copy with ``.to(*args, **kwargs)`` applied to every
        value that supports it."""
        new_data = self.new()
        for (k, v) in self.items():
            if hasattr(v, 'to'):
                v = v.to(*args, **kwargs)
            new_data[k] = v
        return new_data

    def cpu(self):
        """Return a copy with all tensor values moved to CPU."""
        new_data = self.new()
        for (k, v) in self.items():
            if isinstance(v, torch.Tensor):
                v = v.cpu()
            new_data[k] = v
        return new_data

    def cuda(self):
        """Return a copy with all tensor values moved to CUDA."""
        new_data = self.new()
        for (k, v) in self.items():
            if isinstance(v, torch.Tensor):
                v = v.cuda()
            new_data[k] = v
        return new_data

    def detach(self):
        """Return a copy with all tensor values detached from the graph."""
        new_data = self.new()
        for (k, v) in self.items():
            if isinstance(v, torch.Tensor):
                v = v.detach()
            new_data[k] = v
        return new_data

    def numpy(self):
        """Return a copy with all tensor values converted to numpy arrays."""
        new_data = self.new()
        for (k, v) in self.items():
            if isinstance(v, torch.Tensor):
                v = v.detach().cpu().numpy()
            new_data[k] = v
        return new_data

    def __nice__(self):
        """Build the body text used by :class:`NiceRepr` for ``repr``."""
        # NOTE: `repr` deliberately shadows the builtin inside this method.
        repr = '\n \n META INFORMATION \n'
        for (k, v) in self.meta_info_items():
            repr += f'''{k}: {v} 
'''
        repr += '\n DATA FIELDS \n'
        for (k, v) in self.items():
            if isinstance(v, (torch.Tensor, np.ndarray)):
                # Tensors/arrays are summarized by shape, not dumped.
                repr += f'''shape of {k}: {v.shape} 
'''
            else:
                repr += f'''{k}: {v} 
'''
        return (repr + '\n')
class InstanceData(GeneralData):
    """Data structure for instance-level annotations or predictions.

    Subclass of :class:`GeneralData`. Every value stored in ``data_fields``
    must be a ``torch.Tensor``, ``np.ndarray`` or ``list``, and all values
    must share the same length (the number of instances). That invariant
    enables instance-wise indexing (``results[mask]``) and concatenation
    (:meth:`cat`). This design refers to
    https://github.com/facebookresearch/detectron2/blob/master/detectron2/structures/instances.py # noqa E501
    """

    def __setattr__(self, name, value):
        if (name in ('_meta_info_fields', '_data_fields')):
            # The field registries may only be created once (see GeneralData).
            if (not hasattr(self, name)):
                super().__setattr__(name, value)
            else:
                raise AttributeError(f'{name} has been used as a private attribute, which is immutable. ')
        else:
            assert isinstance(value, (torch.Tensor, np.ndarray, list)), f'Can set {type(value)}, only support {(torch.Tensor, np.ndarray, list)}'
            if self._data_fields:
                # Enforce the "all fields share one length" invariant.
                assert (len(value) == len(self)), f'the length of values {len(value)} is not consistent with the length of this :obj:`InstanceData` {len(self)} '
            super().__setattr__(name, value)

    def __getitem__(self, item):
        """Index the stored instances.

        Args:
            item (str, obj:`slice`,
                obj`torch.LongTensor`, obj:`torch.BoolTensor`):
                Field name, or an instance-wise index along the first
                dimension.

        Returns:
            obj:`InstanceData`: Corresponding values (or the raw field value
            when ``item`` is a str).
        """
        assert len(self), ' This is a empty instance'
        assert isinstance(item, (str, slice, int, torch.LongTensor, torch.BoolTensor))
        if isinstance(item, str):
            return getattr(self, item)
        if (type(item) == int):
            if ((item >= len(self)) or (item < (- len(self)))):
                raise IndexError(f'Index {item} out of range!')
            else:
                # Turn the int into a slice that selects only that element
                # (step == len(self) yields a single hit).
                item = slice(item, None, len(self))
        new_data = self.new()
        if isinstance(item, torch.Tensor):
            assert (item.dim() == 1), 'Only support to get the values along the first dimension.'
            if isinstance(item, torch.BoolTensor):
                assert (len(item) == len(self)), f'The shape of the input(BoolTensor)) {len(item)} does not match the shape of the indexed tensor in results_filed {len(self)} at first dimension. '
            for (k, v) in self.items():
                if isinstance(v, torch.Tensor):
                    new_data[k] = v[item]
                elif isinstance(v, np.ndarray):
                    # ndarray cannot take a torch index directly.
                    new_data[k] = v[item.cpu().numpy()]
                elif isinstance(v, list):
                    # Lists are gathered element-wise; a bool mask is first
                    # converted to integer positions.
                    r_list = []
                    if isinstance(item, torch.BoolTensor):
                        indexes = torch.nonzero(item).view((- 1))
                    else:
                        indexes = item
                    for index in indexes:
                        r_list.append(v[index])
                    new_data[k] = r_list
        else:
            # slice (including the int-converted case) works on every
            # supported container type.
            for (k, v) in self.items():
                new_data[k] = v[item]
        return new_data

    @staticmethod
    def cat(instances_list):
        """Concat the predictions of all :obj:`InstanceData` in the list.

        Args:
            instances_list (list[:obj:`InstanceData`]): A list
                of :obj:`InstanceData`.

        Returns:
            obj:`InstanceData`
        """
        assert all((isinstance(results, InstanceData) for results in instances_list))
        assert (len(instances_list) > 0)
        if (len(instances_list) == 1):
            return instances_list[0]
        # Meta info is inherited from the first element via new().
        new_data = instances_list[0].new()
        for k in instances_list[0]._data_fields:
            values = [results[k] for results in instances_list]
            v0 = values[0]
            if isinstance(v0, torch.Tensor):
                values = torch.cat(values, dim=0)
            elif isinstance(v0, np.ndarray):
                values = np.concatenate(values, axis=0)
            elif isinstance(v0, list):
                values = list(itertools.chain(*values))
            else:
                raise ValueError(f'Can not concat the {k} which is a {type(v0)}')
            new_data[k] = values
        return new_data

    def __len__(self):
        # All fields share one length, so any value's length will do.
        if len(self._data_fields):
            for v in self.values():
                return len(v)
        else:
            raise AssertionError('This is an empty `InstanceData`.')
def wider_face_classes():
    """Return the class names of the WIDER FACE dataset."""
    return ['face']
def voc_classes():
    """Return the 20 class names of the PASCAL VOC dataset."""
    names = (
        'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car',
        'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike',
        'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor')
    return list(names)
def imagenet_det_classes():
    """Return the class names of the ImageNet DET dataset."""
    return [
        'accordion', 'airplane', 'ant', 'antelope', 'apple', 'armadillo',
        'artichoke', 'axe', 'baby_bed', 'backpack', 'bagel', 'balance_beam',
        'banana', 'band_aid', 'banjo', 'baseball', 'basketball',
        'bathing_cap', 'beaker', 'bear', 'bee', 'bell_pepper', 'bench',
        'bicycle', 'binder', 'bird', 'bookshelf', 'bow_tie', 'bow', 'bowl',
        'brassiere', 'burrito', 'bus', 'butterfly', 'camel', 'can_opener',
        'car', 'cart', 'cattle', 'cello', 'centipede', 'chain_saw', 'chair',
        'chime', 'cocktail_shaker', 'coffee_maker', 'computer_keyboard',
        'computer_mouse', 'corkscrew', 'cream', 'croquet_ball', 'crutch',
        'cucumber', 'cup_or_mug', 'diaper', 'digital_clock', 'dishwasher',
        'dog', 'domestic_cat', 'dragonfly', 'drum', 'dumbbell',
        'electric_fan', 'elephant', 'face_powder', 'fig', 'filing_cabinet',
        'flower_pot', 'flute', 'fox', 'french_horn', 'frog', 'frying_pan',
        'giant_panda', 'goldfish', 'golf_ball', 'golfcart', 'guacamole',
        'guitar', 'hair_dryer', 'hair_spray', 'hamburger', 'hammer',
        'hamster', 'harmonica', 'harp', 'hat_with_a_wide_brim',
        'head_cabbage', 'helmet', 'hippopotamus', 'horizontal_bar', 'horse',
        'hotdog', 'iPod', 'isopod', 'jellyfish', 'koala_bear', 'ladle',
        'ladybug', 'lamp', 'laptop', 'lemon', 'lion', 'lipstick', 'lizard',
        'lobster', 'maillot', 'maraca', 'microphone', 'microwave',
        'milk_can', 'miniskirt', 'monkey', 'motorcycle', 'mushroom', 'nail',
        'neck_brace', 'oboe', 'orange', 'otter', 'pencil_box',
        'pencil_sharpener', 'perfume', 'person', 'piano', 'pineapple',
        'ping-pong_ball', 'pitcher', 'pizza', 'plastic_bag', 'plate_rack',
        'pomegranate', 'popsicle', 'porcupine', 'power_drill', 'pretzel',
        'printer', 'puck', 'punching_bag', 'purse', 'rabbit', 'racket',
        'ray', 'red_panda', 'refrigerator', 'remote_control',
        'rubber_eraser', 'rugby_ball', 'ruler', 'salt_or_pepper_shaker',
        'saxophone', 'scorpion', 'screwdriver', 'seal', 'sheep', 'ski',
        'skunk', 'snail', 'snake', 'snowmobile', 'snowplow',
        'soap_dispenser', 'soccer_ball', 'sofa', 'spatula', 'squirrel',
        'starfish', 'stethoscope', 'stove', 'strainer', 'strawberry',
        'stretcher', 'sunglasses', 'swimming_trunks', 'swine', 'syringe',
        'table', 'tape_player', 'tennis_ball', 'tick', 'tie', 'tiger',
        'toaster', 'traffic_light', 'train', 'trombone', 'trumpet',
        'turtle', 'tv_or_monitor', 'unicycle', 'vacuum', 'violin',
        'volleyball', 'waffle_iron', 'washer', 'water_bottle', 'watercraft',
        'whale', 'wine_bottle', 'zebra']
def imagenet_vid_classes():
    """Return the 30 class names of the ImageNet VID dataset."""
    names = (
        'airplane', 'antelope', 'bear', 'bicycle', 'bird', 'bus', 'car',
        'cattle', 'dog', 'domestic_cat', 'elephant', 'fox', 'giant_panda',
        'hamster', 'horse', 'lion', 'lizard', 'monkey', 'motorcycle',
        'rabbit', 'red_panda', 'sheep', 'snake', 'squirrel', 'tiger',
        'train', 'turtle', 'watercraft', 'whale', 'zebra')
    return list(names)
def coco_classes():
    """Return the 80 class names of the COCO detection dataset."""
    names = (
        'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
        'train', 'truck', 'boat', 'traffic_light', 'fire_hydrant',
        'stop_sign', 'parking_meter', 'bench', 'bird', 'cat', 'dog',
        'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe',
        'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
        'skis', 'snowboard', 'sports_ball', 'kite', 'baseball_bat',
        'baseball_glove', 'skateboard', 'surfboard', 'tennis_racket',
        'bottle', 'wine_glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
        'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
        'hot_dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
        'potted_plant', 'bed', 'dining_table', 'toilet', 'tv', 'laptop',
        'mouse', 'remote', 'keyboard', 'cell_phone', 'microwave', 'oven',
        'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
        'scissors', 'teddy_bear', 'hair_drier', 'toothbrush')
    return list(names)
def cityscapes_classes():
    """Return the 8 instance class names of the Cityscapes dataset."""
    names = ('person', 'rider', 'car', 'truck', 'bus', 'train',
             'motorcycle', 'bicycle')
    return list(names)
def oid_challenge_classes():
    """Return the class names of the Open Images Challenge dataset."""
    return [
        'Footwear', 'Jeans', 'House', 'Tree', 'Woman', 'Man',
        'Land vehicle', 'Person', 'Wheel', 'Bus', 'Human face', 'Bird',
        'Dress', 'Girl', 'Vehicle', 'Building', 'Cat', 'Car', 'Belt',
        'Elephant', 'Dessert', 'Butterfly', 'Train', 'Guitar', 'Poster',
        'Book', 'Boy', 'Bee', 'Flower', 'Window', 'Hat', 'Human head',
        'Dog', 'Human arm', 'Drink', 'Human mouth', 'Human hair',
        'Human nose', 'Human hand', 'Table', 'Marine invertebrates',
        'Fish', 'Sculpture', 'Rose', 'Street light', 'Glasses', 'Fountain',
        'Skyscraper', 'Swimwear', 'Brassiere', 'Drum', 'Duck',
        'Countertop', 'Furniture', 'Ball', 'Human leg', 'Boat', 'Balloon',
        'Bicycle helmet', 'Goggles', 'Door', 'Human eye', 'Shirt', 'Toy',
        'Teddy bear', 'Pasta', 'Tomato', 'Human ear',
        'Vehicle registration plate', 'Microphone', 'Musical keyboard',
        'Tower', 'Houseplant', 'Flowerpot', 'Fruit', 'Vegetable',
        'Musical instrument', 'Suit', 'Motorcycle', 'Bagel',
        'French fries', 'Hamburger', 'Chair', 'Salt and pepper shakers',
        'Snail', 'Airplane', 'Horse', 'Laptop', 'Computer keyboard',
        'Football helmet', 'Cocktail', 'Juice', 'Tie', 'Computer monitor',
        'Human beard', 'Bottle', 'Saxophone', 'Lemon', 'Mouse', 'Sock',
        'Cowboy hat', 'Sun hat', 'Football', 'Porch', 'Sunglasses',
        'Lobster', 'Crab', 'Picture frame', 'Van', 'Crocodile',
        'Surfboard', 'Shorts', 'Helicopter', 'Helmet', 'Sports uniform',
        'Taxi', 'Swan', 'Goose', 'Coat', 'Jacket', 'Handbag', 'Flag',
        'Skateboard', 'Television', 'Tire', 'Spoon', 'Palm tree',
        'Stairs', 'Salad', 'Castle', 'Oven', 'Microwave oven', 'Wine',
        'Ceiling fan', 'Mechanical fan', 'Cattle', 'Truck', 'Box',
        'Ambulance', 'Desk', 'Wine glass', 'Reptile', 'Tank',
        'Traffic light', 'Billboard', 'Tent', 'Insect', 'Spider',
        'Treadmill', 'Cupboard', 'Shelf', 'Seat belt', 'Human foot',
        'Bicycle', 'Bicycle wheel', 'Couch', 'Bookcase', 'Fedora',
        'Backpack', 'Bench', 'Oyster', 'Moths and butterflies',
        'Lavender', 'Waffle', 'Fork', 'Animal', 'Accordion',
        'Mobile phone', 'Plate', 'Coffee cup', 'Saucer', 'Platter',
        'Dagger', 'Knife', 'Bull', 'Tortoise', 'Sea turtle', 'Deer',
        'Weapon', 'Apple', 'Ski', 'Taco', 'Traffic sign', 'Beer',
        'Necklace', 'Sunflower', 'Piano', 'Organ', 'Harpsichord', 'Bed',
        'Cabinetry', 'Nightstand', 'Curtain', 'Chest of drawers',
        'Drawer', 'Parrot', 'Sandal', 'High heels', 'Tableware', 'Cart',
        'Mushroom', 'Kite', 'Missile', 'Seafood', 'Camera',
        'Paper towel', 'Toilet paper', 'Sombrero', 'Radish',
        'Lighthouse', 'Segway', 'Pig', 'Watercraft', 'Golf cart',
        'studio couch', 'Dolphin', 'Whale', 'Earrings', 'Otter',
        'Sea lion', 'Whiteboard', 'Monkey', 'Gondola', 'Zebra',
        'Baseball glove', 'Scarf', 'Adhesive tape', 'Trousers',
        'Scoreboard', 'Lily', 'Carnivore', 'Power plugs and sockets',
        'Office building', 'Sandwich', 'Swimming pool', 'Headphones',
        'Tin can', 'Crown', 'Doll', 'Cake', 'Frog', 'Beetle', 'Ant',
        'Gas stove', 'Canoe', 'Falcon', 'Blue jay', 'Egg',
        'Fire hydrant', 'Raccoon', 'Muffin', 'Wall clock', 'Coffee',
        'Mug', 'Tea', 'Bear', 'Waste container', 'Home appliance',
        'Candle', 'Lion', 'Mirror', 'Starfish', 'Marine mammal',
        'Wheelchair', 'Umbrella', 'Alpaca', 'Violin', 'Cello',
        'Brown bear', 'Canary', 'Bat', 'Ruler', 'Plastic bag',
        'Penguin', 'Watermelon', 'Harbor seal', 'Pen', 'Pumpkin',
        'Harp', 'Kitchen appliance', 'Roller skates', 'Bust',
        'Coffee table', 'Tennis ball', 'Tennis racket', 'Ladder', 'Boot',
        'Bowl', 'Stop sign', 'Volleyball', 'Eagle', 'Paddle', 'Chicken',
        'Skull', 'Lamp', 'Beehive', 'Maple', 'Sink', 'Goldfish',
        'Tripod', 'Coconut', 'Bidet', 'Tap', 'Bathroom cabinet',
        'Toilet', 'Filing cabinet', 'Pretzel', 'Table tennis racket',
        'Bronze sculpture', 'Rocket', 'Mouse', 'Hamster', 'Lizard',
        'Lifejacket', 'Goat', 'Washing machine', 'Trumpet', 'Horn',
        'Trombone', 'Sheep', 'Tablet computer', 'Pillow',
        'Kitchen & dining room table', 'Parachute', 'Raven', 'Glove',
        'Loveseat', 'Christmas tree', 'Shellfish', 'Rifle', 'Shotgun',
        'Sushi', 'Sparrow', 'Bread', 'Toaster', 'Watch', 'Asparagus',
        'Artichoke', 'Suitcase', 'Antelope', 'Broccoli', 'Ice cream',
        'Racket', 'Banana', 'Cookie', 'Cucumber', 'Dragonfly', 'Lynx',
        'Caterpillar', 'Light bulb', 'Office supplies', 'Miniskirt',
        'Skirt', 'Fireplace', 'Potato', 'Light switch', 'Croissant',
        'Cabbage', 'Ladybug', 'Handgun', 'Luggage and bags',
        'Window blind', 'Snowboard', 'Baseball bat', 'Digital clock',
        'Serving tray', 'Infant bed', 'Sofa bed', 'Guacamole', 'Fox',
        'Pizza', 'Snowplow', 'Jet ski', 'Refrigerator', 'Lantern',
        'Convenience store', 'Sword', 'Rugby ball', 'Owl', 'Ostrich',
        'Pancake', 'Strawberry', 'Carrot', 'Tart', 'Dice', 'Turkey',
        'Rabbit', 'Invertebrate', 'Vase', 'Stool', 'Swim cap', 'Shower',
        'Clock', 'Jellyfish', 'Aircraft', 'Chopsticks', 'Orange',
        'Snake', 'Sewing machine', 'Kangaroo', 'Mixer', 'Food processor',
        'Shrimp', 'Towel', 'Porcupine', 'Jaguar', 'Cannon', 'Limousine',
        'Mule', 'Squirrel', 'Kitchen knife', 'Tiara', 'Tiger',
        'Bow and arrow', 'Candy', 'Rhinoceros', 'Shark', 'Cricket ball',
        'Doughnut', 'Plumbing fixture', 'Camel', 'Polar bear', 'Coin',
        'Printer', 'Blender', 'Giraffe', 'Billiard table', 'Kettle',
        'Dinosaur', 'Pineapple', 'Zucchini', 'Jug', 'Barge', 'Teapot',
        'Golf ball', 'Binoculars', 'Scissors', 'Hot dog', 'Door handle',
        'Seahorse', 'Bathtub', 'Leopard', 'Centipede', 'Grapefruit',
        'Snowman', 'Cheetah', 'Alarm clock', 'Grape', 'Wrench', 'Wok',
        'Bell pepper', 'Cake stand', 'Barrel', 'Woodpecker', 'Flute',
        'Corded phone', 'Willow', 'Punching bag', 'Pomegranate',
        'Telephone', 'Pear', 'Common fig', 'Bench', 'Wood-burning stove',
        'Burrito', 'Nail', 'Turtle', 'Submarine sandwich',
        'Drinking straw', 'Peach', 'Popcorn', 'Frying pan',
        'Picnic basket', 'Honeycomb', 'Envelope', 'Mango',
        'Cutting board', 'Pitcher', 'Stationary bicycle', 'Dumbbell',
        'Personal care', 'Dog bed', 'Snowmobile', 'Oboe', 'Briefcase',
        'Squash', 'Tick', 'Slow cooker', 'Coffeemaker', 'Measuring cup',
        'Crutch', 'Stretcher', 'Screwdriver', 'Flashlight', 'Spatula',
        'Pressure cooker', 'Ring binder', 'Beaker', 'Torch',
        'Winter melon']
def oid_v6_classes():
    """Return the class names of the Open Images V6 dataset."""
    return [
        'Tortoise', 'Container', 'Magpie', 'Sea turtle', 'Football',
        'Ambulance', 'Ladder', 'Toothbrush', 'Syringe', 'Sink', 'Toy',
        'Organ (Musical Instrument)', 'Cassette deck', 'Apple',
        'Human eye', 'Cosmetics', 'Paddle', 'Snowman', 'Beer',
        'Chopsticks', 'Human beard', 'Bird', 'Parking meter',
        'Traffic light', 'Croissant', 'Cucumber', 'Radish', 'Towel',
        'Doll', 'Skull', 'Washing machine', 'Glove', 'Tick', 'Belt',
        'Sunglasses', 'Banjo', 'Cart', 'Ball', 'Backpack', 'Bicycle',
        'Home appliance', 'Centipede', 'Boat', 'Surfboard', 'Boot',
        'Headphones', 'Hot dog', 'Shorts', 'Fast food', 'Bus', 'Boy',
        'Screwdriver', 'Bicycle wheel', 'Barge', 'Laptop', 'Miniskirt',
        'Drill (Tool)', 'Dress', 'Bear', 'Waffle', 'Pancake',
        'Brown bear', 'Woodpecker', 'Blue jay', 'Pretzel', 'Bagel',
        'Tower', 'Teapot', 'Person', 'Bow and arrow', 'Swimwear',
        'Beehive', 'Brassiere', 'Bee', 'Bat (Animal)', 'Starfish',
        'Popcorn', 'Burrito', 'Chainsaw', 'Balloon', 'Wrench', 'Tent',
        'Vehicle registration plate', 'Lantern', 'Toaster', 'Flashlight',
        'Billboard', 'Tiara', 'Limousine', 'Necklace', 'Carnivore',
        'Scissors', 'Stairs', 'Computer keyboard', 'Printer',
        'Traffic sign', 'Chair', 'Shirt', 'Poster', 'Cheese', 'Sock',
        'Fire hydrant', 'Land vehicle', 'Earrings', 'Tie', 'Watercraft',
        'Cabinetry', 'Suitcase', 'Muffin', 'Bidet', 'Snack',
        'Snowmobile', 'Clock', 'Medical equipment', 'Cattle', 'Cello',
        'Jet ski', 'Camel', 'Coat', 'Suit', 'Desk', 'Cat',
        'Bronze sculpture', 'Juice', 'Gondola', 'Beetle', 'Cannon',
        'Computer mouse', 'Cookie', 'Office building', 'Fountain',
        'Coin', 'Calculator', 'Cocktail', 'Computer monitor', 'Box',
        'Stapler', 'Christmas tree', 'Cowboy hat', 'Hiking equipment',
        'Studio couch', 'Drum', 'Dessert', 'Wine rack', 'Drink',
        'Zucchini', 'Ladle', 'Human mouth', 'Dairy Product', 'Dice',
        'Oven', 'Dinosaur', 'Ratchet (Device)', 'Couch', 'Cricket ball',
        'Winter melon', 'Spatula', 'Whiteboard', 'Pencil sharpener',
        'Door', 'Hat', 'Shower', 'Eraser', 'Fedora', 'Guacamole',
        'Dagger', 'Scarf', 'Dolphin', 'Sombrero', 'Tin can', 'Mug',
        'Tap', 'Harbor seal', 'Stretcher', 'Can opener', 'Goggles',
        'Human body', 'Roller skates', 'Coffee cup', 'Cutting board',
        'Blender', 'Plumbing fixture', 'Stop sign', 'Office supplies',
        'Volleyball (Ball)', 'Vase', 'Slow cooker', 'Wardrobe', 'Coffee',
        'Whisk', 'Paper towel', 'Personal care', 'Food', 'Sun hat',
        'Tree house', 'Flying disc', 'Skirt', 'Gas stove',
        'Salt and pepper shakers', 'Mechanical fan', 'Face powder',
        'Fax', 'Fruit', 'French fries', 'Nightstand', 'Barrel', 'Kite',
        'Tart', 'Treadmill', 'Fox', 'Flag', 'French horn',
        'Window blind', 'Human foot', 'Golf cart', 'Jacket',
        'Egg (Food)', 'Street light', 'Guitar', 'Pillow', 'Human leg',
        'Isopod', 'Grape', 'Human ear', 'Power plugs and sockets',
        'Panda', 'Giraffe', 'Woman', 'Door handle', 'Rhinoceros',
        'Bathtub', 'Goldfish', 'Houseplant', 'Goat', 'Baseball bat',
        'Baseball glove', 'Mixing bowl', 'Marine invertebrates',
        'Kitchen utensil', 'Light switch', 'House', 'Horse',
        'Stationary bicycle', 'Hammer', 'Ceiling fan', 'Sofa bed',
        'Adhesive tape', 'Harp', 'Sandal', 'Bicycle helmet', 'Saucer',
        'Harpsichord', 'Human hair', 'Heater', 'Harmonica', 'Hamster',
        'Curtain', 'Bed', 'Kettle', 'Fireplace', 'Scale',
        'Drinking straw', 'Insect', 'Hair dryer', 'Kitchenware',
        'Indoor rower', 'Invertebrate', 'Food processor', 'Bookcase',
        'Refrigerator', 'Wood-burning stove', 'Punching bag',
        'Common fig', 'Cocktail shaker', 'Jaguar (Animal)', 'Golf ball',
        'Fashion accessory', 'Alarm clock', 'Filing cabinet',
        'Artichoke', 'Table', 'Tableware', 'Kangaroo', 'Koala', 'Knife',
        'Bottle', 'Bottle opener', 'Lynx', 'Lavender (Plant)',
        'Lighthouse', 'Dumbbell', 'Human head', 'Bowl', 'Humidifier',
        'Porch', 'Lizard', 'Billiard table', 'Mammal', 'Mouse',
        'Motorcycle', 'Musical instrument', 'Swim cap', 'Frying pan',
        'Snowplow', 'Bathroom cabinet', 'Missile', 'Bust', 'Man',
        'Waffle iron', 'Milk', 'Ring binder', 'Plate', 'Mobile phone',
        'Baked goods', 'Mushroom', 'Crutch', 'Pitcher (Container)',
        'Mirror', 'Personal flotation device', 'Table tennis racket',
        'Pencil case', 'Musical keyboard', 'Scoreboard', 'Briefcase',
        'Kitchen knife', 'Nail (Construction)', 'Tennis ball',
        'Plastic bag', 'Oboe', 'Chest of drawers', 'Ostrich', 'Piano',
        'Girl', 'Plant', 'Potato', 'Hair spray', 'Sports equipment',
        'Pasta', 'Penguin', 'Pumpkin', 'Pear', 'Infant bed',
        'Polar bear', 'Mixer', 'Cupboard', 'Jacuzzi', 'Pizza',
        'Digital clock', 'Pig', 'Reptile', 'Rifle', 'Lipstick',
        'Skateboard', 'Raven', 'High heels', 'Red panda', 'Rose',
        'Rabbit', 'Sculpture', 'Saxophone', 'Shotgun', 'Seafood',
        'Submarine sandwich', 'Snowboard', 'Sword', 'Picture frame',
        'Sushi', 'Loveseat', 'Ski', 'Squirrel', 'Tripod', 'Stethoscope',
        'Submarine', 'Scorpion', 'Segway', 'Training bench', 'Snake',
        'Coffee table', 'Skyscraper', 'Sheep', 'Television', 'Trombone',
        'Tea', 'Tank', 'Taco', 'Telephone', 'Torch', 'Tiger',
        'Strawberry', 'Trumpet', 'Tree', 'Tomato', 'Train', 'Tool',
        'Picnic basket', 'Cooking spray', 'Trousers',
        'Bowling equipment', 'Football helmet', 'Truck', 'Measuring cup',
        'Coffeemaker', 'Violin', 'Vehicle', 'Handbag', 'Paper cutter',
        'Wine', 'Weapon', 'Wheel', 'Worm', 'Wok', 'Whale', 'Zebra',
        'Auto part', 'Jug', 'Pizza cutter', 'Cream', 'Monkey', 'Lion',
        'Bread', 'Platter', 'Chicken', 'Eagle', 'Helicopter', 'Owl',
        'Duck', 'Turtle', 'Hippopotamus', 'Crocodile', 'Toilet',
        'Toilet paper', 'Squid', 'Clothing', 'Footwear', 'Lemon',
        'Spider', 'Deer', 'Frog', 'Banana', 'Rocket', 'Wine glass',
        'Countertop', 'Tablet computer', 'Waste container',
        'Swimming pool', 'Dog', 'Book', 'Elephant', 'Shark', 'Candle',
        'Leopard', 'Axe', 'Hand dryer', 'Soap dispenser', 'Porcupine',
        'Flower', 'Canary', 'Cheetah', 'Palm tree', 'Hamburger',
        'Maple', 'Building', 'Fish', 'Lobster', 'Garden Asparagus',
        'Furniture', 'Hedgehog', 'Airplane', 'Spoon', 'Otter', 'Bull',
        'Oyster', 'Horizontal bar', 'Convenience store', 'Bomb',
        'Bench', 'Ice cream', 'Caterpillar', 'Butterfly', 'Parachute',
        'Orange', 'Antelope', 'Beaker', 'Moths and butterflies',
        'Window', 'Closet', 'Castle', 'Jellyfish', 'Goose', 'Mule',
        'Swan', 'Peach', 'Coconut', 'Seat belt', 'Raccoon', 'Chisel',
        'Fork', 'Lamp', 'Camera', 'Squash (Plant)', 'Racket',
        'Human face', 'Human arm', 'Vegetable', 'Diaper', 'Unicycle',
        'Falcon', 'Chime', 'Snail', 'Shellfish', 'Cabbage', 'Carrot',
        'Mango', 'Jeans', 'Flowerpot', 'Pineapple', 'Drawer', 'Stool',
        'Envelope', 'Cake', 'Dragonfly', 'Common sunflower',
        'Microwave oven', 'Honeycomb', 'Marine mammal', 'Sea lion',
        'Ladybug', 'Shelf', 'Watch', 'Candy', 'Salad', 'Parrot',
        'Handgun', 'Sparrow', 'Van', 'Grinder', 'Spice rack',
        'Light bulb', 'Corded phone', 'Sports uniform', 'Tennis racket',
        'Wall clock', 'Serving tray', 'Kitchen & dining room table',
        'Dog bed', 'Cake stand', 'Cat furniture', 'Bathroom accessory',
        'Facial tissue holder', 'Pressure cooker', 'Kitchen appliance',
        'Tire', 'Ruler', 'Luggage and bags', 'Microphone', 'Broccoli',
        'Umbrella', 'Pastry', 'Grapefruit', 'Band-aid', 'Animal',
        'Bell pepper', 'Turkey', 'Lily', 'Pomegranate', 'Doughnut',
        'Glasses', 'Human nose', 'Pen', 'Ant', 'Car', 'Aircraft',
        'Human hand', 'Skunk', 'Teddy bear', 'Watermelon', 'Cantaloupe',
        'Dishwasher', 'Flute', 'Balance beam', 'Sandwich', 'Shrimp',
        'Sewing machine', 'Binoculars', 'Rays and skates', 'Ipod',
        'Accordion', 'Willow', 'Crab', 'Crown', 'Seahorse', 'Perfume',
        'Alpaca', 'Taxi', 'Canoe', 'Remote control', 'Wheelchair',
        'Rugby ball', 'Armadillo', 'Maracas', 'Helmet']
def get_classes(dataset):
    """Get class names of a dataset.

    Args:
        dataset (str): Dataset name or any registered alias of it
            (see ``dataset_aliases``).

    Returns:
        list[str]: Class names of the dataset.

    Raises:
        ValueError: If ``dataset`` is a str but not a known alias.
        TypeError: If ``dataset`` is not a str.
    """
    # Build a reverse mapping: alias -> canonical dataset name.
    alias2name = {}
    for name, aliases in dataset_aliases.items():
        for alias in aliases:
            alias2name[alias] = name
    if mmcv.is_str(dataset):
        if dataset in alias2name:
            # Look the loader function up by name in this module instead of
            # using eval() on a constructed string, which is fragile and
            # would execute arbitrary code if the alias table were tampered
            # with. Behavior is identical: `<name>_classes()` is called.
            labels = globals()[alias2name[dataset] + '_classes']()
        else:
            raise ValueError(f'Unrecognized dataset: {dataset}')
    else:
        raise TypeError(f'dataset must a str, but got {type(dataset)}')
    return labels
def _calc_dynamic_intervals(start_interval, dynamic_interval_list):
    """Expand a dynamic-interval spec into milestone and interval lists.

    Args:
        start_interval (int): Interval used before the first milestone.
        dynamic_interval_list (list[tuple[int, int]]): Pairs of
            ``(milestone, interval)``; from each milestone on, the paired
            interval takes effect.

    Returns:
        tuple[list[int], list[int]]: Parallel lists of milestones
        (starting with 0) and intervals (starting with
        ``start_interval``).
    """
    assert mmcv.is_list_of(dynamic_interval_list, tuple)
    milestones = [0] + [pair[0] for pair in dynamic_interval_list]
    intervals = [start_interval] + [pair[1] for pair in dynamic_interval_list]
    return milestones, intervals
class EvalHook(BaseEvalHook):
    """Evaluation hook with support for dynamic evaluation intervals.

    Args:
        dynamic_intervals (list[tuple[int, int]], optional): Pairs of
            ``(milestone, interval)``; once training progress reaches a
            milestone, the paired evaluation interval takes effect.
            Defaults to None (use the fixed ``interval`` from the base
            class).
    """

    def __init__(self, *args, dynamic_intervals=None, **kwargs):
        super(EvalHook, self).__init__(*args, **kwargs)

        self.use_dynamic_intervals = dynamic_intervals is not None
        if self.use_dynamic_intervals:
            self.dynamic_milestones, self.dynamic_intervals = \
                _calc_dynamic_intervals(self.interval, dynamic_intervals)

    def _decide_interval(self, runner):
        # Select the interval matching the current training progress.
        if self.use_dynamic_intervals:
            progress = runner.epoch if self.by_epoch else runner.iter
            step = bisect.bisect(self.dynamic_milestones, progress + 1)
            self.interval = self.dynamic_intervals[step - 1]

    def before_train_epoch(self, runner):
        """Evaluate the model only at the start of training by epoch."""
        self._decide_interval(runner)
        super().before_train_epoch(runner)

    def before_train_iter(self, runner):
        self._decide_interval(runner)
        super().before_train_iter(runner)

    def _do_evaluate(self, runner):
        """perform evaluation and save ckpt."""
        if not self._should_evaluate(runner):
            return

        from mmdet.apis import single_gpu_test
        results = single_gpu_test(runner.model, self.dataloader, show=False)
        runner.log_buffer.output['eval_iter_num'] = len(self.dataloader)
        key_score = self.evaluate(runner, results)
        # BUGFIX: the previous truthiness test (`if ... and key_score:`)
        # also skipped saving when the metric legitimately evaluated to 0;
        # only a None score (evaluation skipped) should be skipped.
        if self.save_best and key_score is not None:
            self._save_ckpt(runner, key_score)
class DistEvalHook(BaseDistEvalHook):
    """Distributed evaluation hook with dynamic evaluation intervals.

    Args:
        dynamic_intervals (list[tuple[int, int]], optional): Pairs of
            ``(milestone, interval)``; once training progress reaches a
            milestone, the paired evaluation interval takes effect.
            Defaults to None (use the fixed ``interval`` from the base
            class).
    """

    def __init__(self, *args, dynamic_intervals=None, **kwargs):
        super(DistEvalHook, self).__init__(*args, **kwargs)

        self.use_dynamic_intervals = dynamic_intervals is not None
        if self.use_dynamic_intervals:
            self.dynamic_milestones, self.dynamic_intervals = \
                _calc_dynamic_intervals(self.interval, dynamic_intervals)

    def _decide_interval(self, runner):
        # Select the interval matching the current training progress.
        if self.use_dynamic_intervals:
            progress = runner.epoch if self.by_epoch else runner.iter
            step = bisect.bisect(self.dynamic_milestones, progress + 1)
            self.interval = self.dynamic_intervals[step - 1]

    def before_train_epoch(self, runner):
        """Evaluate the model only at the start of training by epoch."""
        self._decide_interval(runner)
        super().before_train_epoch(runner)

    def before_train_iter(self, runner):
        self._decide_interval(runner)
        super().before_train_iter(runner)

    def _do_evaluate(self, runner):
        """perform evaluation and save ckpt."""
        # Broadcast rank-0 BatchNorm running stats so every rank evaluates
        # with identical buffers.
        if self.broadcast_bn_buffer:
            model = runner.model
            for name, module in model.named_modules():
                if isinstance(module,
                              _BatchNorm) and module.track_running_stats:
                    dist.broadcast(module.running_var, 0)
                    dist.broadcast(module.running_mean, 0)

        if not self._should_evaluate(runner):
            return

        tmpdir = self.tmpdir
        if tmpdir is None:
            tmpdir = osp.join(runner.work_dir, '.eval_hook')

        from mmdet.apis import multi_gpu_test
        results = multi_gpu_test(
            runner.model,
            self.dataloader,
            tmpdir=tmpdir,
            gpu_collect=self.gpu_collect)
        if runner.rank == 0:
            print('\n')
            runner.log_buffer.output['eval_iter_num'] = len(self.dataloader)
            key_score = self.evaluate(runner, results)
            # BUGFIX: the previous truthiness test (`if ... and key_score:`)
            # also skipped saving when the metric legitimately evaluated to
            # 0; only a None score (evaluation skipped) should be skipped.
            if self.save_best and key_score is not None:
                self._save_ckpt(runner, key_score)
def generate_inputs_and_wrap_model(config_path, checkpoint_path, input_config, cfg_options=None):
    """Prepare sample input and wrap model for ONNX export.

    The ONNX export API only accept args, and all inputs should be
    torch.Tensor or corresponding types (such as tuple of tensor).
    So we should call this function before exporting. This function will:

    1. generate corresponding inputs which are used to execute the model.
    2. Wrap the model's forward function.

    For example, the MMDet models' forward function has a parameter
    ``return_loss:bool``. As we want to set it as False while export API
    supports neither bool type or kwargs. So we have to replace the forward
    method like ``model.forward = partial(model.forward, return_loss=False)``.

    Args:
        config_path (str): the OpenMMLab config for the model we want to
            export to ONNX
        checkpoint_path (str): Path to the corresponding checkpoint
        input_config (dict): the exactly data in this dict depends on the
            framework. For MMSeg, we can just declare the input shape,
            and generate the dummy data accordingly. However, for MMDet,
            we may pass the real img path, or the NMS will return None
            as there is no legal bbox.
        cfg_options (dict, optional): Overrides merged into the loaded
            config. Defaults to None.

    Returns:
        tuple: (model, tensor_data) wrapped model which can be called by
        ``model(*tensor_data)`` and a list of inputs which are used to
        execute the model while exporting.
    """
    model = build_model_from_cfg(config_path, checkpoint_path, cfg_options=cfg_options)
    (one_img, one_meta) = preprocess_example_input(input_config)
    tensor_data = [one_img]
    # Freeze img_metas/return_loss so the exporter only passes tensors.
    model.forward = partial(model.forward, img_metas=[[one_meta]], return_loss=False)
    # Symbolic registration targets ONNX opset 11.
    opset_version = 11
    try:
        from mmcv.onnx.symbolic import register_extra_symbolics
    except ModuleNotFoundError:
        raise NotImplementedError('please update mmcv to version>=v1.0.4')
    register_extra_symbolics(opset_version)
    return (model, tensor_data)
def build_model_from_cfg(config_path, checkpoint_path, cfg_options=None):
    """Build a detector from config and load the given checkpoint.

    Args:
        config_path (str): the OpenMMLab config for the model we want to
            export to ONNX
        checkpoint_path (str): Path to the corresponding checkpoint
        cfg_options (dict, optional): Overrides merged into the loaded
            config. Defaults to None.

    Returns:
        torch.nn.Module: the built model, on CPU and in eval mode
    """
    from mmdet.models import build_detector

    cfg = mmcv.Config.fromfile(config_path)
    if cfg_options is not None:
        cfg.merge_from_dict(cfg_options)
    if cfg.get('cudnn_benchmark', False):
        # set cudnn_benchmark
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True
    cfg.model.train_cfg = None

    # build the model
    detector = build_detector(cfg.model, test_cfg=cfg.get('test_cfg'))
    ckpt = load_checkpoint(detector, checkpoint_path, map_location='cpu')
    meta = ckpt.get('meta', {})
    if 'CLASSES' in meta:
        # class names stored in the checkpoint take precedence
        detector.CLASSES = meta['CLASSES']
    else:
        # fall back to the class names of the configured test dataset
        from mmdet.datasets import DATASETS
        dataset = DATASETS.get(cfg.data.test['type'])
        assert dataset is not None
        detector.CLASSES = dataset.CLASSES
    detector.cpu().eval()
    return detector
def preprocess_example_input(input_config):
    """Prepare an example input image for ``generate_inputs_and_wrap_model``.

    Args:
        input_config (dict): customized config describing the example input.
            Must contain ``input_path`` and ``input_shape``; may contain a
            ``normalize_cfg`` dict with ``mean``/``std`` (and optionally
            ``to_rgb``).

    Returns:
        tuple: (one_img, one_meta), tensor of the example input image and
        meta information for the example input image.

    Examples:
        >>> from mmdet.core.export import preprocess_example_input
        >>> input_config = {
        >>>     'input_shape': (1,3,224,224),
        >>>     'input_path': 'demo/demo.jpg',
        >>>     'normalize_cfg': {
        >>>         'mean': (123.675, 116.28, 103.53),
        >>>         'std': (58.395, 57.12, 57.375)
        >>>     }
        >>> }
        >>> one_img, one_meta = preprocess_example_input(input_config)
        >>> print(one_img.shape)
        torch.Size([1, 3, 224, 224])
    """
    img_path = input_config['input_path']
    img_shape = input_config['input_shape']

    # Load and resize to (W, H) taken from the (N, C, H, W) input shape.
    img = mmcv.imread(img_path)
    img = mmcv.imresize(img, img_shape[2:][::-1])
    show_img = img.copy()

    norm_cfg = input_config.get('normalize_cfg')
    if norm_cfg is not None:
        mean = np.array(norm_cfg['mean'], dtype=np.float32)
        std = np.array(norm_cfg['std'], dtype=np.float32)
        to_rgb = norm_cfg.get('to_rgb', True)
        img = mmcv.imnormalize(img, mean, std, to_rgb=to_rgb)

    # HWC -> CHW, add batch dim; gradients are enabled for the exporter.
    img = img.transpose(2, 0, 1)
    img_tensor = torch.from_numpy(img).unsqueeze(0).float().requires_grad_(
        True)

    _, C, H, W = img_shape
    meta = {
        'img_shape': (H, W, C),
        'ori_shape': (H, W, C),
        'pad_shape': (H, W, C),
        'filename': '<demo>.png',
        'scale_factor': np.ones(4, dtype=np.float32),
        'flip': False,
        'show_img': show_img,
        'flip_direction': None
    }
    return (img_tensor, meta)
@HOOKS.register_module()
class CheckInvalidLossHook(Hook):
    """Check invalid loss hook.

    This hook will regularly check whether the loss is valid
    during training.

    Args:
        interval (int): Checking interval (every k iterations).
            Default: 50.
    """

    def __init__(self, interval=50):
        self.interval = interval

    def after_train_iter(self, runner):
        if self.every_n_iters(runner, self.interval):
            loss = runner.outputs['loss']
            # BUGFIX: the original used
            # `assert torch.isfinite(...), runner.logger.info(...)`, which
            # made the AssertionError carry `None` (the logger's return
            # value) as its message; `assert` is also stripped under `-O`.
            # Log, then raise explicitly with a real message.
            if not torch.isfinite(loss):
                runner.logger.info('loss become infinite or NaN!')
                raise AssertionError('loss become infinite or NaN!')
class BaseEMAHook(Hook):
    """Exponential Moving Average Hook.

    Use Exponential Moving Average on all parameters of model in training
    process. All parameters have a ema backup, which update by the formula
    as below. EMAHook takes priority over EvalHook and CheckpointHook. Note,
    the original model parameters are actually saved in ema field after
    train.

    Args:
        momentum (float): The momentum used for updating ema parameter.
            Ema's parameter are updated with the formula:
            `ema_param = (1-momentum) * ema_param + momentum * cur_param`.
            Defaults to 0.0002.
        skip_buffers (bool): Whether to skip the model buffers, such as
            batchnorm running stats (running_mean, running_var), it does not
            perform the ema operation. Default to False.
        interval (int): Update ema parameter every interval iteration.
            Defaults to 1.
        resume_from (str, optional): The checkpoint path. Defaults to None.
        momentum_fun (func, optional): The function to change momentum
            during early iteration (also warmup) to help early training.
            It uses `momentum` as a constant. Defaults to None.
    """

    def __init__(self, momentum=0.0002, interval=1, skip_buffers=False, resume_from=None, momentum_fun=None):
        assert (0 < momentum < 1)
        self.momentum = momentum
        self.skip_buffers = skip_buffers
        self.interval = interval
        # Checkpoint to resume from (so previously saved EMA buffers are
        # restored after registration in before_run).
        self.checkpoint = resume_from
        self.momentum_fun = momentum_fun

    def before_run(self, runner):
        """To resume model with it's ema parameters more friendly.

        Register ema parameter as ``named_buffer`` to model.
        """
        model = runner.model
        if is_module_wrapper(model):
            model = model.module
        self.param_ema_buffer = {}
        # With skip_buffers only learnable parameters get an EMA copy;
        # otherwise every state_dict entry (params + buffers) does.
        if self.skip_buffers:
            self.model_parameters = dict(model.named_parameters())
        else:
            self.model_parameters = model.state_dict()
        for (name, value) in self.model_parameters.items():
            # '.' is not allowed in buffer names, so flatten the dotted path.
            buffer_name = f"ema_{name.replace('.', '_')}"
            self.param_ema_buffer[name] = buffer_name
            model.register_buffer(buffer_name, value.data.clone())
        self.model_buffers = dict(model.named_buffers())
        # Resume AFTER registering the EMA buffers so the checkpoint's EMA
        # state lands in them.
        if (self.checkpoint is not None):
            runner.resume(self.checkpoint)

    def get_momentum(self, runner):
        # Use the scheduled momentum if a momentum_fun was given, otherwise
        # the constant.
        return (self.momentum_fun(runner.iter) if self.momentum_fun else self.momentum)

    def after_train_iter(self, runner):
        """Update ema parameter every self.interval iterations."""
        if (((runner.iter + 1) % self.interval) != 0):
            return
        momentum = self.get_momentum(runner)
        for (name, parameter) in self.model_parameters.items():
            # Only float tensors are averaged; integer buffers (e.g.
            # num_batches_tracked) are left untouched.
            if parameter.dtype.is_floating_point:
                buffer_name = self.param_ema_buffer[name]
                buffer_parameter = self.model_buffers[buffer_name]
                # ema = (1 - momentum) * ema + momentum * param
                buffer_parameter.mul_((1 - momentum)).add_(parameter.data, alpha=momentum)

    def after_train_epoch(self, runner):
        """We load parameter values from ema backup to model before the
        EvalHook."""
        self._swap_ema_parameters()

    def before_train_epoch(self, runner):
        """We recover model's parameter from ema backup after last epoch's
        EvalHook."""
        self._swap_ema_parameters()

    def _swap_ema_parameters(self):
        """Swap the parameter of model with parameter in ema_buffer."""
        for (name, value) in self.model_parameters.items():
            temp = value.data.clone()
            ema_buffer = self.model_buffers[self.param_ema_buffer[name]]
            value.data.copy_(ema_buffer.data)
            ema_buffer.data.copy_(temp)
@HOOKS.register_module()
class ExpMomentumEMAHook(BaseEMAHook):
    """EMAHook using exponential momentum strategy.

    Args:
        total_iter (int): The total number of iterations of EMA momentum.
            Defaults to 2000.
    """

    def __init__(self, total_iter=2000, **kwargs):
        super(ExpMomentumEMAHook, self).__init__(**kwargs)

        def _exp_decay_momentum(step):
            # The exp factor decays from ~1 toward 0 over total_iter steps,
            # so the effective momentum ramps down to ``self.momentum``.
            scale = math.exp(-(1 + step) / total_iter)
            return (1 - self.momentum) * scale + self.momentum

        self.momentum_fun = _exp_decay_momentum
@HOOKS.register_module()
class LinearMomentumEMAHook(BaseEMAHook):
    """EMAHook using linear momentum strategy.

    Args:
        warm_up (int): During first warm_up steps, we may use smaller decay
            to update ema parameters more slowly. Defaults to 100.
    """

    def __init__(self, warm_up=100, **kwargs):
        super(LinearMomentumEMAHook, self).__init__(**kwargs)

        def _linear_momentum(step):
            # Warmup term approaches 1 as step grows; the constant term
            # caps the momentum afterwards.
            warm = (1 + step) / (warm_up + step)
            return min(self.momentum**self.interval, warm)

        self.momentum_fun = _linear_momentum
@HOOKS.register_module()
class SetEpochInfoHook(Hook):
    """Set runner's epoch information to the model."""

    def before_train_epoch(self, runner):
        # Unwrap DP/DDP wrappers before forwarding the epoch to the model.
        model = runner.model
        if is_module_wrapper(model):
            model = model.module
        model.set_epoch(runner.epoch)
def get_norm_states(module):
    """Collect the state dicts of all norm layers inside ``module``.

    Args:
        module (nn.Module): Module to traverse recursively.

    Returns:
        OrderedDict: Mapping from dotted state name
        (``<submodule name>.<state key>``) to its tensor value, for every
        submodule that is a ``_NormBase`` (BatchNorm-like) layer.
    """
    norm_states = OrderedDict()
    for name, child in module.named_modules():
        if not isinstance(child, nn.modules.batchnorm._NormBase):
            continue
        for key, value in child.state_dict().items():
            norm_states[f'{name}.{key}'] = value
    return norm_states
@HOOKS.register_module()
class SyncNormHook(Hook):
    """Synchronize Norm states after training epoch, currently used in YOLOX.

    Args:
        num_last_epochs (int): The number of latter epochs in the end of the
            training to switch to synchronizing norm interval. Default: 15.
        interval (int): Synchronizing norm interval. Default: 1.
    """

    def __init__(self, num_last_epochs=15, interval=1):
        self.interval = interval
        self.num_last_epochs = num_last_epochs

    def before_train_epoch(self, runner):
        # Once the final `num_last_epochs` begin, sync every epoch.
        if (runner.epoch + 1) == runner.max_epochs - self.num_last_epochs:
            self.interval = 1

    def after_train_epoch(self, runner):
        """Synchronizing norm."""
        if ((runner.epoch + 1) % self.interval) != 0:
            return
        _, world_size = get_dist_info()
        if world_size == 1:
            # single process: nothing to synchronize
            return
        norm_states = get_norm_states(runner.model)
        if len(norm_states) == 0:
            return
        # Average the norm statistics across ranks, then load them back.
        norm_states = all_reduce_dict(norm_states, op='mean')
        runner.model.load_state_dict(norm_states, strict=False)
@HOOKS.register_module()
class SyncRandomSizeHook(Hook):
    """Change and synchronize the random image size across ranks.

    SyncRandomSizeHook is deprecated, please use Resize pipeline to achieve
    similar functions. Such as `dict(type='Resize', img_scale=[(448, 448),
    (832, 832)], multiscale_mode='range', keep_ratio=True)`.

    Note: Due to the multi-process dataloader, its behavior is different
    from YOLOX's official implementation, the official is to change the
    size every fixed iteration interval and what we achieved is a fixed
    epoch interval.

    Args:
        ratio_range (tuple[int]): Random ratio range. It will be multiplied
            by 32, and then change the dataset output image size.
            Default: (14, 26).
        img_scale (tuple[int]): Size of input image. Default: (640, 640).
        interval (int): The epoch interval of change image size. Default: 1.
        device (torch.device | str): device for returned tensors.
            Default: 'cuda'.
    """

    def __init__(self, ratio_range=(14, 26), img_scale=(640, 640), interval=1, device='cuda'):
        warnings.warn("DeprecationWarning: SyncRandomSizeHook is deprecated. Please use Resize pipeline to achieve similar functions. Due to the multi-process dataloader, its behavior is different from YOLOX's official implementation, the official is to change the size every fixed iteration interval and what we achieved is a fixed epoch interval.")
        (self.rank, world_size) = get_dist_info()
        self.is_distributed = (world_size > 1)
        self.ratio_range = ratio_range
        self.img_scale = img_scale
        self.interval = interval
        self.device = device

    def after_train_epoch(self, runner):
        """Change the dataset output image size."""
        if ((self.ratio_range is not None) and (((runner.epoch + 1) % self.interval) == 0)):
            # Rank 0 samples the new size; the other ranks receive it via
            # broadcast so every process uses the same scale.
            tensor = torch.LongTensor(2).to(self.device)
            if (self.rank == 0):
                size_factor = ((self.img_scale[1] * 1.0) / self.img_scale[0])
                size = random.randint(*self.ratio_range)
                # Sizes are multiples of 32; the second dim keeps the
                # original aspect ratio of img_scale.
                size = (int((32 * size)), (32 * int((size * size_factor))))
                tensor[0] = size[0]
                tensor[1] = size[1]
            if self.is_distributed:
                dist.barrier()
                dist.broadcast(tensor, 0)
            runner.data_loader.dataset.update_dynamic_scale((tensor[0].item(), tensor[1].item()))
@HOOKS.register_module()
class YOLOXLrUpdaterHook(CosineAnnealingLrUpdaterHook):
    """YOLOX learning rate scheme.

    There are two main differences between YOLOXLrUpdaterHook
    and CosineAnnealingLrUpdaterHook.

    1. When the current running epoch is greater than
       `max_epoch-last_epoch`, a fixed learning rate will be used
    2. The exp warmup scheme is different with LrUpdaterHook in MMCV

    Args:
        num_last_epochs (int): The number of epochs with a fixed learning
            rate before the end of the training.
    """

    def __init__(self, num_last_epochs, **kwargs):
        self.num_last_epochs = num_last_epochs
        super(YOLOXLrUpdaterHook, self).__init__(**kwargs)

    def get_warmup_lr(self, cur_iters):

        def _get_warmup_lr(cur_iters, regular_lr):
            # exp warmup: lr grows with (iter / warmup_iters) ** 2
            k = (self.warmup_ratio * pow(((cur_iters + 1) / float(self.warmup_iters)), 2))
            warmup_lr = [(_lr * k) for _lr in regular_lr]
            return warmup_lr

        if isinstance(self.base_lr, dict):
            # per-parameter-group base lrs
            lr_groups = {}
            for (key, base_lr) in self.base_lr.items():
                lr_groups[key] = _get_warmup_lr(cur_iters, base_lr)
            return lr_groups
        else:
            return _get_warmup_lr(cur_iters, self.base_lr)

    def get_lr(self, runner, base_lr):
        # Number of iterations covered by the final fixed-lr phase.
        # NOTE(review): when by_epoch is True, `progress` counts epochs
        # while `last_iter` counts iterations — presumably this hook is
        # used with by_epoch=False (as in YOLOX configs); verify.
        last_iter = (len(runner.data_loader) * self.num_last_epochs)
        if self.by_epoch:
            progress = runner.epoch
            max_progress = runner.max_epochs
        else:
            progress = runner.iter
            max_progress = runner.max_iters
        progress += 1
        if (self.min_lr_ratio is not None):
            target_lr = (base_lr * self.min_lr_ratio)
        else:
            target_lr = self.min_lr
        if (progress >= (max_progress - last_iter)):
            # fixed learning rate during the final phase
            return target_lr
        else:
            # cosine annealing over the remaining (non-warmup, non-final)
            # span of training
            return annealing_cos(base_lr, target_lr, ((progress - self.warmup_iters) / ((max_progress - self.warmup_iters) - last_iter)))
@HOOKS.register_module()
class YOLOXModeSwitchHook(Hook):
    """Switch the mode of YOLOX during training.

    This hook turns off the mosaic and mixup data augmentation and switches
    to use L1 loss in bbox_head.

    Args:
        num_last_epochs (int): The number of latter epochs in the end of the
            training to close the data augmentation and switch to L1 loss.
            Default: 15.
        skip_type_keys (list[str], optional): Sequence of type string to be
            skip pipeline. Default: ('Mosaic', 'RandomAffine', 'MixUp')
    """

    def __init__(self, num_last_epochs=15, skip_type_keys=('Mosaic', 'RandomAffine', 'MixUp')):
        self.num_last_epochs = num_last_epochs
        self.skip_type_keys = skip_type_keys
        # Tracks whether we invalidated a persistent-workers DataLoader so
        # it can be re-marked initialized on the following epochs.
        self._restart_dataloader = False

    def before_train_epoch(self, runner):
        """Close mosaic and mixup augmentation and switches to use L1 loss."""
        epoch = runner.epoch
        train_loader = runner.data_loader
        model = runner.model
        if is_module_wrapper(model):
            model = model.module
        if ((epoch + 1) == (runner.max_epochs - self.num_last_epochs)):
            runner.logger.info('No mosaic and mixup aug now!')
            train_loader.dataset.update_skip_type_keys(self.skip_type_keys)
            # With persistent_workers=True the workers would keep serving
            # the old pipeline; drop the private initialized flag and the
            # cached iterator so the DataLoader rebuilds its workers.
            if (hasattr(train_loader, 'persistent_workers') and (train_loader.persistent_workers is True)):
                train_loader._DataLoader__initialized = False
                train_loader._iterator = None
                self._restart_dataloader = True
            runner.logger.info('Add additional L1 loss now!')
            model.bbox_head.use_l1 = True
        elif self._restart_dataloader:
            # Once the switch epoch has passed, restore the flag so later
            # epochs reuse the persistent workers again.
            train_loader._DataLoader__initialized = True
def mask_matrix_nms(masks, labels, scores, filter_thr=(- 1), nms_pre=(- 1), max_num=(- 1), kernel='gaussian', sigma=2.0, mask_area=None):
    """Matrix NMS for multi-class masks.

    Args:
        masks (Tensor): Masks of shape (num_instances, h, w).
        labels (Tensor): Class label per mask, shape (num_instances,).
        scores (Tensor): Score per mask, shape (num_instances,).
        filter_thr (float): Score threshold applied after the decay;
            -1 disables filtering. Default: -1.
        nms_pre (int): Max number of candidates entering matrix NMS;
            -1 keeps all. Default: -1.
        max_num (int): Max number of results kept after NMS; -1 keeps
            all. Default: -1.
        kernel (str): Decay kernel, 'linear' or 'gaussian'.
        sigma (float): Std used by the gaussian kernel.
        mask_area (Tensor, optional): Precomputed per-mask areas; computed
            from ``masks`` when omitted.

    Returns:
        tuple(Tensor): ``(scores, labels, masks, keep_inds)`` where
        ``scores``/``labels`` have shape (n,), ``masks`` has shape
        (n, h, w) and ``keep_inds`` indexes the surviving masks in the
        original input order.
    """
    assert len(labels) == len(masks) == len(scores)
    if len(labels) == 0:
        # Nothing to suppress: return empty tensors of matching dtypes.
        return (scores.new_zeros(0), labels.new_zeros(0),
                masks.new_zeros(0, *masks.shape[-2:]), labels.new_zeros(0))

    if mask_area is None:
        mask_area = masks.sum((1, 2)).float()
    else:
        assert len(masks) == len(mask_area)

    # Rank candidates by descending score; optionally keep only the top
    # `nms_pre` before building the quadratic pairwise matrices.
    scores, sort_inds = torch.sort(scores, descending=True)
    keep_inds = sort_inds
    if nms_pre > 0 and len(sort_inds) > nms_pre:
        sort_inds = sort_inds[:nms_pre]
        keep_inds = keep_inds[:nms_pre]
        scores = scores[:nms_pre]
    masks = masks[sort_inds]
    mask_area = mask_area[sort_inds]
    labels = labels[sort_inds]

    num = len(labels)
    flat = masks.reshape(num, -1).float()
    # Pairwise intersection and IoU; keep only the upper triangle so each
    # pair is counted once, with the higher-scoring mask as the row.
    inter = torch.mm(flat, flat.transpose(1, 0))
    area = mask_area.expand(num, num)
    iou = (inter / (area + area.transpose(1, 0) - inter)).triu(diagonal=1)
    lbl = labels.expand(num, num)
    same_class = (lbl == lbl.transpose(1, 0)).triu(diagonal=1)

    # IoU with higher-scoring masks of the same class drives the decay;
    # `compensate` is the worst overlap each suppressor itself suffers.
    decay_iou = iou * same_class
    compensate, _ = decay_iou.max(0)
    compensate = compensate.expand(num, num).transpose(1, 0)

    if kernel == 'gaussian':
        decayed = torch.exp(-sigma * decay_iou ** 2)
        compensated = torch.exp(-sigma * compensate ** 2)
        coeff, _ = (decayed / compensated).min(0)
    elif kernel == 'linear':
        coeff, _ = ((1 - decay_iou) / (1 - compensate)).min(0)
    else:
        raise NotImplementedError(f'{kernel} kernel is not supported in matrix nms!')
    scores = scores * coeff

    if filter_thr > 0:
        keep = scores >= filter_thr
        keep_inds = keep_inds[keep]
        if not keep.any():
            return (scores.new_zeros(0), labels.new_zeros(0),
                    masks.new_zeros(0, *masks.shape[-2:]), labels.new_zeros(0))
        masks = masks[keep]
        scores = scores[keep]
        labels = labels[keep]

    # Re-rank by the decayed scores and cap the result at `max_num`.
    scores, sort_inds = torch.sort(scores, descending=True)
    keep_inds = keep_inds[sort_inds]
    if max_num > 0 and len(sort_inds) > max_num:
        sort_inds = sort_inds[:max_num]
        keep_inds = keep_inds[:max_num]
        scores = scores[:max_num]
    masks = masks[sort_inds]
    labels = labels[sort_inds]
    return (scores, labels, masks, keep_inds)
def _allreduce_coalesced(tensors, world_size, bucket_size_mb=(- 1)):
    """All-reduce ``tensors`` in flattened buckets and average in place.

    Args:
        tensors (list[Tensor]): Tensors to reduce across all workers.
        world_size (int): Number of participating processes.
        bucket_size_mb (int): Max bucket size in MB; -1 means one bucket
            per tensor type instead of size-bounded buckets.
    """
    if bucket_size_mb > 0:
        # Size-bounded buckets of (possibly mixed) tensors.
        buckets = _take_tensors(tensors, bucket_size_mb * 1024 * 1024)
    else:
        # One bucket per tensor type: flattening needs a uniform type.
        by_type = OrderedDict()
        for tensor in tensors:
            by_type.setdefault(tensor.type(), []).append(tensor)
        buckets = by_type.values()
    for bucket in buckets:
        # Reduce the whole bucket with a single collective call, then
        # average and scatter the result back into the original tensors.
        flat = _flatten_dense_tensors(bucket)
        dist.all_reduce(flat)
        flat.div_(world_size)
        for tensor, synced in zip(bucket, _unflatten_dense_tensors(flat, bucket)):
            tensor.copy_(synced)
def allreduce_grads(params, coalesce=True, bucket_size_mb=(- 1)):
    """Allreduce gradients.

    Args:
        params (list[torch.Parameters]): List of parameters of a model
        coalesce (bool, optional): Whether allreduce parameters as a whole.
            Defaults to True.
        bucket_size_mb (int, optional): Size of bucket, the unit is MB.
            Defaults to -1.
    """
    # Only parameters that actually received a gradient take part.
    grads = [
        p.grad.data for p in params
        if p.requires_grad and p.grad is not None
    ]
    world_size = dist.get_world_size()
    if coalesce:
        _allreduce_coalesced(grads, world_size, bucket_size_mb)
        return
    # Tensor-by-tensor fallback; divide first so the SUM yields the mean.
    for grad in grads:
        dist.all_reduce(grad.div_(world_size))
class DistOptimizerHook(OptimizerHook):
    """Deprecated optimizer hook for distributed training.

    Kept only for backward compatibility; it behaves exactly like
    ``mmcv.runner.OptimizerHook``, which should be used instead.
    """

    def __init__(self, *args, **kwargs):
        # Fix: the original message ran the words together
        # ('...switch to"mmcv.runner.OptimizerHook"') — missing space.
        warnings.warn('"DistOptimizerHook" is deprecated, please switch to '
                      '"mmcv.runner.OptimizerHook".')
        super().__init__(*args, **kwargs)
def reduce_mean(tensor):
    """Obtain the mean of a tensor across all processes.

    Each rank contributes ``tensor / world_size`` to an all-reduce SUM, so
    every process ends up holding the element-wise mean. When distributed
    training is unavailable or uninitialized, the input is returned
    unchanged. (Fixes a malformed docstring that began with a stray quote.)

    Args:
        tensor (Tensor): The tensor to average across processes.

    Returns:
        Tensor: A clone holding the cross-process mean in the distributed
        case; otherwise the input tensor itself.
    """
    if not (dist.is_available() and dist.is_initialized()):
        return tensor
    # Clone so the caller's tensor is not mutated by the in-place div_.
    tensor = tensor.clone()
    dist.all_reduce(tensor.div_(dist.get_world_size()), op=dist.ReduceOp.SUM)
    return tensor