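# Tests for mmcv's optimizer constructors, runner, Config and misc utils.
# These sections assume fixtures defined earlier in the file (ExampleModel,
# SubModel, base_lr, base_wd, momentum, OPS_AVAILABLE, data_path, ...).
# ExampleDuplicateModel below registers the same Conv2d instance under both
# `conv1` and `conv3` (via `self.conv3[0] = self.conv1[0]`) to exercise the
# duplicate-parameter handling of DefaultOptimizerConstructor.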
class ExampleDuplicateModel(nn.Module):
def __init__(self):
super().__init__()
self.param1 = nn.Parameter(torch.ones(1))
self.conv1 = nn.Sequential(nn.Conv2d(3, 4, kernel_size=1, bias=False))
self.conv2 = nn.Sequential(nn.Conv2d(4, 2, kernel_size=1))
self.bn = nn.BatchNorm2d(2)
self.sub = SubModel()
self.conv3 = nn.Sequential(nn.Conv2d(3, 4, kernel_size=1, bias=False))
self.conv3[0] = self.conv1[0]
if OPS_AVAILABLE:
from mmcv.ops import DeformConv2dPack
self.dcn = DeformConv2dPack(3, 4, kernel_size=3, deformable_groups=1)
def forward(self, x):
return x
class PseudoDataParallel(nn.Module):
def __init__(self):
super().__init__()
self.module = ExampleModel()
def forward(self, x):
return x
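# check_default_optimizer verifies the single-group case: every parameter
# lands in optimizer.param_groups[0], in model.named_parameters() order,
# with the plain base_lr/base_wd defaults.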
def check_default_optimizer(optimizer, model, prefix=''):
assert isinstance(optimizer, torch.optim.SGD)
assert (optimizer.defaults['lr'] == base_lr)
assert (optimizer.defaults['momentum'] == momentum)
assert (optimizer.defaults['weight_decay'] == base_wd)
param_groups = optimizer.param_groups[0]
if OPS_AVAILABLE:
param_names = ['param1', 'conv1.weight', 'conv2.weight', 'conv2.bias', 'bn.weight', 'bn.bias', 'sub.param1', 'sub.conv1.weight', 'sub.conv1.bias', 'sub.gn.weight', 'sub.gn.bias', 'dcn.weight', 'dcn.conv_offset.weight', 'dcn.conv_offset.bias']
else:
param_names = ['param1', 'conv1.weight', 'conv2.weight', 'conv2.bias', 'bn.weight', 'bn.bias', 'sub.param1', 'sub.conv1.weight', 'sub.conv1.bias', 'sub.gn.weight', 'sub.gn.bias']
param_dict = dict(model.named_parameters())
assert (len(param_groups['params']) == len(param_names))
for i in range(len(param_groups['params'])):
assert torch.equal(param_groups['params'][i], param_dict[(prefix + param_names[i])])
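# check_sgd_optimizer verifies the paramwise case: one param group per
# parameter, in named_parameters() order, with biases scaled by
# bias_lr_mult/bias_decay_mult, norm layers by norm_decay_mult, sub.conv1
# (checked against dwconv_decay_mult, so presumably depthwise) by
# dwconv_decay_mult, and DCN offset params by dcn_offset_lr_mult.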
def check_sgd_optimizer(optimizer, model, prefix='', bias_lr_mult=1, bias_decay_mult=1, norm_decay_mult=1, dwconv_decay_mult=1, dcn_offset_lr_mult=1, bypass_duplicate=False):
param_groups = optimizer.param_groups
assert isinstance(optimizer, torch.optim.SGD)
assert (optimizer.defaults['lr'] == base_lr)
assert (optimizer.defaults['momentum'] == momentum)
assert (optimizer.defaults['weight_decay'] == base_wd)
model_parameters = list(model.parameters())
assert (len(param_groups) == len(model_parameters))
for (i, param) in enumerate(model_parameters):
param_group = param_groups[i]
assert torch.equal(param_group['params'][0], param)
assert (param_group['momentum'] == momentum)
param1 = param_groups[0]
assert (param1['lr'] == base_lr)
assert (param1['weight_decay'] == base_wd)
conv1_weight = param_groups[1]
assert (conv1_weight['lr'] == base_lr)
assert (conv1_weight['weight_decay'] == base_wd)
conv2_weight = param_groups[2]
assert (conv2_weight['lr'] == base_lr)
assert (conv2_weight['weight_decay'] == base_wd)
conv2_bias = param_groups[3]
assert (conv2_bias['lr'] == (base_lr * bias_lr_mult))
assert (conv2_bias['weight_decay'] == (base_wd * bias_decay_mult))
bn_weight = param_groups[4]
assert (bn_weight['lr'] == base_lr)
assert (bn_weight['weight_decay'] == (base_wd * norm_decay_mult))
bn_bias = param_groups[5]
assert (bn_bias['lr'] == base_lr)
assert (bn_bias['weight_decay'] == (base_wd * norm_decay_mult))
sub_param1 = param_groups[6]
assert (sub_param1['lr'] == base_lr)
assert (sub_param1['weight_decay'] == base_wd)
sub_conv1_weight = param_groups[7]
assert (sub_conv1_weight['lr'] == base_lr)
assert (sub_conv1_weight['weight_decay'] == (base_wd * dwconv_decay_mult))
sub_conv1_bias = param_groups[8]
assert (sub_conv1_bias['lr'] == (base_lr * bias_lr_mult))
assert (sub_conv1_bias['weight_decay'] == (base_wd * dwconv_decay_mult))
sub_gn_weight = param_groups[9]
assert (sub_gn_weight['lr'] == base_lr)
assert (sub_gn_weight['weight_decay'] == (base_wd * norm_decay_mult))
sub_gn_bias = param_groups[10]
assert (sub_gn_bias['lr'] == base_lr)
assert (sub_gn_bias['weight_decay'] == (base_wd * norm_decay_mult))
    if OPS_AVAILABLE:  # DCN params exist whenever the ops are compiled, with or without CUDA
dcn_conv_weight = param_groups[11]
assert (dcn_conv_weight['lr'] == base_lr)
assert (dcn_conv_weight['weight_decay'] == base_wd)
dcn_offset_weight = param_groups[12]
assert (dcn_offset_weight['lr'] == (base_lr * dcn_offset_lr_mult))
assert (dcn_offset_weight['weight_decay'] == base_wd)
dcn_offset_bias = param_groups[13]
assert (dcn_offset_bias['lr'] == (base_lr * dcn_offset_lr_mult))
assert (dcn_offset_bias['weight_decay'] == base_wd)
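# A sketch of the flow under test (names from this file's fixtures):
#   optim_constructor = DefaultOptimizerConstructor(
#       dict(type='SGD', lr=base_lr), dict(bias_lr_mult=2))
#   optimizer = optim_constructor(model)
# The constructor validates both configs before building the optimizer.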
def test_default_optimizer_constructor():
model = ExampleModel()
with pytest.raises(TypeError):
optimizer_cfg = []
optim_constructor = DefaultOptimizerConstructor(optimizer_cfg)
optim_constructor(model)
with pytest.raises(TypeError):
optimizer_cfg = dict(lr=0.0001)
paramwise_cfg = ['error']
optim_constructor = DefaultOptimizerConstructor(optimizer_cfg, paramwise_cfg)
optim_constructor(model)
with pytest.raises(ValueError):
optimizer_cfg = dict(lr=0.0001, weight_decay=None)
paramwise_cfg = dict(bias_decay_mult=1, norm_decay_mult=1)
optim_constructor = DefaultOptimizerConstructor(optimizer_cfg, paramwise_cfg)
optim_constructor(model)
optimizer_cfg = dict(type='SGD', lr=base_lr, weight_decay=base_wd, momentum=momentum)
optim_constructor = DefaultOptimizerConstructor(optimizer_cfg)
optimizer = optim_constructor(model)
check_default_optimizer(optimizer, model)
model = PseudoDataParallel()
optimizer_cfg = dict(type='SGD', lr=base_lr, weight_decay=base_wd, momentum=momentum)
paramwise_cfg = None
optim_constructor = DefaultOptimizerConstructor(optimizer_cfg)
optimizer = optim_constructor(model)
check_default_optimizer(optimizer, model, prefix='module.')
if torch.cuda.is_available():
model = torch.nn.DataParallel(ExampleModel())
optimizer_cfg = dict(type='SGD', lr=base_lr, weight_decay=base_wd, momentum=momentum)
paramwise_cfg = None
optim_constructor = DefaultOptimizerConstructor(optimizer_cfg)
optimizer = optim_constructor(model)
check_default_optimizer(optimizer, model, prefix='module.')
model = ExampleModel()
optimizer_cfg = dict(type='SGD', lr=base_lr, weight_decay=base_wd, momentum=momentum)
paramwise_cfg = dict()
optim_constructor = DefaultOptimizerConstructor(optimizer_cfg, paramwise_cfg)
optimizer = optim_constructor(model)
check_default_optimizer(optimizer, model)
model = ExampleModel()
for param in model.parameters():
param.requires_grad = False
optimizer_cfg = dict(type='SGD', lr=base_lr, weight_decay=base_wd, momentum=momentum)
paramwise_cfg = dict()
optim_constructor = DefaultOptimizerConstructor(optimizer_cfg)
optimizer = optim_constructor(model)
check_default_optimizer(optimizer, model)
model = ExampleModel()
optimizer_cfg = dict(type='SGD', lr=base_lr, weight_decay=base_wd, momentum=momentum)
paramwise_cfg = dict(bias_lr_mult=2, bias_decay_mult=0.5, norm_decay_mult=0, dwconv_decay_mult=0.1, dcn_offset_lr_mult=0.1)
optim_constructor = DefaultOptimizerConstructor(optimizer_cfg, paramwise_cfg)
optimizer = optim_constructor(model)
check_sgd_optimizer(optimizer, model, **paramwise_cfg)
model = ExampleModel()
optimizer_cfg = dict(type='Rprop', lr=base_lr)
paramwise_cfg = dict(bias_lr_mult=2)
optim_constructor = DefaultOptimizerConstructor(optimizer_cfg, paramwise_cfg)
optimizer = optim_constructor(model)
param_groups = optimizer.param_groups
assert isinstance(optimizer, torch.optim.Rprop)
assert (optimizer.defaults['lr'] == base_lr)
model_parameters = list(model.parameters())
assert (len(param_groups) == len(model_parameters))
for (i, param) in enumerate(model_parameters):
param_group = param_groups[i]
assert torch.equal(param_group['params'][0], param)
assert (param_groups[0]['lr'] == base_lr)
assert (param_groups[1]['lr'] == base_lr)
assert (param_groups[2]['lr'] == base_lr)
assert (param_groups[3]['lr'] == (base_lr * paramwise_cfg['bias_lr_mult']))
assert (param_groups[4]['lr'] == base_lr)
assert (param_groups[5]['lr'] == base_lr)
assert (param_groups[6]['lr'] == base_lr)
assert (param_groups[7]['lr'] == base_lr)
assert (param_groups[8]['lr'] == (base_lr * paramwise_cfg['bias_lr_mult']))
assert (param_groups[9]['lr'] == base_lr)
assert (param_groups[10]['lr'] == base_lr)
if OPS_AVAILABLE:
assert (param_groups[11]['lr'] == base_lr)
assert (param_groups[12]['lr'] == base_lr)
assert (param_groups[13]['lr'] == base_lr)
model = PseudoDataParallel()
optimizer_cfg = dict(type='SGD', lr=base_lr, weight_decay=base_wd, momentum=momentum)
paramwise_cfg = dict(bias_lr_mult=2, bias_decay_mult=0.5, norm_decay_mult=0, dwconv_decay_mult=0.1, dcn_offset_lr_mult=0.1)
optim_constructor = DefaultOptimizerConstructor(optimizer_cfg, paramwise_cfg)
optimizer = optim_constructor(model)
check_sgd_optimizer(optimizer, model, prefix='module.', **paramwise_cfg)
if torch.cuda.is_available():
model = torch.nn.DataParallel(ExampleModel())
optimizer_cfg = dict(type='SGD', lr=base_lr, weight_decay=base_wd, momentum=momentum)
paramwise_cfg = dict(bias_lr_mult=2, bias_decay_mult=0.5, norm_decay_mult=0, dwconv_decay_mult=0.1, dcn_offset_lr_mult=0.1)
optim_constructor = DefaultOptimizerConstructor(optimizer_cfg, paramwise_cfg)
optimizer = optim_constructor(model)
check_sgd_optimizer(optimizer, model, prefix='module.', **paramwise_cfg)
for param in model.parameters():
param.requires_grad = False
optim_constructor = DefaultOptimizerConstructor(optimizer_cfg, paramwise_cfg)
optimizer = optim_constructor(model)
param_groups = optimizer.param_groups
assert isinstance(optimizer, torch.optim.SGD)
assert (optimizer.defaults['lr'] == base_lr)
assert (optimizer.defaults['momentum'] == momentum)
assert (optimizer.defaults['weight_decay'] == base_wd)
for (i, (name, param)) in enumerate(model.named_parameters()):
param_group = param_groups[i]
assert torch.equal(param_group['params'][0], param)
assert (param_group['momentum'] == momentum)
assert (param_group['lr'] == base_lr)
assert (param_group['weight_decay'] == base_wd)
model = ExampleDuplicateModel()
optimizer_cfg = dict(type='SGD', lr=base_lr, weight_decay=base_wd, momentum=momentum)
paramwise_cfg = dict(bias_lr_mult=2, bias_decay_mult=0.5, norm_decay_mult=0, dwconv_decay_mult=0.1)
with pytest.raises(ValueError) as excinfo:
optim_constructor = DefaultOptimizerConstructor(optimizer_cfg, paramwise_cfg)
optim_constructor(model)
    assert ('some parameters appear in more than one parameter group' == str(excinfo.value))
paramwise_cfg = dict(bias_lr_mult=2, bias_decay_mult=0.5, norm_decay_mult=0, dwconv_decay_mult=0.1, dcn_offset_lr_mult=0.1, bypass_duplicate=True)
optim_constructor = DefaultOptimizerConstructor(optimizer_cfg, paramwise_cfg)
    with warnings.catch_warnings(record=True) as w:
        # set the filter before the constructor call so the duplicate
        # warning is reliably recorded
        warnings.simplefilter('always')
        optimizer = optim_constructor(model)
assert (len(w) == 1)
assert (str(w[0].message) == 'conv3.0 is duplicate. It is skipped since bypass_duplicate=True')
model_parameters = list(model.parameters())
num_params = (14 if OPS_AVAILABLE else 11)
assert (len(optimizer.param_groups) == len(model_parameters) == num_params)
check_sgd_optimizer(optimizer, model, **paramwise_cfg)
model = ExampleModel()
optimizer_cfg = dict(type='SGD', lr=base_lr, weight_decay=base_wd, momentum=momentum)
paramwise_cfg = dict(custom_keys={'param1': dict(lr_mult=10), 'sub': dict(lr_mult=0.1, decay_mult=0), 'sub.gn': dict(lr_mult=0.01), 'non_exist_key': dict(lr_mult=0.0)}, norm_decay_mult=0.5)
with pytest.raises(TypeError):
paramwise_cfg_ = dict(custom_keys=[0.1, 0.0001])
optim_constructor = DefaultOptimizerConstructor(optimizer_cfg, paramwise_cfg_)
optimizer = optim_constructor(model)
with pytest.raises(ValueError):
optimizer_cfg_ = dict(type='SGD', lr=0.01)
paramwise_cfg_ = dict(custom_keys={'.backbone': dict(decay_mult=0.5)})
optim_constructor = DefaultOptimizerConstructor(optimizer_cfg_, paramwise_cfg_)
optimizer = optim_constructor(model)
optim_constructor = DefaultOptimizerConstructor(optimizer_cfg, paramwise_cfg)
optimizer = optim_constructor(model)
assert isinstance(optimizer, torch.optim.SGD)
assert (optimizer.defaults['lr'] == base_lr)
assert (optimizer.defaults['momentum'] == momentum)
assert (optimizer.defaults['weight_decay'] == base_wd)
param_groups = optimizer.param_groups
groups = []
group_settings = []
groups.append(['param1', 'sub.param1'])
group_settings.append({'lr': (base_lr * 10), 'momentum': momentum, 'weight_decay': base_wd})
groups.append(['sub.gn.weight', 'sub.gn.bias'])
group_settings.append({'lr': (base_lr * 0.01), 'momentum': momentum, 'weight_decay': base_wd})
groups.append(['sub.conv1.weight', 'sub.conv1.bias'])
group_settings.append({'lr': (base_lr * 0.1), 'momentum': momentum, 'weight_decay': 0})
groups.append(['bn.weight', 'bn.bias'])
group_settings.append({'lr': base_lr, 'momentum': momentum, 'weight_decay': (base_wd * 0.5)})
groups.append(['conv1.weight', 'conv2.weight', 'conv2.bias'])
group_settings.append({'lr': base_lr, 'momentum': momentum, 'weight_decay': base_wd})
num_params = (14 if OPS_AVAILABLE else 11)
assert (len(param_groups) == num_params)
for (i, (name, param)) in enumerate(model.named_parameters()):
assert torch.equal(param_groups[i]['params'][0], param)
for (group, settings) in zip(groups, group_settings):
if (name in group):
for setting in settings:
assert (param_groups[i][setting] == settings[setting]), f'{name} {setting}'
model = ExampleModel()
optimizer_cfg = dict(type='SGD', lr=base_lr, momentum=momentum)
paramwise_cfg = dict(custom_keys={'param1': dict(lr_mult=10)})
optim_constructor = DefaultOptimizerConstructor(optimizer_cfg, paramwise_cfg)
optimizer = optim_constructor(model)
assert isinstance(optimizer, torch.optim.SGD)
assert (optimizer.defaults['lr'] == base_lr)
assert (optimizer.defaults['momentum'] == momentum)
assert (optimizer.defaults['weight_decay'] == 0)
param_groups = optimizer.param_groups
groups = []
group_settings = []
groups.append(['param1', 'sub.param1'])
group_settings.append({'lr': (base_lr * 10), 'momentum': momentum, 'weight_decay': 0})
groups.append(['sub.conv1.weight', 'sub.conv1.bias', 'sub.gn.weight', 'sub.gn.bias', 'conv1.weight', 'conv2.weight', 'conv2.bias', 'bn.weight', 'bn.bias'])
group_settings.append({'lr': base_lr, 'momentum': momentum, 'weight_decay': 0})
num_params = (14 if OPS_AVAILABLE else 11)
assert (len(param_groups) == num_params)
for (i, (name, param)) in enumerate(model.named_parameters()):
assert torch.equal(param_groups[i]['params'][0], param)
for (group, settings) in zip(groups, group_settings):
if (name in group):
for setting in settings:
assert (param_groups[i][setting] == settings[setting]), f'{name} {setting}'
def test_torch_optimizers():
torch_optimizers = ['ASGD', 'Adadelta', 'Adagrad', 'Adam', 'AdamW', 'Adamax', 'LBFGS', 'Optimizer', 'RMSprop', 'Rprop', 'SGD', 'SparseAdam']
assert set(torch_optimizers).issubset(set(TORCH_OPTIMIZERS))
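# build_optimizer_constructor resolves cfg['type'] in the OPTIMIZER_BUILDERS
# registry, so a custom constructor only needs the
# @OPTIMIZER_BUILDERS.register_module() decorator, as shown below.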
def test_build_optimizer_constructor():
model = ExampleModel()
optimizer_cfg = dict(type='SGD', lr=base_lr, weight_decay=base_wd, momentum=momentum)
paramwise_cfg = dict(bias_lr_mult=2, bias_decay_mult=0.5, norm_decay_mult=0, dwconv_decay_mult=0.1, dcn_offset_lr_mult=0.1)
optim_constructor_cfg = dict(type='DefaultOptimizerConstructor', optimizer_cfg=optimizer_cfg, paramwise_cfg=paramwise_cfg)
optim_constructor = build_optimizer_constructor(optim_constructor_cfg)
optimizer = optim_constructor(model)
check_sgd_optimizer(optimizer, model, **paramwise_cfg)
from mmcv.runner import OPTIMIZERS
from mmcv.utils import build_from_cfg
@OPTIMIZER_BUILDERS.register_module()
class MyOptimizerConstructor(DefaultOptimizerConstructor):
def __call__(self, model):
if hasattr(model, 'module'):
model = model.module
conv1_lr_mult = self.paramwise_cfg.get('conv1_lr_mult', 1.0)
params = []
for (name, param) in model.named_parameters():
param_group = {'params': [param]}
if (name.startswith('conv1') and param.requires_grad):
param_group['lr'] = (self.base_lr * conv1_lr_mult)
params.append(param_group)
optimizer_cfg['params'] = params
return build_from_cfg(optimizer_cfg, OPTIMIZERS)
paramwise_cfg = dict(conv1_lr_mult=5)
optim_constructor_cfg = dict(type='MyOptimizerConstructor', optimizer_cfg=optimizer_cfg, paramwise_cfg=paramwise_cfg)
optim_constructor = build_optimizer_constructor(optim_constructor_cfg)
optimizer = optim_constructor(model)
param_groups = optimizer.param_groups
assert isinstance(optimizer, torch.optim.SGD)
assert (optimizer.defaults['lr'] == base_lr)
assert (optimizer.defaults['momentum'] == momentum)
assert (optimizer.defaults['weight_decay'] == base_wd)
for (i, param) in enumerate(model.parameters()):
param_group = param_groups[i]
assert torch.equal(param_group['params'][0], param)
assert (param_group['momentum'] == momentum)
assert (param_groups[1]['lr'] == (base_lr * paramwise_cfg['conv1_lr_mult']))
assert (param_groups[1]['weight_decay'] == base_wd)
def test_build_optimizer():
model = ExampleModel()
optimizer_cfg = dict(type='SGD', lr=base_lr, weight_decay=base_wd, momentum=momentum)
optimizer = build_optimizer(model, optimizer_cfg)
check_default_optimizer(optimizer, model)
model = ExampleModel()
optimizer_cfg = dict(type='SGD', lr=base_lr, weight_decay=base_wd, momentum=momentum, paramwise_cfg=dict(bias_lr_mult=2, bias_decay_mult=0.5, norm_decay_mult=0, dwconv_decay_mult=0.1, dcn_offset_lr_mult=0.1))
optimizer = build_optimizer(model, optimizer_cfg)
check_sgd_optimizer(optimizer, model, **optimizer_cfg['paramwise_cfg'])
class OldStyleModel(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(3, 3, 1)
class Model(OldStyleModel):
def train_step(self):
pass
def val_step(self):
pass
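# build_runner dispatches on cfg['type'] and forwards default_args
# (model, work_dir, logger): EpochBasedRunner takes max_epochs,
# IterBasedRunner takes max_iters, and supplying both is an error.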
def test_build_runner():
temp_root = tempfile.gettempdir()
dir_name = ''.join([random.choice(string.ascii_letters) for _ in range(10)])
default_args = dict(model=Model(), work_dir=osp.join(temp_root, dir_name), logger=logging.getLogger())
cfg = dict(type='EpochBasedRunner', max_epochs=1)
runner = build_runner(cfg, default_args=default_args)
assert (runner._max_epochs == 1)
cfg = dict(type='IterBasedRunner', max_iters=1)
runner = build_runner(cfg, default_args=default_args)
assert (runner._max_iters == 1)
with pytest.raises(ValueError, match='Only one of'):
cfg = dict(type='IterBasedRunner', max_epochs=1, max_iters=1)
runner = build_runner(cfg, default_args=default_args)
@pytest.mark.parametrize('runner_class', RUNNERS.module_dict.values())
def test_epoch_based_runner(runner_class):
with pytest.warns(DeprecationWarning):
model = OldStyleModel()
def batch_processor():
pass
_ = runner_class(model, batch_processor, logger=logging.getLogger())
with pytest.raises(TypeError):
model = OldStyleModel()
_ = runner_class(model, batch_processor=0, logger=logging.getLogger())
with pytest.raises(TypeError):
model = Model()
optimizer = 'NotAOptimizer'
_ = runner_class(model, optimizer=optimizer, logger=logging.getLogger())
with pytest.raises(TypeError):
model = Model()
        # give optim1 real parameters so the TypeError comes from the
        # runner rejecting optim2, not from Adam() lacking its params
        optimizers = dict(optim1=torch.optim.Adam(model.parameters(), lr=0.01), optim2='NotAOptimizer')
_ = runner_class(model, optimizer=optimizers, logger=logging.getLogger())
with pytest.raises(TypeError):
model = Model()
_ = runner_class(model, logger=None)
with pytest.raises(TypeError):
model = Model()
_ = runner_class(model, logger=logging.getLogger(), meta=['list'])
with pytest.raises(AssertionError):
model = OldStyleModel()
_ = runner_class(model, logger=logging.getLogger())
with pytest.raises(TypeError):
model = Model()
_ = runner_class(model, work_dir=1, logger=logging.getLogger())
with pytest.raises(RuntimeError):
def batch_processor():
pass
model = Model()
_ = runner_class(model, batch_processor, logger=logging.getLogger())
model = Model()
temp_root = tempfile.gettempdir()
dir_name = ''.join([random.choice(string.ascii_letters) for _ in range(10)])
work_dir = osp.join(temp_root, dir_name)
_ = runner_class(model, work_dir=work_dir, logger=logging.getLogger())
assert osp.isdir(work_dir)
_ = runner_class(model, work_dir=work_dir, logger=logging.getLogger())
assert osp.isdir(work_dir)
os.removedirs(work_dir)
@pytest.mark.parametrize('runner_class', RUNNERS.module_dict.values())
def test_runner_with_parallel(runner_class):
def batch_processor():
pass
model = MMDataParallel(OldStyleModel())
_ = runner_class(model, batch_processor, logger=logging.getLogger())
model = MMDataParallel(Model())
_ = runner_class(model, logger=logging.getLogger())
with pytest.raises(RuntimeError):
def batch_processor():
pass
model = MMDataParallel(Model())
_ = runner_class(model, batch_processor, logger=logging.getLogger())
@pytest.mark.parametrize('runner_class', RUNNERS.module_dict.values())
def test_save_checkpoint(runner_class):
model = Model()
runner = runner_class(model=model, logger=logging.getLogger())
with pytest.raises(TypeError):
runner.save_checkpoint('.', meta=list())
with tempfile.TemporaryDirectory() as root:
runner.save_checkpoint(root)
latest_path = osp.join(root, 'latest.pth')
assert osp.exists(latest_path)
if isinstance(runner, EpochBasedRunner):
first_ckp_path = osp.join(root, 'epoch_1.pth')
elif isinstance(runner, IterBasedRunner):
first_ckp_path = osp.join(root, 'iter_1.pth')
assert osp.exists(first_ckp_path)
if (platform.system() != 'Windows'):
assert (osp.realpath(latest_path) == osp.realpath(first_ckp_path))
        else:
            # Windows uses a file copy instead of a symlink, so the two
            # paths are distinct files
            pass
torch.load(latest_path)
@pytest.mark.parametrize('runner_class', RUNNERS.module_dict.values())
def test_build_lr_momentum_hook(runner_class):
model = Model()
runner = runner_class(model=model, logger=logging.getLogger())
lr_config = dict(policy='CosineAnnealing', by_epoch=False, min_lr_ratio=0, warmup_iters=2, warmup_ratio=0.9)
runner.register_lr_hook(lr_config)
assert (len(runner.hooks) == 1)
lr_config = dict(policy='Cyclic', by_epoch=False, target_ratio=(10, 1), cyclic_times=1, step_ratio_up=0.4)
runner.register_lr_hook(lr_config)
assert (len(runner.hooks) == 2)
lr_config = dict(policy='cyclic', by_epoch=False, target_ratio=((0.85 / 0.95), 1), cyclic_times=1, step_ratio_up=0.4)
runner.register_lr_hook(lr_config)
assert (len(runner.hooks) == 3)
lr_config = dict(policy='Step', warmup='linear', warmup_iters=500, warmup_ratio=(1.0 / 3), step=[8, 11])
runner.register_lr_hook(lr_config)
assert (len(runner.hooks) == 4)
lr_config = dict(policy='step', warmup='linear', warmup_iters=500, warmup_ratio=(1.0 / 3), step=[8, 11])
runner.register_lr_hook(lr_config)
assert (len(runner.hooks) == 5)
mom_config = dict(policy='CosineAnnealing', min_momentum_ratio=(0.99 / 0.95), by_epoch=False, warmup_iters=2, warmup_ratio=(0.9 / 0.95))
runner.register_momentum_hook(mom_config)
assert (len(runner.hooks) == 6)
mom_config = dict(policy='Cyclic', by_epoch=False, target_ratio=((0.85 / 0.95), 1), cyclic_times=1, step_ratio_up=0.4)
runner.register_momentum_hook(mom_config)
assert (len(runner.hooks) == 7)
mom_config = dict(policy='cyclic', by_epoch=False, target_ratio=((0.85 / 0.95), 1), cyclic_times=1, step_ratio_up=0.4)
runner.register_momentum_hook(mom_config)
assert (len(runner.hooks) == 8)
@pytest.mark.parametrize('runner_class', RUNNERS.module_dict.values())
def test_register_timer_hook(runner_class):
model = Model()
runner = runner_class(model=model, logger=logging.getLogger())
timer_config = None
runner.register_timer_hook(timer_config)
assert (len(runner.hooks) == 0)
timer_config = dict(type='IterTimerHook')
runner.register_timer_hook(timer_config)
assert (len(runner.hooks) == 1)
assert isinstance(runner.hooks[0], IterTimerHook)
timer_config = IterTimerHook()
runner.register_timer_hook(timer_config)
assert (len(runner.hooks) == 2)
assert isinstance(runner.hooks[1], IterTimerHook)
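# set_random_seed(seed, deterministic=False) seeds random, numpy and torch
# and sets PYTHONHASHSEED; cudnn.deterministic is only toggled on when
# deterministic=True, which is what the paired asserts below check.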
def test_set_random_seed():
set_random_seed(0)
a_random = random.randint(0, 10)
a_np_random = np.random.rand(2, 2)
a_torch_random = torch.rand(2, 2)
assert (torch.backends.cudnn.deterministic is False)
assert (torch.backends.cudnn.benchmark is False)
assert (os.environ['PYTHONHASHSEED'] == str(0))
set_random_seed(0, True)
b_random = random.randint(0, 10)
b_np_random = np.random.rand(2, 2)
b_torch_random = torch.rand(2, 2)
assert (torch.backends.cudnn.deterministic is True)
if is_rocm_pytorch:
assert (torch.backends.cudnn.benchmark is True)
else:
assert (torch.backends.cudnn.benchmark is False)
assert (a_random == b_random)
assert np.equal(a_np_random, b_np_random).all()
assert torch.equal(a_torch_random, b_torch_random)
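# Config tests: a Config wraps a dict, remembers its source file/text and
# round-trips through dump(); fixture files (a.py, b.json, c.yaml, ...)
# live under `data_path`, assumed to point at the tests' data directory.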
def test_construct():
cfg = Config()
assert (cfg.filename is None)
assert (cfg.text == '')
assert (len(cfg) == 0)
assert (cfg._cfg_dict == {})
with pytest.raises(TypeError):
Config([0, 1])
cfg_dict = dict(item1=[1, 2], item2=dict(a=0), item3=True, item4='test')
cfg_file = osp.join(data_path, 'config/a.py')
cfg = Config(cfg_dict, filename=cfg_file)
assert isinstance(cfg, Config)
assert (cfg.filename == cfg_file)
assert (cfg.text == open(cfg_file, 'r').read())
assert (cfg.dump() == cfg.pretty_text)
with tempfile.TemporaryDirectory() as temp_config_dir:
dump_file = osp.join(temp_config_dir, 'a.py')
cfg.dump(dump_file)
assert (cfg.dump() == open(dump_file, 'r').read())
assert Config.fromfile(dump_file)
cfg_file = osp.join(data_path, 'config/b.json')
cfg = Config(cfg_dict, filename=cfg_file)
assert isinstance(cfg, Config)
assert (cfg.filename == cfg_file)
assert (cfg.text == open(cfg_file, 'r').read())
assert (cfg.dump() == json.dumps(cfg_dict))
with tempfile.TemporaryDirectory() as temp_config_dir:
dump_file = osp.join(temp_config_dir, 'b.json')
cfg.dump(dump_file)
assert (cfg.dump() == open(dump_file, 'r').read())
assert Config.fromfile(dump_file)
cfg_file = osp.join(data_path, 'config/c.yaml')
cfg = Config(cfg_dict, filename=cfg_file)
assert isinstance(cfg, Config)
assert (cfg.filename == cfg_file)
assert (cfg.text == open(cfg_file, 'r').read())
assert (cfg.dump() == yaml.dump(cfg_dict))
with tempfile.TemporaryDirectory() as temp_config_dir:
dump_file = osp.join(temp_config_dir, 'c.yaml')
cfg.dump(dump_file)
assert (cfg.dump() == open(dump_file, 'r').read())
assert Config.fromfile(dump_file)
cfg_file = osp.join(data_path, 'config/h.py')
path = osp.join(osp.dirname(__file__), 'data', 'config')
path = Path(path).as_posix()
cfg_dict = dict(item1='h.py', item2=path, item3='abc_h')
cfg = Config(cfg_dict, filename=cfg_file)
assert isinstance(cfg, Config)
assert (cfg.filename == cfg_file)
assert (cfg.text == open(cfg_file, 'r').read())
assert (cfg.dump() == cfg.pretty_text)
with tempfile.TemporaryDirectory() as temp_config_dir:
dump_file = osp.join(temp_config_dir, 'h.py')
cfg.dump(dump_file)
assert (cfg.dump() == open(dump_file, 'r').read())
assert Config.fromfile(dump_file)
assert (Config.fromfile(dump_file)['item1'] == cfg_dict['item1'])
assert (Config.fromfile(dump_file)['item2'] == cfg_dict['item2'])
assert (Config.fromfile(dump_file)['item3'] == cfg_dict['item3'])
cfg_dict = dict(item1='{{fileBasename}}', item2='{{ fileDirname}}', item3='abc_{{ fileBasenameNoExtension }}')
assert Config.fromfile(cfg_file, False)
assert (Config.fromfile(cfg_file, False)['item1'] == cfg_dict['item1'])
assert (Config.fromfile(cfg_file, False)['item2'] == cfg_dict['item2'])
assert (Config.fromfile(cfg_file, False)['item3'] == cfg_dict['item3'])
cfg_file = osp.join(data_path, 'config/p.yaml')
cfg_dict = dict(item1=osp.join(osp.dirname(__file__), 'data', 'config'))
cfg = Config(cfg_dict, filename=cfg_file)
assert isinstance(cfg, Config)
assert (cfg.filename == cfg_file)
assert (cfg.text == open(cfg_file, 'r').read())
assert (cfg.dump() == yaml.dump(cfg_dict))
with tempfile.TemporaryDirectory() as temp_config_dir:
dump_file = osp.join(temp_config_dir, 'p.yaml')
cfg.dump(dump_file)
assert (cfg.dump() == open(dump_file, 'r').read())
assert Config.fromfile(dump_file)
assert (Config.fromfile(dump_file)['item1'] == cfg_dict['item1'])
assert Config.fromfile(cfg_file, False)
assert (Config.fromfile(cfg_file, False)['item1'] == '{{ fileDirname }}')
cfg_file = osp.join(data_path, 'config/o.json')
cfg_dict = dict(item1=osp.join(osp.dirname(__file__), 'data', 'config'))
cfg = Config(cfg_dict, filename=cfg_file)
assert isinstance(cfg, Config)
assert (cfg.filename == cfg_file)
assert (cfg.text == open(cfg_file, 'r').read())
assert (cfg.dump() == json.dumps(cfg_dict))
with tempfile.TemporaryDirectory() as temp_config_dir:
dump_file = osp.join(temp_config_dir, 'o.json')
cfg.dump(dump_file)
assert (cfg.dump() == open(dump_file, 'r').read())
assert Config.fromfile(dump_file)
assert (Config.fromfile(dump_file)['item1'] == cfg_dict['item1'])
assert Config.fromfile(cfg_file, False)
assert (Config.fromfile(cfg_file, False)['item1'] == '{{ fileDirname }}')
def test_fromfile():
for filename in ['a.py', 'a.b.py', 'b.json', 'c.yaml']:
cfg_file = osp.join(data_path, 'config', filename)
cfg = Config.fromfile(cfg_file)
assert isinstance(cfg, Config)
assert (cfg.filename == cfg_file)
assert (cfg.text == ((osp.abspath(osp.expanduser(cfg_file)) + '\n') + open(cfg_file, 'r').read()))
cfg_file = osp.join(data_path, 'config', 'q.py')
imported_file = osp.join(data_path, 'config', 'r.py')
target_pkg = osp.join(osp.dirname(__file__), 'r.py')
shutil.copy(imported_file, target_pkg)
Config.fromfile(cfg_file, import_custom_modules=True)
assert (os.environ.pop('TEST_VALUE') == 'test')
os.remove(target_pkg)
with pytest.raises(FileNotFoundError):
Config.fromfile('no_such_file.py')
with pytest.raises(IOError):
Config.fromfile(osp.join(data_path, 'color.jpg'))
def test_fromstring():
for filename in ['a.py', 'a.b.py', 'b.json', 'c.yaml']:
cfg_file = osp.join(data_path, 'config', filename)
file_format = osp.splitext(filename)[(- 1)]
in_cfg = Config.fromfile(cfg_file)
out_cfg = Config.fromstring(in_cfg.pretty_text, '.py')
assert (in_cfg._cfg_dict == out_cfg._cfg_dict)
cfg_str = open(cfg_file, 'r').read()
out_cfg = Config.fromstring(cfg_str, file_format)
assert (in_cfg._cfg_dict == out_cfg._cfg_dict)
cfg_file = osp.join(data_path, 'config', 'b.json')
in_cfg = Config.fromfile(cfg_file)
with pytest.raises(Exception):
Config.fromstring(in_cfg.pretty_text, '.json')
cfg_str = open(cfg_file, 'r').read()
with pytest.raises(Exception):
Config.fromstring(cfg_str, '.py')
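# `_base_` handling: a config may inherit from one or more base files;
# child options override base options recursively (see also the
# `_delete_=True` case in test_merge_delete below).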
def test_merge_from_base():
cfg_file = osp.join(data_path, 'config/d.py')
cfg = Config.fromfile(cfg_file)
assert isinstance(cfg, Config)
assert (cfg.filename == cfg_file)
base_cfg_file = osp.join(data_path, 'config/base.py')
merge_text = ((osp.abspath(osp.expanduser(base_cfg_file)) + '\n') + open(base_cfg_file, 'r').read())
merge_text += ((('\n' + osp.abspath(osp.expanduser(cfg_file))) + '\n') + open(cfg_file, 'r').read())
assert (cfg.text == merge_text)
assert (cfg.item1 == [2, 3])
assert (cfg.item2.a == 1)
assert (cfg.item3 is False)
assert (cfg.item4 == 'test_base')
with pytest.raises(TypeError):
Config.fromfile(osp.join(data_path, 'config/e.py'))
def test_merge_from_multiple_bases():
cfg_file = osp.join(data_path, 'config/l.py')
cfg = Config.fromfile(cfg_file)
assert isinstance(cfg, Config)
assert (cfg.filename == cfg_file)
assert (cfg.item1 == [1, 2])
assert (cfg.item2.a == 0)
assert (cfg.item3 is False)
assert (cfg.item4 == 'test')
assert (cfg.item5 == dict(a=0, b=1))
assert (cfg.item6 == [dict(a=0), dict(b=1)])
assert (cfg.item7 == dict(a=[0, 1, 2], b=dict(c=[3.1, 4.2, 5.3])))
with pytest.raises(KeyError):
Config.fromfile(osp.join(data_path, 'config/m.py'))
def test_base_variables():
for file in ['t.py', 't.json', 't.yaml']:
cfg_file = osp.join(data_path, f'config/{file}')
cfg = Config.fromfile(cfg_file)
assert isinstance(cfg, Config)
assert (cfg.filename == cfg_file)
assert (cfg.item1 == [1, 2])
assert (cfg.item2.a == 0)
assert (cfg.item3 is False)
assert (cfg.item4 == 'test')
assert (cfg.item5 == dict(a=0, b=1))
assert (cfg.item6 == [dict(a=0), dict(b=1)])
assert (cfg.item7 == dict(a=[0, 1, 2], b=dict(c=[3.1, 4.2, 5.3])))
assert (cfg.item8 == file)
assert (cfg.item9 == dict(a=0))
assert (cfg.item10 == [3.1, 4.2, 5.3])
for file in ['u.py', 'u.json', 'u.yaml']:
cfg_file = osp.join(data_path, f'config/{file}')
cfg = Config.fromfile(cfg_file)
assert isinstance(cfg, Config)
assert (cfg.filename == cfg_file)
assert (cfg.base == '_base_.item8')
assert (cfg.item1 == [1, 2])
assert (cfg.item2.a == 0)
assert (cfg.item3 is False)
assert (cfg.item4 == 'test')
assert (cfg.item5 == dict(a=0, b=1))
assert (cfg.item6 == [dict(a=0), dict(b=1)])
assert (cfg.item7 == dict(a=[0, 1, 2], b=dict(c=[3.1, 4.2, 5.3])))
assert (cfg.item8 == 't.py')
assert (cfg.item9 == dict(a=0))
assert (cfg.item10 == [3.1, 4.2, 5.3])
assert (cfg.item11 == 't.py')
assert (cfg.item12 == dict(a=0))
assert (cfg.item13 == [3.1, 4.2, 5.3])
assert (cfg.item14 == [1, 2])
assert (cfg.item15 == dict(a=dict(b=dict(a=0)), b=[False], c=['test'], d=[[{'e': 0}], [{'a': 0}, {'b': 1}]], e=[1, 2]))
cfg_file = osp.join(data_path, 'config/v.py')
cfg = Config.fromfile(cfg_file)
assert isinstance(cfg, Config)
assert (cfg.filename == cfg_file)
assert (cfg.item21 == 't.py')
assert (cfg.item22 == 't.py')
assert (cfg.item23 == [3.1, 4.2, 5.3])
assert (cfg.item24 == [3.1, 4.2, 5.3])
assert (cfg.item25 == dict(a=dict(b=[3.1, 4.2, 5.3]), b=[[3.1, 4.2, 5.3]], c=[[{'e': 't.py'}], [{'a': 0}, {'b': 1}]], e='t.py'))
def test_merge_recursive_bases():
cfg_file = osp.join(data_path, 'config/f.py')
cfg = Config.fromfile(cfg_file)
assert isinstance(cfg, Config)
assert (cfg.filename == cfg_file)
assert (cfg.item1 == [2, 3])
assert (cfg.item2.a == 1)
assert (cfg.item3 is False)
assert (cfg.item4 == 'test_recursive_bases')
def test_merge_from_dict():
cfg_file = osp.join(data_path, 'config/a.py')
cfg = Config.fromfile(cfg_file)
input_options = {'item2.a': 1, 'item2.b': 0.1, 'item3': False}
cfg.merge_from_dict(input_options)
assert (cfg.item2 == dict(a=1, b=0.1))
assert (cfg.item3 is False)
cfg_file = osp.join(data_path, 'config/s.py')
cfg = Config.fromfile(cfg_file)
input_options = {'item.0.a': 1, 'item.1.b': 1}
cfg.merge_from_dict(input_options, allow_list_keys=True)
assert (cfg.item == [{'a': 1}, {'b': 1, 'c': 0}])
input_options = {'item.0.a': 1, 'item.1.b': 1}
with pytest.raises(TypeError):
cfg.merge_from_dict(input_options, allow_list_keys=False)
input_options = {'item.2.a': 1}
with pytest.raises(KeyError):
cfg.merge_from_dict(input_options, allow_list_keys=True)
def test_merge_delete():
cfg_file = osp.join(data_path, 'config/delete.py')
cfg = Config.fromfile(cfg_file)
assert (cfg.item1 == dict(a=0))
assert (cfg.item2 == dict(a=0, b=0))
assert (cfg.item3 is True)
assert (cfg.item4 == 'test')
assert ('_delete_' not in cfg.item2)
assert (type(cfg.item1) == ConfigDict)
assert (type(cfg.item2) == ConfigDict)
def test_merge_intermediate_variable():
cfg_file = osp.join(data_path, 'config/i_child.py')
cfg = Config.fromfile(cfg_file)
assert (cfg.item1 == [1, 2])
assert (cfg.item2 == dict(a=0))
assert (cfg.item3 is True)
assert (cfg.item4 == 'test')
assert (cfg.item_cfg == dict(b=2))
assert (cfg.item5 == dict(cfg=dict(b=1)))
assert (cfg.item6 == dict(cfg=dict(b=2)))
def test_fromfile_in_config():
cfg_file = osp.join(data_path, 'config/code.py')
cfg = Config.fromfile(cfg_file)
assert (cfg.cfg.item1 == [1, 2])
assert (cfg.cfg.item2 == dict(a=0))
assert (cfg.cfg.item3 is True)
assert (cfg.cfg.item4 == 'test')
assert (cfg.item5 == 1)
def test_dict():
cfg_dict = dict(item1=[1, 2], item2=dict(a=0), item3=True, item4='test')
for filename in ['a.py', 'b.json', 'c.yaml']:
cfg_file = osp.join(data_path, 'config', filename)
cfg = Config.fromfile(cfg_file)
assert (len(cfg) == 4)
assert (set(cfg.keys()) == set(cfg_dict.keys()))
assert (set(cfg._cfg_dict.keys()) == set(cfg_dict.keys()))
for value in cfg.values():
assert (value in cfg_dict.values())
for (name, value) in cfg.items():
assert (name in cfg_dict)
assert (value in cfg_dict.values())
assert (cfg.item1 == cfg_dict['item1'])
assert (cfg.item2 == cfg_dict['item2'])
assert (cfg.item2.a == 0)
assert (cfg.item3 == cfg_dict['item3'])
assert (cfg.item4 == cfg_dict['item4'])
with pytest.raises(AttributeError):
cfg.not_exist
for name in ['item1', 'item2', 'item3', 'item4']:
assert (name in cfg)
assert (cfg[name] == cfg_dict[name])
assert (cfg.get(name) == cfg_dict[name])
assert (cfg.get('not_exist') is None)
assert (cfg.get('not_exist', 0) == 0)
with pytest.raises(KeyError):
cfg['not_exist']
assert ('item1' in cfg)
assert ('not_exist' not in cfg)
cfg.update(dict(item1=0))
assert (cfg.item1 == 0)
cfg.update(dict(item2=dict(a=1)))
assert (cfg.item2.a == 1)
def test_setattr():
cfg = Config()
cfg.item1 = [1, 2]
cfg.item2 = {'a': 0}
cfg['item5'] = {'a': {'b': None}}
assert (cfg._cfg_dict['item1'] == [1, 2])
assert (cfg.item1 == [1, 2])
assert (cfg._cfg_dict['item2'] == {'a': 0})
assert (cfg.item2.a == 0)
assert (cfg._cfg_dict['item5'] == {'a': {'b': None}})
assert (cfg.item5.a.b is None)
def test_pretty_text():
cfg_file = osp.join(data_path, 'config/l.py')
cfg = Config.fromfile(cfg_file)
with tempfile.TemporaryDirectory() as temp_config_dir:
text_cfg_filename = osp.join(temp_config_dir, '_text_config.py')
with open(text_cfg_filename, 'w') as f:
f.write(cfg.pretty_text)
text_cfg = Config.fromfile(text_cfg_filename)
assert (text_cfg._cfg_dict == cfg._cfg_dict)
def test_dict_action():
parser = argparse.ArgumentParser(description='Train a detector')
parser.add_argument('--options', nargs='+', action=DictAction, help='custom options')
args = parser.parse_args(['--options', 'item2.a=a,b', 'item2.b=[(a,b), [1,2], false]'])
out_dict = {'item2.a': ['a', 'b'], 'item2.b': [('a', 'b'), [1, 2], False]}
assert (args.options == out_dict)
args = parser.parse_args(['--options', 'item2.a=[[1]]'])
out_dict = {'item2.a': [[1]]}
assert (args.options == out_dict)
with pytest.raises(AssertionError):
parser.parse_args(['--options', 'item2.a=[(a,b), [1,2], false'])
args = parser.parse_args(['--options', 'item2.a=1', 'item2.b=0.1', 'item2.c=x', 'item3=false'])
out_dict = {'item2.a': 1, 'item2.b': 0.1, 'item2.c': 'x', 'item3': False}
assert (args.options == out_dict)
cfg_file = osp.join(data_path, 'config/a.py')
cfg = Config.fromfile(cfg_file)
cfg.merge_from_dict(args.options)
assert (cfg.item2 == dict(a=1, b=0.1, c='x'))
assert (cfg.item3 is False)
def test_dump_mapping():
cfg_file = osp.join(data_path, 'config/n.py')
cfg = Config.fromfile(cfg_file)
with tempfile.TemporaryDirectory() as temp_config_dir:
text_cfg_filename = osp.join(temp_config_dir, '_text_config.py')
cfg.dump(text_cfg_filename)
text_cfg = Config.fromfile(text_cfg_filename)
assert (text_cfg._cfg_dict == cfg._cfg_dict)
def test_reserved_key():
cfg_file = osp.join(data_path, 'config/g.py')
with pytest.raises(KeyError):
Config.fromfile(cfg_file)
def test_syntax_error():
temp_cfg_file = tempfile.NamedTemporaryFile(suffix='.py', delete=False)
temp_cfg_path = temp_cfg_file.name
with open(temp_cfg_path, 'w') as f:
f.write('a=0b=dict(c=1)')
with pytest.raises(SyntaxError, match='There are syntax errors in config file'):
Config.fromfile(temp_cfg_path)
temp_cfg_file.close()
os.remove(temp_cfg_path)
def test_pickle_support():
cfg_file = osp.join(data_path, 'config/n.py')
cfg = Config.fromfile(cfg_file)
with tempfile.TemporaryDirectory() as temp_config_dir:
pkl_cfg_filename = osp.join(temp_config_dir, '_pickle.pkl')
dump(cfg, pkl_cfg_filename)
pkl_cfg = load(pkl_cfg_filename)
assert (pkl_cfg._cfg_dict == cfg._cfg_dict)
def test_deprecation():
deprecated_cfg_files = [osp.join(data_path, 'config/deprecated.py'), osp.join(data_path, 'config/deprecated_as_base.py')]
for cfg_file in deprecated_cfg_files:
with pytest.warns(DeprecationWarning):
cfg = Config.fromfile(cfg_file)
assert (cfg.item1 == 'expected')
def test_deepcopy():
cfg_file = osp.join(data_path, 'config/n.py')
cfg = Config.fromfile(cfg_file)
new_cfg = copy.deepcopy(cfg)
assert isinstance(new_cfg, Config)
assert (new_cfg._cfg_dict == cfg._cfg_dict)
assert (new_cfg._cfg_dict is not cfg._cfg_dict)
assert (new_cfg._filename == cfg._filename)
assert (new_cfg._text == cfg._text)
def test_copy():
cfg_file = osp.join(data_path, 'config/n.py')
cfg = Config.fromfile(cfg_file)
new_cfg = copy.copy(cfg)
assert isinstance(new_cfg, Config)
assert (new_cfg is not cfg)
assert (new_cfg._cfg_dict is cfg._cfg_dict)
assert (new_cfg._filename == cfg._filename)
assert (new_cfg._text == cfg._text)
def test_collect_env():
try:
import torch
except ModuleNotFoundError:
pytest.skip('skipping tests that require PyTorch')
from mmcv.utils import collect_env
env_info = collect_env()
expected_keys = ['sys.platform', 'Python', 'CUDA available', 'PyTorch', 'PyTorch compiling details', 'OpenCV', 'MMCV', 'MMCV Compiler', 'MMCV CUDA Compiler']
for key in expected_keys:
assert (key in env_info)
if env_info['CUDA available']:
for key in ['CUDA_HOME', 'NVCC']:
assert (key in env_info)
if (sys.platform != 'win32'):
assert ('GCC' in env_info)
assert (env_info['sys.platform'] == sys.platform)
assert (env_info['Python'] == sys.version.replace('\n', ''))
assert (env_info['MMCV'] == mmcv.__version__)
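# `saved_in_pt1.6.pth` uses the zipfile-based serialization introduced in
# torch 1.6, which older loaders reject; mmcv's load_url wrapper papers
# over part of this, hence the version-gated expectations below.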
def test_load_url():
url1 = 'https://download.openmmlab.com/mmcv/test_data/saved_in_pt1.5.pth'
url2 = 'https://download.openmmlab.com/mmcv/test_data/saved_in_pt1.6.pth'
if (digit_version(TORCH_VERSION) < digit_version('1.7.0')):
model_zoo.load_url(url1)
with pytest.raises(RuntimeError):
model_zoo.load_url(url2)
else:
model_zoo.load_url(url1)
model_zoo.load_url(url2)
load_url(url1)
if (digit_version(TORCH_VERSION) < digit_version('1.5.0')):
with pytest.raises(RuntimeError):
load_url(url2)
else:
load_url(url2)
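# The patches fake an initialized distributed environment at the given
# rank: mmcv's get_logger adds a StreamHandler everywhere but is expected
# to attach the FileHandler only on rank 0 (compare test_get_logger_rank1).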
@patch('torch.distributed.get_rank', (lambda : 0))
@patch('torch.distributed.is_initialized', (lambda : True))
@patch('torch.distributed.is_available', (lambda : True))
def test_get_logger_rank0():
logger = get_logger('rank0.pkg1')
assert isinstance(logger, logging.Logger)
assert (len(logger.handlers) == 1)
assert isinstance(logger.handlers[0], logging.StreamHandler)
assert (logger.handlers[0].level == logging.INFO)
logger = get_logger('rank0.pkg2', log_level=logging.DEBUG)
assert isinstance(logger, logging.Logger)
assert (len(logger.handlers) == 1)
assert (logger.handlers[0].level == logging.DEBUG)
with tempfile.NamedTemporaryFile(delete=False) as f:
logger = get_logger('rank0.pkg3', log_file=f.name)
assert isinstance(logger, logging.Logger)
assert (len(logger.handlers) == 2)
assert isinstance(logger.handlers[0], logging.StreamHandler)
assert isinstance(logger.handlers[1], logging.FileHandler)
logger_pkg3 = get_logger('rank0.pkg3')
assert (id(logger_pkg3) == id(logger))
logging.shutdown()
os.remove(f.name)
    # 'rank0.pkg3' is already initialized, so requesting a child logger
    # returns it bare: it carries no handlers of its own and propagates
    # records up to the parent's handlers
    logger_pkg3_subpkg = get_logger('rank0.pkg3.subpkg')
    assert (logger_pkg3_subpkg.handlers == [])
@patch('torch.distributed.get_rank', (lambda : 1))
@patch('torch.distributed.is_initialized', (lambda : True))
@patch('torch.distributed.is_available', (lambda : True))
def test_get_logger_rank1():
logger = get_logger('rank1.pkg1')
assert isinstance(logger, logging.Logger)
assert (len(logger.handlers) == 1)
assert isinstance(logger.handlers[0], logging.StreamHandler)
assert (logger.handlers[0].level == logging.INFO)
with tempfile.NamedTemporaryFile(delete=False) as f:
logger = get_logger('rank1.pkg2', log_file=f.name)
assert isinstance(logger, logging.Logger)
assert (len(logger.handlers) == 1)
assert (logger.handlers[0].level == logging.INFO)
logging.shutdown()
os.remove(f.name)
def test_print_log_print(capsys):
print_log('welcome', logger=None)
(out, _) = capsys.readouterr()
assert (out == 'welcome\n')
def test_print_log_silent(capsys, caplog):
print_log('welcome', logger='silent')
(out, _) = capsys.readouterr()
assert (out == '')
assert (len(caplog.records) == 0)
def test_print_log_logger(caplog):
print_log('welcome', logger='mmcv')
assert (caplog.record_tuples[(- 1)] == ('mmcv', logging.INFO, 'welcome'))
print_log('welcome', logger='mmcv', level=logging.ERROR)
assert (caplog.record_tuples[(- 1)] == ('mmcv', logging.ERROR, 'welcome'))
with tempfile.NamedTemporaryFile(delete=False) as f:
logger = get_logger('abc', log_file=f.name)
print_log('welcome', logger=logger)
assert (caplog.record_tuples[(- 1)] == ('abc', logging.INFO, 'welcome'))
with open(f.name, 'r') as fin:
log_text = fin.read()
regex_time = '\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2},\\d{3}'
match = re.fullmatch((regex_time + ' - abc - INFO - welcome\\n'), log_text)
assert (match is not None)
logging.shutdown()
os.remove(f.name)
def test_print_log_exception():
with pytest.raises(TypeError):
print_log('welcome', logger=0)
def test_to_ntuple():
single_number = 2
assert (mmcv.utils.to_1tuple(single_number) == (single_number,))
assert (mmcv.utils.to_2tuple(single_number) == (single_number, single_number))
assert (mmcv.utils.to_3tuple(single_number) == (single_number, single_number, single_number))
assert (mmcv.utils.to_4tuple(single_number) == (single_number, single_number, single_number, single_number))
assert (mmcv.utils.to_ntuple(5)(single_number) == (single_number, single_number, single_number, single_number, single_number))
assert (mmcv.utils.to_ntuple(6)(single_number) == (single_number, single_number, single_number, single_number, single_number, single_number))
def test_iter_cast():
assert (mmcv.list_cast([1, 2, 3], int) == [1, 2, 3])
assert (mmcv.list_cast(['1.1', 2, '3'], float) == [1.1, 2.0, 3.0])
assert (mmcv.list_cast([1, 2, 3], str) == ['1', '2', '3'])
assert (mmcv.tuple_cast((1, 2, 3), str) == ('1', '2', '3'))
assert (next(mmcv.iter_cast([1, 2, 3], str)) == '1')
with pytest.raises(TypeError):
mmcv.iter_cast([1, 2, 3], '')
with pytest.raises(TypeError):
mmcv.iter_cast(1, str)
def test_is_seq_of():
assert mmcv.is_seq_of([1.0, 2.0, 3.0], float)
assert mmcv.is_seq_of([(1,), (2,), (3,)], tuple)
assert mmcv.is_seq_of((1.0, 2.0, 3.0), float)
assert mmcv.is_list_of([1.0, 2.0, 3.0], float)
assert (not mmcv.is_seq_of((1.0, 2.0, 3.0), float, seq_type=list))
assert (not mmcv.is_tuple_of([1.0, 2.0, 3.0], float))
assert (not mmcv.is_seq_of([1.0, 2, 3], int))
assert (not mmcv.is_seq_of((1.0, 2, 3), int))
def test_slice_list():
in_list = [1, 2, 3, 4, 5, 6]
assert (mmcv.slice_list(in_list, [1, 2, 3]) == [[1], [2, 3], [4, 5, 6]])
assert (mmcv.slice_list(in_list, [len(in_list)]) == [in_list])
with pytest.raises(TypeError):
mmcv.slice_list(in_list, 2.0)
with pytest.raises(ValueError):
mmcv.slice_list(in_list, [1, 2])
def test_concat_list():
assert (mmcv.concat_list([[1, 2]]) == [1, 2])
assert (mmcv.concat_list([[1, 2], [3, 4, 5], [6]]) == [1, 2, 3, 4, 5, 6])
def test_requires_package(capsys):
@mmcv.requires_package('nnn')
def func_a():
pass
@mmcv.requires_package(['numpy', 'n1', 'n2'])
def func_b():
pass
@mmcv.requires_package('numpy')
def func_c():
return 1
with pytest.raises(RuntimeError):
func_a()
(out, _) = capsys.readouterr()
assert (out == 'Prerequisites "nnn" are required in method "func_a" but not found, please install them first.\n')
with pytest.raises(RuntimeError):
func_b()
(out, _) = capsys.readouterr()
assert (out == 'Prerequisites "n1, n2" are required in method "func_b" but not found, please install them first.\n')
assert (func_c() == 1)
def test_requires_executable(capsys):
@mmcv.requires_executable('nnn')
def func_a():
pass
@mmcv.requires_executable(['ls', 'n1', 'n2'])
def func_b():
pass
@mmcv.requires_executable('mv')
def func_c():
return 1
with pytest.raises(RuntimeError):
func_a()
(out, _) = capsys.readouterr()
assert (out == 'Prerequisites "nnn" are required in method "func_a" but not found, please install them first.\n')
with pytest.raises(RuntimeError):
func_b()
(out, _) = capsys.readouterr()
assert (out == 'Prerequisites "n1, n2" are required in method "func_b" but not found, please install them first.\n')
assert (func_c() == 1)
def test_import_modules_from_strings():
import os.path as osp_
import sys as sys_
(osp, sys) = mmcv.import_modules_from_strings(['os.path', 'sys'])
assert (osp == osp_)
assert (sys == sys_)
osp = mmcv.import_modules_from_strings('os.path')
assert (osp == osp_)
assert (mmcv.import_modules_from_strings(None) is None)
assert (mmcv.import_modules_from_strings([]) is None)
assert (mmcv.import_modules_from_strings('') is None)
with pytest.raises(TypeError):
mmcv.import_modules_from_strings(1)
with pytest.raises(TypeError):
mmcv.import_modules_from_strings([1])
with pytest.raises(ImportError):
mmcv.import_modules_from_strings('_not_implemented_module')
with pytest.warns(UserWarning):
imported = mmcv.import_modules_from_strings('_not_implemented_module', allow_failed_imports=True)
assert (imported is None)
with pytest.warns(UserWarning):
imported = mmcv.import_modules_from_strings(['os.path', '_not_implemented'], allow_failed_imports=True)
assert (imported[0] == osp)
assert (imported[1] is None)
def test_is_method_overridden():
class Base():
def foo1():
pass
def foo2():
pass
class Sub(Base):
def foo1():
pass
assert mmcv.is_method_overridden('foo1', Base, Sub)
assert (not mmcv.is_method_overridden('foo2', Base, Sub))
sub_instance = Sub()
assert mmcv.is_method_overridden('foo1', Base, sub_instance)
assert (not mmcv.is_method_overridden('foo2', Base, sub_instance))
base_instance = Base()
with pytest.raises(AssertionError):
mmcv.is_method_overridden('foo1', base_instance, sub_instance)
def test_has_method():
class Foo():
def __init__(self, name):
self.name = name
def print_name(self):
print(self.name)
foo = Foo('foo')
assert (not has_method(foo, 'name'))
assert has_method(foo, 'print_name')
def test_deprecated_api_warning():
@deprecated_api_warning(name_dict=dict(old_key='new_key'))
def dummy_func(new_key=1):
return new_key
assert (dummy_func(old_key=2) == 2)
with pytest.raises(AssertionError):
dummy_func(old_key=1, new_key=2)
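# mmcv.jit is effectively a pass-through decorator on PyTorch and a
# tracing JIT under parrots; the cache-size assertions are parrots-only
# and guarded by skip_no_parrots (assumed to be defined near the imports).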
class TestJit(object):
def test_add_dict(self):
@mmcv.jit
def add_dict(oper):
rets = (oper['x'] + oper['y'])
return {'result': rets}
def add_dict_pyfunc(oper):
rets = (oper['x'] + oper['y'])
return {'result': rets}
a = torch.rand((3, 4))
b = torch.rand((3, 4))
oper = {'x': a, 'y': b}
rets_t = add_dict(oper)
rets = add_dict_pyfunc(oper)
assert ('result' in rets)
assert (rets_t['result'] == rets['result']).all()
def test_add_list(self):
@mmcv.jit
def add_list(oper, x, y):
rets = {}
for (idx, pair) in enumerate(oper):
rets[f'k{idx}'] = (pair['x'] + pair['y'])
rets[f'k{len(oper)}'] = (x + y)
return rets
def add_list_pyfunc(oper, x, y):
rets = {}
for (idx, pair) in enumerate(oper):
rets[f'k{idx}'] = (pair['x'] + pair['y'])
rets[f'k{len(oper)}'] = (x + y)
return rets
pair_num = 3
oper = []
for _ in range(pair_num):
oper.append({'x': torch.rand((3, 4)), 'y': torch.rand((3, 4))})
a = torch.rand((3, 4))
b = torch.rand((3, 4))
rets = add_list_pyfunc(oper, x=a, y=b)
rets_t = add_list(oper, x=a, y=b)
for idx in range((pair_num + 1)):
assert (f'k{idx}' in rets_t)
assert (rets[f'k{idx}'] == rets_t[f'k{idx}']).all()
@skip_no_parrots
def test_jit_cache(self):
@mmcv.jit
def func(oper):
if (oper['const'] > 1):
return ((oper['x'] * 2) + oper['y'])
else:
return ((oper['x'] * 2) - oper['y'])
def pyfunc(oper):
if (oper['const'] > 1):
return ((oper['x'] * 2) + oper['y'])
else:
return ((oper['x'] * 2) - oper['y'])
assert (len(func._cache._cache) == 0)
oper = {'const': 2, 'x': torch.rand((3, 4)), 'y': torch.rand((3, 4))}
rets_plus = pyfunc(oper)
rets_plus_t = func(oper)
assert (rets_plus == rets_plus_t).all()
assert (len(func._cache._cache) == 1)
oper['const'] = 0.5
rets_minus = pyfunc(oper)
rets_minus_t = func(oper)
assert (rets_minus == rets_minus_t).all()
assert (len(func._cache._cache) == 2)
rets_a = ((rets_minus_t + rets_plus_t) / 4)
assert torch.allclose(oper['x'], rets_a)
@skip_no_parrots
def test_jit_shape(self):
@mmcv.jit
def func(a):
return (a + 1)
assert (len(func._cache._cache) == 0)
a = torch.ones((3, 4))
r = func(a)
assert (r.shape == (3, 4))
assert (r == 2).all()
assert (len(func._cache._cache) == 1)
a = torch.ones((2, 3, 4))
r = func(a)
assert (r.shape == (2, 3, 4))
assert (r == 2).all()
assert (len(func._cache._cache) == 2)
@skip_no_parrots
def test_jit_kwargs(self):
@mmcv.jit
def func(a, b):
return torch.mean(((a - b) * (a - b)))
assert (len(func._cache._cache) == 0)
x = torch.rand((16, 32))
y = torch.rand((16, 32))
func(x, y)
assert (len(func._cache._cache) == 1)
func(x, b=y)
assert (len(func._cache._cache) == 1)
func(b=y, a=x)
assert (len(func._cache._cache) == 1)
def test_jit_derivate(self):
@mmcv.jit(derivate=True)
def func(x, y):
return ((x + 2) * (y - 2))
a = torch.rand((3, 4))
b = torch.rand((3, 4))
a.requires_grad = True
c = func(a, b)
assert c.requires_grad
d = torch.empty_like(c)
d.fill_(1.0)
c.backward(d)
assert torch.allclose(a.grad, (b - 2))
assert (b.grad is None)
a.grad = None
c = func(a, b)
assert c.requires_grad
d = torch.empty_like(c)
d.fill_(2.7)
c.backward(d)
assert torch.allclose(a.grad, (2.7 * (b - 2)))
assert (b.grad is None)
def test_jit_optimize(self):
@mmcv.jit(optimize=True)
def func(a, b):
return torch.mean(((a - b) * (a - b)))
def pyfunc(a, b):
return torch.mean(((a - b) * (a - b)))
a = torch.rand((16, 32))
b = torch.rand((16, 32))
c = func(a, b)
d = pyfunc(a, b)
assert torch.allclose(c, d)
@mmcv.skip_no_elena
def test_jit_coderize(self):
if (not torch.cuda.is_available()):
return
@mmcv.jit(coderize=True)
def func(a, b):
return ((a + b) * (a - b))
def pyfunc(a, b):
return ((a + b) * (a - b))
a = torch.rand((16, 32), device='cuda')
b = torch.rand((16, 32), device='cuda')
c = func(a, b)
d = pyfunc(a, b)
assert torch.allclose(c, d)
def test_jit_value_dependent(self):
@mmcv.jit
def func(a, b):
torch.nonzero(a)
return torch.mean(((a - b) * (a - b)))
def pyfunc(a, b):
torch.nonzero(a)
return torch.mean(((a - b) * (a - b)))
a = torch.rand((16, 32))
b = torch.rand((16, 32))
c = func(a, b)
d = pyfunc(a, b)
assert torch.allclose(c, d)
@skip_no_parrots
def test_jit_check_input(self):
def func(x):
y = torch.rand_like(x)
return (x + y)
a = torch.ones((3, 4))
with pytest.raises(AssertionError):
func = mmcv.jit(func, check_input=(a,))
@skip_no_parrots
def test_jit_partial_shape(self):
@mmcv.jit(full_shape=False)
def func(a, b):
return torch.mean(((a - b) * (a - b)))
def pyfunc(a, b):
return torch.mean(((a - b) * (a - b)))
a = torch.rand((3, 4))
b = torch.rand((3, 4))
assert torch.allclose(func(a, b), pyfunc(a, b))
assert (len(func._cache._cache) == 1)
a = torch.rand((6, 5))
b = torch.rand((6, 5))
assert torch.allclose(func(a, b), pyfunc(a, b))
assert (len(func._cache._cache) == 1)
a = torch.rand((3, 4, 5))
b = torch.rand((3, 4, 5))
assert torch.allclose(func(a, b), pyfunc(a, b))
assert (len(func._cache._cache) == 2)
a = torch.rand((1, 9, 8))
b = torch.rand((1, 9, 8))
assert torch.allclose(func(a, b), pyfunc(a, b))
assert (len(func._cache._cache) == 2)
def test_instance_method(self):
class T(object):
def __init__(self, shape):
self._c = torch.rand(shape)
@mmcv.jit
def test_method(self, x, y):
return ((x * self._c) + y)
shape = (16, 32)
t = T(shape)
a = torch.rand(shape)
b = torch.rand(shape)
res = ((a * t._c) + b)
jit_res = t.test_method(a, b)
assert torch.allclose(res, jit_res)
t = T(shape)
res = ((a * t._c) + b)
jit_res = t.test_method(a, b)
assert torch.allclose(res, jit_res)
def test_is_filepath():
assert mmcv.is_filepath(__file__)
assert mmcv.is_filepath('abc')
assert mmcv.is_filepath(Path('/etc'))
assert (not mmcv.is_filepath(0))
def test_fopen():
assert hasattr(mmcv.fopen(__file__), 'read')
assert hasattr(mmcv.fopen(Path(__file__)), 'read')
def test_check_file_exist():
mmcv.check_file_exist(__file__)
with pytest.raises(FileNotFoundError):
mmcv.check_file_exist('no_such_file.txt')
def test_scandir():
folder = osp.join(osp.dirname(osp.dirname(__file__)), 'data/for_scan')
filenames = ['a.bin', '1.txt', '2.txt', '1.json', '2.json', '3.TXT']
assert (set(mmcv.scandir(folder)) == set(filenames))
assert (set(mmcv.scandir(Path(folder))) == set(filenames))
assert (set(mmcv.scandir(folder, '.txt')) == set([filename for filename in filenames if filename.endswith('.txt')]))
assert (set(mmcv.scandir(folder, ('.json', '.txt'))) == set([filename for filename in filenames if filename.endswith(('.txt', '.json'))]))
assert (set(mmcv.scandir(folder, '.png')) == set())
filenames_recursive = ['a.bin', '1.txt', '2.txt', '1.json', '2.json', '3.TXT', osp.join('sub', '1.json'), osp.join('sub', '1.txt'), '.file']
assert (set(mmcv.scandir(folder, recursive=True)) == set([filename for filename in filenames_recursive if (filename != '.file')]))
assert (set(mmcv.scandir(Path(folder), recursive=True)) == set([filename for filename in filenames_recursive if (filename != '.file')]))
assert (set(mmcv.scandir(folder, '.txt', recursive=True)) == set([filename for filename in filenames_recursive if filename.endswith('.txt')]))
assert (set(mmcv.scandir(folder, '.TXT', recursive=True, case_sensitive=False)) == set([filename for filename in filenames_recursive if filename.endswith(('.txt', '.TXT'))]))
assert (set(mmcv.scandir(folder, ('.TXT', '.JSON'), recursive=True, case_sensitive=False)) == set([filename for filename in filenames_recursive if filename.endswith(('.txt', '.json', '.TXT'))]))
with pytest.raises(TypeError):
list(mmcv.scandir(123))
with pytest.raises(TypeError):
list(mmcv.scandir(folder, 111))
def reset_string_io(io):
io.truncate(0)
io.seek(0)
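# ProgressBar rewrites its line in place with '\r' on the given `file`;
# the tests capture output in a StringIO and clear it between updates
# with reset_string_io above.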
class TestProgressBar():
def test_start(self):
out = StringIO()
bar_width = 20
prog_bar = mmcv.ProgressBar(bar_width=bar_width, file=out)
assert (out.getvalue() == 'completed: 0, elapsed: 0s')
reset_string_io(out)
prog_bar = mmcv.ProgressBar(bar_width=bar_width, start=False, file=out)
assert (out.getvalue() == '')
reset_string_io(out)
prog_bar.start()
assert (out.getvalue() == 'completed: 0, elapsed: 0s')
reset_string_io(out)
prog_bar = mmcv.ProgressBar(10, bar_width=bar_width, file=out)
assert (out.getvalue() == f"[{(' ' * bar_width)}] 0/10, elapsed: 0s, ETA:")
reset_string_io(out)
prog_bar = mmcv.ProgressBar(10, bar_width=bar_width, start=False, file=out)
assert (out.getvalue() == '')
reset_string_io(out)
prog_bar.start()
assert (out.getvalue() == f"[{(' ' * bar_width)}] 0/10, elapsed: 0s, ETA:")
def test_update(self):
out = StringIO()
bar_width = 20
prog_bar = mmcv.ProgressBar(bar_width=bar_width, file=out)
time.sleep(1)
reset_string_io(out)
prog_bar.update()
assert (out.getvalue() == 'completed: 1, elapsed: 1s, 1.0 tasks/s')
reset_string_io(out)
prog_bar = mmcv.ProgressBar(10, bar_width=bar_width, file=out)
time.sleep(1)
reset_string_io(out)
prog_bar.update()
assert (out.getvalue() == f"\r[{(('>' * 2) + (' ' * 18))}] 1/10, 1.0 task/s, elapsed: 1s, ETA:     9s")
def test_adaptive_length(self):
with patch.dict('os.environ', {'COLUMNS': '80'}):
out = StringIO()
bar_width = 20
prog_bar = mmcv.ProgressBar(10, bar_width=bar_width, file=out)
time.sleep(1)
reset_string_io(out)
prog_bar.update()
assert (len(out.getvalue()) == 66)
os.environ['COLUMNS'] = '30'
reset_string_io(out)
prog_bar.update()
assert (len(out.getvalue()) == 48)
os.environ['COLUMNS'] = '60'
reset_string_io(out)
prog_bar.update()
assert (len(out.getvalue()) == 60)
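# Minimal usage sketch of the API exercised above (illustrative, not part of
# the test suite):
# bar = mmcv.ProgressBar(task_num=10)
# for _ in range(10):
#     ...  # do one task
#     bar.update()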
|
def sleep_1s(num):
time.sleep(1)
return num
|
def test_track_progress_list():
out = StringIO()
ret = mmcv.track_progress(sleep_1s, [1, 2, 3], bar_width=3, file=out)
assert (out.getvalue() == '[   ] 0/3, elapsed: 0s, ETA:\r[>  ] 1/3, 1.0 task/s, elapsed: 1s, ETA:     2s\r[>> ] 2/3, 1.0 task/s, elapsed: 2s, ETA:     1s\r[>>>] 3/3, 1.0 task/s, elapsed: 3s, ETA:     0s\n')
assert (ret == [1, 2, 3])
|
def test_track_progress_iterator():
out = StringIO()
ret = mmcv.track_progress(sleep_1s, ((i for i in [1, 2, 3]), 3), bar_width=3, file=out)
assert (out.getvalue() == '[   ] 0/3, elapsed: 0s, ETA:\r[>  ] 1/3, 1.0 task/s, elapsed: 1s, ETA:     2s\r[>> ] 2/3, 1.0 task/s, elapsed: 2s, ETA:     1s\r[>>>] 3/3, 1.0 task/s, elapsed: 3s, ETA:     0s\n')
assert (ret == [1, 2, 3])
|
def test_track_iter_progress():
out = StringIO()
ret = []
for num in mmcv.track_iter_progress([1, 2, 3], bar_width=3, file=out):
ret.append(sleep_1s(num))
assert (out.getvalue() == '[   ] 0/3, elapsed: 0s, ETA:\r[>  ] 1/3, 1.0 task/s, elapsed: 1s, ETA:     2s\r[>> ] 2/3, 1.0 task/s, elapsed: 2s, ETA:     1s\r[>>>] 3/3, 1.0 task/s, elapsed: 3s, ETA:     0s\n')
assert (ret == [1, 2, 3])
|
def test_track_enum_progress():
out = StringIO()
ret = []
count = []
for (i, num) in enumerate(mmcv.track_iter_progress([1, 2, 3], bar_width=3, file=out)):
ret.append(sleep_1s(num))
count.append(i)
assert (out.getvalue() == '[   ] 0/3, elapsed: 0s, ETA:\r[>  ] 1/3, 1.0 task/s, elapsed: 1s, ETA:     2s\r[>> ] 2/3, 1.0 task/s, elapsed: 2s, ETA:     1s\r[>>>] 3/3, 1.0 task/s, elapsed: 3s, ETA:     0s\n')
assert (ret == [1, 2, 3])
assert (count == [0, 1, 2])
|
def test_track_parallel_progress_list():
out = StringIO()
results = mmcv.track_parallel_progress(sleep_1s, [1, 2, 3, 4], 2, bar_width=4, file=out)
assert (results == [1, 2, 3, 4])
|
def test_track_parallel_progress_iterator():
out = StringIO()
results = mmcv.track_parallel_progress(sleep_1s, ((i for i in [1, 2, 3, 4]), 4), 2, bar_width=4, file=out)
assert (results == [1, 2, 3, 4])
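# Note: plain iterators are passed as an (iterable, length) tuple, as in the
# two iterator tests above, since a bare generator exposes no len() for the
# progress bar to report against.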
|
def test_registry():
CATS = mmcv.Registry('cat')
assert (CATS.name == 'cat')
assert (CATS.module_dict == {})
assert (len(CATS) == 0)
@CATS.register_module()
class BritishShorthair():
pass
assert (len(CATS) == 1)
assert (CATS.get('BritishShorthair') is BritishShorthair)
class Munchkin():
pass
CATS.register_module(Munchkin)
assert (len(CATS) == 2)
assert (CATS.get('Munchkin') is Munchkin)
assert ('Munchkin' in CATS)
with pytest.raises(KeyError):
CATS.register_module(Munchkin)
CATS.register_module(Munchkin, force=True)
assert (len(CATS) == 2)
with pytest.raises(KeyError):
@CATS.register_module()
class BritishShorthair():
pass
@CATS.register_module(force=True)
class BritishShorthair():
pass
assert (len(CATS) == 2)
assert (CATS.get('PersianCat') is None)
assert ('PersianCat' not in CATS)
@CATS.register_module(name=['Siamese', 'Siamese2'])
class SiameseCat():
pass
assert (CATS.get('Siamese').__name__ == 'SiameseCat')
assert (CATS.get('Siamese2').__name__ == 'SiameseCat')
class SphynxCat():
pass
CATS.register_module(name='Sphynx', module=SphynxCat)
assert (CATS.get('Sphynx') is SphynxCat)
CATS.register_module(name=['Sphynx1', 'Sphynx2'], module=SphynxCat)
assert (CATS.get('Sphynx2') is SphynxCat)
repr_str = 'Registry(name=cat, items={'
repr_str += "'BritishShorthair': <class 'test_registry.test_registry.<locals>.BritishShorthair'>, "
repr_str += "'Munchkin': <class 'test_registry.test_registry.<locals>.Munchkin'>, "
repr_str += "'Siamese': <class 'test_registry.test_registry.<locals>.SiameseCat'>, "
repr_str += "'Siamese2': <class 'test_registry.test_registry.<locals>.SiameseCat'>, "
repr_str += "'Sphynx': <class 'test_registry.test_registry.<locals>.SphynxCat'>, "
repr_str += "'Sphynx1': <class 'test_registry.test_registry.<locals>.SphynxCat'>, "
repr_str += "'Sphynx2': <class 'test_registry.test_registry.<locals>.SphynxCat'>"
repr_str += '})'
assert (repr(CATS) == repr_str)
with pytest.raises(TypeError):
CATS.register_module(name=7474741, module=SphynxCat)
with pytest.raises(TypeError):
CATS.register_module(0)
with pytest.raises(TypeError):
@CATS.register_module()
def some_method():
pass
with pytest.warns(DeprecationWarning):
CATS.register_module(SphynxCat)
assert (CATS.get('SphynxCat').__name__ == 'SphynxCat')
with pytest.warns(DeprecationWarning):
CATS.register_module(SphynxCat, force=True)
assert (CATS.get('SphynxCat').__name__ == 'SphynxCat')
with pytest.warns(DeprecationWarning):
@CATS.register_module
class NewCat():
pass
assert (CATS.get('NewCat').__name__ == 'NewCat')
with pytest.warns(DeprecationWarning):
CATS.deprecated_register_module(SphynxCat, force=True)
assert (CATS.get('SphynxCat').__name__ == 'SphynxCat')
with pytest.warns(DeprecationWarning):
@CATS.deprecated_register_module
class CuteCat():
pass
assert (CATS.get('CuteCat').__name__ == 'CuteCat')
with pytest.warns(DeprecationWarning):
@CATS.deprecated_register_module(force=True)
class NewCat2():
pass
assert (CATS.get('NewCat2').__name__ == 'NewCat2')
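# Registration patterns covered by this test, condensed (illustrative only):
# @CATS.register_module()                              # key = class name
# @CATS.register_module(name=['Siamese', 'Siamese2'])  # multiple aliases
# CATS.register_module(name='Sphynx', module=SphynxCat)  # non-decorator form
# Re-registering an existing key raises KeyError unless force=True.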
|
def test_multi_scope_registry():
DOGS = mmcv.Registry('dogs')
assert (DOGS.name == 'dogs')
assert (DOGS.scope == 'test_registry')
assert (DOGS.module_dict == {})
assert (len(DOGS) == 0)
@DOGS.register_module()
class GoldenRetriever():
pass
assert (len(DOGS) == 1)
assert (DOGS.get('GoldenRetriever') is GoldenRetriever)
HOUNDS = mmcv.Registry('dogs', parent=DOGS, scope='hound')
@HOUNDS.register_module()
class BloodHound():
pass
assert (len(HOUNDS) == 1)
assert (HOUNDS.get('BloodHound') is BloodHound)
assert (DOGS.get('hound.BloodHound') is BloodHound)
assert (HOUNDS.get('hound.BloodHound') is BloodHound)
LITTLE_HOUNDS = mmcv.Registry('dogs', parent=HOUNDS, scope='little_hound')
@LITTLE_HOUNDS.register_module()
class Dachshund():
pass
assert (len(LITTLE_HOUNDS) == 1)
assert (LITTLE_HOUNDS.get('Dachshund') is Dachshund)
assert (LITTLE_HOUNDS.get('hound.BloodHound') is BloodHound)
assert (HOUNDS.get('little_hound.Dachshund') is Dachshund)
assert (DOGS.get('hound.little_hound.Dachshund') is Dachshund)
MID_HOUNDS = mmcv.Registry('dogs', parent=HOUNDS, scope='mid_hound')
@MID_HOUNDS.register_module()
class Beagle():
pass
assert (MID_HOUNDS.get('Beagle') is Beagle)
assert (HOUNDS.get('mid_hound.Beagle') is Beagle)
assert (DOGS.get('hound.mid_hound.Beagle') is Beagle)
assert (LITTLE_HOUNDS.get('hound.mid_hound.Beagle') is Beagle)
assert (MID_HOUNDS.get('hound.BloodHound') is BloodHound)
assert (MID_HOUNDS.get('hound.Dachshund') is None)
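# Note: child registries attach to a parent via parent= and a scope string;
# lookups then traverse the hierarchy with scope-qualified keys such as
# 'hound.BloodHound' or 'hound.mid_hound.Beagle', as asserted above.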
|
def test_build_from_cfg():
BACKBONES = mmcv.Registry('backbone')
@BACKBONES.register_module()
class ResNet():
def __init__(self, depth, stages=4):
self.depth = depth
self.stages = stages
@BACKBONES.register_module()
class ResNeXt():
def __init__(self, depth, stages=4):
self.depth = depth
self.stages = stages
cfg = dict(type='ResNet', depth=50)
model = mmcv.build_from_cfg(cfg, BACKBONES)
assert isinstance(model, ResNet)
assert ((model.depth == 50) and (model.stages == 4))
cfg = dict(type='ResNet', depth=50)
model = mmcv.build_from_cfg(cfg, BACKBONES, default_args={'stages': 3})
assert isinstance(model, ResNet)
assert ((model.depth == 50) and (model.stages == 3))
cfg = dict(type='ResNeXt', depth=50, stages=3)
model = mmcv.build_from_cfg(cfg, BACKBONES)
assert isinstance(model, ResNeXt)
assert ((model.depth == 50) and (model.stages == 3))
cfg = dict(type=ResNet, depth=50)
model = mmcv.build_from_cfg(cfg, BACKBONES)
assert isinstance(model, ResNet)
assert ((model.depth == 50) and (model.stages == 4))
cfg = dict(depth=50)
model = mmcv.build_from_cfg(cfg, BACKBONES, default_args=dict(type='ResNet'))
assert isinstance(model, ResNet)
assert ((model.depth == 50) and (model.stages == 4))
cfg = dict(depth=50)
model = mmcv.build_from_cfg(cfg, BACKBONES, default_args=dict(type=ResNet))
assert isinstance(model, ResNet)
assert ((model.depth == 50) and (model.stages == 4))
with pytest.raises(TypeError):
cfg = dict(type='VGG')
model = mmcv.build_from_cfg(cfg, 'BACKBONES')
with pytest.raises(KeyError):
cfg = dict(type='VGG')
model = mmcv.build_from_cfg(cfg, BACKBONES)
with pytest.raises(TypeError):
cfg = dict(type='ResNet', depth=50)
model = mmcv.build_from_cfg(cfg, BACKBONES, default_args=1)
with pytest.raises(TypeError):
cfg = dict(type=1000)
model = mmcv.build_from_cfg(cfg, BACKBONES)
with pytest.raises(KeyError, match='must contain the key "type"'):
cfg = dict(depth=50, stages=4)
model = mmcv.build_from_cfg(cfg, BACKBONES)
with pytest.raises(KeyError, match='must contain the key "type"'):
cfg = dict(depth=50)
model = mmcv.build_from_cfg(cfg, BACKBONES, default_args=dict(stages=4))
with pytest.raises(TypeError):
cfg = dict(type='ResNet', depth=50)
model = mmcv.build_from_cfg(cfg, 'BACKBONES')
with pytest.raises(TypeError):
cfg = dict(type='ResNet', depth=50)
model = mmcv.build_from_cfg(cfg, BACKBONES, default_args=0)
with pytest.raises(TypeError):
cfg = dict(type='ResNet', non_existing_arg=50)
model = mmcv.build_from_cfg(cfg, BACKBONES)
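# Summary: build_from_cfg(cfg, registry, default_args) requires a 'type' key
# (a registered name or a class object) in cfg or default_args; remaining
# keys become constructor kwargs, e.g. dict(type='ResNet', depth=50) builds
# ResNet(depth=50).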
|
def test_assert_dict_contains_subset():
dict_obj = {'a': 'test1', 'b': 2, 'c': (4, 6)}
expected_subset = {'a': 'test1', 'b': 2, 'c': (4, 6)}
assert mmcv.assert_dict_contains_subset(dict_obj, expected_subset)
expected_subset = {'a': 'test1', 'b': 2, 'c': (6, 4)}
assert (not mmcv.assert_dict_contains_subset(dict_obj, expected_subset))
expected_subset = {'a': 'test1', 'b': 2, 'c': None}
assert (not mmcv.assert_dict_contains_subset(dict_obj, expected_subset))
expected_subset = {'a': 'test1', 'b': 2, 'd': (4, 6)}
assert (not mmcv.assert_dict_contains_subset(dict_obj, expected_subset))
dict_obj = {'a': 'test1', 'b': 2, 'c': (4, 6), 'd': np.array([[5, 3, 5], [1, 2, 3]])}
expected_subset = {'a': 'test1', 'b': 2, 'c': (4, 6), 'd': np.array([[5, 3, 5], [6, 2, 3]])}
assert (not mmcv.assert_dict_contains_subset(dict_obj, expected_subset))
dict_obj = {'a': 'test1', 'b': 2, 'c': (4, 6), 'd': np.array([[1]])}
expected_subset = {'a': 'test1', 'b': 2, 'c': (4, 6), 'd': np.array([[1]])}
assert mmcv.assert_dict_contains_subset(dict_obj, expected_subset)
if (torch is not None):
dict_obj = {'a': 'test1', 'b': 2, 'c': (4, 6), 'd': torch.tensor([5, 3, 5])}
expected_subset = {'d': torch.tensor([5, 5, 5])}
assert (not mmcv.assert_dict_contains_subset(dict_obj, expected_subset))
expected_subset = {'d': torch.tensor([[5, 3, 5], [4, 1, 2]])}
assert (not mmcv.assert_dict_contains_subset(dict_obj, expected_subset))
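# Note: values are compared with array-aware equality, so numpy arrays or
# torch tensors that differ in values or shape make the subset check fail,
# as exercised above.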
|
def test_assert_attrs_equal():
class TestExample(object):
(a, b, c) = (1, ('wvi', 3), [4.5, 3.14])
def test_func(self):
return self.b
assert mmcv.assert_attrs_equal(TestExample, {'a': 1, 'b': ('wvi', 3), 'c': [4.5, 3.14]})
assert (not mmcv.assert_attrs_equal(TestExample, {'a': 1, 'b': ('wvi', 3), 'c': [4.5, 3.14, 2]}))
assert (not mmcv.assert_attrs_equal(TestExample, {'bc': 54, 'c': [4.5, 3.14]}))
assert mmcv.assert_attrs_equal(TestExample, {'b': ('wvi', 3), 'test_func': TestExample.test_func})
if (torch is not None):
class TestExample(object):
(a, b) = (torch.tensor([1]), torch.tensor([4, 5]))
assert mmcv.assert_attrs_equal(TestExample, {'a': torch.tensor([1]), 'b': torch.tensor([4, 5])})
assert (not mmcv.assert_attrs_equal(TestExample, {'a': torch.tensor([1]), 'b': torch.tensor([4, 6])}))
|
@pytest.mark.parametrize('obj', assert_dict_has_keys_data_1)
@pytest.mark.parametrize('expected_keys, ret_value', assert_dict_has_keys_data_2)
def test_assert_dict_has_keys(obj, expected_keys, ret_value):
assert (mmcv.assert_dict_has_keys(obj, expected_keys) == ret_value)
|
@pytest.mark.parametrize('result_keys', assert_keys_equal_data_1)
@pytest.mark.parametrize('target_keys, ret_value', assert_keys_equal_data_2)
def test_assert_keys_equal(result_keys, target_keys, ret_value):
assert (mmcv.assert_keys_equal(result_keys, target_keys) == ret_value)
|
@pytest.mark.skipif((torch is None), reason='requires torch library')
def test_assert_is_norm_layer():
assert (not mmcv.assert_is_norm_layer(nn.Conv3d(3, 64, 3)))
assert mmcv.assert_is_norm_layer(nn.BatchNorm3d(128))
assert mmcv.assert_is_norm_layer(nn.GroupNorm(8, 64))
assert (not mmcv.assert_is_norm_layer(nn.Sigmoid()))
|
@pytest.mark.skipif((torch is None), reason='requires torch library')
def test_assert_params_all_zeros():
demo_module = nn.Conv2d(3, 64, 3)
nn.init.constant_(demo_module.weight, 0)
nn.init.constant_(demo_module.bias, 0)
assert mmcv.assert_params_all_zeros(demo_module)
nn.init.xavier_normal_(demo_module.weight)
nn.init.constant_(demo_module.bias, 0)
assert (not mmcv.assert_params_all_zeros(demo_module))
demo_module = nn.Linear(2048, 400, bias=False)
nn.init.constant_(demo_module.weight, 0)
assert mmcv.assert_params_all_zeros(demo_module)
nn.init.normal_(demo_module.weight, mean=0, std=0.01)
assert (not mmcv.assert_params_all_zeros(demo_module))
|
def test_check_python_script(capsys):
mmcv.utils.check_python_script('./tests/data/scripts/hello.py zz')
captured = capsys.readouterr().out
assert (captured == 'hello zz!\n')
mmcv.utils.check_python_script('./tests/data/scripts/hello.py agent')
captured = capsys.readouterr().out
assert (captured == 'hello agent!\n')
with pytest.raises(SystemExit):
mmcv.utils.check_python_script('./tests/data/scripts/hello.py li zz')
|
def test_timer_init():
timer = mmcv.Timer(start=False)
assert (not timer.is_running)
timer.start()
assert timer.is_running
timer = mmcv.Timer()
assert timer.is_running
|
def test_timer_run():
timer = mmcv.Timer()
time.sleep(1)
assert (abs((timer.since_start() - 1)) < 0.01)
time.sleep(1)
assert (abs((timer.since_last_check() - 1)) < 0.01)
assert (abs((timer.since_start() - 2)) < 0.01)
timer = mmcv.Timer(False)
with pytest.raises(mmcv.TimerError):
timer.since_start()
with pytest.raises(mmcv.TimerError):
timer.since_last_check()
|
def test_timer_context(capsys):
with mmcv.Timer():
time.sleep(1)
(out, _) = capsys.readouterr()
assert (abs((float(out) - 1)) < 0.01)
with mmcv.Timer(print_tmpl='time: {:.1f}s'):
time.sleep(1)
(out, _) = capsys.readouterr()
assert (out == 'time: 1.0s\n')
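# Note: mmcv.Timer also works as a context manager that prints the elapsed
# time on exit, formatted with print_tmpl when one is given.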
|
@pytest.mark.skipif((digit_version(torch.__version__) < digit_version('1.6.0')), reason='torch.jit.is_tracing is not available before 1.6.0')
def test_is_jit_tracing():
def foo(x):
if is_jit_tracing():
return x
else:
return x.tolist()
x = torch.rand(3)
assert isinstance(foo(x), list)
traced_foo = torch.jit.trace(foo, (torch.rand(1),))
assert isinstance(traced_foo(x), torch.Tensor)
|
def test_digit_version():
assert (digit_version('0.2.16') == (0, 2, 16, 0, 0, 0))
assert (digit_version('1.2.3') == (1, 2, 3, 0, 0, 0))
assert (digit_version('1.2.3rc0') == (1, 2, 3, 0, (- 1), 0))
assert (digit_version('1.2.3rc1') == (1, 2, 3, 0, (- 1), 1))
assert (digit_version('1.0rc0') == (1, 0, 0, 0, (- 1), 0))
assert (digit_version('1.0') == digit_version('1.0.0'))
assert (digit_version('1.5.0+cuda90_cudnn7.6.3_lms') == digit_version('1.5'))
assert (digit_version('1.0.0dev') < digit_version('1.0.0a'))
assert (digit_version('1.0.0a') < digit_version('1.0.0a1'))
assert (digit_version('1.0.0a') < digit_version('1.0.0b'))
assert (digit_version('1.0.0b') < digit_version('1.0.0rc'))
assert (digit_version('1.0.0rc1') < digit_version('1.0.0'))
assert (digit_version('1.0.0') < digit_version('1.0.0post'))
assert (digit_version('1.0.0post') < digit_version('1.0.0post1'))
assert (digit_version('v1') == (1, 0, 0, 0, 0, 0))
assert (digit_version('v1.1.5') == (1, 1, 5, 0, 0, 0))
with pytest.raises(AssertionError):
digit_version('a')
with pytest.raises(AssertionError):
digit_version('1x')
with pytest.raises(AssertionError):
digit_version('1.x')
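# Ordering semantics asserted above: dev < a (alpha) < b (beta) < rc <
# release < post, with numeric suffixes breaking ties (1.0.0a < 1.0.0a1,
# 1.0.0post < 1.0.0post1).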
|
def test_parse_version_info():
assert (parse_version_info('0.2.16') == (0, 2, 16, 0, 0, 0))
assert (parse_version_info('1.2.3') == (1, 2, 3, 0, 0, 0))
assert (parse_version_info('1.2.3rc0') == (1, 2, 3, 0, 'rc', 0))
assert (parse_version_info('1.2.3rc1') == (1, 2, 3, 0, 'rc', 1))
assert (parse_version_info('1.0rc0') == (1, 0, 0, 0, 'rc', 0))
|
def _mock_cmd_success(cmd):
return '3b46d33e90c397869ad5103075838fdfc9812aa0'.encode('ascii')
|
def _mock_cmd_fail(cmd):
raise OSError
|
def test_get_git_hash():
with patch('mmcv.utils.version_utils._minimal_ext_cmd', _mock_cmd_success):
assert (get_git_hash() == '3b46d33e90c397869ad5103075838fdfc9812aa0')
assert (get_git_hash(digits=6) == '3b46d3')
assert (get_git_hash(digits=100) == get_git_hash())
with patch('mmcv.utils.version_utils._minimal_ext_cmd', _mock_cmd_fail):
assert (get_git_hash() == 'unknown')
assert (get_git_hash(fallback='n/a') == 'n/a')
|
class TestVideoEditor():
@classmethod
def setup_class(cls):
cls.video_path = osp.join(osp.dirname(__file__), '../data/test.mp4')
cls.num_frames = 168
@pytest.mark.skipif((platform.system() == 'Windows'), reason='skip windows')
def test_cut_concat_video(self):
part1_file = osp.join(tempfile.gettempdir(), '.mmcv_test1.mp4')
part2_file = osp.join(tempfile.gettempdir(), '.mmcv_test2.mp4')
mmcv.cut_video(self.video_path, part1_file, end=3, vcodec='h264')
mmcv.cut_video(self.video_path, part2_file, start=3, vcodec='h264')
v1 = mmcv.VideoReader(part1_file)
v2 = mmcv.VideoReader(part2_file)
assert (len(v1) == 75)
assert (len(v2) == (self.num_frames - 75))
out_file = osp.join(tempfile.gettempdir(), '.mmcv_test.mp4')
mmcv.concat_video([part1_file, part2_file], out_file)
v = mmcv.VideoReader(out_file)
assert (len(v) == self.num_frames)
os.remove(part1_file)
os.remove(part2_file)
os.remove(out_file)
@pytest.mark.skipif((platform.system() == 'Windows'), reason='skip windows')
def test_resize_video(self):
out_file = osp.join(tempfile.gettempdir(), '.mmcv_test.mp4')
mmcv.resize_video(self.video_path, out_file, (200, 100), log_level='panic')
v = mmcv.VideoReader(out_file)
assert (v.resolution == (200, 100))
os.remove(out_file)
mmcv.resize_video(self.video_path, out_file, ratio=2)
v = mmcv.VideoReader(out_file)
assert (v.resolution == ((294 * 2), (240 * 2)))
os.remove(out_file)
mmcv.resize_video(self.video_path, out_file, (1000, 480), keep_ar=True)
v = mmcv.VideoReader(out_file)
assert (v.resolution == ((294 * 2), (240 * 2)))
os.remove(out_file)
mmcv.resize_video(self.video_path, out_file, ratio=(2, 1.5), keep_ar=True)
v = mmcv.VideoReader(out_file)
assert (v.resolution == ((294 * 2), 360))
os.remove(out_file)
|
class TestCache():
def test_init(self):
with pytest.raises(ValueError):
mmcv.Cache(0)
cache = mmcv.Cache(100)
assert (cache.capacity == 100)
assert (cache.size == 0)
def test_put(self):
cache = mmcv.Cache(3)
for i in range(1, 4):
cache.put(f'k{i}', i)
assert (cache.size == i)
assert (cache._cache == OrderedDict([('k1', 1), ('k2', 2), ('k3', 3)]))
cache.put('k4', 4)
assert (cache.size == 3)
assert (cache._cache == OrderedDict([('k2', 2), ('k3', 3), ('k4', 4)]))
cache.put('k2', 2)
assert (cache._cache == OrderedDict([('k2', 2), ('k3', 3), ('k4', 4)]))
def test_get(self):
cache = mmcv.Cache(3)
assert (cache.get('key_none') is None)
assert (cache.get('key_none', 0) == 0)
cache.put('k1', 1)
assert (cache.get('k1') == 1)
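# Note: mmcv.Cache evicts the oldest entry once capacity is exceeded ('k1'
# is dropped when 'k4' arrives above); putting an existing key leaves the
# cache unchanged, and get() returns the given default (or None) on a miss.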
|
class TestVideoReader():
@classmethod
def setup_class(cls):
cls.video_path = osp.join(osp.dirname(__file__), '../data/test.mp4')
cls.num_frames = 168
cls.video_url = 'https://www.learningcontainer.com/wp-content/uploads/2020/05/sample-mp4-file.mp4'
def test_load(self):
v = mmcv.VideoReader(self.video_path)
assert (v.width == 294)
assert (v.height == 240)
assert (v.fps == 25)
assert (v.frame_cnt == self.num_frames)
assert (len(v) == self.num_frames)
assert v.opened
import cv2
assert isinstance(v.vcap, type(cv2.VideoCapture()))
v = mmcv.VideoReader(self.video_url)
assert (v.width == 320)
assert (v.height == 240)
assert (v.fps == 15)
assert (v.frame_cnt == 1889)
assert (len(v) == 1889)
assert v.opened
assert isinstance(v.vcap, type(cv2.VideoCapture()))
def test_read(self):
v = mmcv.VideoReader(self.video_path)
img = v.read()
assert (int(round(img.mean())) == 94)
img = v.get_frame(63)
assert (int(round(img.mean())) == 94)
img = v[64]
assert (int(round(img.mean())) == 205)
img = v[(- 104)]
assert (int(round(img.mean())) == 205)
img = v[63]
assert (int(round(img.mean())) == 94)
img = v[(- 105)]
assert (int(round(img.mean())) == 94)
img = v.read()
assert (int(round(img.mean())) == 205)
with pytest.raises(IndexError):
v.get_frame((self.num_frames + 1))
with pytest.raises(IndexError):
v[((- self.num_frames) - 1)]
def test_slice(self):
v = mmcv.VideoReader(self.video_path)
imgs = v[(- 105):(- 103)]
assert (int(round(imgs[0].mean())) == 94)
assert (int(round(imgs[1].mean())) == 205)
assert (len(imgs) == 2)
imgs = v[63:65]
assert (int(round(imgs[0].mean())) == 94)
assert (int(round(imgs[1].mean())) == 205)
assert (len(imgs) == 2)
imgs = v[64:62:(- 1)]
assert (int(round(imgs[0].mean())) == 205)
assert (int(round(imgs[1].mean())) == 94)
assert (len(imgs) == 2)
imgs = v[:5]
assert (len(imgs) == 5)
for img in imgs:
assert (int(round(img.mean())) == 94)
imgs = v[165:]
assert (len(imgs) == 3)
for img in imgs:
assert (int(round(img.mean())) == 0)
imgs = v[(- 3):]
assert (len(imgs) == 3)
for img in imgs:
assert (int(round(img.mean())) == 0)
def test_current_frame(self):
v = mmcv.VideoReader(self.video_path)
assert (v.current_frame() is None)
v.read()
img = v.current_frame()
assert (int(round(img.mean())) == 94)
def test_position(self):
v = mmcv.VideoReader(self.video_path)
assert (v.position == 0)
for _ in range(10):
v.read()
assert (v.position == 10)
v.get_frame(99)
assert (v.position == 100)
def test_iterator(self):
cnt = 0
for img in mmcv.VideoReader(self.video_path):
cnt += 1
assert (img.shape == (240, 294, 3))
assert (cnt == self.num_frames)
def test_with(self):
with mmcv.VideoReader(self.video_path) as v:
assert v.opened
assert (not v.opened)
def test_cvt2frames(self):
v = mmcv.VideoReader(self.video_path)
frame_dir = tempfile.mkdtemp()
v.cvt2frames(frame_dir)
assert osp.isdir(frame_dir)
for i in range(self.num_frames):
filename = f'{frame_dir}/{i:06d}.jpg'
assert osp.isfile(filename)
os.remove(filename)
v = mmcv.VideoReader(self.video_path)
v.cvt2frames(frame_dir, show_progress=False)
assert osp.isdir(frame_dir)
for i in range(self.num_frames):
filename = f'{frame_dir}/{i:06d}.jpg'
assert osp.isfile(filename)
os.remove(filename)
v = mmcv.VideoReader(self.video_path)
v.cvt2frames(frame_dir, file_start=100, filename_tmpl='{:03d}.JPEG', start=100, max_num=20)
assert osp.isdir(frame_dir)
for i in range(100, 120):
filename = f'{frame_dir}/{i:03d}.JPEG'
assert osp.isfile(filename)
os.remove(filename)
shutil.rmtree(frame_dir)
def test_frames2video(self):
v = mmcv.VideoReader(self.video_path)
frame_dir = tempfile.mkdtemp()
v.cvt2frames(frame_dir)
assert osp.isdir(frame_dir)
for i in range(self.num_frames):
filename = f'{frame_dir}/{i:06d}.jpg'
assert osp.isfile(filename)
out_filename = osp.join(tempfile.gettempdir(), 'mmcv_test.avi')
mmcv.frames2video(frame_dir, out_filename)
v = mmcv.VideoReader(out_filename)
assert (v.fps == 30)
assert (len(v) == self.num_frames)
mmcv.frames2video(frame_dir, out_filename, fps=25, start=10, end=50, show_progress=False)
with mmcv.VideoReader(out_filename) as v:
assert (v.fps == 25)
assert (len(v) == 40)
for i in range(self.num_frames):
filename = f'{frame_dir}/{i:06d}.jpg'
os.remove(filename)
shutil.rmtree(frame_dir)
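# Note: mmcv.VideoReader behaves like a sequence of frames: it supports
# indexing (including negative indices), slicing, iteration, and use as a
# context manager that releases the capture on exit, as the tests above show.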
|
def test_color():
assert (mmcv.color_val(mmcv.Color.blue) == (255, 0, 0))
assert (mmcv.color_val('green') == (0, 255, 0))
assert (mmcv.color_val((1, 2, 3)) == (1, 2, 3))
assert (mmcv.color_val(100) == (100, 100, 100))
assert (mmcv.color_val(np.zeros(3, dtype=int)) == (0, 0, 0))
with pytest.raises(TypeError):
mmcv.color_val([255, 255, 255])
with pytest.raises(TypeError):
mmcv.color_val(1.0)
with pytest.raises(AssertionError):
mmcv.color_val((0, 0, 500))
|
def digit_version(version_str):
version_digits = []
for x in version_str.split('.'):
if x.isdigit():
version_digits.append(int(x))
elif (x.find('rc') != (- 1)):
patch_version = x.split('rc')
version_digits.append((int(patch_version[0]) - 1))
version_digits.append(int(patch_version[1]))
return version_digits
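# Illustration of the rc handling above: '1.2.3rc1' splits into
# ['1', '2', '3rc1'], and '3rc1' contributes (3 - 1) and 1, yielding
# [1, 2, 2, 1], so release candidates sort below the final release.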
|
def init_detector(config, checkpoint=None, device='cuda:0', cfg_options=None):
'''Initialize a detector from config file.

Args:
    config (str or :obj:`mmcv.Config`): Config file path or the config
        object.
    checkpoint (str, optional): Checkpoint path. If left as None, the model
        will not load any weights.
    device (str, optional): Device the model is put on. Default: 'cuda:0'.
    cfg_options (dict): Options to override some settings in the used
        config.

Returns:
    nn.Module: The constructed detector.
'''
if isinstance(config, str):
config = mmcv.Config.fromfile(config)
elif (not isinstance(config, mmcv.Config)):
raise TypeError(f'config must be a filename or Config object, but got {type(config)}')
if (cfg_options is not None):
config.merge_from_dict(cfg_options)
if ('pretrained' in config.model):
config.model.pretrained = None
elif ('init_cfg' in config.model.backbone):
config.model.backbone.init_cfg = None
config.model.train_cfg = None
model = build_detector(config.model, test_cfg=config.get('test_cfg'))
if (checkpoint is not None):
checkpoint = load_checkpoint(model, checkpoint, map_location='cpu')
if ('CLASSES' in checkpoint.get('meta', {})):
model.CLASSES = checkpoint['meta']['CLASSES']
else:
warnings.simplefilter('once')
warnings.warn("Class names are not saved in the checkpoint's meta data, use COCO classes by default.")
model.CLASSES = get_classes('coco')
model.cfg = config
model.to(device)
model.eval()
return model
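# Usage sketch (config and checkpoint paths are placeholders):
# model = init_detector('configs/faster_rcnn.py', 'ckpt.pth', device='cpu')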
|
class LoadImage():
'''Deprecated.

A simple pipeline to load image.
'''
def __call__(self, results):
'''Call function to load images into results.

Args:
    results (dict): A result dict contains the file name
        of the image to be read.

Returns:
    dict: ``results`` will be returned containing loaded image.
'''
warnings.simplefilter('once')
warnings.warn('`LoadImage` is deprecated and will be removed in future releases. You may use `LoadImageFromWebcam` from `mmdet.datasets.pipelines.` instead.')
if isinstance(results['img'], str):
results['filename'] = results['img']
results['ori_filename'] = results['img']
else:
results['filename'] = None
results['ori_filename'] = None
img = mmcv.imread(results['img'])
results['img'] = img
results['img_fields'] = ['img']
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
return results
|
def inference_detector(model, imgs):
'''Inference image(s) with the detector.

Args:
    model (nn.Module): The loaded detector.
    imgs (str/ndarray or list[str/ndarray] or tuple[str/ndarray]):
        Either image files or loaded images.

Returns:
    If imgs is a list or tuple, the same length list type results
    will be returned, otherwise return the detection results directly.
'''
if isinstance(imgs, (list, tuple)):
is_batch = True
else:
imgs = [imgs]
is_batch = False
cfg = model.cfg
device = next(model.parameters()).device
if isinstance(imgs[0], np.ndarray):
cfg = cfg.copy()
cfg.data.test.pipeline[0].type = 'LoadImageFromWebcam'
cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline)
test_pipeline = Compose(cfg.data.test.pipeline)
datas = []
for img in imgs:
if isinstance(img, np.ndarray):
data = dict(img=img)
else:
data = dict(img_info=dict(filename=img), img_prefix=None)
data = test_pipeline(data)
datas.append(data)
data = collate(datas, samples_per_gpu=len(imgs))
data['img_metas'] = [img_metas.data[0] for img_metas in data['img_metas']]
data['img'] = [img.data[0] for img in data['img']]
if next(model.parameters()).is_cuda:
data = scatter(data, [device])[0]
else:
for m in model.modules():
assert (not isinstance(m, RoIPool)), 'CPU inference with RoIPool is not supported currently.'
with torch.no_grad():
results = model(return_loss=False, rescale=True, **data)
if (not is_batch):
return results[0]
else:
return results
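# Note: a single image (path or ndarray) returns one result while a
# list/tuple returns a list of results; ndarray inputs switch the first
# pipeline step to 'LoadImageFromWebcam' so decoded frames are accepted
# directly.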
|
def show_result_pyplot(model, img, result, score_thr=0.3, title='result', wait_time=0, palette=None):
'''Visualize the detection results on the image.

Args:
    model (nn.Module): The loaded detector.
    img (str or np.ndarray): Image filename or loaded image.
    result (tuple[list] or list): The detection result, can be either
        (bbox, segm) or just bbox.
    score_thr (float): The threshold to visualize the bboxes and masks.
    title (str): Title of the pyplot figure.
    wait_time (float): Value of waitKey param. Default: 0.
    palette (optional): Color used for the bboxes and masks; forwarded to
        ``show_result`` as ``bbox_color`` and ``mask_color``. Default: None.
'''
if hasattr(model, 'module'):
model = model.module
model.show_result(img, result, score_thr=score_thr, show=True, wait_time=wait_time, win_name=title, bbox_color=palette, text_color=(200, 200, 200), mask_color=palette)
|