code
stringlengths
17
6.64M
def test_ttsr_dict():
    """Build TTSRDiscriminator from a config dict and check output shape."""
    config = dict(type='TTSRDiscriminator', in_channels=3, in_size=160)
    disc = build_component(config)
    disc.init_weights(pretrained=None)

    imgs = torch.rand((2, 3, 160, 160))
    pred = disc(imgs)
    assert pred.shape == (2, 1)

    if torch.cuda.is_available():
        disc.init_weights(pretrained=None)
        disc = disc.cuda()
        pred = disc(imgs.cuda())
        assert pred.shape == (2, 1)

    # `pretrained` only accepts a str or None.
    with pytest.raises(TypeError):
        disc.init_weights(pretrained=[1])
def test_patch_discriminator():
    """Check PatchDiscriminator output shapes and config validation."""
    # RGB input with batch norm.
    config = dict(
        type='PatchDiscriminator',
        in_channels=3,
        base_channels=64,
        num_conv=3,
        norm_cfg=dict(type='BN'),
        init_cfg=dict(type='normal', gain=0.02))
    disc = build_component(config)
    disc.init_weights(pretrained=None)
    img = _demo_inputs((1, 3, 64, 64))
    pred = disc(img)
    assert pred.shape == (1, 1, 6, 6)
    if torch.cuda.is_available():
        disc.init_weights(pretrained=None)
        disc = disc.cuda()
        pred = disc(img.cuda())
        assert pred.shape == (1, 1, 6, 6)
    with pytest.raises(TypeError):
        disc.init_weights(pretrained=[1])

    # Grayscale input with instance norm.
    config = dict(
        type='PatchDiscriminator',
        in_channels=1,
        base_channels=64,
        num_conv=3,
        norm_cfg=dict(type='IN'),
        init_cfg=dict(type='normal', gain=0.02))
    disc = build_component(config)
    disc.init_weights(pretrained=None)
    img = _demo_inputs((1, 1, 64, 64))
    pred = disc(img)
    assert pred.shape == (1, 1, 6, 6)
    if torch.cuda.is_available():
        disc.init_weights(pretrained=None)
        disc = disc.cuda()
        pred = disc(img.cuda())
        assert pred.shape == (1, 1, 6, 6)
    with pytest.raises(TypeError):
        disc.init_weights(pretrained=[1])

    # Invalid norm configs must be rejected at build time.
    bad_cfg = copy.deepcopy(config)
    bad_cfg['norm_cfg'] = None
    with pytest.raises(AssertionError):
        _ = build_component(bad_cfg)
    bad_cfg['norm_cfg'] = dict(tp='BN')
    with pytest.raises(AssertionError):
        _ = build_component(bad_cfg)
def test_smpatch_discriminator():
    """SoftMaskPatchDiscriminator output shapes for RGB and gray inputs."""
    # RGB input.
    config = dict(
        type='SoftMaskPatchDiscriminator',
        in_channels=3,
        base_channels=64,
        num_conv=3,
        with_spectral_norm=True)
    disc = build_component(config)
    disc.init_weights(pretrained=None)
    img = _demo_inputs((1, 3, 64, 64))
    pred = disc(img)
    assert pred.shape == (1, 1, 6, 6)
    if torch.cuda.is_available():
        disc.init_weights(pretrained=None)
        disc = disc.cuda()
        pred = disc(img.cuda())
        assert pred.shape == (1, 1, 6, 6)
    with pytest.raises(TypeError):
        disc.init_weights(pretrained=[1])

    # Grayscale input.
    config = dict(
        type='SoftMaskPatchDiscriminator',
        in_channels=1,
        base_channels=64,
        num_conv=3,
        with_spectral_norm=True)
    disc = build_component(config)
    disc.init_weights(pretrained=None)
    img = _demo_inputs((1, 1, 64, 64))
    pred = disc(img)
    assert pred.shape == (1, 1, 6, 6)
    if torch.cuda.is_available():
        disc.init_weights(pretrained=None)
        disc = disc.cuda()
        pred = disc(img.cuda())
        assert pred.shape == (1, 1, 6, 6)
    with pytest.raises(TypeError):
        disc.init_weights(pretrained=[1])
def _demo_inputs(input_shape=(1, 3, 64, 64)): 'Create a superset of inputs needed to run backbone.\n\n Args:\n input_shape (tuple): input batch dimensions.\n Default: (1, 3, 64, 64).\n\n Returns:\n imgs: (Tensor): Images in FloatTensor with desired shapes.\n ' imgs = np.random.random(input_shape) imgs = torch.FloatTensor(imgs) return imgs
def test_max_feature():
    """MaxFeature preserves shapes for conv2d/linear and rejects others."""
    conv2d = MaxFeature(16, 16, filter_type='conv2d')
    x_conv = torch.rand(3, 16, 16, 16)
    y_conv = conv2d(x_conv)
    assert y_conv.shape == (3, 16, 16, 16)

    linear = MaxFeature(16, 16, filter_type='linear')
    x_lin = torch.rand(3, 16)
    y_lin = linear(x_lin)
    assert y_lin.shape == (3, 16)

    if torch.cuda.is_available():
        x_conv = x_conv.cuda()
        x_lin = x_lin.cuda()
        conv2d = conv2d.cuda()
        linear = linear.cuda()
        y_conv = conv2d(x_conv)
        assert y_conv.shape == (3, 16, 16, 16)
        y_lin = linear(x_lin)
        assert y_lin.shape == (3, 16)

    # Only 'conv2d' and 'linear' filter types are supported.
    with pytest.raises(ValueError):
        MaxFeature(12, 12, filter_type='conv1d')
def test_light_cnn():
    """LightCNN forward returns one score per sample."""
    config = dict(type='LightCNN', in_channels=3)
    model = build_component(config)
    model.init_weights(pretrained=None)

    batch = torch.rand((2, 3, 128, 128))
    score = model(batch)
    assert score.shape == (2, 1)

    if torch.cuda.is_available():
        model.init_weights(pretrained=None)
        model = model.cuda()
        score = model(batch.cuda())
        assert score.shape == (2, 1)

    # `pretrained` only accepts a str or None.
    with pytest.raises(TypeError):
        model.init_weights(pretrained=[1])
def test_multi_layer_disc():
    """MultiLayerDiscriminator: arg validation, shapes and layer configs."""
    # Invalid arguments are rejected.
    with pytest.raises(AssertionError):
        disc = MultiLayerDiscriminator(
            3, 236, fc_in_channels=(-100), out_act_cfg=None)
    with pytest.raises(TypeError):
        disc = MultiLayerDiscriminator(3, 256, num_convs=3, stride_list=(1, 2))

    # Fully-convolutional variant.
    img = torch.randn(1, 3, 256, 256)
    disc = MultiLayerDiscriminator(
        in_channels=3, max_channels=256, fc_in_channels=None)
    disc.init_weights()
    pred = disc(img)
    assert pred.shape == (1, 256, 8, 8)

    # With a final fc head.
    disc = MultiLayerDiscriminator(
        in_channels=3, max_channels=256, fc_in_channels=100)
    assert isinstance(disc.fc.activate, nn.ReLU)

    disc = MultiLayerDiscriminator(3, 236, fc_in_channels=None)
    assert disc.with_out_act
    assert not disc.with_fc
    assert isinstance(disc.conv5.activate, nn.ReLU)

    disc = MultiLayerDiscriminator(
        3, 236, fc_in_channels=None, out_act_cfg=None)
    assert not disc.conv5.with_activation
    with pytest.raises(TypeError):
        disc.init_weights(pretrained=dict(igccc=4396))

    # Spectral norm on convs and fc.
    img = torch.randn(1, 3, 16, 16)
    disc = MultiLayerDiscriminator(
        in_channels=3,
        max_channels=256,
        num_convs=2,
        fc_in_channels=4 * 4 * 128,
        fc_out_channels=10,
        with_spectral_norm=True)
    disc.init_weights()
    pred = disc(img)
    assert pred.shape == (1, 10)
    assert disc.conv1.with_spectral_norm
    assert disc.conv2.with_spectral_norm
    assert hasattr(disc.fc.linear, 'weight_orig')

    # Custom norm/activation with extra output convs.
    num_convs = 3
    disc = MultiLayerDiscriminator(
        in_channels=64,
        max_channels=512,
        num_convs=num_convs,
        kernel_size=4,
        norm_cfg=dict(type='BN'),
        act_cfg=dict(type='LeakyReLU', negative_slope=0.2),
        out_act_cfg=dict(type='ReLU'),
        with_input_norm=False,
        with_out_convs=True)
    assert not disc.conv1.with_norm
    assert isinstance(disc.conv1.activate, nn.LeakyReLU)
    assert disc.conv1.stride == (2, 2)
    for idx in range(1, num_convs):
        layer = getattr(disc, f'conv{idx + 1}')
        assert layer.with_norm
        assert isinstance(layer.activate, nn.LeakyReLU)
        assert layer.stride == (2, 2)
    assert disc.conv4.with_norm
    assert disc.conv4.with_activation
    assert disc.conv4.stride == (1, 1)
    assert not disc.conv5.with_norm
    assert not disc.conv5.with_activation
    assert disc.conv5.stride == (1, 1)
def test_unet_disc_with_spectral_norm():
    """UNet discriminator runs forward and validates `pretrained` type."""
    model = UNetDiscriminatorWithSpectralNorm(in_channels=3)
    img = torch.randn(1, 3, 16, 16)
    model(img)
    with pytest.raises(TypeError):
        model.init_weights(pretrained=233)

    if torch.cuda.is_available():
        model = model.cuda()
        img = img.cuda()
        model(img)
        with pytest.raises(TypeError):
            model.init_weights(pretrained=233)
def assert_dict_keys_equal(dictionary, target_keys):
    """Check if the keys of the dictionary is equal to the target key set."""
    assert isinstance(dictionary, dict)
    expected = set(target_keys)
    actual = set(dictionary)
    assert actual == expected
def assert_tensor_with_shape(tensor, shape):
    """Check if the shape of the tensor is equal to the target shape."""
    assert isinstance(tensor, torch.Tensor)
    assert tensor.shape == shape
def test_plain_refiner():
    """Test PlainRefiner."""

    def _check(cuda=False):
        model = PlainRefiner()
        model.init_weights()
        model.train()
        if cuda:
            model.cuda()
        merged, alpha, trimap, raw_alpha = _demo_inputs_pair(cuda=cuda)
        prediction = model(
            torch.cat([merged, raw_alpha.sigmoid()], 1), raw_alpha)
        assert_tensor_with_shape(prediction, torch.Size([1, 1, 64, 64]))

    _check(cuda=False)
    if torch.cuda.is_available():
        _check(cuda=True)
def _demo_inputs_pair(img_shape=(64, 64), batch_size=1, cuda=False): '\n Create a superset of inputs needed to run refiner.\n\n Args:\n img_shape (tuple): shape of the input image.\n batch_size (int): batch size of the input batch.\n cuda (bool): whether transfer input into gpu.\n ' color_shape = (batch_size, 3, img_shape[0], img_shape[1]) gray_shape = (batch_size, 1, img_shape[0], img_shape[1]) merged = torch.from_numpy(np.random.random(color_shape).astype(np.float32)) alpha = torch.from_numpy(np.random.random(gray_shape).astype(np.float32)) trimap = torch.from_numpy(np.random.random(gray_shape).astype(np.float32)) raw_alpha = torch.from_numpy(np.random.random(gray_shape).astype(np.float32)) if cuda: merged = merged.cuda() alpha = alpha.cuda() trimap = trimap.cuda() raw_alpha = raw_alpha.cuda() return (merged, alpha, trimap, raw_alpha)
def test_mlp_refiner():
    """MLPRefiner forward matches target shape and supports a train step."""
    config = dict(
        type='MLPRefiner', in_dim=8, out_dim=3, hidden_list=[8, 8, 8, 8])
    mlp = build_component(config)
    assert mlp.__class__.__name__ == 'MLPRefiner'

    inputs = torch.rand(2, 8)
    targets = torch.rand(2, 3)
    if torch.cuda.is_available():
        inputs = inputs.cuda()
        targets = targets.cuda()
        mlp = mlp.cuda()
    data_batch = {'in': inputs, 'target': targets}

    criterion = nn.L1Loss()
    optimizer = torch.optim.Adam(mlp.parameters(), lr=0.0001)
    output = mlp.forward(data_batch['in'])
    assert output.shape == data_batch['target'].shape

    # One optimization step must run without error.
    loss = criterion(output, data_batch['target'])
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
class TestBlur:
    """Tests for the Blur module (CUDA only)."""

    @classmethod
    def setup_class(cls):
        cls.kernel = [1, 3, 3, 1]
        cls.pad = (1, 1)

    @pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
    def test_blur_cuda(self):
        blur = Blur(self.kernel, self.pad)
        feat = torch.randn((2, 3, 8, 8))
        out = blur(feat)
        assert out.shape == (2, 3, 7, 7)
class TestModStyleConv:
    """Tests for ModulatedStyleConv."""

    @classmethod
    def setup_class(cls):
        cls.default_cfg = dict(
            in_channels=3,
            out_channels=1,
            kernel_size=3,
            style_channels=5,
            upsample=True)

    def test_mod_styleconv_cpu(self):
        conv = ModulatedStyleConv(**self.default_cfg)
        feat = torch.randn((2, 3, 4, 4))
        style = torch.randn((2, 5))
        out = conv(feat, style)
        assert out.shape == (2, 1, 8, 8)

        # Without upsampling the spatial size is preserved.
        cfg = deepcopy(self.default_cfg)
        cfg['upsample'] = False
        conv = ModulatedStyleConv(**cfg)
        feat = torch.randn((2, 3, 4, 4))
        style = torch.randn((2, 5))
        out = conv(feat, style)
        assert out.shape == (2, 1, 4, 4)

    @pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
    def test_mod_styleconv_cuda(self):
        conv = ModulatedStyleConv(**self.default_cfg).cuda()
        feat = torch.randn((2, 3, 4, 4)).cuda()
        style = torch.randn((2, 5)).cuda()
        out = conv(feat, style)
        assert out.shape == (2, 1, 8, 8)

        cfg = deepcopy(self.default_cfg)
        cfg['upsample'] = False
        conv = ModulatedStyleConv(**cfg).cuda()
        feat = torch.randn((2, 3, 4, 4)).cuda()
        style = torch.randn((2, 5)).cuda()
        out = conv(feat, style)
        assert out.shape == (2, 1, 4, 4)
class TestToRGB:
    """Tests for ModulatedToRGB."""

    @classmethod
    def setup_class(cls):
        cls.default_cfg = dict(in_channels=5, style_channels=5, out_channels=3)

    def test_torgb_cpu(self):
        model = ModulatedToRGB(**self.default_cfg)
        feat = torch.randn((2, 5, 4, 4))
        style = torch.randn((2, 5))
        out = model(feat, style)
        assert out.shape == (2, 3, 4, 4)

        # With a skip connection from a lower resolution.
        feat = torch.randn((2, 5, 8, 8))
        style = torch.randn((2, 5))
        skip = torch.randn(2, 3, 4, 4)
        out = model(feat, style, skip)
        assert out.shape == (2, 3, 8, 8)

    @pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
    def test_torgb_cuda(self):
        model = ModulatedToRGB(**self.default_cfg).cuda()
        feat = torch.randn((2, 5, 4, 4)).cuda()
        style = torch.randn((2, 5)).cuda()
        out = model(feat, style)
        assert out.shape == (2, 3, 4, 4)

        feat = torch.randn((2, 5, 8, 8)).cuda()
        style = torch.randn((2, 5)).cuda()
        skip = torch.randn(2, 3, 4, 4).cuda()
        out = model(feat, style, skip)
        assert out.shape == (2, 3, 8, 8)
class TestStyleGAN2Generator:
    """Tests for StyleGANv2Generator."""

    @classmethod
    def setup_class(cls):
        cls.default_cfg = dict(
            out_size=64, style_channels=16, num_mlps=4, channel_multiplier=1)

    def test_stylegan2_g_cpu(self):
        gen = StyleGANv2Generator(**self.default_cfg)
        out = gen(None, num_batches=2)
        assert out.shape == (2, 3, 64, 64)

        # Truncation trick around the mean latent.
        truncation_mean = gen.get_mean_latent()
        out = gen(
            None,
            num_batches=2,
            randomize_noise=False,
            truncation=0.7,
            truncation_latent=truncation_mean)
        assert out.shape == (2, 3, 64, 64)

        out = gen.style_mixing(2, 2, truncation_latent=truncation_mean)
        assert out.shape[2] == 64

        # Explicit injected noise vs. default noise.
        noise = gen.make_injected_noise()
        out = gen(
            None, num_batches=1, injected_noise=noise, randomize_noise=False)
        assert out.shape == (1, 3, 64, 64)
        noise = gen.make_injected_noise()
        out = gen(
            None, num_batches=1, injected_noise=None, randomize_noise=False)
        assert out.shape == (1, 3, 64, 64)

        # Explicit style codes and a callable noise generator.
        styles = [torch.randn((1, 16)) for _ in range(2)]
        out = gen(
            styles, num_batches=1, injected_noise=None, randomize_noise=False)
        assert out.shape == (1, 3, 64, 64)
        out = gen(
            torch.randn,
            num_batches=1,
            injected_noise=None,
            randomize_noise=False)
        assert out.shape == (1, 3, 64, 64)

        # eval/train toggle the default style mode.
        gen.eval()
        assert gen.default_style_mode == 'single'
        gen.train()
        assert gen.default_style_mode == 'mix'

        # Style codes of the wrong width are rejected.
        with pytest.raises(AssertionError):
            styles = [torch.randn((1, 6)) for _ in range(2)]
            _ = gen(styles, injected_noise=None, randomize_noise=False)

        # Larger output resolution.
        cfg = deepcopy(self.default_cfg)
        cfg['out_size'] = 256
        gen = StyleGANv2Generator(**cfg)
        out = gen(None, num_batches=2)
        assert out.shape == (2, 3, 256, 256)

    @pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
    def test_g_cuda(self):
        gen = StyleGANv2Generator(**self.default_cfg).cuda()
        out = gen(None, num_batches=2)
        assert out.shape == (2, 3, 64, 64)

        noise = gen.make_injected_noise()
        out = gen(
            None, num_batches=1, injected_noise=noise, randomize_noise=False)
        assert out.shape == (1, 3, 64, 64)
        noise = gen.make_injected_noise()
        out = gen(
            None, num_batches=1, injected_noise=None, randomize_noise=False)
        assert out.shape == (1, 3, 64, 64)

        styles = [torch.randn((1, 16)).cuda() for _ in range(2)]
        out = gen(
            styles, num_batches=1, injected_noise=None, randomize_noise=False)
        assert out.shape == (1, 3, 64, 64)
        out = gen(
            torch.randn,
            num_batches=1,
            injected_noise=None,
            randomize_noise=False)
        assert out.shape == (1, 3, 64, 64)

        gen.eval()
        assert gen.default_style_mode == 'single'
        gen.train()
        assert gen.default_style_mode == 'mix'

        with pytest.raises(AssertionError):
            styles = [torch.randn((1, 6)).cuda() for _ in range(2)]
            _ = gen(styles, injected_noise=None, randomize_noise=False)

        cfg = deepcopy(self.default_cfg)
        cfg['out_size'] = 256
        gen = StyleGANv2Generator(**cfg).cuda()
        out = gen(None, num_batches=2)
        assert out.shape == (2, 3, 256, 256)
class TestStyleGANv2Disc:
    """Tests for StyleGAN2Discriminator."""

    @classmethod
    def setup_class(cls):
        cls.default_cfg = dict(in_size=64, channel_multiplier=1)

    def test_stylegan2_disc_cpu(self):
        disc = StyleGAN2Discriminator(**self.default_cfg)
        fake_img = torch.randn((2, 3, 64, 64))
        pred = disc(fake_img)
        assert pred.shape == (2, 1)

    @pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
    def test_stylegan2_disc_cuda(self):
        disc = StyleGAN2Discriminator(**self.default_cfg).cuda()
        fake_img = torch.randn((2, 3, 64, 64)).cuda()
        pred = disc(fake_img)
        assert pred.shape == (2, 1)
def test_get_module_device_cpu():
    """Modules with parameters report 'cpu'; parameterless modules raise."""
    device = get_module_device(nn.Conv2d(3, 3, 3, 1, 1))
    assert device == torch.device('cpu')

    # nn.Flatten has no parameters, so no device can be inferred.
    with pytest.raises(ValueError):
        get_module_device(nn.Flatten())
@pytest.mark.skipif(not torch.cuda.is_available(), reason='requires cuda')
def test_get_module_device_cuda():
    """A CUDA module reports the device of its first parameter."""
    module = nn.Conv2d(3, 3, 3, 1, 1).cuda()
    device = get_module_device(module)
    assert device == next(module.parameters()).get_device()

    # Parameterless modules raise even on GPU.
    with pytest.raises(ValueError):
        get_module_device(nn.Flatten().cuda())
def test_res_block():
    """ResBlock maps (N, C_in, H, W) to (N, C_out, H, W)."""
    # Channel-changing block.
    block = ResBlock(16, 32)
    feat = torch.rand(2, 16, 64, 64)
    out = block(feat)
    assert out.shape == (2, 32, 64, 64)

    # Channel-preserving block.
    block = ResBlock(16, 16)
    feat = torch.rand(2, 16, 64, 64)
    out = block(feat)
    assert out.shape == (2, 16, 64, 64)
def test_hour_glass():
    """Hourglass preserves the input feature shape."""
    model = Hourglass(2, 16)
    feat = torch.rand(2, 16, 64, 64)
    out = model(feat)
    assert out.shape == feat.shape
def test_feedback_hour_glass():
    """FeedbackHourglass yields heatmaps and hidden state of fixed shapes."""
    config = dict(type='FeedbackHourglass', mid_channels=16, num_keypoints=20)
    fhg = build_component(config)
    assert fhg.__class__.__name__ == 'FeedbackHourglass'

    x = torch.rand(2, 3, 64, 64)
    heatmap, last_hidden = fhg.forward(x)
    assert heatmap.shape == (2, 20, 16, 16)
    assert last_hidden.shape == (2, 16, 16, 16)

    # Feed the hidden state back in.
    heatmap, last_hidden = fhg.forward(x, last_hidden)
    assert heatmap.shape == (2, 20, 16, 16)
    assert last_hidden.shape == (2, 16, 16, 16)
def test_reduce_to_five_heatmaps():
    """Heatmaps with 5/68/194 channels reduce to 5; other counts raise."""
    for num_channels in (5, 68, 194):
        heatmap = torch.rand((2, num_channels, 64, 64))
        for flip in (False, True):
            reduced = reduce_to_five_heatmaps(heatmap, flip)
            assert reduced.shape == (2, 5, 64, 64)

    # Unsupported channel counts are rejected.
    with pytest.raises(NotImplementedError):
        heatmap = torch.rand((2, 12, 64, 64))
        reduce_to_five_heatmaps(heatmap, False)
def test_lte():
    """LTE extracts three feature levels; bad `pretrained` values raise."""
    model_cfg = dict(
        type='LTE',
        requires_grad=False,
        pixel_range=1.0,
        pretrained=None,
        load_pretrained_vgg=False)
    lte = build_component(model_cfg)
    assert lte.__class__.__name__ == 'LTE'

    x = torch.rand(2, 3, 64, 64)
    x_level3, x_level2, x_level1 = lte(x)
    assert x_level1.shape == (2, 64, 64, 64)
    assert x_level2.shape == (2, 128, 32, 32)
    assert x_level3.shape == (2, 256, 16, 16)

    lte.init_weights(None)

    # Empty checkpoint path cannot be loaded.
    with pytest.raises(IOError):
        model_cfg['pretrained'] = ''
        lte = build_component(model_cfg)
        x_level3, x_level2, x_level1 = lte(x)
        lte.init_weights('')

    # `pretrained` only accepts a str or None.
    with pytest.raises(TypeError):
        lte.init_weights(1)
def test_light_cnn_feature_loss():
    """LightCNNFeatureLoss computes positive losses and validates args."""
    pretrained = ('https://download.openmmlab.com/mmediting/' +
                  'restorers/dic/light_cnn_feature.pth')
    pred = torch.rand((3, 3, 128, 128))
    gt = torch.rand((3, 3, 128, 128))

    feature_loss = LightCNNFeatureLoss(pretrained=pretrained)
    loss = feature_loss(pred, gt)
    assert loss.item() > 0

    feature_loss = LightCNNFeatureLoss(pretrained=pretrained, criterion='mse')
    loss = feature_loss(pred, gt)
    assert loss.item() > 0

    if torch.cuda.is_available():
        pred = pred.cuda()
        gt = gt.cuda()
        feature_loss = feature_loss.cuda()
        pred.requires_grad = True

        loss = feature_loss(pred, gt)
        assert loss.item() > 0

        # One large SGD step on `pred` should reduce the loss.
        optim = torch.optim.SGD(params=[pred], lr=10)
        optim.zero_grad()
        loss.backward()
        optim.step()
        loss_new = feature_loss(pred, gt)
        assert loss_new < loss

        feature_loss = LightCNNFeatureLoss(
            pretrained=pretrained, criterion='mse').cuda()
        loss = feature_loss(pred, gt)
        assert loss.item() > 0

    # The internal feature model must stay in eval mode.
    with pytest.raises(AssertionError):
        feature_loss.model.train()
        feature_loss(pred, gt)

    # Only 'l1' and 'mse' criteria are supported; pretrained is required.
    with pytest.raises(ValueError):
        LightCNNFeatureLoss(pretrained=pretrained, criterion='l2')
    with pytest.raises(AssertionError):
        LightCNNFeatureLoss(pretrained=None)
def _get_model_cfg(fname):
    """Grab configs necessary to create a model.

    These are deep copied to allow for safe modification of parameters
    without influencing other tests.
    """
    cfg_dir = 'configs/mattors'
    if not osp.exists(cfg_dir):
        raise Exception('Cannot find config path')
    cfg = mmcv.Config.fromfile(osp.join(cfg_dir, fname))
    return cfg.model, cfg.train_cfg, cfg.test_cfg
def assert_dict_keys_equal(dictionary, target_keys):
    """Check if the keys of the dictionary is equal to the target key set."""
    assert isinstance(dictionary, dict)
    # Compare as sets: order of target_keys does not matter.
    assert set(dictionary.keys()) == set(target_keys), (
        set(dictionary.keys()), set(target_keys))
@patch.multiple(BaseMattor, __abstractmethods__=set())
def test_base_mattor():
    """BaseMattor validates test_cfg and toggles refiner/backbone training."""
    backbone = dict(
        type='SimpleEncoderDecoder',
        encoder=dict(type='VGG16', in_channels=4),
        decoder=dict(type='PlainDecoder'))
    refiner = dict(type='PlainRefiner')
    train_cfg = mmcv.ConfigDict(train_backbone=True, train_refiner=True)
    test_cfg = mmcv.ConfigDict(
        refine=True, metrics=['SAD', 'MSE', 'GRAD', 'CONN'])

    # Missing, unsupported, or ill-typed metrics are rejected.
    with pytest.raises(KeyError):
        BaseMattor(
            backbone,
            refiner,
            train_cfg.copy(),
            test_cfg=mmcv.ConfigDict(refine=True))
    with pytest.raises(KeyError):
        BaseMattor(
            backbone,
            refiner,
            train_cfg.copy(),
            test_cfg=mmcv.ConfigDict(
                refine=True, metrics=['UnsupportedMetric']))
    with pytest.raises(TypeError):
        BaseMattor(
            backbone,
            refiner,
            train_cfg.copy(),
            test_cfg=mmcv.ConfigDict(refine=True, metrics='SAD'))

    # No refiner at all.
    mattor = BaseMattor(
        backbone, refiner=None, train_cfg=None, test_cfg=test_cfg.copy())
    assert not mattor.with_refiner

    # Refiner trained, backbone frozen.
    mattor = BaseMattor(
        backbone,
        refiner,
        train_cfg=mmcv.ConfigDict(train_backbone=False, train_refiner=True),
        test_cfg=test_cfg.copy())
    assert not mattor.train_cfg.train_backbone
    assert mattor.train_cfg.train_refiner
    assert mattor.test_cfg.refine

    # Backbone trained, refiner frozen.
    mattor = BaseMattor(
        backbone,
        refiner,
        train_cfg=mmcv.ConfigDict(train_backbone=True, train_refiner=False),
        test_cfg=test_cfg.copy())
    assert mattor.train_cfg.train_backbone
    assert not mattor.train_cfg.train_refiner
    assert mattor.test_cfg.refine
def test_dim():
    """Train/test DIM with and without the refiner branch."""
    model_cfg, train_cfg, test_cfg = _get_model_cfg(
        'dim/dim_stage3_v16_pln_1x1_1000k_comp1k.py')
    model_cfg['pretrained'] = None

    # 1. train with both backbone and refiner.
    train_cfg.train_refiner = True
    test_cfg.refine = True
    model = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
    batch = _demo_input_train((64, 64))
    result = model(**batch)
    assert result['num_samples'] == 1
    assert_dict_keys_equal(result['losses'],
                           ['loss_alpha', 'loss_comp', 'loss_refine'])
    if torch.cuda.is_available():
        model = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
        model.cuda()
        batch = _demo_input_train((64, 64), cuda=True)
        result = model(**batch)
        assert result['num_samples'] == 1
        assert_dict_keys_equal(result['losses'],
                               ['loss_alpha', 'loss_comp', 'loss_refine'])

    # 2. test with refiner and metrics.
    with torch.no_grad():
        model = build_model(model_cfg, train_cfg=None, test_cfg=test_cfg)
        batch = _demo_input_test((64, 64))
        result = model(**batch, test_mode=True)
        assert isinstance(result['pred_alpha'], np.ndarray)
        assert_dict_keys_equal(result['eval_result'],
                               ['SAD', 'MSE', 'GRAD', 'CONN'])
        if torch.cuda.is_available():
            model = build_model(model_cfg, train_cfg=None, test_cfg=test_cfg)
            model.cuda()
            batch = _demo_input_test((64, 64), cuda=True)
            result = model(**batch, test_mode=True)
            assert isinstance(result['pred_alpha'], np.ndarray)
            assert_dict_keys_equal(result['eval_result'],
                                   ['SAD', 'MSE', 'GRAD', 'CONN'])

    # 3. train without refiner.
    model_cfg['refiner'] = None
    test_cfg['metrics'] = None
    model = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
    batch = _demo_input_train((64, 64))
    result = model(**batch)
    assert result['num_samples'] == 1
    assert_dict_keys_equal(result['losses'], ['loss_alpha', 'loss_comp'])
    if torch.cuda.is_available():
        model = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
        model.cuda()
        batch = _demo_input_train((64, 64), cuda=True)
        result = model(**batch)
        assert result['num_samples'] == 1
        assert_dict_keys_equal(result['losses'], ['loss_alpha', 'loss_comp'])

    # 4. test without refiner (metrics disabled).
    with torch.no_grad():
        model = build_model(model_cfg, train_cfg=None, test_cfg=test_cfg)
        batch = _demo_input_test((64, 64))
        result = model(**batch, test_mode=True)
        assert isinstance(result['pred_alpha'], np.ndarray)
        assert result['eval_result'] is None
        if torch.cuda.is_available():
            model = build_model(model_cfg, train_cfg=None, test_cfg=test_cfg)
            model.cuda()
            batch = _demo_input_test((64, 64), cuda=True)
            result = model(**batch, test_mode=True)
            assert isinstance(result['pred_alpha'], np.ndarray)
            assert result['eval_result'] is None

    # 5. dummy forward on CPU.
    model.cpu().eval()
    inputs = torch.ones((1, 4, 32, 32))
    model.forward_dummy(inputs)
def test_indexnet():
    """Test IndexNet in both test and train modes."""
    model_cfg, _, test_cfg = _get_model_cfg(
        'indexnet/indexnet_mobv2_1x16_78k_comp1k.py')
    model_cfg['pretrained'] = None

    # Test mode with metrics.
    with torch.no_grad():
        indexnet = build_model(model_cfg, train_cfg=None, test_cfg=test_cfg)
        indexnet.eval()
        batch = _demo_input_test((64, 64))
        result = indexnet(**batch, test_mode=True)
        assert isinstance(result['pred_alpha'], np.ndarray)
        assert result['pred_alpha'].shape == (64, 64)
        assert_dict_keys_equal(result['eval_result'],
                               ['SAD', 'MSE', 'GRAD', 'CONN'])
        if torch.cuda.is_available():
            indexnet = build_model(
                model_cfg, train_cfg=None, test_cfg=test_cfg).cuda()
            indexnet.eval()
            batch = _demo_input_test((64, 64), cuda=True)
            result = indexnet(**batch, test_mode=True)
            assert isinstance(result['pred_alpha'], np.ndarray)
            assert result['pred_alpha'].shape == (64, 64)
            assert_dict_keys_equal(result['eval_result'],
                                   ['SAD', 'MSE', 'GRAD', 'CONN'])

    # Train mode: composition loss only.
    model_cfg.loss_alpha = None
    model_cfg.loss_comp = dict(type='L1CompositionLoss')
    indexnet = build_model(
        model_cfg,
        train_cfg=mmcv.ConfigDict(train_backbone=True),
        test_cfg=test_cfg)
    batch = _demo_input_train((64, 64), batch_size=2)
    result = indexnet(**batch)
    assert result['num_samples'] == 2
    assert_dict_keys_equal(result['losses'], ['loss_comp'])

    if torch.cuda.is_available():
        # Train mode on GPU: alpha loss only.
        model_cfg.loss_alpha = dict(type='L1Loss')
        model_cfg.loss_comp = None
        indexnet = build_model(
            model_cfg,
            train_cfg=mmcv.ConfigDict(train_backbone=True),
            test_cfg=test_cfg).cuda()
        batch = _demo_input_train((64, 64), batch_size=2, cuda=True)
        result = indexnet(**batch)
        assert result['num_samples'] == 2
        assert_dict_keys_equal(result['losses'], ['loss_alpha'])

    # Dummy forward on CPU.
    indexnet.cpu().eval()
    inputs = torch.ones((1, 4, 32, 32))
    indexnet.forward_dummy(inputs)
def test_gca():
    """Test GCA matting in train and test modes."""
    model_cfg, train_cfg, test_cfg = _get_model_cfg(
        'gca/gca_r34_4x10_200k_comp1k.py')
    model_cfg['pretrained'] = None

    # Train mode on CPU.
    model = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
    inputs = _demo_input_train((64, 64), batch_size=2)
    inputs['trimap'] = inputs['trimap'].expand_as(inputs['merged'])
    inputs['meta'][0]['to_onehot'] = True
    outputs = model(inputs['merged'], inputs['trimap'], inputs['meta'],
                    inputs['alpha'])
    assert outputs['num_samples'] == 2
    assert_dict_keys_equal(outputs['losses'], ['loss'])

    if torch.cuda.is_available():
        model = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
        model.cuda()
        inputs = _demo_input_train((64, 64), batch_size=2, cuda=True)
        inputs['trimap'] = inputs['trimap'].expand_as(inputs['merged'])
        inputs['meta'][0]['to_onehot'] = True
        outputs = model(inputs['merged'], inputs['trimap'], inputs['meta'],
                        inputs['alpha'])
        assert outputs['num_samples'] == 2
        assert_dict_keys_equal(outputs['losses'], ['loss'])

    # Test mode (encoder takes a 4-channel input here).
    with torch.no_grad():
        model_cfg.backbone.encoder.in_channels = 4
        model = build_model(model_cfg, train_cfg=None, test_cfg=test_cfg)
        inputs = _demo_input_test((64, 64))
        outputs = model(**inputs, test_mode=True)
        assert_dict_keys_equal(outputs['eval_result'],
                               ['SAD', 'MSE', 'GRAD', 'CONN'])
        if torch.cuda.is_available():
            model = build_model(model_cfg, train_cfg=None, test_cfg=test_cfg)
            model.cuda()
            inputs = _demo_input_test((64, 64), cuda=True)
            outputs = model(**inputs, test_mode=True)
            assert_dict_keys_equal(outputs['eval_result'],
                                   ['SAD', 'MSE', 'GRAD', 'CONN'])

    # Dummy forward on CPU.
    model.cpu().eval()
    inputs = torch.ones((1, 4, 32, 32))
    model.forward_dummy(inputs)
def _demo_input_train(img_shape, batch_size=1, cuda=False): '\n Create a superset of inputs needed to run backbone.\n\n Args:\n img_shape (tuple): shape of the input image.\n batch_size (int): batch size of the input batch.\n cuda (bool): whether transfer input into gpu.\n ' color_shape = (batch_size, 3, img_shape[0], img_shape[1]) gray_shape = (batch_size, 1, img_shape[0], img_shape[1]) merged = torch.from_numpy(np.random.random(color_shape).astype(np.float32)) trimap = torch.from_numpy(np.random.randint(255, size=gray_shape).astype(np.float32)) meta = ([{}] * batch_size) alpha = torch.from_numpy(np.random.random(gray_shape).astype(np.float32)) ori_merged = torch.from_numpy(np.random.random(color_shape).astype(np.float32)) fg = torch.from_numpy(np.random.random(color_shape).astype(np.float32)) bg = torch.from_numpy(np.random.random(color_shape).astype(np.float32)) if cuda: merged = merged.cuda() trimap = trimap.cuda() alpha = alpha.cuda() ori_merged = ori_merged.cuda() fg = fg.cuda() bg = bg.cuda() return dict(merged=merged, trimap=trimap, meta=meta, alpha=alpha, ori_merged=ori_merged, fg=fg, bg=bg)
def _demo_input_test(img_shape, batch_size=1, cuda=False, test_trans='resize'): '\n Create a superset of inputs needed to run backbone.\n\n Args:\n img_shape (tuple): shape of the input image.\n batch_size (int): batch size of the input batch.\n cuda (bool): whether transfer input into gpu.\n test_trans (str): what test transformation is used in data pipeline.\n ' color_shape = (batch_size, 3, img_shape[0], img_shape[1]) gray_shape = (batch_size, 1, img_shape[0], img_shape[1]) merged = torch.from_numpy(np.random.random(color_shape).astype(np.float32)) trimap = torch.from_numpy(np.random.randint(255, size=gray_shape).astype(np.float32)) ori_alpha = np.random.random(img_shape).astype(np.float32) ori_trimap = np.random.randint(256, size=img_shape).astype(np.float32) if cuda: merged = merged.cuda() trimap = trimap.cuda() meta = ([dict(ori_alpha=ori_alpha, ori_trimap=ori_trimap, merged_ori_shape=img_shape)] * batch_size) if (test_trans == 'pad'): meta[0]['pad'] = (0, 0) elif (test_trans == 'resize'): meta[0]['interpolation'] = 'bilinear' return dict(merged=merged, trimap=trimap, meta=meta)
def test_basic_restorer():
    """Test BasicRestorer forward, train_step and evaluation/saving."""
    model_cfg = dict(
        type='BasicRestorer',
        generator=dict(
            type='MSRResNet',
            in_channels=3,
            out_channels=3,
            mid_channels=4,
            num_blocks=1,
            upscale_factor=4),
        pixel_loss=dict(type='L1Loss', loss_weight=1.0, reduction='mean'))
    train_cfg = None
    test_cfg = None

    # Build restorer.
    restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
    assert restorer.__class__.__name__ == 'BasicRestorer'
    assert isinstance(restorer.generator, MSRResNet)
    assert isinstance(restorer.pixel_loss, L1Loss)

    # Prepare data and optimizer.
    inputs = torch.rand(1, 3, 20, 20)
    targets = torch.rand(1, 3, 80, 80)
    data_batch = {'lq': inputs, 'gt': targets}
    optim_cfg = dict(type='Adam', lr=0.0002, betas=(0.9, 0.999))
    optimizer = {
        'generator':
        obj_from_dict(optim_cfg, torch.optim,
                      dict(params=restorer.parameters()))
    }

    # Forward in train mode.
    outputs = restorer(**data_batch, test_mode=False)
    assert isinstance(outputs, dict)
    assert isinstance(outputs['losses'], dict)
    assert isinstance(outputs['losses']['loss_pix'], torch.FloatTensor)
    assert outputs['num_samples'] == 1
    assert torch.equal(outputs['results']['lq'], data_batch['lq'])
    assert torch.equal(outputs['results']['gt'], data_batch['gt'])
    assert torch.is_tensor(outputs['results']['output'])
    assert outputs['results']['output'].size() == (1, 3, 80, 80)

    # Forward in test mode.
    with torch.no_grad():
        outputs = restorer(**data_batch, test_mode=True)
        assert torch.equal(outputs['lq'], data_batch['lq'])
        assert torch.is_tensor(outputs['output'])
        assert outputs['output'].size() == (1, 3, 80, 80)

    # Dummy forward.
    with torch.no_grad():
        output = restorer.forward_dummy(data_batch['lq'])
        assert torch.is_tensor(output)
        assert output.size() == (1, 3, 80, 80)

    # train_step.
    outputs = restorer.train_step(data_batch, optimizer)
    assert isinstance(outputs, dict)
    assert isinstance(outputs['log_vars'], dict)
    assert isinstance(outputs['log_vars']['loss_pix'], float)
    assert outputs['num_samples'] == 1
    assert torch.equal(outputs['results']['lq'], data_batch['lq'])
    assert torch.equal(outputs['results']['gt'], data_batch['gt'])
    assert torch.is_tensor(outputs['results']['output'])
    assert outputs['results']['output'].size() == (1, 3, 80, 80)

    # Repeat the same checks on GPU.
    if torch.cuda.is_available():
        restorer = restorer.cuda()
        optimizer['generator'] = obj_from_dict(
            optim_cfg, torch.optim, dict(params=restorer.parameters()))
        data_batch = {'lq': inputs.cuda(), 'gt': targets.cuda()}

        outputs = restorer(**data_batch, test_mode=False)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['losses'], dict)
        assert isinstance(outputs['losses']['loss_pix'],
                          torch.cuda.FloatTensor)
        assert outputs['num_samples'] == 1
        assert torch.equal(outputs['results']['lq'], data_batch['lq'].cpu())
        assert torch.equal(outputs['results']['gt'], data_batch['gt'].cpu())
        assert torch.is_tensor(outputs['results']['output'])
        assert outputs['results']['output'].size() == (1, 3, 80, 80)

        with torch.no_grad():
            outputs = restorer(**data_batch, test_mode=True)
            assert torch.equal(outputs['lq'], data_batch['lq'].cpu())
            assert torch.is_tensor(outputs['output'])
            assert outputs['output'].size() == (1, 3, 80, 80)

        outputs = restorer.train_step(data_batch, optimizer)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['log_vars'], dict)
        assert isinstance(outputs['log_vars']['loss_pix'], float)
        assert outputs['num_samples'] == 1
        assert torch.equal(outputs['results']['lq'], data_batch['lq'].cpu())
        assert torch.equal(outputs['results']['gt'], data_batch['gt'].cpu())
        assert torch.is_tensor(outputs['results']['output'])
        assert outputs['results']['output'].size() == (1, 3, 80, 80)

    # Evaluation with metrics and image saving.
    test_cfg = dict(metrics=('PSNR', 'SSIM'), crop_border=0)
    test_cfg = mmcv.Config(test_cfg)
    data_batch = {
        'lq': inputs,
        'gt': targets,
        'meta': [{
            'lq_path': 'fake_path/fake_name.png'
        }]
    }
    restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)

    # Evaluation requires gt.
    with pytest.raises(AssertionError):
        restorer(lq=inputs, test_mode=True)

    with tempfile.TemporaryDirectory() as tmpdir:
        outputs = restorer(
            **data_batch,
            test_mode=True,
            save_image=True,
            save_path=tmpdir,
            iteration=None)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['eval_result'], dict)
        assert isinstance(outputs['eval_result']['PSNR'], float)
        assert isinstance(outputs['eval_result']['SSIM'], float)

        outputs = restorer(
            **data_batch,
            test_mode=True,
            save_image=True,
            save_path=tmpdir,
            iteration=100)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['eval_result'], dict)
        assert isinstance(outputs['eval_result']['PSNR'], float)
        assert isinstance(outputs['eval_result']['SSIM'], float)

        # `iteration` must be an int or None.
        with pytest.raises(ValueError):
            restorer(
                **data_batch,
                test_mode=True,
                save_image=True,
                save_path=tmpdir,
                iteration='100')
def test_basicvsr_model():
    """Test BasicVSR: build, train_step (with SPyNet fix_iter), forward_dummy,
    test-mode forward, metric evaluation with image saving, and the
    spatial-temporal ensemble variants.
    """
    model_cfg = dict(
        type='BasicVSR',
        generator=dict(
            type='BasicVSRNet',
            mid_channels=64,
            num_blocks=30,
            spynet_pretrained=None),
        pixel_loss=dict(type='MSELoss', loss_weight=1.0, reduction='sum'))

    train_cfg = dict(fix_iter=1)
    train_cfg = mmcv.Config(train_cfg)
    test_cfg = None

    # build restorer
    restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)

    # test attributes
    assert restorer.__class__.__name__ == 'BasicVSR'
    assert isinstance(restorer.generator, BasicVSRNet)
    assert isinstance(restorer.pixel_loss, MSELoss)

    # prepare data
    inputs = torch.rand(1, 5, 3, 64, 64)
    targets = torch.rand(1, 5, 3, 256, 256)

    if torch.cuda.is_available():
        inputs = inputs.cuda()
        targets = targets.cuda()
        restorer = restorer.cuda()

    data_batch = {'lq': inputs, 'gt': targets}

    # prepare optimizer
    optim_cfg = dict(type='Adam', lr=0.0002, betas=(0.9, 0.999))
    optimizer = {
        'generator':
        obj_from_dict(optim_cfg, torch.optim,
                      dict(params=getattr(restorer, 'generator').parameters()))
    }

    # train_step once: SPyNet is still frozen (iteration < fix_iter)
    outputs = restorer.train_step(data_batch, optimizer)
    assert isinstance(outputs, dict)
    assert isinstance(outputs['log_vars'], dict)
    assert isinstance(outputs['log_vars']['loss_pix'], float)
    assert outputs['num_samples'] == 1
    assert torch.equal(outputs['results']['lq'], data_batch['lq'].cpu())
    assert torch.equal(outputs['results']['gt'], data_batch['gt'].cpu())
    assert torch.is_tensor(outputs['results']['output'])
    assert outputs['results']['output'].size() == (1, 5, 3, 256, 256)

    # train_step again: past fix_iter, the full model is trained
    outputs = restorer.train_step(data_batch, optimizer)
    assert isinstance(outputs, dict)
    assert isinstance(outputs['log_vars'], dict)
    assert isinstance(outputs['log_vars']['loss_pix'], float)
    assert outputs['num_samples'] == 1
    assert torch.equal(outputs['results']['lq'], data_batch['lq'].cpu())
    assert torch.equal(outputs['results']['gt'], data_batch['gt'].cpu())
    assert torch.is_tensor(outputs['results']['output'])
    assert outputs['results']['output'].size() == (1, 5, 3, 256, 256)

    # test forward_dummy
    with torch.no_grad():
        output = restorer.forward_dummy(data_batch['lq'])
    assert torch.is_tensor(output)
    assert output.size() == (1, 5, 3, 256, 256)

    # test forward_test with ground truth
    with torch.no_grad():
        outputs = restorer(**data_batch, test_mode=True)
    assert torch.equal(outputs['lq'], data_batch['lq'].cpu())
    assert torch.equal(outputs['gt'], data_batch['gt'].cpu())
    assert torch.is_tensor(outputs['output'])
    assert outputs['output'].size() == (1, 5, 3, 256, 256)

    # test forward_test without ground truth
    with torch.no_grad():
        outputs = restorer(inputs, test_mode=True)
    assert torch.equal(outputs['lq'], data_batch['lq'].cpu())
    assert torch.is_tensor(outputs['output'])
    assert outputs['output'].size() == (1, 5, 3, 256, 256)

    # test with metric and saving images
    train_cfg = mmcv.ConfigDict(fix_iter=1)
    test_cfg = dict(metrics=('PSNR', 'SSIM'), crop_border=0)
    test_cfg = mmcv.Config(test_cfg)

    data_batch = {
        'lq': inputs,
        'gt': targets,
        'meta': [{
            'gt_path': 'fake_path/fake_name.png',
            'key': '000'
        }]
    }

    restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
    if torch.cuda.is_available():
        restorer = restorer.cuda()

    with pytest.raises(AssertionError):
        # evaluation with metrics requires ground truth
        restorer(lq=inputs, test_mode=True)

    with tempfile.TemporaryDirectory() as tmpdir:
        outputs = restorer(
            **data_batch,
            test_mode=True,
            save_image=True,
            save_path=tmpdir,
            iteration=None)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['eval_result'], dict)
        assert isinstance(outputs['eval_result']['PSNR'], float)
        assert isinstance(outputs['eval_result']['SSIM'], float)

        outputs = restorer(
            **data_batch,
            test_mode=True,
            save_image=True,
            save_path=tmpdir,
            iteration=100)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['eval_result'], dict)
        assert isinstance(outputs['eval_result']['PSNR'], float)
        assert isinstance(outputs['eval_result']['SSIM'], float)

        with pytest.raises(ValueError):
            # iteration must be None or an int
            restorer(
                **data_batch,
                test_mode=True,
                save_image=True,
                save_path=tmpdir,
                iteration='100')

    # forward with a spatial-only ensemble
    model_cfg = dict(
        type='BasicVSR',
        generator=dict(
            type='BasicVSRNet',
            mid_channels=64,
            num_blocks=30,
            spynet_pretrained=None),
        pixel_loss=dict(type='MSELoss', loss_weight=1.0, reduction='sum'),
        ensemble=dict(
            type='SpatialTemporalEnsemble', is_temporal_ensemble=False))

    train_cfg = dict(fix_iter=1)
    train_cfg = mmcv.Config(train_cfg)
    test_cfg = None
    restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)

    inputs = torch.rand(1, 5, 3, 64, 64)
    targets = torch.rand(1, 5, 3, 256, 256)
    if torch.cuda.is_available():
        inputs = inputs.cuda()
        targets = targets.cuda()
        restorer = restorer.cuda()
    data_batch = {'lq': inputs, 'gt': targets}

    with torch.no_grad():
        outputs = restorer(**data_batch, test_mode=True)
    assert torch.equal(outputs['lq'], data_batch['lq'].cpu())
    assert torch.equal(outputs['gt'], data_batch['gt'].cpu())
    assert torch.is_tensor(outputs['output'])
    assert outputs['output'].size() == (1, 5, 3, 256, 256)

    # an unknown ensemble type must be rejected
    model_cfg = dict(
        type='BasicVSR',
        generator=dict(
            type='BasicVSRNet',
            mid_channels=64,
            num_blocks=30,
            spynet_pretrained=None),
        pixel_loss=dict(type='MSELoss', loss_weight=1.0, reduction='sum'),
        ensemble=dict(type='abc', is_temporal_ensemble=False))

    with pytest.raises(NotImplementedError):
        restorer = build_model(
            model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
def test_dic_model():
    """Test DIC: build with/without GAN branch, train_step, val_step with
    metrics, and init_weights error handling.

    NOTE(review): the CUDA-only scope of the val_step / error-path section is
    inferred from its use of ``inputs.cuda()`` — confirm against upstream.
    """
    pretrained = ('https://download.openmmlab.com/mmediting/' +
                  'restorers/dic/light_cnn_feature.pth')

    # config without the GAN/feature-loss branch
    model_cfg_pre = dict(
        type='DIC',
        generator=dict(
            type='DICNet', in_channels=3, out_channels=3, mid_channels=48),
        pixel_loss=dict(type='L1Loss', loss_weight=1.0, reduction='mean'),
        align_loss=dict(type='MSELoss', loss_weight=0.1, reduction='mean'))

    # full config with discriminator, feature loss and GAN loss
    model_cfg = dict(
        type='DIC',
        generator=dict(
            type='DICNet', in_channels=3, out_channels=3, mid_channels=48),
        discriminator=dict(type='LightCNN', in_channels=3),
        pixel_loss=dict(type='L1Loss', loss_weight=1.0, reduction='mean'),
        align_loss=dict(type='MSELoss', loss_weight=0.1, reduction='mean'),
        feature_loss=dict(
            type='LightCNNFeatureLoss',
            pretrained=pretrained,
            loss_weight=0.1,
            criterion='l1'),
        gan_loss=dict(
            type='GANLoss',
            gan_type='vanilla',
            loss_weight=0.005,
            real_label_val=1.0,
            fake_label_val=0))

    scale = 8
    train_cfg = None
    test_cfg = Config(dict(metrics=['PSNR', 'SSIM'], crop_border=scale))

    # both configs must build
    build_model(model_cfg_pre, train_cfg=train_cfg, test_cfg=test_cfg)
    restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)

    assert restorer.__class__.__name__ == 'DIC'

    # prepare data
    inputs = torch.rand(1, 3, 16, 16)
    targets = torch.rand(1, 3, 128, 128)
    heatmap = torch.rand(1, 68, 32, 32)
    data_batch = {'lq': inputs, 'gt': targets, 'heatmap': heatmap}

    # prepare optimizers (both share the full parameter set here)
    optim_cfg = dict(type='Adam', lr=0.0001, betas=(0.9, 0.999))
    generator = obj_from_dict(optim_cfg, torch.optim,
                              dict(params=restorer.parameters()))
    discriminator = obj_from_dict(optim_cfg, torch.optim,
                                  dict(params=restorer.parameters()))
    optimizer = dict(generator=generator, discriminator=discriminator)

    # test train_step on CPU
    outputs = restorer.train_step(data_batch, optimizer)
    assert isinstance(outputs, dict)
    assert isinstance(outputs['log_vars'], dict)
    assert isinstance(outputs['log_vars']['loss_pixel_v3'], float)
    assert outputs['num_samples'] == 1
    assert outputs['results']['lq'].shape == data_batch['lq'].shape
    assert outputs['results']['gt'].shape == data_batch['gt'].shape
    assert torch.is_tensor(outputs['results']['output'])
    assert outputs['results']['output'].size() == (1, 3, 128, 128)

    if torch.cuda.is_available():
        restorer = restorer.cuda()
        data_batch = {
            'lq': inputs.cuda(),
            'gt': targets.cuda(),
            'heatmap': heatmap.cuda()
        }

        # fresh optimizers bound to the CUDA parameters
        optim_cfg = dict(type='Adam', lr=0.0001, betas=(0.9, 0.999))
        generator = obj_from_dict(optim_cfg, torch.optim,
                                  dict(params=restorer.parameters()))
        discriminator = obj_from_dict(optim_cfg, torch.optim,
                                      dict(params=restorer.parameters()))
        optimizer = dict(generator=generator, discriminator=discriminator)

        # test train_step on GPU
        outputs = restorer.train_step(data_batch, optimizer)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['log_vars'], dict)
        assert isinstance(outputs['log_vars']['loss_pixel_v3'], float)
        assert outputs['num_samples'] == 1
        assert outputs['results']['lq'].shape == data_batch['lq'].shape
        assert outputs['results']['gt'].shape == data_batch['gt'].shape
        assert torch.is_tensor(outputs['results']['output'])
        assert outputs['results']['output'].size() == (1, 3, 128, 128)

        # val_step does not take the heatmap
        data_batch.pop('heatmap')
        result = restorer.val_step(data_batch, meta=[{'gt_path': ''}])
        assert isinstance(result, dict)
        assert isinstance(result['eval_result'], dict)
        assert result['eval_result'].keys() == set({'PSNR', 'SSIM'})
        assert isinstance(result['eval_result']['PSNR'], np.float64)
        assert isinstance(result['eval_result']['SSIM'], np.float64)

        with pytest.raises(AssertionError):
            # evaluation with metrics requires ground truth
            restorer(lq=inputs.cuda(), test_mode=True)

        with pytest.raises(TypeError):
            # pretrained must be str or None
            restorer.init_weights(pretrained=1)
        with pytest.raises(OSError):
            # an empty path is not a valid checkpoint
            restorer.init_weights(pretrained='')
def test_edvr_model():
    """Test EDVR: build, train_step with and without TSA (which requires
    ``tsa_iter`` in train_cfg), forward_dummy/forward_test, and evaluation
    with image saving. Most paths require CUDA (deformable convolutions).
    """
    model_cfg = dict(
        type='EDVR',
        generator=dict(
            type='EDVRNet',
            in_channels=3,
            out_channels=3,
            mid_channels=8,
            num_frames=5,
            deform_groups=2,
            num_blocks_extraction=1,
            num_blocks_reconstruction=1,
            center_frame_idx=2,
            with_tsa=False),
        pixel_loss=dict(type='L1Loss', loss_weight=1.0, reduction='sum'))

    train_cfg = None
    test_cfg = None

    # build restorer
    restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)

    # test attributes
    assert restorer.__class__.__name__ == 'EDVR'
    assert isinstance(restorer.generator, EDVRNet)
    assert isinstance(restorer.pixel_loss, L1Loss)

    # prepare data
    inputs = torch.rand(1, 5, 3, 8, 8)
    targets = torch.rand(1, 3, 32, 32)

    if torch.cuda.is_available():
        restorer = restorer.cuda()
        data_batch = {'lq': inputs.cuda(), 'gt': targets.cuda()}

        # prepare optimizer
        optim_cfg = dict(type='Adam', lr=0.0002, betas=(0.9, 0.999))
        optimizer = {
            'generator':
            obj_from_dict(
                optim_cfg, torch.optim,
                dict(params=getattr(restorer, 'generator').parameters()))
        }

        # test train_step without TSA
        outputs = restorer.train_step(data_batch, optimizer)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['log_vars'], dict)
        assert isinstance(outputs['log_vars']['loss_pix'], float)
        assert outputs['num_samples'] == 1
        assert torch.equal(outputs['results']['lq'], data_batch['lq'].cpu())
        assert torch.equal(outputs['results']['gt'], data_batch['gt'].cpu())
        assert torch.is_tensor(outputs['results']['output'])
        assert outputs['results']['output'].size() == (1, 3, 32, 32)

        # with TSA, train_cfg must contain "tsa_iter"
        model_cfg['generator']['with_tsa'] = True
        with pytest.raises(KeyError):
            train_cfg = dict(other_conent='xxx')
            restorer = build_model(
                model_cfg, train_cfg=train_cfg, test_cfg=test_cfg).cuda()
            outputs = restorer.train_step(data_batch, optimizer)
            train_cfg = None
            restorer = build_model(
                model_cfg, train_cfg=train_cfg, test_cfg=test_cfg).cuda()
            outputs = restorer.train_step(data_batch, optimizer)

        train_cfg = mmcv.ConfigDict(tsa_iter=1)
        restorer = build_model(
            model_cfg, train_cfg=train_cfg, test_cfg=test_cfg).cuda()
        optimizer = {
            'generator':
            obj_from_dict(
                optim_cfg, torch.optim,
                dict(params=getattr(restorer, 'generator').parameters()))
        }

        # train TSA module only, then the whole model (past tsa_iter)
        outputs = restorer.train_step(data_batch, optimizer)
        outputs = restorer.train_step(data_batch, optimizer)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['log_vars'], dict)
        assert isinstance(outputs['log_vars']['loss_pix'], float)
        assert outputs['num_samples'] == 1
        assert torch.equal(outputs['results']['lq'], data_batch['lq'].cpu())
        assert torch.equal(outputs['results']['gt'], data_batch['gt'].cpu())
        assert torch.is_tensor(outputs['results']['output'])
        assert outputs['results']['output'].size() == (1, 3, 32, 32)

        # test forward_dummy
        with torch.no_grad():
            output = restorer.forward_dummy(data_batch['lq'])
        assert torch.is_tensor(output)
        assert output.size() == (1, 3, 32, 32)

        # test forward_test with ground truth
        with torch.no_grad():
            outputs = restorer(**data_batch, test_mode=True)
        assert torch.equal(outputs['lq'], data_batch['lq'].cpu())
        assert torch.equal(outputs['gt'], data_batch['gt'].cpu())
        assert torch.is_tensor(outputs['output'])
        assert outputs['output'].size() == (1, 3, 32, 32)

        # test forward_test without ground truth
        with torch.no_grad():
            outputs = restorer(inputs.cuda(), test_mode=True)
        assert torch.equal(outputs['lq'], data_batch['lq'].cpu())
        assert torch.is_tensor(outputs['output'])
        assert outputs['output'].size() == (1, 3, 32, 32)

    # test with metric and saving images
    if torch.cuda.is_available():
        train_cfg = mmcv.ConfigDict(tsa_iter=1)
        test_cfg = dict(metrics=('PSNR', 'SSIM'), crop_border=0)
        test_cfg = mmcv.Config(test_cfg)

        data_batch = {
            'lq': inputs.cuda(),
            'gt': targets.cuda(),
            'meta': [{
                'gt_path': 'fake_path/fake_name.png',
                'key': '000/00000000'
            }]
        }

        restorer = build_model(
            model_cfg, train_cfg=train_cfg, test_cfg=test_cfg).cuda()

        with pytest.raises(AssertionError):
            # evaluation with metrics requires ground truth
            restorer(lq=inputs.cuda(), test_mode=True)

        with tempfile.TemporaryDirectory() as tmpdir:
            outputs = restorer(
                **data_batch,
                test_mode=True,
                save_image=True,
                save_path=tmpdir,
                iteration=None)
            assert isinstance(outputs, dict)
            assert isinstance(outputs['eval_result'], dict)
            assert isinstance(outputs['eval_result']['PSNR'], float)
            assert isinstance(outputs['eval_result']['SSIM'], float)

            outputs = restorer(
                **data_batch,
                test_mode=True,
                save_image=True,
                save_path=tmpdir,
                iteration=100)
            assert isinstance(outputs, dict)
            assert isinstance(outputs['eval_result'], dict)
            assert isinstance(outputs['eval_result']['PSNR'], float)
            assert isinstance(outputs['eval_result']['SSIM'], float)

            with pytest.raises(ValueError):
                # iteration must be None or an int
                restorer(
                    **data_batch,
                    test_mode=True,
                    save_image=True,
                    save_path=tmpdir,
                    iteration='100')
def test_esrgan():
    """Test ESRGAN: build, train_step on CPU/GPU with a mocked perceptual
    loss, discriminator warm-up steps, and the optional-loss code paths
    (missing pixel loss, perceptual-only, style-only).
    """
    model_cfg = dict(
        type='ESRGAN',
        generator=dict(
            type='MSRResNet',
            in_channels=3,
            out_channels=3,
            mid_channels=4,
            num_blocks=1,
            upscale_factor=4),
        discriminator=dict(type='ModifiedVGG', in_channels=3, mid_channels=2),
        pixel_loss=dict(type='L1Loss', loss_weight=1.0, reduction='mean'),
        gan_loss=dict(
            type='GANLoss',
            gan_type='vanilla',
            real_label_val=1.0,
            fake_label_val=0,
            loss_weight=0.005))

    train_cfg = None
    test_cfg = None

    # build restorer
    restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)

    # test attributes
    assert restorer.__class__.__name__ == 'ESRGAN'
    assert isinstance(restorer.generator, MSRResNet)
    assert isinstance(restorer.discriminator, ModifiedVGG)
    assert isinstance(restorer.pixel_loss, L1Loss)
    assert isinstance(restorer.gan_loss, GANLoss)

    # prepare data
    inputs = torch.rand(1, 3, 32, 32)
    targets = torch.rand(1, 3, 128, 128)
    data_batch = {'lq': inputs, 'gt': targets}

    # prepare optimizer
    optim_cfg = dict(type='Adam', lr=0.0002, betas=(0.9, 0.999))
    optimizer = {
        'generator':
        obj_from_dict(
            optim_cfg, torch.optim,
            dict(params=getattr(restorer, 'generator').parameters())),
        'discriminator':
        obj_from_dict(
            optim_cfg, torch.optim,
            dict(params=getattr(restorer, 'discriminator').parameters()))
    }

    # test train_step (perceptual loss mocked out)
    with patch.object(
            restorer,
            'perceptual_loss',
            return_value=(torch.tensor(1.0), torch.tensor(2.0))):
        outputs = restorer.train_step(data_batch, optimizer)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['log_vars'], dict)
        for v in [
                'loss_perceptual', 'loss_gan', 'loss_d_real', 'loss_d_fake',
                'loss_pix'
        ]:
            assert isinstance(outputs['log_vars'][v], float)
        assert outputs['num_samples'] == 1
        assert torch.equal(outputs['results']['lq'], data_batch['lq'])
        assert torch.equal(outputs['results']['gt'], data_batch['gt'])
        assert torch.is_tensor(outputs['results']['output'])
        assert outputs['results']['output'].size() == (1, 3, 128, 128)

    # test train_step on GPU
    if torch.cuda.is_available():
        restorer = restorer.cuda()
        optimizer = {
            'generator':
            obj_from_dict(
                optim_cfg, torch.optim,
                dict(params=getattr(restorer, 'generator').parameters())),
            'discriminator':
            obj_from_dict(
                optim_cfg, torch.optim,
                dict(params=getattr(restorer, 'discriminator').parameters()))
        }
        data_batch = {'lq': inputs.cuda(), 'gt': targets.cuda()}

        with patch.object(
                restorer,
                'perceptual_loss',
                return_value=(torch.tensor(1.0).cuda(),
                              torch.tensor(2.0).cuda())):
            outputs = restorer.train_step(data_batch, optimizer)
            assert isinstance(outputs, dict)
            assert isinstance(outputs['log_vars'], dict)
            for v in [
                    'loss_perceptual', 'loss_gan', 'loss_d_real',
                    'loss_d_fake', 'loss_pix'
            ]:
                assert isinstance(outputs['log_vars'][v], float)
            assert outputs['num_samples'] == 1
            assert torch.equal(outputs['results']['lq'],
                               data_batch['lq'].cpu())
            assert torch.equal(outputs['results']['gt'],
                               data_batch['gt'].cpu())
            assert torch.is_tensor(outputs['results']['output'])
            assert outputs['results']['output'].size() == (1, 3, 128, 128)

    # test disc_steps and disc_init_steps: only discriminator losses logged
    data_batch = {'lq': inputs.cpu(), 'gt': targets.cpu()}
    train_cfg = dict(disc_steps=2, disc_init_steps=2)
    restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)

    with patch.object(
            restorer,
            'perceptual_loss',
            return_value=(torch.tensor(1.0), torch.tensor(2.0))):
        outputs = restorer.train_step(data_batch, optimizer)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['log_vars'], dict)
        for v in ['loss_d_real', 'loss_d_fake']:
            assert isinstance(outputs['log_vars'][v], float)
        assert outputs['num_samples'] == 1
        assert torch.equal(outputs['results']['lq'], data_batch['lq'])
        assert torch.equal(outputs['results']['gt'], data_batch['gt'])
        assert torch.is_tensor(outputs['results']['output'])
        assert outputs['results']['output'].size() == (1, 3, 128, 128)

    # test without pixel loss
    model_cfg_ = model_cfg.copy()
    model_cfg_.pop('pixel_loss')
    restorer = build_model(model_cfg_, train_cfg=None, test_cfg=None)

    outputs = restorer.train_step(data_batch, optimizer)
    assert isinstance(outputs, dict)
    assert isinstance(outputs['log_vars'], dict)
    for v in ['loss_gan', 'loss_d_real', 'loss_d_fake']:
        assert isinstance(outputs['log_vars'][v], float)
    assert outputs['num_samples'] == 1
    assert torch.equal(outputs['results']['lq'], data_batch['lq'])
    assert torch.equal(outputs['results']['gt'], data_batch['gt'])
    assert torch.is_tensor(outputs['results']['output'])
    assert outputs['results']['output'].size() == (1, 3, 128, 128)

    # perceptual loss returning only the style term
    restorer = build_model(model_cfg, train_cfg=None, test_cfg=None)
    with patch.object(
            restorer,
            'perceptual_loss',
            return_value=(None, torch.tensor(2.0))):
        outputs = restorer.train_step(data_batch, optimizer)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['log_vars'], dict)
        for v in [
                'loss_style', 'loss_gan', 'loss_d_real', 'loss_d_fake',
                'loss_pix'
        ]:
            assert isinstance(outputs['log_vars'][v], float)
        assert outputs['num_samples'] == 1
        assert torch.equal(outputs['results']['lq'], data_batch['lq'])
        assert torch.equal(outputs['results']['gt'], data_batch['gt'])
        assert torch.is_tensor(outputs['results']['output'])
        assert outputs['results']['output'].size() == (1, 3, 128, 128)

    # perceptual loss returning only the perceptual term
    restorer = build_model(model_cfg, train_cfg=None, test_cfg=None)
    with patch.object(
            restorer,
            'perceptual_loss',
            return_value=(torch.tensor(2.0), None)):
        outputs = restorer.train_step(data_batch, optimizer)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['log_vars'], dict)
        for v in [
                'loss_perceptual', 'loss_gan', 'loss_d_real', 'loss_d_fake',
                'loss_pix'
        ]:
            assert isinstance(outputs['log_vars'][v], float)
        assert outputs['num_samples'] == 1
        assert torch.equal(outputs['results']['lq'], data_batch['lq'])
        assert torch.equal(outputs['results']['gt'], data_batch['gt'])
        assert torch.is_tensor(outputs['results']['output'])
        assert outputs['results']['output'].size() == (1, 3, 128, 128)
def test_glean():
    """Test GLEAN error paths in test mode: a non-int ``iteration`` raises
    ValueError and a missing ground truth raises AssertionError, on both
    CPU and GPU.
    """
    model_cfg = dict(
        type='GLEAN',
        generator=dict(
            type='GLEANStyleGANv2',
            in_size=16,
            out_size=64,
            style_channels=512),
        discriminator=dict(type='StyleGAN2Discriminator', in_size=64),
        pixel_loss=dict(type='L1Loss', loss_weight=1.0, reduction='mean'),
        gan_loss=dict(
            type='GANLoss',
            gan_type='vanilla',
            real_label_val=1.0,
            fake_label_val=0,
            loss_weight=0.005))

    train_cfg = None
    test_cfg = mmcv.Config(dict(metrics=['PSNR'], crop_border=0))

    # build restorer
    restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)

    # prepare data
    inputs = torch.rand(1, 3, 16, 16)
    targets = torch.rand(1, 3, 64, 64)
    data_batch = {'lq': inputs, 'gt': targets}

    restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)

    meta = [{'lq_path': ''}]

    with pytest.raises(ValueError):
        # iteration must be None or an int
        with torch.no_grad():
            restorer(
                **data_batch,
                test_mode=True,
                save_image=True,
                meta=meta,
                iteration='1')

    with pytest.raises(AssertionError):
        # evaluation with metrics requires ground truth
        with torch.no_grad():
            data_batch.pop('gt')
            restorer(**data_batch, test_mode=True)

    # same checks on GPU
    if torch.cuda.is_available():
        data_batch = {'lq': inputs.cuda(), 'gt': targets.cuda()}
        restorer = restorer.cuda()

        with pytest.raises(ValueError):
            with torch.no_grad():
                restorer(
                    **data_batch,
                    test_mode=True,
                    save_image=True,
                    meta=meta,
                    iteration='1')

        with pytest.raises(AssertionError):
            with torch.no_grad():
                data_batch.pop('gt')
                restorer(**data_batch, test_mode=True)
@COMPONENTS.register_module()
class BP(nn.Module):
    """A simple BP network for testing LIIF.

    Args:
        in_dim (int): Input dimension.
        out_dim (int): Output dimension.
    """

    def __init__(self, in_dim, out_dim):
        super().__init__()
        self.layer = nn.Linear(in_dim, out_dim)

    def forward(self, x):
        # Flatten all leading dims, apply the linear layer on the last dim,
        # then restore the original leading shape.
        shape = x.shape[:-1]
        x = self.layer(x.view(-1, x.shape[-1]))
        return x.view(*shape, -1)
def test_liif():
    """Test LIIF: build, train_step on CPU/GPU, and val_step with PSNR/SSIM
    metrics (GPU section).
    """
    model_cfg = dict(
        type='LIIF',
        generator=dict(
            type='LIIFEDSR',
            encoder=dict(
                type='EDSR',
                in_channels=3,
                out_channels=3,
                mid_channels=64,
                num_blocks=16),
            imnet=dict(
                type='MLPRefiner',
                in_dim=64,
                out_dim=3,
                hidden_list=[256, 256, 256, 256]),
            local_ensemble=True,
            feat_unfold=True,
            cell_decode=True,
            eval_bsize=30000),
        rgb_mean=(0.4488, 0.4371, 0.404),
        rgb_std=(1.0, 1.0, 1.0),
        pixel_loss=dict(type='L1Loss', loss_weight=1.0, reduction='mean'))

    scale_max = 4
    train_cfg = None
    test_cfg = Config(dict(metrics=['PSNR', 'SSIM'], crop_border=scale_max))

    # build restorer
    restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)

    assert restorer.__class__.__name__ == 'LIIF'

    # prepare data: LIIF predicts RGB values at sampled coordinates
    inputs = torch.rand(1, 3, 22, 11)
    targets = torch.rand(1, 128 * 64, 3)
    coord = torch.rand(1, 128 * 64, 2)
    cell = torch.rand(1, 128 * 64, 2)
    data_batch = {'lq': inputs, 'gt': targets, 'coord': coord, 'cell': cell}

    # prepare optimizer
    optim_cfg = dict(type='Adam', lr=0.0001, betas=(0.9, 0.999))
    optimizer = obj_from_dict(optim_cfg, torch.optim,
                              dict(params=restorer.parameters()))

    # test train_step on CPU
    outputs = restorer.train_step(data_batch, optimizer)
    assert isinstance(outputs, dict)
    assert isinstance(outputs['log_vars'], dict)
    assert isinstance(outputs['log_vars']['loss_pix'], float)
    assert outputs['num_samples'] == 1
    assert outputs['results']['lq'].shape == data_batch['lq'].shape
    assert outputs['results']['gt'].shape == data_batch['gt'].shape
    assert torch.is_tensor(outputs['results']['output'])
    assert outputs['results']['output'].size() == (1, 128 * 64, 3)

    if torch.cuda.is_available():
        restorer = restorer.cuda()
        data_batch = {
            'lq': inputs.cuda(),
            'gt': targets.cuda(),
            'coord': coord.cuda(),
            'cell': cell.cuda()
        }

        # fresh optimizer bound to the CUDA parameters
        optimizer = obj_from_dict(optim_cfg, torch.optim,
                                  dict(params=restorer.parameters()))

        # test train_step on GPU
        outputs = restorer.train_step(data_batch, optimizer)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['log_vars'], dict)
        assert isinstance(outputs['log_vars']['loss_pix'], float)
        assert outputs['num_samples'] == 1
        assert outputs['results']['lq'].shape == data_batch['lq'].shape
        assert outputs['results']['gt'].shape == data_batch['gt'].shape
        assert torch.is_tensor(outputs['results']['output'])
        assert outputs['results']['output'].size() == (1, 128 * 64, 3)

        # test val_step with metrics
        result = restorer.val_step(data_batch, meta=[{'gt_path': ''}])
        assert isinstance(result, dict)
        assert isinstance(result['eval_result'], dict)
        assert result['eval_result'].keys() == set({'PSNR', 'SSIM'})
        assert isinstance(result['eval_result']['PSNR'], np.float64)
        assert isinstance(result['eval_result']['SSIM'], np.float64)
def test_real_basicvsr():
    """Test RealBasicVSR: build, train_step on CPU/GPU with a mocked
    perceptual loss, discriminator warm-up steps, and the optional-loss code
    paths (missing pixel loss, perceptual-only, style-only).
    """
    model_cfg = dict(
        type='RealBasicVSR',
        generator=dict(type='RealBasicVSRNet'),
        discriminator=dict(
            type='UNetDiscriminatorWithSpectralNorm',
            in_channels=3,
            mid_channels=64,
            skip_connection=True),
        pixel_loss=dict(type='L1Loss', loss_weight=1.0, reduction='mean'),
        cleaning_loss=dict(type='L1Loss', loss_weight=1.0, reduction='mean'),
        gan_loss=dict(
            type='GANLoss',
            gan_type='vanilla',
            loss_weight=0.1,
            real_label_val=1.0,
            fake_label_val=0),
        is_use_sharpened_gt_in_pixel=True,
        is_use_sharpened_gt_in_percep=True,
        is_use_sharpened_gt_in_gan=True,
        is_use_ema=True)

    train_cfg = None
    test_cfg = None

    # build restorer
    restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)

    # test attributes
    assert restorer.__class__.__name__ == 'RealBasicVSR'
    assert isinstance(restorer.generator, RealBasicVSRNet)
    assert isinstance(restorer.discriminator,
                      UNetDiscriminatorWithSpectralNorm)
    assert isinstance(restorer.pixel_loss, L1Loss)
    assert isinstance(restorer.gan_loss, GANLoss)

    # prepare data (sharpened GT passed alongside the plain GT)
    inputs = torch.rand(1, 5, 3, 64, 64)
    targets = torch.rand(1, 5, 3, 256, 256)
    data_batch = {'lq': inputs, 'gt': targets, 'gt_unsharp': targets}

    # prepare optimizer
    optim_cfg = dict(type='Adam', lr=0.0002, betas=(0.9, 0.999))
    optimizer = {
        'generator':
        obj_from_dict(
            optim_cfg, torch.optim,
            dict(params=getattr(restorer, 'generator').parameters())),
        'discriminator':
        obj_from_dict(
            optim_cfg, torch.optim,
            dict(params=getattr(restorer, 'discriminator').parameters()))
    }

    with pytest.raises(ValueError):
        # train mode is only reachable through train_step
        restorer(**data_batch, test_mode=False)

    # test train_step (perceptual loss mocked out)
    with patch.object(
            restorer,
            'perceptual_loss',
            return_value=(torch.tensor(1.0), torch.tensor(2.0))):
        outputs = restorer.train_step(data_batch, optimizer)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['log_vars'], dict)
        for v in [
                'loss_perceptual', 'loss_gan', 'loss_d_real', 'loss_d_fake',
                'loss_pix', 'loss_clean'
        ]:
            assert isinstance(outputs['log_vars'][v], float)
        assert outputs['num_samples'] == 1
        assert torch.equal(outputs['results']['lq'], data_batch['lq'])
        assert torch.equal(outputs['results']['gt'], data_batch['gt'])
        assert torch.is_tensor(outputs['results']['output'])
        assert outputs['results']['output'].size() == (5, 3, 256, 256)

    # test train_step on GPU
    if torch.cuda.is_available():
        restorer = restorer.cuda()
        optimizer = {
            'generator':
            obj_from_dict(
                optim_cfg, torch.optim,
                dict(params=getattr(restorer, 'generator').parameters())),
            'discriminator':
            obj_from_dict(
                optim_cfg, torch.optim,
                dict(params=getattr(restorer, 'discriminator').parameters()))
        }
        data_batch = {
            'lq': inputs.cuda(),
            'gt': targets.cuda(),
            'gt_unsharp': targets.cuda()
        }

        with patch.object(
                restorer,
                'perceptual_loss',
                return_value=(torch.tensor(1.0).cuda(),
                              torch.tensor(2.0).cuda())):
            outputs = restorer.train_step(data_batch, optimizer)
            assert isinstance(outputs, dict)
            assert isinstance(outputs['log_vars'], dict)
            for v in [
                    'loss_perceptual', 'loss_gan', 'loss_d_real',
                    'loss_d_fake', 'loss_pix', 'loss_clean'
            ]:
                assert isinstance(outputs['log_vars'][v], float)
            assert outputs['num_samples'] == 1
            assert torch.equal(outputs['results']['lq'],
                               data_batch['lq'].cpu())
            assert torch.equal(outputs['results']['gt'],
                               data_batch['gt'].cpu())
            assert torch.is_tensor(outputs['results']['output'])
            assert outputs['results']['output'].size() == (5, 3, 256, 256)

    # test disc_steps and disc_init_steps: only discriminator losses logged
    data_batch = {
        'lq': inputs.cpu(),
        'gt': targets.cpu(),
        'gt_unsharp': targets.cpu()
    }
    train_cfg = dict(disc_steps=2, disc_init_steps=2, start_iter=0)
    restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)

    with patch.object(
            restorer,
            'perceptual_loss',
            return_value=(torch.tensor(1.0), torch.tensor(2.0))):
        outputs = restorer.train_step(data_batch, optimizer)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['log_vars'], dict)
        for v in ['loss_d_real', 'loss_d_fake']:
            assert isinstance(outputs['log_vars'][v], float)
        assert outputs['num_samples'] == 1
        assert torch.equal(outputs['results']['lq'], data_batch['lq'])
        assert torch.equal(outputs['results']['gt'], data_batch['gt'])
        assert torch.is_tensor(outputs['results']['output'])
        assert outputs['results']['output'].size() == (5, 3, 256, 256)

    # test without pixel loss
    model_cfg_ = model_cfg.copy()
    model_cfg_.pop('pixel_loss')
    restorer = build_model(model_cfg_, train_cfg=None, test_cfg=None)

    outputs = restorer.train_step(data_batch, optimizer)
    assert isinstance(outputs, dict)
    assert isinstance(outputs['log_vars'], dict)
    for v in ['loss_gan', 'loss_d_real', 'loss_d_fake']:
        assert isinstance(outputs['log_vars'][v], float)
    assert outputs['num_samples'] == 1
    assert torch.equal(outputs['results']['lq'], data_batch['lq'])
    assert torch.equal(outputs['results']['gt'], data_batch['gt'])
    assert torch.is_tensor(outputs['results']['output'])
    assert outputs['results']['output'].size() == (5, 3, 256, 256)

    # perceptual loss returning only the style term
    restorer = build_model(model_cfg, train_cfg=None, test_cfg=None)
    with patch.object(
            restorer,
            'perceptual_loss',
            return_value=(None, torch.tensor(2.0))):
        outputs = restorer.train_step(data_batch, optimizer)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['log_vars'], dict)
        for v in [
                'loss_style', 'loss_gan', 'loss_d_real', 'loss_d_fake',
                'loss_pix', 'loss_clean'
        ]:
            assert isinstance(outputs['log_vars'][v], float)
        assert outputs['num_samples'] == 1
        assert torch.equal(outputs['results']['lq'], data_batch['lq'])
        assert torch.equal(outputs['results']['gt'], data_batch['gt'])
        assert torch.is_tensor(outputs['results']['output'])
        assert outputs['results']['output'].size() == (5, 3, 256, 256)

    # perceptual loss returning only the perceptual term
    restorer = build_model(model_cfg, train_cfg=None, test_cfg=None)
    with patch.object(
            restorer,
            'perceptual_loss',
            return_value=(torch.tensor(2.0), None)):
        outputs = restorer.train_step(data_batch, optimizer)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['log_vars'], dict)
        for v in [
                'loss_perceptual', 'loss_gan', 'loss_d_real', 'loss_d_fake',
                'loss_pix', 'loss_clean'
        ]:
            assert isinstance(outputs['log_vars'][v], float)
        assert outputs['num_samples'] == 1
        assert torch.equal(outputs['results']['lq'], data_batch['lq'])
        assert torch.equal(outputs['results']['gt'], data_batch['gt'])
        assert torch.is_tensor(outputs['results']['output'])
        assert outputs['results']['output'].size() == (5, 3, 256, 256)
def test_real_esrgan():
    """Exercise the RealESRGAN restorer: build, forward, val_step, train_step,
    the disc_steps/disc_init_steps schedule, and the reduced-loss variants."""
    model_cfg = dict(
        type='RealESRGAN',
        generator=dict(
            type='MSRResNet',
            in_channels=3,
            out_channels=3,
            mid_channels=4,
            num_blocks=1,
            upscale_factor=4),
        discriminator=dict(type='ModifiedVGG', in_channels=3, mid_channels=2),
        pixel_loss=dict(type='L1Loss', loss_weight=1.0, reduction='mean'),
        gan_loss=dict(
            type='GANLoss',
            gan_type='vanilla',
            loss_weight=0.1,
            real_label_val=1.0,
            fake_label_val=0),
        is_use_sharpened_gt_in_pixel=True,
        is_use_sharpened_gt_in_percep=True,
        is_use_sharpened_gt_in_gan=True,
        is_use_ema=True)
    train_cfg = None
    test_cfg = None

    # build the restorer and check its components
    restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
    assert restorer.__class__.__name__ == 'RealESRGAN'
    assert isinstance(restorer.generator, MSRResNet)
    assert isinstance(restorer.discriminator, ModifiedVGG)
    assert isinstance(restorer.pixel_loss, L1Loss)
    assert isinstance(restorer.gan_loss, GANLoss)

    # prepare data (x4 upscaling) and optimizers
    inputs = torch.rand(1, 3, 32, 32)
    targets = torch.rand(1, 3, 128, 128)
    data_batch = {'lq': inputs, 'gt': targets, 'gt_unsharp': targets}
    optim_cfg = dict(type='Adam', lr=0.0002, betas=(0.9, 0.999))
    optimizer = {
        'generator':
        obj_from_dict(optim_cfg, torch.optim,
                      dict(params=restorer.generator.parameters())),
        'discriminator':
        obj_from_dict(optim_cfg, torch.optim,
                      dict(params=restorer.discriminator.parameters()))
    }

    # GAN-based restorers have no plain training forward
    with pytest.raises(ValueError):
        restorer(**data_batch, test_mode=False)

    # forward_test (gt_unsharp must not be passed to the test forward)
    data_batch.pop('gt_unsharp')
    with torch.no_grad():
        outputs = restorer(**data_batch, test_mode=True)
    assert torch.equal(outputs['lq'], data_batch['lq'])
    assert torch.is_tensor(outputs['output'])
    assert outputs['output'].size() == (1, 3, 128, 128)

    # forward_dummy
    with torch.no_grad():
        output = restorer.forward_dummy(data_batch['lq'])
    assert torch.is_tensor(output)
    assert output.size() == (1, 3, 128, 128)

    # val_step
    with torch.no_grad():
        outputs = restorer.val_step(data_batch)
    data_batch['gt_unsharp'] = targets
    assert torch.equal(outputs['lq'], data_batch['lq'])
    assert torch.is_tensor(outputs['output'])
    assert outputs['output'].size() == (1, 3, 128, 128)

    # train_step with a mocked perceptual loss (percep, style)
    with patch.object(
            restorer,
            'perceptual_loss',
            return_value=(torch.tensor(1.0), torch.tensor(2.0))):
        outputs = restorer.train_step(data_batch, optimizer)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['log_vars'], dict)
        for key in [
                'loss_perceptual', 'loss_gan', 'loss_d_real', 'loss_d_fake',
                'loss_pix'
        ]:
            assert isinstance(outputs['log_vars'][key], float)
        assert outputs['num_samples'] == 1
        assert torch.equal(outputs['results']['lq'], data_batch['lq'])
        assert torch.equal(outputs['results']['gt'], data_batch['gt'])
        assert torch.is_tensor(outputs['results']['output'])
        assert outputs['results']['output'].size() == (1, 3, 128, 128)

    # repeat forward/val/train on GPU when available
    if torch.cuda.is_available():
        restorer = restorer.cuda()
        optimizer = {
            'generator':
            obj_from_dict(optim_cfg, torch.optim,
                          dict(params=restorer.generator.parameters())),
            'discriminator':
            obj_from_dict(optim_cfg, torch.optim,
                          dict(params=restorer.discriminator.parameters()))
        }
        data_batch = {
            'lq': inputs.cuda(),
            'gt': targets.cuda(),
            'gt_unsharp': targets.cuda()
        }

        # forward_test (gpu)
        data_batch.pop('gt_unsharp')
        with torch.no_grad():
            outputs = restorer(**data_batch, test_mode=True)
        assert torch.equal(outputs['lq'], data_batch['lq'].cpu())
        assert torch.is_tensor(outputs['output'])
        assert outputs['output'].size() == (1, 3, 128, 128)

        # val_step (gpu)
        with torch.no_grad():
            outputs = restorer.val_step(data_batch)
        data_batch['gt_unsharp'] = targets.cuda()
        assert torch.equal(outputs['lq'], data_batch['lq'].cpu())
        assert torch.is_tensor(outputs['output'])
        assert outputs['output'].size() == (1, 3, 128, 128)

        # train_step (gpu)
        with patch.object(
                restorer,
                'perceptual_loss',
                return_value=(torch.tensor(1.0).cuda(),
                              torch.tensor(2.0).cuda())):
            outputs = restorer.train_step(data_batch, optimizer)
            assert isinstance(outputs, dict)
            assert isinstance(outputs['log_vars'], dict)
            for key in [
                    'loss_perceptual', 'loss_gan', 'loss_d_real',
                    'loss_d_fake', 'loss_pix'
            ]:
                assert isinstance(outputs['log_vars'][key], float)
            assert outputs['num_samples'] == 1
            assert torch.equal(outputs['results']['lq'],
                               data_batch['lq'].cpu())
            assert torch.equal(outputs['results']['gt'],
                               data_batch['gt'].cpu())
            assert torch.is_tensor(outputs['results']['output'])
            assert outputs['results']['output'].size() == (1, 3, 128, 128)

    # disc_steps / disc_init_steps: only discriminator losses are logged
    data_batch = {
        'lq': inputs.cpu(),
        'gt': targets.cpu(),
        'gt_unsharp': targets.cpu()
    }
    train_cfg = dict(disc_steps=2, disc_init_steps=2, start_iter=0)
    restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
    with patch.object(
            restorer,
            'perceptual_loss',
            return_value=(torch.tensor(1.0), torch.tensor(2.0))):
        outputs = restorer.train_step(data_batch, optimizer)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['log_vars'], dict)
        for key in ['loss_d_real', 'loss_d_fake']:
            assert isinstance(outputs['log_vars'][key], float)
        assert outputs['num_samples'] == 1
        assert torch.equal(outputs['results']['lq'], data_batch['lq'])
        assert torch.equal(outputs['results']['gt'], data_batch['gt'])
        assert torch.is_tensor(outputs['results']['output'])
        assert outputs['results']['output'].size() == (1, 3, 128, 128)

    # without discriminator, test forward still works
    model_cfg_ = model_cfg.copy()
    model_cfg_.pop('discriminator')
    restorer = build_model(model_cfg_, train_cfg=train_cfg, test_cfg=test_cfg)
    data_batch.pop('gt_unsharp')
    with torch.no_grad():
        outputs = restorer(**data_batch, test_mode=True)
    data_batch['gt_unsharp'] = targets.cpu()
    assert torch.equal(outputs['lq'], data_batch['lq'])
    assert torch.is_tensor(outputs['output'])
    assert outputs['output'].size() == (1, 3, 128, 128)

    # without pixel loss, training still logs GAN losses
    model_cfg_ = model_cfg.copy()
    model_cfg_.pop('pixel_loss')
    restorer = build_model(model_cfg_, train_cfg=None, test_cfg=None)
    outputs = restorer.train_step(data_batch, optimizer)
    assert isinstance(outputs, dict)
    assert isinstance(outputs['log_vars'], dict)
    for key in ['loss_gan', 'loss_d_real', 'loss_d_fake']:
        assert isinstance(outputs['log_vars'][key], float)
    assert outputs['num_samples'] == 1
    assert torch.equal(outputs['results']['lq'], data_batch['lq'])
    assert torch.equal(outputs['results']['gt'], data_batch['gt'])
    assert torch.is_tensor(outputs['results']['output'])
    assert outputs['results']['output'].size() == (1, 3, 128, 128)

    # perceptual loss returning only a style term
    restorer = build_model(model_cfg, train_cfg=None, test_cfg=None)
    with patch.object(
            restorer, 'perceptual_loss',
            return_value=(None, torch.tensor(2.0))):
        outputs = restorer.train_step(data_batch, optimizer)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['log_vars'], dict)
        for key in [
                'loss_style', 'loss_gan', 'loss_d_real', 'loss_d_fake',
                'loss_pix'
        ]:
            assert isinstance(outputs['log_vars'][key], float)
        assert outputs['num_samples'] == 1
        assert torch.equal(outputs['results']['lq'], data_batch['lq'])
        assert torch.equal(outputs['results']['gt'], data_batch['gt'])
        assert torch.is_tensor(outputs['results']['output'])
        assert outputs['results']['output'].size() == (1, 3, 128, 128)

    # perceptual loss returning only a perceptual term
    restorer = build_model(model_cfg, train_cfg=None, test_cfg=None)
    with patch.object(
            restorer, 'perceptual_loss',
            return_value=(torch.tensor(2.0), None)):
        outputs = restorer.train_step(data_batch, optimizer)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['log_vars'], dict)
        for key in [
                'loss_perceptual', 'loss_gan', 'loss_d_real', 'loss_d_fake',
                'loss_pix'
        ]:
            assert isinstance(outputs['log_vars'][key], float)
        assert outputs['num_samples'] == 1
        assert torch.equal(outputs['results']['lq'], data_batch['lq'])
        assert torch.equal(outputs['results']['gt'], data_batch['gt'])
        assert torch.is_tensor(outputs['results']['output'])
        assert outputs['results']['output'].size() == (1, 3, 128, 128)
def test_srgan():
    """Exercise the SRGAN restorer: build, forward, val_step, train_step,
    the disc_steps/disc_init_steps schedule, and the reduced-loss variants."""
    model_cfg = dict(
        type='SRGAN',
        generator=dict(
            type='MSRResNet',
            in_channels=3,
            out_channels=3,
            mid_channels=4,
            num_blocks=1,
            upscale_factor=4),
        discriminator=dict(type='ModifiedVGG', in_channels=3, mid_channels=2),
        pixel_loss=dict(type='L1Loss', loss_weight=1.0, reduction='mean'),
        gan_loss=dict(
            type='GANLoss',
            gan_type='vanilla',
            real_label_val=1.0,
            fake_label_val=0,
            loss_weight=0.005))
    train_cfg = None
    test_cfg = None

    # build the restorer and check its components
    restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
    assert restorer.__class__.__name__ == 'SRGAN'
    assert isinstance(restorer.generator, MSRResNet)
    assert isinstance(restorer.discriminator, ModifiedVGG)
    assert isinstance(restorer.pixel_loss, L1Loss)
    assert isinstance(restorer.gan_loss, GANLoss)

    # prepare data (x4 upscaling) and optimizers
    inputs = torch.rand(1, 3, 32, 32)
    targets = torch.rand(1, 3, 128, 128)
    data_batch = {'lq': inputs, 'gt': targets}
    optim_cfg = dict(type='Adam', lr=0.0002, betas=(0.9, 0.999))
    optimizer = {
        'generator':
        obj_from_dict(optim_cfg, torch.optim,
                      dict(params=restorer.generator.parameters())),
        'discriminator':
        obj_from_dict(optim_cfg, torch.optim,
                      dict(params=restorer.discriminator.parameters()))
    }

    # GAN-based restorers have no plain training forward
    with pytest.raises(ValueError):
        restorer(**data_batch, test_mode=False)

    # forward_test
    with torch.no_grad():
        outputs = restorer(**data_batch, test_mode=True)
    assert torch.equal(outputs['lq'], data_batch['lq'])
    assert torch.is_tensor(outputs['output'])
    assert outputs['output'].size() == (1, 3, 128, 128)

    # forward_dummy
    with torch.no_grad():
        output = restorer.forward_dummy(data_batch['lq'])
    assert torch.is_tensor(output)
    assert output.size() == (1, 3, 128, 128)

    # val_step
    with torch.no_grad():
        outputs = restorer.val_step(data_batch)
    assert torch.equal(outputs['lq'], data_batch['lq'])
    assert torch.is_tensor(outputs['output'])
    assert outputs['output'].size() == (1, 3, 128, 128)

    # train_step with a mocked perceptual loss (percep, style)
    with patch.object(
            restorer,
            'perceptual_loss',
            return_value=(torch.tensor(1.0), torch.tensor(2.0))):
        outputs = restorer.train_step(data_batch, optimizer)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['log_vars'], dict)
        for key in [
                'loss_perceptual', 'loss_gan', 'loss_d_real', 'loss_d_fake',
                'loss_pix'
        ]:
            assert isinstance(outputs['log_vars'][key], float)
        assert outputs['num_samples'] == 1
        assert torch.equal(outputs['results']['lq'], data_batch['lq'])
        assert torch.equal(outputs['results']['gt'], data_batch['gt'])
        assert torch.is_tensor(outputs['results']['output'])
        assert outputs['results']['output'].size() == (1, 3, 128, 128)

    # repeat forward/val/train on GPU when available
    if torch.cuda.is_available():
        restorer = restorer.cuda()
        optimizer = {
            'generator':
            obj_from_dict(optim_cfg, torch.optim,
                          dict(params=restorer.generator.parameters())),
            'discriminator':
            obj_from_dict(optim_cfg, torch.optim,
                          dict(params=restorer.discriminator.parameters()))
        }
        data_batch = {'lq': inputs.cuda(), 'gt': targets.cuda()}

        # forward_test (gpu)
        with torch.no_grad():
            outputs = restorer(**data_batch, test_mode=True)
        assert torch.equal(outputs['lq'], data_batch['lq'].cpu())
        assert torch.is_tensor(outputs['output'])
        assert outputs['output'].size() == (1, 3, 128, 128)

        # val_step (gpu)
        with torch.no_grad():
            outputs = restorer.val_step(data_batch)
        assert torch.equal(outputs['lq'], data_batch['lq'].cpu())
        assert torch.is_tensor(outputs['output'])
        assert outputs['output'].size() == (1, 3, 128, 128)

        # train_step (gpu)
        with patch.object(
                restorer,
                'perceptual_loss',
                return_value=(torch.tensor(1.0).cuda(),
                              torch.tensor(2.0).cuda())):
            outputs = restorer.train_step(data_batch, optimizer)
            assert isinstance(outputs, dict)
            assert isinstance(outputs['log_vars'], dict)
            for key in [
                    'loss_perceptual', 'loss_gan', 'loss_d_real',
                    'loss_d_fake', 'loss_pix'
            ]:
                assert isinstance(outputs['log_vars'][key], float)
            assert outputs['num_samples'] == 1
            assert torch.equal(outputs['results']['lq'],
                               data_batch['lq'].cpu())
            assert torch.equal(outputs['results']['gt'],
                               data_batch['gt'].cpu())
            assert torch.is_tensor(outputs['results']['output'])
            assert outputs['results']['output'].size() == (1, 3, 128, 128)

    # disc_steps / disc_init_steps: only discriminator losses are logged
    data_batch = {'lq': inputs.cpu(), 'gt': targets.cpu()}
    train_cfg = dict(disc_steps=2, disc_init_steps=2)
    restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
    with patch.object(
            restorer,
            'perceptual_loss',
            return_value=(torch.tensor(1.0), torch.tensor(2.0))):
        outputs = restorer.train_step(data_batch, optimizer)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['log_vars'], dict)
        for key in ['loss_d_real', 'loss_d_fake']:
            assert isinstance(outputs['log_vars'][key], float)
        assert outputs['num_samples'] == 1
        assert torch.equal(outputs['results']['lq'], data_batch['lq'])
        assert torch.equal(outputs['results']['gt'], data_batch['gt'])
        assert torch.is_tensor(outputs['results']['output'])
        assert outputs['results']['output'].size() == (1, 3, 128, 128)

    # without discriminator, test forward still works
    model_cfg_ = model_cfg.copy()
    model_cfg_.pop('discriminator')
    restorer = build_model(model_cfg_, train_cfg=train_cfg, test_cfg=test_cfg)
    with torch.no_grad():
        outputs = restorer(**data_batch, test_mode=True)
    assert torch.equal(outputs['lq'], data_batch['lq'])
    assert torch.is_tensor(outputs['output'])
    assert outputs['output'].size() == (1, 3, 128, 128)

    # without pixel loss, training still logs GAN losses
    model_cfg_ = model_cfg.copy()
    model_cfg_.pop('pixel_loss')
    restorer = build_model(model_cfg_, train_cfg=None, test_cfg=None)
    outputs = restorer.train_step(data_batch, optimizer)
    assert isinstance(outputs, dict)
    assert isinstance(outputs['log_vars'], dict)
    for key in ['loss_gan', 'loss_d_real', 'loss_d_fake']:
        assert isinstance(outputs['log_vars'][key], float)
    assert outputs['num_samples'] == 1
    assert torch.equal(outputs['results']['lq'], data_batch['lq'])
    assert torch.equal(outputs['results']['gt'], data_batch['gt'])
    assert torch.is_tensor(outputs['results']['output'])
    assert outputs['results']['output'].size() == (1, 3, 128, 128)

    # perceptual loss returning only a style term
    restorer = build_model(model_cfg, train_cfg=None, test_cfg=None)
    with patch.object(
            restorer, 'perceptual_loss',
            return_value=(None, torch.tensor(2.0))):
        outputs = restorer.train_step(data_batch, optimizer)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['log_vars'], dict)
        for key in [
                'loss_style', 'loss_gan', 'loss_d_real', 'loss_d_fake',
                'loss_pix'
        ]:
            assert isinstance(outputs['log_vars'][key], float)
        assert outputs['num_samples'] == 1
        assert torch.equal(outputs['results']['lq'], data_batch['lq'])
        assert torch.equal(outputs['results']['gt'], data_batch['gt'])
        assert torch.is_tensor(outputs['results']['output'])
        assert outputs['results']['output'].size() == (1, 3, 128, 128)

    # perceptual loss returning only a perceptual term
    restorer = build_model(model_cfg, train_cfg=None, test_cfg=None)
    with patch.object(
            restorer, 'perceptual_loss',
            return_value=(torch.tensor(2.0), None)):
        outputs = restorer.train_step(data_batch, optimizer)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['log_vars'], dict)
        for key in [
                'loss_perceptual', 'loss_gan', 'loss_d_real', 'loss_d_fake',
                'loss_pix'
        ]:
            assert isinstance(outputs['log_vars'][key], float)
        assert outputs['num_samples'] == 1
        assert torch.equal(outputs['results']['lq'], data_batch['lq'])
        assert torch.equal(outputs['results']['gt'], data_batch['gt'])
        assert torch.is_tensor(outputs['results']['output'])
        assert outputs['results']['output'].size() == (1, 3, 128, 128)
def test_tdan_model():
    """Exercise the TDAN video restorer: build, train_step, forward_dummy,
    forward_test, and evaluation with metrics / image saving (GPU only)."""
    model_cfg = dict(
        type='TDAN',
        generator=dict(
            type='TDANNet',
            in_channels=3,
            mid_channels=64,
            out_channels=3,
            num_blocks_before_align=5,
            num_blocks_after_align=10),
        pixel_loss=dict(type='MSELoss', loss_weight=1.0, reduction='sum'),
        lq_pixel_loss=dict(type='MSELoss', loss_weight=1.0, reduction='sum'))
    train_cfg = None
    test_cfg = None

    # build the restorer and check its components
    restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
    assert restorer.__class__.__name__ == 'TDAN'
    assert isinstance(restorer.generator, TDANNet)
    assert isinstance(restorer.pixel_loss, MSELoss)

    # 5-frame clip, x4 upscaling
    inputs = torch.rand(1, 5, 3, 8, 8)
    targets = torch.rand(1, 3, 32, 32)

    if torch.cuda.is_available():
        restorer = restorer.cuda()
        data_batch = {'lq': inputs.cuda(), 'gt': targets.cuda()}
        optim_cfg = dict(type='Adam', lr=0.0002, betas=(0.9, 0.999))
        optimizer = {
            'generator':
            obj_from_dict(optim_cfg, torch.optim,
                          dict(params=restorer.generator.parameters()))
        }

        # train_step
        outputs = restorer.train_step(data_batch, optimizer)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['log_vars'], dict)
        assert isinstance(outputs['log_vars']['loss_pix'], float)
        assert outputs['num_samples'] == 1
        assert torch.equal(outputs['results']['lq'], data_batch['lq'].cpu())
        assert torch.equal(outputs['results']['gt'], data_batch['gt'].cpu())
        assert torch.is_tensor(outputs['results']['output'])
        assert outputs['results']['output'].size() == (1, 3, 32, 32)

        # forward_dummy returns (sr, aligned_lqs)
        with torch.no_grad():
            output = restorer.forward_dummy(data_batch['lq'])
        assert isinstance(output, tuple)
        assert torch.is_tensor(output[0])
        assert output[0].size() == (1, 3, 32, 32)
        assert torch.is_tensor(output[1])
        assert output[1].size() == (1, 5, 3, 8, 8)

        # forward_test with ground truth
        with torch.no_grad():
            outputs = restorer(**data_batch, test_mode=True)
        assert torch.equal(outputs['lq'], data_batch['lq'].cpu())
        assert torch.equal(outputs['gt'], data_batch['gt'].cpu())
        assert torch.is_tensor(outputs['output'])
        assert outputs['output'].size() == (1, 3, 32, 32)

        # forward_test without ground truth
        with torch.no_grad():
            outputs = restorer(inputs.cuda(), test_mode=True)
        assert torch.equal(outputs['lq'], data_batch['lq'].cpu())
        assert torch.is_tensor(outputs['output'])
        assert outputs['output'].size() == (1, 3, 32, 32)

    if torch.cuda.is_available():
        # evaluation with metrics and image saving
        train_cfg = mmcv.ConfigDict(tsa_iter=1)
        test_cfg = dict(metrics=('PSNR', 'SSIM'), crop_border=0)
        test_cfg = mmcv.Config(test_cfg)
        data_batch = {
            'lq': inputs.cuda(),
            'gt': targets.cuda(),
            'meta': [{
                'gt_path': 'fake_path/fake_name.png',
                'key': '000/00000000'
            }]
        }
        restorer = build_model(
            model_cfg, train_cfg=train_cfg, test_cfg=test_cfg).cuda()

        # evaluation with metrics requires ground truth
        with pytest.raises(AssertionError):
            restorer(lq=inputs.cuda(), test_mode=True)

        with tempfile.TemporaryDirectory() as tmpdir:
            outputs = restorer(
                **data_batch,
                test_mode=True,
                save_image=True,
                save_path=tmpdir,
                iteration=None)
            assert isinstance(outputs, dict)
            assert isinstance(outputs['eval_result'], dict)
            assert isinstance(outputs['eval_result']['PSNR'], float)
            assert isinstance(outputs['eval_result']['SSIM'], float)

            outputs = restorer(
                **data_batch,
                test_mode=True,
                save_image=True,
                save_path=tmpdir,
                iteration=100)
            assert isinstance(outputs, dict)
            assert isinstance(outputs['eval_result'], dict)
            assert isinstance(outputs['eval_result']['PSNR'], float)
            assert isinstance(outputs['eval_result']['SSIM'], float)

            # iteration must be an int or None
            with pytest.raises(ValueError):
                restorer(
                    **data_batch,
                    test_mode=True,
                    save_image=True,
                    save_path=tmpdir,
                    iteration='100')
def test_sfe():
    """SFE should preserve spatial size while mapping 3 -> 64 channels."""
    img = torch.rand(2, 3, 48, 48)
    model = SFE(3, 64, 16, 1.0)
    feat = model(img)
    assert feat.shape == (2, 64, 48, 48)
def test_csfi():
    """CSFI2/CSFI3 exchange features across scales, keeping each scale's shape."""
    feat_x1 = torch.rand(2, 16, 24, 24)
    feat_x2 = torch.rand(2, 16, 48, 48)
    feat_x4 = torch.rand(2, 16, 96, 96)

    # two-scale exchange
    csfi2 = CSFI2(mid_channels=16)
    out1, out2 = csfi2(feat_x1, feat_x2)
    assert out1.shape == (2, 16, 24, 24)
    assert out2.shape == (2, 16, 48, 48)

    # three-scale exchange
    csfi3 = CSFI3(mid_channels=16)
    out1, out2, out4 = csfi3(feat_x1, feat_x2, feat_x4)
    assert out1.shape == (2, 16, 24, 24)
    assert out2.shape == (2, 16, 48, 48)
    assert out4.shape == (2, 16, 96, 96)
def test_merge_features():
    """MergeFeatures should fuse three scales into one output at the largest size."""
    feat_x1 = torch.rand(2, 16, 24, 24)
    feat_x2 = torch.rand(2, 16, 48, 48)
    feat_x4 = torch.rand(2, 16, 96, 96)
    merger = MergeFeatures(mid_channels=16, out_channels=3)
    merged = merger(feat_x1, feat_x2, feat_x4)
    assert merged.shape == (2, 3, 96, 96)
def test_ttsr_net():
    """TTSRNet should upscale x4 given LR input, soft attention and textures."""
    lr = torch.rand(2, 3, 24, 24)
    soft_attention = torch.rand(2, 1, 24, 24)
    # texture features at three scales (smallest to largest)
    t_level3 = torch.rand(2, 64, 24, 24)
    t_level2 = torch.rand(2, 32, 48, 48)
    t_level1 = torch.rand(2, 16, 96, 96)

    ttsr_cfg = dict(
        type='TTSRNet',
        in_channels=3,
        out_channels=3,
        mid_channels=16,
        texture_channels=16)
    ttsr = build_backbone(ttsr_cfg)
    sr = ttsr(lr, soft_attention, (t_level3, t_level2, t_level1))
    assert sr.shape == (2, 3, 96, 96)
def test_ttsr():
    """Exercise the TTSR reference-based restorer: build, train_step and
    val_step with metrics (GPU only)."""
    # first build with load_pretrained_vgg=False to avoid a weight download
    model_cfg = dict(
        type='TTSR',
        generator=dict(
            type='TTSRNet',
            in_channels=3,
            out_channels=3,
            mid_channels=64,
            num_blocks=(16, 16, 8, 4)),
        extractor=dict(type='LTE', load_pretrained_vgg=False),
        transformer=dict(type='SearchTransformer'),
        discriminator=dict(type='TTSRDiscriminator', in_size=64),
        pixel_loss=dict(type='L1Loss', loss_weight=1.0, reduction='mean'),
        perceptual_loss=dict(
            type='PerceptualLoss',
            layer_weights={'29': 1.0},
            vgg_type='vgg19',
            perceptual_weight=0.01,
            style_weight=0.001,
            criterion='mse'),
        transferal_perceptual_loss=dict(
            type='TransferalPerceptualLoss',
            loss_weight=0.01,
            use_attention=False,
            criterion='mse'),
        gan_loss=dict(
            type='GANLoss',
            gan_type='vanilla',
            loss_weight=0.001,
            real_label_val=1.0,
            fake_label_val=0))
    scale = 4
    train_cfg = None
    test_cfg = Config(dict(metrics=['PSNR', 'SSIM'], crop_border=scale))
    restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)

    # rebuild with the default LTE extractor
    model_cfg = dict(
        type='TTSR',
        generator=dict(
            type='TTSRNet',
            in_channels=3,
            out_channels=3,
            mid_channels=64,
            num_blocks=(16, 16, 8, 4)),
        extractor=dict(type='LTE'),
        transformer=dict(type='SearchTransformer'),
        discriminator=dict(type='TTSRDiscriminator', in_size=64),
        pixel_loss=dict(type='L1Loss', loss_weight=1.0, reduction='mean'),
        perceptual_loss=dict(
            type='PerceptualLoss',
            layer_weights={'29': 1.0},
            vgg_type='vgg19',
            perceptual_weight=0.01,
            style_weight=0.001,
            criterion='mse'),
        transferal_perceptual_loss=dict(
            type='TransferalPerceptualLoss',
            loss_weight=0.01,
            use_attention=False,
            criterion='mse'),
        gan_loss=dict(
            type='GANLoss',
            gan_type='vanilla',
            loss_weight=0.001,
            real_label_val=1.0,
            fake_label_val=0))
    scale = 4
    train_cfg = None
    test_cfg = Config(dict(metrics=['PSNR', 'SSIM'], crop_border=scale))
    restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
    assert restorer.__class__.__name__ == 'TTSR'

    # prepare LR input, GT and reference images (x4 upscaling)
    inputs = torch.rand(1, 3, 16, 16)
    targets = torch.rand(1, 3, 64, 64)
    ref = torch.rand(1, 3, 64, 64)
    data_batch = {
        'lq': inputs,
        'gt': targets,
        'ref': ref,
        'lq_up': ref,
        'ref_downup': ref
    }
    optim_cfg_g = dict(type='Adam', lr=0.0001, betas=(0.9, 0.999))
    optim_cfg_d = dict(type='Adam', lr=0.0001, betas=(0.9, 0.999))
    optimizer = dict(
        generator=obj_from_dict(optim_cfg_g, torch.optim,
                                dict(params=restorer.parameters())),
        discriminator=obj_from_dict(optim_cfg_d, torch.optim,
                                    dict(params=restorer.parameters())))

    # train_step (cpu)
    outputs = restorer.train_step(data_batch, optimizer)
    assert isinstance(outputs, dict)
    assert isinstance(outputs['log_vars'], dict)
    assert isinstance(outputs['log_vars']['loss_pix'], float)
    assert outputs['num_samples'] == 1
    assert outputs['results']['lq'].shape == data_batch['lq'].shape
    assert outputs['results']['gt'].shape == data_batch['gt'].shape
    assert torch.is_tensor(outputs['results']['output'])
    assert outputs['results']['output'].size() == (1, 3, 64, 64)

    if torch.cuda.is_available():
        restorer = restorer.cuda()
        data_batch = {
            'lq': inputs.cuda(),
            'gt': targets.cuda(),
            'ref': ref.cuda(),
            'lq_up': ref.cuda(),
            'ref_downup': ref.cuda()
        }
        optimizer = dict(
            generator=obj_from_dict(optim_cfg_g, torch.optim,
                                    dict(params=restorer.parameters())),
            discriminator=obj_from_dict(optim_cfg_d, torch.optim,
                                        dict(params=restorer.parameters())))

        # train_step (gpu)
        outputs = restorer.train_step(data_batch, optimizer)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['log_vars'], dict)
        assert isinstance(outputs['log_vars']['loss_pix'], float)
        assert outputs['num_samples'] == 1
        assert outputs['results']['lq'].shape == data_batch['lq'].shape
        assert outputs['results']['gt'].shape == data_batch['gt'].shape
        assert torch.is_tensor(outputs['results']['output'])
        assert outputs['results']['output'].size() == (1, 3, 64, 64)

        # val_step with metric evaluation
        result = restorer.val_step(data_batch, meta=[{'gt_path': ''}])
        assert isinstance(result, dict)
        assert isinstance(result['eval_result'], dict)
        assert result['eval_result'].keys() == {'PSNR', 'SSIM'}
        assert isinstance(result['eval_result']['PSNR'], np.float64)
        assert isinstance(result['eval_result']['SSIM'], np.float64)
def test_cyclegan(): model_cfg = dict(type='CycleGAN', generator=dict(type='ResnetGenerator', in_channels=3, out_channels=3, base_channels=64, norm_cfg=dict(type='IN'), use_dropout=False, num_blocks=9, padding_mode='reflect', init_cfg=dict(type='normal', gain=0.02)), discriminator=dict(type='PatchDiscriminator', in_channels=3, base_channels=64, num_conv=3, norm_cfg=dict(type='IN'), init_cfg=dict(type='normal', gain=0.02)), gan_loss=dict(type='GANLoss', gan_type='lsgan', real_label_val=1.0, fake_label_val=0, loss_weight=1.0), cycle_loss=dict(type='L1Loss', loss_weight=10.0, reduction='mean'), id_loss=dict(type='L1Loss', loss_weight=0.5, reduction='mean')) train_cfg = None test_cfg = None synthesizer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg) with pytest.raises(AssertionError): bad_model_cfg = copy.deepcopy(model_cfg) bad_model_cfg['generator']['out_channels'] = 1 _ = build_model(bad_model_cfg, train_cfg=train_cfg, test_cfg=test_cfg) with pytest.raises(AssertionError): bad_model_cfg = copy.deepcopy(model_cfg) bad_model_cfg['gan_loss'] = None _ = build_model(bad_model_cfg, train_cfg=train_cfg, test_cfg=test_cfg) with pytest.raises(AssertionError): bad_model_cfg = copy.deepcopy(model_cfg) bad_model_cfg['cycle_loss'] = None _ = build_model(bad_model_cfg, train_cfg=train_cfg, test_cfg=test_cfg) assert (synthesizer.__class__.__name__ == 'CycleGAN') assert isinstance(synthesizer.generators['a'], ResnetGenerator) assert isinstance(synthesizer.generators['b'], ResnetGenerator) assert isinstance(synthesizer.discriminators['a'], PatchDiscriminator) assert isinstance(synthesizer.discriminators['b'], PatchDiscriminator) assert isinstance(synthesizer.gan_loss, GANLoss) assert isinstance(synthesizer.cycle_loss, L1Loss) assert isinstance(synthesizer.id_loss, L1Loss) assert (synthesizer.train_cfg is None) assert (synthesizer.test_cfg is None) inputs = torch.rand(1, 3, 64, 64) targets = torch.rand(1, 3, 64, 64) data_batch = {'img_a': inputs, 'img_b': targets} 
img_meta = {} img_meta['img_a_path'] = 'img_a_path' img_meta['img_b_path'] = 'img_b_path' data_batch['meta'] = [img_meta] optim_cfg = dict(type='Adam', lr=0.0002, betas=(0.5, 0.999)) optimizer = {'generators': obj_from_dict(optim_cfg, torch.optim, dict(params=getattr(synthesizer, 'generators').parameters())), 'discriminators': obj_from_dict(optim_cfg, torch.optim, dict(params=getattr(synthesizer, 'discriminators').parameters()))} with torch.no_grad(): output = synthesizer.forward_dummy(data_batch['img_a']) assert torch.is_tensor(output) assert (output.size() == (1, 3, 64, 64)) with torch.no_grad(): outputs = synthesizer(inputs, targets, [img_meta], test_mode=True) assert torch.equal(outputs['real_a'], data_batch['img_a']) assert torch.equal(outputs['real_b'], data_batch['img_b']) assert torch.is_tensor(outputs['fake_b']) assert torch.is_tensor(outputs['fake_a']) assert (outputs['fake_b'].size() == (1, 3, 64, 64)) assert (outputs['fake_a'].size() == (1, 3, 64, 64)) with torch.no_grad(): outputs = synthesizer.val_step(data_batch) assert torch.equal(outputs['real_a'], data_batch['img_a']) assert torch.equal(outputs['real_b'], data_batch['img_b']) assert torch.is_tensor(outputs['fake_b']) assert torch.is_tensor(outputs['fake_a']) assert (outputs['fake_b'].size() == (1, 3, 64, 64)) assert (outputs['fake_a'].size() == (1, 3, 64, 64)) outputs = synthesizer(inputs, targets, [img_meta], test_mode=False) assert torch.equal(outputs['real_a'], data_batch['img_a']) assert torch.equal(outputs['real_b'], data_batch['img_b']) assert torch.is_tensor(outputs['fake_b']) assert torch.is_tensor(outputs['fake_a']) assert torch.is_tensor(outputs['rec_a']) assert torch.is_tensor(outputs['rec_b']) assert (outputs['fake_b'].size() == (1, 3, 64, 64)) assert (outputs['fake_a'].size() == (1, 3, 64, 64)) assert (outputs['rec_a'].size() == (1, 3, 64, 64)) assert (outputs['rec_b'].size() == (1, 3, 64, 64)) outputs = synthesizer.train_step(data_batch, optimizer) assert isinstance(outputs, dict) 
assert isinstance(outputs['log_vars'], dict) assert isinstance(outputs['results'], dict) for v in ['loss_gan_d_a', 'loss_gan_d_b', 'loss_id_a', 'loss_id_b', 'loss_gan_g_a', 'loss_gan_g_b', 'loss_cycle_a', 'loss_cycle_b']: assert isinstance(outputs['log_vars'][v], float) assert (outputs['num_samples'] == 1) assert torch.equal(outputs['results']['real_a'], data_batch['img_a']) assert torch.equal(outputs['results']['real_b'], data_batch['img_b']) assert torch.is_tensor(outputs['results']['fake_b']) assert torch.is_tensor(outputs['results']['fake_a']) assert (outputs['results']['fake_b'].size() == (1, 3, 64, 64)) assert (outputs['results']['fake_a'].size() == (1, 3, 64, 64)) if torch.cuda.is_available(): synthesizer = synthesizer.cuda() optimizer = {'generators': obj_from_dict(optim_cfg, torch.optim, dict(params=getattr(synthesizer, 'generators').parameters())), 'discriminators': obj_from_dict(optim_cfg, torch.optim, dict(params=getattr(synthesizer, 'discriminators').parameters()))} data_batch_cuda = copy.deepcopy(data_batch) data_batch_cuda['img_a'] = inputs.cuda() data_batch_cuda['img_b'] = targets.cuda() data_batch_cuda['meta'] = [DC(img_meta, cpu_only=True).data] with torch.no_grad(): outputs = synthesizer(data_batch_cuda['img_a'], data_batch_cuda['img_b'], data_batch_cuda['meta'], test_mode=True) assert torch.equal(outputs['real_a'], data_batch_cuda['img_a'].cpu()) assert torch.equal(outputs['real_b'], data_batch_cuda['img_b'].cpu()) assert torch.is_tensor(outputs['fake_b']) assert torch.is_tensor(outputs['fake_a']) assert (outputs['fake_b'].size() == (1, 3, 64, 64)) assert (outputs['fake_a'].size() == (1, 3, 64, 64)) with torch.no_grad(): outputs = synthesizer.val_step(data_batch_cuda) assert torch.equal(outputs['real_a'], data_batch_cuda['img_a'].cpu()) assert torch.equal(outputs['real_b'], data_batch_cuda['img_b'].cpu()) assert torch.is_tensor(outputs['fake_b']) assert torch.is_tensor(outputs['fake_a']) assert (outputs['fake_b'].size() == (1, 3, 64, 64)) assert 
(outputs['fake_a'].size() == (1, 3, 64, 64)) outputs = synthesizer(data_batch_cuda['img_a'], data_batch_cuda['img_b'], data_batch_cuda['meta'], test_mode=False) assert torch.equal(outputs['real_a'], data_batch_cuda['img_a']) assert torch.equal(outputs['real_b'], data_batch_cuda['img_b']) assert torch.is_tensor(outputs['fake_b']) assert torch.is_tensor(outputs['fake_a']) assert torch.is_tensor(outputs['rec_a']) assert torch.is_tensor(outputs['rec_b']) assert (outputs['fake_b'].size() == (1, 3, 64, 64)) assert (outputs['fake_a'].size() == (1, 3, 64, 64)) assert (outputs['rec_a'].size() == (1, 3, 64, 64)) assert (outputs['rec_b'].size() == (1, 3, 64, 64)) outputs = synthesizer.train_step(data_batch_cuda, optimizer) assert isinstance(outputs, dict) assert isinstance(outputs['log_vars'], dict) assert isinstance(outputs['results'], dict) for v in ['loss_gan_d_a', 'loss_gan_d_b', 'loss_id_a', 'loss_id_b', 'loss_gan_g_a', 'loss_gan_g_b', 'loss_cycle_a', 'loss_cycle_b']: assert isinstance(outputs['log_vars'][v], float) assert (outputs['num_samples'] == 1) assert torch.equal(outputs['results']['real_a'], data_batch_cuda['img_a'].cpu()) assert torch.equal(outputs['results']['real_b'], data_batch_cuda['img_b'].cpu()) assert torch.is_tensor(outputs['results']['fake_b']) assert torch.is_tensor(outputs['results']['fake_a']) assert (outputs['results']['fake_b'].size() == (1, 3, 64, 64)) assert (outputs['results']['fake_a'].size() == (1, 3, 64, 64)) data_batch['img_a'] = inputs.cpu() data_batch['img_b'] = targets.cpu() train_cfg = dict(disc_steps=2, disc_init_steps=2) synthesizer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg) optimizer = {'generators': obj_from_dict(optim_cfg, torch.optim, dict(params=getattr(synthesizer, 'generators').parameters())), 'discriminators': obj_from_dict(optim_cfg, torch.optim, dict(params=getattr(synthesizer, 'discriminators').parameters()))} for i in range(2): assert (synthesizer.step_counter == i) outputs = 
synthesizer.train_step(data_batch, optimizer) assert isinstance(outputs, dict) assert isinstance(outputs['log_vars'], dict) assert isinstance(outputs['results'], dict) for v in ['loss_id_a', 'loss_id_b', 'loss_gan_g_a', 'loss_gan_g_b', 'loss_cycle_a', 'loss_cycle_b']: assert (outputs['log_vars'].get(v) is None) assert isinstance(outputs['log_vars']['loss_gan_d_a'], float) assert isinstance(outputs['log_vars']['loss_gan_d_b'], float) assert (outputs['num_samples'] == 1) assert torch.equal(outputs['results']['real_a'], data_batch['img_a']) assert torch.equal(outputs['results']['real_b'], data_batch['img_b']) assert torch.is_tensor(outputs['results']['fake_b']) assert torch.is_tensor(outputs['results']['fake_a']) assert (outputs['results']['fake_b'].size() == (1, 3, 64, 64)) assert (outputs['results']['fake_a'].size() == (1, 3, 64, 64)) assert (synthesizer.step_counter == (i + 1)) for i in range(2, 6): assert (synthesizer.step_counter == i) outputs = synthesizer.train_step(data_batch, optimizer) assert isinstance(outputs, dict) assert isinstance(outputs['log_vars'], dict) assert isinstance(outputs['results'], dict) log_check_list = ['loss_gan_d_a', 'loss_gan_d_b', 'loss_id_a', 'loss_id_b', 'loss_gan_g_a', 'loss_gan_g_b', 'loss_cycle_a', 'loss_cycle_b'] if ((i % 2) == 1): log_None_list = ['loss_id_a', 'loss_id_b', 'loss_gan_g_a', 'loss_gan_g_b', 'loss_cycle_a', 'loss_cycle_b'] for v in log_None_list: assert (outputs['log_vars'].get(v) is None) log_check_list.remove(v) for v in log_check_list: assert isinstance(outputs['log_vars'][v], float) assert (outputs['num_samples'] == 1) assert torch.equal(outputs['results']['real_a'], data_batch['img_a']) assert torch.equal(outputs['results']['real_b'], data_batch['img_b']) assert torch.is_tensor(outputs['results']['fake_b']) assert torch.is_tensor(outputs['results']['fake_a']) assert (outputs['results']['fake_b'].size() == (1, 3, 64, 64)) assert (outputs['results']['fake_a'].size() == (1, 3, 64, 64)) assert 
(synthesizer.step_counter == (i + 1)) model_cfg_ = copy.deepcopy(model_cfg) model_cfg_.pop('id_loss') synthesizer = build_model(model_cfg_, train_cfg=None, test_cfg=None) optimizer = {'generators': obj_from_dict(optim_cfg, torch.optim, dict(params=getattr(synthesizer, 'generators').parameters())), 'discriminators': obj_from_dict(optim_cfg, torch.optim, dict(params=getattr(synthesizer, 'discriminators').parameters()))} data_batch['img_a'] = inputs.cpu() data_batch['img_b'] = targets.cpu() outputs = synthesizer.train_step(data_batch, optimizer) assert isinstance(outputs, dict) assert isinstance(outputs['log_vars'], dict) assert isinstance(outputs['results'], dict) assert (outputs['log_vars'].get('loss_id_a') is None) assert (outputs['log_vars'].get('loss_id_b') is None) log_check_list = ['loss_gan_d_a', 'loss_gan_d_b', 'loss_gan_g_a', 'loss_gan_g_b', 'loss_cycle_a', 'loss_cycle_b'] for v in log_check_list: assert isinstance(outputs['log_vars'][v], float) assert (outputs['num_samples'] == 1) assert torch.equal(outputs['results']['real_a'], data_batch['img_a']) assert torch.equal(outputs['results']['real_b'], data_batch['img_b']) assert torch.is_tensor(outputs['results']['fake_b']) assert torch.is_tensor(outputs['results']['fake_a']) assert (outputs['results']['fake_b'].size() == (1, 3, 64, 64)) assert (outputs['results']['fake_a'].size() == (1, 3, 64, 64)) data_batch['img_a'] = inputs.cpu() data_batch['img_b'] = targets.cpu() train_cfg = dict(direction='b2a') synthesizer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg) optimizer = {'generators': obj_from_dict(optim_cfg, torch.optim, dict(params=getattr(synthesizer, 'generators').parameters())), 'discriminators': obj_from_dict(optim_cfg, torch.optim, dict(params=getattr(synthesizer, 'discriminators').parameters()))} assert (synthesizer.step_counter == 0) outputs = synthesizer.train_step(data_batch, optimizer) assert isinstance(outputs, dict) assert isinstance(outputs['log_vars'], dict) assert 
isinstance(outputs['results'], dict) for v in ['loss_gan_d_a', 'loss_gan_d_b', 'loss_id_a', 'loss_id_b', 'loss_gan_g_a', 'loss_gan_g_b', 'loss_cycle_a', 'loss_cycle_b']: assert isinstance(outputs['log_vars'][v], float) assert (outputs['num_samples'] == 1) assert torch.equal(outputs['results']['real_a'], data_batch['img_b']) assert torch.equal(outputs['results']['real_b'], data_batch['img_a']) assert torch.is_tensor(outputs['results']['fake_b']) assert torch.is_tensor(outputs['results']['fake_a']) assert (outputs['results']['fake_b'].size() == (1, 3, 64, 64)) assert (outputs['results']['fake_a'].size() == (1, 3, 64, 64)) assert (synthesizer.step_counter == 1) data_batch['img_a'] = inputs.cpu() data_batch['img_b'] = targets.cpu() train_cfg = dict(buffer_size=0) synthesizer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg) optimizer = {'generators': obj_from_dict(optim_cfg, torch.optim, dict(params=getattr(synthesizer, 'generators').parameters())), 'discriminators': obj_from_dict(optim_cfg, torch.optim, dict(params=getattr(synthesizer, 'discriminators').parameters()))} assert (synthesizer.step_counter == 0) outputs = synthesizer.train_step(data_batch, optimizer) assert isinstance(outputs, dict) assert isinstance(outputs['log_vars'], dict) assert isinstance(outputs['results'], dict) for v in ['loss_gan_d_a', 'loss_gan_d_b', 'loss_id_a', 'loss_id_b', 'loss_gan_g_a', 'loss_gan_g_b', 'loss_cycle_a', 'loss_cycle_b']: assert isinstance(outputs['log_vars'][v], float) assert (outputs['num_samples'] == 1) assert torch.equal(outputs['results']['real_a'], data_batch['img_a']) assert torch.equal(outputs['results']['real_b'], data_batch['img_b']) assert torch.is_tensor(outputs['results']['fake_b']) assert torch.is_tensor(outputs['results']['fake_a']) assert (outputs['results']['fake_b'].size() == (1, 3, 64, 64)) assert (outputs['results']['fake_a'].size() == (1, 3, 64, 64)) assert (synthesizer.step_counter == 1) train_cfg = None test_cfg = dict(show_input=True) 
synthesizer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg) with patch.object(mmcv, 'imwrite', return_value=True): with pytest.raises(AssertionError): with torch.no_grad(): _ = synthesizer(inputs, targets, [img_meta], test_mode=True, save_image=True) with torch.no_grad(): outputs = synthesizer(inputs, targets, [img_meta], test_mode=True, save_image=True, save_path='save_path') assert torch.equal(outputs['real_a'], data_batch['img_a']) assert torch.equal(outputs['real_b'], data_batch['img_b']) assert torch.is_tensor(outputs['fake_b']) assert torch.is_tensor(outputs['fake_a']) assert (outputs['fake_b'].size() == (1, 3, 64, 64)) assert (outputs['fake_a'].size() == (1, 3, 64, 64)) assert outputs['saved_flag'] with torch.no_grad(): outputs = synthesizer(inputs, targets, [img_meta], test_mode=True, save_image=True, save_path='save_path', iteration=1000) assert torch.equal(outputs['real_a'], data_batch['img_a']) assert torch.equal(outputs['real_b'], data_batch['img_b']) assert torch.is_tensor(outputs['fake_b']) assert torch.is_tensor(outputs['fake_a']) assert (outputs['fake_b'].size() == (1, 3, 64, 64)) assert (outputs['fake_a'].size() == (1, 3, 64, 64)) assert outputs['saved_flag'] train_cfg = None test_cfg = dict(show_input=False, test_direction='a2b') synthesizer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg) with patch.object(mmcv, 'imwrite', return_value=True): with pytest.raises(AssertionError): with torch.no_grad(): _ = synthesizer(inputs, targets, [img_meta], test_mode=True, save_image=True) with torch.no_grad(): outputs = synthesizer(inputs, targets, [img_meta], test_mode=True, save_image=True, save_path='save_path') assert torch.equal(outputs['real_a'], data_batch['img_a']) assert torch.equal(outputs['real_b'], data_batch['img_b']) assert torch.is_tensor(outputs['fake_b']) assert torch.is_tensor(outputs['fake_a']) assert (outputs['fake_b'].size() == (1, 3, 64, 64)) assert (outputs['fake_a'].size() == (1, 3, 64, 64)) assert 
outputs['saved_flag'] with torch.no_grad(): outputs = synthesizer(inputs, targets, [img_meta], test_mode=True, save_image=True, save_path='save_path', iteration=1000) assert torch.equal(outputs['real_a'], data_batch['img_a']) assert torch.equal(outputs['real_b'], data_batch['img_b']) assert torch.is_tensor(outputs['fake_b']) assert torch.is_tensor(outputs['fake_a']) assert (outputs['fake_b'].size() == (1, 3, 64, 64)) assert (outputs['fake_a'].size() == (1, 3, 64, 64)) assert outputs['saved_flag'] train_cfg = None test_cfg = dict(show_input=False, test_direction='b2a') synthesizer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg) with patch.object(mmcv, 'imwrite', return_value=True): with pytest.raises(AssertionError): with torch.no_grad(): _ = synthesizer(inputs, targets, [img_meta], test_mode=True, save_image=True) with torch.no_grad(): outputs = synthesizer(inputs, targets, [img_meta], test_mode=True, save_image=True, save_path='save_path') assert torch.equal(outputs['real_a'], data_batch['img_a']) assert torch.equal(outputs['real_b'], data_batch['img_b']) assert torch.is_tensor(outputs['fake_b']) assert torch.is_tensor(outputs['fake_a']) assert (outputs['fake_b'].size() == (1, 3, 64, 64)) assert (outputs['fake_a'].size() == (1, 3, 64, 64)) assert outputs['saved_flag'] with torch.no_grad(): outputs = synthesizer(inputs, targets, [img_meta], test_mode=True, save_image=True, save_path='save_path', iteration=1000) assert torch.equal(outputs['real_a'], data_batch['img_a']) assert torch.equal(outputs['real_b'], data_batch['img_b']) assert torch.is_tensor(outputs['fake_b']) assert torch.is_tensor(outputs['fake_a']) assert (outputs['fake_b'].size() == (1, 3, 64, 64)) assert (outputs['fake_a'].size() == (1, 3, 64, 64)) assert outputs['saved_flag']
def test_search_transformer():
    """Smoke-test SearchTransformer: check shapes of the soft-attention map
    and of the transferred textures at all three pyramid levels."""
    model = build_component(dict(type='SearchTransformer'))

    # Query (LR) and key (ref) features at level 3, plus reference
    # features at three scales for texture transfer.
    lq_feat = torch.randn(2, 32, 32, 32)
    ref_feat = torch.randn(2, 32, 32, 32)
    refs = (
        torch.randn(2, 32, 32, 32),   # level 3
        torch.randn(2, 16, 64, 64),   # level 2
        torch.randn(2, 8, 128, 128),  # level 1
    )

    soft_attention, textures = model(lq_feat, ref_feat, refs)
    tex3, tex2, tex1 = textures

    assert soft_attention.shape == (2, 1, 32, 32)
    assert tex3.shape == (2, 32, 32, 32)
    assert tex2.shape == (2, 16, 64, 64)
    assert tex1.shape == (2, 8, 128, 128)
def test_cain():
    """End-to-end test of the CAIN frame-interpolation model: build, forward,
    train_step, CUDA path, and evaluation with image saving."""
    model_cfg = dict(
        type='CAIN',
        generator=dict(type='CAINNet'),
        pixel_loss=dict(type='L1Loss', loss_weight=1.0, reduction='mean'))
    train_cfg = None
    test_cfg = None

    # Build the restorer and verify its components.
    restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
    assert (restorer.__class__.__name__ == 'CAIN')
    assert isinstance(restorer.generator, CAINNet)
    assert isinstance(restorer.pixel_loss, L1Loss)

    # Two input frames, one target (interpolated) frame.
    inputs = torch.rand(1, 2, 3, 128, 128)
    target = torch.rand(1, 3, 128, 128)
    data_batch = {'inputs': inputs, 'target': target, 'meta': [{'key': '001'}]}
    optim_cfg = dict(type='Adam', lr=0.0002, betas=(0.9, 0.999))
    optimizer = {
        'generator': obj_from_dict(optim_cfg, torch.optim,
                                   dict(params=restorer.parameters()))}

    # Inference (test mode) on CPU.
    with torch.no_grad():
        outputs = restorer.forward_test(**data_batch)
    assert torch.equal(outputs['inputs'], data_batch['inputs'])
    assert torch.is_tensor(outputs['output'])
    assert (outputs['output'].size() == (1, 3, 128, 128))

    # One training step on CPU.
    outputs = restorer.train_step(data_batch, optimizer)
    assert isinstance(outputs, dict)
    assert isinstance(outputs['log_vars'], dict)
    assert isinstance(outputs['log_vars']['loss_pix'], float)
    assert (outputs['num_samples'] == 1)
    assert torch.equal(outputs['results']['inputs'], data_batch['inputs'])
    assert torch.equal(outputs['results']['target'], data_batch['target'])
    assert torch.is_tensor(outputs['results']['output'])
    assert (outputs['results']['output'].size() == (1, 3, 128, 128))

    # Same forward/train checks on GPU; results are moved back to CPU.
    if torch.cuda.is_available():
        restorer = restorer.cuda()
        optimizer['generator'] = obj_from_dict(
            optim_cfg, torch.optim, dict(params=restorer.parameters()))
        data_batch = {
            'inputs': inputs.cuda(),
            'target': target.cuda(),
            'meta': [{'key': '001'}]}
        with torch.no_grad():
            outputs = restorer.forward_test(**data_batch)
        assert torch.equal(outputs['inputs'], data_batch['inputs'].cpu())
        assert torch.is_tensor(outputs['output'])
        assert (outputs['output'].size() == (1, 3, 128, 128))
        outputs = restorer.train_step(data_batch, optimizer)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['log_vars'], dict)
        assert isinstance(outputs['log_vars']['loss_pix'], float)
        assert (outputs['num_samples'] == 1)
        assert torch.equal(outputs['results']['inputs'],
                           data_batch['inputs'].cpu())
        assert torch.equal(outputs['results']['target'],
                           data_batch['target'].cpu())
        assert torch.is_tensor(outputs['results']['output'])
        assert (outputs['results']['output'].size() == (1, 3, 128, 128))

    # Evaluation mode with metrics; meta must be supplied (raises otherwise).
    test_cfg = dict(metrics=('PSNR', 'SSIM'), crop_border=0)
    test_cfg = mmcv.Config(test_cfg)
    data_batch = {
        'inputs': inputs,
        'target': target,
        'meta': [{'key': 'fake_path/fake_name'}]}
    restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
    with pytest.raises(AssertionError):
        # Evaluation with metrics requires the ground-truth target.
        restorer(inputs=inputs, test_mode=True)
    with tempfile.TemporaryDirectory() as tmpdir:
        # Save images both without and with an iteration number.
        outputs = restorer(
            **data_batch, test_mode=True, save_image=True, save_path=tmpdir,
            iteration=None)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['eval_result'], dict)
        assert isinstance(outputs['eval_result']['PSNR'], float)
        assert isinstance(outputs['eval_result']['SSIM'], float)
        outputs = restorer(
            **data_batch, test_mode=True, save_image=True, save_path=tmpdir,
            iteration=100)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['eval_result'], dict)
        assert isinstance(outputs['eval_result']['PSNR'], float)
        assert isinstance(outputs['eval_result']['SSIM'], float)
def test_init_random_seed():
    """init_random_seed should accept an explicit seed or generate one,
    on CPU and (when available) on CUDA."""
    init_random_seed(0, device='cpu')   # fixed seed
    init_random_seed(device='cpu')      # auto-generated seed
    if torch.cuda.is_available():
        init_random_seed(0, device='cuda')
        init_random_seed(device='cuda')
def test_set_random_seed():
    """set_random_seed should work with deterministic mode both off and on."""
    for deterministic in (False, True):
        set_random_seed(0, deterministic=deterministic)
@DATASETS.register_module()
class ToyDataset():
    """Minimal dataset used to exercise ``build_dataset``/``build_dataloader``.

    Args:
        ann_file (str | None): Dummy annotation-file path. Defaults to None.
        cnt (int): Arbitrary counter used to verify that ``default_args``
            is forwarded by ``build_dataset``. Defaults to 0.
    """

    def __init__(self, ann_file=None, cnt=0):
        self.ann_file = ann_file
        self.cnt = cnt

    def __getitem__(self, idx):
        # Fix: the original defined ``__item__``, which is not a recognized
        # dunder, so ``dataset[idx]`` never worked. Return the index itself
        # as a trivial sample.
        return idx

    # Backward-compat alias for any caller that used the old (typo) name.
    __item__ = __getitem__

    def __len__(self):
        return 100
@DATASETS.register_module()
class ToyDatasetWithAnnFile():
    """Toy dataset with a required ``ann_file`` argument, used to test
    ``build_dataset``'s handling of list-valued annotation files.

    Args:
        ann_file (str): Dummy annotation-file path (required).
    """

    def __init__(self, ann_file):
        self.ann_file = ann_file

    def __getitem__(self, idx):
        # Fix: the original defined ``__item__``, which is not a recognized
        # dunder, so ``dataset[idx]`` never worked.
        return idx

    # Backward-compat alias for any caller that used the old (typo) name.
    __item__ = __getitem__

    def __len__(self):
        return 100
def test_build_dataset():
    """build_dataset should handle plain cfgs, default_args, RepeatDataset,
    list-valued ann_file expansion, and sequences of cfgs."""
    # Plain dataset config; cnt keeps its default.
    cfg = dict(type='ToyDataset')
    dataset = build_dataset(cfg)
    assert isinstance(dataset, ToyDataset)
    assert (dataset.cnt == 0)

    # default_args should be forwarded into the constructor.
    dataset = build_dataset(cfg, default_args=dict(cnt=1))
    assert isinstance(dataset, ToyDataset)
    assert (dataset.cnt == 1)

    # RepeatDataset wrapper.
    cfg = dict(type='RepeatDataset', dataset=dict(type='ToyDataset'), times=3)
    dataset = build_dataset(cfg)
    assert isinstance(dataset, RepeatDataset)
    assert isinstance(dataset.dataset, ToyDataset)
    assert (dataset.times == 3)

    # A list of ann_files expands into a ConcatDataset, one dataset per file.
    cfg = dict(
        type='ToyDatasetWithAnnFile', ann_file=['ann_file_a', 'ann_file_b'])
    dataset = build_dataset(cfg)
    assert isinstance(dataset, ConcatDataset)
    assert isinstance(dataset.datasets, list)
    assert isinstance(dataset.datasets[0], ToyDatasetWithAnnFile)
    assert (dataset.datasets[0].ann_file == 'ann_file_a')
    assert isinstance(dataset.datasets[1], ToyDatasetWithAnnFile)
    assert (dataset.datasets[1].ann_file == 'ann_file_b')

    # A sequence of cfgs also yields a ConcatDataset.
    cfg = (dict(type='ToyDataset'),
           dict(type='ToyDatasetWithAnnFile', ann_file='ann_file'))
    dataset = build_dataset(cfg)
    assert isinstance(dataset, ConcatDataset)
    assert isinstance(dataset.datasets, list)
    assert isinstance(dataset.datasets[0], ToyDataset)
    assert isinstance(dataset.datasets[1], ToyDatasetWithAnnFile)
def test_build_dataloader():
    """build_dataloader should pick the right sampler, batch size and worker
    count for distributed/non-distributed and multi-GPU settings."""
    dataset = ToyDataset()
    samples_per_gpu = 3

    # Default: distributed, shuffled.
    dataloader = build_dataloader(
        dataset, samples_per_gpu=samples_per_gpu, workers_per_gpu=2)
    assert (dataloader.batch_size == samples_per_gpu)
    assert (len(dataloader) == int(math.ceil((len(dataset) / samples_per_gpu))))
    assert isinstance(dataloader.sampler, DistributedSampler)
    assert dataloader.sampler.shuffle

    # Distributed, not shuffled.
    dataloader = build_dataloader(
        dataset, samples_per_gpu=samples_per_gpu, workers_per_gpu=2,
        shuffle=False)
    assert (dataloader.batch_size == samples_per_gpu)
    assert (len(dataloader) == int(math.ceil((len(dataset) / samples_per_gpu))))
    assert isinstance(dataloader.sampler, DistributedSampler)
    assert (not dataloader.sampler.shuffle)

    # Distributed: num_gpus does not multiply the per-process batch size.
    dataloader = build_dataloader(
        dataset, samples_per_gpu=samples_per_gpu, workers_per_gpu=2,
        num_gpus=8)
    assert (dataloader.batch_size == samples_per_gpu)
    assert (len(dataloader) == int(math.ceil((len(dataset) / samples_per_gpu))))
    assert (dataloader.num_workers == 2)

    # Non-distributed, shuffled -> RandomSampler.
    dataloader = build_dataloader(
        dataset, samples_per_gpu=samples_per_gpu, workers_per_gpu=2,
        dist=False)
    assert (dataloader.batch_size == samples_per_gpu)
    assert (len(dataloader) == int(math.ceil((len(dataset) / samples_per_gpu))))
    assert isinstance(dataloader.sampler, RandomSampler)
    assert (dataloader.num_workers == 2)

    # Non-distributed, not shuffled -> SequentialSampler.
    dataloader = build_dataloader(
        dataset, samples_per_gpu=3, workers_per_gpu=2, shuffle=False,
        dist=False)
    assert (dataloader.batch_size == samples_per_gpu)
    assert (len(dataloader) == int(math.ceil((len(dataset) / samples_per_gpu))))
    assert isinstance(dataloader.sampler, SequentialSampler)
    assert (dataloader.num_workers == 2)

    # Non-distributed multi-GPU: batch size and workers scale by num_gpus.
    dataloader = build_dataloader(
        dataset, samples_per_gpu=3, workers_per_gpu=2, num_gpus=8, dist=False)
    assert (dataloader.batch_size == (samples_per_gpu * 8))
    assert (len(dataloader) == int(
        math.ceil(((len(dataset) / samples_per_gpu) / 8))))
    assert isinstance(dataloader.sampler, RandomSampler)
    assert (dataloader.num_workers == 16)
class SimpleModule(nn.Module):
    """Tiny module with a parameter ``a``, a persistent buffer ``b`` and a
    non-persistent tensor ``c`` (which must stay out of the state dict)."""

    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.tensor([1.0, 2.0]))
        # The ``persistent`` keyword of register_buffer was added in torch 1.7.
        has_persistent_kwarg = (
            version.parse(torch.__version__) >= version.parse('1.7.0'))
        if has_persistent_kwarg:
            self.register_buffer('b', torch.tensor([2.0, 3.0]), persistent=True)
            self.register_buffer('c', torch.tensor([0.0, 1.0]), persistent=False)
        else:
            self.register_buffer('b', torch.tensor([2.0, 3.0]))
            # Plain attribute emulates a non-persistent buffer on old torch.
            self.c = torch.tensor([0.0, 1.0])
class SimpleModel(nn.Module):
    """Model holding two submodules plus their EMA counterparts."""

    def __init__(self) -> None:
        super().__init__()
        # nn.Module.__setattr__ registers each SimpleModule as a child.
        for attr in ('module_a', 'module_b', 'module_a_ema', 'module_b_ema'):
            setattr(self, attr, SimpleModule())
class SimpleModelNoEMA(nn.Module):
    """Model with plain submodules only; EMA copies are added by the hook."""

    def __init__(self) -> None:
        super().__init__()
        for attr in ('module_a', 'module_b'):
            setattr(self, attr, SimpleModule())
class SimpleRunner:
    """Bare-bones runner stub exposing only the attributes EMA hooks read:
    ``model`` and the current ``iter`` counter."""

    def __init__(self):
        self.model = SimpleModel()
        self.iter = 0
class TestEMA():
    """Tests for ExponentialMovingAverageHook on CPU and CUDA.

    With momentum 0.5 the EMA update is ``ema = 0.5 * ema + 0.5 * value``,
    which makes the expected tensors below easy to derive by hand.
    """

    @classmethod
    def setup_class(cls):
        # Shared hook config: track both *_ema modules every iteration.
        cls.default_config = dict(
            module_keys=('module_a_ema', 'module_b_ema'),
            interval=1,
            interp_cfg=dict(momentum=0.5))
        cls.runner = SimpleRunner()

    @torch.no_grad()
    def test_ema_hook(self):
        # interval < 0 disables the hook: EMA weights must stay untouched.
        cfg_ = deepcopy(self.default_config)
        cfg_['interval'] = (- 1)
        ema = ExponentialMovingAverageHook(**cfg_)
        ema.before_run(self.runner)
        ema.after_train_iter(self.runner)
        module_a = self.runner.model.module_a
        module_a_ema = self.runner.model.module_a_ema
        ema_states = module_a_ema.state_dict()
        assert torch.equal(ema_states['a'], torch.tensor([1.0, 2.0]))

        # Iteration 0: the EMA is initialized as a straight copy.
        ema = ExponentialMovingAverageHook(**self.default_config)
        ema.after_train_iter(self.runner)
        ema_states = module_a_ema.state_dict()
        assert torch.equal(ema_states['a'], torch.tensor([1.0, 2.0]))

        # Halve the source weights, advance one iteration, and check the
        # 0.5-momentum average: 0.5 * old + 0.5 * new.
        module_a.b /= 2.0
        module_a.a.data /= 2.0
        module_a.c /= 2.0
        self.runner.iter += 1
        ema.after_train_iter(self.runner)
        ema_states = module_a_ema.state_dict()
        assert torch.equal(self.runner.model.module_a.a,
                           torch.tensor([0.5, 1.0]))
        assert torch.equal(ema_states['a'], torch.tensor([0.75, 1.5]))
        assert torch.equal(ema_states['b'], torch.tensor([1.0, 1.5]))
        # Non-persistent buffer 'c' must not appear in the state dict.
        assert ('c' not in ema_states)

        # Invalid constructor arguments are rejected up front.
        with pytest.raises(AssertionError):
            _ = ExponentialMovingAverageHook(module_keys=['a'])
        with pytest.raises(AssertionError):
            _ = ExponentialMovingAverageHook(module_keys='a')
        with pytest.raises(AssertionError):
            _ = ExponentialMovingAverageHook(
                module_keys='module_a_ema', interp_mode='xxx')

        # A model without *_ema submodules: before_run must create them.
        ema = ExponentialMovingAverageHook(**self.default_config)
        self.runner.model = SimpleModelNoEMA()
        self.runner.iter = 0
        ema.before_run(self.runner)
        assert hasattr(self.runner.model, 'module_a_ema')
        module_a = self.runner.model.module_a
        module_a_ema = self.runner.model.module_a_ema
        ema.after_train_iter(self.runner)
        ema_states = module_a_ema.state_dict()
        assert torch.equal(ema_states['a'], torch.tensor([1.0, 2.0]))
        module_a.b /= 2.0
        module_a.a.data /= 2.0
        module_a.c /= 2.0
        self.runner.iter += 1
        ema.after_train_iter(self.runner)
        ema_states = module_a_ema.state_dict()
        assert torch.equal(self.runner.model.module_a.a,
                           torch.tensor([0.5, 1.0]))
        assert torch.equal(ema_states['a'], torch.tensor([0.75, 1.5]))
        assert torch.equal(ema_states['b'], torch.tensor([1.0, 1.5]))
        assert ('c' not in ema_states)

        # start_iter: before it is reached, EMA mirrors the source exactly;
        # afterwards the momentum average kicks in.
        runner = SimpleRunner()
        cfg_ = deepcopy(self.default_config)
        cfg_.update(dict(start_iter=3, interval=1))
        ema = ExponentialMovingAverageHook(**cfg_)
        ema.before_run(runner)
        module_a = runner.model.module_a
        module_a_ema = runner.model.module_a_ema
        module_a.a.data /= 2.0
        runner.iter += 1
        ema.after_train_iter(runner)
        ema_states = module_a_ema.state_dict()
        assert torch.equal(runner.model.module_a.a, torch.tensor([0.5, 1.0]))
        # iter 1 < start_iter 3 -> plain copy, no averaging yet.
        assert torch.equal(ema_states['a'], torch.tensor([0.5, 1.0]))
        module_a.a.data /= 2
        runner.iter += 2
        ema.after_train_iter(runner)
        ema_states = module_a_ema.state_dict()
        assert torch.equal(runner.model.module_a.a, torch.tensor([0.25, 0.5]))
        # iter 3 >= start_iter -> 0.5 * 0.5 + 0.5 * 0.25 = 0.375 etc.
        assert torch.equal(ema_states['a'], torch.tensor([0.375, 0.75]))

    @pytest.mark.skipif((not torch.cuda.is_available()), reason='requires cuda')
    def test_ema_hook_cuda(self):
        """Same scenarios as test_ema_hook, with CUDA tensors and a
        DataParallel-wrapped model (EMA modules live under ``.module``)."""
        # Basic EMA update on a CUDA model.
        ema = ExponentialMovingAverageHook(**self.default_config)
        cuda_runner = SimpleRunner()
        cuda_runner.model = cuda_runner.model.cuda()
        ema.after_train_iter(cuda_runner)
        module_a = cuda_runner.model.module_a
        module_a_ema = cuda_runner.model.module_a_ema
        ema_states = module_a_ema.state_dict()
        assert torch.equal(ema_states['a'], torch.tensor([1.0, 2.0]).cuda())
        module_a.b /= 2.0
        module_a.a.data /= 2.0
        module_a.c /= 2.0
        cuda_runner.iter += 1
        ema.after_train_iter(cuda_runner)
        ema_states = module_a_ema.state_dict()
        assert torch.equal(cuda_runner.model.module_a.a,
                           torch.tensor([0.5, 1.0]).cuda())
        assert torch.equal(ema_states['a'], torch.tensor([0.75, 1.5]).cuda())
        assert torch.equal(ema_states['b'], torch.tensor([1.0, 1.5]).cuda())
        assert ('c' not in ema_states)

        # DataParallel wrapping: the hook must reach through ``.module``.
        ema = ExponentialMovingAverageHook(**self.default_config)
        self.runner.model = SimpleModelNoEMA().cuda()
        self.runner.model = DataParallel(self.runner.model)
        self.runner.iter = 0
        ema.before_run(self.runner)
        assert hasattr(self.runner.model.module, 'module_a_ema')
        module_a = self.runner.model.module.module_a
        module_a_ema = self.runner.model.module.module_a_ema
        ema.after_train_iter(self.runner)
        ema_states = module_a_ema.state_dict()
        assert torch.equal(ema_states['a'], torch.tensor([1.0, 2.0]).cuda())
        module_a.b /= 2.0
        module_a.a.data /= 2.0
        module_a.c /= 2.0
        self.runner.iter += 1
        ema.after_train_iter(self.runner)
        ema_states = module_a_ema.state_dict()
        assert torch.equal(self.runner.model.module.module_a.a,
                           torch.tensor([0.5, 1.0]).cuda())
        assert torch.equal(ema_states['a'], torch.tensor([0.75, 1.5]).cuda())
        assert torch.equal(ema_states['b'], torch.tensor([1.0, 1.5]).cuda())
        assert ('c' not in ema_states)

        # start_iter behaviour on CUDA (mirrors the CPU case above).
        runner = SimpleRunner()
        runner.model = runner.model.cuda()
        cfg_ = deepcopy(self.default_config)
        cfg_.update(dict(start_iter=3, interval=1))
        ema = ExponentialMovingAverageHook(**cfg_)
        ema.before_run(runner)
        module_a = runner.model.module_a
        module_a_ema = runner.model.module_a_ema
        module_a.a.data /= 2.0
        runner.iter += 1
        ema.after_train_iter(runner)
        ema_states = module_a_ema.state_dict()
        assert torch.equal(runner.model.module_a.a,
                           torch.tensor([0.5, 1.0]).cuda())
        assert torch.equal(ema_states['a'], torch.tensor([0.5, 1.0]).cuda())
        module_a.a.data /= 2
        runner.iter += 2
        ema.after_train_iter(runner)
        ema_states = module_a_ema.state_dict()
        assert torch.equal(runner.model.module_a.a,
                           torch.tensor([0.25, 0.5]).cuda())
        assert torch.equal(ema_states['a'], torch.tensor([0.375, 0.75]).cuda())
class ExampleDataset(Dataset):
    """One-sample dataset yielding a constant tensor under key ``imgs``."""

    def __getitem__(self, idx):
        # Index is ignored; every access returns the same fixed sample.
        return {'imgs': torch.tensor([1])}

    def __len__(self):
        return 1
class ExampleModel(nn.Module):
    """Identity model stub exposing the minimal train/test API used by
    EvalIterHook (``forward`` with ``test_mode`` and ``train_step``)."""

    def __init__(self):
        super().__init__()
        self.test_cfg = None
        # Unused conv; present so the model has parameters to optimize.
        self.conv = nn.Conv2d(3, 3, 3)

    def forward(self, imgs, test_mode=False, **kwargs):
        # Echo the input back unchanged.
        return imgs

    def train_step(self, data_batch, optimizer):
        return dict(result=self.forward(data_batch))
def test_eval_hook():
    """EvalIterHook should reject non-DataLoader input and, when attached to
    an IterBasedRunner, call ``dataset.evaluate`` with the collected results."""
    # Wrapping the loader in a list (and passing a model as dataset) is
    # invalid input and must raise TypeError.
    with pytest.raises(TypeError):
        test_dataset = ExampleModel()
        data_loader = [
            DataLoader(
                test_dataset,
                batch_size=1,
                sampler=None,
                num_worker=0,
                shuffle=False)
        ]
        EvalIterHook(data_loader)

    # Stub out evaluate so we can assert on how the hook calls it.
    test_dataset = ExampleDataset()
    test_dataset.evaluate = MagicMock(return_value=dict(test='success'))
    loader = DataLoader(test_dataset, batch_size=1)
    model = ExampleModel()
    data_loader = DataLoader(
        test_dataset, batch_size=1, sampler=None, num_workers=0, shuffle=False)
    eval_hook = EvalIterHook(data_loader)
    optim_cfg = dict(type='Adam', lr=0.0002, betas=(0.9, 0.999))
    optimizer = obj_from_dict(optim_cfg, torch.optim,
                              dict(params=model.parameters()))

    # Run one training iteration; the hook then evaluates on data_loader.
    with tempfile.TemporaryDirectory() as tmpdir:
        runner = mmcv.runner.IterBasedRunner(
            model=model,
            optimizer=optimizer,
            work_dir=tmpdir,
            logger=logging.getLogger())
        runner.register_hook(eval_hook)
        runner.run([loader], [('train', 1)], 1)
        # The hook forwards the model outputs (here: the identity of the
        # single dataset sample) to dataset.evaluate.
        test_dataset.evaluate.assert_called_with([torch.tensor([1])],
                                                 logger=runner.logger)
class ExampleModel(nn.Module):
    """Identity stub with two named convs so ``build_optimizers`` has
    distinct parameter groups (``model1``, ``model2``) to work with."""

    def __init__(self):
        super().__init__()
        self.model1 = nn.Conv2d(3, 8, kernel_size=3)
        self.model2 = nn.Conv2d(3, 4, kernel_size=3)

    def forward(self, x):
        # The convs are never applied; forward simply echoes its input.
        return x
def test_build_optimizers():
    """build_optimizers should return a dict of optimizers for a dict of
    configs, and a single optimizer for a single config — for both plain
    and DataParallel-wrapped models."""
    base_lr = 0.0001
    base_wd = 0.0002
    momentum = 0.9

    # Case 1: one optimizer config per submodule -> dict of optimizers.
    optimizer_cfg = dict(
        model1=dict(
            type='SGD', lr=base_lr, weight_decay=base_wd, momentum=momentum),
        model2=dict(
            type='SGD', lr=base_lr, weight_decay=base_wd, momentum=momentum))
    model = ExampleModel()
    optimizers = build_optimizers(model, optimizer_cfg)
    param_dict = dict(model.named_parameters())
    assert isinstance(optimizers, dict)
    for i in range(2):
        optimizer = optimizers[f'model{i + 1}']
        param_groups = optimizer.param_groups[0]
        assert isinstance(optimizer, torch.optim.SGD)
        assert optimizer.defaults['lr'] == base_lr
        assert optimizer.defaults['momentum'] == momentum
        assert optimizer.defaults['weight_decay'] == base_wd
        # Each optimizer owns exactly its submodule's weight and bias.
        assert len(param_groups['params']) == 2
        assert torch.equal(param_groups['params'][0],
                           param_dict[f'model{i + 1}.weight'])
        assert torch.equal(param_groups['params'][1],
                           param_dict[f'model{i + 1}.bias'])

    # Case 2: same, with a DataParallel-wrapped model ('module.' prefix).
    model = torch.nn.DataParallel(ExampleModel())
    optimizers = build_optimizers(model, optimizer_cfg)
    param_dict = dict(model.named_parameters())
    assert isinstance(optimizers, dict)
    for i in range(2):
        optimizer = optimizers[f'model{i + 1}']
        param_groups = optimizer.param_groups[0]
        assert isinstance(optimizer, torch.optim.SGD)
        assert optimizer.defaults['lr'] == base_lr
        assert optimizer.defaults['momentum'] == momentum
        assert optimizer.defaults['weight_decay'] == base_wd
        assert len(param_groups['params']) == 2
        assert torch.equal(param_groups['params'][0],
                           param_dict[f'module.model{i + 1}.weight'])
        assert torch.equal(param_groups['params'][1],
                           param_dict[f'module.model{i + 1}.bias'])

    # Case 3: a single optimizer config -> one optimizer over all params.
    optimizer_cfg = dict(
        type='SGD', lr=base_lr, weight_decay=base_wd, momentum=momentum)
    model = ExampleModel()
    optimizer = build_optimizers(model, optimizer_cfg)
    param_dict = dict(model.named_parameters())
    # Fix: the original asserted ``isinstance(optimizers, dict)`` here,
    # which re-checked the stale result of the previous case instead of the
    # newly built single optimizer.
    assert isinstance(optimizer, torch.optim.Optimizer)
    param_groups = optimizer.param_groups[0]
    assert isinstance(optimizer, torch.optim.SGD)
    assert optimizer.defaults['lr'] == base_lr
    assert optimizer.defaults['momentum'] == momentum
    assert optimizer.defaults['weight_decay'] == base_wd
    assert len(param_groups['params']) == 4
    assert torch.equal(param_groups['params'][0], param_dict['model1.weight'])
    assert torch.equal(param_groups['params'][1], param_dict['model1.bias'])
    assert torch.equal(param_groups['params'][2], param_dict['model2.weight'])
    assert torch.equal(param_groups['params'][3], param_dict['model2.bias'])

    # Case 4: single config with a DataParallel-wrapped model.
    model = torch.nn.DataParallel(ExampleModel())
    optimizer = build_optimizers(model, optimizer_cfg)
    param_dict = dict(model.named_parameters())
    # Fix (same stale-variable bug as above): check the new optimizer.
    assert isinstance(optimizer, torch.optim.Optimizer)
    param_groups = optimizer.param_groups[0]
    assert isinstance(optimizer, torch.optim.SGD)
    assert optimizer.defaults['lr'] == base_lr
    assert optimizer.defaults['momentum'] == momentum
    assert optimizer.defaults['weight_decay'] == base_wd
    assert len(param_groups['params']) == 4
    assert torch.equal(param_groups['params'][0],
                       param_dict['module.model1.weight'])
    assert torch.equal(param_groups['params'][1],
                       param_dict['module.model1.bias'])
    assert torch.equal(param_groups['params'][2],
                       param_dict['module.model2.weight'])
    assert torch.equal(param_groups['params'][3],
                       param_dict['module.model2.bias'])
def test_bbox_mask():
    """random_bbox/bbox2mask: mask shape, binary content and dtype, plus
    validation of impossible bbox configurations."""
    cfg = dict(
        img_shape=(256, 256),
        max_bbox_shape=100,
        max_bbox_delta=10,
        min_margin=10)
    bbox = random_bbox(**cfg)
    mask_bbox = bbox2mask(cfg['img_shape'], bbox)
    assert (mask_bbox.shape == (256, 256, 1))
    # Mask must be strictly binary: every pixel is either 0 or 1.
    zero_area = np.sum((mask_bbox == 0).astype(np.uint8))
    ones_area = np.sum((mask_bbox == 1).astype(np.uint8))
    assert ((zero_area + ones_area) == (256 * 256))
    assert (mask_bbox.dtype == np.uint8)

    # bbox larger than the image is rejected.
    with pytest.raises(ValueError):
        cfg_ = cfg.copy()
        cfg_['max_bbox_shape'] = 300
        bbox = random_bbox(**cfg_)
    # delta larger than the image is rejected.
    with pytest.raises(ValueError):
        cfg_ = cfg.copy()
        cfg_['max_bbox_delta'] = 300
        bbox = random_bbox(**cfg_)
    # bbox that cannot satisfy the min_margin constraint is rejected.
    with pytest.raises(ValueError):
        cfg_ = cfg.copy()
        cfg_['max_bbox_shape'] = 254
        bbox = random_bbox(**cfg_)

    # Minimal delta is still valid.
    cfg_ = cfg.copy()
    cfg_['max_bbox_delta'] = 1
    bbox = random_bbox(**cfg_)
    mask_bbox = bbox2mask(cfg['img_shape'], bbox)
    assert (mask_bbox.shape == (256, 256, 1))
def test_free_form_mask():
    """brush_stroke_mask: shape, binary content/dtype, custom parameters and
    type validation."""
    img_shape = (256, 256, 3)
    # Random strokes: shape must hold across repeated draws.
    for _ in range(10):
        mask = brush_stroke_mask(img_shape)
        assert (mask.shape == (256, 256, 1))

    img_shape = (256, 256, 3)
    mask = brush_stroke_mask(img_shape, num_vertices=8)
    assert (mask.shape == (256, 256, 1))
    # Mask must be strictly binary.
    zero_area = np.sum((mask == 0).astype(np.uint8))
    ones_area = np.sum((mask == 1).astype(np.uint8))
    assert ((zero_area + ones_area) == (256 * 256))
    assert (mask.dtype == np.uint8)

    img_shape = (256, 256, 3)
    mask = brush_stroke_mask(img_shape, brush_width=10)
    assert (mask.shape == (256, 256, 1))

    # Non-int/tuple parameters are rejected.
    with pytest.raises(TypeError):
        mask = brush_stroke_mask(img_shape, num_vertices=dict())
    with pytest.raises(TypeError):
        mask = brush_stroke_mask(img_shape, brush_width=dict())
def test_irregular_mask():
    """get_irregular_mask: shape, masked-area ratio bounds, binary
    content/dtype, custom parameters and type validation."""
    img_shape = (256, 256)
    for _ in range(10):
        mask = get_irregular_mask(img_shape)
        assert (mask.shape == (256, 256, 1))
        # Masked area should fall in the documented (0.15, 0.5) ratio band.
        assert (0.15 < (np.sum(mask) / (img_shape[0] * img_shape[1])) < 0.5)
        # Mask must be strictly binary.
        zero_area = np.sum((mask == 0).astype(np.uint8))
        ones_area = np.sum((mask == 1).astype(np.uint8))
        assert ((zero_area + ones_area) == (256 * 256))
        assert (mask.dtype == np.uint8)

    # Non-int/tuple parameters are rejected.
    with pytest.raises(TypeError):
        mask = get_irregular_mask(img_shape, brush_width=dict())
    with pytest.raises(TypeError):
        mask = get_irregular_mask(img_shape, length_range=dict())
    with pytest.raises(TypeError):
        mask = get_irregular_mask(img_shape, num_vertices=dict())

    # Scalar overrides for each parameter are accepted.
    mask = get_irregular_mask(img_shape, brush_width=10)
    assert (mask.shape == (256, 256, 1))
    mask = get_irregular_mask(img_shape, length_range=10)
    assert (mask.shape == (256, 256, 1))
    mask = get_irregular_mask(img_shape, num_vertices=10)
    assert (mask.shape == (256, 256, 1))
def test_modify_args():
    """modify_args should rewrite underscore-style CLI flags so that a parser
    declaring only the dashed spelling still accepts them."""

    def _build_and_parse():
        # The parser declares only the dashed flag; the patched argv uses
        # the underscore spelling, which modify_args must translate.
        parser = argparse.ArgumentParser(description='Generation demo')
        parser.add_argument('--config-path', help='test config file path')
        return parser.parse_args()

    fake_argv = ['test.py', '--config_path=config.py']
    with patch('argparse._sys.argv', fake_argv):
        modify_args()
        assert _build_and_parse().config_path == 'config.py'
@pytest.mark.skipif((torch.__version__ == 'parrots'), reason='skip parrots.')
@pytest.mark.skipif((version.parse(torch.__version__) < version.parse('1.4.0')),
                    reason='skip if torch=1.3.x')
def test_restorer_wrapper():
    """Export a small SRCNN restorer to ONNX and run it through the
    ONNXRuntimeEditing wrapper, checking the wrapper type and output shape."""
    try:
        import onnxruntime as ort
        from mmedit.core.export.wrappers import (ONNXRuntimeEditing,
                                                 ONNXRuntimeRestorer)
    except ImportError:
        pytest.skip('ONNXRuntime is not available.')

    # NOTE(review): 'tmp.onnx' is never deleted here (the mattor test below
    # does os.remove) — consider cleaning it up.
    onnx_path = 'tmp.onnx'
    scale = 4
    train_cfg = None
    test_cfg = None
    cfg = dict(
        model=dict(
            type='BasicRestorer',
            generator=dict(
                type='SRCNN',
                channels=(3, 4, 2, 3),
                kernel_sizes=(9, 1, 5),
                upscale_factor=scale),
            pixel_loss=dict(type='L1Loss', loss_weight=1.0, reduction='mean')),
        train_cfg=train_cfg,
        test_cfg=test_cfg)
    cfg = mmcv.Config(cfg)

    pytorch_model = build_model(
        cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)

    # Tiny input so the export is fast; target is input upscaled by `scale`.
    inputs = torch.rand(1, 3, 2, 2)
    targets = torch.rand(1, 3, 8, 8)
    data_batch = {'lq': inputs, 'gt': targets}

    # forward_dummy takes a bare tensor, which is what ONNX export needs.
    pytorch_model.forward = pytorch_model.forward_dummy
    with torch.no_grad():
        torch.onnx.export(
            pytorch_model,
            inputs,
            onnx_path,
            input_names=['input'],
            output_names=['output'],
            export_params=True,
            keep_initializers_as_inputs=False,
            verbose=False,
            opset_version=11)

    wrap_model = ONNXRuntimeEditing(onnx_path, cfg, 0)
    assert isinstance(wrap_model.wrapper, ONNXRuntimeRestorer)
    if (ort.get_device() == 'GPU'):
        data_batch = {'lq': inputs.cuda(), 'gt': targets.cuda()}
    with torch.no_grad():
        outputs = wrap_model(**data_batch, test_mode=True)
    assert isinstance(outputs, dict)
    assert ('output' in outputs)
    output = outputs['output']
    assert isinstance(output, torch.Tensor)
    assert (output.shape == targets.shape)
@pytest.mark.skipif((torch.__version__ == 'parrots'), reason='skip parrots.')
@pytest.mark.skipif((version.parse(torch.__version__) < version.parse('1.4.0')), reason='skip if torch=1.3.x')
def test_mattor_wrapper():
    """Export a DIM mattor to ONNX and smoke-test the ONNXRuntimeEditing
    wrapper's matting path (alpha prediction)."""
    try:
        import onnxruntime as ort
        from mmedit.core.export.wrappers import ONNXRuntimeEditing, ONNXRuntimeMattor
    except ImportError:
        pytest.skip('ONNXRuntime is not available.')
    onnx_path = 'tmp.onnx'
    train_cfg = None
    test_cfg = dict(refine=False, metrics=['SAD', 'MSE', 'GRAD', 'CONN'])
    cfg = dict(
        model=dict(
            type='DIM',
            backbone=dict(
                type='SimpleEncoderDecoder',
                encoder=dict(type='VGG16', in_channels=4),
                decoder=dict(type='PlainDecoder')),
            pretrained='open-mmlab://mmedit/vgg16',
            loss_alpha=dict(type='CharbonnierLoss', loss_weight=0.5),
            loss_comp=dict(type='CharbonnierCompLoss', loss_weight=0.5)),
        train_cfg=train_cfg,
        test_cfg=test_cfg)
    cfg = mmcv.Config(cfg)
    pytorch_model = build_model(cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
    img_shape = (32, 32)
    merged = torch.rand(1, 3, img_shape[1], img_shape[0])
    trimap = torch.rand(1, 1, img_shape[1], img_shape[0])
    data_batch = {'merged': merged, 'trimap': trimap}
    # DIM's dummy forward takes merged+trimap concatenated on channels.
    inputs = torch.cat([merged, trimap], dim=1)
    pytorch_model.forward = pytorch_model.forward_dummy
    with torch.no_grad():
        torch.onnx.export(pytorch_model, inputs, onnx_path, input_names=['input'], output_names=['output'], export_params=True, keep_initializers_as_inputs=False, verbose=False, opset_version=11)
    wrap_model = ONNXRuntimeEditing(onnx_path, cfg, 0)
    # Clean up the temporary onnx file once loaded.
    os.remove(onnx_path)
    assert isinstance(wrap_model.wrapper, ONNXRuntimeMattor)
    if (ort.get_device() == 'GPU'):
        merged = merged.cuda()
        trimap = trimap.cuda()
        data_batch = {'merged': merged, 'trimap': trimap}
    # Meta info needed by the mattor post-processing (restoring original
    # shape and trimap).
    ori_alpha = np.random.random(img_shape).astype(np.float32)
    ori_trimap = np.random.randint(256, size=img_shape).astype(np.float32)
    data_batch['meta'] = [dict(ori_alpha=ori_alpha, ori_trimap=ori_trimap, merged_ori_shape=img_shape)]
    with torch.no_grad():
        outputs = wrap_model(**data_batch, test_mode=True)
    assert isinstance(outputs, dict)
    assert ('pred_alpha' in outputs)
    pred_alpha = outputs['pred_alpha']
    # The predicted alpha comes back as a numpy array at the original size.
    assert isinstance(pred_alpha, np.ndarray)
    assert (pred_alpha.shape == img_shape)
def test_pix2pix():
    """End-to-end test of the Pix2Pix model: construction, forward (train /
    test / dummy), train_step with disc_steps / disc_init_steps scheduling,
    no-pixel-loss config, b2a direction, and image saving."""
    model_cfg = dict(
        type='Pix2Pix',
        generator=dict(type='UnetGenerator', in_channels=3, out_channels=3, num_down=8, base_channels=64, norm_cfg=dict(type='BN'), use_dropout=True, init_cfg=dict(type='normal', gain=0.02)),
        discriminator=dict(type='PatchDiscriminator', in_channels=6, base_channels=64, num_conv=3, norm_cfg=dict(type='BN'), init_cfg=dict(type='normal', gain=0.02)),
        gan_loss=dict(type='GANLoss', gan_type='vanilla', real_label_val=1.0, fake_label_val=0, loss_weight=1.0),
        pixel_loss=dict(type='L1Loss', loss_weight=100.0, reduction='mean'))
    train_cfg = None
    test_cfg = None
    synthesizer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
    # gan_loss is mandatory for Pix2Pix.
    with pytest.raises(AssertionError):
        bad_model_cfg = copy.deepcopy(model_cfg)
        bad_model_cfg['gan_loss'] = None
        _ = build_model(bad_model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
    # Verify component types after building.
    assert (synthesizer.__class__.__name__ == 'Pix2Pix')
    assert isinstance(synthesizer.generator, UnetGenerator)
    assert isinstance(synthesizer.discriminator, PatchDiscriminator)
    assert isinstance(synthesizer.gan_loss, GANLoss)
    assert isinstance(synthesizer.pixel_loss, L1Loss)
    assert (synthesizer.train_cfg is None)
    assert (synthesizer.test_cfg is None)
    # Prepare a 1-sample paired batch with meta info.
    inputs = torch.rand(1, 3, 256, 256)
    targets = torch.rand(1, 3, 256, 256)
    data_batch = {'img_a': inputs, 'img_b': targets}
    img_meta = {}
    img_meta['img_a_path'] = 'img_a_path'
    img_meta['img_b_path'] = 'img_b_path'
    data_batch['meta'] = [img_meta]
    optim_cfg = dict(type='Adam', lr=0.0002, betas=(0.5, 0.999))
    optimizer = {'generator': obj_from_dict(optim_cfg, torch.optim, dict(params=getattr(synthesizer, 'generator').parameters())), 'discriminator': obj_from_dict(optim_cfg, torch.optim, dict(params=getattr(synthesizer, 'discriminator').parameters()))}
    # forward_dummy returns just the generated tensor.
    with torch.no_grad():
        output = synthesizer.forward_dummy(data_batch['img_a'])
    assert torch.is_tensor(output)
    assert (output.size() == (1, 3, 256, 256))
    # test_mode forward.
    with torch.no_grad():
        outputs = synthesizer(inputs, targets, [img_meta], test_mode=True)
    assert torch.equal(outputs['real_a'], data_batch['img_a'])
    assert torch.equal(outputs['real_b'], data_batch['img_b'])
    assert torch.is_tensor(outputs['fake_b'])
    assert (outputs['fake_b'].size() == (1, 3, 256, 256))
    # val_step.
    with torch.no_grad():
        outputs = synthesizer.val_step(data_batch)
    assert torch.equal(outputs['real_a'], data_batch['img_a'])
    assert torch.equal(outputs['real_b'], data_batch['img_b'])
    assert torch.is_tensor(outputs['fake_b'])
    assert (outputs['fake_b'].size() == (1, 3, 256, 256))
    # train-mode forward.
    outputs = synthesizer(inputs, targets, [img_meta], test_mode=False)
    assert torch.equal(outputs['real_a'], data_batch['img_a'])
    assert torch.equal(outputs['real_b'], data_batch['img_b'])
    assert torch.is_tensor(outputs['fake_b'])
    assert (outputs['fake_b'].size() == (1, 3, 256, 256))
    # train_step reports both D and G losses.
    outputs = synthesizer.train_step(data_batch, optimizer)
    assert isinstance(outputs, dict)
    assert isinstance(outputs['log_vars'], dict)
    assert isinstance(outputs['results'], dict)
    for v in ['loss_gan_d_fake', 'loss_gan_d_real', 'loss_gan_g', 'loss_pixel']:
        assert isinstance(outputs['log_vars'][v], float)
    assert (outputs['num_samples'] == 1)
    assert torch.equal(outputs['results']['real_a'], data_batch['img_a'])
    assert torch.equal(outputs['results']['real_b'], data_batch['img_b'])
    assert torch.is_tensor(outputs['results']['fake_b'])
    assert (outputs['results']['fake_b'].size() == (1, 3, 256, 256))
    # Repeat forward/val/train on GPU when available; outputs are moved
    # back to CPU by the model, hence the .cpu() comparisons.
    if torch.cuda.is_available():
        synthesizer = synthesizer.cuda()
        optimizer = {'generator': obj_from_dict(optim_cfg, torch.optim, dict(params=getattr(synthesizer, 'generator').parameters())), 'discriminator': obj_from_dict(optim_cfg, torch.optim, dict(params=getattr(synthesizer, 'discriminator').parameters()))}
        data_batch_cuda = copy.deepcopy(data_batch)
        data_batch_cuda['img_a'] = inputs.cuda()
        data_batch_cuda['img_b'] = targets.cuda()
        data_batch_cuda['meta'] = [DC(img_meta, cpu_only=True).data]
        with torch.no_grad():
            outputs = synthesizer(data_batch_cuda['img_a'], data_batch_cuda['img_b'], data_batch_cuda['meta'], test_mode=True)
        assert torch.equal(outputs['real_a'], data_batch_cuda['img_a'].cpu())
        assert torch.equal(outputs['real_b'], data_batch_cuda['img_b'].cpu())
        assert torch.is_tensor(outputs['fake_b'])
        assert (outputs['fake_b'].size() == (1, 3, 256, 256))
        with torch.no_grad():
            outputs = synthesizer.val_step(data_batch_cuda)
        assert torch.equal(outputs['real_a'], data_batch_cuda['img_a'].cpu())
        assert torch.equal(outputs['real_b'], data_batch_cuda['img_b'].cpu())
        assert torch.is_tensor(outputs['fake_b'])
        assert (outputs['fake_b'].size() == (1, 3, 256, 256))
        outputs = synthesizer(data_batch_cuda['img_a'], data_batch_cuda['img_b'], data_batch_cuda['meta'], test_mode=False)
        assert torch.equal(outputs['real_a'], data_batch_cuda['img_a'])
        assert torch.equal(outputs['real_b'], data_batch_cuda['img_b'])
        assert torch.is_tensor(outputs['fake_b'])
        assert (outputs['fake_b'].size() == (1, 3, 256, 256))
        outputs = synthesizer.train_step(data_batch_cuda, optimizer)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['log_vars'], dict)
        assert isinstance(outputs['results'], dict)
        for v in ['loss_gan_d_fake', 'loss_gan_d_real', 'loss_gan_g', 'loss_pixel']:
            assert isinstance(outputs['log_vars'][v], float)
        assert (outputs['num_samples'] == 1)
        assert torch.equal(outputs['results']['real_a'], data_batch_cuda['img_a'].cpu())
        assert torch.equal(outputs['results']['real_b'], data_batch_cuda['img_b'].cpu())
        assert torch.is_tensor(outputs['results']['fake_b'])
        assert (outputs['results']['fake_b'].size() == (1, 3, 256, 256))
    # disc_steps / disc_init_steps: during the first disc_init_steps steps
    # only the discriminator is updated (no generator losses logged).
    data_batch['img_a'] = inputs.cpu()
    data_batch['img_b'] = targets.cpu()
    train_cfg = dict(disc_steps=2, disc_init_steps=2)
    synthesizer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
    optimizer = {'generator': obj_from_dict(optim_cfg, torch.optim, dict(params=getattr(synthesizer, 'generator').parameters())), 'discriminator': obj_from_dict(optim_cfg, torch.optim, dict(params=getattr(synthesizer, 'discriminator').parameters()))}
    for i in range(2):
        assert (synthesizer.step_counter == i)
        outputs = synthesizer.train_step(data_batch, optimizer)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['log_vars'], dict)
        assert isinstance(outputs['results'], dict)
        assert (outputs['log_vars'].get('loss_gan_g') is None)
        assert (outputs['log_vars'].get('loss_pixel') is None)
        for v in ['loss_gan_d_fake', 'loss_gan_d_real']:
            assert isinstance(outputs['log_vars'][v], float)
        assert (outputs['num_samples'] == 1)
        assert torch.equal(outputs['results']['real_a'], data_batch['img_a'])
        assert torch.equal(outputs['results']['real_b'], data_batch['img_b'])
        assert torch.is_tensor(outputs['results']['fake_b'])
        assert (outputs['results']['fake_b'].size() == (1, 3, 256, 256))
        assert (synthesizer.step_counter == (i + 1))
    # After warm-up, the generator is updated only every disc_steps steps.
    for i in range(2, 6):
        assert (synthesizer.step_counter == i)
        outputs = synthesizer.train_step(data_batch, optimizer)
        assert isinstance(outputs, dict)
        assert isinstance(outputs['log_vars'], dict)
        assert isinstance(outputs['results'], dict)
        log_check_list = ['loss_gan_d_fake', 'loss_gan_d_real', 'loss_gan_g', 'loss_pixel']
        if ((i % 2) == 1):
            # Odd steps skip the generator update.
            assert (outputs['log_vars'].get('loss_gan_g') is None)
            assert (outputs['log_vars'].get('loss_pixel') is None)
            log_check_list.remove('loss_gan_g')
            log_check_list.remove('loss_pixel')
        for v in log_check_list:
            assert isinstance(outputs['log_vars'][v], float)
        assert (outputs['num_samples'] == 1)
        assert torch.equal(outputs['results']['real_a'], data_batch['img_a'])
        assert torch.equal(outputs['results']['real_b'], data_batch['img_b'])
        assert torch.is_tensor(outputs['results']['fake_b'])
        assert (outputs['results']['fake_b'].size() == (1, 3, 256, 256))
        assert (synthesizer.step_counter == (i + 1))
    # Without pixel_loss, loss_pixel is never logged.
    model_cfg_ = copy.deepcopy(model_cfg)
    model_cfg_.pop('pixel_loss')
    synthesizer = build_model(model_cfg_, train_cfg=None, test_cfg=None)
    optimizer = {'generator': obj_from_dict(optim_cfg, torch.optim, dict(params=getattr(synthesizer, 'generator').parameters())), 'discriminator': obj_from_dict(optim_cfg, torch.optim, dict(params=getattr(synthesizer, 'discriminator').parameters()))}
    data_batch['img_a'] = inputs.cpu()
    data_batch['img_b'] = targets.cpu()
    outputs = synthesizer.train_step(data_batch, optimizer)
    assert isinstance(outputs, dict)
    assert isinstance(outputs['log_vars'], dict)
    assert isinstance(outputs['results'], dict)
    assert (outputs['log_vars'].get('loss_pixel') is None)
    for v in ['loss_gan_d_fake', 'loss_gan_d_real', 'loss_gan_g']:
        assert isinstance(outputs['log_vars'][v], float)
    assert (outputs['num_samples'] == 1)
    assert torch.equal(outputs['results']['real_a'], data_batch['img_a'])
    assert torch.equal(outputs['results']['real_b'], data_batch['img_b'])
    assert torch.is_tensor(outputs['results']['fake_b'])
    assert (outputs['results']['fake_b'].size() == (1, 3, 256, 256))
    # direction='b2a' swaps the roles of img_a / img_b.
    data_batch['img_a'] = inputs.cpu()
    data_batch['img_b'] = targets.cpu()
    train_cfg = dict(direction='b2a')
    synthesizer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
    optimizer = {'generator': obj_from_dict(optim_cfg, torch.optim, dict(params=getattr(synthesizer, 'generator').parameters())), 'discriminator': obj_from_dict(optim_cfg, torch.optim, dict(params=getattr(synthesizer, 'discriminator').parameters()))}
    assert (synthesizer.step_counter == 0)
    outputs = synthesizer.train_step(data_batch, optimizer)
    assert isinstance(outputs, dict)
    assert isinstance(outputs['log_vars'], dict)
    assert isinstance(outputs['results'], dict)
    for v in ['loss_gan_d_fake', 'loss_gan_d_real', 'loss_gan_g', 'loss_pixel']:
        assert isinstance(outputs['log_vars'][v], float)
    assert (outputs['num_samples'] == 1)
    # Note the swap: real_a is img_b and vice versa.
    assert torch.equal(outputs['results']['real_a'], data_batch['img_b'])
    assert torch.equal(outputs['results']['real_b'], data_batch['img_a'])
    assert torch.is_tensor(outputs['results']['fake_b'])
    assert (outputs['results']['fake_b'].size() == (1, 3, 256, 256))
    assert (synthesizer.step_counter == 1)
    # save_image requires save_path; check with show_input=True.
    train_cfg = None
    test_cfg = dict(show_input=True)
    synthesizer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
    with patch.object(mmcv, 'imwrite', return_value=True):
        with pytest.raises(AssertionError):
            with torch.no_grad():
                _ = synthesizer(inputs, targets, [img_meta], test_mode=True, save_image=True)
        with torch.no_grad():
            outputs = synthesizer(inputs, targets, [img_meta], test_mode=True, save_image=True, save_path='save_path')
        assert torch.equal(outputs['real_a'], data_batch['img_a'])
        assert torch.equal(outputs['real_b'], data_batch['img_b'])
        assert torch.is_tensor(outputs['fake_b'])
        assert (outputs['fake_b'].size() == (1, 3, 256, 256))
        assert outputs['saved_flag']
        with torch.no_grad():
            outputs = synthesizer(inputs, targets, [img_meta], test_mode=True, save_image=True, save_path='save_path', iteration=1000)
        assert torch.equal(outputs['real_a'], data_batch['img_a'])
        assert torch.equal(outputs['real_b'], data_batch['img_b'])
        assert torch.is_tensor(outputs['fake_b'])
        assert (outputs['fake_b'].size() == (1, 3, 256, 256))
        assert outputs['saved_flag']
    # Same saving checks with show_input=False.
    train_cfg = None
    test_cfg = dict(show_input=False)
    synthesizer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
    with patch.object(mmcv, 'imwrite', return_value=True):
        with pytest.raises(AssertionError):
            with torch.no_grad():
                _ = synthesizer(inputs, targets, [img_meta], test_mode=True, save_image=True)
        with torch.no_grad():
            outputs = synthesizer(inputs, targets, [img_meta], test_mode=True, save_image=True, save_path='save_path')
        assert torch.equal(outputs['real_a'], data_batch['img_a'])
        assert torch.equal(outputs['real_b'], data_batch['img_b'])
        assert torch.is_tensor(outputs['fake_b'])
        assert (outputs['fake_b'].size() == (1, 3, 256, 256))
        assert outputs['saved_flag']
        with torch.no_grad():
            outputs = synthesizer(inputs, targets, [img_meta], test_mode=True, save_image=True, save_path='save_path', iteration=1000)
        assert torch.equal(outputs['real_a'], data_batch['img_a'])
        assert torch.equal(outputs['real_b'], data_batch['img_b'])
        assert torch.is_tensor(outputs['fake_b'])
        assert (outputs['fake_b'].size() == (1, 3, 256, 256))
        assert outputs['saved_flag']
def test_setup_multi_processes():
    """Test setup_multi_processes: thread-count env vars, OpenCV threads,
    and the multiprocessing start method, restoring global state afterwards."""
    # Save the ambient process-global settings so they can be restored;
    # the typo 'sys_start_mehod' is kept as-is (local variable only).
    sys_start_mehod = mp.get_start_method(allow_none=True)
    sys_cv_threads = cv2.getNumThreads()
    sys_omp_threads = os.environ.pop('OMP_NUM_THREADS', default=None)
    sys_mkl_threads = os.environ.pop('MKL_NUM_THREADS', default=None)
    # With workers_per_gpu > 0 the helper pins threads to 1 and (on
    # non-Windows platforms) selects the 'fork' start method.
    config = dict(data=dict(workers_per_gpu=2))
    cfg = Config(config)
    setup_multi_processes(cfg)
    assert (os.getenv('OMP_NUM_THREADS') == '1')
    assert (os.getenv('MKL_NUM_THREADS') == '1')
    assert (cv2.getNumThreads() == 1)
    if (platform.system() != 'Windows'):
        assert (mp.get_start_method() == 'fork')
    # With workers_per_gpu == 0 the env vars must be left unset.
    os.environ.pop('OMP_NUM_THREADS')
    os.environ.pop('MKL_NUM_THREADS')
    config = dict(data=dict(workers_per_gpu=0))
    cfg = Config(config)
    setup_multi_processes(cfg)
    assert ('OMP_NUM_THREADS' not in os.environ)
    assert ('MKL_NUM_THREADS' not in os.environ)
    # Pre-existing env vars must not be overwritten.
    os.environ['OMP_NUM_THREADS'] = '4'
    config = dict(data=dict(workers_per_gpu=2))
    cfg = Config(config)
    setup_multi_processes(cfg)
    assert (os.getenv('OMP_NUM_THREADS') == '4')
    # Explicit opencv_num_threads / mp_start_method settings take effect.
    config = dict(data=dict(workers_per_gpu=2), opencv_num_threads=4, mp_start_method='spawn')
    cfg = Config(config)
    setup_multi_processes(cfg)
    assert (cv2.getNumThreads() == 4)
    assert (mp.get_start_method() == 'spawn')
    # Restore the process-global state saved at the top.
    if sys_start_mehod:
        mp.set_start_method(sys_start_mehod, force=True)
    cv2.setNumThreads(sys_cv_threads)
    if sys_omp_threads:
        os.environ['OMP_NUM_THREADS'] = sys_omp_threads
    else:
        os.environ.pop('OMP_NUM_THREADS')
    if sys_mkl_threads:
        os.environ['MKL_NUM_THREADS'] = sys_mkl_threads
    else:
        os.environ.pop('MKL_NUM_THREADS')
def generate_json(data_root, seg_root, bg_root, all_data):
    """Generate training json list for Background Matting video dataset.

    Args:
        data_root (str): Background Matting video data root.
        seg_root (str): Segmentation of Background Matting video data root.
        bg_root (str): Background video data root.
        all_data (bool): Whether use the last 80 frames of each video. If
            True, the last 80 frames will be added to training json. In the
            original Background Matting github repo, due to the use of
            motion cue, the last 80 frames is not used.
    """
    video_root = osp.join(data_root, 'fixed-camera/train')
    # Default locations mirror the original repo layout.
    if (seg_root is None):
        seg_root = video_root
    if (bg_root is None):
        bg_root = osp.join(data_root, 'background')
    # One sub-directory per video, both for frames and backgrounds.
    video_dirs = [entry for entry in os.listdir(video_root) if osp.isdir(osp.join(video_root, entry))]
    bg_dirs = [entry for entry in os.listdir(bg_root) if osp.isdir(osp.join(bg_root, entry))]
    # Flatten all background frames and cycle through them endlessly so
    # every sample gets a supplementary background.
    bg_frames = []
    for bg_dir in bg_dirs:
        bg_frames.extend([osp.join(bg_root, bg_dir, f) for f in sorted(mmcv.scandir(osp.join(bg_root, bg_dir)))])
    bg_stream = cycle(bg_frames)
    data_infos = []
    for video_dir in video_dirs:
        video_full_path = osp.join(video_root, video_dir)
        seg_full_path = osp.join(seg_root, video_dir)
        num_frames = len(list(mmcv.scandir(video_full_path, suffix='_img.png')))
        # Optionally drop the last 80 frames (motion-cue limitation).
        effective_frames = (num_frames if all_data else (num_frames - 80))
        for i in range(1, (effective_frames + 1)):
            merged = osp.join(video_full_path, f'{i:04d}_img.png')
            seg = osp.join(seg_full_path, f'{i:04d}_masksDL.png')
            # Per-video fixed background image is '<video_dir>.png'.
            bg = (video_full_path + '.png')
            bg_sup = next(bg_stream)
            data_info = dict(merged_path=merged, seg_path=seg, bg_path=bg, bg_sup_path=bg_sup)
            data_infos.append(data_info)
    save_json_path = 'fixed_camera_train.json'
    mmcv.dump(data_infos, osp.join(data_root, save_json_path))
def parse_args():
    """Parse command-line options for the Background Matting video
    dataset preparation script."""
    arg_parser = argparse.ArgumentParser(
        description='Prepare Background Matting video dataset',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    arg_parser.add_argument(
        'data_root', help='Background Matting video data root')
    arg_parser.add_argument(
        '--seg-root',
        help='Segmentation of Background Matting video data root. If not specified, it will be considered segmentation results are placed in the video frames folder just as the original repo.')
    arg_parser.add_argument(
        '--bg-root',
        help='Background video data root. If not specified, it will use the three background videos in the Captured_Data folder.')
    arg_parser.add_argument(
        '--all-data',
        action='store_true',
        help='Also use the last 80 frames of each video')
    return arg_parser.parse_args()
def main():
    """Entry point: validate the data root and generate the annotation
    json for the Background Matting video dataset."""
    args = parse_args()
    root = args.data_root
    # Fail fast on a missing dataset directory.
    if not osp.exists(root):
        raise FileNotFoundError(f'{root} does not exist!')
    print('generating Background Matting dataset annotation file...')
    generate_json(root, args.seg_root, args.bg_root, args.all_data)
    print('annotation file generated...')
def fix_png_file(filename, folder):
    """Fix one png file in-place using pngfix.

    pngfix is a tool to fix PNG files. It's installed on Linux or MacOS by
    default.

    Args:
        filename (str): png file (basename) to run pngfix on.
        folder (str): directory containing the file; used as the working
            directory for the shell commands.
    """
    # Bug fix: the shell strings previously contained the literal text
    # "(unknown)" instead of interpolating the filename, so pngfix and mv
    # never operated on the intended file. Quote the name so paths with
    # spaces survive the shell.
    subprocess.call(
        f'pngfix --quiet --strip=color --prefix=fixed_ "{filename}"',
        cwd=f'{folder}',
        shell=True)
    # Replace the original file with the fixed copy pngfix produced.
    subprocess.call(
        f'mv "fixed_{filename}" "{filename}"', cwd=f'{folder}', shell=True)
def join_first_contain(directories, filename, data_root):
    """Join the first directory that contains the file.

    Args:
        directories (list[str]): Directories (relative to ``data_root``) to
            search for the file.
        filename (str): The target filename.
        data_root (str): Root of the data path.

    Returns:
        str: ``directory/filename`` for the first directory containing it.

    Raises:
        FileNotFoundError: If no directory contains ``filename``.
    """
    for directory in directories:
        cur_path = osp.join(directory, filename)
        if osp.exists(osp.join(data_root, cur_path)):
            return cur_path
    # Bug fix: the message previously contained the literal "(unknown)"
    # instead of the missing filename.
    raise FileNotFoundError(f'Cannot find {filename} in dirs {directories}')
class ExtendFg():
    """Callable helper that extends a foreground image beyond its alpha
    boundary (via pymatting's ML foreground estimation) and writes the
    result into a parallel ``fg_extended`` directory."""

    def __init__(self, data_root, fg_dirs, alpha_dirs) -> None:
        # Root of the dataset; fg_dirs / alpha_dirs are searched in order.
        self.data_root = data_root
        self.fg_dirs = fg_dirs
        self.alpha_dirs = alpha_dirs

    def extend(self, fg_name):
        """Estimate an extended foreground for one image.

        Args:
            fg_name (str): foreground file name (possibly with trailing
                whitespace/newline, e.g. read from a names file).

        Returns:
            dict: with 'alpha_path' (absolute) and 'fg_path' (path of the
            saved extended foreground png).
        """
        fg_name = fg_name.strip()
        alpha_path = join_first_contain(self.alpha_dirs, fg_name, self.data_root)
        fg_path = join_first_contain(self.fg_dirs, fg_name, self.data_root)
        alpha_path = osp.join(self.data_root, alpha_path)
        fg_path = osp.join(self.data_root, fg_path)
        # Mirror the fg path into the fg_extended directory, saving as png.
        extended_path = re.sub('/fg/', '/fg_extended/', fg_path)
        # NOTE(review): replaces the first 'jpg' substring anywhere in the
        # path, not just the extension — assumes directories never contain
        # 'jpg' in their names; confirm against the dataset layout.
        extended_path = extended_path.replace('jpg', 'png')
        if (not osp.exists(alpha_path)):
            raise FileNotFoundError(f'{alpha_path} does not exist!')
        if (not osp.exists(fg_path)):
            raise FileNotFoundError(f'{fg_path} does not exist!')
        image = load_image(fg_path, 'RGB')
        alpha = load_image(alpha_path, 'GRAY')
        # Estimate the true foreground colors under the alpha matte.
        F = estimate_foreground_ml(image, alpha, return_background=False)
        fg = Image.fromarray(np.uint8((F * 255)))
        fg.save(extended_path)
        # Normalize the png so downstream readers do not choke on it.
        fix_png_file(osp.basename(extended_path), osp.dirname(extended_path))
        data_info = dict()
        data_info['alpha_path'] = alpha_path
        data_info['fg_path'] = extended_path
        return data_info
def parse_args():
    """Parse command-line options for the Adobe composition-1k
    preparation script (FBA foreground extension)."""
    cli = argparse.ArgumentParser(
        description='Prepare Adobe composition 1k dataset',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    cli.add_argument('data_root', help='Adobe composition 1k dataset root')
    cli.add_argument(
        '--nproc', type=int, default=4, help='number of processor')
    return cli.parse_args()
def main():
    """Entry point: extend all training foregrounds of the Adobe
    composition-1k dataset in parallel and dump the resulting json list."""
    args = parse_args()
    if (not osp.exists(args.data_root)):
        raise FileNotFoundError(f'{args.data_root} does not exist!')
    data_root = args.data_root
    print('preparing training data...')
    dir_prefix = 'Training_set'
    fname_prefix = 'training'
    # Foreground / alpha sources, and their fg_extended destinations.
    fg_dirs = ['Training_set/Adobe-licensed images/fg', 'Training_set/Other/fg']
    alpha_dirs = ['Training_set/Adobe-licensed images/alpha', 'Training_set/Other/alpha']
    extended_dirs = ['Training_set/Adobe-licensed images/fg_extended', 'Training_set/Other/fg_extended']
    # Create output directories up front (idempotent).
    for p in extended_dirs:
        p = osp.join(data_root, p)
        os.makedirs(p, exist_ok=True)
    fg_names = osp.join(dir_prefix, f'{fname_prefix}_fg_names.txt')
    save_json_path = f'{fname_prefix}_list_fba.json'
    fg_names = open(osp.join(data_root, fg_names)).readlines()
    fg_iter = iter(fg_names)
    extend_fg = ExtendFg(data_root, fg_dirs, alpha_dirs)
    # Run the per-image extension across args.nproc worker processes.
    data_infos = mmcv.track_parallel_progress(extend_fg.extend, list(fg_iter), args.nproc)
    mmcv.dump(data_infos, osp.join(data_root, save_json_path))
    print('train done')
def generate_json(comp1k_json_path, target_list_path, save_json_path):
    """Filter a composition-1k annotation file, keeping only the entries
    whose 'alpha_path' ends with one of the listed target names, and dump
    the filtered list to ``save_json_path``."""
    annotations = mmcv.load(comp1k_json_path)
    wanted = mmcv.list_from_file(target_list_path)
    # An entry survives if any target name matches its alpha_path suffix.
    kept = [
        info for info in annotations
        if any(info['alpha_path'].endswith(name) for name in wanted)
    ]
    mmcv.dump(kept, save_json_path)
def parse_args():
    """Parse command-line options for the composition-1k annotation
    filtering script (three positional paths)."""
    ap = argparse.ArgumentParser(
        description='Filter composition-1k annotation file')
    # All three arguments are positional paths.
    for arg_name, arg_help in (
            ('comp1k_json_path',
             'Path to the composition-1k dataset annotation file'),
            ('target_list_path',
             'Path to the file name list that need to filter out'),
            ('save_json_path', 'Path to save the result json file')):
        ap.add_argument(arg_name, help=arg_help)
    return ap.parse_args()
def main():
    """Entry point: validate the input paths, then filter the
    composition-1k annotation file."""
    args = parse_args()
    # Both inputs must exist before filtering; check them in order so the
    # error message names the first missing one.
    for required_path in (args.comp1k_json_path, args.target_list_path):
        if not osp.exists(required_path):
            raise FileNotFoundError(f'{required_path} does not exist!')
    generate_json(args.comp1k_json_path, args.target_list_path,
                  args.save_json_path)
    print('Done!')
def fix_png_files(directory):
    """Fix png files in the target directory using pngfix.

    pngfix is a tool to fix PNG files. It's installed on Linux or MacOS by
    default.

    Args:
        directory (str): Directory to run pngfix.
    """
    # Fix every png in-place; pngfix writes copies prefixed 'fixed_'.
    subprocess.call('pngfix --quiet --strip=color --prefix=fixed_ *.png', cwd=f'{directory}', shell=True)
    # Rename the fixed copies back over the originals ("${fixed_f:6}"
    # strips the 6-character 'fixed_' prefix).
    subprocess.call('for fixed_f in fixed_*; do mv "$fixed_f" "${fixed_f:6}"; done', cwd=f'{directory}', shell=True)
def fix_png_file(filename, folder):
    """Fix one png file in-place using pngfix.

    pngfix is a tool to fix PNG files. It's installed on Linux or MacOS by
    default.

    Args:
        filename (str): png file (basename) to run pngfix on.
        folder (str): directory containing the file; used as the working
            directory for the shell commands.
    """
    # Bug fix: the shell strings previously contained the literal text
    # "(unknown)" instead of interpolating the filename, so pngfix and mv
    # never operated on the intended file. Quote the name so paths with
    # spaces survive the shell.
    subprocess.call(
        f'pngfix --quiet --strip=color --prefix=fixed_ "{filename}"',
        cwd=f'{folder}',
        shell=True)
    # Replace the original file with the fixed copy pngfix produced.
    subprocess.call(
        f'mv "fixed_{filename}" "{filename}"', cwd=f'{folder}', shell=True)
def join_first_contain(directories, filename, data_root):
    """Join the first directory that contains the file.

    Args:
        directories (list[str]): Directories (relative to ``data_root``) to
            search for the file.
        filename (str): The target filename.
        data_root (str): Root of the data path.

    Returns:
        str: ``directory/filename`` for the first directory containing it.

    Raises:
        FileNotFoundError: If no directory contains ``filename``.
    """
    for directory in directories:
        cur_path = osp.join(directory, filename)
        if osp.exists(osp.join(data_root, cur_path)):
            return cur_path
    # Bug fix: the message previously contained the literal "(unknown)"
    # instead of the missing filename.
    raise FileNotFoundError(f'Cannot find {filename} in dirs {directories}')
def get_data_info(args):
    """Function to process one piece of data.

    Prepares the background (resized/cropped to the foreground size),
    optionally composites fg over bg, and returns the paths record.

    Args:
        args (tuple): Information needed to process one piece of data:
            (name_with_postfix, source_bg_path, repeat_info, constant).

    Returns:
        dict: The processed data info.
    """
    (name_with_postfix, source_bg_path, repeat_info, constant) = args
    (alpha, fg, alpha_path, fg_path) = repeat_info
    (data_root, composite, mode) = constant
    if (mode == 'training'):
        dir_prefix = 'Training_set'
        trimap_dir = None
    elif (mode == 'test'):
        dir_prefix = 'Test_set'
        # Only the test split ships trimaps.
        trimap_dir = 'Test_set/Adobe-licensed images/trimaps'
    else:
        raise KeyError(f'Unknown mode {mode}.')
    # Outputs are always saved as png regardless of the source extension.
    bg_path = osp.join(dir_prefix, 'bg', name_with_postfix).replace('.jpg', '.png')
    merged_path = osp.join(dir_prefix, 'merged', name_with_postfix).replace('.jpg', '.png')
    if (not osp.exists(source_bg_path)):
        raise FileNotFoundError(f'{source_bg_path} does not exist!')
    try:
        bg = Image.open(source_bg_path).convert('RGB')
    except Exception as ex:
        # Unreadable background: report it and return a partial record
        # instead of killing the whole parallel run.
        data_info = {'alpha_path': alpha_path, 'fg_path': fg_path, 'bg_path': bg_path}
        print('err in ', data_info, ex)
        return data_info
    # Upscale the background (if needed) so it covers the foreground, then
    # crop it to exactly the foreground size.
    (bw, bh) = bg.size
    (w, h) = fg.size
    wratio = (float(w) / bw)
    hratio = (float(h) / bh)
    ratio = (wratio if (wratio > hratio) else hratio)
    if (ratio > 1):
        bg = bg.resize((math.ceil((bw * ratio)), math.ceil((bh * ratio))), Image.BICUBIC)
    bg = bg.crop((0, 0, w, h))
    mmcv.utils.mkdir_or_exist(osp.join(data_root, dir_prefix, 'bg'))
    bgfilename = osp.join(data_root, bg_path)
    bg.save(bgfilename, 'PNG')
    fix_png_file(osp.basename(bgfilename), osp.dirname(bgfilename))
    if composite:
        # Alpha-blend foreground over background and save the merged image.
        merged = ((fg * alpha) + (bg * (1.0 - alpha))).astype(np.uint8)
        mmcv.utils.mkdir_or_exist(osp.join(data_root, dir_prefix, 'merged'))
        mergedfilename = osp.join(data_root, merged_path)
        Image.fromarray(merged).save(mergedfilename, 'PNG')
        fix_png_file(osp.basename(mergedfilename), osp.dirname(mergedfilename))
    data_info = dict()
    data_info['alpha_path'] = alpha_path
    data_info['fg_path'] = fg_path
    data_info['bg_path'] = bg_path
    data_info['merged_path'] = merged_path
    if (trimap_dir is not None):
        # Test mode: the matching trimap must already exist on disk.
        trimap_path = osp.join(trimap_dir, name_with_postfix)
        trimap_full_path = osp.join(data_root, trimap_path)
        if (not osp.exists(trimap_full_path)):
            raise FileNotFoundError(f'{trimap_full_path} does not exist!')
        data_info['trimap_path'] = trimap_path
    return data_info
def generate_json(data_root, source_bg_dir, composite, nproc, mode):
    """Generate training json list or test json list.

    It should be noted except for `source_bg_dir`, other strings are
    incomplete relative path. When using these strings to read from or
    write to disk, a data_root is added to form a complete relative path.

    Args:
        data_root (str): path to Adobe composition-1k directory.
        source_bg_dir (str): source background directory.
        composite (bool): whether composite fg with bg and write to file.
        nproc (int): number of processers.
        mode (str): training or test mode.
    """
    if (mode == 'training'):
        dir_prefix = 'Training_set'
        fname_prefix = 'training'
        # Each training fg is paired with 100 backgrounds.
        num_bg = 100
        fg_dirs = ['Training_set/Adobe-licensed images/fg', 'Training_set/Other/fg']
        alpha_dirs = ['Training_set/Adobe-licensed images/alpha', 'Training_set/Other/alpha']
    elif (mode == 'test'):
        dir_prefix = 'Test_set'
        fname_prefix = 'test'
        # Each test fg is paired with 20 backgrounds.
        num_bg = 20
        fg_dirs = ['Test_set/Adobe-licensed images/fg']
        alpha_dirs = ['Test_set/Adobe-licensed images/alpha']
    else:
        raise KeyError(f'Unknown mode {mode}.')
    fg_names = osp.join(dir_prefix, f'{fname_prefix}_fg_names.txt')
    bg_names = osp.join(dir_prefix, f'{fname_prefix}_bg_names.txt')
    save_json_path = f'{fname_prefix}_list.json'
    fg_names = open(osp.join(data_root, fg_names)).readlines()
    bg_names = open(osp.join(data_root, bg_names)).readlines()
    # The bg list enumerates num_bg backgrounds per foreground, in order.
    assert ((len(fg_names) * num_bg) == len(bg_names))
    repeat_infos = []
    name_with_postfix = []
    for fg_name in fg_names:
        fg_name = fg_name.strip()
        alpha_path = join_first_contain(alpha_dirs, fg_name, data_root)
        fg_path = join_first_contain(fg_dirs, fg_name, data_root)
        alpha_full_path = osp.join(data_root, alpha_path)
        fg_full_path = osp.join(data_root, fg_path)
        if (not osp.exists(alpha_full_path)):
            raise FileNotFoundError(f'{alpha_full_path} does not exist!')
        if (not osp.exists(fg_full_path)):
            raise FileNotFoundError(f'{fg_full_path} does not exist!')
        fg = Image.open(fg_full_path).convert('RGB')
        # The alpha array is only needed when compositing offline.
        alpha = ((np.array(Image.open(alpha_full_path).convert('RGB')) / 255.0) if composite else None)
        repeat_infos.append((alpha, fg, alpha_path, fg_path))
        # One output name per (fg, bg) pair: '<stem>_<bg_idx><ext>'.
        for bg_idx in range(num_bg):
            name_with_postfix.append((((fg_name[:(- 4)] + '_') + str(bg_idx)) + fg_name[(- 4):]))
    # Repeat each fg record num_bg times to line up with bg_names.
    repeat_infos = chain.from_iterable((repeat(repeat_info, num_bg) for repeat_info in repeat_infos))
    source_bg_paths = []
    for bg_name in bg_names:
        bg_name = bg_name.strip()
        # COCO2017 names drop the 15-character COCO2014-style prefix.
        if ('2017' in source_bg_dir):
            bg_name = bg_name[15:]
        source_bg_paths.append(osp.join(source_bg_dir, bg_name))
    constants = repeat((data_root, composite, mode), len(bg_names))
    # Process every (name, bg, fg-record) triple in parallel.
    data_infos = mmcv.track_parallel_progress(get_data_info, list(zip(name_with_postfix, source_bg_paths, repeat_infos, constants)), nproc)
    mmcv.dump(data_infos, osp.join(data_root, save_json_path))
def parse_args():
    """Parse command-line options for the Adobe composition-1k dataset
    preparation script (underscore-style flags are normalized first)."""
    # Allow underscore-style flags before argparse sees them.
    modify_args()
    cmd = argparse.ArgumentParser(
        description='Prepare Adobe composition 1k dataset',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    cmd.add_argument('data_root', help='Adobe composition 1k dataset root')
    cmd.add_argument('coco_root', help='COCO2014 or COCO2017 dataset root')
    cmd.add_argument('voc_root', help='VOCdevkit directory root')
    cmd.add_argument(
        '--composite',
        action='store_true',
        help='whether to composite training foreground and background offline')
    cmd.add_argument(
        '--nproc', type=int, default=4, help='number of processer')
    cmd.add_argument(
        '--skip-train',
        action='store_true',
        help='whether to skip the training data')
    return cmd.parse_args()
def main():
    """Entry point: validate roots, build the training list (COCO
    backgrounds, unless skipped), then fix test pngs and build the test
    list (VOC backgrounds)."""
    args = parse_args()
    if (not osp.exists(args.data_root)):
        raise FileNotFoundError(f'{args.data_root} does not exist!')
    if (not osp.exists(args.coco_root)):
        raise FileNotFoundError(f'{args.coco_root} does not exist!')
    if (not osp.exists(args.voc_root)):
        raise FileNotFoundError(f'{args.voc_root} does not exist!')
    data_root = args.data_root
    if (not args.skip_train):
        print('preparing training data...')
        # Prefer COCO2017 backgrounds, fall back to COCO2014.
        if osp.exists(osp.join(args.coco_root, 'train2017')):
            train_source_bg_dir = osp.join(args.coco_root, 'train2017')
        elif osp.exists(osp.join(args.coco_root, 'train2014')):
            train_source_bg_dir = osp.join(args.coco_root, 'train2014')
        else:
            raise FileNotFoundError(f'Could not find train2014 or train2017 under {args.coco_root}')
        generate_json(data_root, train_source_bg_dir, args.composite, args.nproc, 'training')
        print('train done')
    # Repair test split pngs before generating the (always composited)
    # test annotation list.
    fg_dir = 'Test_set/Adobe-licensed images/fg'
    alpha_dir = 'Test_set/Adobe-licensed images/alpha'
    print('fixing png of test fg')
    fix_png_files(osp.join(data_root, fg_dir))
    print('fixing png of test alpha')
    fix_png_files(osp.join(data_root, alpha_dir))
    print('\npreparing test data...')
    test_source_bg_dir = osp.join(args.voc_root, 'VOC2012/JPEGImages')
    generate_json(data_root, test_source_bg_dir, True, args.nproc, 'test')
    print('\nDone!')
def make_lmdb(mode, data_path, lmdb_path, batch=5000, compress_level=1):
    """Create lmdb for the REDS dataset.

    Contents of lmdb. The file structure is::

        example.lmdb
        β”œβ”€β”€ data.mdb
        β”œβ”€β”€ lock.mdb
        β”œβ”€β”€ meta_info.txt

    The data.mdb and lock.mdb are standard lmdb files and you can refer to
    https://lmdb.readthedocs.io/en/release/ for more details.

    The meta_info.txt is a specified txt file to record the meta information
    of our datasets. It will be automatically created when preparing
    datasets by our provided dataset tools.
    Each line in the txt file records 1) image name (with extension),
    2) image shape, and 3) compression level, separated by a white space.

    For example, the meta information could be:
    ``000_00000000.png (720,1280,3) 1``, which means:
    1) image name (with extension): 000_00000000.png;
    2) image shape: (720,1280,3);
    3) compression level: 1

    We use the image name without extension as the lmdb key.

    Args:
        mode (str): REDS dataset mode. Choices: ['train_sharp', 'train_blur',
            'train_blur_comp', 'train_sharp_bicubic', 'train_blur_bicubic'].
            They are used to identify different reds dataset for different
            tasks. Specifically:
            'train_sharp': GT frames;
            'train_blur': Blur frames for deblur task.
            'train_blur_comp': Blur and compressed frames for deblur and
            compression task.
            'train_sharp_bicubic': Bicubic downsampled sharp frames for SR
            task.
            'train_blur_bicubic': Bicubic downsampled blur frames for SR task.
        data_path (str): Data path for reading images.
        lmdb_path (str): Lmdb save path.
        batch (int): After processing batch images, lmdb commits.
            Default: 5000.
        compress_level (int): Compress level when encoding images. Default: 1.
    """
    print(f'Create lmdb for {data_path}, save to {lmdb_path}...')
    # Expected frame size per split: full-resolution modes are 720x1280,
    # the bicubic-downsampled (x4) modes are 180x320.
    if (mode in ['train_sharp', 'train_blur', 'train_blur_comp']):
        (h_dst, w_dst) = (720, 1280)
    else:
        (h_dst, w_dst) = (180, 320)
    # Never overwrite an existing lmdb folder; bail out instead.
    if osp.exists(lmdb_path):
        print(f'Folder {lmdb_path} already exists. Exit.')
        sys.exit(1)

    print('Reading image path list ...')
    img_path_list = sorted(
        list(mmcv.scandir(data_path, suffix='png', recursive=True)))
    # Build lmdb keys as '<clip-folder>_<frame-stem>', e.g. '000_00000000'.
    keys = []
    for img_path in img_path_list:
        # Split on both '\' and '/' so Windows and POSIX paths work.
        parts = re.split('[\\\\/]', img_path)
        folder = parts[(- 2)]
        img_name = parts[(- 1)].split('.png')[0]
        keys.append(((folder + '_') + img_name))

    # Estimate the lmdb map size from the encoded size of the first image;
    # a 10x safety factor is applied below when opening the environment.
    img = mmcv.imread(osp.join(data_path, img_path_list[0]), flag='unchanged')
    (_, img_byte) = cv2.imencode(
        '.png', img, [cv2.IMWRITE_PNG_COMPRESSION, compress_level])
    data_size_per_img = img_byte.nbytes
    print('Data size per image is: ', data_size_per_img)
    data_size = (data_size_per_img * len(img_path_list))
    env = lmdb.open(lmdb_path, map_size=(data_size * 10))

    pbar = mmcv.ProgressBar(len(img_path_list))
    txn = env.begin(write=True)
    txt_file = open(osp.join(lmdb_path, 'meta_info.txt'), 'w')
    for (idx, (path, key)) in enumerate(zip(img_path_list, keys)):
        pbar.update()
        key_byte = key.encode('ascii')
        img = mmcv.imread(osp.join(data_path, path), flag='unchanged')
        (h, w, c) = img.shape
        # Re-encode every frame at the requested PNG compression level.
        (_, img_byte) = cv2.imencode(
            '.png', img, [cv2.IMWRITE_PNG_COMPRESSION, compress_level])
        assert ((h == h_dst) and (w == w_dst) and (c == 3)), \
            f'Wrong shape ({(h, w)}), should be ({(h_dst, w_dst)}).'
        txn.put(key_byte, img_byte)
        txt_file.write(f'{key}.png ({h},{w},{c}) {compress_level}\n')
        # Commit periodically so the write transaction stays bounded.
        if ((idx % batch) == 0):
            txn.commit()
            txn = env.begin(write=True)
    # Final commit for the tail that did not hit the batch boundary.
    txn.commit()
    env.close()
    txt_file.close()
    print('\nFinish writing lmdb.')
def merge_train_val(train_path, val_path):
    """Merge the train and val datasets of REDS.

    Because the EDVR uses a different validation partition, so we merge train
    and val datasets in REDS for easy switching between REDS4 partition (used
    in EDVR) and the official validation partition.

    The original val dataset (clip names from 000 to 029) are modified to
    avoid conflicts with training dataset (total 240 clips). Specifically,
    the clip names are changed to 240, 241, ... 269.

    Args:
        train_path (str): Training folder paths.
        val_path (str): Validation folder paths.
    """
    print(f'Move {val_path} to {train_path}...')
    for src_folder in glob.glob(osp.join(val_path, '*')):
        # The clip name is the last path component; offset it by 240 so the
        # moved val clips follow the 240 training clips.
        clip_index = int(re.split(r'[\\/]', src_folder)[-1])
        dst_name = f'{clip_index + 240:03d}'
        shutil.move(src_folder, osp.join(train_path, dst_name))
def generate_anno_file(root_path, file_name='meta_info_REDS_GT.txt'):
    """Generate anno file for REDS datasets from the ground-truth folder.

    Writes one line per frame for 270 clips x 100 frames, in the format
    ``<clip>/<frame>.png (720, 1280, 3)``.

    Args:
        root_path (str): Root path for REDS datasets.
        file_name (str): Name of the annotation file to write.
            Default: 'meta_info_REDS_GT.txt'.
    """
    print(f'Generate annotation files {file_name}...')
    anno_path = osp.join(root_path, file_name)
    mmcv.utils.mkdir_or_exist(osp.dirname(anno_path))
    with open(anno_path, 'w') as anno_file:
        for clip_idx in range(270):
            for frame_idx in range(100):
                anno_file.write(
                    f'{clip_idx:03d}/{frame_idx:08d}.png (720, 1280, 3)\n')
def unzip(zip_path):
    """Unzip zip files. It will scan all zip files in zip_path and return
    unzip folder names.

    Multi-part archives (``*_partN.zip``) are all extracted into the same
    folder, named after the part-less stem.

    Args:
        zip_path (str): Path for zip files.

    Returns:
        list: unzip folder names (one entry per processed zip file).
    """
    import shutil
    import zipfile

    extracted_folders = []
    for zip_name in mmcv.scandir(zip_path, suffix='zip', recursive=False):
        archive = osp.join(zip_path, zip_name)
        # '<name>_part1.zip' and '<name>_part2.zip' both extract to '<name>'.
        target = archive.replace('.zip', '').split('_part')[0]
        print(f'Unzip {archive} to {target}')
        with zipfile.ZipFile(archive, 'r') as zip_handle:
            zip_handle.extractall(target)

        # Some archives nest the payload as <target>/<type>/<name>/...;
        # flatten that layout so files sit directly under the target folder.
        data_name = osp.basename(target)
        data_type = data_name.split('_')[0]
        nested_dir = osp.join(target, data_type, data_name)
        if osp.isdir(nested_dir):
            for entry in os.listdir(nested_dir):
                shutil.move(osp.join(nested_dir, entry), target)
            shutil.rmtree(osp.join(target, data_type))

        extracted_folders.append(target)
    return extracted_folders