# Tests for super-resolution / video-frame-interpolation dataset classes
# and data-formatting transforms.
class TestSRDatasets():
    """Unit tests for the super-resolution (SR) dataset classes."""

    @classmethod
    def setup_class(cls):
        # Shared fixture: the repository's test-data directory.
        cls.data_prefix = (Path(__file__).parent.parent.parent / 'data')

    def test_base_super_resolution_dataset(self):
        """Exercise BaseSRDataset.scan_folder() and evaluate() via a toy subclass."""

        class ToyDataset(BaseSRDataset):
            'Toy dataset for testing SRDataset.'

            def __init__(self, pipeline, test_mode=False):
                super().__init__(pipeline, test_mode)

            def load_annotations(self):
                pass

            def __len__(self):
                return 2

        toy_dataset = ToyDataset(pipeline=[])
        file_paths = [osp.join('gt', 'baboon.png'), osp.join('lq', 'baboon_x4.png')]
        file_paths = [str((self.data_prefix / v)) for v in file_paths]
        # scan_folder() accepts both Path and str folder arguments.
        result = toy_dataset.scan_folder(self.data_prefix)
        assert set(file_paths).issubset(set(result))
        result = toy_dataset.scan_folder(str(self.data_prefix))
        assert set(file_paths).issubset(set(result))
        with pytest.raises(TypeError):
            # Anything other than a str/Path folder must be rejected.
            toy_dataset.scan_folder(123)
        results = [{'eval_result': {'PSNR': 20, 'SSIM': 0.6}}, {'eval_result': {'PSNR': 30, 'SSIM': 0.8}}]
        with pytest.raises(TypeError):
            # evaluate() requires a list of per-sample result dicts.
            toy_dataset.evaluate(results=5)
        with pytest.raises(AssertionError):
            # Number of results must match len(dataset) == 2.
            toy_dataset.evaluate(results=[results[0]])
        # Metrics are averaged over samples: (20+30)/2 and (0.6+0.8)/2.
        eval_result = toy_dataset.evaluate(results=results)
        assert (eval_result == {'PSNR': 25, 'SSIM': 0.7})
        with pytest.raises(AssertionError):
            # Samples with inconsistent metric keys must be rejected.
            results = [{'eval_result': {'PSNR': 20, 'SSIM': 0.6}}, {'eval_result': {'PSNR': 30}}]
            toy_dataset.evaluate(results=results)

    def test_sr_annotation_dataset(self):
        """SRAnnotationDataset should behave identically for Path and str inputs."""
        anno_file_path = (self.data_prefix / 'train.txt')
        sr_pipeline = [dict(type='LoadImageFromFile', io_backend='disk', key='lq'), dict(type='LoadImageFromFile', io_backend='disk', key='gt'), dict(type='PairedRandomCrop', gt_patch_size=128), dict(type='ImageToTensor', keys=['lq', 'gt'])]
        target_keys = ['lq_path', 'gt_path', 'scale', 'lq', 'lq_ori_shape', 'gt', 'gt_ori_shape']
        # Case 1: folders and annotation file given as Path objects.
        sr_annotation_dataset = SRAnnotationDataset(lq_folder=(self.data_prefix / 'lq'), gt_folder=(self.data_prefix / 'gt'), ann_file=anno_file_path, pipeline=sr_pipeline, scale=4, filename_tmpl='{}_x4')
        data_infos = sr_annotation_dataset.data_infos
        assert (data_infos == [dict(lq_path=str(((self.data_prefix / 'lq') / 'baboon_x4.png')), gt_path=str(((self.data_prefix / 'gt') / 'baboon.png')))])
        result = sr_annotation_dataset[0]
        assert (len(sr_annotation_dataset) == 1)
        assert assert_dict_has_keys(result, target_keys)
        # Case 2: the same inputs given as plain strings.
        sr_annotation_dataset = SRAnnotationDataset(lq_folder=str((self.data_prefix / 'lq')), gt_folder=str((self.data_prefix / 'gt')), ann_file=str(anno_file_path), pipeline=sr_pipeline, scale=4, filename_tmpl='{}_x4')
        data_infos = sr_annotation_dataset.data_infos
        assert (data_infos == [dict(lq_path=str(((self.data_prefix / 'lq') / 'baboon_x4.png')), gt_path=str(((self.data_prefix / 'gt') / 'baboon.png')))])
        result = sr_annotation_dataset[0]
        assert (len(sr_annotation_dataset) == 1)
        assert assert_dict_has_keys(result, target_keys)

    def test_sr_folder_dataset(self):
        """SRFolderDataset should pair lq/gt files and accept Path or str folders."""
        sr_pipeline = [dict(type='LoadImageFromFile', io_backend='disk', key='lq'), dict(type='LoadImageFromFile', io_backend='disk', key='gt'), dict(type='PairedRandomCrop', gt_patch_size=128), dict(type='ImageToTensor', keys=['lq', 'gt'])]
        target_keys = ['lq_path', 'gt_path', 'scale', 'lq', 'gt']
        lq_folder = (self.data_prefix / 'lq')
        gt_folder = (self.data_prefix / 'gt')
        filename_tmpl = '{}_x4'
        # Case 1: folders given as Path objects.
        sr_folder_dataset = SRFolderDataset(lq_folder=lq_folder, gt_folder=gt_folder, pipeline=sr_pipeline, scale=4, filename_tmpl=filename_tmpl)
        data_infos = sr_folder_dataset.data_infos
        assert (data_infos == [dict(lq_path=str((lq_folder / 'baboon_x4.png')), gt_path=str((gt_folder / 'baboon.png')))])
        result = sr_folder_dataset[0]
        assert (len(sr_folder_dataset) == 1)
        assert assert_dict_has_keys(result, target_keys)
        # Case 2: folders given as strings.
        sr_folder_dataset = SRFolderDataset(lq_folder=str(lq_folder), gt_folder=str(gt_folder), pipeline=sr_pipeline, scale=4, filename_tmpl=filename_tmpl)
        data_infos = sr_folder_dataset.data_infos
        assert (data_infos == [dict(lq_path=str((lq_folder / 'baboon_x4.png')), gt_path=str((gt_folder / 'baboon.png')))])
        result = sr_folder_dataset[0]
        assert (len(sr_folder_dataset) == 1)
        assert assert_dict_has_keys(result, target_keys)

    def test_sr_folder_gt_dataset(self):
        """SRFolderGTDataset lists ground-truth images only."""
        sr_pipeline = [dict(type='LoadImageFromFile', io_backend='disk', key='gt'), dict(type='ImageToTensor', keys=['gt'])]
        target_keys = ['gt_path', 'gt']
        gt_folder = (self.data_prefix / 'gt')
        filename_tmpl = '{}_x4'
        # Case 1: folder given as a Path object.
        sr_folder_dataset = SRFolderGTDataset(gt_folder=gt_folder, pipeline=sr_pipeline, scale=4, filename_tmpl=filename_tmpl)
        data_infos = sr_folder_dataset.data_infos
        assert (data_infos == [dict(gt_path=str((gt_folder / 'baboon.png')))])
        result = sr_folder_dataset[0]
        assert (len(sr_folder_dataset) == 1)
        assert assert_dict_has_keys(result, target_keys)
        # Case 2: folder given as a string.
        sr_folder_dataset = SRFolderGTDataset(gt_folder=str(gt_folder), pipeline=sr_pipeline, scale=4, filename_tmpl=filename_tmpl)
        data_infos = sr_folder_dataset.data_infos
        assert (data_infos == [dict(gt_path=str((gt_folder / 'baboon.png')))])
        result = sr_folder_dataset[0]
        assert (len(sr_folder_dataset) == 1)
        assert assert_dict_has_keys(result, target_keys)

    def test_sr_folder_ref_dataset(self):
        """SRFolderRefDataset pairs lq/gt with a reference folder and validates folders."""
        sr_pipeline = [dict(type='LoadImageFromFile', io_backend='disk', key='lq'), dict(type='LoadImageFromFile', io_backend='disk', key='gt'), dict(type='LoadImageFromFile', io_backend='disk', key='ref'), dict(type='PairedRandomCrop', gt_patch_size=128), dict(type='ImageToTensor', keys=['lq', 'gt', 'ref'])]
        target_keys = ['lq_path', 'gt_path', 'ref_path', 'scale', 'lq', 'gt', 'ref']
        lq_folder = (self.data_prefix / 'lq')
        gt_folder = (self.data_prefix / 'gt')
        ref_folder = (self.data_prefix / 'gt')
        filename_tmpl = '{}_x4'
        sr_folder_ref_dataset = SRFolderRefDataset(lq_folder=lq_folder, gt_folder=gt_folder, ref_folder=str(ref_folder), pipeline=sr_pipeline, scale=4, filename_tmpl_lq=filename_tmpl)
        data_infos = sr_folder_ref_dataset.data_infos
        assert (data_infos == [dict(lq_path=str((lq_folder / 'baboon_x4.png')), gt_path=str((gt_folder / 'baboon.png')), ref_path=str((ref_folder / 'baboon.png')))])
        result = sr_folder_ref_dataset[0]
        assert (len(sr_folder_ref_dataset) == 1)
        assert assert_dict_has_keys(result, target_keys)
        sr_folder_ref_dataset = SRFolderRefDataset(lq_folder=str(lq_folder), gt_folder=str(gt_folder), ref_folder=str(ref_folder), pipeline=sr_pipeline, scale=4, filename_tmpl_lq=filename_tmpl)
        data_infos = sr_folder_ref_dataset.data_infos
        assert (data_infos == [dict(lq_path=str((lq_folder / 'baboon_x4.png')), gt_path=str((gt_folder / 'baboon.png')), ref_path=str((ref_folder / 'baboon.png')))])
        result = sr_folder_ref_dataset[0]
        assert (len(sr_folder_ref_dataset) == 1)
        assert assert_dict_has_keys(result, target_keys)
        # Folders whose contents do not match the reference folder must fail
        # the dataset's consistency checks.
        with pytest.raises(AssertionError):
            sr_folder_ref_dataset = SRFolderRefDataset(lq_folder=str(lq_folder), gt_folder=str((self.data_prefix / 'image')), ref_folder=str(ref_folder), pipeline=sr_pipeline, scale=4, filename_tmpl_lq=filename_tmpl)
        with pytest.raises(AssertionError):
            sr_folder_ref_dataset = SRFolderRefDataset(lq_folder=str((self.data_prefix / 'image')), gt_folder=str(gt_folder), ref_folder=str(ref_folder), pipeline=sr_pipeline, scale=4, filename_tmpl_lq=filename_tmpl)
        with pytest.raises(AssertionError):
            sr_folder_ref_dataset = SRFolderRefDataset(lq_folder=str(lq_folder), gt_folder=str((self.data_prefix / 'bg')), ref_folder=str(ref_folder), pipeline=sr_pipeline, scale=4, filename_tmpl_lq=filename_tmpl)
        with pytest.raises(AssertionError):
            sr_folder_ref_dataset = SRFolderRefDataset(lq_folder=str((self.data_prefix / 'bg')), gt_folder=str(gt_folder), ref_folder=str(ref_folder), pipeline=sr_pipeline, scale=4, filename_tmpl_lq=filename_tmpl)
        with pytest.raises(AssertionError):
            # At least one of lq_folder / gt_folder must be given.
            sr_folder_ref_dataset = SRFolderRefDataset(lq_folder=None, gt_folder=None, ref_folder=str(ref_folder), pipeline=sr_pipeline, scale=4, filename_tmpl_lq=filename_tmpl)

    def test_sr_landmark_dataset(self):
        """SRFacialLandmarkDataset loads bbox/shape/landmark info from a .npy file."""
        sr_pipeline = [dict(type='LoadImageFromFile', io_backend='disk', key='gt', flag='color', channel_order='rgb', backend='cv2')]
        target_keys = ['gt_path', 'bbox', 'shape', 'landmark']
        gt_folder = (self.data_prefix / 'face')
        ann_file = (self.data_prefix / 'facemark_ann.npy')
        # Case 1: Path arguments.
        sr_landmark_dataset = SRFacialLandmarkDataset(gt_folder=gt_folder, ann_file=ann_file, pipeline=sr_pipeline, scale=4)
        data_infos = sr_landmark_dataset.data_infos
        assert (len(data_infos) == 1)
        result = sr_landmark_dataset[0]
        assert (len(sr_landmark_dataset) == 1)
        assert assert_dict_has_keys(result, target_keys)
        # Case 2: string arguments.
        sr_landmark_dataset = SRFacialLandmarkDataset(gt_folder=str(gt_folder), ann_file=str(ann_file), pipeline=sr_pipeline, scale=4)
        data_infos = sr_landmark_dataset.data_infos
        assert (len(data_infos) == 1)
        result = sr_landmark_dataset[0]
        assert (len(sr_landmark_dataset) == 1)
        assert assert_dict_has_keys(result, target_keys)

    def test_sr_lmdb_dataset(self):
        """SRLmdbDataset reads keys from .lmdb folders and rejects non-LMDB paths."""
        lq_lmdb_folder = (self.data_prefix / 'lq.lmdb')
        sr_pipeline = [dict(type='LoadImageFromFile', io_backend='lmdb', key='lq', db_path=lq_lmdb_folder), dict(type='LoadImageFromFile', io_backend='lmdb', key='gt', db_path=lq_lmdb_folder), dict(type='ImageToTensor', keys=['lq', 'gt'])]
        target_keys = ['lq_path', 'gt_path', 'scale', 'lq', 'lq_ori_shape', 'gt', 'gt_ori_shape']
        sr_lmdb_dataset = SRLmdbDataset(lq_folder=lq_lmdb_folder, gt_folder=lq_lmdb_folder, pipeline=sr_pipeline, scale=1)
        data_infos = sr_lmdb_dataset.data_infos
        # Paths are LMDB keys, not filesystem paths.
        assert (data_infos == [dict(lq_path='baboon', gt_path='baboon')])
        result = sr_lmdb_dataset[0]
        assert (len(sr_lmdb_dataset) == 1)
        assert assert_dict_has_keys(result, target_keys)
        # String folder argument works too.
        sr_lmdb_dataset = SRLmdbDataset(lq_folder=str(lq_lmdb_folder), gt_folder=lq_lmdb_folder, pipeline=sr_pipeline, scale=1)
        data_infos = sr_lmdb_dataset.data_infos
        assert (data_infos == [dict(lq_path='baboon', gt_path='baboon')])
        result = sr_lmdb_dataset[0]
        assert (len(sr_lmdb_dataset) == 1)
        assert assert_dict_has_keys(result, target_keys)
        # Folders that do not end with '.lmdb' are rejected (Path or str alike).
        with pytest.raises(ValueError):
            sr_lmdb_dataset = SRLmdbDataset(lq_folder=self.data_prefix, gt_folder=lq_lmdb_folder, pipeline=sr_pipeline, scale=1)
        with pytest.raises(ValueError):
            sr_lmdb_dataset = SRLmdbDataset(lq_folder=str(self.data_prefix), gt_folder=lq_lmdb_folder, pipeline=sr_pipeline, scale=1)
        with pytest.raises(ValueError):
            sr_lmdb_dataset = SRLmdbDataset(lq_folder=lq_lmdb_folder, gt_folder=self.data_prefix, pipeline=sr_pipeline, scale=1)
        with pytest.raises(ValueError):
            sr_lmdb_dataset = SRLmdbDataset(lq_folder=lq_lmdb_folder, gt_folder=str(self.data_prefix), pipeline=sr_pipeline, scale=1)
|
def test_reds_dataset():
    """Test SRREDSDataset train/test splits for both val partitions."""
    root_path = (Path(__file__).parent.parent.parent / 'data')
    # Fake annotation file with three clips: 000, 001 and 250.
    txt_content = '000/00000001.png (720, 1280, 3)\n001/00000001.png (720, 1280, 3)\n250/00000001.png (720, 1280, 3)\n'
    mocked_open_function = mock_open(read_data=txt_content)
    with patch('builtins.open', mocked_open_function):
        # Training split, 'official' partition: clip 250 is held out.
        reds_dataset = SRREDSDataset(lq_folder=root_path, gt_folder=root_path, ann_file='fake_ann_file', num_input_frames=5, pipeline=[], scale=4, val_partition='official', test_mode=False)
        assert (reds_dataset.data_infos == [dict(lq_path=str(root_path), gt_path=str(root_path), key=osp.join('000', '00000001'), max_frame_num=100, num_input_frames=5), dict(lq_path=str(root_path), gt_path=str(root_path), key=osp.join('001', '00000001'), max_frame_num=100, num_input_frames=5)])
        # Training split, 'REDS4' partition: clip 000 is held out.
        reds_dataset = SRREDSDataset(lq_folder=root_path, gt_folder=root_path, ann_file='fake_ann_file', num_input_frames=5, pipeline=[], scale=4, val_partition='REDS4', test_mode=False)
        assert (reds_dataset.data_infos == [dict(lq_path=str(root_path), gt_path=str(root_path), key=osp.join('001', '00000001'), max_frame_num=100, num_input_frames=5), dict(lq_path=str(root_path), gt_path=str(root_path), key=osp.join('250', '00000001'), max_frame_num=100, num_input_frames=5)])
        with pytest.raises(ValueError):
            # Unknown val_partition name must be rejected.
            reds_dataset = SRREDSDataset(lq_folder=root_path, gt_folder=root_path, ann_file='fake_ann_file', num_input_frames=5, pipeline=[], scale=4, val_partition='wrong_val_partition', test_mode=False)
        with pytest.raises(AssertionError):
            # num_input_frames must be odd (6 is invalid).
            reds_dataset = SRREDSDataset(lq_folder=root_path, gt_folder=root_path, ann_file='fake_ann_file', num_input_frames=6, pipeline=[], scale=4, val_partition='wrong_val_partition', test_mode=False)
        # Test mode keeps only the val-partition clips.
        reds_dataset = SRREDSDataset(lq_folder=root_path, gt_folder=root_path, ann_file='fake_ann_file', num_input_frames=5, pipeline=[], scale=4, val_partition='official', test_mode=True)
        assert (reds_dataset.data_infos == [dict(lq_path=str(root_path), gt_path=str(root_path), key=osp.join('250', '00000001'), max_frame_num=100, num_input_frames=5)])
        reds_dataset = SRREDSDataset(lq_folder=root_path, gt_folder=root_path, ann_file='fake_ann_file', num_input_frames=5, pipeline=[], scale=4, val_partition='REDS4', test_mode=True)
        assert (reds_dataset.data_infos == [dict(lq_path=str(root_path), gt_path=str(root_path), key=osp.join('000', '00000001'), max_frame_num=100, num_input_frames=5)])
|
def test_vimeo90k_dataset():
    """SRVimeo90KDataset builds one 7-frame entry per annotation line."""
    data_root = Path(__file__).parent.parent.parent / 'data'
    txt_content = '00001/0266 (256, 448, 3)\n00002/0268 (256, 448, 3)\n'
    mocked_open_function = mock_open(read_data=txt_content)

    def clip_paths(seq, clip):
        # Build the expected 7 lq frame paths and the single gt (center) frame.
        folder = data_root / seq / clip
        lq = [str(folder / f'im{idx}.png') for idx in range(1, 8)]
        gt = [str(folder / 'im4.png')]
        return lq, gt

    lq_paths_1, gt_paths_1 = clip_paths('00001', '0266')
    lq_paths_2, gt_paths_2 = clip_paths('00002', '0268')
    with patch('builtins.open', mocked_open_function):
        vimeo90k_dataset = SRVimeo90KDataset(lq_folder=data_root, gt_folder=data_root, ann_file='fake_ann_file', num_input_frames=7, pipeline=[], scale=4, test_mode=False)
        expected = [
            dict(lq_path=lq_paths_1, gt_path=gt_paths_1, key=osp.join('00001', '0266')),
            dict(lq_path=lq_paths_2, gt_path=gt_paths_2, key=osp.join('00002', '0268')),
        ]
        assert vimeo90k_dataset.data_infos == expected
        with pytest.raises(AssertionError):
            # An even num_input_frames is invalid.
            SRVimeo90KDataset(lq_folder=data_root, gt_folder=data_root, ann_file='fake_ann_file', num_input_frames=6, pipeline=[], scale=4, test_mode=False)
|
def test_vid4_dataset():
    """Test SRVid4Dataset frame enumeration and both metric-averaging modes."""
    root_path = (Path(__file__).parent.parent.parent / 'data')
    # Fake annotation: clip 'calendar' has 1 frame, clip 'city' has 2.
    txt_content = 'calendar 1 (320,480,3)\ncity 2 (320,480,3)\n'
    mocked_open_function = mock_open(read_data=txt_content)
    with patch('builtins.open', mocked_open_function):
        vid4_dataset = SRVid4Dataset(lq_folder=(root_path / 'lq'), gt_folder=(root_path / 'gt'), ann_file='fake_ann_file', num_input_frames=5, pipeline=[], scale=4, test_mode=False, metric_average_mode='clip', filename_tmpl='{:08d}')
        # One entry per frame of each clip.
        assert (vid4_dataset.data_infos == [dict(lq_path=str((root_path / 'lq')), gt_path=str((root_path / 'gt')), key=osp.join('calendar', '00000000'), num_input_frames=5, max_frame_num=1), dict(lq_path=str((root_path / 'lq')), gt_path=str((root_path / 'gt')), key=osp.join('city', '00000000'), num_input_frames=5, max_frame_num=2), dict(lq_path=str((root_path / 'lq')), gt_path=str((root_path / 'gt')), key=osp.join('city', '00000001'), num_input_frames=5, max_frame_num=2)])
        results = [{'eval_result': {'PSNR': 21, 'SSIM': 0.75}}, {'eval_result': {'PSNR': 22, 'SSIM': 0.8}}, {'eval_result': {'PSNR': 24, 'SSIM': 0.9}}]
        # 'clip' mode: per-clip means first — calendar=21, city=(22+24)/2=23 — then
        # mean over clips: (21+23)/2 == 22.
        eval_result = vid4_dataset.evaluate(results)
        np.testing.assert_almost_equal(eval_result['PSNR'], 22)
        np.testing.assert_almost_equal(eval_result['SSIM'], 0.8)
        # 'all' mode: plain mean over every frame: (21+22+24)/3.
        vid4_dataset = SRVid4Dataset(lq_folder=(root_path / 'lq'), gt_folder=(root_path / 'gt'), ann_file='fake_ann_file', num_input_frames=5, pipeline=[], scale=4, test_mode=False, metric_average_mode='all', filename_tmpl='{:08d}')
        eval_result = vid4_dataset.evaluate(results)
        np.testing.assert_almost_equal(eval_result['PSNR'], 22.3333333)
        np.testing.assert_almost_equal(eval_result['SSIM'], 0.81666666)
        with pytest.raises(AssertionError):
            # num_input_frames must be odd.
            SRVid4Dataset(lq_folder=root_path, gt_folder=root_path, ann_file='fake_ann_file', num_input_frames=6, pipeline=[], scale=4, test_mode=False)
        with pytest.raises(ValueError):
            # Unknown metric_average_mode.
            SRVid4Dataset(lq_folder=root_path, gt_folder=root_path, ann_file='fake_ann_file', num_input_frames=5, pipeline=[], scale=4, metric_average_mode='abc', test_mode=False)
        with pytest.raises(TypeError):
            # evaluate() requires a list of per-sample results.
            vid4_dataset.evaluate(results=5)
        with pytest.raises(AssertionError):
            # Number of results must match the dataset length.
            vid4_dataset.evaluate(results=[results[0]])
|
def test_sr_reds_multiple_gt_dataset():
    """Test SRREDSMultipleGTDataset splits, the repeat option and validation."""
    root_path = (Path(__file__).parent.parent.parent / 'data')
    # Training split, 'official' partition: 240 training clips remain.
    reds_dataset = SRREDSMultipleGTDataset(lq_folder=root_path, gt_folder=root_path, num_input_frames=15, pipeline=[], scale=4, val_partition='official', test_mode=False)
    assert (len(reds_dataset.data_infos) == 240)
    assert (reds_dataset.data_infos[0] == dict(lq_path=str(root_path), gt_path=str(root_path), key='000', sequence_length=100, num_input_frames=15))
    # Training split, 'REDS4' partition: 266 clips remain; clip 000 is held out.
    reds_dataset = SRREDSMultipleGTDataset(lq_folder=root_path, gt_folder=root_path, num_input_frames=20, pipeline=[], scale=4, val_partition='REDS4', test_mode=False)
    assert (len(reds_dataset.data_infos) == 266)
    assert (reds_dataset.data_infos[0] == dict(lq_path=str(root_path), gt_path=str(root_path), key='001', sequence_length=100, num_input_frames=20))
    with pytest.raises(ValueError):
        # Unknown val_partition name must be rejected.
        reds_dataset = SRREDSMultipleGTDataset(lq_folder=root_path, gt_folder=root_path, num_input_frames=5, pipeline=[], scale=4, val_partition='wrong_val_partition', test_mode=False)
    # Test mode keeps only the val-partition clips (30 for 'official').
    reds_dataset = SRREDSMultipleGTDataset(lq_folder=root_path, gt_folder=root_path, num_input_frames=5, pipeline=[], scale=4, val_partition='official', test_mode=True)
    assert (len(reds_dataset.data_infos) == 30)
    assert (reds_dataset.data_infos[0] == dict(lq_path=str(root_path), gt_path=str(root_path), key='240', sequence_length=100, num_input_frames=5))
    # 'REDS4' test partition contains 4 clips.
    reds_dataset = SRREDSMultipleGTDataset(lq_folder=root_path, gt_folder=root_path, num_input_frames=5, pipeline=[], scale=4, val_partition='REDS4', test_mode=True)
    assert (len(reds_dataset.data_infos) == 4)
    assert (reds_dataset.data_infos[1] == dict(lq_path=str(root_path), gt_path=str(root_path), key='011', sequence_length=100, num_input_frames=5))
    # repeat=2 duplicates the data_infos list (4 clips -> 8 entries).
    reds_dataset = SRREDSMultipleGTDataset(lq_folder=root_path, gt_folder=root_path, num_input_frames=5, pipeline=[], scale=4, val_partition='REDS4', repeat=2, test_mode=True)
    assert (len(reds_dataset.data_infos) == 8)
    assert (reds_dataset.data_infos[5] == dict(lq_path=str(root_path), gt_path=str(root_path), key='011', sequence_length=100, num_input_frames=5))
    with pytest.raises(TypeError):
        # repeat must be an integer.
        SRREDSMultipleGTDataset(lq_folder=root_path, gt_folder=root_path, num_input_frames=5, pipeline=[], scale=4, val_partition='REDS4', repeat=1.5, test_mode=True)
|
def test_sr_vimeo90k_mutiple_gt_dataset():
    """SRVimeo90KMultipleGTDataset lists every frame as both lq and gt."""
    data_root = Path(__file__).parent.parent.parent / 'data' / 'vimeo90k'
    txt_content = '00001/0266 (256,448,3)\n'
    mocked_open_function = mock_open(read_data=txt_content)
    num_input_frames = 5
    clip_dir = data_root / '00001' / '0266'
    # lq and gt share the same frame list in the multiple-GT setting.
    lq_paths = [str(clip_dir / f'im{idx}.png') for idx in range(1, num_input_frames + 1)]
    gt_paths = list(lq_paths)
    with patch('builtins.open', mocked_open_function):
        vimeo90k_dataset = SRVimeo90KMultipleGTDataset(lq_folder=data_root, gt_folder=data_root, ann_file='fake_ann_file', pipeline=[], scale=4, num_input_frames=num_input_frames, test_mode=False)
        expected = [dict(lq_path=lq_paths, gt_path=gt_paths, key=osp.join('00001', '0266'))]
        assert vimeo90k_dataset.data_infos == expected
|
def test_sr_test_multiple_gt_dataset():
    """SRTestMultipleGTDataset discovers sequences and their lengths from folders."""
    data_root = Path(__file__).parent.parent.parent / 'data' / 'test_multiple_gt'
    dataset = SRTestMultipleGTDataset(lq_folder=data_root, gt_folder=data_root, pipeline=[], scale=4, test_mode=True)
    expected = [
        dict(lq_path=str(data_root), gt_path=str(data_root), key='sequence_1', sequence_length=2),
        dict(lq_path=str(data_root), gt_path=str(data_root), key='sequence_2', sequence_length=1),
    ]
    assert dataset.data_infos == expected
|
def test_sr_folder_multiple_gt_dataset():
    """Test SRFolderMultipleGTDataset with and without an annotation file."""
    root_path = ((Path(__file__).parent.parent.parent / 'data') / 'test_multiple_gt')
    # Without ann_file: sequences are discovered from the folder; by default
    # num_input_frames equals each sequence's length.
    test_dataset = SRFolderMultipleGTDataset(lq_folder=root_path, gt_folder=root_path, pipeline=[], scale=4, test_mode=True)
    assert (test_dataset.data_infos == [dict(lq_path=str(root_path), gt_path=str(root_path), key='sequence_1', num_input_frames=2, sequence_length=2), dict(lq_path=str(root_path), gt_path=str(root_path), key='sequence_2', num_input_frames=1, sequence_length=1)])
    # An explicit num_input_frames caps the frames used per sequence.
    test_dataset = SRFolderMultipleGTDataset(lq_folder=root_path, gt_folder=root_path, pipeline=[], scale=4, num_input_frames=1, test_mode=True)
    assert (test_dataset.data_infos == [dict(lq_path=str(root_path), gt_path=str(root_path), key='sequence_1', num_input_frames=1, sequence_length=2), dict(lq_path=str(root_path), gt_path=str(root_path), key='sequence_2', num_input_frames=1, sequence_length=1)])
    # With ann_file: only the listed sequences are used.
    txt_content = 'sequence_1 2\n'
    mocked_open_function = mock_open(read_data=txt_content)
    with patch('builtins.open', mocked_open_function):
        test_dataset = SRFolderMultipleGTDataset(lq_folder=root_path, gt_folder=root_path, pipeline=[], scale=4, ann_file='fake_ann_file', test_mode=True)
        assert (test_dataset.data_infos == [dict(lq_path=str(root_path), gt_path=str(root_path), key='sequence_1', num_input_frames=2, sequence_length=2)])
        test_dataset = SRFolderMultipleGTDataset(lq_folder=root_path, gt_folder=root_path, pipeline=[], scale=4, ann_file='fake_ann_file', num_input_frames=1, test_mode=True)
        assert (test_dataset.data_infos == [dict(lq_path=str(root_path), gt_path=str(root_path), key='sequence_1', num_input_frames=1, sequence_length=2)])
    with pytest.raises(ValueError):
        # num_input_frames must be a positive integer.
        SRFolderMultipleGTDataset(lq_folder=root_path, gt_folder=root_path, pipeline=[], scale=4, num_input_frames=(- 1), test_mode=True)
|
def test_sr_folder_video_dataset():
    """Test SRFolderVideoDataset frame listing, ann_file mode and evaluation."""
    root_path = ((Path(__file__).parent.parent.parent / 'data') / 'test_multiple_gt')
    # One entry per frame of each discovered sequence.
    test_dataset = SRFolderVideoDataset(lq_folder=root_path, gt_folder=root_path, num_input_frames=5, pipeline=[], scale=4, test_mode=True)
    assert (test_dataset.data_infos == [dict(lq_path=str(root_path), gt_path=str(root_path), key=osp.join('sequence_1', '00000000'), num_input_frames=5, max_frame_num=2), dict(lq_path=str(root_path), gt_path=str(root_path), key=osp.join('sequence_1', '00000001'), num_input_frames=5, max_frame_num=2), dict(lq_path=str(root_path), gt_path=str(root_path), key=osp.join('sequence_2', '00000000'), num_input_frames=5, max_frame_num=1)])
    # With an annotation file, only the listed frames are used.
    txt_content = 'sequence_1/00000000 2\n'
    mocked_open_function = mock_open(read_data=txt_content)
    with patch('builtins.open', mocked_open_function):
        test_dataset = SRFolderVideoDataset(lq_folder=root_path, gt_folder=root_path, num_input_frames=5, pipeline=[], scale=4, ann_file='fake_ann_file', test_mode=True)
        assert (test_dataset.data_infos == [dict(lq_path=str(root_path), gt_path=str(root_path), key=osp.join('sequence_1', '00000000'), num_input_frames=5, max_frame_num=2)])
    # 'clip' mode: per-sequence means first — ((21+23)/2 + 24)/2 == 23.
    test_dataset = SRFolderVideoDataset(lq_folder=root_path, gt_folder=root_path, num_input_frames=5, pipeline=[], scale=4, metric_average_mode='clip', test_mode=True)
    results = [{'eval_result': {'PSNR': 21, 'SSIM': 0.75}}, {'eval_result': {'PSNR': 23, 'SSIM': 0.85}}, {'eval_result': {'PSNR': 24, 'SSIM': 0.9}}]
    eval_result = test_dataset.evaluate(results)
    np.testing.assert_almost_equal(eval_result['PSNR'], 23)
    np.testing.assert_almost_equal(eval_result['SSIM'], 0.85)
    # 'all' mode: plain mean over every frame: (21+23+24)/3.
    test_dataset = SRFolderVideoDataset(lq_folder=root_path, gt_folder=root_path, num_input_frames=5, pipeline=[], scale=4, metric_average_mode='all', test_mode=True)
    eval_result = test_dataset.evaluate(results)
    np.testing.assert_almost_equal(eval_result['PSNR'], 22.6666666)
    np.testing.assert_almost_equal(eval_result['SSIM'], 0.83333333)
    with pytest.raises(AssertionError):
        # num_input_frames must be odd.
        SRFolderVideoDataset(lq_folder=root_path, gt_folder=root_path, num_input_frames=6, pipeline=[], scale=4, test_mode=True)
    with pytest.raises(ValueError):
        # Unknown metric_average_mode.
        SRFolderVideoDataset(lq_folder=root_path, gt_folder=root_path, num_input_frames=5, pipeline=[], scale=4, metric_average_mode='abc', test_mode=False)
    with pytest.raises(TypeError):
        # evaluate() requires a list of per-sample results.
        test_dataset.evaluate(results=5)
    with pytest.raises(AssertionError):
        # Number of results must match the dataset length.
        test_dataset.evaluate(results=[results[0]])
|
class TestVFIDataset():
    """Unit tests for the video-frame-interpolation (VFI) dataset classes."""

    # Pipeline that loads a list of input frames plus one target frame.
    pipeline = [dict(type='LoadImageFromFileList', io_backend='disk', key='inputs'), dict(type='LoadImageFromFile', io_backend='disk', key='target'), dict(type='FramesToTensor', keys=['inputs']), dict(type='ImageToTensor', keys=['target'])]
    folder = 'tests/data/vimeo90k'
    ann_file = 'tests/data/vimeo90k/vfi_ann.txt'

    def test_base_vfi_dataset(self):
        """Exercise BaseVFIDataset attributes, __getitem__ and evaluate()."""
        dataset = BaseVFIDataset(self.pipeline, self.folder, self.ann_file)
        # __init__ is invoked again explicitly — presumably to cover the base
        # initializer directly; TODO confirm this is intentional.
        dataset.__init__(self.pipeline, self.folder, self.ann_file)
        dataset.load_annotations()
        assert (dataset.folder == self.folder)
        assert (dataset.ann_file == self.ann_file)
        # Inject a single annotation entry so __getitem__ has data to serve.
        setattr(dataset, 'data_infos', [dict(inputs_path=['tests/data/vimeo90k/00001/0266/im1.png', 'tests/data/vimeo90k/00001/0266/im3.png'], target_path='tests/data/vimeo90k/00001/0266/im2.png', key='00001/0266')])
        data = dataset.__getitem__(0)
        assert_dict_has_keys(data, ['folder', 'ann_file'])
        results = [dict(eval_result=dict(psnr=1.1, ssim=0.3))]
        eval_result = dataset.evaluate(results)
        assert_dict_has_keys(eval_result, ['psnr', 'ssim'])
        with pytest.raises(TypeError):
            # A bare dict (not a list) must be rejected.
            dataset.evaluate(results[0])
        with pytest.raises(AssertionError):
            # Length of results must match the dataset length.
            dataset.evaluate((results + results))

    def test_vfi_vimeo90k_dataset(self):
        """VFIVimeo90KDataset built via the registry exposes the expected info keys."""
        dataset_cfg = dict(type='VFIVimeo90KDataset', folder=self.folder, ann_file=self.ann_file, pipeline=self.pipeline)
        dataset = build_dataset(dataset_cfg)
        data_infos = dataset.data_infos[0]
        assert_dict_has_keys(data_infos, ['inputs_path', 'target_path', 'key'])
|
def test_vfi_dataset():
    """Run the TestVFIDataset checks as a plain test function."""
    suite = TestVFIDataset()
    suite.test_base_vfi_dataset()
    suite.test_vfi_vimeo90k_dataset()
|
def check_keys_equal(result_keys, target_keys):
    """Return True when the two key collections contain exactly the same elements."""
    expected = set(target_keys)
    actual = set(result_keys)
    return expected == actual
|
def test_compose():
    """Test Compose: chaining transforms, None propagation and repr."""
    with pytest.raises(TypeError):
        # The pipeline must be a sequence of transforms/configs, not a str.
        Compose('LoadAlpha')
    target_keys = ['img', 'meta']
    img = np.random.randn(256, 256, 3)
    # 'abandoned_key' is not collected and should be dropped by Collect.
    results = dict(img=img, abandoned_key=None, img_name='test_image.png')
    test_pipeline = [dict(type='Collect', keys=['img'], meta_keys=['img_name']), dict(type='ImageToTensor', keys=['img'])]
    compose = Compose(test_pipeline)
    compose_results = compose(results)
    assert check_keys_equal(compose_results.keys(), target_keys)
    assert check_keys_equal(compose_results['meta'].data.keys(), ['img_name'])
    # A None input short-circuits the pipeline and returns None.
    results = None
    image_to_tensor = ImageToTensor(keys=[])
    test_pipeline = [image_to_tensor]
    compose = Compose(test_pipeline)
    compose_results = compose(results)
    assert (compose_results is None)
    # repr lists each transform on its own indented line.
    assert (repr(compose) == (compose.__class__.__name__ + f'''(
    {image_to_tensor}
)'''))
|
def check_keys_contain(result_keys, target_keys):
    """Return True when every element of target_keys appears in result_keys."""
    available = set(result_keys)
    return all(key in available for key in target_keys)
|
def test_to_tensor():
    """Test ToTensor conversion, error handling for str values, and repr."""
    to_tensor = ToTensor(['str'])
    with pytest.raises(TypeError):
        # Strings cannot be converted to tensors.
        results = dict(str='0')
        to_tensor(results)
    target_keys = ['tensor', 'numpy', 'sequence', 'int', 'float']
    to_tensor = ToTensor(target_keys)
    ori_results = dict(tensor=torch.randn(2, 3), numpy=np.random.randn(2, 3), sequence=list(range(10)), int=1, float=0.1)
    results = to_tensor(ori_results)
    assert check_keys_contain(results.keys(), target_keys)
    # NOTE: ToTensor appears to convert the dict in place, so ori_results
    # holds the converted tensors here too (otherwise torch.equal against
    # the original plain values would fail).
    for key in target_keys:
        assert isinstance(results[key], torch.Tensor)
        assert torch.equal(results[key].data, ori_results[key])
    # Keys not listed in target_keys (here 'str') are left untouched.
    ori_results = dict(tensor=torch.randn(2, 3), numpy=np.random.randn(2, 3), sequence=list(range(10)), int=1, float=0.1, str='test')
    results = to_tensor(ori_results)
    assert check_keys_contain(results.keys(), target_keys)
    for key in target_keys:
        assert isinstance(results[key], torch.Tensor)
        assert torch.equal(results[key].data, ori_results[key])
    assert (repr(to_tensor) == (to_tensor.__class__.__name__ + f'(keys={target_keys})'))
|
def test_image_to_tensor():
    """Test ImageToTensor channel handling, dtype conversion and repr."""
    ori_results = dict(img=np.random.randn(256, 256, 3))
    keys = ['img']
    to_float32 = False
    image_to_tensor = ImageToTensor(keys)
    results = image_to_tensor(ori_results)
    # HWC input becomes a CHW tensor.
    assert (results['img'].shape == torch.Size([3, 256, 256]))
    assert isinstance(results['img'], torch.Tensor)
    # The dict appears to be converted in place, so this compares the
    # converted tensor with itself.
    assert torch.equal(results['img'].data, ori_results['img'])
    assert (results['img'].dtype == torch.float32)
    # A 2-D (grayscale) image gains a leading singleton channel dimension.
    ori_results = dict(img=np.random.randint(256, size=(256, 256)))
    keys = ['img']
    to_float32 = True
    image_to_tensor = ImageToTensor(keys)
    results = image_to_tensor(ori_results)
    assert (results['img'].shape == torch.Size([1, 256, 256]))
    assert isinstance(results['img'], torch.Tensor)
    assert torch.equal(results['img'].data, ori_results['img'])
    assert (results['img'].dtype == torch.float32)
    # NOTE(review): the local to_float32 is never passed to ImageToTensor;
    # it only feeds the expected repr below — presumably matching the
    # transform's default. TODO confirm.
    assert (repr(image_to_tensor) == (image_to_tensor.__class__.__name__ + f'(keys={keys}, to_float32={to_float32})'))
|
def test_frames_to_tensor():
    """Test FramesToTensor for frame lists, dtype options and grayscale input."""
    with pytest.raises(TypeError):
        # The value under each key must be a list of arrays, not one array.
        ori_results = dict(img=np.random.randn(12, 12, 3))
        FramesToTensor(['img'])(ori_results)
    # Two HWC frames -> one (n_frames, C, H, W) tensor.
    ori_results = dict(img=[np.random.randn(12, 12, 3), np.random.randn(12, 12, 3)])
    keys = ['img']
    frames_to_tensor = FramesToTensor(keys, to_float32=False)
    results = frames_to_tensor(ori_results)
    assert (results['img'].shape == torch.Size([2, 3, 12, 12]))
    assert isinstance(results['img'], torch.Tensor)
    assert torch.equal(results['img'].data[(0, ...)], ori_results['img'][0])
    assert torch.equal(results['img'].data[(1, ...)], ori_results['img'][1])
    # Without to_float32 the float64 numpy dtype is preserved.
    assert (results['img'].dtype == torch.float64)
    # to_float32=True casts the result to float32.
    ori_results = dict(img=[np.random.randn(12, 12, 3), np.random.randn(12, 12, 3)])
    frames_to_tensor = FramesToTensor(keys, to_float32=True)
    results = frames_to_tensor(ori_results)
    assert (results['img'].shape == torch.Size([2, 3, 12, 12]))
    assert isinstance(results['img'], torch.Tensor)
    assert torch.equal(results['img'].data[(0, ...)], ori_results['img'][0])
    assert torch.equal(results['img'].data[(1, ...)], ori_results['img'][1])
    assert (results['img'].dtype == torch.float32)
    # 2-D (grayscale) frames gain a singleton channel dimension.
    ori_results = dict(img=[np.random.randn(12, 12), np.random.randn(12, 12)])
    frames_to_tensor = FramesToTensor(keys, to_float32=True)
    results = frames_to_tensor(ori_results)
    assert (results['img'].shape == torch.Size([2, 1, 12, 12]))
    assert isinstance(results['img'], torch.Tensor)
    assert torch.equal(results['img'].data[(0, ...)], ori_results['img'][0])
    assert torch.equal(results['img'].data[(1, ...)], ori_results['img'][1])
    assert (results['img'].dtype == torch.float32)
|
def test_masked_img():
    """GetMaskedImage composites gt_img with the inverted mask."""
    image = np.random.rand(4, 4, 1).astype(np.float32)
    mask = np.zeros((4, 4, 1), dtype=np.float32)
    mask[1, 1] = 1
    transform = GetMaskedImage()
    outputs = transform(dict(gt_img=image, mask=mask))
    # Masked pixels are zeroed; everything else is kept.
    expected = image * (1.0 - mask)
    assert np.array_equal(outputs['masked_img'], expected)
    expected_repr = transform.__class__.__name__ + "(img_name='gt_img', mask_name='mask')"
    assert repr(transform) == expected_repr
|
def test_format_trimap():
    """Test FormatTrimap in label-index mode and one-hot mode."""
    ori_trimap = np.random.randint(3, size=(64, 64))
    # Map class indices 1/2 to the conventional trimap pixel values 128/255.
    ori_trimap[(ori_trimap == 1)] = 128
    ori_trimap[(ori_trimap == 2)] = 255
    from mmcv.parallel import DataContainer
    ori_result = dict(trimap=torch.from_numpy(ori_trimap.copy()), meta=DataContainer({}))
    format_trimap = FormatTrimap(to_onehot=False)
    results = format_trimap(ori_result)
    result_trimap = results['trimap']
    # Without one-hot: a single channel holding class indices 0/1/2.
    assert (result_trimap.shape == (1, 64, 64))
    assert ((result_trimap.numpy() == 0) == (ori_trimap == 0)).all()
    assert ((result_trimap.numpy() == 1) == (ori_trimap == 128)).all()
    assert ((result_trimap.numpy() == 2) == (ori_trimap == 255)).all()
    ori_result = dict(trimap=torch.from_numpy(ori_trimap.copy()), meta=DataContainer({}))
    format_trimap = FormatTrimap(to_onehot=True)
    results = format_trimap(ori_result)
    result_trimap = results['trimap']
    # With one-hot: three channels, one per trimap class.
    assert (result_trimap.shape == (3, 64, 64))
    assert ((result_trimap[(0, ...)].numpy() == 1) == (ori_trimap == 0)).all()
    assert ((result_trimap[(1, ...)].numpy() == 1) == (ori_trimap == 128)).all()
    assert ((result_trimap[(2, ...)].numpy() == 1) == (ori_trimap == 255)).all()
    assert (repr(format_trimap) == (format_trimap.__class__.__name__ + '(to_onehot=True)'))
|
def test_collect():
    """Test Collect: keeps ``keys`` and packs ``meta_keys`` into 'meta'."""
    inputs = dict(img=np.random.randn(256, 256, 3), label=[1], img_name='test_image.png', ori_shape=(256, 256, 3), img_shape=(256, 256, 3), pad_shape=(256, 256, 3), flip_direction='vertical', img_norm_cfg=dict(to_bgr=False))
    keys = ['img', 'label']
    meta_keys = ['img_shape', 'img_name', 'ori_shape']
    collect = Collect(keys, meta_keys=meta_keys)
    results = collect(inputs)
    assert (set(list(results.keys())) == set(['img', 'label', 'meta']))
    # 'img' is not a meta key, so popping it keeps the comparisons below valid
    # (and avoids comparing large arrays with ==).
    inputs.pop('img')
    assert (set(results['meta'].data.keys()) == set(meta_keys))
    for key in results['meta'].data:
        assert (results['meta'].data[key] == inputs[key])
    assert (repr(collect) == (collect.__class__.__name__ + f'(keys={keys}, meta_keys={collect.meta_keys})'))
|
def test_matlab_like_resize():
    """Test MATLABLikeResize: scale mode, output_shape mode, invalid configs."""
    results = {}
    results['lq'] = np.ones((16, 16, 3))
    # Resize by a scale factor.
    imresize = MATLABLikeResize(keys=['lq'], scale=0.25)
    results = imresize(results)
    assert (results['lq'].shape == (4, 4, 3))
    # Resize to an explicit output shape.
    results['lq'] = np.ones((16, 16, 3))
    imresize = MATLABLikeResize(keys=['lq'], output_shape=(6, 6))
    results = imresize(results)
    assert (results['lq'].shape == (6, 6, 3))
    # Invalid kernel, invalid kernel_width, and missing scale/output_shape
    # must all raise ValueError.
    with pytest.raises(ValueError):
        MATLABLikeResize(keys=['lq'], kernel='abc')
    with pytest.raises(ValueError):
        MATLABLikeResize(keys=['lq'], kernel_width=10)
    with pytest.raises(ValueError):
        MATLABLikeResize(keys=['lq'])
    assert (repr(imresize) == ((imresize.__class__.__name__ + "(keys=['lq'], scale=None, output_shape=(6, 6), ") + 'kernel=bicubic, kernel_width=4.0)'))
|
def test_adjust_gamma():
    """Test gamma correction.

    Adapted from
    https://github.com/scikit-image/scikit-image/blob/7e4840bd9439d1dfb6beaf549998452c99f97fdd/skimage/exposure/tests/test_exposure.py#L534  # noqa
    """
    # Output shape matches input shape.
    img = np.ones([1, 1])
    result = adjust_gamma(img, 1.5)
    assert (img.shape == result.shape)
    # gamma=1 is the identity.
    image = np.random.uniform(0, 255, (8, 8))
    result = adjust_gamma(image, 1)
    np.testing.assert_array_equal(result, image)
    # gamma=0 maps every pixel to the upper bound of the dtype's range.
    image = np.random.uniform(0, 255, (8, 8))
    result = adjust_gamma(image, 0)
    dtype = image.dtype.type
    np.testing.assert_array_equal(result, dtype_range[dtype][1])
    # gamma < 1: expected uint8 values precomputed from the reference impl.
    image = np.arange(0, 255, 4, np.uint8).reshape((8, 8))
    expected = np.array([[0, 31, 45, 55, 63, 71, 78, 84], [90, 95, 100, 105, 110, 115, 119, 123], [127, 131, 135, 139, 142, 146, 149, 153], [156, 159, 162, 165, 168, 171, 174, 177], [180, 183, 186, 188, 191, 194, 196, 199], [201, 204, 206, 209, 211, 214, 216, 218], [221, 223, 225, 228, 230, 232, 234, 236], [238, 241, 243, 245, 247, 249, 251, 253]], dtype=np.uint8)
    result = adjust_gamma(image, 0.5)
    np.testing.assert_array_equal(result, expected)
    # gamma > 1: expected uint8 values precomputed from the reference impl.
    image = np.arange(0, 255, 4, np.uint8).reshape((8, 8))
    expected = np.array([[0, 0, 0, 0, 1, 1, 2, 3], [4, 5, 6, 7, 9, 10, 12, 14], [16, 18, 20, 22, 25, 27, 30, 33], [36, 39, 42, 45, 49, 52, 56, 60], [64, 68, 72, 76, 81, 85, 90, 95], [100, 105, 110, 116, 121, 127, 132, 138], [144, 150, 156, 163, 169, 176, 182, 189], [196, 203, 211, 218, 225, 233, 241, 249]], dtype=np.uint8)
    result = adjust_gamma(image, 2)
    np.testing.assert_array_equal(result, expected)
    # Negative gamma is rejected.
    image = np.arange(0, 255, 4, np.uint8).reshape((8, 8))
    with pytest.raises(ValueError):
        adjust_gamma(image, (- 1))
|
def test_make_coord():
    """make_coord should emit per-pixel coordinates, flattened or gridded."""
    height, width = 20, 30
    # Custom value ranges, flattened output: (H*W, 2).
    flat = make_coord((height, width), ranges=((10, 20), (-5, 5)))
    assert type(flat) == torch.Tensor
    assert flat.shape == (height * width, 2)
    # Default ranges, keep the spatial layout: (H, W, 2).
    grid = make_coord((height, width), flatten=False)
    assert type(grid) == torch.Tensor
    assert grid.shape == (height, width, 2)
|
def test_random_noise():
    """Test RandomNoise with gaussian/poisson noise and the prob=0 no-op."""
    results = {}
    results['lq'] = np.ones((8, 8, 3)).astype(np.float32)
    # Gaussian noise keeps the image shape.
    model = RandomNoise(params=dict(noise_type=['gaussian'], noise_prob=[1], gaussian_sigma=[0, 50], gaussian_gray_noise_prob=1), keys=['lq'])
    results = model(results)
    assert (results['lq'].shape == (8, 8, 3))
    # Poisson noise keeps the image shape.
    model = RandomNoise(params=dict(noise_type=['poisson'], noise_prob=[1], poisson_scale=[0, 1], poisson_gray_noise_prob=1), keys=['lq'])
    results = model(results)
    assert (results['lq'].shape == (8, 8, 3))
    # prob=0 disables the degradation entirely -> identity mapping.
    params = dict(noise_type=['gaussian'], noise_prob=[1], gaussian_sigma=[0, 50], gaussian_gray_noise_prob=1, prob=0)
    model = RandomNoise(params=params, keys=['lq'])
    assert (model(results) == results)
    assert (repr(model) == ((model.__class__.__name__ + f'(params={params}, ') + "keys=['lq'])"))
|
def test_random_jpeg_compression():
    """RandomJPEGCompression must keep shape and be a no-op when prob=0."""
    results = dict(lq=np.ones((8, 8, 3)).astype(np.float32))
    compress = RandomJPEGCompression(params=dict(quality=[5, 50]), keys=['lq'])
    results = compress(results)
    assert results['lq'].shape == (8, 8, 3)
    # prob=0 disables the degradation entirely -> identity mapping.
    params = dict(quality=[5, 50], prob=0)
    compress = RandomJPEGCompression(params=params, keys=['lq'])
    assert compress(results) == results
    expected_repr = (compress.__class__.__name__ + f'(params={params}, ') + "keys=['lq'])"
    assert repr(compress) == expected_repr
|
def test_random_video_compression():
    """Test RandomVideoCompression on a 5-frame clip and the prob=0 no-op."""
    results = {}
    results['lq'] = ([np.ones((8, 8, 3)).astype(np.float32)] * 5)
    # Compression must keep per-frame shape and frame count.
    model = RandomVideoCompression(params=dict(codec=['libx264', 'h264', 'mpeg4'], codec_prob=[(1 / 3.0), (1 / 3.0), (1 / 3.0)], bitrate=[10000.0, 100000.0]), keys=['lq'])
    results = model(results)
    assert (results['lq'][0].shape == (8, 8, 3))
    assert (len(results['lq']) == 5)
    # prob=0 disables the degradation entirely -> identity mapping.
    params = dict(codec=['libx264', 'h264', 'mpeg4'], codec_prob=[(1 / 3.0), (1 / 3.0), (1 / 3.0)], bitrate=[10000.0, 100000.0], prob=0)
    model = RandomVideoCompression(params=params, keys=['lq'])
    assert (model(results) == results)
    assert (repr(model) == ((model.__class__.__name__ + f'(params={params}, ') + "keys=['lq'])"))
|
def test_random_resize():
    """Test RandomResize across its resize modes and optional parameters."""
    results = {}
    results['lq'] = np.ones((8, 8, 3)).astype(np.float32)
    # First resize mode: output asserted to be no smaller than the input.
    model = RandomResize(params=dict(resize_mode_prob=[1, 0, 0], resize_scale=[0.5, 1.5], resize_opt=['bilinear', 'area', 'bicubic'], resize_prob=[(1 / 3.0), (1 / 3.0), (1 / 3.0)]), keys=['lq'])
    results = model(results)
    assert ((results['lq'].shape[0] >= 8) and (results['lq'].shape[1] >= 8))
    # Second resize mode: output asserted to be no larger than the input.
    results['lq'] = np.ones((8, 8, 3)).astype(np.float32)
    model = RandomResize(params=dict(resize_mode_prob=[0, 1, 0], resize_scale=[0.5, 1.5], resize_opt=['bilinear', 'area', 'bicubic'], resize_prob=[(1 / 3.0), (1 / 3.0), (1 / 3.0)]), keys=['lq'])
    results = model(results)
    assert ((results['lq'].shape[0] <= 8) and (results['lq'].shape[1] <= 8))
    # Third resize mode: size stays unchanged.
    results['lq'] = np.ones((8, 8, 3)).astype(np.float32)
    model = RandomResize(params=dict(resize_mode_prob=[0, 0, 1], resize_scale=[0.5, 1.5], resize_opt=['bilinear', 'area', 'bicubic'], resize_prob=[(1 / 3.0), (1 / 3.0), (1 / 3.0)]), keys=['lq'])
    results = model(results)
    assert ((results['lq'].shape[0] == 8) and (results['lq'].shape[1] == 8))
    # target_size forces the exact output resolution.
    results['lq'] = np.ones((8, 8, 3)).astype(np.float32)
    model = RandomResize(params=dict(resize_mode_prob=[0, 0, 1], resize_scale=[0.5, 1.5], resize_opt=['bilinear', 'area', 'bicubic'], resize_prob=[(1 / 3.0), (1 / 3.0), (1 / 3.0)], target_size=(16, 32)), keys=['lq'])
    results = model(results)
    assert (results['lq'].shape == (16, 32, 3))
    # resize_step path (no shape assertion; smoke test only).
    results['lq'] = np.ones((8, 8, 3)).astype(np.float32)
    model = RandomResize(params=dict(resize_mode_prob=[0, 0, 1], resize_scale=[0.5, 1.5], resize_opt=['bilinear', 'area', 'bicubic'], resize_prob=[(1 / 3.0), (1 / 3.0), (1 / 3.0)], resize_step=0.05), keys=['lq'])
    results = model(results)
    # is_size_even=True rounds output dims to even numbers.
    results['lq'] = np.ones((8, 8, 3)).astype(np.float32)
    model = RandomResize(params=dict(resize_mode_prob=[0, 1, 0], resize_scale=[0.5, 1.5], resize_opt=['bilinear', 'area', 'bicubic'], resize_prob=[(1 / 3.0), (1 / 3.0), (1 / 3.0)], resize_step=0.05, is_size_even=True), keys=['lq'])
    results = model(results)
    assert ((results['lq'].shape[0] % 2) == 0)
    assert ((results['lq'].shape[1] % 2) == 0)
    # prob=0 disables the resize entirely -> identity mapping.
    model = RandomResize(params=dict(resize_mode_prob=[1, 0, 0], resize_scale=[0.5, 1.5], resize_opt=['bilinear', 'area', 'bicubic'], resize_prob=[(1 / 3.0), (1 / 3.0), (1 / 3.0)], prob=0), keys=['lq'])
    assert (model(results) == results)
    # Unknown interpolation option must raise NotImplementedError.
    with pytest.raises(NotImplementedError):
        params = dict(resize_mode_prob=[1], resize_scale=[1], resize_opt=['abc'], resize_prob=[1])
        model = RandomResize(params=params, keys=['lq'])
        results = model(results)
    assert (repr(model) == ((model.__class__.__name__ + f'(params={params}, ') + "keys=['lq'])"))
|
def test_random_blur():
    """Test RandomBlur for every supported kernel type plus the skip path.

    Each configuration is applied to the same 8x8x3 image and must keep the
    spatial shape unchanged.  The final configuration uses ``prob=0`` so the
    transform is a no-op, which also pins down ``__repr__``.
    """
    results = {}
    results['lq'] = np.ones((8, 8, 3)).astype(np.float32)
    # All non-sinc kernel families share the same configuration; loop instead
    # of repeating six near-identical blocks.
    for kernel_type in ('iso', 'aniso', 'generalized_iso', 'generalized_aniso',
                        'plateau_iso', 'plateau_aniso'):
        model = RandomBlur(
            params=dict(kernel_size=[41], kernel_list=[kernel_type],
                        kernel_prob=[1], sigma_x=[0.2, 10], sigma_y=[0.2, 10],
                        rotate_angle=[(- 3.1416), 3.1416]),
            keys=['lq'])
        results = model(results)
        assert (results['lq'].shape == (8, 8, 3))
    # sinc kernels: exercise both a small (11) and a larger (15) kernel size.
    for kernel_size in (11, 15):
        model = RandomBlur(
            params=dict(kernel_size=[kernel_size], kernel_list=['sinc'],
                        kernel_prob=[1], sigma_x=[0.2, 10], sigma_y=[0.2, 10],
                        rotate_angle=[(- 3.1416), 3.1416]),
            keys=['lq'])
        results = model(results)
        assert (results['lq'].shape == (8, 8, 3))
    # sinc kernel with an explicitly fixed omega range.
    model = RandomBlur(
        params=dict(kernel_size=[15], kernel_list=['sinc'], kernel_prob=[1],
                    sigma_x=[0.2, 10], sigma_y=[0.2, 10],
                    rotate_angle=[(- 3.1416), 3.1416], omega=[0.1, 0.1]),
        keys=['lq'])
    results = model(results)
    assert (results['lq'].shape == (8, 8, 3))
    # prob=0 disables the degradation entirely -> identity mapping.
    params = dict(kernel_size=[15], kernel_list=['sinc'], kernel_prob=[1],
                  sigma_x=[0.2, 10], sigma_y=[0.2, 10],
                  rotate_angle=[(- 3.1416), 3.1416], prob=0)
    model = RandomBlur(params=params, keys=['lq'])
    assert (model(results) == results)
    assert (repr(model) == ((model.__class__.__name__ + f'(params={params}, ') + "keys=['lq'])"))
|
def test_degradations_with_shuffle():
    """Test DegradationsWithShuffle with full and partial shuffling.

    The degradation list mixes single transforms and a nested list (the
    nested pair is applied as one unit).  Both calls are smoke tests; only
    ``__repr__`` is asserted.
    """
    results = {}
    results['lq'] = np.ones((8, 8, 3)).astype(np.float32)
    # shuffle_idx=None: the whole degradation list may be shuffled.
    model = DegradationsWithShuffle(degradations=[dict(type='RandomBlur', params=dict(kernel_size=[15], kernel_list=['sinc'], kernel_prob=[1], sigma_x=[0.2, 10], sigma_y=[0.2, 10], rotate_angle=[(- 3.1416), 3.1416], omega=[0.1, 0.1])), dict(type='RandomResize', params=dict(resize_mode_prob=[0, 0, 1], resize_scale=[0.5, 1.5], resize_opt=['bilinear', 'area', 'bicubic'], resize_prob=[(1 / 3.0), (1 / 3.0), (1 / 3.0)], target_size=(16, 16))), [dict(type='RandomJPEGCompression', params=dict(quality=[5, 10])), dict(type='RandomJPEGCompression', params=dict(quality=[15, 20]))]], keys=['lq'], shuffle_idx=None)
    model(results)
    # shuffle_idx=(1, 2): only the listed positions may be shuffled.
    degradations = [dict(type='RandomBlur', params=dict(kernel_size=[15], kernel_list=['sinc'], kernel_prob=[1], sigma_x=[0.2, 10], sigma_y=[0.2, 10], rotate_angle=[(- 3.1416), 3.1416], omega=[0.1, 0.1])), dict(type='RandomResize', params=dict(resize_mode_prob=[0, 0, 1], resize_scale=[0.5, 1.5], resize_opt=['bilinear', 'area', 'bicubic'], resize_prob=[(1 / 3.0), (1 / 3.0), (1 / 3.0)], target_size=(16, 16))), [dict(type='RandomJPEGCompression', params=dict(quality=[5, 10])), dict(type='RandomJPEGCompression', params=dict(quality=[15, 20]))]]
    model = DegradationsWithShuffle(degradations=degradations, keys=['lq'], shuffle_idx=(1, 2))
    model(results)
    assert (repr(model) == (((model.__class__.__name__ + f'(degradations={degradations}, ') + "keys=['lq'], ") + 'shuffle_idx=(1, 2))'))
|
def test_random_down_sampling():
    """Test RandomDownSampling with and without a configured patch size."""
    # patch_size=None: the whole image is processed.
    img1 = np.uint8((np.random.randn(480, 640, 3) * 255))
    inputs1 = dict(gt=img1)
    down_sampling1 = RandomDownSampling(scale_min=1, scale_max=4, patch_size=None)
    results1 = down_sampling1(inputs1)
    assert (set(list(results1.keys())) == set(['gt', 'lq', 'scale']))
    assert (repr(down_sampling1) == (((((down_sampling1.__class__.__name__ + f' scale_min={down_sampling1.scale_min}, ') + f'scale_max={down_sampling1.scale_max}, ') + f'patch_size={down_sampling1.patch_size}, ') + f'interpolation={down_sampling1.interpolation}, ') + f'backend={down_sampling1.backend}'))
    # patch_size=48: same result keys are still expected.
    img2 = np.uint8((np.random.randn(480, 640, 3) * 255))
    inputs2 = dict(gt=img2)
    down_sampling2 = RandomDownSampling(scale_min=1, scale_max=4, patch_size=48)
    results2 = down_sampling2(inputs2)
    assert (set(list(results2.keys())) == set(['gt', 'lq', 'scale']))
    assert (repr(down_sampling2) == (((((down_sampling2.__class__.__name__ + f' scale_min={down_sampling2.scale_min}, ') + f'scale_max={down_sampling2.scale_max}, ') + f'patch_size={down_sampling2.patch_size}, ') + f'interpolation={down_sampling2.interpolation}, ') + f'backend={down_sampling2.backend}'))
|
def test_restoration_video_inference():
    """Test restoration_video_inference on a frame folder and a video file.

    GPU-only: the whole test is skipped silently when CUDA is unavailable.
    The model config is mutated in place to exercise the pipeline-selection
    fallbacks (demo_pipeline -> test_pipeline -> val_pipeline), so the order
    of the sub-cases matters.
    """
    if torch.cuda.is_available():
        # BasicVSR with window_size=0.
        model = init_model('./configs/restorers/basicvsr/basicvsr_reds4.py', None, device='cuda')
        img_dir = './tests/data/vimeo90k/00001/0266'
        window_size = 0
        start_idx = 1
        filename_tmpl = 'im{}.png'
        output = restoration_video_inference(model, img_dir, window_size, start_idx, filename_tmpl)
        assert (output.shape == (1, 7, 3, 256, 448))
        # EDVR with a sliding window of 5 frames.
        window_size = 5
        model = init_model('./configs/restorers/edvr/edvrm_wotsa_x4_g8_600k_reds.py', None, device='cuda')
        output = restoration_video_inference(model, img_dir, window_size, start_idx, filename_tmpl)
        assert (output.shape == (1, 7, 3, 256, 448))
        # Fallback: no demo_pipeline -> test_pipeline is used.
        model.cfg.test_pipeline = model.cfg.demo_pipeline
        model.cfg.pop('demo_pipeline')
        output = restoration_video_inference(model, img_dir, window_size, start_idx, filename_tmpl)
        assert (output.shape == (1, 7, 3, 256, 448))
        # Fallback: no test_pipeline either -> val_pipeline is used.
        model.cfg.val_pipeline = model.cfg.test_pipeline
        model.cfg.pop('test_pipeline')
        output = restoration_video_inference(model, img_dir, window_size, start_idx, filename_tmpl)
        assert (output.shape == (1, 7, 3, 256, 448))
        # A pipeline missing its first (loading) step must raise TypeError.
        with pytest.raises(TypeError):
            model.cfg.val_pipeline = model.cfg.val_pipeline[1:]
            output = restoration_video_inference(model, img_dir, window_size, start_idx, filename_tmpl)
        # Input given as a video file instead of a frame folder.
        model = init_model('./configs/restorers/basicvsr/basicvsr_reds4.py', None, device='cuda')
        img_dir = './tests/data/test_inference.mp4'
        window_size = 0
        start_idx = 1
        filename_tmpl = 'im{}.png'
        output = restoration_video_inference(model, img_dir, window_size, start_idx, filename_tmpl)
        assert (output.shape == (1, 5, 3, 256, 256))
|
def test_video_interpolation_inference():
    """Smoke-test video_interpolation_inference on frames and a video file.

    Runs on CPU and, when available, on GPU.  A negative ``fps_multiplier``
    must be rejected.  Temporary outputs are removed at the end.
    """
    model = init_model('./configs/video_interpolators/cain/cain_b5_320k_vimeo-triplet.py', None, device='cpu')
    model.cfg['demo_pipeline'] = [dict(type='LoadImageFromFileList', io_backend='disk', key='inputs', channel_order='rgb'), dict(type='RescaleToZeroOne', keys=['inputs']), dict(type='FramesToTensor', keys=['inputs']), dict(type='Collect', keys=['inputs'], meta_keys=['inputs_path', 'key'])]
    input_dir = './tests/data/vimeo90k/00001/0266'
    output_dir = './tests/data/vimeo90k/00001/out'
    # exist_ok avoids a spurious FileExistsError when a previous run failed
    # before the cleanup at the bottom of this test could run.
    os.makedirs(output_dir, exist_ok=True)
    video_interpolation_inference(model, input_dir, output_dir, batch_size=10)
    # Video-file input/output path.
    input_dir = './tests/data/test_inference.mp4'
    output_dir = './tests/data/test_inference_out.mp4'
    video_interpolation_inference(model, input_dir, output_dir)
    # Negative fps_multiplier is invalid.
    with pytest.raises(AssertionError):
        input_dir = './tests/data/test_inference.mp4'
        output_dir = './tests/data/test_inference_out.mp4'
        video_interpolation_inference(model, input_dir, output_dir, fps_multiplier=(- 1))
    if torch.cuda.is_available():
        model = init_model('./configs/video_interpolators/cain/cain_b5_320k_vimeo-triplet.py', None, device='cuda')
        model.cfg['demo_pipeline'] = [dict(type='LoadImageFromFileList', io_backend='disk', key='inputs', channel_order='rgb'), dict(type='RescaleToZeroOne', keys=['inputs']), dict(type='FramesToTensor', keys=['inputs']), dict(type='Collect', keys=['inputs'], meta_keys=['inputs_path', 'key'])]
        input_dir = './tests/data/vimeo90k/00001/0266'
        output_dir = './tests/data/vimeo90k/00001'
        video_interpolation_inference(model, input_dir, output_dir, batch_size=10)
        input_dir = './tests/data/test_inference.mp4'
        output_dir = './tests/data/test_inference_out.mp4'
        video_interpolation_inference(model, input_dir, output_dir)
        with pytest.raises(AssertionError):
            input_dir = './tests/data/test_inference.mp4'
            output_dir = './tests/data/test_inference_out.mp4'
            video_interpolation_inference(model, input_dir, output_dir, fps_multiplier=(- 1))
    # Clean up artifacts produced above.
    shutil.rmtree('./tests/data/vimeo90k/00001/out')
    os.remove('./tests/data/test_inference_out.mp4')
|
def test_gl_encdec():
    """Test AOTEncoderDecoder built via build_backbone.

    A 4-channel input (image + mask) must produce a 3-channel image of the
    same spatial size, with default and explicitly-configured encoder/decoder.
    """
    input_x = torch.randn(1, 4, 256, 256)
    template_cfg = dict(type='AOTEncoderDecoder')
    aot_encdec = build_backbone(template_cfg)
    aot_encdec.init_weights()
    output = aot_encdec(input_x)
    assert (output.shape == (1, 3, 256, 256))
    # Explicit encoder config.
    cfg_ = template_cfg.copy()
    cfg_['encoder'] = dict(type='AOTEncoder')
    aot_encdec = build_backbone(cfg_)
    output = aot_encdec(input_x)
    assert (output.shape == (1, 3, 256, 256))
    # Explicit decoder config.
    cfg_ = template_cfg.copy()
    cfg_['decoder'] = dict(type='AOTDecoder')
    aot_encdec = build_backbone(cfg_)
    output = aot_encdec(input_x)
    assert (output.shape == (1, 3, 256, 256))
    # GPU path.
    if torch.cuda.is_available():
        aot_encdec = build_backbone(template_cfg)
        aot_encdec.init_weights()
        aot_encdec = aot_encdec.cuda()
        output = aot_encdec(input_x.cuda())
        assert (output.shape == (1, 3, 256, 256))
|
def test_aot_dilation_neck():
    """AOTBlockNeck must preserve the (N, C, H, W) feature shape."""
    neck = AOTBlockNeck(in_channels=256, dilation_rates=(1, 2, 4, 8), num_aotblock=8)
    feat = torch.rand((2, 256, 64, 64))
    out = neck(feat)
    assert out.shape == (2, 256, 64, 64)
    # Same check on GPU when available.
    if torch.cuda.is_available():
        neck = AOTBlockNeck(in_channels=256, dilation_rates=(1, 2, 4, 8), num_aotblock=8).cuda()
        feat = torch.rand((2, 256, 64, 64)).cuda()
        out = neck(feat)
        assert out.shape == (2, 256, 64, 64)
|
def assert_tensor_with_shape(tensor, shape):
    """Check that ``tensor`` is a ``torch.Tensor`` with the target ``shape``."""
    # Docstring previously started with a stray '"' character; fixed.
    assert isinstance(tensor, torch.Tensor)
    assert (tensor.shape == shape)
|
def _demo_inputs(input_shape=(1, 4, 64, 64)):
'\n Create a superset of inputs needed to run encoder.\n\n Args:\n input_shape (tuple): input batch dimensions.\n Default: (1, 4, 64, 64).\n '
img = np.random.random(input_shape).astype(np.float32)
img = torch.from_numpy(img)
return img
|
def test_plain_decoder():
    """Test PlainDecoder.

    The decoder consumes the VGG16 encoder's output dict and must produce a
    single-channel map at the input resolution.
    """
    model = PlainDecoder(512)
    model.init_weights()
    model.train()
    encoder = VGG16(4)
    img = _demo_inputs()
    outputs = encoder(img)
    prediction = model(outputs)
    assert_tensor_with_shape(prediction, torch.Size([1, 1, 64, 64]))
    # Same check on GPU when available.
    if torch.cuda.is_available():
        model = PlainDecoder(512)
        model.init_weights()
        model.train()
        model.cuda()
        encoder = VGG16(4)
        encoder.cuda()
        img = _demo_inputs().cuda()
        outputs = encoder(img)
        prediction = model(outputs)
        assert_tensor_with_shape(prediction, torch.Size([1, 1, 64, 64]))
|
def test_resnet_decoder():
    """Test resnet decoder.

    Covers: unknown block rejection, custom kernel size, spectral norm, and
    the GPU variants of both configurations.
    """
    with pytest.raises(NotImplementedError):
        ResNetDec('UnknowBlock', [2, 3, 3, 2], 512)
    # Custom kernel size.
    model = ResNetDec('BasicBlockDec', [2, 3, 3, 2], 512, kernel_size=5)
    model.init_weights()
    model.train()
    encoder = ResNetEnc('BasicBlock', [2, 4, 4, 2], 6)
    img = _demo_inputs((1, 6, 64, 64))
    feat = encoder(img)
    prediction = model(feat)
    assert_tensor_with_shape(prediction, torch.Size([1, 1, 64, 64]))
    # Spectral norm wraps conv weights as 'weight_orig'.
    model = ResNetDec('BasicBlockDec', [2, 3, 3, 2], 512, with_spectral_norm=True)
    assert hasattr(model.conv1.conv, 'weight_orig')
    model.init_weights()
    model.train()
    encoder = ResNetEnc('BasicBlock', [2, 4, 4, 2], 6)
    img = _demo_inputs((1, 6, 64, 64))
    feat = encoder(img)
    prediction = model(feat)
    assert_tensor_with_shape(prediction, torch.Size([1, 1, 64, 64]))
    # Same two configurations on GPU when available.
    if torch.cuda.is_available():
        model = ResNetDec('BasicBlockDec', [2, 3, 3, 2], 512, kernel_size=5)
        model.init_weights()
        model.train()
        model.cuda()
        encoder = ResNetEnc('BasicBlock', [2, 4, 4, 2], 6)
        encoder.cuda()
        img = _demo_inputs((1, 6, 64, 64)).cuda()
        feat = encoder(img)
        prediction = model(feat)
        assert_tensor_with_shape(prediction, torch.Size([1, 1, 64, 64]))
        model = ResNetDec('BasicBlockDec', [2, 3, 3, 2], 512, with_spectral_norm=True)
        assert hasattr(model.conv1.conv, 'weight_orig')
        model.init_weights()
        model.train()
        model.cuda()
        encoder = ResNetEnc('BasicBlock', [2, 4, 4, 2], 6)
        encoder.cuda()
        img = _demo_inputs((1, 6, 64, 64)).cuda()
        feat = encoder(img)
        prediction = model(feat)
        assert_tensor_with_shape(prediction, torch.Size([1, 1, 64, 64]))
|
def test_res_shortcut_decoder():
    """Test resnet decoder with shortcut connections (CPU and GPU)."""
    with pytest.raises(NotImplementedError):
        ResShortcutDec('UnknowBlock', [2, 3, 3, 2], 512)
    model = ResShortcutDec('BasicBlockDec', [2, 3, 3, 2], 512)
    model.init_weights()
    model.train()
    encoder = ResShortcutEnc('BasicBlock', [2, 4, 4, 2], 6)
    img = _demo_inputs((1, 6, 64, 64))
    outputs = encoder(img)
    prediction = model(outputs)
    assert_tensor_with_shape(prediction, torch.Size([1, 1, 64, 64]))
    # Same check on GPU when available.
    if torch.cuda.is_available():
        model = ResShortcutDec('BasicBlockDec', [2, 3, 3, 2], 512)
        model.init_weights()
        model.train()
        model.cuda()
        encoder = ResShortcutEnc('BasicBlock', [2, 4, 4, 2], 6)
        encoder.cuda()
        img = _demo_inputs((1, 6, 64, 64)).cuda()
        outputs = encoder(img)
        prediction = model(outputs)
        assert_tensor_with_shape(prediction, torch.Size([1, 1, 64, 64]))
|
def test_res_gca_decoder():
    """Test resnet decoder with shortcut and guided contextual attention."""
    with pytest.raises(NotImplementedError):
        ResGCADecoder('UnknowBlock', [2, 3, 3, 2], 512)
    model = ResGCADecoder('BasicBlockDec', [2, 3, 3, 2], 512)
    model.init_weights()
    model.train()
    encoder = ResGCAEncoder('BasicBlock', [2, 4, 4, 2], 6)
    img = _demo_inputs((2, 6, 32, 32))
    outputs = encoder(img)
    prediction = model(outputs)
    assert_tensor_with_shape(prediction, torch.Size([2, 1, 32, 32]))
    # Same check on GPU when available.
    if torch.cuda.is_available():
        model = ResGCADecoder('BasicBlockDec', [2, 3, 3, 2], 512)
        model.init_weights()
        model.train()
        model.cuda()
        encoder = ResGCAEncoder('BasicBlock', [2, 4, 4, 2], 6)
        encoder.cuda()
        img = _demo_inputs((2, 6, 32, 32)).cuda()
        outputs = encoder(img)
        prediction = model(outputs)
        assert_tensor_with_shape(prediction, torch.Size([2, 1, 32, 32]))
|
def test_indexed_upsample():
    """Test the IndexedUpsample module used by the IndexNet decoder."""
    indexed_upsample = IndexedUpsample(12, 12)
    # Without dec_idx_feat the input's spatial size is kept.
    feat = torch.rand(2, 6, 32, 32)
    skip = torch.rand(2, 6, 32, 32)
    out = indexed_upsample(feat, skip)
    assert_tensor_with_shape(out, (2, 12, 32, 32))
    # With dec_idx_feat the output matches the shortcut's spatial size.
    feat = torch.rand(2, 6, 32, 32)
    dec_idx_feat = torch.rand(2, 6, 64, 64)
    skip = torch.rand(2, 6, 64, 64)
    out = indexed_upsample(feat, skip, dec_idx_feat)
    assert_tensor_with_shape(out, (2, 12, 64, 64))
|
def test_indexnet_decoder():
    """Test IndexNet decoder.

    A mismatched encoder-output structure must trigger an AssertionError;
    valid encoder outputs must decode to a (N, 1, H, W) map with both the
    plain-conv and separable-conv configurations.
    """
    with pytest.raises(AssertionError):
        # 5-D shortcut/index tensors are invalid input.
        indexnet_decoder = IndexNetDecoder(160, kernel_size=5, separable_conv=False)
        x = torch.rand(2, 256, 4, 4)
        shortcut = torch.rand(2, 128, 8, 8, 8)
        dec_idx_feat = torch.rand(2, 128, 8, 8, 8)
        outputs_enc = dict(out=x, shortcuts=[shortcut], dec_idx_feat_list=[dec_idx_feat])
        indexnet_decoder(outputs_enc)
    indexnet_decoder = IndexNetDecoder(160, kernel_size=5, separable_conv=False)
    indexnet_decoder.init_weights()
    indexnet_encoder = IndexNetEncoder(4)
    x = torch.rand(2, 4, 32, 32)
    outputs_enc = indexnet_encoder(x)
    out = indexnet_decoder(outputs_enc)
    assert (out.shape == (2, 1, 32, 32))
    # Separable-conv variant on the same encoder outputs.
    indexnet_decoder = IndexNetDecoder(160, kernel_size=3, separable_conv=True)
    indexnet_decoder.init_weights()
    out = indexnet_decoder(outputs_enc)
    assert (out.shape == (2, 1, 32, 32))
|
def test_fba_decoder():
    """Test FBADecoder: invalid pool_scales and the (alpha, F, B) outputs."""
    with pytest.raises(AssertionError):
        # pool_scales must be a sequence, not a scalar.
        FBADecoder(pool_scales=1, in_channels=32, channels=16)
    # Build the multi-level encoder feature pyramid the decoder expects.
    inputs = dict()
    conv_out_1 = _demo_inputs((1, 11, 320, 320))
    conv_out_2 = _demo_inputs((1, 64, 160, 160))
    conv_out_3 = _demo_inputs((1, 256, 80, 80))
    conv_out_4 = _demo_inputs((1, 512, 40, 40))
    conv_out_5 = _demo_inputs((1, 1024, 40, 40))
    conv_out_6 = _demo_inputs((1, 2048, 40, 40))
    inputs['conv_out'] = [conv_out_1, conv_out_2, conv_out_3, conv_out_4, conv_out_5, conv_out_6]
    inputs['merged'] = _demo_inputs((1, 3, 320, 320))
    inputs['two_channel_trimap'] = _demo_inputs((1, 2, 320, 320))
    model = FBADecoder(pool_scales=(1, 2, 3, 6), in_channels=2048, channels=256, norm_cfg=dict(type='GN', num_groups=32))
    # The decoder returns alpha (1 ch), foreground and background (3 ch each).
    (alpha, F, B) = model(inputs)
    assert_tensor_with_shape(alpha, torch.Size([1, 1, 320, 320]))
    assert_tensor_with_shape(F, torch.Size([1, 3, 320, 320]))
    assert_tensor_with_shape(B, torch.Size([1, 3, 320, 320]))
|
def test_deepfill_dec():
    """Test DeepFillDecoder: layer attributes, output shape and value range."""
    # out_act_cfg=None disables the output activation.
    decoder = DeepFillDecoder(128, out_act_cfg=None)
    assert (not decoder.with_out_activation)
    decoder = DeepFillDecoder(128)
    x = torch.randn((2, 128, 64, 64))
    input_dict = dict(out=x)
    res = decoder(input_dict)
    assert (res.shape == (2, 3, 256, 256))
    assert (decoder.dec2.stride == (1, 1))
    assert (decoder.dec2.out_channels == 128)
    assert (not decoder.dec7.with_activation)
    # Output is asserted to lie in [-1, 1].
    assert ((res.min().item() >= (- 1.0)) and (res.max().item() <= 1))
    if torch.cuda.is_available():
        decoder = DeepFillDecoder(128).cuda()
        x = torch.randn((2, 128, 64, 64)).cuda()
        input_dict = dict(out=x)
        res = decoder(input_dict)
        assert (res.shape == (2, 3, 256, 256))
        assert (decoder.dec2.stride == (1, 1))
        assert (decoder.dec2.out_channels == 128)
        assert (not decoder.dec7.with_activation)
        assert ((res.min().item() >= (- 1.0)) and (res.max().item() <= 1))
        # Gated-conv variant with a reduced channel factor (GPU only).
        decoder = DeepFillDecoder(128, conv_type='gated_conv', channel_factor=0.75).cuda()
        x = torch.randn((2, 128, 64, 64)).cuda()
        input_dict = dict(out=x)
        res = decoder(input_dict)
        assert (res.shape == (2, 3, 256, 256))
        assert (decoder.dec2.conv.stride == (1, 1))
        assert (decoder.dec2.conv.out_channels == (96 * 2))
        assert (not decoder.dec7.with_feat_act)
        assert ((res.min().item() >= (- 1.0)) and (res.max().item() <= 1))
|
def assert_dict_keys_equal(dictionary, target_keys):
    """Check that the dictionary's key set equals the target key set."""
    assert isinstance(dictionary, dict)
    # Iterating a dict yields its keys, so no explicit .keys() call is needed.
    assert set(dictionary) == set(target_keys)
|
def assert_tensor_with_shape(tensor, shape):
    """Check that ``tensor`` is a ``torch.Tensor`` with the target ``shape``."""
    # Docstring previously started with a stray '"' character; fixed.
    assert isinstance(tensor, torch.Tensor)
    assert (tensor.shape == shape)
|
def test_encoder_decoder():
    """Test SimpleEncoderDecoder on CPU (three runs) and GPU (one run)."""
    # The original test repeated the CPU stanza three times verbatim; a loop
    # performs the same three independent runs with fresh random inputs.
    for _ in range(3):
        encoder = dict(type='VGG16', in_channels=4)
        decoder = dict(type='PlainDecoder')
        model = SimpleEncoderDecoder(encoder, decoder)
        model.init_weights()
        model.train()
        fg, bg, merged, alpha, trimap = _demo_inputs_pair()
        prediction = model(torch.cat([merged, trimap], 1))
        assert_tensor_with_shape(prediction, torch.Size([1, 1, 64, 64]))
    if torch.cuda.is_available():
        encoder = dict(type='VGG16', in_channels=4)
        decoder = dict(type='PlainDecoder')
        model = SimpleEncoderDecoder(encoder, decoder)
        model.init_weights()
        model.train()
        fg, bg, merged, alpha, trimap = _demo_inputs_pair(cuda=True)
        model.cuda()
        prediction = model(torch.cat([merged, trimap], 1))
        assert_tensor_with_shape(prediction, torch.Size([1, 1, 64, 64]))
|
def _demo_inputs_pair(img_shape=(64, 64), batch_size=1, cuda=False):
'\n Create a superset of inputs needed to run backbone.\n\n Args:\n img_shape (tuple): shape of the input image.\n batch_size (int): batch size of the input batch.\n cuda (bool): whether transfer input into gpu.\n '
color_shape = (batch_size, 3, img_shape[0], img_shape[1])
gray_shape = (batch_size, 1, img_shape[0], img_shape[1])
fg = torch.from_numpy(np.random.random(color_shape).astype(np.float32))
bg = torch.from_numpy(np.random.random(color_shape).astype(np.float32))
merged = torch.from_numpy(np.random.random(color_shape).astype(np.float32))
alpha = torch.from_numpy(np.random.random(gray_shape).astype(np.float32))
trimap = torch.from_numpy(np.random.random(gray_shape).astype(np.float32))
if cuda:
fg = fg.cuda()
bg = bg.cuda()
merged = merged.cuda()
alpha = alpha.cuda()
trimap = trimap.cuda()
return (fg, bg, merged, alpha, trimap)
|
def check_norm_state(modules, train_state):
    """Check that every batch-norm layer is in the expected train state."""
    # all() short-circuits on the first mismatching layer, mirroring the
    # original early return.
    return all(mod.training == train_state
               for mod in modules if isinstance(mod, _BatchNorm))
|
def is_block(modules):
    """Check whether ``modules`` is a ResNet building block."""
    # isinstance already yields the boolean we want; no if/else needed.
    return isinstance(modules, (BasicBlock, Bottleneck))
|
def assert_tensor_with_shape(tensor, shape):
    """Check that ``tensor`` is a ``torch.Tensor`` with the target ``shape``."""
    # Docstring previously started with a stray '"' character; fixed.
    assert isinstance(tensor, torch.Tensor)
    assert (tensor.shape == shape)
|
def assert_mid_feat_shape(mid_feat, target_shape):
    """Check that the five mid-level features match their target shapes."""
    assert len(mid_feat) == 5
    # Pairwise comparison replaces the original index loop over range(5).
    for feat, expected in zip(mid_feat, target_shape):
        assert_tensor_with_shape(feat, torch.Size(expected))
|
def _demo_inputs(input_shape=(2, 4, 64, 64)):
'\n Create a superset of inputs needed to run encoder.\n\n Args:\n input_shape (tuple): input batch dimensions.\n Default: (1, 4, 64, 64).\n '
img = np.random.random(input_shape).astype(np.float32)
img = torch.from_numpy(img)
return img
|
def test_vgg16_encoder():
    """Test VGG16 encoder.

    Checks output feature shape and the five max-pooling index maps for the
    plain, batch-norm, and ASPP configurations, on CPU and (if available) GPU.
    """
    target_shape = [(2, 64, 32, 32), (2, 128, 16, 16), (2, 256, 8, 8), (2, 512, 4, 4), (2, 512, 2, 2)]
    # Plain VGG16.
    model = VGG16(4)
    model.init_weights()
    model.train()
    img = _demo_inputs()
    outputs = model(img)
    assert_tensor_with_shape(outputs['out'], (2, 512, 2, 2))
    assert_tensor_with_shape(outputs['max_idx_1'], target_shape[0])
    assert_tensor_with_shape(outputs['max_idx_2'], target_shape[1])
    assert_tensor_with_shape(outputs['max_idx_3'], target_shape[2])
    assert_tensor_with_shape(outputs['max_idx_4'], target_shape[3])
    assert_tensor_with_shape(outputs['max_idx_5'], target_shape[4])
    # With batch normalization.
    model = VGG16(4, batch_norm=True)
    model.init_weights()
    model.train()
    img = _demo_inputs()
    outputs = model(img)
    assert_tensor_with_shape(outputs['out'], (2, 512, 2, 2))
    assert_tensor_with_shape(outputs['max_idx_1'], target_shape[0])
    assert_tensor_with_shape(outputs['max_idx_2'], target_shape[1])
    assert_tensor_with_shape(outputs['max_idx_3'], target_shape[2])
    assert_tensor_with_shape(outputs['max_idx_4'], target_shape[3])
    assert_tensor_with_shape(outputs['max_idx_5'], target_shape[4])
    # With ASPP: output channels drop to 256.
    model = VGG16(4, aspp=True, dilations=[6, 12, 18])
    model.init_weights()
    model.train()
    img = _demo_inputs()
    outputs = model(img)
    assert_tensor_with_shape(outputs['out'], (2, 256, 2, 2))
    assert_tensor_with_shape(outputs['max_idx_1'], target_shape[0])
    assert_tensor_with_shape(outputs['max_idx_2'], target_shape[1])
    assert_tensor_with_shape(outputs['max_idx_3'], target_shape[2])
    assert_tensor_with_shape(outputs['max_idx_4'], target_shape[3])
    assert_tensor_with_shape(outputs['max_idx_5'], target_shape[4])
    # Norm layers must be in training mode after model.train().
    assert check_norm_state(model.modules(), True)
    # Same three configurations on GPU when available.
    if torch.cuda.is_available():
        model = VGG16(4)
        model.init_weights()
        model.train()
        model.cuda()
        img = _demo_inputs().cuda()
        outputs = model(img)
        assert_tensor_with_shape(outputs['out'], (2, 512, 2, 2))
        assert_tensor_with_shape(outputs['max_idx_1'], target_shape[0])
        assert_tensor_with_shape(outputs['max_idx_2'], target_shape[1])
        assert_tensor_with_shape(outputs['max_idx_3'], target_shape[2])
        assert_tensor_with_shape(outputs['max_idx_4'], target_shape[3])
        assert_tensor_with_shape(outputs['max_idx_5'], target_shape[4])
        model = VGG16(4, batch_norm=True)
        model.init_weights()
        model.train()
        model.cuda()
        img = _demo_inputs().cuda()
        outputs = model(img)
        assert_tensor_with_shape(outputs['out'], (2, 512, 2, 2))
        assert_tensor_with_shape(outputs['max_idx_1'], target_shape[0])
        assert_tensor_with_shape(outputs['max_idx_2'], target_shape[1])
        assert_tensor_with_shape(outputs['max_idx_3'], target_shape[2])
        assert_tensor_with_shape(outputs['max_idx_4'], target_shape[3])
        assert_tensor_with_shape(outputs['max_idx_5'], target_shape[4])
        model = VGG16(4, aspp=True, dilations=[6, 12, 18])
        model.init_weights()
        model.train()
        model.cuda()
        img = _demo_inputs().cuda()
        outputs = model(img)
        assert_tensor_with_shape(outputs['out'], (2, 256, 2, 2))
        assert_tensor_with_shape(outputs['max_idx_1'], target_shape[0])
        assert_tensor_with_shape(outputs['max_idx_2'], target_shape[1])
        assert_tensor_with_shape(outputs['max_idx_3'], target_shape[2])
        assert_tensor_with_shape(outputs['max_idx_4'], target_shape[3])
        assert_tensor_with_shape(outputs['max_idx_5'], target_shape[4])
        assert check_norm_state(model.modules(), True)
|
def test_resnet_encoder():
    """Smoke-test ResNetEnc on CPU and, when available, on GPU."""
    with pytest.raises(NotImplementedError):
        # unknown block types are rejected
        ResNetEnc('UnknownBlock', [3, 4, 4, 2], 3)

    with pytest.raises(TypeError):
        # pretrained must be a str or None
        model = ResNetEnc('BasicBlock', [3, 4, 4, 2], 3)
        model.init_weights(list())

    # spectral norm wraps the first conv's weight as `weight_orig`
    model = ResNetEnc('BasicBlock', [3, 4, 4, 2], 4, with_spectral_norm=True)
    assert hasattr(model.conv1.conv, 'weight_orig')
    model.init_weights()
    model.train()
    feat = model(_demo_inputs((2, 4, 64, 64)))
    assert_tensor_with_shape(feat, torch.Size([2, 512, 2, 2]))

    # late downsampling keeps the same final feature resolution
    model = ResNetEnc('BasicBlock', [3, 4, 4, 2], 6, late_downsample=True)
    model.init_weights()
    model.train()
    feat = model(_demo_inputs((2, 6, 64, 64)))
    assert_tensor_with_shape(feat, torch.Size([2, 512, 2, 2]))

    if torch.cuda.is_available():
        model = ResNetEnc(
            'BasicBlock', [3, 4, 4, 2], 4, with_spectral_norm=True)
        assert hasattr(model.conv1.conv, 'weight_orig')
        model.init_weights()
        model.train()
        model.cuda()
        feat = model(_demo_inputs((2, 4, 64, 64)).cuda())
        assert_tensor_with_shape(feat, torch.Size([2, 512, 2, 2]))

        model = ResNetEnc('BasicBlock', [3, 4, 4, 2], 6, late_downsample=True)
        model.init_weights()
        model.train()
        model.cuda()
        feat = model(_demo_inputs((2, 6, 64, 64)).cuda())
        assert_tensor_with_shape(feat, torch.Size([2, 512, 2, 2]))
|
def test_res_shortcut_encoder():
    """Test ResShortcutEnc: output plus five intermediate features."""
    with pytest.raises(NotImplementedError):
        # unknown block types are rejected
        ResShortcutEnc('UnknownBlock', [3, 4, 4, 2], 3)

    target_shape = [(2, 32, 64, 64), (2, 32, 32, 32), (2, 64, 16, 16),
                    (2, 128, 8, 8), (2, 256, 4, 4)]
    # with late downsampling the second feature carries 64 channels
    target_late_ds_shape = [(2, 32, 64, 64), (2, 64, 32, 32),
                            (2, 64, 16, 16), (2, 128, 8, 8), (2, 256, 4, 4)]

    def _check_outputs(outputs, feat_shapes):
        # final feature plus feat1..feat5 shortcut features
        assert_tensor_with_shape(outputs['out'], (2, 512, 2, 2))
        for idx, shape in enumerate(feat_shapes):
            assert_tensor_with_shape(outputs[f'feat{idx + 1}'], shape)

    model = ResShortcutEnc(
        'BasicBlock', [3, 4, 4, 2], 4, with_spectral_norm=True)
    # spectral norm wraps the first conv's weight as `weight_orig`
    assert hasattr(model.conv1.conv, 'weight_orig')
    model.init_weights()
    model.train()
    _check_outputs(model(_demo_inputs((2, 4, 64, 64))), target_shape)

    model = ResShortcutEnc('BasicBlock', [3, 4, 4, 2], 6)
    model.init_weights()
    model.train()
    _check_outputs(model(_demo_inputs((2, 6, 64, 64))), target_shape)

    model = ResShortcutEnc(
        'BasicBlock', [3, 4, 4, 2], 6, late_downsample=True)
    model.init_weights()
    model.train()
    _check_outputs(model(_demo_inputs((2, 6, 64, 64))), target_late_ds_shape)

    if torch.cuda.is_available():
        model = ResShortcutEnc(
            'BasicBlock', [3, 4, 4, 2], 4, with_spectral_norm=True)
        assert hasattr(model.conv1.conv, 'weight_orig')
        model.init_weights()
        model.train()
        model.cuda()
        _check_outputs(
            model(_demo_inputs((2, 4, 64, 64)).cuda()), target_shape)

        model = ResShortcutEnc('BasicBlock', [3, 4, 4, 2], 6)
        model.init_weights()
        model.train()
        model.cuda()
        _check_outputs(
            model(_demo_inputs((2, 6, 64, 64)).cuda()), target_shape)

        model = ResShortcutEnc(
            'BasicBlock', [3, 4, 4, 2], 6, late_downsample=True)
        model.init_weights()
        model.train()
        model.cuda()
        _check_outputs(
            model(_demo_inputs((2, 6, 64, 64)).cuda()), target_late_ds_shape)
|
def test_res_gca_encoder():
    """Test ResGCAEncoder (shortcut + guided contextual attention)."""
    with pytest.raises(NotImplementedError):
        # unknown block types are rejected
        ResGCAEncoder('UnknownBlock', [3, 4, 4, 2], 3)

    target_shape = [(2, 32, 64, 64), (2, 32, 32, 32), (2, 64, 16, 16),
                    (2, 128, 8, 8), (2, 256, 4, 4)]
    # with late downsampling the second feature carries 64 channels
    target_late_ds = [(2, 32, 64, 64), (2, 64, 32, 32), (2, 64, 16, 16),
                      (2, 128, 8, 8), (2, 256, 4, 4)]

    # (in_channels, late_downsample, expected feat shapes)
    configs = [(4, False, target_shape), (6, False, target_shape),
               (6, True, target_late_ds)]

    def _check_outputs(outputs, feat_shapes):
        assert_tensor_with_shape(outputs['out'], (2, 512, 2, 2))
        # GCA additionally exposes the image feature and the unknown mask
        assert_tensor_with_shape(outputs['img_feat'], (2, 128, 8, 8))
        assert_tensor_with_shape(outputs['unknown'], (2, 1, 8, 8))
        for idx, shape in enumerate(feat_shapes):
            assert_tensor_with_shape(outputs[f'feat{idx + 1}'], shape)

    for in_channels, late_ds, feat_shapes in configs:
        model = ResGCAEncoder(
            'BasicBlock', [3, 4, 4, 2], in_channels, late_downsample=late_ds)
        model.init_weights()
        model.train()
        outputs = model(_demo_inputs((2, in_channels, 64, 64)))
        _check_outputs(outputs, feat_shapes)

    if torch.cuda.is_available():
        for in_channels, late_ds, feat_shapes in configs:
            model = ResGCAEncoder(
                'BasicBlock', [3, 4, 4, 2], in_channels,
                late_downsample=late_ds)
            model.init_weights()
            model.train()
            model.cuda()
            outputs = model(_demo_inputs((2, in_channels, 64, 64)).cuda())
            _check_outputs(outputs, feat_shapes)
|
def test_index_blocks():
    """Test index blocks for indexnet encoder.

    Covers the holistic block (with/without context and nonlinearity) and
    the depthwise block in 'o2o' and 'm2o' modes.
    """
    x = torch.rand(2, 128, 8, 8)

    # holistic index block: a single conv when context/nonlinearity are off
    block = HolisticIndexBlock(128, use_context=False, use_nonlinear=False)
    assert not isinstance(block.index_block, Iterable)
    enc_idx_feat, dec_idx_feat = block(x)
    assert enc_idx_feat.shape == (2, 1, 8, 8)
    assert dec_idx_feat.shape == (2, 1, 8, 8)

    # with context and nonlinearity the index block becomes a 2-element pair
    block = HolisticIndexBlock(128, use_context=True, use_nonlinear=True)
    assert len(block.index_block) == 2
    enc_idx_feat, dec_idx_feat = block(x)
    assert enc_idx_feat.shape == (2, 1, 8, 8)
    assert dec_idx_feat.shape == (2, 1, 8, 8)

    # depthwise index block, one-to-one mode.
    # NOTE(fix): this previously passed mode='oso', a typo — 'o2o' is the
    # mode name used elsewhere in this file (see test_indexnet_encoder).
    block = DepthwiseIndexBlock(
        128, use_context=False, mode='o2o', use_nonlinear=False)
    assert not isinstance(block.index_blocks[0], Iterable)
    enc_idx_feat, dec_idx_feat = block(x)
    assert enc_idx_feat.shape == (2, 128, 8, 8)
    assert dec_idx_feat.shape == (2, 128, 8, 8)

    # depthwise index block, many-to-one mode with context and nonlinearity
    block = DepthwiseIndexBlock(
        128, use_context=True, mode='m2o', use_nonlinear=True)
    assert len(block.index_blocks[0]) == 2
    enc_idx_feat, dec_idx_feat = block(x)
    assert enc_idx_feat.shape == (2, 128, 8, 8)
    assert dec_idx_feat.shape == (2, 128, 8, 8)
|
def test_indexnet_encoder():
    """Test IndexNet encoder across its three index modes."""
    with pytest.raises(ValueError):
        # out_stride other than the supported values is rejected
        IndexNetEncoder(4, out_stride=8)
    with pytest.raises(NameError):
        # unknown index modes are rejected
        IndexNetEncoder(4, index_mode='unknown_mode')

    def _check(encoder, out_shape, shortcut_shapes, dec_idx_shapes):
        """Run one forward pass and verify every reported shape."""
        encoder.init_weights()
        outputs = encoder(torch.rand(2, 4, 32, 32))
        assert outputs['out'].shape == out_shape
        assert len(outputs['shortcuts']) == 7
        for shortcut, shape in zip(outputs['shortcuts'], shortcut_shapes):
            assert shortcut.shape == shape
        assert len(outputs['dec_idx_feat_list']) == 7
        # entries without an index block are None and carry no shape
        for dec_idx_feat, shape in zip(outputs['dec_idx_feat_list'],
                                       dec_idx_shapes):
            if dec_idx_feat is not None:
                assert dec_idx_feat.shape == shape

    _check(
        IndexNetEncoder(4, out_stride=32, width_mult=1, index_mode='m2o',
                        aspp=True, use_nonlinear=True, use_context=True),
        (2, 160, 1, 1),
        [(2, 32, 32, 32), (2, 16, 16, 16), (2, 24, 16, 16), (2, 32, 8, 8),
         (2, 64, 4, 4), (2, 96, 2, 2), (2, 160, 2, 2)],
        [(2, 32, 32, 32), None, (2, 24, 16, 16), (2, 32, 8, 8),
         (2, 64, 4, 4), None, (2, 160, 2, 2)])

    _check(
        IndexNetEncoder(4, out_stride=16, width_mult=2, index_mode='o2o',
                        aspp=False, use_nonlinear=False, use_context=False),
        (2, 160, 2, 2),
        [(2, 64, 32, 32), (2, 32, 16, 16), (2, 48, 16, 16), (2, 64, 8, 8),
         (2, 128, 4, 4), (2, 192, 2, 2), (2, 320, 2, 2)],
        [(2, 64, 32, 32), None, (2, 48, 16, 16), (2, 64, 8, 8),
         (2, 128, 4, 4), None, None])

    _check(
        IndexNetEncoder(4, out_stride=16, width_mult=2, index_mode='holistic',
                        aspp=False, freeze_bn=True, use_nonlinear=False,
                        use_context=False),
        (2, 160, 2, 2),
        [(2, 64, 32, 32), (2, 32, 16, 16), (2, 48, 16, 16), (2, 64, 8, 8),
         (2, 128, 4, 4), (2, 192, 2, 2), (2, 320, 2, 2)],
        [(2, 1, 32, 32), None, (2, 1, 16, 16), (2, 1, 8, 8), (2, 1, 4, 4),
         None, None])
|
def test_fba_encoder():
    """Test FBA encoder construction checks and forward output contract."""
    with pytest.raises(KeyError):
        # depth 20 is not an available resnet depth
        FBAResnetDilated(
            20, in_channels=11, stem_channels=64, base_channels=64)
    with pytest.raises(AssertionError):
        # num_stages out of range (too small)
        FBAResnetDilated(
            50, in_channels=11, stem_channels=64, base_channels=64,
            num_stages=0)
    with pytest.raises(AssertionError):
        # num_stages out of range (too large)
        FBAResnetDilated(
            50, in_channels=11, stem_channels=64, base_channels=64,
            num_stages=5)
    with pytest.raises(AssertionError):
        # strides/dilations length must agree with num_stages
        FBAResnetDilated(
            50, in_channels=11, stem_channels=64, base_channels=64,
            strides=(1,), dilations=(1, 1), num_stages=3)
    with pytest.raises(TypeError):
        # pretrained must be a str or None
        model = FBAResnetDilated(
            50, in_channels=11, stem_channels=64, base_channels=64)
        model.init_weights(pretrained=233)

    fba_cfg = dict(
        depth=50, in_channels=11, stem_channels=64, base_channels=64,
        conv_cfg=dict(type='ConvWS'), norm_cfg=dict(type='GN', num_groups=32))

    def _verify(output):
        """Check the keys, types and shapes of one forward pass result."""
        for key in ('conv_out', 'merged', 'two_channel_trimap'):
            assert key in output.keys()
        assert isinstance(output['conv_out'], list)
        assert len(output['conv_out']) == 6
        assert isinstance(output['merged'], torch.Tensor)
        assert_tensor_with_shape(output['merged'],
                                 torch.Size([1, 3, 320, 320]))
        assert isinstance(output['two_channel_trimap'], torch.Tensor)
        assert_tensor_with_shape(output['two_channel_trimap'],
                                 torch.Size([1, 2, 320, 320]))

    model = FBAResnetDilated(**fba_cfg)
    model.init_weights()
    model.train()
    _verify(model(_demo_inputs((1, 14, 320, 320))))

    if torch.cuda.is_available():
        model = FBAResnetDilated(**fba_cfg)
        model.init_weights()
        model.train()
        model.cuda()
        _verify(model(_demo_inputs((1, 14, 320, 320)).cuda()))
|
def test_gl_encdec():
    """Test the Global&Local encoder-decoder backbone."""
    input_x = torch.randn(1, 4, 256, 256)
    template_cfg = dict(type='GLEncoderDecoder')

    # default config
    gl_encdec = build_backbone(template_cfg)
    gl_encdec.init_weights()
    assert gl_encdec(input_x).shape == (1, 3, 256, 256)

    # custom decoder output activation
    cfg_ = template_cfg.copy()
    cfg_['decoder'] = dict(type='GLDecoder', out_act='sigmoid')
    gl_encdec = build_backbone(cfg_)
    assert gl_encdec(input_x).shape == (1, 3, 256, 256)

    with pytest.raises(ValueError):
        # unknown output activation is rejected at build time
        cfg_ = template_cfg.copy()
        cfg_['decoder'] = dict(type='GLDecoder', out_act='igccc')
        gl_encdec = build_backbone(cfg_)

    with pytest.raises(TypeError):
        # pretrained must be a str or None
        gl_encdec.init_weights(pretrained=dict(igccc=4396))

    if torch.cuda.is_available():
        gl_encdec = build_backbone(template_cfg)
        gl_encdec.init_weights()
        gl_encdec = gl_encdec.cuda()
        assert gl_encdec(input_x.cuda()).shape == (1, 3, 256, 256)
|
def test_gl_dilation_neck():
    """Test GLDilationNeck: shape is preserved through the dilated convs."""
    neck = GLDilationNeck(in_channels=8)
    x = torch.rand((2, 8, 64, 64))
    assert neck(x).shape == (2, 8, 64, 64)

    if torch.cuda.is_available():
        neck = GLDilationNeck(in_channels=8).cuda()
        x = torch.rand((2, 8, 64, 64)).cuda()
        assert neck(x).shape == (2, 8, 64, 64)

        # gated-conv variant swaps in SimpleGatedConvModule layers
        neck = GLDilationNeck(in_channels=8, conv_type='gated_conv').cuda()
        res = neck(x)
        assert isinstance(neck.dilation_convs[0], SimpleGatedConvModule)
        assert res.shape == (2, 8, 64, 64)
|
def test_gl_discs():
    """Test the Global&Local discriminator pair."""
    # global and local branches differ only in conv depth
    disc_common = dict(
        in_channels=3, max_channels=512, fc_in_channels=512 * 4 * 4,
        fc_out_channels=1024, norm_cfg=dict(type='BN'))
    gl_disc_cfg = dict(
        type='GLDiscs',
        global_disc_cfg=dict(num_convs=6, **disc_common),
        local_disc_cfg=dict(num_convs=5, **disc_common))
    gl_discs = build_component(gl_disc_cfg)
    gl_discs.init_weights()

    input_g = torch.randn(1, 3, 256, 256)
    input_l = torch.randn(1, 3, 128, 128)
    output = gl_discs((input_g, input_l))
    assert output.shape == (1, 1)

    with pytest.raises(TypeError):
        # pretrained must be a str or None
        gl_discs.init_weights(pretrained=dict(igccc=777))

    if torch.cuda.is_available():
        gl_discs = gl_discs.cuda()
        output = gl_discs((input_g.cuda(), input_l.cuda()))
        assert output.shape == (1, 1)
|
def test_unet_skip_connection_block():
    """Test UnetSkipConnectionBlock at inner, middle and outer positions."""
    _cfg = dict(
        outer_channels=1, inner_channels=1, in_channels=None, submodule=None,
        is_outermost=False, is_innermost=False, norm_cfg=dict(type='BN'),
        use_dropout=True)
    feature = _demo_inputs((1, 1, 8, 8))
    img = _demo_inputs((1, 3, 8, 8))

    def _run(block, inputs, out_shape):
        """Forward on CPU, then on GPU when available (restoring CPU)."""
        assert block(inputs).shape == out_shape
        if torch.cuda.is_available():
            block.cuda()
            assert block(inputs.cuda()).shape == out_shape
            block.cpu()

    # innermost block: skip connection doubles the channel count
    cfg = copy.deepcopy(_cfg)
    cfg['is_innermost'] = True
    block = UnetSkipConnectionBlock(**cfg)
    _run(block, feature, (1, 2, 8, 8))

    # intermediate block wrapping the innermost one
    cfg = copy.deepcopy(_cfg)
    cfg['submodule'] = block
    block = UnetSkipConnectionBlock(**cfg)
    _run(block, feature, (1, 2, 8, 8))

    # outermost block: no skip concat on the output
    cfg = copy.deepcopy(_cfg)
    cfg['submodule'] = block
    cfg['is_outermost'] = True
    cfg['in_channels'] = 3
    cfg['outer_channels'] = 3
    block = UnetSkipConnectionBlock(**cfg)
    _run(block, img, (1, 3, 8, 8))

    # a block cannot be both innermost and outermost
    cfg = copy.deepcopy(_cfg)
    cfg['is_innermost'] = True
    cfg['is_outermost'] = True
    with pytest.raises(AssertionError):
        _ = UnetSkipConnectionBlock(**cfg)

    # norm_cfg must be a dict containing a 'type' key
    bad_cfg = copy.deepcopy(_cfg)
    bad_cfg['is_innermost'] = True
    bad_cfg['norm_cfg'] = None
    with pytest.raises(AssertionError):
        _ = UnetSkipConnectionBlock(**bad_cfg)
    bad_cfg['norm_cfg'] = dict(tp='BN')
    with pytest.raises(AssertionError):
        _ = UnetSkipConnectionBlock(**bad_cfg)
|
def test_unet_generator():
    """Test UnetGenerator for several in/out channel combinations."""

    def _build_and_check(in_channels, out_channels):
        """Build a generator, run it on CPU (and GPU when available)."""
        cfg = dict(
            type='UnetGenerator', in_channels=in_channels,
            out_channels=out_channels, num_down=8, base_channels=64,
            norm_cfg=dict(type='BN'), use_dropout=True,
            init_cfg=dict(type='normal', gain=0.02))
        net = build_backbone(cfg)
        net.init_weights(pretrained=None)
        img = _demo_inputs((1, in_channels, 256, 256))
        assert net(img).shape == (1, out_channels, 256, 256)
        if torch.cuda.is_available():
            net = net.cuda()
            assert net(img.cuda()).shape == (1, out_channels, 256, 256)
        return net, cfg

    _build_and_check(3, 3)
    _build_and_check(1, 3)
    net, cfg = _build_and_check(3, 1)

    with pytest.raises(TypeError):
        # pretrained must be a str or None
        net.init_weights(pretrained=[1])

    # norm_cfg must be a dict containing a 'type' key
    bad_cfg = copy.deepcopy(cfg)
    bad_cfg['norm_cfg'] = None
    with pytest.raises(AssertionError):
        _ = build_backbone(bad_cfg)
    bad_cfg['norm_cfg'] = dict(tp='BN')
    with pytest.raises(AssertionError):
        _ = build_backbone(bad_cfg)
|
def test_residual_block_with_dropout():
    """Test ResidualBlockWithDropout across padding/norm/dropout options."""
    _cfg = dict(
        channels=3, padding_mode='reflect', norm_cfg=dict(type='BN'),
        use_dropout=True)
    feature = _demo_inputs((1, 3, 32, 32))

    def _run(block):
        """Check shape preservation on CPU and, if available, GPU."""
        assert block(feature).shape == (1, 3, 32, 32)
        if torch.cuda.is_available():
            block = block.cuda()
            assert block(feature.cuda()).shape == (1, 3, 32, 32)

    # default reflect padding
    _run(ResidualBlockWithDropout(**_cfg))

    # the other supported padding modes
    for mode in ('replicate', 'zeros'):
        cfg = copy.deepcopy(_cfg)
        cfg['padding_mode'] = mode
        _run(ResidualBlockWithDropout(**cfg))

    # an unsupported padding mode is rejected
    cfg = copy.deepcopy(_cfg)
    cfg['padding_mode'] = 'abc'
    with pytest.raises(KeyError):
        block = ResidualBlockWithDropout(**cfg)

    # instance-norm variant
    cfg = copy.deepcopy(_cfg)
    cfg['norm_cfg'] = dict(type='IN')
    _run(ResidualBlockWithDropout(**cfg))

    # dropout disabled
    cfg = copy.deepcopy(_cfg)
    cfg['use_dropout'] = False
    _run(ResidualBlockWithDropout(**cfg))

    # norm_cfg must be a dict containing a 'type' key
    bad_cfg = copy.deepcopy(_cfg)
    bad_cfg['norm_cfg'] = None
    with pytest.raises(AssertionError):
        _ = ResidualBlockWithDropout(**bad_cfg)
    bad_cfg['norm_cfg'] = dict(tp='BN')
    with pytest.raises(AssertionError):
        _ = ResidualBlockWithDropout(**bad_cfg)
|
def test_resnet_generator():
    """Test ResnetGenerator for several in/out channel combinations."""

    def _build_and_check(in_channels, out_channels):
        """Build a generator, run it on CPU (and GPU when available)."""
        cfg = dict(
            type='ResnetGenerator', in_channels=in_channels,
            out_channels=out_channels, base_channels=64,
            norm_cfg=dict(type='IN'), use_dropout=False, num_blocks=9,
            padding_mode='reflect', init_cfg=dict(type='normal', gain=0.02))
        net = build_backbone(cfg)
        net.init_weights(pretrained=None)
        img = _demo_inputs((1, in_channels, 256, 256))
        assert net(img).shape == (1, out_channels, 256, 256)
        if torch.cuda.is_available():
            net = net.cuda()
            assert net(img.cuda()).shape == (1, out_channels, 256, 256)
        return net, cfg

    _build_and_check(3, 3)
    _build_and_check(1, 3)
    net, cfg = _build_and_check(3, 1)

    # num_blocks must be non-negative
    bad_cfg = copy.deepcopy(cfg)
    bad_cfg['num_blocks'] = -1
    with pytest.raises(AssertionError):
        net = build_backbone(bad_cfg)

    with pytest.raises(TypeError):
        # pretrained must be a str or None
        net.init_weights(pretrained=[1])

    # norm_cfg must be a dict containing a 'type' key
    bad_cfg = copy.deepcopy(cfg)
    bad_cfg['norm_cfg'] = None
    with pytest.raises(AssertionError):
        _ = build_backbone(bad_cfg)
    bad_cfg['norm_cfg'] = dict(tp='IN')
    with pytest.raises(AssertionError):
        _ = build_backbone(bad_cfg)
|
def _demo_inputs(input_shape=(1, 3, 64, 64)):
'Create a superset of inputs needed to run backbone.\n\n Args:\n input_shape (tuple): input batch dimensions.\n Default: (1, 3, 64, 64).\n\n Returns:\n imgs: (Tensor): Images in FloatTensor with desired shapes.\n '
imgs = np.random.random(input_shape)
imgs = torch.FloatTensor(imgs)
return imgs
|
def test_basicvsr_net():
    """Test BasicVSR."""
    # cpu
    basicvsr = BasicVSRNet(
        mid_channels=64, num_blocks=30, spynet_pretrained=None)
    basicvsr.init_weights(pretrained=None)
    lrs = torch.rand(1, 5, 3, 64, 64)
    assert basicvsr(lrs).shape == (1, 5, 3, 256, 256)

    # gpu
    if torch.cuda.is_available():
        basicvsr = BasicVSRNet(
            mid_channels=64, num_blocks=30, spynet_pretrained=None).cuda()
        basicvsr.init_weights(pretrained=None)
        lrs = torch.rand(1, 5, 3, 64, 64).cuda()
        assert basicvsr(lrs).shape == (1, 5, 3, 256, 256)

    with pytest.raises(AssertionError):
        # spatial size below 64x64 is rejected
        basicvsr(torch.rand(1, 5, 3, 61, 61))

    with pytest.raises(TypeError):
        # pretrained must be a str or None
        basicvsr.init_weights(pretrained=[1])
|
def test_basicvsr_plusplus():
    """Test BasicVSR++.

    Covers the low-res input path, the CPU-caching path (small
    ``cpu_cache_length``), the full-resolution input path, and input /
    ``pretrained`` validation — each on CPU and, when available, GPU.
    """
    model = BasicVSRPlusPlus(
        mid_channels=64, num_blocks=7, is_low_res_input=True,
        spynet_pretrained=None, cpu_cache_length=100)
    input_tensor = torch.rand(1, 5, 3, 64, 64)
    model.init_weights(pretrained=None)
    output = model(input_tensor)
    assert output.shape == (1, 5, 3, 256, 256)

    # a small cpu_cache_length exercises the CPU-caching branch
    model = BasicVSRPlusPlus(
        mid_channels=64, num_blocks=7, is_low_res_input=True,
        spynet_pretrained=None, cpu_cache_length=3)
    output = model(input_tensor)
    assert output.shape == (1, 5, 3, 256, 256)

    with pytest.raises(AssertionError):
        # spatial size below 64x64 is rejected
        input_tensor = torch.rand(1, 5, 3, 61, 61)
        model(input_tensor)

    with pytest.raises(TypeError):
        # pretrained must be a str or None
        model.init_weights(pretrained=[1])

    # with is_low_res_input=False the input is already at output resolution
    model = BasicVSRPlusPlus(
        mid_channels=64, num_blocks=7, is_low_res_input=False,
        spynet_pretrained=None, cpu_cache_length=100)
    input_tensor = torch.rand(1, 5, 3, 256, 256)
    output = model(input_tensor)
    assert output.shape == (1, 5, 3, 256, 256)

    if torch.cuda.is_available():
        model = BasicVSRPlusPlus(
            mid_channels=64, num_blocks=7, is_low_res_input=True,
            spynet_pretrained=None, cpu_cache_length=100).cuda()
        input_tensor = torch.rand(1, 5, 3, 64, 64).cuda()
        model.init_weights(pretrained=None)
        output = model(input_tensor)
        assert output.shape == (1, 5, 3, 256, 256)

        model = BasicVSRPlusPlus(
            mid_channels=64, num_blocks=7, is_low_res_input=True,
            spynet_pretrained=None, cpu_cache_length=3).cuda()
        output = model(input_tensor)
        assert output.shape == (1, 5, 3, 256, 256)

        with pytest.raises(AssertionError):
            input_tensor = torch.rand(1, 5, 3, 61, 61).cuda()
            model(input_tensor)

        with pytest.raises(TypeError):
            # pretrained must be a str or None.
            # fix: a stray `.cuda()` used to be chained onto this call —
            # init_weights returns None, so the chain was dead/broken code
            # masked by the expected TypeError.
            model.init_weights(pretrained=[1])

        model = BasicVSRPlusPlus(
            mid_channels=64, num_blocks=7, is_low_res_input=False,
            spynet_pretrained=None, cpu_cache_length=100).cuda()
        input_tensor = torch.rand(1, 5, 3, 256, 256).cuda()
        output = model(input_tensor)
        assert output.shape == (1, 5, 3, 256, 256)
|
def test_feedback_block():
    """FeedbackBlock must preserve shape across repeated feedback passes."""
    model = FeedbackBlock(16, 3, 8)
    out = torch.rand(2, 16, 32, 32)
    for _ in range(2):
        prev = out
        out = model(prev)
        assert out.shape == prev.shape
|
def test_feedback_block_custom():
    """FeedbackBlockCustom maps the input channels to the mid channels."""
    model = FeedbackBlockCustom(3, 16, 3, 8)
    out = model(torch.rand(2, 3, 32, 32))
    assert out.shape == (2, 16, 32, 32)
|
def test_feedback_block_heatmap_attention():
    """Heatmap-attention feedback block preserves shape across passes."""
    model = FeedbackBlockHeatmapAttention(16, 2, 8, 5, 2)
    heatmap = torch.rand(2, 5, 32, 32)
    out = torch.rand(2, 16, 32, 32)
    for _ in range(2):
        prev = out
        out = model(prev, heatmap)
        assert out.shape == prev.shape
|
def test_dic_net():
    """Test DICNet: one training step on CPU and, when available, GPU."""
    model_cfg = dict(
        type='DICNet', in_channels=3, out_channels=3, mid_channels=48,
        num_blocks=6, hg_mid_channels=256, hg_num_keypoints=68, num_steps=4,
        upscale_factor=8, detach_attention=False)

    model = build_backbone(model_cfg)
    assert model.__class__.__name__ == 'DICNet'

    inputs = torch.rand(1, 3, 16, 16)
    targets = torch.rand(1, 3, 128, 128)
    loss_function = nn.L1Loss()

    def _train_step(net, lr, gt):
        """Run one optimization step and check the SR outputs."""
        optimizer = torch.optim.Adam(net.parameters())
        output, _ = net(lr)
        optimizer.zero_grad()
        loss = loss_function(output[-1], gt)
        loss.backward()
        optimizer.step()
        # one SR prediction per recurrent step (num_steps=4)
        assert len(output) == 4
        assert torch.is_tensor(output[-1])
        assert output[-1].shape == gt.shape

    _train_step(model, inputs, targets)

    if torch.cuda.is_available():
        model = model.cuda()
        _train_step(model, inputs.cuda(), targets.cuda())

    with pytest.raises(OSError):
        # an empty path cannot be loaded as pretrained weights
        model.init_weights('')
    with pytest.raises(TypeError):
        # pretrained must be a str or None
        model.init_weights(1)
|
def test_dynamic_upsampling_filter():
    """Test DynamicUpsamplingFilter."""
    with pytest.raises(TypeError):
        # filter_size must be a tuple, not a bare int
        DynamicUpsamplingFilter(filter_size=3)
    with pytest.raises(ValueError):
        # filter_size must have exactly two entries
        DynamicUpsamplingFilter(filter_size=(3, 3, 3))

    x = torch.rand(1, 3, 4, 4)

    # the filter dimension must match prod(filter_size): 25 for 5x5
    duf = DynamicUpsamplingFilter(filter_size=(5, 5))
    assert duf(x, torch.rand(1, 25, 16, 4, 4)).shape == (1, 48, 4, 4)

    # 9 for 3x3
    duf = DynamicUpsamplingFilter(filter_size=(3, 3))
    assert duf(x, torch.rand(1, 9, 16, 4, 4)).shape == (1, 48, 4, 4)

    if torch.cuda.is_available():
        duf = DynamicUpsamplingFilter(filter_size=(3, 3)).cuda()
        x = torch.rand(1, 3, 4, 4).cuda()
        filters = torch.rand(1, 9, 16, 4, 4).cuda()
        assert duf(x, filters).shape == (1, 48, 4, 4)
|
def test_pcd_alignment():
    """Test PCDAlignment.

    Feeds a 3-level feature pyramid (8x8, 4x4, 2x2) as both the neighbor
    and reference pyramids and checks the aligned output shape.

    fix: removed the no-op statements ``pcd_alignment = pcd_alignment`` and
    ``input_list = [v for v in input_list]`` — leftovers of the ``.cuda()``
    conversions in the GPU branch.
    """
    pcd_alignment = PCDAlignment(mid_channels=4, deform_groups=2)
    input_list = [torch.rand(1, 4, 2**i, 2**i) for i in range(3, 0, -1)]
    output = pcd_alignment(input_list, input_list)
    assert output.shape == (1, 4, 8, 8)

    with pytest.raises(AssertionError):
        # both pyramids must have all three levels
        pcd_alignment(input_list[0:2], input_list)

    if torch.cuda.is_available():
        pcd_alignment = PCDAlignment(mid_channels=4, deform_groups=2).cuda()
        input_list = [
            torch.rand(1, 4, 2**i, 2**i).cuda() for i in range(3, 0, -1)
        ]
        output = pcd_alignment(input_list, input_list)
        assert output.shape == (1, 4, 8, 8)

        with pytest.raises(AssertionError):
            pcd_alignment(input_list[0:2], input_list)
|
def test_tsa_fusion():
    """Test TSAFusion: five input frames fuse into one feature map."""
    tsa_fusion = TSAFusion(mid_channels=4, num_frames=5, center_frame_idx=2)
    frames = torch.rand(1, 5, 4, 8, 8)
    assert tsa_fusion(frames).shape == (1, 4, 8, 8)

    if torch.cuda.is_available():
        tsa_fusion = tsa_fusion.cuda()
        frames = frames.cuda()
        assert tsa_fusion(frames).shape == (1, 4, 8, 8)
|
def test_edvrnet():
    """Test EDVRNet with and without TSA fusion, on CPU and GPU."""
    edvr_cfg = dict(
        mid_channels=8, num_frames=5, deform_groups=2,
        num_blocks_extraction=1, num_blocks_reconstruction=1,
        center_frame_idx=2)

    # cpu, TSA fusion enabled
    edvrnet = EDVRNet(3, 3, with_tsa=True, **edvr_cfg)
    input_tensor = torch.rand(1, 5, 3, 8, 8)
    edvrnet.init_weights(pretrained=None)
    assert edvrnet(input_tensor).shape == (1, 3, 32, 32)

    # cpu, TSA fusion disabled
    edvrnet = EDVRNet(3, 3, with_tsa=False, **edvr_cfg)
    assert edvrnet(input_tensor).shape == (1, 3, 32, 32)

    with pytest.raises(AssertionError):
        # too-small spatial size is rejected
        edvrnet(torch.rand(1, 5, 3, 3, 3))
    with pytest.raises(TypeError):
        # pretrained must be a str or None
        edvrnet.init_weights(pretrained=[1])

    if torch.cuda.is_available():
        edvrnet = EDVRNet(3, 3, with_tsa=True, **edvr_cfg).cuda()
        input_tensor = torch.rand(1, 5, 3, 8, 8).cuda()
        edvrnet.init_weights(pretrained=None)
        assert edvrnet(input_tensor).shape == (1, 3, 32, 32)

        edvrnet = EDVRNet(3, 3, with_tsa=False, **edvr_cfg).cuda()
        assert edvrnet(input_tensor).shape == (1, 3, 32, 32)

        with pytest.raises(AssertionError):
            edvrnet(torch.rand(1, 5, 3, 3, 3).cuda())
        with pytest.raises(TypeError):
            edvrnet.init_weights(pretrained=[1])
|
class TestGLEANNet():

    @classmethod
    def setup_class(cls):
        # one valid config and one whose out_size equals in_size (invalid)
        cls.default_cfg = dict(in_size=16, out_size=256, style_channels=512)
        cls.size_cfg = dict(in_size=16, out_size=16, style_channels=512)

    def test_glean_styleganv2_cpu(self):
        """Forward pass and error paths on CPU."""
        net = GLEANStyleGANv2(**self.default_cfg)
        out = net(torch.randn(2, 3, 16, 16))
        assert out.shape == (2, 3, 256, 256)
        with pytest.raises(TypeError):
            net.init_weights(pretrained=[1])
        with pytest.raises(AssertionError):
            # input spatial size must match in_size
            net(torch.randn(2, 3, 17, 32))
        with pytest.raises(ValueError):
            GLEANStyleGANv2(**self.size_cfg)

    @pytest.mark.skipif(
        not torch.cuda.is_available(), reason='requires cuda')
    def test_glean_styleganv2_cuda(self):
        """Forward pass and error paths on GPU."""
        net = GLEANStyleGANv2(**self.default_cfg).cuda()
        out = net(torch.randn(2, 3, 16, 16).cuda())
        assert out.shape == (2, 3, 256, 256)
        with pytest.raises(TypeError):
            net.init_weights(pretrained=[1])
        with pytest.raises(AssertionError):
            net(torch.randn(2, 3, 32, 17).cuda())
        with pytest.raises(ValueError):
            GLEANStyleGANv2(**self.size_cfg).cuda()
|
def test_iconvsr():
    """Check IconVSR forward shape and pretrained-arg checks (GPU only)."""
    if torch.cuda.is_available():
        net = IconVSR(
            mid_channels=64, num_blocks=30, keyframe_stride=5, padding=2,
            spynet_pretrained=None, edvr_pretrained=None).cuda()
        frames = torch.rand(1, 5, 3, 64, 64).cuda()
        net.init_weights(pretrained=None)
        assert net(frames).shape == (1, 5, 3, 256, 256)
        with pytest.raises(AssertionError):
            # spatial size below the supported minimum
            net(torch.rand(1, 5, 3, 61, 61))
        with pytest.raises(TypeError):
            net.init_weights(pretrained=[1])
        with pytest.raises(TypeError):
            IconVSR(
                mid_channels=64, num_blocks=30, keyframe_stride=5,
                padding=2, spynet_pretrained=123,
                edvr_pretrained=None).cuda()
        with pytest.raises(TypeError):
            IconVSR(
                mid_channels=64, num_blocks=30, keyframe_stride=5,
                padding=2, spynet_pretrained=None,
                edvr_pretrained=123).cuda()
|
def test_liif_edsr():
    """Build LIIFEDSR from config and smoke-test both forward modes."""
    cfg = dict(
        type='LIIFEDSR',
        encoder=dict(
            type='EDSR', in_channels=3, out_channels=3, mid_channels=64,
            num_blocks=16),
        imnet=dict(
            type='MLPRefiner', in_dim=64, out_dim=3,
            hidden_list=[256, 256, 256, 256]),
        local_ensemble=True,
        feat_unfold=True,
        cell_decode=True,
        eval_bsize=30000)
    net = build_backbone(cfg)
    assert net.__class__.__name__ == 'LIIFEDSR'
    imgs = torch.rand(1, 3, 22, 11)
    gt = torch.rand(1, 128 * 64, 3)
    coord = torch.rand(1, 128 * 64, 2)
    cell = torch.rand(1, 128 * 64, 2)
    net(imgs, coord, cell)
    pred = net(imgs, coord, cell, True)
    assert torch.is_tensor(pred)
    assert pred.shape == gt.shape
    if torch.cuda.is_available():
        net = net.cuda()
        imgs, gt = imgs.cuda(), gt.cuda()
        coord, cell = coord.cuda(), cell.cuda()
        net(imgs, coord, cell)
        pred = net(imgs, coord, cell, True)
        assert torch.is_tensor(pred)
        assert pred.shape == gt.shape
|
def test_liif_rdn():
    """Build LIIFRDN from config and smoke-test both forward modes."""
    cfg = dict(
        type='LIIFRDN',
        encoder=dict(
            type='RDN', in_channels=3, out_channels=3, mid_channels=64,
            num_blocks=16, upscale_factor=4, num_layers=8,
            channel_growth=64),
        imnet=dict(
            type='MLPRefiner', in_dim=64, out_dim=3,
            hidden_list=[256, 256, 256, 256]),
        local_ensemble=True,
        feat_unfold=True,
        cell_decode=True,
        eval_bsize=30000)
    net = build_backbone(cfg)
    assert net.__class__.__name__ == 'LIIFRDN'
    imgs = torch.rand(1, 3, 22, 11)
    gt = torch.rand(1, 128 * 64, 3)
    coord = torch.rand(1, 128 * 64, 2)
    cell = torch.rand(1, 128 * 64, 2)
    net(imgs, coord, cell)
    pred = net(imgs, coord, cell, True)
    assert torch.is_tensor(pred)
    assert pred.shape == gt.shape
    if torch.cuda.is_available():
        net = net.cuda()
        imgs, gt = imgs.cuda(), gt.cuda()
        coord, cell = coord.cuda(), cell.cuda()
        net(imgs, coord, cell)
        pred = net(imgs, coord, cell, True)
        assert torch.is_tensor(pred)
        assert pred.shape == gt.shape
|
def test_rdn():
    """Run one RDN training step and verify x4 output shapes on CPU/GPU."""
    scale = 4
    net = build_backbone(
        dict(
            type='RDN', in_channels=3, out_channels=3, mid_channels=64,
            num_blocks=16, upscale_factor=scale))
    assert net.__class__.__name__ == 'RDN'
    lq = torch.rand(1, 3, 32, 16)
    gt = torch.rand(1, 3, 128, 64)
    criterion = nn.L1Loss()

    def _one_step(model, inputs, targets):
        # single optimization step to confirm gradients flow end to end
        optimizer = torch.optim.Adam(model.parameters())
        out = model(inputs)
        optimizer.zero_grad()
        criterion(out, targets).backward()
        optimizer.step()
        return out

    out = _one_step(net, lq, gt)
    assert torch.is_tensor(out)
    assert out.shape == gt.shape
    if torch.cuda.is_available():
        out = _one_step(net.cuda(), lq.cuda(), gt.cuda())
        assert torch.is_tensor(out)
        assert out.shape == gt.shape
|
def test_real_basicvsr_net():
    """Check RealBasicVSRNet forward, with and without returning LQs."""
    RealBasicVSRNet(is_fix_cleaning=False)
    net = RealBasicVSRNet(is_fix_cleaning=True, is_sequential_cleaning=False)
    frames = torch.rand(1, 5, 3, 64, 64)
    net.init_weights(pretrained=None)
    assert net(frames).shape == (1, 5, 3, 256, 256)
    net = RealBasicVSRNet(is_fix_cleaning=True, is_sequential_cleaning=True)
    sr, lqs = net(frames, return_lqs=True)
    assert sr.shape == (1, 5, 3, 256, 256)
    assert lqs.shape == (1, 5, 3, 64, 64)
    with pytest.raises(TypeError):
        net.init_weights(pretrained=[1])
    if torch.cuda.is_available():
        RealBasicVSRNet(is_fix_cleaning=False).cuda()
        net = RealBasicVSRNet(
            is_fix_cleaning=True, is_sequential_cleaning=False).cuda()
        frames = torch.rand(1, 5, 3, 64, 64).cuda()
        net.init_weights(pretrained=None)
        assert net(frames).shape == (1, 5, 3, 256, 256)
        net = RealBasicVSRNet(
            is_fix_cleaning=True, is_sequential_cleaning=True).cuda()
        sr, lqs = net(frames, return_lqs=True)
        assert sr.shape == (1, 5, 3, 256, 256)
        assert lqs.shape == (1, 5, 3, 64, 64)
        with pytest.raises(TypeError):
            net.init_weights(pretrained=[1])
|
def test_srresnet_backbone():
    """MSRResNet supports x2/x3/x4 upscaling and rejects other factors."""
    MSRResNet(
        in_channels=3, out_channels=3, mid_channels=8, num_blocks=2,
        upscale_factor=2)
    net = MSRResNet(
        in_channels=3, out_channels=3, mid_channels=8, num_blocks=2,
        upscale_factor=3)
    net.init_weights(pretrained=None)
    img = _demo_inputs((1, 3, 12, 12))
    assert net(img).shape == (1, 3, 36, 36)
    net = MSRResNet(
        in_channels=3, out_channels=3, mid_channels=8, num_blocks=2,
        upscale_factor=4)
    net.init_weights(pretrained=None)
    assert net(img).shape == (1, 3, 48, 48)
    if torch.cuda.is_available():
        net = net.cuda()
        assert net(img.cuda()).shape == (1, 3, 48, 48)
        with pytest.raises(TypeError):
            net.init_weights(pretrained=[1])
    with pytest.raises(ValueError):
        # factors outside {2, 3, 4} are unsupported
        MSRResNet(
            in_channels=3, out_channels=3, mid_channels=64, num_blocks=16,
            upscale_factor=16)
|
def test_edsr():
    """EDSR forward for x2/x3/x4 plus a single-channel (grayscale) net."""
    EDSR(in_channels=3, out_channels=3, mid_channels=8, num_blocks=2,
         upscale_factor=2)
    net = EDSR(in_channels=3, out_channels=3, mid_channels=8, num_blocks=2,
               upscale_factor=3)
    net.init_weights(pretrained=None)
    img = _demo_inputs((1, 3, 12, 12))
    assert net(img).shape == (1, 3, 36, 36)
    net = EDSR(in_channels=3, out_channels=3, mid_channels=8, num_blocks=2,
               upscale_factor=4)
    net.init_weights(pretrained=None)
    assert net(img).shape == (1, 3, 48, 48)
    net = EDSR(in_channels=1, out_channels=1, mid_channels=8, num_blocks=2,
               upscale_factor=4, rgb_mean=[0], rgb_std=[1])
    net.init_weights(pretrained=None)
    gray = _demo_inputs((1, 1, 12, 12))
    assert net(gray).shape == (1, 1, 48, 48)
    if torch.cuda.is_available():
        net = net.cuda()
        assert net(gray.cuda()).shape == (1, 1, 48, 48)
        with pytest.raises(TypeError):
            net.init_weights(pretrained=[1])
    with pytest.raises(ValueError):
        # upscale factor 5 is unsupported
        EDSR(in_channels=3, out_channels=3, mid_channels=64, num_blocks=16,
             upscale_factor=5)
|
def test_discriminator():
    """ModifiedVGG discriminator accepts only 128x128 inputs."""
    net = ModifiedVGG(in_channels=3, mid_channels=64)
    net.init_weights(pretrained=None)
    img = _demo_inputs((1, 3, 128, 128))
    assert net(img).shape == (1, 1)
    if torch.cuda.is_available():
        net.init_weights(pretrained=None)
        net.cuda()
        assert net(img.cuda()).shape == (1, 1)
    with pytest.raises(TypeError):
        net.init_weights(pretrained=[1])
    with pytest.raises(AssertionError):
        # spatial size other than 128 is rejected
        net(_demo_inputs((1, 3, 64, 64)))
|
def test_rrdbnet_backbone():
    """RRDBNet supports x2 and x4 upscaling; x3 raises ValueError."""
    common = dict(
        in_channels=3, out_channels=3, mid_channels=8, num_blocks=2,
        growth_channels=4)
    net = RRDBNet(upscale_factor=4, **common)
    net.init_weights(pretrained=None)
    img = _demo_inputs((1, 3, 12, 12))
    assert net(img).shape == (1, 3, 48, 48)
    with pytest.raises(ValueError):
        net = RRDBNet(upscale_factor=3, **common)
    net = RRDBNet(upscale_factor=2, **common)
    net.init_weights(pretrained=None)
    img = _demo_inputs((1, 3, 12, 12))
    assert net(img).shape == (1, 3, 24, 24)
    if torch.cuda.is_available():
        net = net.cuda()
        assert net(img.cuda()).shape == (1, 3, 24, 24)
        with pytest.raises(TypeError):
            net.init_weights(pretrained=[1])
|
def test_srcnn():
    """SRCNN forward shapes plus channel/kernel config validation."""
    net = SRCNN(channels=(3, 4, 6, 3), kernel_sizes=(9, 1, 5),
                upscale_factor=4)
    net.init_weights(pretrained=None)
    img = _demo_inputs((1, 3, 4, 4))
    assert net(img).shape == (1, 3, 16, 16)
    net = SRCNN(channels=(1, 4, 8, 1), kernel_sizes=(3, 3, 3),
                upscale_factor=2)
    net.init_weights(pretrained=None)
    img = _demo_inputs((1, 1, 4, 4))
    assert net(img).shape == (1, 1, 8, 8)
    if torch.cuda.is_available():
        net = net.cuda()
        assert net(img.cuda()).shape == (1, 1, 8, 8)
    with pytest.raises(AssertionError):
        # channels must contain exactly four entries
        net = SRCNN(channels=(3, 4, 3), kernel_sizes=(9, 1, 5),
                    upscale_factor=4)
    with pytest.raises(AssertionError):
        # kernel_sizes must contain exactly three entries
        net = SRCNN(channels=(3, 4, 4, 3), kernel_sizes=(9, 1, 1, 5),
                    upscale_factor=4)
    with pytest.raises(TypeError):
        net.init_weights(pretrained=[1])
|
def _demo_inputs(input_shape=(1, 3, 64, 64)):
    """Create random float images for backbone smoke tests.

    Args:
        input_shape (tuple): Batch dimensions of the returned tensor.
            Default: (1, 3, 64, 64).

    Returns:
        Tensor: Uniform random values in [0, 1) as a FloatTensor of
            shape ``input_shape``.
    """
    return torch.FloatTensor(np.random.random(input_shape))
|
def test_tdan_net():
    """TDANNet returns the SR frame plus aligned LR frames (GPU only)."""
    if torch.cuda.is_available():
        net = TDANNet().cuda()
        frames = torch.rand(1, 5, 3, 64, 64).cuda()
        net.init_weights(pretrained=None)
        out = net(frames)
        assert len(out) == 2
        assert out[0].shape == (1, 3, 256, 256)
        assert out[1].shape == (1, 5, 3, 64, 64)
        with pytest.raises(TypeError):
            net.init_weights(pretrained=[1])
|
def test_tof():
    """TOFlow forward in both weight layouts, on CPU and GPU."""
    frames = torch.rand(1, 7, 3, 32, 32)
    for adapt in (True, False):
        net = TOFlow(adapt_official_weights=adapt)
        net.init_weights(pretrained=None)
        assert net(frames).shape == (1, 3, 32, 32)
    with pytest.raises(TypeError):
        net.init_weights(pretrained=[1])
    if torch.cuda.is_available():
        net = TOFlow(adapt_official_weights=True).cuda()
        net.init_weights(pretrained=None)
        assert net(frames.cuda()).shape == (1, 3, 32, 32)
|
def test_tof_vfi_net():
    """TOFlowVFINet interpolation shapes and weight-loading errors."""
    model = build_backbone(dict(type='TOFlowVFINet'))
    assert model.__class__.__name__ == 'TOFlowVFINet'
    pair = torch.rand(1, 2, 3, 256, 248)
    out = model(pair)
    assert torch.is_tensor(out)
    assert out.shape == (1, 3, 256, 248)
    if torch.cuda.is_available():
        model = model.cuda()
        pair = pair.cuda()
        model(pair)
        out = model(pair, True)
        assert torch.is_tensor(out)
        assert out.shape == (1, 3, 256, 256)
        out = model(torch.rand(1, 2, 3, 256, 256))
        assert torch.is_tensor(out)
    with pytest.raises(OSError):
        model.init_weights('')
    with pytest.raises(TypeError):
        model.init_weights(1)
    with pytest.raises(OSError):
        build_backbone(
            dict(type='TOFlowVFINet',
                 flow_cfg=dict(norm_cfg=None, pretrained='')))
    with pytest.raises(TypeError):
        build_backbone(
            dict(type='TOFlowVFINet',
                 flow_cfg=dict(norm_cfg=None, pretrained=1)))
|
class TestBaseModel(unittest.TestCase):

    @patch.multiple(BaseModel, __abstractmethods__=set())
    def test_parse_losses(self):
        """parse_losses sums tensor losses and logs every component."""
        self.base_model = BaseModel()
        with pytest.raises(TypeError):
            # plain floats are not accepted as loss values
            self.base_model.parse_losses(dict(loss=0.5))
        tensor_list = [torch.randn(5, 5), torch.randn(5, 5)]
        single_tensor = torch.randn(5, 5)
        losses = dict(a_loss=tensor_list, b_loss=single_tensor)
        expected_a = sum((t.mean() for t in tensor_list))
        expected_b = single_tensor.mean()
        expected_total = sum([expected_a, expected_b])
        loss, log_vars = self.base_model.parse_losses(losses)
        assert loss == expected_total
        assert set(log_vars.keys()) == set(['a_loss', 'b_loss', 'loss'])
        assert log_vars['a_loss'] == expected_a
        assert log_vars['b_loss'] == expected_b
        assert log_vars['loss'] == expected_total
|
def test_ensemble_cpu():
    """Spatial(-temporal) ensemble with an identity model is a no-op."""
    model = nn.Identity()
    for shape, temporal in (((1, 3, 4, 4), False),
                            ((1, 2, 3, 4, 4), False),
                            ((1, 2, 3, 4, 4), True)):
        ensemble = SpatialTemporalEnsemble(is_temporal_ensemble=temporal)
        inputs = torch.rand(*shape)
        outputs = ensemble(inputs, model)
        np.testing.assert_almost_equal(inputs.numpy(), outputs.numpy())
    with pytest.raises(ValueError):
        # temporal ensemble requires a 5-D (n, t, c, h, w) input
        ensemble = SpatialTemporalEnsemble(is_temporal_ensemble=True)
        ensemble(torch.rand(1, 3, 4, 4), model)
|
def test_ensemble_cuda():
    """Identity-model ensemble is a no-op on GPU inputs as well."""
    if torch.cuda.is_available():
        model = nn.Identity().cuda()
        for shape, temporal in (((1, 3, 4, 4), False),
                                ((1, 2, 3, 4, 4), False),
                                ((1, 2, 3, 4, 4), True)):
            ensemble = SpatialTemporalEnsemble(
                is_temporal_ensemble=temporal)
            inputs = torch.rand(*shape).cuda()
            outputs = ensemble(inputs, model)
            np.testing.assert_almost_equal(
                inputs.cpu().numpy(), outputs.cpu().numpy())
        with pytest.raises(ValueError):
            # temporal ensemble requires a 5-D (n, t, c, h, w) input
            ensemble = SpatialTemporalEnsemble(is_temporal_ensemble=True)
            ensemble(torch.rand(1, 3, 4, 4).cuda(), model)
|
def test_normalize_layer():
    """ImgNormalize should shift by mean and scale by std per channel."""
    mean = (1, 2, 3)
    std = (1, 0.5, 0.25)
    layer = ImgNormalize(1, mean, std)
    inputs = torch.randn((2, 3, 64, 64))
    outputs = layer(inputs)
    # flatten to (channel, pixels) to compute per-channel statistics
    flat_in = inputs.permute((1, 0, 2, 3)).reshape((3, -1))
    flat_out = outputs.permute((1, 0, 2, 3)).reshape((3, -1))
    mean_t = torch.tensor(mean)
    std_t = torch.tensor(std)
    in_mean, out_mean = flat_in.mean(dim=1), flat_out.mean(dim=1)
    in_std, out_std = flat_in.std(dim=1), flat_out.std(dim=1)
    # std ratio should equal the configured std, and the normalized
    # mean should match the output mean (up to float tolerance)
    assert sum(torch.div(in_std, out_std) - std_t) < 1e-05
    assert sum(torch.div(in_mean - mean_t, std_t) - out_mean) < 1e-05
|
def test_pixel_shuffle():
    """PixelShufflePack doubles the spatial size with scale=2."""
    layer = PixelShufflePack(3, 3, 2, 3)
    layer.init_weights()
    inputs = torch.rand(1, 3, 16, 16)
    assert layer(inputs).shape == (1, 3, 32, 32)
    if torch.cuda.is_available():
        layer = layer.cuda()
        inputs = inputs.cuda()
        assert layer(inputs).shape == (1, 3, 32, 32)
|
def test_pixel_unshuffle():
    """pixel_unshuffle trades spatial size for channels; scale must divide."""
    inputs = torch.rand(1, 3, 20, 20)
    assert pixel_unshuffle(inputs, scale=2).shape == (1, 12, 10, 10)
    with pytest.raises(AssertionError):
        # 20 is not divisible by 3
        pixel_unshuffle(inputs, scale=3)
    if torch.cuda.is_available():
        inputs = inputs.cuda()
        assert pixel_unshuffle(inputs, scale=2).shape == (1, 12, 10, 10)
        with pytest.raises(AssertionError):
            pixel_unshuffle(inputs, scale=3)
|
def test_deepfillv1_disc():
    """DeepFillv1 global/local discriminator pair on CPU and GPU."""

    def branch_cfg(max_channels, fc_in_channels):
        # both branches share everything except capacity and fc input size
        return dict(
            type='MultiLayerDiscriminator', in_channels=3,
            max_channels=max_channels, fc_in_channels=fc_in_channels,
            fc_out_channels=1, num_convs=4, norm_cfg=None,
            act_cfg=dict(type='ELU'),
            out_act_cfg=dict(type='LeakyReLU', negative_slope=0.2))

    model_config = dict(
        global_disc_cfg=branch_cfg(256, 256 * 16 * 16),
        local_disc_cfg=branch_cfg(512, 512 * 8 * 8))
    disc = DeepFillv1Discriminators(**model_config)
    disc.init_weights()
    global_x = torch.rand((2, 3, 256, 256))
    local_x = torch.rand((2, 3, 128, 128))
    global_pred, local_pred = disc((global_x, local_x))
    assert global_pred.shape == (2, 1)
    assert local_pred.shape == (2, 1)
    assert isinstance(disc.global_disc, MultiLayerDiscriminator)
    assert isinstance(disc.local_disc, MultiLayerDiscriminator)
    with pytest.raises(TypeError):
        disc.init_weights(model_config)
    if torch.cuda.is_available():
        disc = DeepFillv1Discriminators(**model_config).cuda()
        disc.init_weights()
        global_x = torch.rand((2, 3, 256, 256)).cuda()
        local_x = torch.rand((2, 3, 128, 128)).cuda()
        global_pred, local_pred = disc((global_x, local_x))
        assert global_pred.shape == (2, 1)
        assert local_pred.shape == (2, 1)
|
# NOTE(review): the lines below are non-Python web-page residue (dataset
# viewer boilerplate) that breaks parsing; commented out — delete entirely.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.