code
stringlengths
17
6.64M
@pytest.mark.unit
@pytest.mark.convert
def test_filter_on_extension_with_predicate():
    """Test convert.filter_on_extension with a predicate argument"""
    files = ['file_one.fits', 'file_two.fits', 'file_three.exclude']
    exts = ['fits']
    expected = files[:1]
    # the predicate flags the second file for exclusion
    exclude_second = lambda f: f == files[1]
    actual = convert.filter_on_extension(files, exts, exclude_second)
    assert expected == actual
@pytest.mark.unit
@pytest.mark.convert
def test_make_dirs():
    """Test convert.make_dirs"""
    helpers.setup()
    out_dir = helpers.TEST_PATH
    subdirs = ['0/0', '1/0', '1/1', '2/0', '2/1', '2/2', '2/3']
    expected_dirs = {os.path.join(out_dir, d) for d in subdirs}
    convert.make_dirs(out_dir, 0, 2)
    dirs_exist = all(os.path.exists(d) for d in expected_dirs)
    helpers.tear_down()
    assert dirs_exist
@pytest.mark.unit
@pytest.mark.convert
def test_get_zoom_range():
    """Test convert.get_zoom_range"""
    in_shape = [10000, 10000]
    tile_size = [256, 256]
    expected_min, expected_max = 0, 5
    # fix: local variable was misspelled `acutal_max`
    actual_min, actual_max = convert.get_zoom_range(in_shape, tile_size)
    assert expected_min == actual_min
    assert expected_max == actual_max
@pytest.mark.unit
@pytest.mark.convert
def test_get_total_tiles():
    """Test convert.get_total_tiles"""
    min_zoom, max_zoom = 0, 2
    expected = 21
    actual = convert.get_total_tiles(min_zoom, max_zoom)
    assert expected == actual
@pytest.mark.unit
@pytest.mark.convert
def test_imread_default():
    """Test convert.imread_default() with valid path"""
    helpers.setup(with_data=True)
    test_file = os.path.join(helpers.TEST_PATH, 'test_tiling_image.jpg')
    # imread_default flips vertically, so mirror that with np.flipud
    expected = np.flipud(Image.open(test_file))
    fallback = np.zeros([256, 256])
    actual = convert.imread_default(test_file, fallback)
    helpers.tear_down()
    np.testing.assert_equal(expected, actual)
@pytest.mark.unit
@pytest.mark.convert
def test_imread_default_invalid_path():
    """Test convert.imread_default() with an invalid path returns the default"""
    helpers.setup(with_data=True)
    missing_file = os.path.join(helpers.TEST_PATH, 'doesnt_exist.jpg')
    fallback = np.zeros([256, 256])
    actual = convert.imread_default(missing_file, fallback)
    helpers.tear_down()
    np.testing.assert_equal(fallback, actual)
@pytest.mark.unit
@pytest.mark.convert
def test_get_map_layer_name():
    """Test convert.get_map_layer_name"""
    actual = convert.get_map_layer_name('./test/test_file.png')
    assert 'test_file' == actual
@pytest.mark.unit
@pytest.mark.convert
def test_get_marker_file_name():
    """Test convert.get_marker_file_name"""
    actual = convert.get_marker_file_name('./test/test_file.cat')
    assert 'test_file.cat.js' == actual
@pytest.mark.unit
@pytest.mark.convert
def test_line_to_cols():
    """Test convert.line_to_cols"""
    line = ['ID', 'RA', 'dec', 'test1', 'test2']
    actual_cols = convert.line_to_cols(line)
    # fix: build the expectation from a copy; the original bound
    # `expected_cols = line` and then mutated it, which also mutated the
    # input list the function was called with
    expected_cols = line.copy()
    expected_cols[0] = 'id'
    expected_cols[1] = 'ra'
    assert expected_cols == actual_cols
@pytest.mark.unit
@pytest.mark.convert
def test_line_to_cols_with_hash():
    """Test convert.line_to_cols when the line starts with a '#' marker"""
    line = ['#', 'ID', 'RA', 'dec', 'test1', 'test2']
    actual_cols = convert.line_to_cols(line)
    # the leading '#' is dropped and the id/ra headers are lowercased
    expected_cols = ['id', 'ra'] + line[3:]
    assert expected_cols == actual_cols
@pytest.mark.unit
@pytest.mark.convert
def test_line_to_json_xy():
    """Test convert.line_to_json with x/y"""
    helpers.setup()
    in_wcs = None
    columns = ['id', 'x', 'y', 'col1', 'col2']
    catalog_assets_path = os.path.join(helpers.TEST_PATH, 'catalog_assets')
    os.mkdir(catalog_assets_path)
    in_line = ['1', '10', '20', 'abc', '123']
    expected_json = {
        'geometry': {'coordinates': [9.5, 19.5]},
        'tags': {
            'a': -1,
            'b': -1,
            'theta': -1,
            'catalog_id': '1',
            'cat_path': 'catalog_assets',
        },
    }
    actual_json = convert.line_to_json(in_wcs, columns, catalog_assets_path, in_line)
    helpers.tear_down()
    assert expected_json == actual_json
@pytest.mark.unit
@pytest.mark.convert
@pytest.mark.filterwarnings('ignore:.*:astropy.io.fits.verify.VerifyWarning')
def test_line_to_json_ra_dec():
    """Test convert.line_to_json with ra/dec"""
    helpers.setup(with_data=True)
    header = fits.getheader(os.path.join(helpers.TEST_PATH, 'test_image.fits'))
    in_wcs = WCS(header)
    columns = ['id', 'ra', 'dec', 'col1', 'col2']
    catalog_assets_path = os.path.join(helpers.TEST_PATH, 'catalog_assets')
    os.mkdir(catalog_assets_path)
    in_line = ['1', '53.18575', '-27.898664', 'abc', '123']
    expected_coords = [289.87867109328727, 301.2526406693396]
    expected_tags = {
        'a': -1,
        'b': -1,
        'theta': -1,
        'catalog_id': '1',
        'cat_path': 'catalog_assets',
    }
    actual_json = convert.line_to_json(in_wcs, columns, catalog_assets_path, in_line)
    helpers.tear_down()
    np.testing.assert_allclose(
        expected_coords, actual_json['geometry']['coordinates'], atol=1e-06
    )
    assert expected_tags == actual_json['tags']
@pytest.mark.unit
@pytest.mark.convert
def test_tile_img_pil_serial():
    """Test convert.tile_img"""
    helpers.disbale_tqdm()  # NOTE: helper name is misspelled in the test harness itself
    helpers.setup(with_data=True)
    out_dir = helpers.TEST_PATH
    test_image = os.path.join(out_dir, 'test_tiling_image.jpg')
    progress = [0, queue.Queue()]
    convert.tile_img(test_image, progress, out_dir=out_dir)
    expected = os.path.join(out_dir, 'expected_test_tiling_image_pil')
    actual = os.path.join(out_dir, 'test_tiling_image')
    match = helpers.compare_file_directories(expected, actual)
    helpers.tear_down()
    helpers.enable_tqdm()
    assert match
@pytest.mark.unit
@pytest.mark.convert
def test_tile_img_pil_serial_png_from_tiff():
    """Test convert.tile_img using a converted TIFF->PNG image, has only 2 dims"""
    helpers.disbale_tqdm()
    helpers.setup(with_data=True)
    out_dir = helpers.TEST_PATH
    test_image = os.path.join(out_dir, 'test_png_from_tiff.png')
    progress = [0, queue.Queue()]
    convert.tile_img(test_image, progress, out_dir=out_dir)
    expected = os.path.join(out_dir, 'expected_test_png_from_tiff')
    actual = os.path.join(out_dir, 'test_png_from_tiff')
    match = helpers.compare_file_directories(expected, actual)
    helpers.tear_down()
    helpers.enable_tqdm()
    assert match
@pytest.mark.unit
@pytest.mark.convert
def test_tile_img_mpl_fits_serial():
    """Test convert.tile_img on a FITS file (serial)"""
    helpers.disbale_tqdm()
    helpers.setup(with_data=True)
    out_dir = helpers.TEST_PATH
    test_image = os.path.join(out_dir, 'test_img_for_map.fits')
    progress = [0, queue.Queue()]
    convert.tile_img(
        test_image,
        progress,
        out_dir=out_dir,
        norm_kwargs=dict(stretch='log', max_percent=99.9),
    )
    expected = os.path.join(out_dir, 'expected_test_img_for_map')
    actual = os.path.join(out_dir, 'test_img_for_map')
    match = helpers.compare_file_directories(expected, actual)
    helpers.tear_down()
    helpers.enable_tqdm()
    assert match
@pytest.mark.unit
@pytest.mark.convert
def test_tile_img_mpl_fits_serial_with_fname_kwargs():
    """Test convert.tile_img with per-file norm_kwargs keyed by file name"""
    helpers.disbale_tqdm()
    helpers.setup(with_data=True)
    out_dir = helpers.TEST_PATH
    test_image = os.path.join(out_dir, 'test_img_for_map.fits')
    progress = [0, queue.Queue()]
    per_file_norms = {'test_img_for_map.fits': dict(stretch='log', max_percent=99.9)}
    convert.tile_img(test_image, progress, out_dir=out_dir, norm_kwargs=per_file_norms)
    expected = os.path.join(out_dir, 'expected_test_img_for_map')
    actual = os.path.join(out_dir, 'test_img_for_map')
    match = helpers.compare_file_directories(expected, actual)
    helpers.tear_down()
    helpers.enable_tqdm()
    assert match
# fix: add the unit/convert marks that every other test in this module carries,
# so mark-filtered runs (`pytest -m unit`) include this test too
@pytest.mark.unit
@pytest.mark.convert
def test_simplify_mixed_ws():
    """Test convert._simplify_mixed_ws"""
    helpers.disbale_tqdm()
    helpers.setup(with_data=True)
    test_lines = ['a b c\n', 'test\tdata stuff\n', 'to test\tstuff\n']
    out_file = os.path.join(helpers.DATA_DIR, 'test.cat')
    with open(out_file, 'w') as f:
        f.writelines(test_lines)
    convert._simplify_mixed_ws(out_file)
    expected_test_lines = ['a b c\n', 'test data stuff\n', 'to test stuff\n']
    with open(out_file, 'r') as f:
        actual_lines = f.readlines()
    helpers.tear_down()
    helpers.enable_tqdm()
    assert expected_test_lines == actual_lines
@pytest.mark.unit
@pytest.mark.convert
def test_tile_img_pil_parallel():
    """Test convert.tile_img with multiprocessing"""
    helpers.disbale_tqdm()
    helpers.setup(with_data=True)
    out_dir = helpers.TEST_PATH
    test_image = os.path.join(out_dir, 'test_tiling_image.jpg')
    progress = [0, queue.Queue()]
    convert.tile_img(test_image, progress, out_dir=out_dir, mp_procs=2)
    expected = os.path.join(out_dir, 'expected_test_tiling_image_pil')
    actual = os.path.join(out_dir, 'test_tiling_image')
    match = helpers.compare_file_directories(expected, actual)
    helpers.tear_down(include_ray=True)
    helpers.enable_tqdm()
    assert match
@pytest.mark.unit
@pytest.mark.convert
def test_tile_img_mpl_parallel():
    """Test convert.tile_img on a FITS file with multiprocessing"""
    helpers.disbale_tqdm()
    helpers.setup(with_data=True)
    out_dir = helpers.TEST_PATH
    test_image = os.path.join(out_dir, 'test_img_for_map.fits')
    progress = [0, queue.Queue()]
    convert.tile_img(
        test_image,
        progress,
        out_dir=out_dir,
        mp_procs=2,
        norm_kwargs=dict(stretch='log', max_percent=99.9),
    )
    expected = os.path.join(out_dir, 'expected_test_img_for_map')
    actual = os.path.join(out_dir, 'test_img_for_map')
    match = helpers.compare_file_directories(expected, actual)
    helpers.tear_down(include_ray=True)
    helpers.enable_tqdm()
    assert match
@pytest.mark.unit
@pytest.mark.convert
@pytest.mark.skipif(
    condition=(not sys.platform.startswith('linux')),
    reason='temp fix, need osx/windows artififacts for cbor/pbf files',
)
def test_version_not_hard_coded():
    """Tests that the version in the testing artifacts is not hard coded"""
    helpers.disbale_tqdm()
    helpers.setup(with_data=True)
    file = 'index.html'
    dirs = ['expected_test_web', 'expected_test_web_ellipse', 'expected_test_web_no_marker']
    tests = {}
    for d in dirs:
        with open(os.path.join(helpers.TEST_PATH, d, file), 'r') as f:
            text = f.read()
        # the artifact should contain the placeholder 'vVERSION', not a number
        tests[d] = 'vVERSION' in text
    helpers.tear_down()
    helpers.enable_tqdm()
    # fix: use truthiness instead of the anti-idiom `v == False`
    failed = [d for d, v in tests.items() if not v]
    assert len(failed) == 0, 'VERSION not found in {}, likely hardcoded'.format(failed)
@pytest.mark.integration
@pytest.mark.convert
@pytest.mark.skipif(
    condition=(not sys.platform.startswith('linux')),
    reason='temp fix, need osx/windows artififacts for cbor/pbf files',
)
@pytest.mark.filterwarnings('ignore:.*:astropy.io.fits.verify.VerifyWarning')
def test_files_to_map():
    """Integration test for making files into map"""
    helpers.disbale_tqdm()
    helpers.setup(with_data=True)
    with_path = lambda f: os.path.join(helpers.TEST_PATH, f)
    out_dir = with_path('test_web')
    files = [with_path('test_tiling_image.jpg'), with_path('test_catalog_radec.cat')]
    convert.files_to_map(
        files,
        out_dir=out_dir,
        cat_wcs_fits_file=with_path('test_image.fits'),
        catalog_delim=' ',
    )
    expected_dir = with_path('expected_test_web')
    # stamp the current package version into the expected artifact
    version = helpers.get_version()
    index_path = os.path.join(expected_dir, 'index.html')
    with open(index_path, 'r') as f:
        converted = [line.replace('VERSION', version) for line in f.readlines()]
    with open(index_path, 'w') as f:
        f.writelines(converted)
    match = helpers.compare_file_directories(expected_dir, out_dir)
    helpers.tear_down(include_ray=True)
    helpers.enable_tqdm()
    assert match
@pytest.mark.integration
@pytest.mark.convert
@pytest.mark.skipif(
    condition=(not sys.platform.startswith('linux')),
    reason='temp fix, need osx/windows artififacts for cbor/pbf files',
)
@pytest.mark.filterwarnings('ignore:.*:astropy.io.fits.verify.VerifyWarning')
def test_files_to_map_ellipse_markers():
    """Integration test for making files into map"""
    helpers.disbale_tqdm()
    helpers.setup(with_data=True)
    with_path = lambda f: os.path.join(helpers.TEST_PATH, f)
    out_dir = with_path('test_web')
    files = [with_path('test_tiling_image.jpg'), with_path('test_catalog_xy_ellipse.cat')]
    convert.files_to_map(files, out_dir=out_dir, catalog_delim=' ')
    expected_dir = with_path('expected_test_web_ellipse')
    # stamp the current package version into the expected artifact
    version = helpers.get_version()
    index_path = os.path.join(expected_dir, 'index.html')
    with open(index_path, 'r') as f:
        converted = [line.replace('VERSION', version) for line in f.readlines()]
    with open(index_path, 'w') as f:
        f.writelines(converted)
    match = helpers.compare_file_directories(expected_dir, out_dir)
    helpers.tear_down(include_ray=True)
    helpers.enable_tqdm()
    assert match
@pytest.mark.integration
@pytest.mark.convert
@pytest.mark.filterwarnings('ignore:.*:astropy.io.fits.verify.VerifyWarning')
def test_files_to_map_fails_file_not_found():
    """Integration test for making files into map"""
    helpers.disbale_tqdm()
    helpers.setup(with_data=True)
    with_path = lambda f: os.path.join(helpers.TEST_PATH, f)
    files = [
        with_path('test_tiling_image.jpg'),
        with_path('test_catalog_radec.cat'),
        with_path('does_not_exist.txt'),
    ]
    # a missing input file should trip the input-validation assertion
    with pytest.raises(AssertionError):
        convert.files_to_map(
            files,
            out_dir=with_path('test_web'),
            cat_wcs_fits_file=with_path('test_image.fits'),
        )
    helpers.tear_down()
    helpers.enable_tqdm()
@pytest.mark.integration
@pytest.mark.convert
@pytest.mark.filterwarnings('ignore:.*:astropy.io.fits.verify.VerifyWarning')
def test_dir_to_map_fails_no_files():
    """Integration test for making files into map"""
    helpers.disbale_tqdm()
    helpers.setup(with_data=True)
    with_path = lambda f: os.path.join(helpers.TEST_PATH, f)
    in_dir = with_path('test_web_in')
    if not os.path.exists(in_dir):
        os.mkdir(in_dir)
    # an empty input directory should trip the input-validation assertion
    with pytest.raises(AssertionError):
        convert.dir_to_map(
            in_dir,
            out_dir=with_path('test_web'),
            cat_wcs_fits_file=with_path('test_image.fits'),
        )
    helpers.tear_down()
    helpers.enable_tqdm()
@pytest.mark.integration
@pytest.mark.convert
@pytest.mark.skipif(
    condition=(not sys.platform.startswith('linux')),
    reason='temp fix, need osx/windows artififacts for cbor/pbf files',
)
@pytest.mark.filterwarnings('ignore:.*:astropy.io.fits.verify.VerifyWarning')
def test_dir_to_map():
    """Integration test for making files into map"""
    helpers.disbale_tqdm()
    helpers.setup(with_data=True)
    with_path = lambda f: os.path.join(helpers.TEST_PATH, f)
    out_dir = with_path('test_web')
    in_dir = with_path('test_web_in')
    if not os.path.exists(in_dir):
        os.mkdir(in_dir)
    for name in ('test_tiling_image.jpg', 'test_catalog_radec.cat'):
        shutil.copy(with_path(name), os.path.join(in_dir, name))
    expected_dir = with_path('expected_test_web')
    # stamp the current package version into the expected artifact
    version = helpers.get_version()
    index_path = os.path.join(expected_dir, 'index.html')
    with open(index_path, 'r') as f:
        converted = [line.replace('VERSION', version) for line in f.readlines()]
    with open(index_path, 'w') as f:
        f.writelines(converted)
    convert.dir_to_map(
        in_dir,
        out_dir=out_dir,
        catalog_delim=' ',
        cat_wcs_fits_file=with_path('test_image.fits'),
    )
    match = helpers.compare_file_directories(expected_dir, out_dir)
    helpers.tear_down(include_ray=True)
    helpers.enable_tqdm()
    assert match
@pytest.mark.integration
@pytest.mark.convert
@pytest.mark.filterwarnings('ignore:.*:astropy.io.fits.verify.VerifyWarning')
def test_dir_to_map_no_markers():
    """Integration test for making files into map"""
    helpers.disbale_tqdm()
    helpers.setup(with_data=True)
    with_path = lambda f: os.path.join(helpers.TEST_PATH, f)
    out_dir = with_path('test_web')
    in_dir = with_path('test_web_in')
    if not os.path.exists(in_dir):
        os.mkdir(in_dir)
    for name in ('test_tiling_image.jpg',):
        shutil.copy(with_path(name), os.path.join(in_dir, name))
    expected_dir = with_path('expected_test_web_no_marker')
    # stamp the current package version into the expected artifact
    version = helpers.get_version()
    index_path = os.path.join(expected_dir, 'index.html')
    with open(index_path, 'r') as f:
        converted = [line.replace('VERSION', version) for line in f.readlines()]
    with open(index_path, 'w') as f:
        f.writelines(converted)
    convert.dir_to_map(in_dir, out_dir=out_dir)
    match = helpers.compare_file_directories(expected_dir, out_dir)
    helpers.tear_down(include_ray=True)
    helpers.enable_tqdm()
    assert match
@pytest.mark.unit
def test_build_digit_to_string():
    """test cartographer.build_digit_to_string"""
    strings = ['zero', 'one', 'two', 'three', 'four',
               'five', 'six', 'seven', 'eight', 'nine']
    for digit, expected in enumerate(strings):
        assert expected == u.digit_to_string(digit)
@pytest.mark.unit
def test_build_digit_to_string_fails():
    """test cartographer.build_digit_to_string"""
    with pytest.raises(ValueError) as excinfo:
        u.digit_to_string(-1)
    assert 'Only digits 0-9 are supported' in str(excinfo.value)
@pytest.mark.unit
def test_make_fname_js_safe_digit():
    """Test the cartographer.make_fname_js_safe functions."""
    # a leading digit is spelled out so the name is a valid JS identifier
    assert 'one23' == u.make_fname_js_safe('123')
@pytest.mark.unit
def test_make_fname_js_safe_dot_dash():
    """Test the cartographer.make_fname_js_safe functions."""
    # dots become '_dot_' and dashes become underscores
    assert 'a_dot_b_c' == u.make_fname_js_safe('a.b-c')
@pytest.mark.unit
def test_make_fname_js_safe_no_change():
    """Test the cartographer.make_fname_js_safe functions."""
    # an already-safe name passes through unchanged
    assert 'abc' == u.make_fname_js_safe('abc')
@pytest.mark.unit
def test_MockQueue():
    """Test the MockQueue class."""
    progress_bar = tqdm()
    q = u.MockQueue(progress_bar)
    q.put(100)
    # put() forwards the count straight to the wrapped bar
    assert q.bar.n == 100
@pytest.mark.unit
def test_backpressure_queue():
    'Test backpressure_queue.'
    helpers.setup()
    # (count, queue) pair consumed by the progress reporting inside the queue
    pbar_ref = (0, u.MockQueue(helpers.MockTQDM()))
    n_parallel_jobs = 1
    # three dummy jobs, each with a single None argument
    f_args = [[None], [None], [None]]
    # work_f flips one False->True per invocation; all True == every job ran
    hit_all_queue = [False, False, False]
    # one-shot flag so wait_f reports the final job as still running exactly once
    wait_one = [True]

    def wait_f(in_progress: List[Any]):
        # simulate "first job finished": everything after index 0 keeps running
        still_running = (in_progress[1:] if (len(in_progress) > 1) else [])
        if ((len(still_running) == 0) and wait_one[0]):
            # final drain: report the last job once more before letting it finish
            wait_one[0] = False
            return (None, in_progress)
        return (None, still_running)

    def work_f(hit_all_queue, *args):
        # mark the next unprocessed slot as done
        hit_all_queue[hit_all_queue.index(False)] = True
        return args

    work_f_with_check = partial(work_f, hit_all_queue)
    u.backpressure_queue(wait_f, work_f_with_check, f_args, pbar_ref, n_parallel_jobs)
    helpers.tear_down()
    assert all(hit_all_queue)
def digit_to_string(digit: int) -> str:
    """Converts an integer into its word representation.

    Raises:
        ValueError: if ``digit`` does not equal one of 0-9.
    """
    words = ('zero', 'one', 'two', 'three', 'four',
             'five', 'six', 'seven', 'eight', 'nine')
    # equality scan (rather than indexing) preserves the original's
    # comparison semantics for any input type
    for value, word in enumerate(words):
        if digit == value:
            return word
    raise ValueError('Only digits 0-9 are supported')
def make_fname_js_safe(fname: str) -> str:
    """Converts a string filename to a javascript safe identifier."""
    leading = fname[0]
    # identifiers cannot start with a digit, so spell the digit out
    if leading in string.digits:
        safe = digit_to_string(int(leading)) + fname[1:]
    else:
        safe = fname
    return safe.replace('.', '_dot_').replace('-', '_')
def get_fits_image_size(fits_file: str) -> Tuple[(int, int)]:
    """Returns image size (x, y)

    Args:
        fits_file (str): fits file path

    Returns:
        Tuple[int, int]: returns the x and y dims of the input file
    """
    # read dims from the header only; avoids loading pixel data
    header = fits.getheader(fits_file)
    return (header['NAXIS1'], header['NAXIS2'])
def get_standard_image_size(image_file: str) -> Tuple[(int, int)]:
    """Returns image size (x, y)

    Args:
        image_file (str): image file path

    Returns:
        Tuple[int, int]: returns the x and y dims of the input file
    """
    # PIL lazily reads only the header, so this is cheap for large images
    with Image.open(image_file) as img:
        dims = img.size
    return dims
def peek_image_info(img_file_names: List[str]) -> Tuple[(int, int)]:
    """Gets image size values given passed image file names

    Args:
        img_file_names (List[str]): Input image files that are being tiled

    Returns:
        Tuple[int, int]: The `max x`, and `max y`
    """
    is_fits = lambda f: f.endswith('fits')
    fits_sizes = [get_fits_image_size(f) for f in img_file_names if is_fits(f)]
    standard_sizes = [get_standard_image_size(f) for f in img_file_names if not is_fits(f)]
    # running max over both groups; (0, 0) when there are no inputs
    max_x, max_y = 0, 0
    for x, y in chain(fits_sizes, standard_sizes):
        max_x = max(max_x, x)
        max_y = max(max_y, y)
    return (max_x, max_y)
def get_version():
    """Read the fitsmap version string from the package's __version__.py."""
    version_file = os.path.join(fitsmap.__path__[0], '__version__.py')
    with open(version_file, 'r') as f:
        # first line is the version assignment; strip the surrounding quotes
        return f.readline().strip().replace('"', '')
def backpressure_queue(
    wait_f: Callable,
    work_f: Callable,
    f_args: List[List[Any]],
    pbar_ref: Tuple[(int, queue.Queue)],
    n_parallel_jobs: int,
    batch_size: int = 1,
) -> None:
    """A queue that will limit things processed in parallel.

    Args:
        wait_f (Callable): A function that will block until a process is finished
        work_f (Callable): A function that accepts a single element from args
        f_args (List[List[Any]]): A list of function args for work_f
        pbar_ref (Tuple[int, queue.Queue]): progress reference passed to
            OutputManager.update after each completed wait cycle
        n_parallel_jobs (int): The number of args to process in parallel
        batch_size (int): number of items reported per update. Default: 1

    Returns:
        None
    """
    # seed the queue with at most n_parallel_jobs jobs; zipping the two ranges
    # guards against f_args being shorter than n_parallel_jobs
    in_progress = [
        work_f(*f_args.pop(0))
        for _ in zip(range(n_parallel_jobs), range(len(f_args)))
    ]
    while in_progress:
        (_, in_progress) = wait_f(in_progress)
        OutputManager.update(pbar_ref, batch_size)
        # fix: removed the dead `elif in_progress: pass / else: break` tail —
        # the while condition already terminates once everything has drained
        if f_args:
            in_progress.append(work_f(*f_args.pop(0)))
class MockQueue():
    """Minimal queue stand-in that forwards put() counts to a progress bar."""

    def __init__(self, bar):
        # bar: any object exposing update(n=...)
        self.bar = bar

    def put(self, n):
        # advance the wrapped bar instead of enqueueing
        self.bar.update(n=n)
def read(fname):
    """Helper for README file.

    Args:
        fname (str): file name relative to this module's directory

    Returns:
        str: the file's full contents
    """
    # fix: the original leaked the file handle; use a context manager
    with open(os.path.join(os.path.dirname(__file__), fname)) as f:
        return f.read()
def parse_args():
    """Parse command line arguments for the generation demo."""
    modify_args()
    parser = argparse.ArgumentParser(description='Generation demo')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument('img_path', help='path to input image file')
    parser.add_argument('save_path', help='path to save generation result')
    parser.add_argument('--unpaired-path', default=None, help='path to unpaired image file')
    parser.add_argument('--imshow', action='store_true', help='whether show image with opencv')
    parser.add_argument('--device', type=int, default=0, help='CUDA device id')
    return parser.parse_args()
def main():
    """Run generation inference on one image and save/show the result."""
    args = parse_args()
    device = torch.device('cuda', args.device)
    model = init_model(args.config, args.checkpoint, device=device)
    output = generation_inference(model, args.img_path, args.unpaired_path)
    mmcv.imwrite(output, args.save_path)
    if args.imshow:
        mmcv.imshow(output, 'predicted generation result')
def parse_args():
    """Parse command line arguments for the matting demo."""
    parser = argparse.ArgumentParser(description='Matting demo')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument('img_path', help='path to input image file')
    parser.add_argument('trimap_path', help='path to input trimap file')
    parser.add_argument('save_path', help='path to save alpha matte result')
    parser.add_argument('--imshow', action='store_true', help='whether show image with opencv')
    parser.add_argument('--device', type=int, default=0, help='CUDA device id')
    return parser.parse_args()
def main():
    """Run matting inference and save/show the predicted alpha matte."""
    args = parse_args()
    device = torch.device('cuda', args.device)
    model = init_model(args.config, args.checkpoint, device=device)
    # scale the [0, 1] alpha prediction into an 8-bit image
    pred_alpha = matting_inference(model, args.img_path, args.trimap_path) * 255
    mmcv.imwrite(pred_alpha, args.save_path)
    if args.imshow:
        mmcv.imshow(pred_alpha, 'predicted alpha matte')
def parse_args():
    """Parse command line arguments for the restoration demo."""
    modify_args()
    parser = argparse.ArgumentParser(description='Restoration demo')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    # fix: positionals were declared as 'img-path'/'save-path'; argparse keeps
    # hyphens in positional dests, so the companion main()'s `args.img_path`
    # would raise AttributeError. Use underscores directly.
    parser.add_argument('img_path', help='path to input image file')
    parser.add_argument('save_path', help='path to save restoration result')
    parser.add_argument('--imshow', action='store_true', help='whether show image with opencv')
    parser.add_argument('--device', type=int, default=0, help='CUDA device id')
    parser.add_argument('--ref-path', default=None, help='path to reference image file')
    args = parser.parse_args()
    return args
def main():
    """Run (optionally reference-based) restoration on a single image."""
    args = parse_args()
    if not os.path.isfile(args.img_path):
        raise ValueError('It seems that you did not input a valid "image_path". Please double check your input, or you may want to use "restoration_video_demo.py" for video restoration.')
    if args.ref_path and (not os.path.isfile(args.ref_path)):
        raise ValueError('It seems that you did not input a valid "ref_path". Please double check your input, or you may want to use "ref_path=None" for single restoration.')
    device = torch.device('cuda', args.device)
    model = init_model(args.config, args.checkpoint, device=device)
    # a falsy ref_path is equivalent to omitting the reference entirely
    output = restoration_inference(model, args.img_path, args.ref_path if args.ref_path else None)
    output = tensor2img(output)
    mmcv.imwrite(output, args.save_path)
    if args.imshow:
        mmcv.imshow(output, 'predicted restoration result')
def parse_args():
    """Parse command line arguments for the face restoration demo."""
    modify_args()
    parser = argparse.ArgumentParser(description='Restoration demo')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument('img_path', help='path to input image file')
    parser.add_argument('save_path', help='path to save restoration result')
    parser.add_argument('--upscale-factor', type=int, default=1,
                        help='the number of times the input image is upsampled.')
    # fix: help text ended with a stray double period ("faces..")
    parser.add_argument('--face-size', type=int, default=1024,
                        help='the size of the cropped and aligned faces.')
    parser.add_argument('--imshow', action='store_true', help='whether show image with opencv')
    parser.add_argument('--device', type=int, default=0, help='CUDA device id')
    args = parser.parse_args()
    return args
def main():
    """Run face restoration on a single image and save/show the result."""
    args = parse_args()
    if not os.path.isfile(args.img_path):
        raise ValueError('It seems that you did not input a valid "image_path". Please double check your input, or you may want to use "restoration_video_demo.py" for video restoration.')
    device = torch.device('cuda', args.device)
    model = init_model(args.config, args.checkpoint, device=device)
    output = restoration_face_inference(model, args.img_path, args.upscale_factor, args.face_size)
    mmcv.imwrite(output, args.save_path)
    if args.imshow:
        mmcv.imshow(output, 'predicted restoration result')
def parse_args():
    """Parse command line arguments for the video interpolation demo."""
    modify_args()
    parser = argparse.ArgumentParser(description='Restoration demo')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument('input_dir', help='directory of the input video')
    parser.add_argument('output_dir', help='directory of the output video')
    parser.add_argument('--fps', type=float, default=0,
                        help='frame rate of the output video, which is needed when `fps-multiplier` is 0 and a video is desired as output.')
    parser.add_argument('--fps-multiplier', type=float, default=0,
                        help='multiply the fps based on the input video, if `fps-multiplier` is 0, `fps` will be utilized.')
    parser.add_argument('--start-idx', type=int, default=0,
                        help='the index of the first frame to be processed in the sequence')
    # fix: help text had run-together words ("thesequence", "videoor")
    parser.add_argument('--end-idx', type=int, default=None,
                        help='The index corresponds to the last interpolated frame in the sequence. If it is None, interpolate to the last frame of video or sequence. Default: None.')
    parser.add_argument('--batch-size', type=int, default=4,
                        help='batch size of video interpolation model')
    parser.add_argument('--filename-tmpl', default='{:08d}.png',
                        help='template of the file names')
    parser.add_argument('--device', type=int, default=None, help='CUDA device id')
    args = parser.parse_args()
    return args
def main():
    """Demo for video interpolation models.

    Note that we accept video as input(output), when 'input_dir'('output_dir')
    is set to the path to the video. But using videos introduces video
    compression, which lower the visual quality. If you want actual quality,
    please save them as separate images (.png).
    """
    args = parse_args()
    # fix: --device defaults to None and `None < 0` raises TypeError on
    # Python 3; treat an unspecified device as CPU (presumed intent — the
    # negative-value branch already selects CPU)
    if args.device is None or args.device < 0:
        device = torch.device('cpu')
    else:
        device = torch.device('cuda', args.device)
    model = init_model(args.config, args.checkpoint, device=device)
    video_interpolation_inference(
        model=model,
        input_dir=args.input_dir,
        start_idx=args.start_idx,
        end_idx=args.end_idx,
        batch_size=args.batch_size,
        fps_multiplier=args.fps_multiplier,
        fps=args.fps,
        output_dir=args.output_dir,
        filename_tmpl=args.filename_tmpl,
    )
def builder_inited_handler(app):
    """Run the doc-merge and statistics scripts once the Sphinx builder starts."""
    for cmd in (['bash', './merge_docs.sh'], ['python', './stat.py']):
        subprocess.run(cmd)
def setup(app):
    # Sphinx extension entry point: run the doc-prep scripts when the builder
    # is initialized
    app.connect('builder-inited', builder_inited_handler)
def anchor(name):
    """Normalize *name* into a lowercase, dash-separated HTML anchor."""
    lowered = name.strip().lower()
    # replace every char outside [a-zA-Z0-9+] with a dash
    dashed = re.sub(r'[^a-zA-Z0-9\+]', '-', lowered)
    # collapse runs of dashes, then trim dashes from both ends
    collapsed = re.sub('-+', '-', dashed)
    return collapsed.strip('-')
def builder_inited_handler(app):
    """Run the doc-merge and statistics scripts once the Sphinx builder starts."""
    # scripts are invoked directly; they rely on their exec bits and shebangs
    for cmd in (['./merge_docs.sh'], ['./stat.py']):
        subprocess.run(cmd)
def setup(app):
    # Sphinx extension entry point: run the doc-prep scripts when the builder
    # is initialized
    app.connect('builder-inited', builder_inited_handler)
def anchor(name):
    """Normalize *name* into a lowercase, dash-separated HTML anchor."""
    lowered = name.strip().lower()
    # replace every char outside [a-zA-Z0-9+] with a dash
    dashed = re.sub(r'[^a-zA-Z0-9\+]', '-', lowered)
    # collapse runs of dashes, then trim dashes from both ends
    collapsed = re.sub('-+', '-', dashed)
    return collapsed.strip('-')
def generation_inference(model, img, img_unpaired=None):
    """Inference image with the model.

    Args:
        model (nn.Module): The loaded model.
        img (str): File path of input image.
        img_unpaired (str, optional): File path of the unpaired image.
            If not None, perform unpaired image generation. Default: None.

    Returns:
        np.ndarray: The predicted generation result.
    """
    cfg = model.cfg
    # run preprocessing/inference on the device the model's weights live on
    device = next(model.parameters()).device
    test_pipeline = Compose(cfg.test_pipeline)
    # paired generation takes a single 'pair_path'; unpaired takes a/b paths
    if (img_unpaired is None):
        data = dict(pair_path=img)
    else:
        data = dict(img_a_path=img, img_b_path=img_unpaired)
    data = test_pipeline(data)
    data = scatter(collate([data], samples_per_gpu=1), [device])[0]
    with torch.no_grad():
        results = model(test_mode=True, **data)
    # assemble the output; when model.show_input is set, real and fake images
    # are concatenated side by side (axis=1) for visual comparison.
    # min_max=(-1, 1) — presumably the generator's output range; confirm
    # against the model config.
    if (img_unpaired is None):
        if model.show_input:
            output = np.concatenate([
                tensor2img(results['real_a'], min_max=((- 1), 1)),
                tensor2img(results['fake_b'], min_max=((- 1), 1)),
                tensor2img(results['real_b'], min_max=((- 1), 1))
            ], axis=1)
        else:
            output = tensor2img(results['fake_b'], min_max=((- 1), 1))
    elif model.show_input:
        output = np.concatenate([
            tensor2img(results['real_a'], min_max=((- 1), 1)),
            tensor2img(results['fake_b'], min_max=((- 1), 1)),
            tensor2img(results['real_b'], min_max=((- 1), 1)),
            tensor2img(results['fake_a'], min_max=((- 1), 1))
        ], axis=1)
    elif (model.test_direction == 'a2b'):
        output = tensor2img(results['fake_b'], min_max=((- 1), 1))
    else:
        output = tensor2img(results['fake_a'], min_max=((- 1), 1))
    return output
def inpainting_inference(model, masked_img, mask):
    """Inference image with the model.

    Args:
        model (nn.Module): The loaded model.
        masked_img (str): File path of image with mask.
        mask (str): Mask file path.

    Returns:
        Tensor: The predicted inpainting result.
    """
    device = next(model.parameters()).device
    # hand-built test pipeline: load image and mask, pad both, normalize
    # (mean/std 127.5 — presumably maps pixels to roughly [-1, 1]; confirm
    # with mmcv's Normalize), compose the masked image, then tensorize
    infer_pipeline = [
        dict(type='LoadImageFromFile', key='masked_img'),
        dict(type='LoadMask', mask_mode='file', mask_config=dict()),
        dict(type='Pad', keys=['masked_img', 'mask'], mode='reflect'),
        dict(type='Normalize', keys=['masked_img'], mean=([127.5] * 3), std=([127.5] * 3), to_rgb=False),
        dict(type='GetMaskedImage', img_name='masked_img'),
        dict(type='Collect', keys=['masked_img', 'mask'], meta_keys=['masked_img_path']),
        dict(type='ImageToTensor', keys=['masked_img', 'mask'])
    ]
    test_pipeline = Compose(infer_pipeline)
    data = dict(masked_img_path=masked_img, mask_path=mask)
    data = test_pipeline(data)
    data = scatter(collate([data], samples_per_gpu=1), [device])[0]
    with torch.no_grad():
        result = model(test_mode=True, **data)
    return result['fake_img']
def init_model(config, checkpoint=None, device='cuda:0'):
    """Initialize a model from config file.

    Args:
        config (str or :obj:`mmcv.Config`): Config file path or the config
            object.
        checkpoint (str, optional): Checkpoint path. If left as None, the model
            will not load any weights.
        device (str): Which device the model will deploy. Default: 'cuda:0'.

    Returns:
        nn.Module: The constructed model.
    """
    if isinstance(config, str):
        config = mmcv.Config.fromfile(config)
    elif not isinstance(config, mmcv.Config):
        raise TypeError(f'config must be a filename or Config object, but got {type(config)}')
    # drop training-time settings that are irrelevant for inference
    config.model.pretrained = None
    config.test_cfg.metrics = None
    model = build_model(config.model, test_cfg=config.test_cfg)
    if checkpoint is not None:
        load_checkpoint(model, checkpoint)
    # attach the config so downstream inference helpers can read it
    model.cfg = config
    model.to(device)
    model.eval()
    return model
def matting_inference(model, img, trimap):
    """Inference image(s) with the model.

    Args:
        model (nn.Module): The loaded model.
        img (str): Image file path.
        trimap (str): Trimap file path.

    Returns:
        np.ndarray: The predicted alpha matte.
    """
    cfg = model.cfg
    device = next(model.parameters()).device
    # inference has no ground truth, so strip every pipeline step that loads
    # or collects the alpha targets; iterate over a copy (list(...)) because
    # steps are removed from cfg.test_pipeline in place
    keys_to_remove = ['alpha', 'ori_alpha']
    for key in keys_to_remove:
        for pipeline in list(cfg.test_pipeline):
            if (('key' in pipeline) and (key == pipeline['key'])):
                cfg.test_pipeline.remove(pipeline)
            if (('keys' in pipeline) and (key in pipeline['keys'])):
                pipeline['keys'].remove(key)
                # drop the step entirely once it has no keys left to process
                if (len(pipeline['keys']) == 0):
                    cfg.test_pipeline.remove(pipeline)
            if (('meta_keys' in pipeline) and (key in pipeline['meta_keys'])):
                pipeline['meta_keys'].remove(key)
    test_pipeline = Compose(cfg.test_pipeline)
    data = dict(merged_path=img, trimap_path=trimap)
    data = test_pipeline(data)
    data = scatter(collate([data], samples_per_gpu=1), [device])[0]
    with torch.no_grad():
        result = model(test_mode=True, **data)
    return result['pred_alpha']
def restoration_inference(model, img, ref=None):
    """Inference image with the model.

    Args:
        model (nn.Module): The loaded model.
        img (str): File path of input image.
        ref (str | None): File path of reference image. Default: None.

    Returns:
        Tensor: The predicted restoration result.
    """
    cfg = model.cfg
    device = next(model.parameters()).device
    # Ground-truth entries are evaluation-only; strip them from the test
    # pipeline so pure inference needs no annotations.
    keys_to_remove = ['gt', 'gt_path']
    for key in keys_to_remove:
        # Iterate over a copy because steps are removed from the live list.
        for pipeline in list(cfg.test_pipeline):
            if (('key' in pipeline) and (key == pipeline['key'])):
                cfg.test_pipeline.remove(pipeline)
            if (('keys' in pipeline) and (key in pipeline['keys'])):
                pipeline['keys'].remove(key)
                # Drop the step entirely once it has nothing left to process.
                if (len(pipeline['keys']) == 0):
                    cfg.test_pipeline.remove(pipeline)
            if (('meta_keys' in pipeline) and (key in pipeline['meta_keys'])):
                pipeline['meta_keys'].remove(key)
    test_pipeline = Compose(cfg.test_pipeline)
    # With a reference image the pipeline additionally loads ``ref_path``.
    if ref:
        data = dict(lq_path=img, ref_path=ref)
    else:
        data = dict(lq_path=img)
    data = test_pipeline(data)
    data = scatter(collate([data], samples_per_gpu=1), [device])[0]
    with torch.no_grad():
        result = model(test_mode=True, **data)
    return result['output']
def single_gpu_test(model, data_loader, save_image=False, save_path=None, iteration=None):
    """Test model with a single gpu.

    This method tests model with a single gpu and displays a test progress
    bar.

    Args:
        model (nn.Module): Model to be tested.
        data_loader (nn.Dataloader): Pytorch data loader.
        save_image (bool): Whether save image. Default: False.
        save_path (str): The path to save image. Default: None.
        iteration (int): Iteration number. It is used for the save image
            name. Default: None.

    Returns:
        list: The prediction results.

    Raises:
        ValueError: If ``save_image`` is True but ``save_path`` is None.
    """
    if save_image and save_path is None:
        raise ValueError("When 'save_image' is True, you should also set 'save_path'.")
    model.eval()
    results = []
    dataset = data_loader.dataset
    prog_bar = mmcv.ProgressBar(len(dataset))
    for data in data_loader:
        with torch.no_grad():
            result = model(
                test_mode=True,
                save_image=save_image,
                save_path=save_path,
                iteration=iteration,
                **data)
        results.append(result)
        # Infer the batch size from the first Tensor value. Fall back to 1 so
        # the progress bar still advances when a batch holds no tensors
        # (previously ``batch_size`` stayed unbound and raised
        # UnboundLocalError in that case).
        batch_size = 1
        for _, v in data.items():
            if isinstance(v, torch.Tensor):
                batch_size = v.size(0)
                break
        for _ in range(batch_size):
            prog_bar.update()
    return results
def multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False, save_image=False, save_path=None, iteration=None, empty_cache=False):
    """Test model with multiple gpus.

    This method tests model with multiple gpus and collects the results
    under two different modes: gpu and cpu modes. By setting
    'gpu_collect=True' it encodes results to gpu tensors and uses gpu
    communication for results collection. In cpu mode it saves the results on
    different gpus to 'tmpdir' and collects them by the rank 0 worker.

    Args:
        model (nn.Module): Model to be tested.
        data_loader (nn.Dataloader): Pytorch data loader.
        tmpdir (str): Path of directory to save the temporary results from
            different gpus under cpu mode.
        gpu_collect (bool): Option to use either gpu or cpu to collect
            results.
        save_image (bool): Whether save image. Default: False.
        save_path (str): The path to save image. Default: None.
        iteration (int): Iteration number. It is used for the save image
            name. Default: None.
        empty_cache (bool): empty cache in every iteration. Default: False.

    Returns:
        list: The prediction results (None on non-zero ranks).

    Raises:
        ValueError: If ``save_image`` is True but ``save_path`` is None.
    """
    if save_image and save_path is None:
        raise ValueError("When 'save_image' is True, you should also set 'save_path'.")
    model.eval()
    results = []
    dataset = data_loader.dataset
    rank, world_size = get_dist_info()
    # Only rank 0 renders the progress bar; it advances by the whole
    # world-size worth of samples per step.
    if rank == 0:
        prog_bar = mmcv.ProgressBar(len(dataset))
    for data in data_loader:
        with torch.no_grad():
            result = model(
                test_mode=True,
                save_image=save_image,
                save_path=save_path,
                iteration=iteration,
                **data)
        results.append(result)
        if empty_cache:
            torch.cuda.empty_cache()
        if rank == 0:
            # Infer the batch size from the first Tensor value. Fall back to
            # 1 so the progress bar still advances when a batch holds no
            # tensors (previously ``batch_size`` stayed unbound and raised
            # UnboundLocalError in that case).
            batch_size = 1
            for _, v in data.items():
                if isinstance(v, torch.Tensor):
                    batch_size = v.size(0)
                    break
            for _ in range(batch_size * world_size):
                prog_bar.update()
    # Gather per-rank shards into the final ordered list on rank 0.
    if gpu_collect:
        results = collect_results_gpu(results, len(dataset))
    else:
        results = collect_results_cpu(results, len(dataset), tmpdir)
    return results
def collect_results_cpu(result_part, size, tmpdir=None):
    """Collect results in cpu mode.

    It saves the results on different gpus to 'tmpdir' and collects
    them by the rank 0 worker.

    Args:
        result_part (list): Results to be collected.
        size (int): Result size (total number of samples).
        tmpdir (str): Path of directory to save the temporary results from
            different gpus under cpu mode. Default: None.

    Returns:
        list: Ordered results (None on non-zero ranks).
    """
    (rank, world_size) = get_dist_info()
    if (tmpdir is None):
        # Rank 0 creates a temp dir and broadcasts its path (as a padded
        # uint8 tensor) so every rank writes into the same directory.
        MAX_LEN = 512
        # 32 is the ASCII space character; the padding is stripped by
        # ``rstrip`` after decoding.
        dir_tensor = torch.full((MAX_LEN,), 32, dtype=torch.uint8, device='cuda')
        if (rank == 0):
            mmcv.mkdir_or_exist('.dist_test')
            tmpdir = tempfile.mkdtemp(dir='.dist_test')
            tmpdir = torch.tensor(bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda')
            dir_tensor[:len(tmpdir)] = tmpdir
        dist.broadcast(dir_tensor, 0)
        tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()
    else:
        mmcv.mkdir_or_exist(tmpdir)
    # Make sure the directory exists on all ranks before anyone writes.
    dist.barrier()
    mmcv.dump(result_part, osp.join(tmpdir, 'part_{}.pkl'.format(rank)))
    # Wait until every rank has dumped its shard before rank 0 reads.
    dist.barrier()
    if (rank != 0):
        return None
    part_list = []
    for i in range(world_size):
        part_file = osp.join(tmpdir, 'part_{}.pkl'.format(i))
        part_list.append(mmcv.load(part_file))
    # Interleave the shards: samples were distributed round-robin, so
    # zipping restores the original dataset order.
    ordered_results = []
    for res in zip(*part_list):
        ordered_results.extend(list(res))
    # The distributed sampler may pad with duplicates; truncate to ``size``.
    ordered_results = ordered_results[:size]
    shutil.rmtree(tmpdir)
    return ordered_results
def collect_results_gpu(result_part, size):
    """Collect results in gpu mode.

    It encodes results to gpu tensors and uses gpu communication for
    results collection.

    Args:
        result_part (list): Results to be collected.
        size (int): Result size (total number of samples).

    Returns:
        list: Ordered results (None on non-zero ranks).
    """
    (rank, world_size) = get_dist_info()
    # Serialize this rank's partial results into a CUDA uint8 tensor.
    part_tensor = torch.tensor(bytearray(pickle.dumps(result_part)), dtype=torch.uint8, device='cuda')
    # Gather every rank's payload length so all payloads can be padded to a
    # common size (all_gather requires equal shapes).
    shape_tensor = torch.tensor(part_tensor.shape, device='cuda')
    shape_list = [shape_tensor.clone() for _ in range(world_size)]
    dist.all_gather(shape_list, shape_tensor)
    shape_max = torch.tensor(shape_list).max()
    part_send = torch.zeros(shape_max, dtype=torch.uint8, device='cuda')
    part_send[:shape_tensor[0]] = part_tensor
    part_recv_list = [part_tensor.new_zeros(shape_max) for _ in range(world_size)]
    dist.all_gather(part_recv_list, part_send)
    if (rank != 0):
        return None
    # Deserialize each rank's payload, trimming the zero padding first.
    part_list = []
    for (recv, shape) in zip(part_recv_list, shape_list):
        part_list.append(pickle.loads(recv[:shape[0]].cpu().numpy().tobytes()))
    # Interleave round-robin shards to restore dataset order, then drop any
    # duplicates introduced by the distributed sampler's padding.
    ordered_results = []
    for res in zip(*part_list):
        ordered_results.extend(list(res))
    ordered_results = ordered_results[:size]
    return ordered_results
def init_random_seed(seed=None, device='cuda'):
    """Initialize the random seed.

    If the seed is not given, one is drawn at random on rank 0 and then
    broadcast to all processes so every worker agrees on the same value.

    Args:
        seed (int, Optional): The seed. Default to None.
        device (str): The device where the seed will be put on.
            Default to 'cuda'.

    Returns:
        int: Seed to be used.
    """
    if seed is not None:
        return seed
    rank, world_size = get_dist_info()
    seed = np.random.randint(2 ** 31)
    if world_size == 1:
        return seed
    # Rank 0 owns the drawn seed; other ranks receive it via broadcast.
    if rank == 0:
        seed_tensor = torch.tensor(seed, dtype=torch.int32, device=device)
    else:
        seed_tensor = torch.tensor(0, dtype=torch.int32, device=device)
    dist.broadcast(seed_tensor, src=0)
    return seed_tensor.item()
def set_random_seed(seed, deterministic=False):
    """Seed every random number generator used during training.

    Args:
        seed (int): Seed to be used.
        deterministic (bool): Whether to set the deterministic option for
            the CUDNN backend, i.e., set
            ``torch.backends.cudnn.deterministic`` to True and
            ``torch.backends.cudnn.benchmark`` to False. Default: False.
    """
    for seeder in (random.seed, np.random.seed, torch.manual_seed,
                   torch.cuda.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    if deterministic:
        # Trade cuDNN autotuning speed for bitwise reproducibility.
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
def train_model(model, dataset, cfg, distributed=False, validate=False, timestamp=None, meta=None):
    """Train model entry function.

    Args:
        model (nn.Module): The model to be trained.
        dataset (:obj:`Dataset`): Train dataset.
        cfg (dict): The config dict for training.
        distributed (bool): Whether to use distributed training.
            Default: False.
        validate (bool): Whether to do evaluation. Default: False.
        timestamp (str | None): Local time for runner. Default: None.
        meta (dict | None): Meta dict to record some important information.
            Default: None.
    """
    logger = get_root_logger(log_level=cfg.log_level)
    # Dispatch to the distributed or single-machine training loop.
    trainer = _dist_train if distributed else _non_dist_train
    trainer(
        model,
        dataset,
        cfg,
        validate=validate,
        logger=logger,
        timestamp=timestamp,
        meta=meta)
def _dist_train(model, dataset, cfg, validate=False, logger=None, timestamp=None, meta=None):
    """Distributed training function.

    Args:
        model (nn.Module): The model to be trained.
        dataset (:obj:`Dataset`): Train dataset.
        cfg (dict): The config dict for training.
        validate (bool): Whether to do evaluation. Default: False.
        logger (logging.Logger | None): Logger for training. Default: None.
        timestamp (str | None): Local time for runner. Default: None.
        meta (dict | None): Meta dict to record some important information.
            Default: None.
    """
    dataset = (dataset if isinstance(dataset, (list, tuple)) else [dataset])
    # Base dataloader config: defaults first, parrots-only extras next, then
    # whitelisted overrides from cfg.data (later entries win).
    loader_cfg = {**dict(seed=cfg.get('seed'), drop_last=False, dist=True), **({} if (torch.__version__ != 'parrots') else dict(prefetch_num=2, pin_memory=False)), **dict(((k, cfg.data[k]) for k in ['samples_per_gpu', 'workers_per_gpu', 'shuffle', 'seed', 'drop_last', 'prefetch_num', 'pin_memory'] if (k in cfg.data)))}
    train_loader_cfg = dict(loader_cfg, **cfg.data.get('train_dataloader', {}))
    data_loaders = [build_dataloader(ds, **train_loader_cfg) for ds in dataset]
    # Wrap submodules separately in DDP (required for GAN-style models).
    find_unused_parameters = cfg.get('find_unused_parameters', False)
    model = DistributedDataParallelWrapper(model, device_ids=[torch.cuda.current_device()], broadcast_buffers=False, find_unused_parameters=find_unused_parameters)
    optimizer = build_optimizers(model, cfg.optimizers)
    runner = IterBasedRunner(model, optimizer=optimizer, work_dir=cfg.work_dir, logger=logger, meta=meta)
    # Propagate the launch timestamp so checkpoints/logs share a name.
    runner.timestamp = timestamp
    runner.register_training_hooks(cfg.lr_config, checkpoint_config=cfg.checkpoint_config, log_config=cfg.log_config)
    # Optional visualization hook; its output dir is made work_dir-relative.
    if (cfg.get('visual_config', None) is not None):
        cfg.visual_config['output_dir'] = os.path.join(cfg.work_dir, cfg.visual_config['output_dir'])
        runner.register_hook(mmcv.build_from_cfg(cfg.visual_config, HOOKS))
    # Optional distributed evaluation hook on the validation split.
    if (validate and (cfg.get('evaluation', None) is not None)):
        dataset = build_dataset(cfg.data.val)
        if (('val_samples_per_gpu' in cfg.data) or ('val_workers_per_gpu' in cfg.data)):
            warnings.warn('"val_samples_per_gpu/val_workers_per_gpu" have been deprecated. Please use "val_dataloader=dict(samples_per_gpu=1)" instead. Details see https://github.com/open-mmlab/mmediting/pull/201')
        # Val loader: no shuffling; legacy val_* keys are remapped, then
        # overridden by an explicit cfg.data.val_dataloader if present.
        val_loader_cfg = {**loader_cfg, **dict(shuffle=False, drop_last=False), **dict(((newk, cfg.data[oldk]) for (oldk, newk) in [('val_samples_per_gpu', 'samples_per_gpu'), ('val_workers_per_gpu', 'workers_per_gpu')] if (oldk in cfg.data))), **cfg.data.get('val_dataloader', {})}
        data_loader = build_dataloader(dataset, **val_loader_cfg)
        save_path = osp.join(cfg.work_dir, 'val_visuals')
        runner.register_hook(DistEvalIterHook(data_loader, save_path=save_path, **cfg.evaluation), priority='LOW')
    # User-defined hooks with configurable priority.
    if cfg.get('custom_hooks', None):
        custom_hooks = cfg.custom_hooks
        assert isinstance(custom_hooks, list), f'custom_hooks expect list type, but got {type(custom_hooks)}'
        for hook_cfg in cfg.custom_hooks:
            assert isinstance(hook_cfg, dict), f'Each item in custom_hooks expects dict type, but got {type(hook_cfg)}'
            hook_cfg = hook_cfg.copy()
            priority = hook_cfg.pop('priority', 'NORMAL')
            hook = build_from_cfg(hook_cfg, HOOKS)
            runner.register_hook(hook, priority=priority)
    # Resume takes precedence over plain weight loading.
    if cfg.resume_from:
        runner.resume(cfg.resume_from)
    elif cfg.load_from:
        runner.load_checkpoint(cfg.load_from)
    runner.run(data_loaders, cfg.workflow, cfg.total_iters)
def _non_dist_train(model, dataset, cfg, validate=False, logger=None, timestamp=None, meta=None):
    """Non-Distributed training function.

    Args:
        model (nn.Module): The model to be trained.
        dataset (:obj:`Dataset`): Train dataset.
        cfg (dict): The config dict for training.
        validate (bool): Whether to do evaluation. Default: False.
        logger (logging.Logger | None): Logger for training. Default: None.
        timestamp (str | None): Local time for runner. Default: None.
        meta (dict | None): Meta dict to record some important information.
            Default: None.
    """
    dataset = (dataset if isinstance(dataset, (list, tuple)) else [dataset])
    # Base dataloader config: defaults first, parrots-only extras next, then
    # whitelisted overrides from cfg.data (later entries win).
    loader_cfg = {**dict(seed=cfg.get('seed'), drop_last=False, dist=False, num_gpus=cfg.gpus), **({} if (torch.__version__ != 'parrots') else dict(prefetch_num=2, pin_memory=False)), **dict(((k, cfg.data[k]) for k in ['samples_per_gpu', 'workers_per_gpu', 'shuffle', 'seed', 'drop_last', 'prefetch_num', 'pin_memory'] if (k in cfg.data)))}
    train_loader_cfg = dict(loader_cfg, **cfg.data.get('train_dataloader', {}))
    data_loaders = [build_dataloader(ds, **train_loader_cfg) for ds in dataset]
    model = MMDataParallel(model, device_ids=range(cfg.gpus))
    optimizer = build_optimizers(model, cfg.optimizers)
    runner = IterBasedRunner(model, optimizer=optimizer, work_dir=cfg.work_dir, logger=logger, meta=meta)
    # Propagate the launch timestamp so checkpoints/logs share a name.
    runner.timestamp = timestamp
    runner.register_training_hooks(cfg.lr_config, checkpoint_config=cfg.checkpoint_config, log_config=cfg.log_config)
    # Optional visualization hook; its output dir is made work_dir-relative.
    if (cfg.get('visual_config', None) is not None):
        cfg.visual_config['output_dir'] = os.path.join(cfg.work_dir, cfg.visual_config['output_dir'])
        runner.register_hook(mmcv.build_from_cfg(cfg.visual_config, HOOKS))
    # Optional (single-process) evaluation hook on the validation split.
    if (validate and (cfg.get('evaluation', None) is not None)):
        dataset = build_dataset(cfg.data.val)
        if (('val_samples_per_gpu' in cfg.data) or ('val_workers_per_gpu' in cfg.data)):
            warnings.warn('"val_samples_per_gpu/val_workers_per_gpu" have been deprecated. Please use "val_dataloader=dict(samples_per_gpu=1)" instead. Details see https://github.com/open-mmlab/mmediting/pull/201')
        # Val loader: no shuffling; legacy val_* keys are remapped, then
        # overridden by an explicit cfg.data.val_dataloader if present.
        val_loader_cfg = {**loader_cfg, **dict(shuffle=False, drop_last=False), **dict(((newk, cfg.data[oldk]) for (oldk, newk) in [('val_samples_per_gpu', 'samples_per_gpu'), ('val_workers_per_gpu', 'workers_per_gpu')] if (oldk in cfg.data))), **cfg.data.get('val_dataloader', {})}
        data_loader = build_dataloader(dataset, **val_loader_cfg)
        save_path = osp.join(cfg.work_dir, 'val_visuals')
        runner.register_hook(EvalIterHook(data_loader, save_path=save_path, **cfg.evaluation), priority='LOW')
    # User-defined hooks with configurable priority.
    if cfg.get('custom_hooks', None):
        custom_hooks = cfg.custom_hooks
        assert isinstance(custom_hooks, list), f'custom_hooks expect list type, but got {type(custom_hooks)}'
        for hook_cfg in cfg.custom_hooks:
            assert isinstance(hook_cfg, dict), f'Each item in custom_hooks expects dict type, but got {type(hook_cfg)}'
            hook_cfg = hook_cfg.copy()
            priority = hook_cfg.pop('priority', 'NORMAL')
            hook = build_from_cfg(hook_cfg, HOOKS)
            runner.register_hook(hook, priority=priority)
    # Resume takes precedence over plain weight loading.
    if cfg.resume_from:
        runner.resume(cfg.resume_from)
    elif cfg.load_from:
        runner.load_checkpoint(cfg.load_from)
    runner.run(data_loaders, cfg.workflow, cfg.total_iters)
def read_image(filepath):
    """Read an image from file as an RGB array.

    Args:
        filepath (str): File path.

    Returns:
        np.array: The loaded image (color, RGB channel order).
    """
    raw_bytes = FILE_CLIENT.get(filepath)
    return mmcv.imfrombytes(
        raw_bytes, flag='color', channel_order='rgb', backend='pillow')
def read_frames(source, start_index, num_frames, from_video, end_index):
    """Read frames from a file list or a video.

    Args:
        source (list | mmcv.VideoReader): Source of frames.
        start_index (int): Start index of frames.
        num_frames (int): Number of frames to read.
        from_video (bool): Whether to read frames from a video.
        end_index (int): The end index of frames (exclusive upper bound).

    Returns:
        list[np.array]: The loaded frames.
    """
    last_index = min(start_index + num_frames, end_index)
    if not from_video:
        return [read_image(path) for path in source[start_index:last_index]]
    frames = []
    for idx in range(start_index, last_index):
        if idx >= source.frame_cnt:
            break
        # Reverse the channel axis of the decoded frame (channel-order swap).
        frames.append(np.flip(source.get_frame(idx), axis=2))
    return frames
def video_interpolation_inference(model, input_dir, output_dir, start_idx=0, end_idx=None, batch_size=4, fps_multiplier=0, fps=0, filename_tmpl='{:08d}.png'):
    """Run video frame interpolation and write the result to disk.

    The interpolated frames are written either to a video file or, when
    ``output_dir`` has no video extension, as individual image files named by
    ``filename_tmpl``. The function returns None; results go to
    ``output_dir``.

    Args:
        model (nn.Module): The loaded model.
        input_dir (str): Path of the input video, or a directory of frames.
        output_dir (str): Path of the output video, or an output directory.
        start_idx (int): The index corresponding to the first frame in the
            sequence. Default: 0.
        end_idx (int | None): The index corresponding to the last
            interpolated frame in the sequence. If it is None, interpolate
            to the last frame of video or sequence. Default: None.
        batch_size (int): Batch size. Default: 4.
        fps_multiplier (float): Multiply the fps based on the input video.
            Default: 0.
        fps (float): Frame rate of the output video. Default: 0.
        filename_tmpl (str): Template of the output file names.
            Default: '{:08d}.png'.
    """
    device = next(model.parameters()).device
    # Prefer the demo pipeline, then test, then val.
    if model.cfg.get('demo_pipeline', None):
        test_pipeline = model.cfg.demo_pipeline
    elif model.cfg.get('test_pipeline', None):
        test_pipeline = model.cfg.test_pipeline
    else:
        test_pipeline = model.cfg.val_pipeline
    # Frames are loaded manually below, so drop all loading steps.
    tmp_pipeline = []
    for pipeline in test_pipeline:
        if (pipeline['type'] not in ['GenerateSegmentIndices', 'LoadImageFromFileList', 'LoadImageFromFile']):
            tmp_pipeline.append(pipeline)
    test_pipeline = tmp_pipeline
    test_pipeline = Compose(test_pipeline)
    # The input is either a video file or a directory of image frames.
    input_file_extension = os.path.splitext(input_dir)[1]
    if (input_file_extension in VIDEO_EXTENSIONS):
        source = mmcv.VideoReader(input_dir)
        input_fps = source.fps
        length = source.frame_cnt
        from_video = True
        (h, w) = (source.height, source.width)
        if fps_multiplier:
            assert (fps_multiplier > 0), '`fps_multiplier` cannot be negative'
            output_fps = (fps_multiplier * input_fps)
        else:
            # Default: double the input frame rate.
            output_fps = (fps if (fps > 0) else (input_fps * 2))
    else:
        files = os.listdir(input_dir)
        files = [osp.join(input_dir, f) for f in files]
        files.sort()
        source = files
        length = files.__len__()
        from_video = False
        # Probe one frame for the spatial size.
        example_frame = read_image(files[0])
        (h, w) = example_frame.shape[:2]
        output_fps = fps
    # Output target: a video writer, or per-frame image files.
    output_file_extension = os.path.splitext(output_dir)[1]
    if (output_file_extension in VIDEO_EXTENSIONS):
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        target = cv2.VideoWriter(output_dir, fourcc, output_fps, (w, h))
        to_video = True
    else:
        to_video = False
    end_idx = (min(end_idx, length) if (end_idx is not None) else length)
    # Each step consumes ``step_size`` fresh frames; consecutive windows
    # overlap by ``repeat_frame`` frames.
    step_size = (model.step_frames * batch_size)
    lenth_per_step = (model.required_frames + (model.step_frames * (batch_size - 1)))
    repeat_frame = (model.required_frames - model.step_frames)
    prog_bar = mmcv.ProgressBar(math.ceil(((((end_idx + step_size) - lenth_per_step) - start_idx) / step_size)))
    output_index = start_idx
    for start_index in range(start_idx, end_idx, step_size):
        images = read_frames(source, start_index, lenth_per_step, from_video, end_index=end_idx)
        data = dict(inputs=images, inputs_path=None, key=input_dir)
        data = [test_pipeline(data)]
        data = collate(data, samples_per_gpu=1)['inputs']
        # Split the window into per-step chunks for batched inference.
        data = model.split_frames(data)
        input_tensors = data.clone().detach()
        with torch.no_grad():
            output = model(data.to(device), test_mode=True)['output']
        # Ensure a temporal dimension: (B, C, H, W) -> (B, 1, C, H, W).
        if (len(output.shape) == 4):
            output = output.unsqueeze(1)
        output_tensors = output.cpu()
        if (len(output_tensors.shape) == 4):
            output_tensors = output_tensors.unsqueeze(1)
        result = model.merge_frames(input_tensors, output_tensors)
        # Drop frames duplicated by the window overlap on non-first steps.
        # NOTE(review): this keeps the LAST ``repeat_frame`` frames; dropping
        # the FIRST ``repeat_frame`` (``result[repeat_frame:]``) looks like
        # the intent -- confirm against the model's merge_frames contract.
        if (not (start_idx == start_index)):
            result = result[(0 - repeat_frame):]
        prog_bar.update()
        if to_video:
            for frame in result:
                target.write(frame)
        else:
            for frame in result:
                save_path = osp.join(output_dir, filename_tmpl.format(output_index))
                mmcv.imwrite(frame, save_path)
                output_index += 1
        if ((start_index + lenth_per_step) >= end_idx):
            break
    print()
    print(f'Output dir: {output_dir}')
    if to_video:
        target.release()
@MODULE_WRAPPERS.register_module()
class DistributedDataParallelWrapper(nn.Module):
    """A DistributedDataParallel wrapper for models in MMediting.

    In MMediting, there is a need to wrap different modules in the models
    with separate DistributedDataParallel. Otherwise, it will cause
    errors for GAN training. More specifically, a GAN model usually has two
    sub-modules: generator and discriminator. Wrapping both in one standard
    DistributedDataParallel causes errors during training, because when the
    parameters of the generator (or discriminator) are updated, the
    parameters of the discriminator (or generator) are not, which is not
    allowed for DistributedDataParallel. So this wrapper separately wraps
    DistributedDataParallel for generator and discriminator.

    In this wrapper, we perform two operations:
    1. Wrap the modules in the model with separate MMDistributedDataParallel.
       Note that only modules with parameters will be wrapped.
    2. Do scatter operation for 'forward', 'train_step' and 'val_step'.

    Note that the arguments of this wrapper are the same as those in
    ``torch.nn.parallel.distributed.DistributedDataParallel``.

    Args:
        module (nn.Module): Module that needs to be wrapped.
        device_ids (list[int | `torch.device`]): Same as that in
            ``DistributedDataParallel``. Must contain exactly one entry.
        dim (int, optional): Same as that in the official scatter function
            in pytorch. Defaults to 0.
        broadcast_buffers (bool): Same as that in
            ``DistributedDataParallel``. Defaults to False.
        find_unused_parameters (bool, optional): Same as that in
            ``DistributedDataParallel``. Defaults to False.
        kwargs (dict): Other arguments used in ``DistributedDataParallel``.
    """

    def __init__(self, module, device_ids, dim=0, broadcast_buffers=False, find_unused_parameters=False, **kwargs):
        super().__init__()
        # NOTE(review): the message below reads 'onesingle' and lacks spacing
        # between sentences -- left untouched here to preserve behavior.
        assert (len(device_ids) == 1), f'Currently, DistributedDataParallelWrapper only supports onesingle CUDA device for each process.The length of device_ids must be 1, but got {len(device_ids)}.'
        self.module = module
        self.dim = dim
        self.to_ddp(device_ids=device_ids, dim=dim, broadcast_buffers=broadcast_buffers, find_unused_parameters=find_unused_parameters, **kwargs)
        self.output_device = _get_device_index(device_ids[0], True)

    def to_ddp(self, device_ids, dim, broadcast_buffers, find_unused_parameters, **kwargs):
        """Wrap models with separate MMDistributedDataParallel.

        It only wraps the modules with parameters; parameter-less or fully
        frozen modules are simply moved to CUDA.
        """
        for (name, module) in self.module._modules.items():
            if (next(module.parameters(), None) is None):
                # No parameters at all: no gradients to sync, skip DDP.
                module = module.cuda()
            elif all(((not p.requires_grad) for p in module.parameters())):
                # Entirely frozen: nothing to sync either.
                module = module.cuda()
            else:
                module = MMDistributedDataParallel(module.cuda(), device_ids=device_ids, dim=dim, broadcast_buffers=broadcast_buffers, find_unused_parameters=find_unused_parameters, **kwargs)
            self.module._modules[name] = module

    def scatter(self, inputs, kwargs, device_ids):
        """Scatter function.

        Args:
            inputs (Tensor): Input Tensor.
            kwargs (dict): Args for
                ``mmcv.parallel.scatter_gather.scatter_kwargs``.
            device_ids (int): Device id.
        """
        return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)

    def forward(self, *inputs, **kwargs):
        """Forward function.

        Args:
            inputs (tuple): Input data.
            kwargs (dict): Args for
                ``mmcv.parallel.scatter_gather.scatter_kwargs``.
        """
        # Scatter onto the current device; index [0] selects its shard.
        (inputs, kwargs) = self.scatter(inputs, kwargs, [torch.cuda.current_device()])
        return self.module(*inputs[0], **kwargs[0])

    def train_step(self, *inputs, **kwargs):
        """Train step function.

        Args:
            inputs (Tensor): Input Tensor.
            kwargs (dict): Args for
                ``mmcv.parallel.scatter_gather.scatter_kwargs``.
        """
        (inputs, kwargs) = self.scatter(inputs, kwargs, [torch.cuda.current_device()])
        output = self.module.train_step(*inputs[0], **kwargs[0])
        return output

    def val_step(self, *inputs, **kwargs):
        """Validation step function.

        Args:
            inputs (tuple): Input data.
            kwargs (dict): Args for ``scatter_kwargs``.
        """
        (inputs, kwargs) = self.scatter(inputs, kwargs, [torch.cuda.current_device()])
        output = self.module.val_step(*inputs[0], **kwargs[0])
        return output
class EvalIterHook(Hook):
    """Non-Distributed evaluation hook for iteration-based runner.

    This hook will regularly perform evaluation in a given interval when
    performing in non-distributed environment.

    Args:
        dataloader (DataLoader): A PyTorch dataloader.
        interval (int): Evaluation interval. Default: 1.
        eval_kwargs (dict): Other eval kwargs. It contains:
            save_image (bool): Whether to save image.
            save_path (str): The path to save image.
    """

    def __init__(self, dataloader, interval=1, **eval_kwargs):
        if (not isinstance(dataloader, DataLoader)):
            raise TypeError(f'dataloader must be a pytorch DataLoader, but got {type(dataloader)}')
        self.dataloader = dataloader
        self.interval = interval
        self.eval_kwargs = eval_kwargs
        # Pop the image-saving options so only evaluation kwargs remain in
        # ``self.eval_kwargs`` when forwarded to ``dataset.evaluate``.
        self.save_image = self.eval_kwargs.pop('save_image', False)
        self.save_path = self.eval_kwargs.pop('save_path', None)

    def after_train_iter(self, runner):
        """The behavior after each train iteration.

        Args:
            runner (``mmcv.runner.BaseRunner``): The runner.
        """
        if (not self.every_n_iters(runner, self.interval)):
            return
        runner.log_buffer.clear()
        # Imported lazily to avoid a circular import with mmedit.apis.
        from mmedit.apis import single_gpu_test
        results = single_gpu_test(runner.model, self.dataloader, save_image=self.save_image, save_path=self.save_path, iteration=runner.iter)
        self.evaluate(runner, results)

    def evaluate(self, runner, results):
        """Evaluation function.

        Args:
            runner (``mmcv.runner.BaseRunner``): The runner.
            results (dict): Model forward results.
        """
        eval_res = self.dataloader.dataset.evaluate(results, logger=runner.logger, **self.eval_kwargs)
        for (name, val) in eval_res.items():
            runner.log_buffer.output[name] = val
        # Mark the buffer ready so the logger hooks flush these values.
        runner.log_buffer.ready = True
class DistEvalIterHook(EvalIterHook):
    """Distributed evaluation hook.

    Args:
        dataloader (DataLoader): A PyTorch dataloader.
        interval (int): Evaluation interval. Default: 1.
        tmpdir (str | None): Temporary directory to save the results of all
            processes. Default: None.
        gpu_collect (bool): Whether to use gpu or cpu to collect results.
            Default: False.
        eval_kwargs (dict): Other eval kwargs. It may contain:
            save_image (bool): Whether save image.
            save_path (str): The path to save image.
    """

    def __init__(self, dataloader, interval=1, gpu_collect=False, **eval_kwargs):
        super().__init__(dataloader, interval, **eval_kwargs)
        self.gpu_collect = gpu_collect

    def after_train_iter(self, runner):
        """The behavior after each train iteration.

        Args:
            runner (``mmcv.runner.BaseRunner``): The runner.
        """
        if (not self.every_n_iters(runner, self.interval)):
            return
        runner.log_buffer.clear()
        # Imported lazily to avoid a circular import with mmedit.apis.
        from mmedit.apis import multi_gpu_test
        # Shards are collected under ``work_dir/.eval_hook`` in cpu mode.
        results = multi_gpu_test(runner.model, self.dataloader, tmpdir=osp.join(runner.work_dir, '.eval_hook'), gpu_collect=self.gpu_collect, save_image=self.save_image, save_path=self.save_path, iteration=runner.iter)
        # Only rank 0 receives the gathered results and evaluates them.
        if (runner.rank == 0):
            print('\n')
            self.evaluate(runner, results)
def gaussian(x, sigma):
    """Evaluate a normalized 1-D Gaussian.

    Args:
        x (array_like): The independent variable.
        sigma (float): Standard deviation of the gaussian function.

    Return:
        ndarray or scalar: Gaussian value of `x`.
    """
    exponent = -(x ** 2) / (2 * (sigma ** 2))
    normalizer = sigma * np.sqrt(2 * np.pi)
    return np.exp(exponent) / normalizer
def dgaussian(x, sigma):
    """Evaluate the first derivative of a normalized 1-D Gaussian.

    Args:
        x (array_like): The independent variable.
        sigma (float): Standard deviation of the gaussian function.

    Return:
        ndarray or scalar: Gradient of gaussian of `x`.
    """
    # d/dx gaussian(x) = -x / sigma^2 * gaussian(x)
    return (-x) * gaussian(x, sigma) / (sigma ** 2)
def gauss_filter(sigma, epsilon=0.01):
    """Build normalized Gaussian-derivative filters along x and y.

    Args:
        sigma (float): Standard deviation of the gaussian kernel.
        epsilon (float): Small value used when calculating kernel size.
            Default: 1e-2.

    Return:
        tuple[ndarray]: Gaussian filter along x and y axis.
    """
    # Choose the half width so that tail values below ``epsilon`` are cut.
    half_size = np.ceil(
        sigma * np.sqrt(-2 * np.log(np.sqrt(2 * np.pi) * sigma * epsilon)))
    size = int(2 * half_size + 1)
    # Separable outer product: Gaussian along rows, its derivative along
    # columns (identical values to the elementwise double loop).
    offsets = np.arange(size) - half_size
    filter_x = gaussian(offsets, sigma)[:, None] * dgaussian(offsets, sigma)[None, :]
    # L2-normalize the kernel.
    norm = np.sqrt((filter_x ** 2).sum())
    filter_x = filter_x / norm
    filter_y = np.transpose(filter_x)
    return (filter_x, filter_y)
def gauss_gradient(img, sigma):
    """Compute the Gaussian gradient magnitude of an image.

    From https://www.mathworks.com/matlabcentral/mlc-downloads/downloads/
    submissions/8060/versions/2/previews/gaussgradient/gaussgradient.m/
    index.html

    Args:
        img (ndarray): Input image.
        sigma (float): Standard deviation of the gaussian kernel.

    Return:
        ndarray: Gaussian gradient of input `img`.
    """
    kernel_x, kernel_y = gauss_filter(sigma)
    # Filter with replicated borders, then combine the two responses.
    grad_x = cv2.filter2D(
        img, -1, kernel_x, borderType=cv2.BORDER_REPLICATE)
    grad_y = cv2.filter2D(
        img, -1, kernel_y, borderType=cv2.BORDER_REPLICATE)
    return np.sqrt(grad_x ** 2 + grad_y ** 2)
def inference_with_session(sess, io_binding, output_names, input_tensor):
    """Run an ONNX Runtime session on a torch tensor via IO binding.

    Args:
        sess: An ``onnxruntime.InferenceSession``.
        io_binding: The session's IO binding object.
        output_names (list[str]): Names of the outputs to bind.
        input_tensor (torch.Tensor): Input bound in place as 'input'.

    Returns:
        list[np.ndarray]: Outputs copied back to CPU memory.
    """
    device_type = input_tensor.device.type
    device_id = input_tensor.device.index
    if device_id is None:
        device_id = 0
    # Bind the tensor's memory directly; no host copy of the input is made.
    io_binding.bind_input(
        name='input',
        device_type=device_type,
        device_id=device_id,
        element_type=np.float32,
        shape=input_tensor.shape,
        buffer_ptr=input_tensor.data_ptr())
    for name in output_names:
        io_binding.bind_output(name)
    sess.run_with_iobinding(io_binding)
    return io_binding.copy_outputs_to_cpu()
class ONNXRuntimeMattor(nn.Module):
    """Wraps an ONNX Runtime session as a matting model.

    Forwarding delegates the network pass to the ONNX session while reusing
    the base model's shape restoration, evaluation and image saving.
    """

    def __init__(self, sess, io_binding, output_names, base_model):
        # sess: onnxruntime.InferenceSession running the exported network.
        # io_binding / output_names: IO-binding handles for the session.
        # base_model: original mattor providing post-processing utilities.
        super(ONNXRuntimeMattor, self).__init__()
        self.sess = sess
        self.io_binding = io_binding
        self.output_names = output_names
        self.base_model = base_model

    def forward(self, merged, trimap, meta, test_mode=False, save_image=False, save_path=None, iteration=None):
        """Predict an alpha matte for a merged image and its trimap.

        Returns:
            dict: ``pred_alpha`` (np.ndarray) and ``eval_result``.
        """
        # The exported model expects merged image and trimap concatenated
        # along the channel dimension.
        input_tensor = torch.cat((merged, trimap), 1).contiguous()
        pred_alpha = inference_with_session(self.sess, self.io_binding, self.output_names, input_tensor)[0]
        pred_alpha = pred_alpha.squeeze()
        # Undo test-time padding/resizing using the recorded meta info.
        pred_alpha = self.base_model.restore_shape(pred_alpha, meta)
        eval_result = self.base_model.evaluate(pred_alpha, meta)
        if save_image:
            self.base_model.save_image(pred_alpha, meta, save_path, iteration)
        return {'pred_alpha': pred_alpha, 'eval_result': eval_result}
class RestorerGenerator(nn.Module):
    """Drop-in generator module backed by an ONNX Runtime session.

    Instances can replace a restorer's ``generator`` attribute so the rest
    of the model's logic runs unchanged on top of ONNX Runtime.
    """

    def __init__(self, sess, io_binding, output_names):
        super(RestorerGenerator, self).__init__()
        self.sess = sess
        self.io_binding = io_binding
        self.output_names = output_names

    def forward(self, x):
        """Run the ONNX session on ``x`` and return a torch tensor."""
        outputs = inference_with_session(
            self.sess, self.io_binding, self.output_names, x)
        return torch.from_numpy(outputs[0])
class ONNXRuntimeRestorer(nn.Module):
    """Restoration model whose generator runs on ONNX Runtime.

    The base model keeps all of its pre/post-processing; only its
    ``generator`` is swapped for an ONNX-backed module.
    """

    def __init__(self, sess, io_binding, output_names, base_model):
        super(ONNXRuntimeRestorer, self).__init__()
        self.sess = sess
        self.io_binding = io_binding
        self.output_names = output_names
        self.base_model = base_model
        # Replace the generator so the base model's test logic runs on ORT.
        self.base_model.generator = RestorerGenerator(
            self.sess, self.io_binding, self.output_names)

    def forward(self, lq, gt=None, test_mode=False, **kwargs):
        """Delegate to the base model (now using the ONNX generator)."""
        return self.base_model(lq, gt=gt, test_mode=test_mode, **kwargs)
class ONNXRuntimeEditing(nn.Module):
    """Top-level ONNX Runtime wrapper for mmediting models.

    Builds an ``onnxruntime.InferenceSession`` from an exported model file
    and wraps it in the matching task-specific adapter (mattor or restorer).

    Args:
        onnx_file (str): Path to the exported ONNX model.
        cfg (mmcv.Config): Model config used to build the base model.
        device_id (int): CUDA device id used when a GPU provider exists.

    Raises:
        TypeError: If the built base model is neither a ``BaseMattor`` nor a
            ``BasicRestorer``.
    """

    def __init__(self, onnx_file, cfg, device_id):
        super(ONNXRuntimeEditing, self).__init__()
        # Register mmcv's custom ops with ONNX Runtime when available.
        ort_custom_op_path = ''
        try:
            from mmcv.ops import get_onnxruntime_op_path
            ort_custom_op_path = get_onnxruntime_op_path()
        except (ImportError, ModuleNotFoundError):
            warnings.warn('If input model has custom op from mmcv, you may have to build mmcv with ONNXRuntime from source.')
        session_options = ort.SessionOptions()
        if osp.exists(ort_custom_op_path):
            session_options.register_custom_ops_library(ort_custom_op_path)
        sess = ort.InferenceSession(onnx_file, session_options)
        # Prefer CUDA execution when ONNX Runtime was built with GPU support.
        providers = ['CPUExecutionProvider']
        options = [{}]
        is_cuda_available = (ort.get_device() == 'GPU')
        if is_cuda_available:
            providers.insert(0, 'CUDAExecutionProvider')
            options.insert(0, {'device_id': device_id})
        sess.set_providers(providers, options)
        self.sess = sess
        self.device_id = device_id
        self.io_binding = sess.io_binding()
        self.output_names = [_.name for _ in sess.get_outputs()]
        base_model = build_model(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
        # Pick the task-specific wrapper; previously an unsupported model
        # type left ``WrapperClass`` unbound and crashed with a confusing
        # UnboundLocalError.
        if isinstance(base_model, BaseMattor):
            WrapperClass = ONNXRuntimeMattor
        elif isinstance(base_model, BasicRestorer):
            WrapperClass = ONNXRuntimeRestorer
        else:
            raise TypeError(
                'Only BaseMattor and BasicRestorer models are supported, '
                f'but got {type(base_model)}')
        self.wrapper = WrapperClass(self.sess, self.io_binding, self.output_names, base_model)

    def forward(self, **kwargs):
        """Delegate to the task-specific wrapper."""
        return self.wrapper(**kwargs)
@HOOKS.register_module()
class ExponentialMovingAverageHook(Hook):
    """Exponential Moving Average Hook.

    Exponential moving average is a trick that widely used in current GAN
    literature, e.g., PGGAN, StyleGAN, and BigGAN. This general idea of it is
    maintaining a model with the same architecture, but its parameters are
    updated as a moving average of the trained weights in the original model.
    In general, the model with moving averaged weights achieves better
    performance.

    Args:
        module_keys (str | tuple[str]): The name of the ema model. Note that we
            require these keys are followed by '_ema' so that we can easily
            find the original model by discarding the last four characters.
        interp_mode (str, optional): Mode of the interpolation method.
            Defaults to 'lerp'.
        interp_cfg (dict | None, optional): Set arguments of the interpolation
            function. Defaults to None.
        interval (int, optional): Evaluation interval (by iterations).
            Default: -1.
        start_iter (int, optional): Start iteration for ema. If the start
            iteration is not reached, the weights of ema model will maintain
            the same as the original one. Otherwise, its parameters are updated
            as a moving average of the trained weights in the original model.
            Default: 0.
    """

    def __init__(self, module_keys, interp_mode='lerp', interp_cfg=None,
                 interval=(-1), start_iter=0):
        super().__init__()
        assert (isinstance(module_keys, str)
                or mmcv.is_tuple_of(module_keys, str))
        # Normalize to a tuple of keys so the rest of the hook can iterate.
        self.module_keys = ((module_keys,) if isinstance(module_keys, str)
                            else module_keys)
        # Each key must end with '_ema'; stripping those 4 chars yields the
        # attribute name of the original (trained) network.
        for k in self.module_keys:
            assert k.endswith('_ema'), 'You should give keys that end with "_ema".'
        self.interp_mode = interp_mode
        self.interp_cfg = (dict() if (interp_cfg is None)
                           else deepcopy(interp_cfg))
        self.interval = interval
        self.start_iter = start_iter
        # The interpolation method must be defined on this class (e.g. lerp).
        assert hasattr(self, interp_mode), f'Currently, we do not support {self.interp_mode} for EMA.'
        # Bind the chosen interpolation with its config once, up front.
        self.interp_func = partial(getattr(self, interp_mode), **self.interp_cfg)

    @staticmethod
    def lerp(a, b, momentum=0.999, momentum_nontrainable=0.0, trainable=True):
        """Linear interpolation: move ``a`` toward ``b`` by momentum ``m``.

        Non-trainable tensors (e.g. buffers) use ``momentum_nontrainable``,
        which defaults to 0.0 (i.e. the EMA copy simply tracks ``a``).
        """
        m = (momentum if trainable else momentum_nontrainable)
        return (a + ((b - a) * m))

    def every_n_iters(self, runner, n):
        """Return True when an EMA update should run at this iteration.

        Before ``start_iter`` this is always True (the EMA model is kept as
        a plain copy of the original); afterwards the update fires every
        ``n`` iterations (never, if ``n <= 0``).
        """
        if (runner.iter < self.start_iter):
            return True
        return (((((runner.iter + 1) - self.start_iter) % n) == 0) if (n > 0) else False)

    @torch.no_grad()
    def after_train_iter(self, runner):
        """Update each EMA network from its original counterpart."""
        if (not self.every_n_iters(runner, self.interval)):
            return
        # Unwrap DataParallel/DistributedDataParallel if needed.
        model = (runner.model.module if is_module_wrapper(runner.model) else runner.model)
        for key in self.module_keys:
            ema_net = getattr(model, key)
            # keep_vars=False: plain tensors for the EMA side.
            states_ema = ema_net.state_dict(keep_vars=False)
            net = getattr(model, key[:(-4)])
            # keep_vars=True so requires_grad is preserved for the
            # trainable/non-trainable distinction in the interp function.
            states_orig = net.state_dict(keep_vars=True)
            for (k, v) in states_orig.items():
                if (runner.iter < self.start_iter):
                    # Warm-up phase: keep EMA identical to the original
                    # (in-place copy preserves the existing tensors).
                    states_ema[k].data.copy_(v.data)
                else:
                    # Rebind to the interpolated value, detached from the
                    # autograd graph.
                    states_ema[k] = self.interp_func(v, states_ema[k], trainable=v.requires_grad).detach()
            ema_net.load_state_dict(states_ema, strict=True)

    def before_run(self, runner):
        """Ensure the EMA networks exist, creating copies if necessary."""
        model = (runner.model.module if is_module_wrapper(runner.model) else runner.model)
        for k in self.module_keys:
            # Neither the EMA model nor the original exists: nothing to copy.
            if ((not hasattr(model, k)) and (not hasattr(model, k[:(-4)]))):
                raise RuntimeError(f'Cannot find both {k[:(-4)]} and {k} network for EMA hook.')
            # Original exists but EMA copy does not: create it here, with a
            # warning, since explicit definition in the model is preferred.
            if ((not hasattr(model, k)) and hasattr(model, k[:(-4)])):
                setattr(model, k, deepcopy(getattr(model, k[:(-4)])))
                warnings.warn(f'We do not suggest construct and initialize EMA model {k} in hook. You may explicitly define it by yourself.')
def build_optimizers(model, cfgs):
    """Build one optimizer, or a dict of them, from config.

    If every value in ``cfgs`` is itself a dict, ``cfgs`` is treated as a
    mapping from sub-module name to optimizer config, and a dict of
    optimizers is returned:

    .. code-block:: python

        optimizer_cfg = dict(
            model1=dict(type='SGD', lr=lr),
            model2=dict(type='SGD', lr=lr))
        # -> dict('model1': Optimizer, 'model2': Optimizer)

    Otherwise ``cfgs`` is a single optimizer config and a single optimizer
    for the whole model is returned:

    .. code-block:: python

        optimizer_cfg = dict(type='SGD', lr=lr)
        # -> torch.optim.Optimizer

    Args:
        model (:obj:`nn.Module`): The model with parameters to be optimized.
        cfgs (dict): The config dict of the optimizer(s).

    Returns:
        dict[:obj:`torch.optim.Optimizer`] | :obj:`torch.optim.Optimizer`:
            The initialized optimizer(s).
    """
    # Unwrap DataParallel-style wrappers before looking up sub-modules.
    if hasattr(model, 'module'):
        model = model.module

    # A dict whose values are all dicts is interpreted as per-module configs.
    if all(isinstance(cfg, dict) for cfg in cfgs.values()):
        return {
            name: build_optimizer(getattr(model, name), cfg.copy())
            for name, cfg in cfgs.items()
        }

    # Single optimizer config for the whole model.
    return build_optimizer(model, cfgs)
@HOOKS.register_module()
class LinearLrUpdaterHook(LrUpdaterHook):
    """Linear learning rate scheduler for image generation.

    The learning rate stays at the base value (``base_lr`` from mmcv) until
    ``start`` (iteration or epoch, per ``by_epoch``), then decays linearly
    toward ``target_lr``, updated once every ``interval`` steps.

    Args:
        target_lr (float): The target learning rate. Default: 0.
        start (int): The point (iteration / epoch, specified by ``by_epoch``
            in the mmcv parent class) at which the linear decay begins.
            Default: 0.
        interval (int): The interval to update the learning rate. Default: 1.
    """

    def __init__(self, target_lr=0, start=0, interval=1, **kwargs):
        super().__init__(**kwargs)
        self.target_lr = target_lr
        self.start = start
        self.interval = interval

    def get_lr(self, runner, base_lr):
        """Calculate the learning rate for the current progress.

        Args:
            runner (object): The passed runner.
            base_lr (float): Base learning rate.

        Returns:
            float: Current learning rate.
        """
        if self.by_epoch:
            progress, max_progress = runner.epoch, runner.max_epochs
        else:
            progress, max_progress = runner.iter, runner.max_iters
        assert max_progress >= self.start

        # Degenerate schedule: decay window is empty, keep the base lr.
        if max_progress == self.start:
            return base_lr

        # Fraction of the decay window completed, stepped every `interval`.
        steps_done = max(0, progress - self.start) // self.interval
        steps_total = (max_progress - self.start) // self.interval
        factor = steps_done / steps_total
        return base_lr + (self.target_lr - base_lr) * factor
def sync_random_seed(seed=None, device='cuda'):
    """Make sure different ranks share the same seed.

    All workers must call this function, otherwise it will deadlock.
    This method is generally used in `DistributedSampler`, because the seed
    should be identical across all processes in the distributed group.

    Args:
        seed (int, Optional): The seed. Default to None.
        device (str): The device where the seed will be put on.
            Default to 'cuda'.

    Returns:
        int: Seed to be used.
    """
    if seed is None:
        seed = np.random.randint(2 ** 31)
    assert isinstance(seed, int)

    rank, world_size = get_dist_info()
    # Single-process run: nothing to synchronize.
    if world_size == 1:
        return seed

    # Rank 0 owns the seed; all other ranks receive it via broadcast.
    payload = seed if rank == 0 else 0
    seed_tensor = torch.tensor(payload, dtype=torch.int32, device=device)
    dist.broadcast(seed_tensor, src=0)
    return seed_tensor.item()
class BaseDataset(Dataset, metaclass=ABCMeta):
    """Base class for datasets.

    All datasets should subclass it, and all subclasses should overwrite
    ``load_annotations``, which loads information and generates image lists.

    Args:
        pipeline (list[dict | callable]): A sequence of data transforms.
        test_mode (bool): If True, the dataset will work in test mode.
            Otherwise, in train mode.
    """

    def __init__(self, pipeline, test_mode=False):
        super().__init__()
        self.test_mode = test_mode
        self.pipeline = Compose(pipeline)

    @abstractmethod
    def load_annotations(self):
        """Abstract function for loading annotation.

        All subclasses should overwrite this function.
        """

    def prepare_train_data(self, idx):
        """Prepare training data.

        Args:
            idx (int): Index of the training batch data.

        Returns:
            dict: Returned training batch.
        """
        # Deep-copy so pipeline transforms never mutate the stored infos.
        sample = copy.deepcopy(self.data_infos[idx])
        return self.pipeline(sample)

    def prepare_test_data(self, idx):
        """Prepare testing data.

        Args:
            idx (int): Index for getting each testing batch.

        Returns:
            Tensor: Returned testing batch.
        """
        sample = copy.deepcopy(self.data_infos[idx])
        return self.pipeline(sample)

    def __len__(self):
        """Return the number of samples in the dataset."""
        return len(self.data_infos)

    def __getitem__(self, idx):
        """Dispatch to the train or test preparation path.

        Args:
            idx (int): Index for getting each item.
        """
        if self.test_mode:
            return self.prepare_test_data(idx)
        return self.prepare_train_data(idx)
class BaseGenerationDataset(BaseDataset):
    """Base class for generation datasets."""

    @staticmethod
    def scan_folder(path):
        """Obtain image path list (including sub-folders) from a given folder.

        Args:
            path (str | :obj:`Path`): Folder path.

        Returns:
            list[str]: Image list obtained from the given folder.
        """
        if not isinstance(path, (str, Path)):
            raise TypeError(f"'path' must be a str or a Path object, but received {type(path)}.")
        path = str(path)
        found = scandir(path, suffix=IMG_EXTENSIONS, recursive=True)
        images = [osp.join(path, name) for name in found]
        assert images, f'{path} has no valid image file.'
        return images

    def evaluate(self, results, logger=None):
        """Evaluate by counting saved generated images (needs no metrics).

        Args:
            results (list[tuple]): The output of forward_test() of the model.

        Returns:
            dict: Evaluation results dict.
        """
        if not isinstance(results, list):
            raise TypeError(f'results must be a list, but got {type(results)}')
        assert len(results) == len(self), f'The length of results is not equal to the dataset len: {len(results)} != {len(self)}'
        # Count how many outputs were actually written to disk.
        saved_num = sum(1 for res in results if res['saved_flag'])
        return {'val_saved_number': saved_num}
@DATASETS.register_module()
class BaseMattingDataset(BaseDataset):
    """Base image matting dataset.

    Args:
        ann_file (str): Path to the annotation file.
        pipeline (list[dict | callable]): A sequence of data transforms.
        data_prefix (str, optional): Prefix of the data path. Default: None.
        test_mode (bool): Whether the dataset works in test mode.
    """

    def __init__(self, ann_file, pipeline, data_prefix=None, test_mode=False):
        super().__init__(pipeline, test_mode)
        self.ann_file = str(ann_file)
        self.data_prefix = str(data_prefix)
        self.data_infos = self.load_annotations()

    def evaluate(self, results, logger=None):
        """Evaluate with different metrics.

        Args:
            results (list[tuple]): The output of forward_test() of the model.

        Returns:
            dict: Evaluation results dict (metric name -> mean value).
        """
        if not isinstance(results, list):
            raise TypeError(f'results must be a list, but got {type(results)}')
        assert len(results) == len(self), f'The length of results is not equal to the dataset len: {len(results)} != {len(self)}'

        # Gather per-sample values grouped by metric name.
        collected = defaultdict(list)
        for res in results:
            for metric, value in res['eval_result'].items():
                collected[metric].append(value)

        # Every metric must have been reported for every sample.
        for metric, values in collected.items():
            assert len(values) == len(self), f'Length of evaluation result of {metric} is {len(values)}, should be {len(self)}'

        return {
            metric: sum(values) / len(self)
            for metric, values in collected.items()
        }
class BaseSRDataset(BaseDataset):
    """Base class for super resolution datasets.

    Args:
        pipeline (list[dict | callable]): A sequence of data transforms.
        scale (int): Upsampling scale, injected into each sample.
        test_mode (bool): Whether the dataset works in test mode.
    """

    def __init__(self, pipeline, scale, test_mode=False):
        super().__init__(pipeline, test_mode)
        self.scale = scale

    @staticmethod
    def scan_folder(path):
        """Obtain image path list (including sub-folders) from a given folder.

        Args:
            path (str | :obj:`Path`): Folder path.

        Returns:
            list[str]: image list obtained form given folder.
        """
        if not isinstance(path, (str, Path)):
            raise TypeError(f"'path' must be a str or a Path object, but received {type(path)}.")
        path = str(path)
        found = list(scandir(path, suffix=IMG_EXTENSIONS, recursive=True))
        images = [osp.join(path, name) for name in found]
        assert images, f'{path} has no valid image file.'
        return images

    def __getitem__(self, idx):
        """Get item at each call, attaching the SR scale to the sample.

        Args:
            idx (int): Index for getting each item.
        """
        sample = copy.deepcopy(self.data_infos[idx])
        sample['scale'] = self.scale
        return self.pipeline(sample)

    def evaluate(self, results, logger=None):
        """Evaluate with different metrics.

        Args:
            results (list[tuple]): The output of forward_test() of the model.

        Returns:
            dict: Evaluation results dict (metric name -> mean value).
        """
        if not isinstance(results, list):
            raise TypeError(f'results must be a list, but got {type(results)}')
        assert len(results) == len(self), f'The length of results is not equal to the dataset len: {len(results)} != {len(self)}'

        # Gather per-sample values grouped by metric name.
        collected = defaultdict(list)
        for res in results:
            for metric, value in res['eval_result'].items():
                collected[metric].append(value)

        # Every metric must have been reported for every sample.
        for metric, values in collected.items():
            assert len(values) == len(self), f'Length of evaluation result of {metric} is {len(values)}, should be {len(self)}'

        return {
            metric: sum(values) / len(self)
            for metric, values in collected.items()
        }
class BaseVFIDataset(BaseDataset):
    """Base class for video frame interpolation datasets.

    Args:
        pipeline (list[dict | callable]): A sequence of data transforms.
        folder (str): Root folder of the data.
        ann_file (str): Path to the annotation file.
        test_mode (bool): Whether the dataset works in test mode.
    """

    def __init__(self, pipeline, folder, ann_file, test_mode=False):
        super().__init__(pipeline, test_mode)
        self.folder = str(folder)
        self.ann_file = str(ann_file)

    def __getitem__(self, idx):
        """Get item at each call, attaching folder and annotation paths.

        Args:
            idx (int): Index for getting each item.
        """
        sample = copy.deepcopy(self.data_infos[idx])
        sample['folder'] = self.folder
        sample['ann_file'] = self.ann_file
        return self.pipeline(sample)

    def evaluate(self, results, logger=None):
        """Evaluate with different metrics.

        Args:
            results (list[tuple]): The output of forward_test() of the model.

        Returns:
            dict: Evaluation results dict (metric name -> mean value).
        """
        if not isinstance(results, list):
            raise TypeError(f'results must be a list, but got {type(results)}')
        assert len(results) == len(self), f'The length of results is not equal to the dataset len: {len(results)} != {len(self)}'

        # Gather per-sample values grouped by metric name.
        collected = defaultdict(list)
        for res in results:
            for metric, value in res['eval_result'].items():
                collected[metric].append(value)

        # Every metric must have been reported for every sample.
        for metric, values in collected.items():
            assert len(values) == len(self), f'Length of evaluation result of {metric} is {len(values)}, should be {len(self)}'

        return {
            metric: sum(values) / len(self)
            for metric, values in collected.items()
        }

    def load_annotations(self):
        """Abstract function for loading annotation.

        All subclasses should overwrite this function.
        """
        pass
def _concat_dataset(cfg, default_args=None):
    """Concat datasets with different ann_file but the same type.

    Args:
        cfg (dict): The config of dataset; ``cfg['ann_file']`` is a sequence
            of annotation files, one per sub-dataset.
        default_args (dict, optional): Default initialization arguments.
            Default: None.

    Returns:
        Dataset: The concatenated dataset.
    """
    datasets = []
    for ann_file in cfg['ann_file']:
        # Each sub-dataset gets its own copy of the config with a single
        # annotation file substituted in.
        sub_cfg = copy.deepcopy(cfg)
        sub_cfg['ann_file'] = ann_file
        datasets.append(build_dataset(sub_cfg, default_args))
    return ConcatDataset(datasets)
def build_dataset(cfg, default_args=None):
    """Build a dataset from config dict.

    Supported config forms:

    - list/tuple of configs: each entry is built recursively and the results
      are concatenated;
    - ``type='RepeatDataset'``: the inner ``cfg['dataset']`` is built and
      repeated ``cfg['times']`` times;
    - ``ann_file`` is a list/tuple: one dataset per annotation file is built
      and concatenated;
    - otherwise: a plain registry build.

    Args:
        cfg (dict): Config dict. It should at least contain the key "type".
        default_args (dict, optional): Default initialization arguments.
            Default: None.

    Returns:
        Dataset: The constructed dataset.
    """
    if isinstance(cfg, (list, tuple)):
        return ConcatDataset(
            [build_dataset(sub, default_args) for sub in cfg])
    if cfg['type'] == 'RepeatDataset':
        return RepeatDataset(
            build_dataset(cfg['dataset'], default_args), cfg['times'])
    if isinstance(cfg.get('ann_file'), (list, tuple)):
        return _concat_dataset(cfg, default_args)
    return build_from_cfg(cfg, DATASETS, default_args)
def build_dataloader(dataset,
                     samples_per_gpu,
                     workers_per_gpu,
                     num_gpus=1,
                     dist=True,
                     shuffle=True,
                     seed=None,
                     drop_last=False,
                     pin_memory=True,
                     persistent_workers=True,
                     **kwargs):
    """Build PyTorch DataLoader.

    In distributed training, each GPU/process has a dataloader.
    In non-distributed training, there is only one dataloader for all GPUs.

    Args:
        dataset (:obj:`Dataset`): A PyTorch dataset.
        samples_per_gpu (int): Number of samples on each GPU, i.e.,
            batch size of each GPU.
        workers_per_gpu (int): How many subprocesses to use for data
            loading for each GPU.
        num_gpus (int): Number of GPUs. Only used in non-distributed
            training. Default: 1.
        dist (bool): Distributed training/test or not. Default: True.
        shuffle (bool): Whether to shuffle the data at every epoch.
            Default: True.
        seed (int | None): Seed to be used. Default: None.
        drop_last (bool): Whether to drop the last incomplete batch in epoch.
            Default: False.
        pin_memory (bool): Whether to use pin_memory in DataLoader.
            Default: True.
        persistent_workers (bool): If True, the data loader will not shutdown
            the worker processes after a dataset has been consumed once.
            This allows to maintain the workers Dataset instances alive.
            The argument only takes effect in PyTorch>=1.7.0. Default: True.
        kwargs (dict, optional): Any keyword argument to be used to initialize
            DataLoader.

    Returns:
        DataLoader: A PyTorch dataloader.
    """
    rank, world_size = get_dist_info()
    if dist:
        # The distributed sampler shards the data and also shuffles it, so
        # the loader itself must not shuffle again.
        sampler = DistributedSampler(
            dataset,
            world_size,
            rank,
            shuffle=shuffle,
            samples_per_gpu=samples_per_gpu,
            seed=seed)
        shuffle = False
        batch_size = samples_per_gpu
        num_workers = workers_per_gpu
    else:
        sampler = None
        batch_size = num_gpus * samples_per_gpu
        num_workers = num_gpus * workers_per_gpu

    # Seed workers deterministically per (rank, worker) when a seed is given.
    if seed is not None:
        init_fn = partial(
            worker_init_fn, num_workers=num_workers, rank=rank, seed=seed)
    else:
        init_fn = None

    # persistent_workers is only supported from PyTorch 1.7.0 onwards.
    if version.parse(torch.__version__) >= version.parse('1.7.0'):
        kwargs['persistent_workers'] = persistent_workers

    return DataLoader(
        dataset,
        batch_size=batch_size,
        sampler=sampler,
        num_workers=num_workers,
        collate_fn=partial(collate, samples_per_gpu=samples_per_gpu),
        pin_memory=pin_memory,
        shuffle=shuffle,
        worker_init_fn=init_fn,
        drop_last=drop_last,
        **kwargs)
def worker_init_fn(worker_id, num_workers, rank, seed):
    """Function to initialize each worker.

    The seed of each worker equals to
    ``num_worker * rank + worker_id + user_seed``, which is unique per
    (rank, worker) pair yet reproducible across runs.

    Args:
        worker_id (int): Id for each worker.
        num_workers (int): Number of workers.
        rank (int): Rank in distributed training.
        seed (int): Random seed.
    """
    derived_seed = seed + rank * num_workers + worker_id
    # Seed every RNG a transform pipeline might rely on.
    for seeder in (np.random.seed, random.seed, torch.manual_seed):
        seeder(derived_seed)