code
stringlengths
17
6.64M
def run_cmd(cmd):
    """Run a shell command.

    NOTE(review): `os.system` passes `cmd` through the shell; callers must
    only hand it trusted strings (no user-controlled input).

    Returns the command string so pool callbacks can log what finished —
    consistent with the other `run_cmd` definition in this file.
    """
    os.system(cmd)
    return cmd
def opencv_write_jpeg(src_path, quality, tar_path):
    """JPEG-compress the image at `src_path` at the given quality and save
    the decoded (degraded) result to `tar_path`.
    """
    image = cv2.imread(src_path)
    params = [int(cv2.IMWRITE_JPEG_QUALITY), quality]
    # Round-trip through the in-memory JPEG codec to obtain the artifacts.
    _, jpeg_buf = cv2.imencode('.jpg', image, params)
    degraded = cv2.imdecode(jpeg_buf, cv2.IMREAD_COLOR)
    cv2.imwrite(tar_path, degraded)
def parse_args():
    """Parse CLI options for image-dataset compression."""
    parser = argparse.ArgumentParser(description='Compress image dataset.')
    parser.add_argument('--codec', type=str, required=True, choices=['bpg', 'jpeg'])
    parser.add_argument('--dataset', type=str, required=True, choices=['div2k', 'flickr2k'])
    parser.add_argument('--max-npro', type=int, default=16)
    parser.add_argument('--quality', type=int, default=37)
    return parser.parse_args()
def write_planar(img, planar_path):
    """Write planar.

    img: list of (h, w) arrays; each list item represents a channel plane.
    planar_path: output file path.

    PERF: the original wrote one element per `file.write` call inside a
    nested h*w loop; `tobytes()` emits the elements in row-major order —
    byte-identical output — in a single buffered write. The file is now
    also closed via a context manager even on error.
    """
    with open(planar_path, 'wb') as planar_file:
        for cha in img:
            planar_file.write(cha.tobytes())
def read_planar(planar_path, fmt=((1080, 1920), (1080, 1920), (1080, 1920))):
    """Read planar.

    fmt: tuple of (h, w) tuples; each tuple item represents one channel.

    https://numpy.org/doc/stable/reference/generated/numpy.fromfile.html
    """
    raw = np.fromfile(planar_path, dtype=np.uint8)
    channels = []
    offset = 0
    for h, w in fmt:
        size = h * w
        channels.append(raw[offset:offset + size].reshape(h, w))
        offset += size
    return channels
def write_ycbcr420(src_paths, tar_path, wdt, hgt):
    """Convert the BGR frames at `src_paths` to one YCbCr 4:2:0 planar file.

    Planes are appended per frame as Y, subsampled Cb, subsampled Cr.
    Returns `tar_path` (used as the pool-callback message).
    """
    planes = []
    for src_path in src_paths:
        bgr = cv2.imread(src_path)
        frame_h, frame_w = bgr.shape[:2]
        assert frame_h == hgt and frame_w == wdt
        ycrcb = cv2.cvtColor(bgr, cv2.COLOR_BGR2YCrCb)
        # cv2.resize takes dsize as (width, height); INTER_AREA suits decimation.
        half = (wdt // 2, hgt // 2)
        cr_sub = cv2.resize(ycrcb[..., 1], half, interpolation=cv2.INTER_AREA)
        cb_sub = cv2.resize(ycrcb[..., 2], half, interpolation=cv2.INTER_AREA)
        planes.extend([ycrcb[..., 0], cb_sub, cr_sub])
    write_planar(planes, tar_path)
    return tar_path
def read_ycbcr420(src_path, tar_paths, wdt, hgt, print_dir):
    """Read a YCbCr 4:2:0 planar file and write one BGR image per frame.

    src_path: planar file holding len(tar_paths) frames (Y, Cb, Cr planes).
    tar_paths: one output image path per frame.
    print_dir: returned unchanged (used as the pool-callback message).
    """
    # BUGFIX: `nfrms` was an undefined free name; the frame count is simply
    # the number of requested output images.
    nfrms = len(tar_paths)
    fmt = ((hgt, wdt), (hgt // 2, wdt // 2), (hgt // 2, wdt // 2)) * nfrms
    planes = read_planar(src_path, fmt=fmt)
    for idx, tar_path in enumerate(tar_paths):
        ycrcb = np.empty((hgt, wdt, 3), np.uint8)
        ycrcb[..., 0] = planes[3 * idx]
        # Planes are stored Y, Cb, Cr; OpenCV's YCrCb channel order is Y, Cr, Cb,
        # hence the swapped indices below.
        ycrcb[..., 1] = cv2.resize(planes[3 * idx + 2], (wdt, hgt), interpolation=cv2.INTER_CUBIC)
        ycrcb[..., 2] = cv2.resize(planes[3 * idx + 1], (wdt, hgt), interpolation=cv2.INTER_CUBIC)
        bgr = cv2.cvtColor(ycrcb, cv2.COLOR_YCrCb2BGR)
        cv2.imwrite(tar_path, bgr)
    return print_dir
def run_cmd(cmd):
    """Execute `cmd` in a shell and echo the command back for callback logging."""
    os.system(cmd)
    return cmd
def img2planar(vids):
    """According to the HM manual, HM accepts videos in raw 4:2:0 planar
    format (Y'CbCr).

    Fans the per-video conversion out over a process pool; `args` is the
    module-level parsed-CLI namespace.
    """
    pool = mp.Pool(processes=args.max_nprocs)
    for vid in vids:
        pool.apply_async(
            func=write_ycbcr420,
            args=(vid['src_paths'], vid['planar_path'], vid['wdt'], vid['hgt']),
            callback=print,
            error_callback=print,
        )
    pool.close()
    pool.join()
def compress_planar(vids):
    """Launch one HM encoder process per video, bounded by the pool size.

    Relies on module-level `enc_path`, `cfg_path` and the parsed `args`.
    """
    pool = mp.Pool(processes=args.max_nprocs)
    for vid in vids:
        enc_cmd = f"{enc_path} -i {vid['planar_path']} -c {cfg_path} -b {vid['bit_path']} -o {vid['comp_planar_path']}"
        # HM needs conformance-window padding when dims are not multiples of 8.
        if vid['wdt'] % 8 != 0 or vid['hgt'] % 8 != 0:
            enc_cmd += ' --ConformanceWindowMode=1'
        enc_cmd += f" -q {args.qp} --Level=3.1 -fr 30 -wdt {vid['wdt']} -hgt {vid['hgt']} -f {vid['nfrms']} > {vid['log_path']}"
        pool.apply_async(func=run_cmd, args=(enc_cmd,),
                         callback=print, error_callback=print)
    pool.close()
    pool.join()
def planar2img(vids):
    """Decode each compressed planar file back to per-frame images in parallel."""
    pool = mp.Pool(processes=args.max_nprocs)
    for vid in vids:
        _dir = osp.dirname(vid['tar_paths'][0])
        # ROBUSTNESS: exist_ok avoids crashing when re-running over an
        # existing output tree (original raised FileExistsError).
        os.makedirs(_dir, exist_ok=True)
        pool.apply_async(func=read_ycbcr420,
                         args=(vid['comp_planar_path'], vid['tar_paths'],
                               vid['wdt'], vid['hgt'], _dir),
                         callback=(lambda x: print(x)),
                         error_callback=(lambda err: print(err)))
    pool.close()
    pool.join()
def planar2img_mfqev2(vids):
    """Decode each (uncompressed) planar file back to per-frame images in parallel."""
    pool = mp.Pool(processes=args.max_nprocs)
    for vid in vids:
        _dir = osp.dirname(vid['src_paths'][0])
        # ROBUSTNESS: exist_ok avoids crashing when re-running over an
        # existing output tree (original raised FileExistsError).
        os.makedirs(_dir, exist_ok=True)
        pool.apply_async(func=read_ycbcr420,
                         args=(vid['planar_path'], vid['src_paths'],
                               vid['wdt'], vid['hgt'], _dir),
                         callback=(lambda x: print(x)),
                         error_callback=(lambda err: print(err)))
    pool.close()
    pool.join()
def parse_args():
    """Parse CLI options for video-dataset compression."""
    parser = argparse.ArgumentParser(description='Compress video dataset.')
    parser.add_argument('--max-nprocs', type=int, default=16)
    parser.add_argument('--dataset', type=str, required=True,
                        choices=['vimeo-triplet', 'vimeo-septuplet', 'mfqev2'])
    parser.add_argument('--qp', type=int, default=37)
    return parser.parse_args()
def parse_args():
    """Parse CLI options for PSNR evaluation."""
    parser = argparse.ArgumentParser(description='Eval PSNR.')
    parser.add_argument('--dataset', type=str, required=True,
                        choices=['div2k', 'flickr2k', 'vimeo-triplet',
                                 'vimeo-septuplet', 'mfqev2'])
    parser.add_argument('--out-dir', type=str, default=None)
    # NOTE(review): "boarder" is a typo for "border", but the flag name is
    # part of the CLI contract, so it is kept as-is.
    parser.add_argument('--crop-boarder', type=int, default=0)
    return parser.parse_args()
def cal_imgdir_psnr(img_infos, silent=False, crop_boarder=0):
    """[{src:img path, tar:img path}]

    Returns (average PSNR, number of ignored frames).
    """
    results = []
    if not silent:
        img_infos = tqdm(img_infos, ncols=0)
    n_ignore = 0
    for info in img_infos:
        src_img = cv2.imread(info['src'])
        tar_img = cv2.imread(info['tar'])
        psnr = cal_psnr(src_img, tar_img, crop_border=crop_boarder)
        # Identical images yield infinite PSNR; exclude them from the mean.
        if math.isinf(psnr):
            n_ignore += 1
        else:
            results.append(psnr)
    assert results, 'No valid result'
    return np.mean(results), n_ignore
def cal_lq_out_psnr(gt_dir, lq_dir, img_names, args):
    """Report average PSNR of the model output (when `args.out_dir` is set)
    and of the LQ input, both against the ground truth."""

    def _report(tar_dir, tag):
        # One PSNR report for a single target directory.
        infos = [dict(src=osp.join(gt_dir, name), tar=osp.join(tar_dir, name))
                 for name in img_names]
        ave_psnr, n_ignore = cal_imgdir_psnr(img_infos=infos,
                                             crop_boarder=args.crop_boarder)
        print(f'Ave. PSNR ({tag}): {ave_psnr:.4f} dB.')
        if n_ignore:
            print(f'{n_ignore} frames are ignored.')

    if args.out_dir:
        _report(args.out_dir, 'Out')
    _report(lq_dir, 'LQ')
def cal_videos_psnr(gt_dir, lq_dir, sub_dirs, args):
    """sub_dirs (list): list of dict with keys 'dir_name' and 'img_names'.

    Reports the mean of per-video average PSNRs for the model output (when
    `args.out_dir` is set) and for the LQ input, against the ground truth.
    """

    # CONSISTENCY: the original duplicated this whole loop verbatim for the
    # Out and LQ passes; factored into one helper.
    def _eval_dir(tar_root, tag):
        results = []
        n_ignore_accm = 0
        for sub_dir in tqdm(sub_dirs, ncols=0):
            img_infos = [dict(src=osp.join(gt_dir, sub_dir['dir_name'], img_name),
                              tar=osp.join(tar_root, sub_dir['dir_name'], img_name))
                         for img_name in sub_dir['img_names']]
            psnr, n_ignore = cal_imgdir_psnr(img_infos=img_infos, silent=True,
                                             crop_boarder=args.crop_boarder)
            results.append(psnr)
            n_ignore_accm += n_ignore
        ave_psnr = np.mean(results)
        print(f'Ave. PSNR ({tag}): {ave_psnr:.4f} dB.')
        if n_ignore_accm:
            print(f'{n_ignore_accm} frames are ignored.')

    if args.out_dir:
        _eval_dir(args.out_dir, 'Out')
    _eval_dir(lq_dir, 'LQ')
def parse_args():
    """Parse CLI options for the mmediting test launcher."""
    parser = argparse.ArgumentParser(description='mmediting tester')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument('--seed', type=int, default=None, help='random seed')
    parser.add_argument('--deterministic', action='store_true',
                        help='whether to set deterministic options for CUDNN backend.')
    parser.add_argument('--out', help='output result pickle file')
    parser.add_argument('--gpu-collect', action='store_true',
                        help='whether to use gpu to collect results')
    parser.add_argument('--save-path', default=None, type=str,
                        help='path to store images and if not given, will not save image')
    parser.add_argument('--tmpdir', help='tmp dir for writing some results')
    parser.add_argument('--cfg-options', nargs='+', action=DictAction,
                        help='override some settings in the used config, the key-value pair in xxx=yyy format will be merged into config file. If the value to be overwritten is a list, it should be like key="[a,b]" or key=a,b It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" Note that the quotation marks are necessary and that no white space is allowed.')
    parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'],
                        default='none', help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args()
    # torch.distributed.launch passes the rank on the CLI; mirror it into the
    # environment when absent so downstream init code can rely on it.
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    return args
def main():
    """Entry point of the mmediting test script: build the dataset/model from
    the config, run single- or multi-GPU inference, then evaluate/dump."""
    args = parse_args()
    cfg = Config.fromfile(args.config)
    if (args.cfg_options is not None):
        cfg.merge_from_dict(args.cfg_options)
    # Configure multi-processing env vars (OMP/MKL threads etc.).
    setup_multi_processes(cfg)
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # Checkpoint weights are loaded explicitly below; drop any pretrained URL.
    cfg.model.pretrained = None
    if (args.launcher == 'none'):
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
    (rank, _) = get_dist_info()
    if (args.seed is not None):
        if (rank == 0):
            print('set random seed to', args.seed)
        set_random_seed(args.seed, deterministic=args.deterministic)
    dataset = build_dataset(cfg.data.test)
    # Test-time loader: batch size 1, no shuffling, keep every sample.
    loader_cfg = {
        **dict(((k, cfg.data[k]) for k in ['workers_per_gpu'] if (k in cfg.data))),
        **dict(samples_per_gpu=1, drop_last=False, shuffle=False, dist=distributed),
        **cfg.data.get('test_dataloader', {}),
    }
    data_loader = build_dataloader(dataset, **loader_cfg)
    model = build_model(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    # Images are saved only when a save path was provided.
    args.save_image = (args.save_path is not None)
    empty_cache = cfg.get('empty_cache', False)
    if (not distributed):
        _ = load_checkpoint(model, args.checkpoint, map_location='cpu')
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader, save_path=args.save_path,
                                  save_image=args.save_image)
    else:
        find_unused_parameters = cfg.get('find_unused_parameters', False)
        model = DistributedDataParallelWrapper(
            model, device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False,
            find_unused_parameters=find_unused_parameters)
        device_id = torch.cuda.current_device()
        _ = load_checkpoint(model, args.checkpoint,
                            map_location=(lambda storage, loc: storage.cuda(device_id)))
        outputs = multi_gpu_test(model, data_loader, args.tmpdir,
                                 args.gpu_collect, save_path=args.save_path,
                                 save_image=args.save_image,
                                 empty_cache=empty_cache)
    # Only rank 0 evaluates and (optionally) dumps the raw outputs.
    if ((rank == 0) and ('eval_result' in outputs[0])):
        print('')
        stats = dataset.evaluate(outputs)
        for stat in stats:
            print(f'Eval {stat}: {stats[stat]}')
        if args.out:
            print(f'Writing results to {args.out}.')
            mmcv.dump(outputs, args.out)
def parse_args():
    """Parse CLI options for the mmediting train launcher."""
    parser = argparse.ArgumentParser(description='Train an editor')
    parser.add_argument('config', help='train config file path')
    parser.add_argument('--work-dir', help='the dir to save logs and models')
    parser.add_argument('--resume-from', help='the checkpoint file to resume from')
    parser.add_argument('--no-validate', action='store_true',
                        help='whether not to evaluate the checkpoint during training')
    parser.add_argument('--gpus', type=int, default=1,
                        help='number of gpus to use (only applicable to non-distributed training)')
    parser.add_argument('--seed', type=int, default=None, help='random seed')
    parser.add_argument('--diff_seed', action='store_true',
                        help='Whether or not set different seeds for different ranks')
    parser.add_argument('--deterministic', action='store_true',
                        help='whether to set deterministic options for CUDNN backend.')
    parser.add_argument('--cfg-options', nargs='+', action=DictAction,
                        help='override some settings in the used config, the key-value pair in xxx=yyy format will be merged into config file. If the value to be overwritten is a list, it should be like key="[a,b]" or key=a,b It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" Note that the quotation marks are necessary and that no white space is allowed.')
    parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'],
                        default='none', help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    parser.add_argument('--autoscale-lr', action='store_true',
                        help='automatically scale lr with the number of gpus')
    args = parser.parse_args()
    # torch.distributed.launch passes the rank on the CLI; mirror it into the
    # environment when absent so downstream init code can rely on it.
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    return args
def main():
    """Entry point of the mmediting train script: resolve the config, set up
    logging/seeding, build model + datasets, and launch training."""
    args = parse_args()
    cfg = Config.fromfile(args.config)
    if (args.cfg_options is not None):
        cfg.merge_from_dict(args.cfg_options)
    # Configure multi-processing env vars (OMP/MKL threads etc.).
    setup_multi_processes(cfg)
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # CLI overrides take precedence over the config file.
    if (args.work_dir is not None):
        cfg.work_dir = args.work_dir
    if (args.resume_from is not None):
        cfg.resume_from = args.resume_from
    cfg.gpus = args.gpus
    if args.autoscale_lr:
        # Linear scaling rule, normalized to an 8-GPU baseline.
        cfg.optimizer['lr'] = ((cfg.optimizer['lr'] * cfg.gpus) / 8)
    if (args.launcher == 'none'):
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
    # Timestamped log file inside the work dir.
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
    env_info_dict = collect_env.collect_env()
    env_info = '\n'.join([f'{k}: {v}' for (k, v) in env_info_dict.items()])
    dash_line = (('-' * 60) + '\n')
    logger.info((((('Environment info:\n' + dash_line) + env_info) + '\n') + dash_line))
    logger.info('Distributed training: {}'.format(distributed))
    logger.info('mmedit Version: {}'.format(__version__))
    logger.info('Config:\n{}'.format(cfg.text))
    seed = init_random_seed(args.seed)
    # Optionally decorrelate ranks by offsetting the seed with the rank.
    seed = ((seed + dist.get_rank()) if args.diff_seed else seed)
    logger.info('Set random seed to {}, deterministic: {}'.format(seed, args.deterministic))
    set_random_seed(seed, deterministic=args.deterministic)
    cfg.seed = seed
    model = build_model(cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
    datasets = [build_dataset(cfg.data.train)]
    # A two-stage workflow means validation shares the training pipeline.
    if (len(cfg.workflow) == 2):
        val_dataset = copy.deepcopy(cfg.data.val)
        val_dataset.pipeline = cfg.data.train.pipeline
        datasets.append(build_dataset(val_dataset))
    if (cfg.checkpoint_config is not None):
        # Embed version/config into checkpoints for provenance.
        cfg.checkpoint_config.meta = dict(mmedit_version=__version__, config=cfg.text)
    meta = dict()
    if (cfg.get('exp_name', None) is None):
        cfg['exp_name'] = osp.splitext(osp.basename(cfg.work_dir))[0]
    meta['exp_name'] = cfg.exp_name
    meta['mmedit Version'] = __version__
    meta['seed'] = seed
    meta['env_info'] = env_info
    train_model(model, datasets, cfg, distributed=distributed,
                validate=(not args.no_validate), timestamp=timestamp, meta=meta)
def yuv_import(filename, dims, numfrm, startfrm):
    """Read `numfrm` frames of a YUV 4:2:0 planar file starting at `startfrm`.

    dims: (height, width) of the luma plane.
    Returns (Y, U, V) uint8 arrays of shapes (numfrm, h, w) and
    (numfrm, h//2, w//2) for the chroma planes.

    PERF: the original read one byte per `ord(fp.read(1))` call; reading a
    whole plane with `np.frombuffer` is byte-identical and orders of
    magnitude faster. The file is also closed via a context manager now.
    """
    y_size = dims[0] * dims[1]
    d00 = dims[0] // 2
    d01 = dims[1] // 2
    c_size = d00 * d01
    blk_size = y_size * 3 // 2  # bytes per 4:2:0 frame
    Y = np.zeros((numfrm, dims[0], dims[1]), np.uint8)
    U = np.zeros((numfrm, d00, d01), np.uint8)
    V = np.zeros((numfrm, d00, d01), np.uint8)
    with open(filename, 'rb') as fp:
        fp.seek(int(blk_size * startfrm), 0)
        for i in range(numfrm):
            Y[i] = np.frombuffer(fp.read(y_size), np.uint8).reshape(dims[0], dims[1])
            U[i] = np.frombuffer(fp.read(c_size), np.uint8).reshape(d00, d01)
            V[i] = np.frombuffer(fp.read(c_size), np.uint8).reshape(d00, d01)
    return (Y, U, V)
def yuv2rgb(Y, U, V, height, width):
    """Convert normalized Y/U/V planes to uint8 R, G, B planes.

    Y is expected in [0, 1]; U/V are upsampled to (height, width) with
    bilinear interpolation, then a BT.601-style conversion is applied.
    Returns (r, g, b) uint8 arrays of shape (height, width).

    PERF: per-pixel clamp loops replaced by `np.clip` — identical result.
    """
    U = imresize(U, [height, width], 'bilinear', mode='F')
    V = imresize(V, [height, width], 'bilinear', mode='F')
    Y = Y * 255.0
    rf = Y + 1.4075 * (V * 255.0 - 128.0)
    gf = Y - 0.3455 * (U * 255.0 - 128.0) - 0.7169 * (V * 255.0 - 128.0)
    bf = Y + 1.779 * (U * 255.0 - 128.0)
    r = np.clip(rf, 0, 255).astype(np.uint8)
    g = np.clip(gf, 0, 255).astype(np.uint8)
    b = np.clip(bf, 0, 255).astype(np.uint8)
    return (r, g, b)
def warp_img(batch_size, imga, imgb, reuse, scope='easyflow'):
    """Coarse-to-fine flow estimation ('easyflow') that warps `imgb` toward
    `imga` in three stages, refining the flow at each stage.

    imga, imgb: NHWC tensors with static shapes.
    reuse: passed to `tf.variable_scope` for weight sharing.
    Returns the final warped image tensor.
    """
    (n, h, w, c) = imga.get_shape().as_list()
    with tf.variable_scope(scope, reuse=reuse):
        # Same PReLU/Xavier arg scopes for conv and deconv layers.
        with slim.arg_scope([slim.conv2d], activation_fn=tflearn.activations.prelu, weights_initializer=tf.contrib.layers.xavier_initializer(uniform=True), biases_initializer=tf.constant_initializer(0.0)), slim.arg_scope([slim.conv2d_transpose], activation_fn=tflearn.activations.prelu, weights_initializer=tf.contrib.layers.xavier_initializer(uniform=True), biases_initializer=tf.constant_initializer(0.0)):
            inputs = tf.concat([imga, imgb], 3, name='flow_inp')
            # --- Stage 1: quarter-resolution flow (two stride-2 convs). ---
            c1 = slim.conv2d(inputs, 24, [5, 5], stride=2, scope='c1')
            c2 = slim.conv2d(c1, 24, [3, 3], scope='c2')
            c3 = slim.conv2d(c2, 24, [5, 5], stride=2, scope='c3')
            c4 = slim.conv2d(c3, 24, [3, 3], scope='c4')
            c5 = slim.conv2d(c4, 32, [3, 3], activation_fn=tf.nn.tanh, scope='c5')
            # 32 channels = 2 flow components * 4x4 sub-pixel blocks; the
            # reshape/transpose pair performs a depth-to-space upsampling.
            c5_hr = tf.reshape(c5, [n, int((h / 4)), int((w / 4)), 2, 4, 4])
            c5_hr = tf.transpose(c5_hr, [0, 1, 4, 2, 5, 3])
            c5_hr = tf.reshape(c5_hr, [n, h, w, 2])
            img_warp1 = func.transformer(batch_size, c, c5_hr, imgb, [h, w])
            # --- Stage 2: half-resolution residual flow. ---
            c5_pack = tf.concat([inputs, c5_hr, img_warp1], 3, name='cat')
            s1 = slim.conv2d(c5_pack, 24, [5, 5], stride=2, scope='s1')
            s2 = slim.conv2d(s1, 24, [3, 3], scope='s2')
            s3 = slim.conv2d(s2, 24, [3, 3], scope='s3')
            s4 = slim.conv2d(s3, 24, [3, 3], scope='s4')
            s5 = slim.conv2d(s4, 8, [3, 3], activation_fn=tf.nn.tanh, scope='s5')
            # 8 channels = 2 flow components * 2x2 sub-pixel blocks.
            s5_hr = tf.reshape(s5, [n, int((h / 2)), int((w / 2)), 2, 2, 2])
            s5_hr = tf.transpose(s5_hr, [0, 1, 4, 2, 5, 3])
            s5_hr = tf.reshape(s5_hr, [n, h, w, 2])
            uv = (c5_hr + s5_hr)
            img_warp2 = func.transformer(batch_size, c, uv, imgb, [h, w])
            # --- Stage 3: full-resolution residual flow. ---
            s5_pack = tf.concat([inputs, uv, img_warp2], 3, name='cat2')
            a1 = slim.conv2d(s5_pack, 24, [3, 3], scope='a1')
            a2 = slim.conv2d(a1, 24, [3, 3], scope='a2')
            a3 = slim.conv2d(a2, 24, [3, 3], scope='a3')
            a4 = slim.conv2d(a3, 24, [3, 3], scope='a4')
            a5 = slim.conv2d(a4, 2, [3, 3], activation_fn=tf.nn.tanh, scope='a5')
            # 1x1 blocks: this reshape round-trip is an identity kept for
            # symmetry with the previous stages.
            a5_hr = tf.reshape(a5, [n, h, w, 2, 1, 1])
            a5_hr = tf.transpose(a5_hr, [0, 1, 4, 2, 5, 3])
            a5_hr = tf.reshape(a5_hr, [n, h, w, 2])
            uv2 = (a5_hr + uv)
            img_warp3 = func.transformer(batch_size, c, uv2, imgb, [h, w])
    return img_warp3
def transformer(batch, chan, flow, U, out_size, name='SpatialTransformer', **kwargs):
    """Bilinear warp of image batch `U` by a dense `flow` field (a spatial
    transformer specialized to per-pixel displacements).

    batch, chan: static batch size and channel count for the output reshape.
    flow: (N, H, W, 2) displacement field, split into dx/dy below.
    out_size: (out_height, out_width).
    Returns the warped tensor of shape (batch, out_height, out_width, chan).
    """

    def _repeat(x, n_repeats):
        # Tile each element of x `n_repeats` times (element-major order).
        with tf.variable_scope('_repeat'):
            rep = tf.transpose(tf.expand_dims(tf.ones(shape=tf.stack([n_repeats])), 1), [1, 0])
            rep = tf.cast(rep, 'int32')
            x = tf.matmul(tf.reshape(x, ((- 1), 1)), rep)
            return tf.reshape(x, [(- 1)])

    def _repeat2(x, n_repeats):
        # Repeat the whole sequence x `n_repeats` times (sequence-major order).
        with tf.variable_scope('_repeat'):
            rep = tf.expand_dims(tf.ones(shape=tf.stack([n_repeats])), 1)
            rep = tf.cast(rep, 'int32')
            x = tf.matmul(rep, tf.reshape(x, (1, (- 1))))
            return tf.reshape(x, [(- 1)])

    def _interpolate(im, x, y, out_size):
        # Bilinear sampling of `im` at displaced coordinates (x, y).
        with tf.variable_scope('_interpolate'):
            num_batch = tf.shape(im)[0]
            height = tf.shape(im)[1]
            width = tf.shape(im)[2]
            channels = tf.shape(im)[3]
            x = tf.cast(x, 'float32')
            y = tf.cast(y, 'float32')
            # NOTE(review): height_f/width_f are computed but unused; kept as-is.
            height_f = tf.cast(height, 'float32')
            width_f = tf.cast(width, 'float32')
            out_height = out_size[0]
            out_width = out_size[1]
            zero = tf.zeros([], dtype='int32')
            max_y = tf.cast((tf.shape(im)[1] - 1), 'int32')
            max_x = tf.cast((tf.shape(im)[2] - 1), 'int32')
            # Absolute sampling position = pixel grid + displacement; the
            # flow is scaled by 64 (presumably tanh output mapped to a
            # +/-64-pixel range — TODO confirm against training code).
            x = (tf.cast(_repeat2(tf.range(0, width), (height * num_batch)), 'float32') + (x * 64))
            y = (tf.cast(_repeat2(_repeat(tf.range(0, height), width), num_batch), 'float32') + (y * 64))
            # Four neighboring integer corners, clamped to the image border.
            x0 = tf.cast(tf.floor(x), 'int32')
            x1 = (x0 + 1)
            y0 = tf.cast(tf.floor(y), 'int32')
            y1 = (y0 + 1)
            x0 = tf.clip_by_value(x0, zero, max_x)
            x1 = tf.clip_by_value(x1, zero, max_x)
            y0 = tf.clip_by_value(y0, zero, max_y)
            y1 = tf.clip_by_value(y1, zero, max_y)
            # Flat indices into the (N*H*W, C) image for each corner.
            dim2 = width
            dim1 = (width * height)
            base = _repeat((tf.range(num_batch) * dim1), (out_height * out_width))
            base_y0 = (base + (y0 * dim2))
            base_y1 = (base + (y1 * dim2))
            idx_a = (base_y0 + x0)
            idx_b = (base_y1 + x0)
            idx_c = (base_y0 + x1)
            idx_d = (base_y1 + x1)
            im_flat = tf.reshape(im, tf.stack([(- 1), channels]))
            im_flat = tf.cast(im_flat, 'float32')
            Ia = tf.gather(im_flat, idx_a)
            Ib = tf.gather(im_flat, idx_b)
            Ic = tf.gather(im_flat, idx_c)
            Id = tf.gather(im_flat, idx_d)
            # Bilinear weights from the fractional parts.
            x0_f = tf.cast(x0, 'float32')
            x1_f = tf.cast(x1, 'float32')
            y0_f = tf.cast(y0, 'float32')
            y1_f = tf.cast(y1, 'float32')
            wa = tf.expand_dims(((x1_f - x) * (y1_f - y)), 1)
            wb = tf.expand_dims(((x1_f - x) * (y - y0_f)), 1)
            wc = tf.expand_dims(((x - x0_f) * (y1_f - y)), 1)
            wd = tf.expand_dims(((x - x0_f) * (y - y0_f)), 1)
            output = tf.add_n([(wa * Ia), (wb * Ib), (wc * Ic), (wd * Id)])
            return output

    def _meshgrid(height, width):
        # NOTE(review): standard spatial-transformer meshgrid; not referenced
        # by _transform in this displacement-field variant.
        with tf.variable_scope('_meshgrid'):
            x_t = tf.matmul(tf.ones(shape=tf.stack([height, 1])), tf.transpose(tf.expand_dims(tf.linspace((- 1.0), 1.0, width), 1), [1, 0]))
            y_t = tf.matmul(tf.expand_dims(tf.linspace((- 1.0), 1.0, height), 1), tf.ones(shape=tf.stack([1, width])))
            x_t_flat = tf.reshape(x_t, (1, (- 1)))
            y_t_flat = tf.reshape(y_t, (1, (- 1)))
            ones = tf.ones_like(x_t_flat)
            grid = tf.concat(axis=0, values=[x_t_flat, y_t_flat, ones])
            return grid

    def _transform(x_s, y_s, input_dim, out_size):
        # Flatten the displacement planes and sample the input with them.
        with tf.variable_scope('_transform'):
            num_batch = tf.shape(input_dim)[0]
            height = tf.shape(input_dim)[1]
            width = tf.shape(input_dim)[2]
            num_channels = tf.shape(input_dim)[3]
            height_f = tf.cast(height, 'float32')
            width_f = tf.cast(width, 'float32')
            out_height = out_size[0]
            out_width = out_size[1]
            x_s_flat = tf.reshape(x_s, [(- 1)])
            y_s_flat = tf.reshape(y_s, [(- 1)])
            input_transformed = _interpolate(input_dim, x_s_flat, y_s_flat, out_size)
            output = tf.reshape(input_transformed, tf.stack([batch, out_height, out_width, chan]))
            return output

    with tf.variable_scope(name):
        (dx, dy) = tf.split(flow, 2, 3)
        output = _transform(dx, dy, U, out_size)
        return output
def network(frame1, frame2, frame3, reuse=False, scope='netflow'):
    """Fuse three consecutive frames and predict a residual that is added to
    the center frame (`frame2`). Returns the enhanced center frame."""
    with tf.variable_scope(scope, reuse=reuse):
        # Shared PReLU/Xavier arg scopes for conv and deconv layers.
        with slim.arg_scope([slim.conv2d], activation_fn=tflearn.activations.prelu, weights_initializer=tf.contrib.layers.xavier_initializer(uniform=True), biases_initializer=tf.constant_initializer(0.0)), slim.arg_scope([slim.conv2d_transpose], activation_fn=tflearn.activations.prelu, weights_initializer=tf.contrib.layers.xavier_initializer(uniform=True), biases_initializer=tf.constant_initializer(0.0)):
            # Per-frame feature extraction.
            feat1 = slim.conv2d(frame1, 128, [9, 9], scope='conv1_1')
            feat2 = slim.conv2d(frame2, 128, [9, 9], scope='conv1_2')
            feat3 = slim.conv2d(frame3, 128, [9, 9], scope='conv1_3')
            # Pairwise fusion of neighboring frames with the center frame.
            pair12 = tf.concat([feat1, feat2], 3, name='concat1_12')
            pair23 = tf.concat([feat2, feat3], 3, name='concat1_23')
            fuse12 = slim.conv2d(pair12, 64, [7, 7], scope='conv2_1')
            fuse23 = slim.conv2d(pair23, 64, [7, 7], scope='conv2_2')
            refined12 = slim.conv2d(fuse12, 64, [3, 3], scope='conv3_1')
            refined23 = slim.conv2d(fuse23, 64, [3, 3], scope='conv3_2')
            # Merge both branches and predict a single-channel residual.
            merged = tf.concat([refined12, refined23], 3, name='concat3_12')
            bottleneck = slim.conv2d(merged, 32, [1, 1], scope='conv4_1')
            residual = slim.conv2d(bottleneck, 1, [5, 5], activation_fn=None, scope='conv5')
            return tf.add(residual, frame2)
def create_lmdb_for_mfqev2():
    """Build GT and LQ training LMDBs for the MFQEv2 dataset.

    Reads paths from the module-level `yml_path` config; uses the module-level
    `radius` (temporal half-window). GT stores only the center frame of every
    (2*radius+1)-frame sequence; LQ stores every frame of each sequence.
    Finally sym-links the dataset root to ./data/MFQEv2.
    """
    with open(yml_path, 'r') as fp:
        # `fp` is rebound to the parsed YAML dict (shadows the file handle).
        fp = yaml.load(fp, Loader=yaml.FullLoader)
        root_dir = fp['dataset']['train']['root']
        gt_folder = fp['dataset']['train']['gt_folder']
        lq_folder = fp['dataset']['train']['lq_folder']
        gt_path = fp['dataset']['train']['gt_path']
        lq_path = fp['dataset']['train']['lq_path']
    gt_dir = op.join(root_dir, gt_folder)
    lq_dir = op.join(root_dir, lq_folder)
    lmdb_gt_path = op.join(root_dir, gt_path)
    lmdb_lq_path = op.join(root_dir, lq_path)
    print('Scaning videos...')
    gt_video_list = sorted(glob.glob(op.join(gt_dir, '*.yuv')))
    # LQ videos share the GT file names.
    lq_video_list = [op.join(lq_dir, gt_video_path.split('/')[(- 1)]) for gt_video_path in gt_video_list]
    msg = f'> {len(gt_video_list)} videos found.'
    print(msg)
    print('Scaning GT frames (only center frames of each sequence)...')
    frm_list = []
    for gt_video_path in gt_video_list:
        # Frame count is encoded as the last '_'-separated token of the stem,
        # capped at 300 frames per video.
        nfs = int(gt_video_path.split('.')[(- 2)].split('/')[(- 1)].split('_')[(- 1)])
        nfs = (nfs if (nfs <= 300) else 300)
        num_seq = (nfs // ((2 * radius) + 1))
        # Center-frame index of each non-overlapping sequence.
        frm_list.append([(radius + (iter_seq * ((2 * radius) + 1))) for iter_seq in range(num_seq)])
    num_frm_total = sum([len(frms) for frms in frm_list])
    msg = f'> {num_frm_total} frames found.'
    print(msg)
    key_list = []
    video_path_list = []
    index_frame_list = []
    for iter_vid in range(len(gt_video_list)):
        frms = frm_list[iter_vid]
        for iter_frm in range(len(frms)):
            # Keys mimic the Vimeo layout: <vid>/<seq>/im4.png is the center.
            key_list.append('{:03d}/{:03d}/im4.png'.format((iter_vid + 1), (iter_frm + 1)))
            video_path_list.append(gt_video_list[iter_vid])
            index_frame_list.append(frms[iter_frm])
    print('Writing LMDB for GT data...')
    make_y_lmdb_from_yuv(video_path_list=video_path_list, index_frame_list=index_frame_list, key_list=key_list, lmdb_path=lmdb_gt_path, multiprocessing_read=True)
    print('> Finish.')
    print('Scaning LQ frames...')
    len_input = ((2 * radius) + 1)
    frm_list = []
    for lq_video_path in lq_video_list:
        nfs = int(lq_video_path.split('.')[(- 2)].split('/')[(- 1)].split('_')[(- 1)])
        nfs = (nfs if (nfs <= 300) else 300)
        num_seq = (nfs // len_input)
        # All frame indices of each non-overlapping sequence.
        frm_list.append([list(range((iter_seq * len_input), ((iter_seq + 1) * len_input))) for iter_seq in range(num_seq)])
    num_frm_total = sum([(len(frms) * len_input) for frms in frm_list])
    msg = f'> {num_frm_total} frames found.'
    print(msg)
    key_list = []
    video_path_list = []
    index_frame_list = []
    for iter_vid in range(len(lq_video_list)):
        frm_seq = frm_list[iter_vid]
        for iter_seq in range(len(frm_seq)):
            key_list.extend(['{:03d}/{:03d}/im{:d}.png'.format((iter_vid + 1), (iter_seq + 1), i) for i in range(1, (len_input + 1))])
            video_path_list.extend(([lq_video_list[iter_vid]] * len_input))
            index_frame_list.extend(frm_seq[iter_seq])
    print('Writing LMDB for LQ data...')
    make_y_lmdb_from_yuv(video_path_list=video_path_list, index_frame_list=index_frame_list, key_list=key_list, lmdb_path=lmdb_lq_path, multiprocessing_read=True)
    print('> Finish.')
    # Expose the dataset under the repo-relative data/ directory.
    if (not op.exists('data/MFQEv2')):
        if (not op.exists('data/')):
            os.system('mkdir data/')
        os.system(f'ln -s {root_dir} ./data/MFQEv2')
        print('Sym-linking done.')
    else:
        print('data/MFQEv2 already exists.')
def create_lmdb_for_vimeo90k():
    """Build GT and LQ training LMDBs for the Vimeo-90K septuplet dataset.

    Reads paths from the module-level `yml_path` config; uses the module-level
    `radius` (temporal half-window). Videos are 7-frame YUV 4:4:4 clips of
    size 448x256. GT stores only the center frame of each sequence; LQ stores
    every frame. Finally sym-links the dataset root to ./data/vimeo90k.
    """
    with open(yml_path, 'r') as fp:
        # `fp` is rebound to the parsed YAML dict (shadows the file handle).
        fp = yaml.load(fp, Loader=yaml.FullLoader)
        root_dir = fp['dataset']['root']
        gt_folder = fp['dataset']['train']['gt_folder']
        lq_folder = fp['dataset']['train']['lq_folder']
        gt_path = fp['dataset']['train']['gt_path']
        lq_path = fp['dataset']['train']['lq_path']
        meta_path = fp['dataset']['train']['meta_path']
    gt_dir = op.join(root_dir, gt_folder)
    lq_dir = op.join(root_dir, lq_folder)
    lmdb_gt_path = op.join(root_dir, gt_path)
    lmdb_lq_path = op.join(root_dir, lq_path)
    meta_path = op.join(root_dir, meta_path)
    print('Scaning meta list...')
    gt_video_list = []
    lq_video_list = []
    # NOTE(review): meta_fp is never closed; consider a `with` block.
    meta_fp = open(meta_path, 'r')
    while True:
        new_line = meta_fp.readline().split('\n')[0]
        if (new_line == ''):
            break
        # Meta lines look like "00001/0001"; flatten to "00001_0001.yuv".
        vid_name = ((new_line.split('/')[0] + '_') + new_line.split('/')[1])
        qt_path = op.join(gt_dir, (vid_name + '.yuv'))
        gt_video_list.append(qt_path)
        # NOTE(review): this rebinding shadows the config value `lq_path`
        # read above; harmless here since lmdb_lq_path was already computed.
        lq_path = op.join(lq_dir, (vid_name + '.yuv'))
        lq_video_list.append(lq_path)
    msg = f'> {len(gt_video_list)} videos found.'
    print(msg)
    print('Scaning GT frames (only center frames of each sequence)...')
    frm_list = []
    for gt_video_path in gt_video_list:
        # Every Vimeo-90K clip has exactly 7 frames.
        nfs = 7
        num_seq = (nfs // ((2 * radius) + 1))
        # Center-frame index of each non-overlapping sequence.
        frm_list.append([(radius + (iter_seq * ((2 * radius) + 1))) for iter_seq in range(num_seq)])
    num_frm_total = sum([len(frms) for frms in frm_list])
    msg = f'> {num_frm_total} frames found.'
    print(msg)
    key_list = []
    video_path_list = []
    index_frame_list = []
    for iter_vid in range(len(gt_video_list)):
        frms = frm_list[iter_vid]
        for iter_frm in range(len(frms)):
            key_list.append('{:03d}/{:03d}/im4.png'.format((iter_vid + 1), (iter_frm + 1)))
            video_path_list.append(gt_video_list[iter_vid])
            index_frame_list.append(frms[iter_frm])
    print('Writing LMDB for GT data...')
    make_y_lmdb_from_yuv(video_path_list=video_path_list, yuv_type='444p', h=256, w=448, index_frame_list=index_frame_list, key_list=key_list, lmdb_path=lmdb_gt_path, multiprocessing_read=True)
    print('> Finish.')
    print('Scaning LQ frames...')
    len_input = ((2 * radius) + 1)
    frm_list = []
    for lq_video_path in lq_video_list:
        nfs = 7
        num_seq = (nfs // len_input)
        # All frame indices of each non-overlapping sequence.
        frm_list.append([list(range((iter_seq * len_input), ((iter_seq + 1) * len_input))) for iter_seq in range(num_seq)])
    num_frm_total = sum([(len(frms) * len_input) for frms in frm_list])
    msg = f'> {num_frm_total} frames found.'
    print(msg)
    key_list = []
    video_path_list = []
    index_frame_list = []
    for iter_vid in range(len(lq_video_list)):
        frm_seq = frm_list[iter_vid]
        for iter_seq in range(len(frm_seq)):
            key_list.extend(['{:03d}/{:03d}/im{:d}.png'.format((iter_vid + 1), (iter_seq + 1), i) for i in range(1, (len_input + 1))])
            video_path_list.extend(([lq_video_list[iter_vid]] * len_input))
            index_frame_list.extend(frm_seq[iter_seq])
    print('Writing LMDB for LQ data...')
    make_y_lmdb_from_yuv(video_path_list=video_path_list, yuv_type='444p', h=256, w=448, index_frame_list=index_frame_list, key_list=key_list, lmdb_path=lmdb_lq_path, multiprocessing_read=True)
    print('> Finish.')
    # Expose the dataset under the repo-relative data/ directory.
    if (not op.exists('data/vimeo90k')):
        if (not op.exists('data/')):
            os.system('mkdir data/')
        os.system(f'ln -s {root_dir} ./data/vimeo90k')
        print('Sym-linking done.')
    else:
        print('data/vimeo90k already exists.')
def _bytes2img(img_bytes):
    """Decode encoded image bytes to a float32 (H, W, 1) grayscale array in [0, 1]."""
    buf = np.frombuffer(img_bytes, np.uint8)
    decoded = cv2.imdecode(buf, cv2.IMREAD_GRAYSCALE)
    return np.expand_dims(decoded, 2).astype(np.float32) / 255.0
class MFQEv2Dataset(data.Dataset):
    """MFQEv2 dataset.

    For training data: LMDB is adopted. See create_lmdb for details.

    Return: A dict includes:
        img_lqs: (T, [RGB], H, W)
        img_gt: ([RGB], H, W)
        key: str
    """

    def __init__(self, opts_dict, radius):
        super().__init__()
        self.opts_dict = opts_dict
        # GT and LQ LMDB roots under the sym-linked data directory.
        self.gt_root = op.join('data/MFQEv2/', self.opts_dict['gt_path'])
        self.lq_root = op.join('data/MFQEv2/', self.opts_dict['lq_path'])
        self.meta_info_path = op.join(self.gt_root, self.opts_dict['meta_info_fp'])
        # First token of each meta line is the sample key.
        with open(self.meta_info_path, 'r') as fin:
            self.keys = [line.split(' ')[0] for line in fin]
        # FileClient is created lazily in __getitem__ so each dataloader
        # worker opens its own LMDB handles.
        self.file_client = None
        self.io_opts_dict = dict()
        self.io_opts_dict['type'] = 'lmdb'
        self.io_opts_dict['db_paths'] = [self.lq_root, self.gt_root]
        self.io_opts_dict['client_keys'] = ['lq', 'gt']
        if (radius == 0):
            # Degenerate case: repeat the center frame three times.
            self.neighbor_list = [4, 4, 4]
        else:
            # Symmetric window of 2*radius+1 frames centered on im4
            # (frames are numbered 1..9 per the LMDB key layout).
            nfs = ((2 * radius) + 1)
            self.neighbor_list = [(i + ((9 - nfs) // 2)) for i in range(nfs)]

    def __getitem__(self, index):
        if (self.file_client is None):
            # NOTE(review): pop('type') mutates io_opts_dict on first use;
            # safe only because the client is created exactly once.
            self.file_client = FileClient(self.io_opts_dict.pop('type'), **self.io_opts_dict)
        # NOTE(review): in-place reverse persists across __getitem__ calls,
        # so the temporal order toggles rather than being sampled per item.
        if (self.opts_dict['random_reverse'] and (random.random() < 0.5)):
            self.neighbor_list.reverse()
        gt_size = self.opts_dict['gt_size']
        key = self.keys[index]
        (clip, seq, _) = key.split('/')
        # GT: the center frame addressed directly by the key.
        img_gt_path = key
        img_bytes = self.file_client.get(img_gt_path, 'gt')
        img_gt = _bytes2img(img_bytes)
        # LQ: all neighboring frames of the window.
        img_lqs = []
        for neighbor in self.neighbor_list:
            img_lq_path = f'{clip}/{seq}/im{neighbor}.png'
            img_bytes = self.file_client.get(img_lq_path, 'lq')
            img_lq = _bytes2img(img_bytes)
            img_lqs.append(img_lq)
        # Spatially-aligned random crop of GT and all LQ frames.
        (img_gt, img_lqs) = paired_random_crop(img_gt, img_lqs, gt_size, img_gt_path)
        # Append GT so the same flip/rotation is applied to every frame.
        img_lqs.append(img_gt)
        img_results = augment(img_lqs, self.opts_dict['use_flip'], self.opts_dict['use_rot'])
        img_results = totensor(img_results)
        img_lqs = torch.stack(img_results[0:(- 1)], dim=0)
        img_gt = img_results[(- 1)]
        return {'lq': img_lqs, 'gt': img_gt}

    def __len__(self):
        return len(self.keys)
class VideoTestMFQEv2Dataset(data.Dataset):
    """Video test dataset for the MFQEv2 sequences recommended by ITU-T.

    Validation data is read directly from disk (.yuv files).

    All frames are tested. For the first and last frames, out-of-range
    neighbor indexes are clamped, so border frames serve as their own
    neighbors.
    """

    def __init__(self, opts_dict, radius):
        super().__init__()

        assert radius != 0, 'Not implemented!'

        self.opts_dict = opts_dict

        self.gt_root = op.join('data/MFQEv2/', self.opts_dict['gt_path'])
        self.lq_root = op.join('data/MFQEv2/', self.opts_dict['lq_path'])

        # Flat per-frame records; entry i describes the i-th test sample.
        self.data_info = {'lq_path': [], 'gt_path': [], 'gt_index': [], 'lq_indexes': [], 'h': [], 'w': [], 'index_vid': [], 'name_vid': []}

        gt_path_list = sorted(glob.glob(op.join(self.gt_root, '*.yuv')))
        self.vid_num = len(gt_path_list)

        for idx_vid, gt_vid_path in enumerate(gt_path_list):
            name_vid = gt_vid_path.split('/')[-1]
            # Resolution and frame count are parsed from the file name
            # (expected pattern: '<name>_<W>x<H>_<nfs>.yuv').
            w, h = map(int, name_vid.split('_')[-2].split('x'))
            nfs = int(name_vid.split('.')[-2].split('_')[-1])
            lq_vid_path = op.join(self.lq_root, name_vid)

            for iter_frm in range(nfs):
                # Clamp the temporal window at the sequence borders.
                lq_indexes = list(range(iter_frm - radius, iter_frm + radius + 1))
                lq_indexes = list(np.clip(lq_indexes, 0, nfs - 1))

                self.data_info['index_vid'].append(idx_vid)
                self.data_info['gt_path'].append(gt_vid_path)
                self.data_info['lq_path'].append(lq_vid_path)
                self.data_info['name_vid'].append(name_vid)
                self.data_info['w'].append(w)
                self.data_info['h'].append(h)
                self.data_info['gt_index'].append(iter_frm)
                self.data_info['lq_indexes'].append(lq_indexes)

    def __getitem__(self, index):
        # Ground-truth frame (Y channel only), scaled to [0, 1].
        img = import_yuv(seq_path=self.data_info['gt_path'][index], h=self.data_info['h'][index], w=self.data_info['w'][index], tot_frm=1, start_frm=self.data_info['gt_index'][index], only_y=True)
        img_gt = np.expand_dims(np.squeeze(img), 2).astype(np.float32) / 255.0

        # Low-quality neighboring frames, loaded one by one from the .yuv.
        img_lqs = []
        for lq_index in self.data_info['lq_indexes'][index]:
            img = import_yuv(seq_path=self.data_info['lq_path'][index], h=self.data_info['h'][index], w=self.data_info['w'][index], tot_frm=1, start_frm=lq_index, only_y=True)
            img_lq = np.expand_dims(np.squeeze(img), 2).astype(np.float32) / 255.0
            img_lqs.append(img_lq)

        # Convert GT and LQ together so they share the same tensor transform.
        img_lqs.append(img_gt)
        img_results = totensor(img_lqs)
        img_lqs = torch.stack(img_results[0:-1], dim=0)
        img_gt = img_results[-1]

        return {'lq': img_lqs, 'gt': img_gt, 'name_vid': self.data_info['name_vid'][index], 'index_vid': self.data_info['index_vid'][index]}

    def __len__(self):
        return len(self.data_info['gt_path'])

    def get_vid_num(self):
        """Number of distinct test videos (not frames)."""
        return self.vid_num
def _bytes2img(img_bytes):
    """Decode raw encoded-image bytes to a (H, W, 1) float32 array in [0, 1]."""
    decoded = cv2.imdecode(np.frombuffer(img_bytes, np.uint8), cv2.IMREAD_GRAYSCALE)
    # astype() copies, so the in-place division below does not alias the input.
    img = decoded[..., np.newaxis].astype(np.float32)
    img /= 255.0
    return img
class Vimeo90KDataset(data.Dataset):
    """Vimeo-90K training dataset.

    Training data is read from two LMDB databases (low-quality and
    ground-truth); see create_lmdb for how they are built.

    Each item is a dict with:
        lq: (T, C=1, H, W) low-quality neighboring frames (grayscale,
            since _bytes2img decodes with IMREAD_GRAYSCALE).
        gt: (C=1, H, W) ground-truth center frame.
    """

    def __init__(self, opts_dict, radius):
        # opts_dict: dataset options (paths, gt_size, flip/rot flags, ...).
        # radius: number of neighboring frames on each side of the center.
        super().__init__()

        self.opts_dict = opts_dict

        self.gt_root = op.join('data/vimeo90k/', self.opts_dict['gt_path'])
        self.lq_root = op.join('data/vimeo90k/', self.opts_dict['lq_path'])
        self.meta_info_path = op.join(self.gt_root, 'meta_info.txt')

        # Each meta-info line starts with the LMDB key of one sample.
        with open(self.meta_info_path, 'r') as fin:
            self.keys = [line.split(' ')[0] for line in fin]

        # The FileClient is created lazily in __getitem__ so each dataloader
        # worker opens its own LMDB handles.
        self.file_client = None
        self.io_opts_dict = dict()
        self.io_opts_dict['type'] = 'lmdb'
        self.io_opts_dict['db_paths'] = [self.lq_root, self.gt_root]
        self.io_opts_dict['client_keys'] = ['lq', 'gt']

        if radius == 0:
            # Single-frame mode: repeat the center frame (im4) three times.
            self.neighbor_list = [4, 4, 4]
        else:
            # Pick a window of 2*radius+1 frames centered on im4
            # (septuplets are indexed im1..im7).
            nfs = 2 * radius + 1
            self.neighbor_list = [i + (9 - nfs) // 2 for i in range(nfs)]

    def __getitem__(self, index):
        if self.file_client is None:
            # NOTE: pop('type') mutates io_opts_dict; this is only safe
            # because the client is created exactly once per worker.
            self.file_client = FileClient(self.io_opts_dict.pop('type'), **self.io_opts_dict)

        # Randomly reverse the temporal order (in place; the reversed order
        # persists for subsequent items of this worker).
        if self.opts_dict['random_reverse'] and random.random() < 0.5:
            self.neighbor_list.reverse()

        gt_size = self.opts_dict['gt_size']
        key = self.keys[index]
        clip, seq, _ = key.split('/')

        # Ground-truth center frame.
        img_gt_path = key
        img_bytes = self.file_client.get(img_gt_path, 'gt')
        img_gt = _bytes2img(img_bytes)

        # Low-quality neighboring frames.
        img_lqs = []
        for neighbor in self.neighbor_list:
            img_lq_path = f'{clip}/{seq}/im{neighbor}.png'
            img_bytes = self.file_client.get(img_lq_path, 'lq')
            img_lq = _bytes2img(img_bytes)
            img_lqs.append(img_lq)

        # Randomly crop the same patch from GT and all LQ frames.
        img_gt, img_lqs = paired_random_crop(img_gt, img_lqs, gt_size, img_gt_path)

        # Append GT so the same flip/rotation is applied to all frames.
        img_lqs.append(img_gt)
        img_results = augment(img_lqs, self.opts_dict['use_flip'], self.opts_dict['use_rot'])

        img_results = totensor(img_results)
        img_lqs = torch.stack(img_results[0:-1], dim=0)
        img_gt = img_results[-1]

        return {'lq': img_lqs, 'gt': img_gt}

    def __len__(self):
        return len(self.keys)
class VideoTestVimeo90KDataset(data.Dataset):
    """Video test dataset for Vimeo-90K septuplets.

    Validation data is read directly from disk (.yuv files).

    Only the center frame (index 3 of the 7 frames) is evaluated.
    """

    def __init__(self, opts_dict, radius):
        super().__init__()

        assert radius != 0, 'Not implemented!'

        self.opts_dict = opts_dict

        self.gt_root = op.join('data/vimeo90k/', self.opts_dict['gt_path'])
        self.lq_root = op.join('data/vimeo90k/', self.opts_dict['lq_path'])
        self.meta_info_path = op.join('data/vimeo90k/', self.opts_dict['meta_path'])

        # Flat per-sample records (one entry per video, center frame only).
        self.data_info = {'lq_path': [], 'gt_path': [], 'gt_index': [], 'lq_indexes': [], 'h': [], 'w': [], 'index_vid': [], 'name_vid': []}

        # Meta file lines look like '00001/0001'; each maps to a video file
        # '<gt_root>/00001_0001.yuv'.
        # NOTE(review): meta_fp is never closed — consider a with-block.
        gt_path_list = []
        meta_fp = open(self.meta_info_path, 'r')
        while True:
            new_line = meta_fp.readline().split('\n')[0]
            if new_line == '':
                break
            vid_name = new_line.split('/')[0] + '_' + new_line.split('/')[1]
            gt_path = op.join(self.gt_root, vid_name + '.yuv')
            gt_path_list.append(gt_path)

        self.vid_num = len(gt_path_list)

        for idx_vid, gt_vid_path in enumerate(gt_path_list):
            name_vid = gt_vid_path.split('/')[-1]
            w, h = 448, 256  # fixed Vimeo-90K frame size
            lq_vid_path = op.join(self.lq_root, name_vid)
            lq_indexes = list(range(0, 7))  # all 7 frames feed the model

            self.data_info['index_vid'].append(idx_vid)
            self.data_info['gt_path'].append(gt_vid_path)
            self.data_info['lq_path'].append(lq_vid_path)
            self.data_info['name_vid'].append(name_vid)
            self.data_info['w'].append(w)
            self.data_info['h'].append(h)
            self.data_info['gt_index'].append(3)  # center frame of the septuplet
            self.data_info['lq_indexes'].append(lq_indexes)

    def __getitem__(self, index):
        # Ground-truth center frame (Y channel), scaled to [0, 1]. These
        # .yuv files are stored as 444p.
        img = import_yuv(seq_path=self.data_info['gt_path'][index], yuv_type='444p', h=self.data_info['h'][index], w=self.data_info['w'][index], tot_frm=1, start_frm=self.data_info['gt_index'][index], only_y=True)
        img_gt = np.expand_dims(np.squeeze(img), 2).astype(np.float32) / 255.0

        # Low-quality frames, loaded one by one from the .yuv.
        img_lqs = []
        for lq_index in self.data_info['lq_indexes'][index]:
            img = import_yuv(seq_path=self.data_info['lq_path'][index], yuv_type='444p', h=self.data_info['h'][index], w=self.data_info['w'][index], tot_frm=1, start_frm=lq_index, only_y=True)
            img_lq = np.expand_dims(np.squeeze(img), 2).astype(np.float32) / 255.0
            img_lqs.append(img_lq)

        # Convert GT and LQ together so they share the same tensor transform.
        img_lqs.append(img_gt)
        img_results = totensor(img_lqs)
        img_lqs = torch.stack(img_results[0:-1], dim=0)
        img_gt = img_results[-1]

        return {'lq': img_lqs, 'gt': img_gt, 'name_vid': self.data_info['name_vid'][index], 'index_vid': self.data_info['index_vid'][index]}

    def __len__(self):
        return len(self.data_info['gt_path'])

    def get_vid_num(self):
        """Number of distinct test videos."""
        return self.vid_num
class DeformConvFunction(Function):
    """autograd Function wrapping the deform_conv CUDA extension kernels."""

    @staticmethod
    def forward(ctx, input, offset, weight, stride=1, padding=0, dilation=1, groups=1, deformable_groups=1, im2col_step=64):
        if input is not None and input.dim() != 4:
            raise ValueError('Expected 4D tensor as input, got {}D tensor instead.'.format(input.dim()))
        # Stash conv hyper-parameters on ctx for the backward pass.
        ctx.stride = _pair(stride)
        ctx.padding = _pair(padding)
        ctx.dilation = _pair(dilation)
        ctx.groups = groups
        ctx.deformable_groups = deformable_groups
        ctx.im2col_step = im2col_step

        ctx.save_for_backward(input, offset, weight)

        output = input.new_empty(DeformConvFunction._output_size(input, weight, ctx.padding, ctx.dilation, ctx.stride))

        # Scratch buffers used by the CUDA im2col implementation.
        ctx.bufs_ = [input.new_empty(0), input.new_empty(0)]

        if not input.is_cuda:
            # Only the CUDA path is implemented by the extension.
            raise NotImplementedError
        else:
            # im2col processes the batch in chunks of cur_im2col_step.
            cur_im2col_step = min(ctx.im2col_step, input.shape[0])
            assert (input.shape[0] % cur_im2col_step) == 0, 'im2col step must divide batchsize'
            deform_conv_cuda.deform_conv_forward_cuda(input, weight, offset, output, ctx.bufs_[0], ctx.bufs_[1], weight.size(3), weight.size(2), ctx.stride[1], ctx.stride[0], ctx.padding[1], ctx.padding[0], ctx.dilation[1], ctx.dilation[0], ctx.groups, ctx.deformable_groups, cur_im2col_step)
        return output

    @staticmethod
    def backward(ctx, grad_output):
        input, offset, weight = ctx.saved_tensors

        grad_input = grad_offset = grad_weight = None

        if not grad_output.is_cuda:
            raise NotImplementedError
        else:
            cur_im2col_step = min(ctx.im2col_step, input.shape[0])
            assert (input.shape[0] % cur_im2col_step) == 0, 'im2col step must divide batchsize'

            if ctx.needs_input_grad[0] or ctx.needs_input_grad[1]:
                # Input and offset gradients are filled by one fused kernel.
                grad_input = torch.zeros_like(input)
                grad_offset = torch.zeros_like(offset)
                deform_conv_cuda.deform_conv_backward_input_cuda(input, offset, grad_output, grad_input, grad_offset, weight, ctx.bufs_[0], weight.size(3), weight.size(2), ctx.stride[1], ctx.stride[0], ctx.padding[1], ctx.padding[0], ctx.dilation[1], ctx.dilation[0], ctx.groups, ctx.deformable_groups, cur_im2col_step)

            if ctx.needs_input_grad[2]:
                grad_weight = torch.zeros_like(weight)
                deform_conv_cuda.deform_conv_backward_parameters_cuda(input, offset, grad_output, grad_weight, ctx.bufs_[0], ctx.bufs_[1], weight.size(3), weight.size(2), ctx.stride[1], ctx.stride[0], ctx.padding[1], ctx.padding[0], ctx.dilation[1], ctx.dilation[0], ctx.groups, ctx.deformable_groups, 1, cur_im2col_step)

        # One gradient slot per forward argument; hyper-parameters get None.
        return (grad_input, grad_offset, grad_weight, None, None, None, None, None)

    @staticmethod
    def _output_size(input, weight, padding, dilation, stride):
        """Standard convolution output-size arithmetic per spatial dim."""
        channels = weight.size(0)
        output_size = (input.size(0), channels)
        for d in range(input.dim() - 2):
            in_size = input.size(d + 2)
            pad = padding[d]
            kernel = dilation[d] * (weight.size(d + 2) - 1) + 1
            stride_ = stride[d]
            output_size += ((in_size + 2 * pad - kernel) // stride_ + 1,)
        if not all(map(lambda s: s > 0, output_size)):
            raise ValueError('convolution input is too small (output would be {})'.format('x'.join(map(str, output_size))))
        return output_size
class ModulatedDeformConvFunction(Function):
    """autograd Function wrapping the modulated (DCNv2) deform_conv CUDA kernels."""

    @staticmethod
    def forward(ctx, input, offset, mask, weight, bias=None, stride=1, padding=0, dilation=1, groups=1, deformable_groups=1):
        # Stash hyper-parameters for backward. These stay scalar ints here
        # (unlike DeformConvFunction, which uses _pair).
        ctx.stride = stride
        ctx.padding = padding
        ctx.dilation = dilation
        ctx.groups = groups
        ctx.deformable_groups = deformable_groups
        ctx.with_bias = bias is not None
        if not ctx.with_bias:
            # The kernel always expects a bias tensor; pass a dummy when unused.
            bias = input.new_empty(1)
        if not input.is_cuda:
            raise NotImplementedError
        if weight.requires_grad or mask.requires_grad or offset.requires_grad or input.requires_grad:
            ctx.save_for_backward(input, offset, mask, weight, bias)
        output = input.new_empty(ModulatedDeformConvFunction._infer_shape(ctx, input, weight))
        # Scratch buffers for the CUDA implementation.
        ctx._bufs = [input.new_empty(0), input.new_empty(0)]
        deform_conv_cuda.modulated_deform_conv_cuda_forward(input, weight, bias, ctx._bufs[0], offset, mask, output, ctx._bufs[1], weight.shape[2], weight.shape[3], ctx.stride, ctx.stride, ctx.padding, ctx.padding, ctx.dilation, ctx.dilation, ctx.groups, ctx.deformable_groups, ctx.with_bias)
        return output

    @staticmethod
    def backward(ctx, grad_output):
        if not grad_output.is_cuda:
            raise NotImplementedError
        input, offset, mask, weight, bias = ctx.saved_tensors
        grad_input = torch.zeros_like(input)
        grad_offset = torch.zeros_like(offset)
        grad_mask = torch.zeros_like(mask)
        grad_weight = torch.zeros_like(weight)
        grad_bias = torch.zeros_like(bias)
        # One fused kernel fills all gradient tensors in place.
        deform_conv_cuda.modulated_deform_conv_cuda_backward(input, weight, bias, ctx._bufs[0], offset, mask, ctx._bufs[1], grad_input, grad_weight, grad_bias, grad_offset, grad_mask, grad_output, weight.shape[2], weight.shape[3], ctx.stride, ctx.stride, ctx.padding, ctx.padding, ctx.dilation, ctx.dilation, ctx.groups, ctx.deformable_groups, ctx.with_bias)
        if not ctx.with_bias:
            grad_bias = None

        # One gradient slot per forward argument; hyper-parameters get None.
        return (grad_input, grad_offset, grad_mask, grad_weight, grad_bias, None, None, None, None, None)

    @staticmethod
    def _infer_shape(ctx, input, weight):
        """Output (n, c_out, h_out, w_out) via standard conv arithmetic."""
        n = input.size(0)
        channels_out = weight.size(0)
        height, width = input.shape[2:4]
        kernel_h, kernel_w = weight.shape[2:4]
        height_out = (height + 2 * ctx.padding - (ctx.dilation * (kernel_h - 1) + 1)) // ctx.stride + 1
        width_out = (width + 2 * ctx.padding - (ctx.dilation * (kernel_w - 1) + 1)) // ctx.stride + 1
        return (n, channels_out, height_out, width_out)
class DeformConv(nn.Module):
    """Deformable convolution layer; sampling offsets are supplied by the caller."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, deformable_groups=1,
                 bias=False):
        super(DeformConv, self).__init__()
        # Bias is not supported by the underlying CUDA op.
        assert not bias
        assert in_channels % groups == 0, \
            'in_channels {} cannot be divisible by groups {}'.format(in_channels, groups)
        assert out_channels % groups == 0, \
            'out_channels {} cannot be divisible by groups {}'.format(out_channels, groups)

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = _pair(kernel_size)
        self.stride = _pair(stride)
        self.padding = _pair(padding)
        self.dilation = _pair(dilation)
        self.groups = groups
        self.deformable_groups = deformable_groups

        weight_shape = (out_channels, in_channels // self.groups) + self.kernel_size
        self.weight = nn.Parameter(torch.Tensor(*weight_shape))
        self.reset_parameters()

    def reset_parameters(self):
        """Uniform init in [-1/sqrt(n), 1/sqrt(n)] with n = C_in * kH * kW."""
        fan = self.in_channels * self.kernel_size[0] * self.kernel_size[1]
        bound = 1.0 / math.sqrt(fan)
        self.weight.data.uniform_(-bound, bound)

    def forward(self, x, offset):
        return deform_conv(x, offset, self.weight, self.stride, self.padding,
                           self.dilation, self.groups, self.deformable_groups)
class DeformConvPack(DeformConv):
    """DeformConv that predicts its own offsets with an internal conv layer."""

    def __init__(self, *args, **kwargs):
        super(DeformConvPack, self).__init__(*args, **kwargs)
        # Two offset maps (x and y) per sampling point per deformable group.
        offset_channels = self.deformable_groups * 2 * self.kernel_size[0] * self.kernel_size[1]
        self.conv_offset = nn.Conv2d(self.in_channels,
                                     offset_channels,
                                     kernel_size=self.kernel_size,
                                     stride=_pair(self.stride),
                                     padding=_pair(self.padding),
                                     bias=True)
        self.init_offset()

    def init_offset(self):
        """Zero-init the offset conv so the layer starts as a plain convolution."""
        self.conv_offset.weight.data.zero_()
        self.conv_offset.bias.data.zero_()

    def forward(self, x):
        offsets = self.conv_offset(x)
        return deform_conv(x, offsets, self.weight, self.stride, self.padding,
                           self.dilation, self.groups, self.deformable_groups)
class ModulatedDeformConv(nn.Module):
    """Modulated deformable convolution (DCNv2); offsets and masks come from the caller."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, deformable_groups=1,
                 bias=True):
        super(ModulatedDeformConv, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = _pair(kernel_size)
        # stride/padding/dilation stay scalar; the CUDA op takes plain ints.
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.groups = groups
        self.deformable_groups = deformable_groups
        self.with_bias = bias

        self.weight = nn.Parameter(
            torch.Tensor(out_channels, in_channels // groups, *self.kernel_size))
        if bias:
            self.bias = nn.Parameter(torch.Tensor(out_channels))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        """Uniform weight init in [-1/sqrt(n), 1/sqrt(n)]; zero bias."""
        fan = self.in_channels
        for k in self.kernel_size:
            fan *= k
        bound = 1.0 / math.sqrt(fan)
        self.weight.data.uniform_(-bound, bound)
        if self.bias is not None:
            self.bias.data.zero_()

    def forward(self, x, offset, mask):
        return modulated_deform_conv(x, offset, mask, self.weight, self.bias,
                                     self.stride, self.padding, self.dilation,
                                     self.groups, self.deformable_groups)
class ModulatedDeformConvPack(ModulatedDeformConv):
    """DCNv2 layer that predicts its own offsets and modulation masks."""

    def __init__(self, *args, **kwargs):
        super(ModulatedDeformConvPack, self).__init__(*args, **kwargs)
        # Three maps per sampling point: x-offset, y-offset and mask.
        out_maps = self.deformable_groups * 3 * self.kernel_size[0] * self.kernel_size[1]
        self.conv_offset_mask = nn.Conv2d(self.in_channels,
                                          out_maps,
                                          kernel_size=self.kernel_size,
                                          stride=_pair(self.stride),
                                          padding=_pair(self.padding),
                                          bias=True)
        self.init_offset()

    def init_offset(self):
        """Zero-init so training starts from a plain (unmodulated) convolution."""
        self.conv_offset_mask.weight.data.zero_()
        self.conv_offset_mask.bias.data.zero_()

    def forward(self, x):
        prediction = self.conv_offset_mask(x)
        o1, o2, mask = torch.chunk(prediction, 3, dim=1)
        offset = torch.cat((o1, o2), dim=1)
        mask = torch.sigmoid(mask)  # modulation scalars in (0, 1)
        return modulated_deform_conv(x, offset, mask, self.weight, self.bias,
                                     self.stride, self.padding, self.dilation,
                                     self.groups, self.deformable_groups)
def main():
    """Enhance a compressed YUV sequence with a pretrained MFVQE model and
    report per-frame and average PSNR against the raw sequence.

    Relies on module-level globals defined elsewhere in this script:
    ckp_path, raw_yuv_path, lq_yuv_path, h, w, nfs.
    """
    # Build the model and load the checkpoint.
    opts_dict = {'radius': 3, 'stdf': {'in_nc': 1, 'out_nc': 64, 'nf': 32, 'nb': 3, 'base_ks': 3, 'deform_ks': 3}, 'qenet': {'in_nc': 64, 'out_nc': 1, 'nf': 48, 'nb': 8, 'base_ks': 3}}
    model = MFVQE(opts_dict=opts_dict)
    msg = f'loading model {ckp_path}...'
    print(msg)
    checkpoint = torch.load(ckp_path)
    if 'module.' in list(checkpoint['state_dict'].keys())[0]:
        # Checkpoint saved from DataParallel/DDP: strip the 'module.' prefix.
        new_state_dict = OrderedDict()
        for k, v in checkpoint['state_dict'].items():
            name = k[7:]
            new_state_dict[name] = v
        model.load_state_dict(new_state_dict)
    else:
        model.load_state_dict(checkpoint['state_dict'])
    msg = f'> model {ckp_path} loaded.'
    print(msg)
    model = model.cuda()
    model.eval()

    # Load raw (ground-truth) and compressed sequences, Y channel only.
    msg = f'loading raw and low-quality yuv...'
    print(msg)
    raw_y = utils.import_yuv(seq_path=raw_yuv_path, h=h, w=w, tot_frm=nfs, start_frm=0, only_y=True)
    lq_y = utils.import_yuv(seq_path=lq_yuv_path, h=h, w=w, tot_frm=nfs, start_frm=0, only_y=True)
    raw_y = raw_y.astype(np.float32) / 255.0
    lq_y = lq_y.astype(np.float32) / 255.0
    msg = '> yuv loaded.'
    print(msg)

    # Enhance frame by frame and accumulate PSNR before/after enhancement.
    criterion = utils.PSNR()
    unit = 'dB'
    pbar = tqdm(total=nfs, ncols=80)
    ori_psnr_counter = utils.Counter()
    enh_psnr_counter = utils.Counter()
    for idx in range(nfs):
        # Sliding window of 7 frames (radius 3), clamped at the borders.
        idx_list = list(range(idx - 3, idx + 4))
        idx_list = np.clip(idx_list, 0, nfs - 1)
        input_data = []
        for idx_ in idx_list:
            input_data.append(lq_y[idx_])
        input_data = torch.from_numpy(np.array(input_data))
        input_data = torch.unsqueeze(input_data, 0).cuda()
        # Fix: run inference under no_grad. The original built an autograd
        # graph for every frame, needlessly holding GPU memory during pure
        # evaluation.
        with torch.no_grad():
            enhanced_frm = model(input_data)

        gt_frm = torch.from_numpy(raw_y[idx]).cuda()
        # Center frame of the window (index 3) is the frame being enhanced.
        batch_ori = criterion(input_data[0, 3, ...], gt_frm)
        batch_perf = criterion(enhanced_frm[0, 0, ...], gt_frm)
        ori_psnr_counter.accum(volume=batch_ori)
        enh_psnr_counter.accum(volume=batch_perf)

        pbar.set_description('[{:.3f}] {:s} -> [{:.3f}] {:s}'.format(batch_ori, unit, batch_perf, unit))
        pbar.update()
    pbar.close()

    ori_ = ori_psnr_counter.get_ave()
    enh_ = enh_psnr_counter.get_ave()
    print('ave ori [{:.3f}] {:s}, enh [{:.3f}] {:s}, delta [{:.3f}] {:s}'.format(ori_, unit, enh_, unit, enh_ - ori_, unit))
    print('> done.')
def set_random_seed(seed):
    """Seed the Python, NumPy and PyTorch (CPU + all CUDA devices) RNGs."""
    seeders = (
        random.seed,
        np.random.seed,
        torch.manual_seed,
        torch.cuda.manual_seed,
        torch.cuda.manual_seed_all,
    )
    for seeder in seeders:
        seeder(seed)
def init_dist(local_rank=0, backend='nccl'):
    """Initialize one process of a distributed training group.

    Args:
        local_rank (int): GPU index for this process on the local node.
        backend (str): torch.distributed backend, e.g. 'nccl'.
    """
    # 'spawn' is required when dataloader workers touch CUDA tensors. Guard
    # the call: the start method may only be set once per process, and the
    # original raised RuntimeError if it had already been set.
    if tmp.get_start_method(allow_none=True) is None:
        tmp.set_start_method('spawn')
    torch.cuda.set_device(local_rank)
    # Rank/world size are read from the environment (torchrun / launcher).
    dist.init_process_group(backend=backend)
def get_dist_info():
    """Return (rank, world_size); (0, 1) when not running distributed."""
    if not (dist.is_available() and dist.is_initialized()):
        return (0, 1)
    return (dist.get_rank(), dist.get_world_size())
class DistSampler(Sampler):
    """Sampler that restricts data loading to a subset of the dataset.

    Modified from torch.utils.data.distributed.DistributedSampler.
    Supports enlarging the dataset for iteration-based training, which saves
    time when the dataloader is restarted after each epoch.

    Args:
        dataset (torch.utils.data.Dataset): Dataset used for sampling.
        num_replicas (int | None): Number of processes participating in the
            training (usually the world size). If None, inferred from the
            current process group (1 when not distributed).
        rank (int | None): Rank of the current process within num_replicas.
            If None, inferred from the current process group (0 when not
            distributed).
        ratio (int): Enlarging ratio. Default: 1.
    """

    def __init__(self, dataset, num_replicas=None, rank=None, ratio=1):
        # Fix: the documented None defaults crashed the original with a
        # TypeError (len(...) / None); infer them from the process group.
        dist_ready = dist.is_available() and dist.is_initialized()
        if num_replicas is None:
            num_replicas = dist.get_world_size() if dist_ready else 1
        if rank is None:
            rank = dist.get_rank() if dist_ready else 0
        self.dataset = dataset
        self.num_replicas = num_replicas
        self.rank = rank
        self.epoch = 0
        # Virtually enlarge the dataset by `ratio`, then split it evenly
        # across replicas.
        self.num_samples = math.ceil(len(self.dataset) * ratio / self.num_replicas)
        self.total_size = self.num_samples * self.num_replicas

    def __iter__(self):
        # Deterministic shuffle per epoch so all replicas agree on the order.
        g = torch.Generator()
        g.manual_seed(self.epoch)
        indices = torch.randperm(self.total_size, generator=g).tolist()

        # Wrap enlarged indices back into the real dataset range.
        dataset_size = len(self.dataset)
        indices = [v % dataset_size for v in indices]

        # Each replica takes a strided slice of the shared permutation.
        indices = indices[self.rank:self.total_size:self.num_replicas]
        assert len(indices) == self.num_samples
        return iter(indices)

    def __len__(self):
        return self.num_samples

    def set_epoch(self, epoch):
        """For shuffling data at each epoch. See train.py."""
        self.epoch = epoch
def create_dataloader(dataset, opts_dict, sampler=None, phase='train', seed=None):
    """Create a train or validation dataloader.

    Args:
        dataset: Dataset to wrap.
        opts_dict (dict): Full option dict; the train branch reads
            opts_dict['dataset']['train'][...] and opts_dict['train']['rank'].
        sampler: Optional sampler (e.g. DistSampler) for the train phase.
        phase (str): 'train' or 'val'.
        seed (int | None): Base seed used for per-worker seeding (train).

    Returns:
        torch.utils.data.DataLoader

    Raises:
        ValueError: For an unknown phase. (The original fell through and
            crashed with a NameError on dataloader_args instead.)
    """
    if phase == 'train':
        dataloader_args = dict(
            dataset=dataset,
            batch_size=opts_dict['dataset']['train']['batch_size_per_gpu'],
            shuffle=False,
            num_workers=opts_dict['dataset']['train']['num_worker_per_gpu'],
            sampler=sampler,
            drop_last=True,
            pin_memory=True)
        if sampler is None:
            # No distributed sampler: let the dataloader shuffle itself.
            dataloader_args['shuffle'] = True
        # Give every worker a distinct, reproducible seed.
        dataloader_args['worker_init_fn'] = partial(
            _worker_init_fn,
            num_workers=opts_dict['dataset']['train']['num_worker_per_gpu'],
            rank=opts_dict['train']['rank'],
            seed=seed)
    elif phase == 'val':
        dataloader_args = dict(
            dataset=dataset,
            batch_size=1,
            shuffle=False,
            num_workers=0,
            pin_memory=False)
    else:
        raise ValueError(f'Unsupported phase: {phase}.')
    return DataLoader(**dataloader_args)
def _worker_init_fn(worker_id, num_workers, rank, seed):
    """Give each dataloader worker a distinct, reproducible RNG seed."""
    # Offset by rank and worker id so no two workers share a seed.
    worker_seed = num_workers * rank + worker_id + seed
    for seed_fn in (np.random.seed, random.seed):
        seed_fn(worker_seed)
class CharbonnierLoss(torch.nn.Module):
    """Charbonnier loss: mean(sqrt((X - Y)^2 + eps)), a smooth L1 variant."""

    def __init__(self, eps=1e-06):
        super(CharbonnierLoss, self).__init__()
        self.eps = eps  # keeps the sqrt differentiable at zero difference

    def forward(self, X, Y):
        squared_diff = (X - Y) ** 2
        return torch.sqrt(squared_diff + self.eps).mean()
class PSNR(torch.nn.Module):
    """Peak signal-to-noise ratio in dB (assumes a signal peak of 1.0,
    i.e. inputs scaled to [0, 1] — confirm against callers).

    Args:
        eps (float): Lower bound on the MSE. Fix: the original accepted eps
            but never used it, so identical inputs raised ZeroDivisionError;
            clamping caps the PSNR at 10*log10(1/eps) instead.
    """

    def __init__(self, eps=1e-06):
        super(PSNR, self).__init__()
        self.eps = eps
        self.mse_func = nn.MSELoss()

    def forward(self, X, Y):
        mse = self.mse_func(X, Y)
        # Clamp so identical inputs yield a finite (capped) PSNR rather than
        # crashing on 1/0; values with mse >= eps are unchanged.
        return 10 * math.log10(1 / max(mse.item(), self.eps))
class MultiStepRestartLR(_LRScheduler):
    """MultiStep with restarts learning rate scheme.

    Args:
        optimizer (torch.nn.optimizer): Torch optimizer.
        milestones (list): Iterations that will decrease learning rate.
        gamma (float): Decrease ratio. Default: 0.1.
        restarts (list): Restart iterations. Default: [0].
        restart_weights (list): Restart weights at each restart iteration.
            Default: [1].
        last_epoch (int): Used in _LRScheduler. Default: -1.
    """

    def __init__(self, optimizer, milestones, gamma=0.1, restarts=[0], restart_weights=[1], last_epoch=-1):
        # NOTE(review): mutable default args ([0], [1]) are shared across
        # instances; harmless here since they are never mutated.
        # Counter lets a milestone appear multiple times — each occurrence
        # multiplies the decay (gamma ** count).
        self.milestones = Counter(milestones)
        self.gamma = gamma
        self.restarts = restarts
        self.restart_weights = restart_weights
        assert len(self.restarts) == len(self.restart_weights), 'restarts and their weights do not match.'
        super(MultiStepRestartLR, self).__init__(optimizer, last_epoch)

    def get_lr(self):
        # At a restart iteration: reset to initial_lr * restart weight.
        if self.last_epoch in self.restarts:
            weight = self.restart_weights[self.restarts.index(self.last_epoch)]
            return [group['initial_lr'] * weight for group in self.optimizer.param_groups]
        # Between milestones: keep the current lr unchanged.
        if self.last_epoch not in self.milestones:
            return [group['lr'] for group in self.optimizer.param_groups]
        # At a milestone: decay by gamma once per listed occurrence.
        return [group['lr'] * self.gamma ** self.milestones[self.last_epoch] for group in self.optimizer.param_groups]
def get_position_from_periods(iteration, cumulative_period):
    """Return the index of the right-closest number in the period list.

    For example, with cumulative_period = [100, 200, 300, 400]:
        iteration == 50  -> 0
        iteration == 210 -> 2
        iteration == 300 -> 2

    Args:
        iteration (int): Current iteration.
        cumulative_period (list[int]): Cumulative period list.

    Returns:
        int | None: Index of the first entry >= iteration, or None when the
        iteration lies beyond the last period (same implicit fall-through
        as the original loop).
    """
    matches = (idx for idx, bound in enumerate(cumulative_period)
               if iteration <= bound)
    return next(matches, None)
class CosineAnnealingRestartLR(_LRScheduler):
    """Cosine annealing with restarts learning rate scheme.

    An example config:
        periods = [10, 10, 10, 10]
        restart_weights = [1, 0.5, 0.5, 0.5]
        eta_min = 1e-7

    Four cycles of 10 iterations each; at iterations 10, 20 and 30 the
    scheduler restarts with the corresponding weight in restart_weights.

    Args:
        optimizer (torch.nn.optimizer): Torch optimizer.
        periods (list): Period for each cosine annealing cycle.
        restart_weights (list): Restart weights at each restart iteration.
            Default: [1].
        eta_min (float): The minimum lr. Default: 0.
        last_epoch (int): Used in _LRScheduler. Default: -1.
    """

    def __init__(self, optimizer, periods, restart_weights=[1], eta_min=0, last_epoch=-1):
        self.periods = periods
        self.restart_weights = restart_weights
        self.eta_min = eta_min
        assert len(self.periods) == len(self.restart_weights), 'periods and restart_weights should have the same length.'
        # Cumulative cycle end-points, e.g. [10, 10, 10] -> [10, 20, 30].
        self.cumulative_period = [sum(self.periods[0:i + 1]) for i in range(0, len(self.periods))]
        super(CosineAnnealingRestartLR, self).__init__(optimizer, last_epoch)

    def get_lr(self):
        # Locate the current cycle and the offset of its last restart.
        idx = get_position_from_periods(self.last_epoch, self.cumulative_period)
        current_weight = self.restart_weights[idx]
        nearest_restart = 0 if idx == 0 else self.cumulative_period[idx - 1]
        current_period = self.periods[idx]

        # Standard cosine annealing within the cycle, scaled by the cycle's
        # restart weight.
        return [self.eta_min + current_weight * 0.5 * (base_lr - self.eta_min) * (1 + math.cos(math.pi * ((self.last_epoch - nearest_restart) / current_period))) for base_lr in self.base_lrs]
def import_yuv(seq_path, h, w, tot_frm, yuv_type='420p', start_frm=0, only_y=True):
    """Load frames from an 8-bit planar YUV video file.

    Args:
        seq_path (str): Path to the .yuv file.
        h (int): Luma height.
        w (int): Luma width.
        tot_frm (int): Number of frames to import.
        yuv_type (str): Chroma subsampling, '420p' or '444p'.
        start_frm (int): Index of the first frame to import. Default 0.
        only_y (bool): If True, return only the Y channel.

    Returns:
        np.ndarray | tuple: A (tot_frm, h, w) uint8 Y array when only_y,
        otherwise a tuple (y_seq, u_seq, v_seq) with chroma planes shaped
        according to yuv_type.

    Note:
        "YUV" is traditionally an analog format; these digital files are,
        strictly speaking, YCbCr data, but the YUV name is kept by
        convention. See https://en.wikipedia.org/wiki/YUV
    """
    if yuv_type == '420p':
        hh, ww = h // 2, w // 2
    elif yuv_type == '444p':
        hh, ww = h, w
    else:
        raise Exception('yuv_type not supported.')

    y_size = h * w
    u_size = v_size = hh * ww
    blk_size = y_size + u_size + v_size  # bytes per frame

    y_seq = np.zeros((tot_frm, h, w), dtype=np.uint8)
    if not only_y:
        u_seq = np.zeros((tot_frm, hh, ww), dtype=np.uint8)
        v_seq = np.zeros((tot_frm, hh, ww), dtype=np.uint8)

    with open(seq_path, 'rb') as fp:
        for i in range(tot_frm):
            # Seek to the start of the requested frame, then read Y (+ U/V).
            fp.seek(int(blk_size * (start_frm + i)), 0)
            y_seq[i, ...] = np.fromfile(fp, dtype=np.uint8, count=y_size).reshape(h, w)
            if not only_y:
                u_seq[i, ...] = np.fromfile(fp, dtype=np.uint8, count=u_size).reshape(hh, ww)
                v_seq[i, ...] = np.fromfile(fp, dtype=np.uint8, count=v_size).reshape(hh, ww)

    return y_seq if only_y else (y_seq, u_seq, v_seq)
def write_ycbcr(y, cb, cr, vid_path):
    """Write per-frame Y, Cb and Cr planes to a planar YUV file.

    Args:
        y, cb, cr: Sequences of (H, W) arrays, one entry per frame.
        vid_path (str): Output file path.
    """
    with open(vid_path, 'wb') as fp:
        for frm_idx in range(len(y)):
            # Per frame, write planes in Y, Cb, Cr order, each flattened to
            # its raw row-major bytes.
            for plane in (y, cb, cr):
                num_px = plane[0].shape[0] * plane[0].shape[1]
                fp.write(plane[frm_idx].reshape((num_px,)))
class _HardDiskBackend():
    """Raw hard-disk storage backend."""

    def get(self, filepath):
        """Read the file at `filepath` and return its raw bytes."""
        with open(str(filepath), 'rb') as f:
            return f.read()
class _LmdbBackend():
    """Lmdb storage backend.

    Args:
        db_paths (str | list[str]): Lmdb database path(s).
        client_keys (str | list[str]): One key per database, used to select
            an env in get(). Default: 'default'.
        readonly (bool, optional): If True, disallow any write operations.
            Default: True.
        lock (bool, optional): If False, do not lock the database on
            concurrent access. Default: False.
        readahead (bool, optional): If False, disable OS readahead; may
            improve random reads on databases larger than RAM.
            Default: False.

    Attributes:
        db_paths (list[str]): Lmdb database paths.
    """

    def __init__(self, db_paths, client_keys='default', readonly=True,
                 lock=False, readahead=False, **kwargs):
        # lmdb is optional at module level; fail with a clear message here.
        try:
            import lmdb
        except ImportError:
            raise ImportError('Please install lmdb to enable LmdbBackend.')

        if isinstance(client_keys, str):
            client_keys = [client_keys]

        if isinstance(db_paths, list):
            self.db_paths = [str(v) for v in db_paths]
        elif isinstance(db_paths, str):
            self.db_paths = [str(db_paths)]
        assert len(client_keys) == len(self.db_paths), (
            f'client_keys and db_paths should have the same length, '
            f'but received {len(client_keys)} and {len(self.db_paths)}.')

        # One open lmdb env per client key.
        self._client = {}
        for client, path in zip(client_keys, self.db_paths):
            self._client[client] = lmdb.open(
                path, readonly=readonly, lock=lock, readahead=readahead,
                **kwargs)

    def get(self, filepath, client_key):
        """Get the value stored under `filepath` from the lmdb named `client_key`.

        Args:
            filepath (str | obj:`Path`): Here, filepath is the lmdb key.
            client_key (str): Used for distinguishing different lmdb envs.
        """
        filepath = str(filepath)
        assert client_key in self._client, (
            f'client_key {client_key} is not in lmdb clients.')
        env = self._client[client_key]
        with env.begin(write=False) as txn:
            value_buf = txn.get(filepath.encode('ascii'))
        return value_buf
class FileClient(object):
    """A file client to access LMDB files or general files on disk.

    get() returns the file contents as bytes.
    """

    # Registry of known backends (kept for introspection/extension).
    _backends = {'disk': _HardDiskBackend, 'lmdb': _LmdbBackend}

    def __init__(self, backend='disk', **kwargs):
        if backend not in self._backends:
            raise ValueError(f'Backend {backend} not supported.')
        # Only the lmdb backend takes constructor kwargs (db paths, keys...).
        self.client = (_LmdbBackend(**kwargs) if backend == 'lmdb'
                       else _HardDiskBackend())
        self.backend = backend

    def get(self, filepath, client_key='default'):
        """Fetch bytes; `client_key` selects the lmdb env (ignored on disk)."""
        if self.backend == 'lmdb':
            return self.client.get(filepath, client_key)
        return self.client.get(filepath)
def dict2str(input_dict, indent=0):
    """Pretty-print a (possibly nested) options dict as an indented string."""
    pad = ' ' * indent
    lines = []
    for key, value in input_dict.items():
        if isinstance(value, dict):
            # Recurse two spaces deeper, bracketing the nested dict.
            lines.append(f'{pad}{key}:[\n')
            lines.append(dict2str(value, indent + 2))
            lines.append(f'{pad} ]\n')
        else:
            lines.append(f'{pad}{key}: {value}\n')
    return ''.join(lines)
class PrefetchGenerator(threading.Thread):
    """A generator wrapper that prefetches items on a background thread.

    Ref:
        https://stackoverflow.com/questions/7323664/python-generator-pre-fetch

    Args:
        generator: Python generator to consume.
        num_prefetch_queue (int): Maximum number of prefetched items.
    """

    def __init__(self, generator, num_prefetch_queue):
        threading.Thread.__init__(self)
        self.queue = Queue.Queue(num_prefetch_queue)
        self.generator = generator
        self.daemon = True  # don't keep the interpreter alive for this thread
        self.start()

    def run(self):
        # Producer side: drain the wrapped generator into the bounded queue;
        # a trailing None marks exhaustion.
        for item in self.generator:
            self.queue.put(item)
        self.queue.put(None)

    def __next__(self):
        item = self.queue.get()
        if item is None:
            raise StopIteration
        return item

    def __iter__(self):
        return self
class PrefetchDataLoader(DataLoader):
    """DataLoader whose iterator prefetches batches on a background thread.

    Ref:
        https://github.com/IgorSusmelj/pytorch-styleguide/issues/5#

    TODO:
        Needs testing on single GPU and DDP (multi-GPU); a DDP issue is
        known.

    Args:
        num_prefetch_queue (int): Number of prefetch queue slots.
        kwargs (dict): Standard DataLoader arguments.
    """

    def __init__(self, num_prefetch_queue, **kwargs):
        self.num_prefetch_queue = num_prefetch_queue
        super(PrefetchDataLoader, self).__init__(**kwargs)

    def __iter__(self):
        # Wrap the normal iterator so batches are produced ahead of time.
        base_iter = super().__iter__()
        return PrefetchGenerator(base_iter, self.num_prefetch_queue)
class CPUPrefetcher():
    """Thin wrapper that steps a dataloader, returning None at exhaustion.

    Args:
        loader: Dataloader to iterate.
    """

    def __init__(self, loader):
        self.ori_loader = loader
        self.loader = iter(loader)

    def next(self):
        """Return the next batch, or None once the loader is exhausted."""
        try:
            return next(self.loader)
        except StopIteration:
            return None

    def reset(self):
        """Restart iteration from the beginning of the original loader."""
        self.loader = iter(self.ori_loader)
class CUDAPrefetcher():
    """CUDA prefetcher: overlaps host-to-device copies with computation.

    Ref:
        https://github.com/NVIDIA/apex/issues/304#

    It may consume more GPU memory (the next batch is resident early).

    Args:
        loader: Dataloader.
        opt (dict): Options; opt['num_gpu'] selects cuda vs cpu device.
    """

    def __init__(self, loader, opt):
        self.ori_loader = loader
        self.loader = iter(loader)
        self.opt = opt
        # Dedicated side stream so copies can overlap the default stream.
        self.stream = torch.cuda.Stream()
        self.device = torch.device('cuda' if opt['num_gpu'] != 0 else 'cpu')
        self.preload()

    def preload(self):
        """Fetch the next batch and start its async copy on the side stream."""
        try:
            self.batch = next(self.loader)
        except StopIteration:
            self.batch = None
            return None
        with torch.cuda.stream(self.stream):
            for k, v in self.batch.items():
                if torch.is_tensor(v):
                    # non_blocking lets the copy run asynchronously on the
                    # side stream.
                    self.batch[k] = self.batch[k].to(device=self.device, non_blocking=True)

    def next(self):
        """Return the prefetched batch (or None) and kick off the next copy."""
        # Block the compute stream until the async copy has finished.
        torch.cuda.current_stream().wait_stream(self.stream)
        batch = self.batch
        self.preload()
        return batch

    def reset(self):
        """Restart iteration and prefetch the first batch again."""
        self.loader = iter(self.ori_loader)
        self.preload()
def make_lmdb_from_imgs(img_dir, lmdb_path, img_path_list, keys, batch=5000, compress_level=1, multiprocessing_read=False, map_size=None):
    """Make an LMDB database from images.

    Args:
        img_dir (str): Image root dir.
        lmdb_path (str): LMDB save path; must end with ``.lmdb``.
        img_path_list (list[str]): Image subpaths under ``img_dir``.
        keys (list[str]): LMDB keys, aligned with ``img_path_list``.
        batch (int): Commit the lmdb transaction every ``batch`` images.
        compress_level (int): PNG compress level (0-9; 0 = no compression).
        multiprocessing_read (bool): If True, read all images into memory with
            a multiprocessing pool first (requires enough RAM).
        map_size (int | None): Map size for the lmdb env. If None, it is
            estimated as 10x the size of the first encoded image times the
            number of images. Default: None.

    Resulting layout::

        example.lmdb
        ├── data.mdb
        ├── lock.mdb
        └── meta_info.txt

    Each ``meta_info.txt`` line records: image name (with extension), image
    shape (H,W,C) and compression level, separated by spaces, e.g.::

        00001/0001/im1.png (256,448,3) 1

    The lmdb key is the name without extension: ``00001/0001/im1``.
    """
    assert (len(img_path_list) == len(keys)), f'img_path_list and keys should have the same length, but got {len(img_path_list)} and {len(keys)}'
    assert lmdb_path.endswith('.lmdb'), "lmdb_path must end with '.lmdb'."
    assert (not op.exists(lmdb_path)), f'Folder {lmdb_path} already exists. Exit.'
    num_img = len(img_path_list)
    if multiprocessing_read:

        def _callback(arg):
            'Register imgs and shapes into the dict & update pbar.'
            (key, img_byte, img_shape) = arg
            (dataset[key], shapes[key]) = (img_byte, img_shape)
            pbar.set_description(f'Read {key}')
            pbar.update(1)
        # Read every image in parallel; results land in dataset/shapes via
        # the callback (runs in the main process, so no locking needed).
        dataset = {}
        shapes = {}
        pbar = tqdm(total=num_img, ncols=80)
        pool = Pool()
        for (path, key) in zip(img_path_list, keys):
            pool.apply_async(_read_img_worker, args=(op.join(img_dir, path), key, compress_level), callback=_callback)
        pool.close()
        pool.join()
        pbar.close()
    if (map_size is None):
        # Estimate map size from the first image: encoded size x count x 10
        # (assumes all images are roughly the same size — TODO confirm).
        img = cv2.imread(op.join(img_dir, img_path_list[0]), cv2.IMREAD_UNCHANGED)
        (_, img_byte) = cv2.imencode('.png', img, [cv2.IMWRITE_PNG_COMPRESSION, compress_level])
        data_size_per_img = img_byte.nbytes
        data_size = (data_size_per_img * len(img_path_list))
        map_size = (data_size * 10)
    env = lmdb.open(lmdb_path, map_size=map_size)
    txn = env.begin(write=True)
    txt_file = open(op.join(lmdb_path, 'meta_info.txt'), 'w')
    pbar = tqdm(total=num_img, ncols=80)
    for (idx, (path, key)) in enumerate(zip(img_path_list, keys)):
        pbar.set_description(f'Write {key}')
        pbar.update(1)
        if multiprocessing_read:
            # Already read and encoded above.
            img_byte = dataset[key]
            (h, w, c) = shapes[key]
        else:
            # Lazy path: read and encode one image at a time.
            (_, img_byte, img_shape) = _read_img_worker(op.join(img_dir, path), key, compress_level)
            (h, w, c) = img_shape
        key_byte = key.encode('ascii')
        txn.put(key_byte, img_byte)
        txt_file.write(f'''{key}.png ({h},{w},{c}) {compress_level}
''')
        # Periodic commit keeps the write transaction bounded.
        if ((idx % batch) == 0):
            txn.commit()
            txn = env.begin(write=True)
    pbar.close()
    txn.commit()
    env.close()
    txt_file.close()
def _read_img_worker(path, key, compress_level):
    """Read one image and re-encode it as PNG bytes (multiprocessing worker).

    Args:
        path (str): Image path.
        key (str): Image key.
        compress_level (int): PNG compress level (0-9).

    Returns:
        str: Image key.
        byte: PNG-encoded image bytes.
        tuple[int]: Image shape (h, w, c).

    Note: keep this function at module level — a function defined inside
    another function cannot be pickled for multiprocessing, so it could not
    run in parallel.
    """
    img = cv2.imread(path, cv2.IMREAD_UNCHANGED)
    if (img.ndim == 2):
        # Grayscale image: no channel axis, report a single channel.
        (h, w) = img.shape
        c = 1
    else:
        (h, w, c) = img.shape
    (_, img_byte) = cv2.imencode('.png', img, [cv2.IMWRITE_PNG_COMPRESSION, compress_level])
    return (key, img_byte, (h, w, c))
def _read_y_from_yuv_worker(video_path, yuv_type, h, w, index_frame, key, compress_level):
    """Read one Y (luma) frame from a raw YUV video and PNG-encode it.

    Multiprocessing worker; keep it at module level — a nested function
    cannot be pickled, so it could not run in parallel.

    Args:
        video_path (str): Path to the raw YUV file.
        yuv_type (str): YUV sampling type, e.g. '420p'.
        h (int | None): Frame height. When None, width/height are parsed from
            the file name, assumed to look like ``name_<w>x<h>_...``
            (second underscore-separated token) — TODO confirm naming scheme.
        w (int | None): Frame width (see ``h``).
        index_frame (int): Index of the frame to read.
        key (str): LMDB key associated with this frame.
        compress_level (int): PNG compress level (0-9).

    Returns:
        tuple: (key, PNG-encoded bytes, (h, w, 1)).
    """
    # Fixed: use identity comparison with None ('h is None') instead of '=='.
    if h is None:
        (w, h) = [int(k) for k in op.basename(video_path).split('_')[1].split('x')]
    img = import_yuv(seq_path=video_path, yuv_type=yuv_type, h=h, w=w, tot_frm=1, start_frm=index_frame, only_y=True)
    # import_yuv returns a frame-dim array; squeeze down to (h, w).
    img = np.squeeze(img)
    c = 1
    (_, img_byte) = cv2.imencode('.png', img, [cv2.IMWRITE_PNG_COMPRESSION, compress_level])
    return (key, img_byte, (h, w, c))
def make_y_lmdb_from_yuv(video_path_list, index_frame_list, key_list, lmdb_path, yuv_type='420p', h=None, w=None, batch=7000, compress_level=1, multiprocessing_read=False, map_size=None):
    """Make an LMDB database of Y (luma) frames extracted from YUV videos.

    Args:
        video_path_list (list[str]): One video path per frame to extract.
        index_frame_list (list[int]): Frame index inside each video.
        key_list (list[str]): LMDB keys, aligned with the two lists above.
        lmdb_path (str): LMDB save path; must end with ``.lmdb``.
        yuv_type (str): YUV sampling type. Default: '420p'.
        h, w (int | None): Frame height/width; parsed from file names by the
            worker when None.
        batch (int): Commit the lmdb transaction every ``batch`` frames.
        compress_level (int): PNG compress level (0-9).
        multiprocessing_read (bool): Must be True (only path implemented).
        map_size (int | None): Map size for the lmdb env. If None, estimated
            from the largest frame. Default: None.
    """
    assert lmdb_path.endswith('.lmdb'), "lmdb_path must end with '.lmdb'."
    assert (not op.exists(lmdb_path)), f'Folder {lmdb_path} already exists.'
    num_img = len(key_list)
    # Only the multiprocessing read path exists.
    assert multiprocessing_read, 'Not implemented.'

    def _callback(arg):
        'Register imgs and shapes into the dict & update pbar.'
        (key, img_byte, img_shape) = arg
        (dataset[key], shapes[key]) = (img_byte, img_shape)
        pbar.set_description(f'Reading {key}')
        pbar.update(1)
    # Read every frame in parallel; the callback (main process) fills the dicts.
    dataset = {}
    shapes = {}
    pbar = tqdm(total=num_img, ncols=80)
    pool = Pool()
    for iter_frm in range(num_img):
        pool.apply_async(_read_y_from_yuv_worker, args=(video_path_list[iter_frm], yuv_type, h, w, index_frame_list[iter_frm], key_list[iter_frm], compress_level), callback=_callback)
    pool.close()
    pool.join()
    pbar.close()
    if (map_size is None):
        # Estimate map size from the largest frame: size x count x 10.
        biggest_index = 0
        biggest_size = 0
        for iter_img in range(num_img):
            vid_path = video_path_list[iter_img]
            # NOTE(review): w/h are parsed only once (while w is None) and then
            # cached, so videos after the first are assumed to share the same
            # resolution — confirm, looks like a latent bug for mixed inputs.
            # Also note the file-name parsing here (split('_')[-2]) differs
            # from the worker's split('_')[1] — verify both match the naming.
            if (w == None):
                (w, h) = map(int, vid_path.split('.')[(- 2)].split('_')[(- 2)].split('x'))
            img_size = (w * h)
            if (img_size > biggest_size):
                biggest_size = img_size
                biggest_index = iter_img
        (_, img_byte, _) = _read_y_from_yuv_worker(video_path_list[biggest_index], yuv_type, h, w, index_frame_list[biggest_index], key_list[biggest_index], compress_level)
        data_size_per_img = img_byte.nbytes
        data_size = (data_size_per_img * num_img)
        map_size = (data_size * 10)
    env = lmdb.open(lmdb_path, map_size=map_size)
    txn = env.begin(write=True)
    txt_file = open(op.join(lmdb_path, 'meta_info.txt'), 'w')
    pbar = tqdm(total=num_img, ncols=80)
    for (idx, key) in enumerate(key_list):
        pbar.set_description(f'Writing {key}')
        pbar.update(1)
        img_byte = dataset[key]
        (h, w, c) = shapes[key]
        key_byte = key.encode('ascii')
        txn.put(key_byte, img_byte)
        txt_file.write(f'''{key} ({h},{w},{c}) {compress_level}
''')
        # Periodic commit keeps the write transaction bounded.
        if ((idx % batch) == 0):
            txn.commit()
            txn = env.begin(write=True)
    pbar.close()
    txn.commit()
    env.close()
    txt_file.close()
def calculate_psnr(img0, img1, data_range=None):
    """Return the PSNR (Peak Signal-to-Noise Ratio) between two images.

    Args:
        img0 (ndarray): First image.
        img1 (ndarray): Second image.
        data_range (int, optional): Distance between minimum and maximum
            possible values; estimated from the image dtype when omitted.

    Returns:
        float: PSNR value.
    """
    return skm.peak_signal_noise_ratio(img0, img1, data_range=data_range)
def calculate_ssim(img0, img1, data_range=None):
    """Return the SSIM (Structural SIMilarity) between two images.

    Args:
        img0 (ndarray): First image.
        img1 (ndarray): Second image.
        data_range (int, optional): Distance between minimum and maximum
            possible values; estimated from the image dtype when omitted.

    Returns:
        float: SSIM value.
    """
    return skm.structural_similarity(img0, img1, data_range=data_range)
def calculate_mse(img0, img1):
    """Return the MSE (Mean Square Error) between two images.

    Args:
        img0 (ndarray): First image.
        img1 (ndarray): Second image.

    Returns:
        float: MSE value.
    """
    return skm.mean_squared_error(img0, img1)
def mkdir(dir_path):
    """Create a directory (including missing parents).

    Args:
        dir_path (str): Path of the directory to create.

    Raises:
        FileExistsError: If ``dir_path`` already exists.
    """
    # Explicit check instead of ``assert``: asserts are stripped under
    # ``python -O`` and would silently skip this validation. FileExistsError
    # is also what os.makedirs itself raises for an existing path.
    if op.exists(dir_path):
        raise FileExistsError(f'Dir already exists: {dir_path}')
    os.makedirs(dir_path)
def get_timestr():
    """Return the current local time as a ``YYYYMMDD_HHMMSS`` string."""
    now = time.localtime()
    return time.strftime('%Y%m%d_%H%M%S', now)
class Timer():
    """Simple wall-clock timer with an interval accumulator."""

    def __init__(self):
        self.reset()

    def reset(self):
        # Zero the accumulator and open a fresh interval.
        self.accum_time = 0
        self.start_time = time.time()

    def restart(self):
        # Open a fresh interval without touching the accumulator.
        self.start_time = time.time()

    def accum(self):
        # Add the elapsed time of the current interval to the accumulator.
        self.accum_time = self.accum_time + (time.time() - self.start_time)

    def get_time(self):
        # Current wall-clock timestamp.
        return time.time()

    def get_interval(self):
        # Seconds elapsed since the interval was (re)started.
        return time.time() - self.start_time

    def get_accum(self):
        return self.accum_time
class Counter():
    """Running-average counter: tracks call count and accumulated volume."""

    def __init__(self):
        self.reset()

    def reset(self):
        # ``time`` counts how many times accum() was called.
        self.time = 0
        self.accum_volume = 0

    def accum(self, volume):
        self.time += 1
        self.accum_volume += volume

    def get_ave(self):
        # NOTE(review): raises ZeroDivisionError if called before any accum().
        return self.accum_volume / self.time
def generate_k_iclause(n, k):
    """Sample a random clause of ``min(n, k)`` distinct literals over n vars.

    Each chosen 0-based variable ``v`` becomes the 1-based literal ``v + 1``,
    negated with probability 0.5.
    """
    chosen = np.random.choice(n, size=min(n, k), replace=False)
    literals = []
    for v in chosen:
        sign = 1 if random.random() < 0.5 else -1
        literals.append(sign * (v + 1))
    return literals
def gen_iclause_pair(args, n):
    """Generate an UNSAT/SAT clause pair over ``n`` variables (SR distribution).

    Clauses are added until the formula turns unsatisfiable; the final clause
    is returned as ``iclause_unsat``, and flipping its first literal yields
    the satisfiable variant ``iclause_sat``.
    """
    solver = minisolvers.MinisatSolver()
    for _ in range(n):
        solver.new_var(dvar=True)

    iclauses = []
    while True:
        # Clause length: 1 or 2 (prob. p_k_2) plus a geometric tail.
        k_base = 1 if random.random() < args.p_k_2 else 2
        k = k_base + np.random.geometric(args.p_geo)
        iclause = generate_k_iclause(n, k)
        solver.add_clause(iclause)
        if not solver.solve():
            break
        iclauses.append(iclause)

    iclause_unsat = iclause
    iclause_sat = [-iclause_unsat[0]] + iclause_unsat[1:]
    return (n, iclauses, iclause_unsat, iclause_sat)
def generate(args):
    """Generate batches of SAT/UNSAT problems across a range of variable counts.

    For each n in [args.min_n, args.max_n], generates its share of the
    args.n_pairs problems, flips a coin to append the unsat or sat closing
    clause, and packs problems into batches bounded by
    args.max_nodes_per_batch (or one problem per batch when args.one).
    Progress is logged to args.gen_log. Returns the list of batches.
    """
    # NOTE(review): this file handle is never closed — consider a with-block.
    f = open(args.gen_log, 'w')
    n_cnt = ((args.max_n - args.min_n) + 1)
    problems_per_n = ((args.n_pairs * 1.0) / n_cnt)
    problems = []
    batches = []
    n_nodes_in_batch = 0
    prev_n_vars = None
    for n_var in range(args.min_n, (args.max_n + 1)):
        # Slice of the global problem budget assigned to this n_var.
        lower_bound = int(((n_var - args.min_n) * problems_per_n))
        upper_bound = int((((n_var - args.min_n) + 1) * problems_per_n))
        for problems_idx in range(lower_bound, upper_bound):
            (n_vars, iclauses, iclause_unsat, iclause_sat) = gen_iclause_pair(args, n_var)
            # Coin flip: make this instance UNSAT or SAT.
            if (random.random() < 0.5):
                iclauses.append(iclause_unsat)
            else:
                iclauses.append(iclause_sat)
            n_clauses = len(iclauses)
            n_cells = sum([len(iclause) for iclause in iclauses])
            # Graph-node count: one node per literal polarity plus per clause.
            n_nodes = ((2 * n_vars) + n_clauses)
            # Skip problems too large to ever fit in a batch.
            if (n_nodes > args.max_nodes_per_batch):
                continue
            # Flush the pending batch when: single-problem mode, the variable
            # count changed, or adding this problem would overflow the budget.
            batch_ready = False
            if (args.one and (len(problems) > 0)):
                batch_ready = True
            elif (prev_n_vars and (n_vars != prev_n_vars)):
                batch_ready = True
            elif ((not args.one) and ((n_nodes_in_batch + n_nodes) > args.max_nodes_per_batch)):
                batch_ready = True
            if batch_ready:
                batches.append(mk_batch_problem(problems))
                print(('batch %d done (%d vars, %d problems)...' % (len(batches), prev_n_vars, len(problems))), file=f)
                del problems[:]
                n_nodes_in_batch = 0
            prev_n_vars = n_vars
            (is_sat, stats) = solve_sat(n_vars, iclauses)
            # NOTE(review): the name template hard-codes 'sat=0' regardless of
            # the actual is_sat label — confirm this is intended.
            problems.append((('sr_n=%.4d_pk2=%.2f_pg=%.2f_t=%d_sat=0' % (n_vars, args.p_k_2, args.p_geo, problems_idx)), n_vars, iclauses, is_sat))
            n_nodes_in_batch += n_nodes
    # Flush the final partial batch, if any.
    if (len(problems) > 0):
        batches.append(mk_batch_problem(problems))
        print(('batch %d done (%d vars, %d problems)...' % (len(batches), n_vars, len(problems))), file=f)
        del problems[:]
    return batches
def load_model(args, log_file=None):
    """Build a NeuroSAT network on GPU, optionally restoring a checkpoint.

    Args:
        args: Options; ``args.restore`` is a checkpoint path (falsy to skip).
        log_file: Optional open file to log the restore message to.

    Returns:
        The (possibly restored) network on CUDA.
    """
    net = NeuroSAT(args).cuda()
    if args.restore:
        if log_file is not None:
            print('restoring from', args.restore, file=log_file, flush=True)
        checkpoint = torch.load(args.restore)
        net.load_state_dict(checkpoint['state_dict'])
    return net
def predict(net, data):
    """Run inference; return hard 0/1 predictions and soft vote scores.

    Args:
        net: Trained network exposing a ``vote`` attribute after forward.
        data: Input batch for the network.

    Returns:
        tuple of ndarrays: (binary predictions, vote probabilities).
    """
    net.eval()
    outputs = net(data)
    probs = net.vote
    # Threshold at 0.5 into explicit 1.0 / 0.0 tensors on the GPU.
    ones = torch.ones(outputs.shape).cuda()
    zeros = torch.zeros(outputs.shape).cuda()
    preds = torch.where(outputs > 0.5, ones, zeros)
    return (preds.cpu().detach().numpy(), probs.cpu().detach().numpy())
class MLP(nn.Module):
    """Three-layer perceptron: in_dim -> hidden_dim -> hidden_dim -> out_dim.

    NOTE(review): there is no activation between the linear layers, so the
    stack is mathematically a single affine map — confirm this is intended.
    """

    def __init__(self, in_dim, hidden_dim, out_dim):
        super(MLP, self).__init__()
        self.l1 = nn.Linear(in_dim, hidden_dim)
        self.l2 = nn.Linear(hidden_dim, hidden_dim)
        self.l3 = nn.Linear(hidden_dim, out_dim)

    def forward(self, x):
        # Chain the three layers directly.
        return self.l3(self.l2(self.l1(x)))
def solve_sat(n_vars, iclauses):
    """Solve a CNF formula with MiniSat.

    Args:
        n_vars (int): Number of variables.
        iclauses: Iterable of integer clauses (1-based literals).

    Returns:
        tuple: (is_sat (bool), solver statistics).
    """
    solver = minisolvers.MinisatSolver()
    for _ in range(n_vars):
        solver.new_var(dvar=True)
    for clause in iclauses:
        solver.add_clause(clause)
    # Evaluated left-to-right: solve first, then collect stats.
    return (solver.solve(), solver.get_stats())
@dataclass
class AdversarialOptBase():
    """Base (marker) dataclass that adversarial-training option classes extend."""
    pass
@dataclass
class FGSMOpt(AdversarialOptBase):
    """Options for FGSM (Fast Gradient Sign Method) adversarial training."""
    # Magnitude of the perturbation applied along the gradient sign.
    eps: float = field(default=1e-05, metadata={'help': 'The noise coefficient to multiply the sign of gradient.Controls the extent of noise.'})
@dataclass
class FGMOpt(AdversarialOptBase):
    """Options for FGM (Fast Gradient Method) adversarial training."""
    # Magnitude of the perturbation applied along the normalized gradient.
    eps: float = field(default=1e-05, metadata={'help': 'The noise coefficient to multiply the sign of gradient divided by its norm.Controls the extent of noise.'})
@dataclass
class FreeLBOpt(AdversarialOptBase):
    """Options for FreeLB adversarial training.

    Invalid values are reported via ``logging.error`` in ``__post_init__``
    (not raised — presumably intentional best-effort validation; confirm).
    """
    # Initial perturbation magnitude for the embeddings.
    adv_init_msg: float = field(default=0, metadata={'help': '\n TO DO.\n \n '})
    # Norm used to project the perturbation: 'l2' or 'linf'.
    norm_type: str = field(default='l2', metadata={'help': '\n The norm to use. \n Must be either l2 or linf.\n Default is l2.\n '})
    # Number of inner adversarial ascent steps.
    adv_steps: int = field(default=5, metadata={'help': '\n The number of adversarial training steps. \n Default is 5.\n '})
    # Fixed: annotated ``int`` but the default (1e-5) and semantics are a
    # learning rate, i.e. a float.
    adv_lr: float = field(default=1e-05, metadata={'help': '\n The learning rate for the adversarial training steps.\n Default value is 1e-5\n '})

    def __post_init__(self):
        # Validate the configured values; log (do not raise) on problems.
        if (self.norm_type not in {'l2', 'linf'}):
            logging.error('The norm type must be either l2 or linf')
        if (self.adv_steps <= 0):
            logging.error('The adversarial training steps must be an integer that is larger than 0')
        if (self.adv_lr <= 0):
            logging.error('The adversarial learning rate must be larger than 0.')
def is_torch_available():
    """Return whether PyTorch was importable (module-level probe result)."""
    return _torch_available
def is_tf_available():
    """Return whether TensorFlow was importable (module-level probe result)."""
    return _tf_available
def is_torch_tpu_available():
    """Return whether torch TPU support was detected (module-level probe result)."""
    return _torch_tpu_available
def is_psutil_available():
    """Return whether psutil was importable (module-level probe result)."""
    return _psutil_available
def is_py3nvml_available():
    """Return whether py3nvml was importable (module-level probe result)."""
    return _py3nvml_available
def is_apex_available():
    """Return whether NVIDIA apex was importable (module-level probe result)."""
    return _has_apex
def add_start_docstrings(*docstr):
    """Decorator factory that prepends ``docstr`` to a function's docstring.

    Functions without a docstring are treated as having an empty one.
    """
    def docstring_decorator(fn):
        existing = fn.__doc__ if fn.__doc__ is not None else ''
        fn.__doc__ = ''.join(docstr) + existing
        return fn
    return docstring_decorator
def add_start_docstrings_to_callable(*docstr):
    """Decorator factory for model ``forward`` methods.

    Prepends a cross-reference to the owning class, a usage note, and
    ``docstr`` to the decorated function's docstring.
    """
    def docstring_decorator(fn):
        # Owning class name is the first component of the qualified name.
        model_class = fn.__qualname__.split('.')[0]
        class_name = f':class:`~transformers.{model_class}`'
        intro = f' The {class_name} forward method, overrides the :func:`__call__` special method.'
        note = '\n .. note::\n Although the recipe for forward pass needs to be defined within\n this function, one should call the :class:`Module` instance afterwards\n instead of this since the former takes care of running the\n pre and post processing steps while the latter silently ignores them.\n '
        existing = fn.__doc__ if fn.__doc__ is not None else ''
        fn.__doc__ = intro + note + ''.join(docstr) + existing
        return fn
    return docstring_decorator
def add_end_docstrings(*docstr):
    """Decorator factory that appends ``docstr`` to a function's docstring.

    Returns:
        The decorator; it mutates ``fn.__doc__`` in place and returns ``fn``.
    """
    def docstring_decorator(fn):
        # Guard against fn.__doc__ being None (undocumented functions):
        # ``None + str`` raised TypeError here, while the start-docstring
        # helpers in this file already handle the None case.
        fn.__doc__ = (fn.__doc__ or '') + ''.join(docstr)
        return fn
    return docstring_decorator
def _prepare_output_docstrings(output_type, config_class):
    """Prepare the return part of a docstring from ``output_type``'s docstring.

    Everything up to (and including) the ``Args:``/``Parameters:`` header of
    ``output_type.__doc__`` is dropped, and a formatted introduction is
    prepended.
    """
    docstrings = output_type.__doc__
    lines = docstrings.split('\n')
    header_re = re.compile(r'^\s*(Args|Parameters):\s*$')
    idx = 0
    while idx < len(lines) and header_re.search(lines[idx]) is None:
        idx += 1
    if idx < len(lines):
        # Keep only the field descriptions below the header.
        docstrings = '\n'.join(lines[idx + 1:])
    full_output_type = f'{output_type.__module__}.{output_type.__name__}'
    intro = RETURN_INTRODUCTION.format(full_output_type=full_output_type, config_class=config_class)
    return intro + docstrings
def add_code_sample_docstrings(*docstr, tokenizer_class=None, checkpoint=None, output_type=None, config_class=None):
    """Decorator factory: appends ``docstr``, an optional return-type section
    and a ready-made usage example to the decorated method's docstring.
    """
    def docstring_decorator(fn):
        model_class = fn.__qualname__.split('.')[0]
        # TensorFlow model classes are prefixed with "TF".
        is_tf_class = model_class.startswith('TF')
        # Order matters: specific task heads are matched before generic 'Model'.
        if 'SequenceClassification' in model_class:
            code_sample = TF_SEQUENCE_CLASSIFICATION_SAMPLE if is_tf_class else PT_SEQUENCE_CLASSIFICATION_SAMPLE
        elif 'QuestionAnswering' in model_class:
            code_sample = TF_QUESTION_ANSWERING_SAMPLE if is_tf_class else PT_QUESTION_ANSWERING_SAMPLE
        elif 'TokenClassification' in model_class:
            code_sample = TF_TOKEN_CLASSIFICATION_SAMPLE if is_tf_class else PT_TOKEN_CLASSIFICATION_SAMPLE
        elif 'MultipleChoice' in model_class:
            code_sample = TF_MULTIPLE_CHOICE_SAMPLE if is_tf_class else PT_MULTIPLE_CHOICE_SAMPLE
        elif 'MaskedLM' in model_class:
            code_sample = TF_MASKED_LM_SAMPLE if is_tf_class else PT_MASKED_LM_SAMPLE
        elif 'LMHead' in model_class:
            code_sample = TF_CAUSAL_LM_SAMPLE if is_tf_class else PT_CAUSAL_LM_SAMPLE
        elif 'Model' in model_class:
            code_sample = TF_BASE_MODEL_SAMPLE if is_tf_class else PT_BASE_MODEL_SAMPLE
        else:
            raise ValueError(f"Docstring can't be built for model {model_class}")
        output_doc = '' if output_type is None else _prepare_output_docstrings(output_type, config_class)
        built_doc = code_sample.format(model_class=model_class, tokenizer_class=tokenizer_class, checkpoint=checkpoint)
        fn.__doc__ = (fn.__doc__ or '') + ''.join(docstr) + output_doc + built_doc
        return fn
    return docstring_decorator
def replace_return_docstrings(output_type=None, config_class=None):
    """Decorator factory that fills an empty ``Return:``/``Returns:`` line in
    the decorated function's docstring with a generated return section.

    Raises:
        ValueError: If no such placeholder line exists in the docstring.
    """
    def docstring_decorator(fn):
        docstrings = fn.__doc__
        lines = docstrings.split('\n')
        i = 0
        # Locate the placeholder line.
        while i < len(lines) and re.search(r'^\s*Returns?:\s*$', lines[i]) is None:
            i += 1
        if i >= len(lines):
            raise ValueError(f"The function {fn} should have an empty 'Return:' or 'Returns:' in its docstring as placeholder, current docstring is:\n{docstrings}")
        lines[i] = _prepare_output_docstrings(output_type, config_class)
        fn.__doc__ = '\n'.join(lines)
        return fn
    return docstring_decorator
def is_remote_url(url_or_filename):
    """Return True if the argument parses as an http(s) URL."""
    scheme = urlparse(url_or_filename).scheme
    return scheme in ('http', 'https')
def hf_bucket_url(model_id: str, filename: str, use_cdn=True) -> str:
    """Resolve a model identifier and a file name to a HF-hosted url
    on either S3 or Cloudfront (a Content Delivery Network, or CDN).

    Cloudfront is replicated over the globe so downloads are way faster for
    the end user (and it also lowers our bandwidth costs). However, it is
    more aggressively cached by default, so may not always reflect the latest
    changes to the underlying file (default TTL is 24 hours).

    In terms of client-side caching from this library, even though Cloudfront
    relays the ETags from S3, using one or the other (or switching from one
    to the other) will affect caching: cached files are not shared between
    the two because the cached file's name contains a hash of the url.
    """
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = '/' not in model_id
    # Fixed: the format strings contained the literal text "(unknown)" and the
    # ``filename`` parameter was unused — restore the filename interpolation.
    if legacy_format:
        # Legacy (un-namespaced) ids: "<endpoint>/<model_id>-<filename>".
        return f'{endpoint}/{model_id}-{filename}'
    else:
        # Namespaced ids: "<endpoint>/<model_id>/<filename>".
        return f'{endpoint}/{model_id}/{filename}'
def url_to_filename(url, etag=None):
    """Convert ``url`` into a repeatable hashed filename.

    If ``etag`` is given, its hash is appended to the url's hash, delimited
    by a period. If the url ends with ``.h5`` (Keras HDF5 weights), a final
    ``.h5`` suffix is added so TF 2.0 can identify the file as HDF5 (see
    https://github.com/tensorflow/tensorflow/blob/00fad90125b18b80fe054de1055770cfb8fe4ba3/tensorflow/python/keras/engine/network.py#L1380).
    """
    filename = sha256(url.encode('utf-8')).hexdigest()
    if etag:
        filename = filename + '.' + sha256(etag.encode('utf-8')).hexdigest()
    if url.endswith('.h5'):
        filename += '.h5'
    return filename
def filename_to_url(filename, cache_dir=None):
    """Return the url and etag (which may be ``None``) stored for ``filename``.

    Raises:
        EnvironmentError: If ``filename`` or its ``.json`` metadata sidecar
            does not exist in the cache directory.
    """
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    cache_path = os.path.join(cache_dir, filename)
    if not os.path.exists(cache_path):
        raise EnvironmentError(f'file {cache_path} not found')

    # Metadata lives next to the cached file as "<name>.json".
    meta_path = cache_path + '.json'
    if not os.path.exists(meta_path):
        raise EnvironmentError(f'file {meta_path} not found')

    with open(meta_path, encoding='utf-8') as meta_file:
        metadata = json.load(meta_file)
    return (metadata['url'], metadata['etag'])