code
stringlengths
17
6.64M
def parse_args():
    """Build and parse the CLI options for REDS dataset preprocessing."""
    parser = argparse.ArgumentParser(
        description='Preprocess REDS datasets',
        epilog='You can first download REDS datasets using the script from:'
               'https://gist.github.com/SeungjunNah/b10d369b92840cb8dd2118dd4f41d643')
    parser.add_argument('--root-path', type=str, help='root path for REDS')
    parser.add_argument('--make-lmdb', action='store_true', help='create lmdb files')
    return parser.parse_args()
def make_lmdb(mode, data_path, lmdb_path, train_list, batch=5000, compress_level=1):
    """Create lmdb for the Vimeo90K dataset.

    Contents of lmdb. The file structure is:
    example.lmdb
    ├── data.mdb
    ├── lock.mdb
    ├── meta_info.txt

    The data.mdb and lock.mdb are standard lmdb files and you can refer to
    https://lmdb.readthedocs.io/en/release/ for more details.

    The meta_info.txt records, per line: 1) image name (with extension),
    2) image shape, and 3) compression level, separated by a white space, e.g.
    `000_00000000.png (720,1280,3) 1`. We use the image name without
    extension as the lmdb key.

    Args:
        mode (str): Dataset mode. 'gt' or 'lq'.
        data_path (str): Data path for reading images.
        lmdb_path (str): Lmdb save path.
        train_list (str): Train list path for Vimeo90K datasets.
        batch (int): After processing batch images, lmdb commits.
            Default: 5000.
        compress_level (int): Compress level when encoding images. Default: 1.
    """
    print(f'Create lmdb for {data_path}, save to {lmdb_path}...')
    # Expected frame size: GT is 256x448, LQ is the 4x-downscaled 64x112.
    if (mode == 'gt'):
        (h_dst, w_dst) = (256, 448)
    else:
        (h_dst, w_dst) = (64, 112)
    # Refuse to overwrite an existing lmdb folder.
    if osp.exists(lmdb_path):
        print(f'Folder {lmdb_path} already exists. \nExit.')
        sys.exit(1)
    print('Reading image path list ...')
    with open(train_list) as f:
        train_list = [line.strip() for line in f]
    all_img_list = []
    keys = []
    # Each clip is '<folder>/<sub_folder>' and contains 7 frames im1..im7.
    for line in train_list:
        (folder, sub_folder) = line.split('/')
        for j in range(1, 8):
            all_img_list.append(osp.join(data_path, folder, sub_folder, f'im{j}.png'))
            keys.append('{}_{}_{}'.format(folder, sub_folder, j))
    all_img_list = sorted(all_img_list)
    keys = sorted(keys)
    # GT mode stores only the center (4th) frame of every clip.
    if (mode == 'gt'):
        print('Only keep the 4th frame for gt mode.')
        all_img_list = [v for v in all_img_list if v.endswith('im4.png')]
        keys = [v for v in keys if v.endswith('_4')]
    # Estimate the lmdb map size from the encoded size of the first image,
    # with a 10x headroom factor.
    img = mmcv.imread(osp.join(data_path, all_img_list[0]), flag='unchanged')
    (_, img_byte) = cv2.imencode('.png', img, [cv2.IMWRITE_PNG_COMPRESSION, compress_level])
    data_size_per_img = img_byte.nbytes
    print('Data size per image is: ', data_size_per_img)
    data_size = (data_size_per_img * len(all_img_list))
    env = lmdb.open(lmdb_path, map_size=(data_size * 10))
    pbar = mmcv.ProgressBar(len(all_img_list))
    txn = env.begin(write=True)
    txt_file = open(osp.join(lmdb_path, 'meta_info.txt'), 'w')
    for (idx, (path, key)) in enumerate(zip(all_img_list, keys)):
        pbar.update()
        key_byte = key.encode('ascii')
        img = mmcv.imread(osp.join(data_path, path), flag='unchanged')
        (h, w, c) = img.shape
        (_, img_byte) = cv2.imencode('.png', img, [cv2.IMWRITE_PNG_COMPRESSION, compress_level])
        assert ((h == h_dst) and (w == w_dst) and (c == 3)), f'Wrong shape ({(h, w)}), should be ({(h_dst, w_dst)}).'
        txn.put(key_byte, img_byte)
        txt_file.write(f'''{key}.png ({h},{w},{c}) {compress_level}
''')
        # Commit periodically so the write transaction does not grow unbounded.
        if ((idx % batch) == 0):
            txn.commit()
            txn = env.begin(write=True)
    txn.commit()
    env.close()
    txt_file.close()
    print('\nFinish writing lmdb.')
def generate_anno_file(train_list, file_name='meta_info_Vimeo90K_GT.txt'):
    """Generate anno file for Vimeo90K datasets from the official train list.

    Each output line is '<clip> (256, 448, 3)', written next to `train_list`.

    Args:
        train_list (str): Train list path for Vimeo90K datasets.
        file_name (str): Saved file name. Default: 'meta_info_Vimeo90K_GT.txt'.
    """
    print(f'Generate annotation files {file_name}...')
    with open(train_list) as list_file:
        clips = [entry.rstrip() for entry in list_file]
    anno_path = osp.join(osp.dirname(train_list), file_name)
    with open(anno_path, 'w') as anno_file:
        anno_file.writelines(f'{clip} (256, 448, 3)\n' for clip in clips)
def parse_args():
    """Build and parse the CLI options for Vimeo90K preprocessing."""
    modify_args()
    parser = argparse.ArgumentParser(
        description='Preprocess Vimeo90K datasets',
        epilog='You can download the Vimeo90K dataset from:'
               'http://toflow.csail.mit.edu/')
    parser.add_argument('train_list', help='official training list path for Vimeo90K')
    parser.add_argument('--gt-path', default=None, help='GT path for Vimeo90K')
    parser.add_argument('--lq-path', default=None, help='LQ path for Vimeo90K')
    parser.add_argument('--make-lmdb', action='store_true', help='create lmdb files')
    return parser.parse_args()
class TensorRTRestorerGenerator(nn.Module):
    """Inner class for tensorrt restorer model inference

    Args:
        trt_file (str): The path to the tensorrt file.
        device_id (int): Which device to place the model.
    """

    def __init__(self, trt_file: str, device_id: int):
        super().__init__()
        from mmcv.tensorrt import TRTWrapper, load_tensorrt_plugin
        try:
            load_tensorrt_plugin()
        except (ImportError, ModuleNotFoundError):
            warnings.warn('If input model has custom op from mmcv, you may have to build mmcv with TensorRT from source.')
        self.device_id = device_id
        # The engine exposes a single named input and a single named output.
        self.model = TRTWrapper(trt_file, input_names=['input'], output_names=['output'])

    def forward(self, x):
        """Run the TensorRT engine on `x` and return the result on CPU."""
        with torch.cuda.device(self.device_id), torch.no_grad():
            prediction = self.model({'input': x})['output']
        return prediction.detach().cpu()
class TensorRTRestorer(nn.Module):
    """A warper class for tensorrt restorer

    Args:
        base_model (Any): The base model build from config.
        trt_file (str): The path to the tensorrt file.
        device_id (int): Which device to place the model.
    """

    def __init__(self, base_model: Any, trt_file: str, device_id: int):
        super().__init__()
        # Swap the generator for a TensorRT-backed one; the rest of the base
        # model (test logic, metrics) is reused unchanged.
        base_model.generator = TensorRTRestorerGenerator(trt_file=trt_file, device_id=device_id)
        self.base_model = base_model

    def forward(self, lq, gt=None, test_mode=False, **kwargs):
        """Delegate the call to the wrapped base model."""
        return self.base_model(lq, gt=gt, test_mode=test_mode, **kwargs)
class TensorRTEditing(nn.Module):
    """A class for testing tensorrt deployment

    Args:
        trt_file (str): The path to the tensorrt file.
        cfg (Any): The configuration of the testing, decided by the config file.
        device_id (int): Which device to place the model.

    Raises:
        TypeError: If the configured model is not a supported type
            (currently only ``BasicRestorer``).
    """

    def __init__(self, trt_file: str, cfg: Any, device_id: int):
        super().__init__()
        base_model = build_model(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
        if isinstance(base_model, BasicRestorer):
            WrapperClass = TensorRTRestorer
        else:
            # BUG FIX: previously fell through with `WrapperClass` unbound,
            # crashing with an opaque NameError for unsupported models.
            raise TypeError(
                f'TensorRT deployment does not support '
                f'{type(base_model).__name__}; only BasicRestorer models are '
                f'supported.')
        self.wrapper = WrapperClass(base_model, trt_file, device_id)

    def forward(self, **kwargs):
        """Delegate the call to the wrapped restorer."""
        return self.wrapper(**kwargs)
def parse_args():
    """Build and parse the CLI options for deployed-model testing."""
    parser = argparse.ArgumentParser(description='mmediting tester')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('model', help='input model file')
    parser.add_argument('backend', help='backend of the model.',
                        choices=['onnxruntime', 'tensorrt'])
    parser.add_argument('--out', help='output result pickle file')
    parser.add_argument('--save-path', default=None, type=str,
                        help='path to store images and if not given, will not save image')
    parser.add_argument('--cfg-options', nargs='+', action=DictAction,
                        help='override some settings in the used config, the key-value pair in xxx=yyy format will be merged into config file. If the value to be overwritten is a list, it should be like key="[a,b]" or key=a,b It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" Note that the quotation marks are necessary and that no white space is allowed.')
    return parser.parse_args()
def main():
    """Run backend (ONNXRuntime/TensorRT) evaluation over the test dataset."""
    args = parse_args()
    cfg = Config.fromfile(args.config)
    if (args.cfg_options is not None):
        # Command-line overrides take precedence over the config file.
        cfg.merge_from_dict(args.cfg_options)
    # Deployed-model testing runs on a single GPU only.
    distributed = False
    dataset = build_dataset(cfg.data.test)
    # Dataloader settings: shared keys from cfg.data, then fixed test-time
    # options, then any explicit `test_dataloader` overrides.
    loader_cfg = {**dict(((k, cfg.data[k]) for k in ['workers_per_gpu'] if (k in cfg.data))), **dict(samples_per_gpu=1, drop_last=False, shuffle=False, dist=distributed), **cfg.data.get('test_dataloader', {})}
    data_loader = build_dataloader(dataset, **loader_cfg)
    # `backend` is restricted to these two choices by argparse.
    if (args.backend == 'onnxruntime'):
        model = ONNXRuntimeEditing(args.model, cfg=cfg, device_id=0)
    elif (args.backend == 'tensorrt'):
        model = TensorRTEditing(args.model, cfg=cfg, device_id=0)
    # Save output images only when a destination path was given.
    args.save_image = (args.save_path is not None)
    model = MMDataParallel(model, device_ids=[0])
    outputs = single_gpu_test(model, data_loader, save_path=args.save_path, save_image=args.save_image)
    print()
    stats = dataset.evaluate(outputs)
    for stat in stats:
        print('Eval-{}: {}'.format(stat, stats[stat]))
    if args.out:
        print('writing results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)
def mmedit2torchserve(config_file: str, checkpoint_file: str, output_folder: str, model_name: str, model_version: str='1.0', force: bool=False):
    """Converts MMEditing model (config + checkpoint) to TorchServe `.mar`.

    Args:
        config_file:
            In MMEditing config format.
            The contents vary for each task repository.
        checkpoint_file:
            In MMEditing checkpoint format.
            The contents vary for each task repository.
        output_folder:
            Folder where `{model_name}.mar` will be created.
            The file created will be in TorchServe archive format.
        model_name:
            If not None, used for naming the `{model_name}.mar` file
            that will be created under `output_folder`.
            If None, `{Path(checkpoint_file).stem}` will be used.
        model_version:
            Model's version.
        force:
            If True, if there is an existing `{model_name}.mar`
            file under `output_folder` it will be overwritten.
    """
    mmcv.mkdir_or_exist(output_folder)
    config = mmcv.Config.fromfile(config_file)
    with TemporaryDirectory() as tmpdir:
        # model-archiver needs the config as an on-disk file; dump a copy.
        config.dump(f'{tmpdir}/config.py')
        # Mirror the argparse namespace that torch-model-archiver expects.
        args_ = Namespace(**{'model_file': f'{tmpdir}/config.py', 'serialized_file': checkpoint_file, 'handler': f'{Path(__file__).parent}/mmedit_handler.py', 'model_name': (model_name or Path(checkpoint_file).stem), 'version': model_version, 'export_path': output_folder, 'force': force, 'requirements_file': None, 'extra_files': None, 'runtime': 'python', 'archive_format': 'default'})
        print(args_.model_name)
        manifest = ModelExportUtils.generate_manifest_json(args_)
        package_model(args_, manifest)
def parse_args():
    """Build and parse the CLI options for TorchServe packaging."""
    parser = ArgumentParser(
        description='Convert MMEditing models to TorchServe `.mar` format.')
    parser.add_argument('config', type=str, help='config file path')
    parser.add_argument('checkpoint', type=str, help='checkpoint file path')
    parser.add_argument('--output-folder', type=str, required=True,
                        help='Folder where `{model_name}.mar` will be created.')
    parser.add_argument('--model-name', type=str, default=None,
                        help='If not None, used for naming the `{model_name}.mar`file that will be created under `output_folder`.If None, `{Path(checkpoint_file).stem}` will be used.')
    parser.add_argument('--model-version', type=str, default='1.0',
                        help='Number used for versioning.')
    parser.add_argument('-f', '--force', action='store_true',
                        help='overwrite the existing `{model_name}.mar`')
    return parser.parse_args()
class MMEditHandler(BaseHandler):
    """TorchServe handler that runs MMEditing restoration inference."""

    def initialize(self, context):
        """Load the model/config shipped inside the `.mar` archive."""
        print('MMEditHandler.initialize is called')
        properties = context.system_properties
        # Prefer GPU when available; otherwise fall back to CPU.
        self.map_location = ('cuda' if torch.cuda.is_available() else 'cpu')
        self.device = torch.device((((self.map_location + ':') + str(properties.get('gpu_id'))) if torch.cuda.is_available() else self.map_location))
        self.manifest = context.manifest
        model_dir = properties.get('model_dir')
        serialized_file = self.manifest['model']['serializedFile']
        checkpoint = os.path.join(model_dir, serialized_file)
        # The packaging step stores the dumped config as 'config.py'.
        self.config_file = os.path.join(model_dir, 'config.py')
        self.model = init_model(self.config_file, checkpoint, self.device)
        self.initialized = True

    def preprocess(self, data, *args, **kwargs):
        """Decode the request payload (under 'data' or 'body') into a PIL image."""
        body = (data[0].get('data') or data[0].get('body'))
        result = Image.open(BytesIO(body))
        return result

    def inference(self, data, *args, **kwargs):
        """Run restoration on the image.

        restoration_inference expects a file path, so the image is staged in
        a temporary PNG with a random name and removed afterwards.
        """
        temp_name = ''.join(random.sample((string.ascii_letters + string.digits), 18))
        temp_path = f'./{temp_name}.png'
        data.save(temp_path)
        results = restoration_inference(self.model, temp_path)
        os.remove(temp_path)
        return results

    def postprocess(self, data):
        """Convert each output tensor into raw image bytes for the response."""
        output_list = []
        for data_ in data:
            data_np = tensor2img(data_)
            data_byte = data_np.tobytes()
            output_list.append(data_byte)
        return output_list
def parse_args():
    """Build and parse the CLI options for the TorchServe test client."""
    parser = ArgumentParser()
    parser.add_argument('model_name', help='The model name in the server')
    parser.add_argument('--inference-addr', default='127.0.0.1:8080',
                        help='Address and port of the inference server')
    parser.add_argument('--img-path', type=str, help='The input LQ image.')
    parser.add_argument('--save-path', type=str,
                        help='Path to save the generated GT image.')
    return parser.parse_args()
def save_results(content, save_path, ori_shape):
    """Decode raw BGR bytes returned by the server and save them as an image.

    The upscale factor is inferred from the payload size relative to the
    original image size; the target size is (width, height) for PIL.
    """
    ori_len = np.prod(ori_shape)
    scale = int(np.sqrt(len(content) / ori_len))
    target_size = [int(scale * dim) for dim in reversed(ori_shape[:2])]
    img = Image.frombytes('RGB', target_size, content, 'raw', 'BGR', 0, 0)
    img.save(save_path)
def main(args): url = ((('http://' + args.inference_addr) + '/predictions/') + args.model_name) ori_shape = cv2.imread(args.img_path).shape with open(args.img_path, 'rb') as image: response = requests.post(url, image) save_results(response.content, args.save_path, ori_shape)
def evaluate_one(args):
    """Function to evaluate one sample of data.

    Args:
        args (tuple): (pred_alpha_path, alpha_path, trimap_path); the trimap
            path may be None, in which case the whole image is evaluated.

    Returns:
        tuple: The evaluation results (sad, mse, gradient error,
            connectivity error).
    """
    (pred_alpha_path, alpha_path, trimap_path) = args
    pred_alpha = mmcv.imread(pred_alpha_path, flag='grayscale')
    alpha = mmcv.imread(alpha_path, flag='grayscale')
    # Without a trimap, weight every pixel equally.
    if trimap_path is None:
        trimap = np.ones_like(alpha)
    else:
        trimap = mmcv.imread(trimap_path, flag='grayscale')
    return (sad(alpha, trimap, pred_alpha),
            mse(alpha, trimap, pred_alpha),
            gradient_error(alpha, trimap, pred_alpha),
            connectivity(alpha, trimap, pred_alpha))
def evaluate(pred_root, gt_root, trimap_root, verbose, nproc):
    """Evaluate test results of Adobe composition-1k dataset.

    There are 50 different ground truth foregrounds and alpha mattes pairs,
    each of the foreground will be composited with 20 different backgrounds,
    producing 1000 images for testing. In some repo, the ground truth alpha
    matte will be copied 20 times and named the same as the images. This
    function accept both original alpha matte folder (contains 50 ground
    truth alpha mattes) and copied alpha matte folder (contains 1000 ground
    truth alpha mattes) for `gt_root`.

    Example of copied name:
    ```
    alpha_matte1.png -> alpha_matte1_0.png
                        alpha_matte1_1.png
                        ...
                        alpha_matte1_19.png
    ```

    Args:
        pred_root (str): Path to the predicted alpha matte folder.
        gt_root (str): Path to the ground truth alpha matte folder.
        trimap_root (str): Path to the trimap folder.
        verbose (bool): Whether print result for each predicted alpha matte.
        nproc (int): number of processers.
    """
    images = sorted(mmcv.scandir(pred_root))
    gt_files_num = len(list(mmcv.scandir(gt_root)))
    # 50 GT files -> uncopied layout: prediction names carry a background
    # index suffix that must be stripped to locate the GT file.
    if (gt_files_num == 50):
        pattern = re.compile('(.+)_(?:\\d+)(.png)')
    pairs = []
    for img in images:
        pred_alpha_path = osp.join(pred_root, img)
        if (gt_files_num == 50):
            groups = pattern.match(img).groups()
            alpha_path = osp.join(gt_root, ''.join(groups))
        else:
            alpha_path = osp.join(gt_root, img)
        trimap_path = (osp.join(trimap_root, img) if (trimap_root is not None) else None)
        pairs.append((pred_alpha_path, alpha_path, trimap_path))
    # Evaluate all pairs in parallel worker processes.
    results = mmcv.track_parallel_progress(evaluate_one, pairs, nproc)
    if verbose:
        for (i, img) in enumerate(images):
            (sad_result, mse_result, grad_result, conn_result) = results[i]
            print(f'{img} SAD: {sad_result:.6g} MSE: {mse_result:.6g} GRAD: {grad_result:.6g} CONN: {conn_result:.6g}')
    # Column-wise mean over the per-image (sad, mse, grad, conn) tuples.
    (sad_mean, mse_mean, grad_mean, conn_mean) = np.mean(results, axis=0)
    print(f'MEAN: SAD: {sad_mean:.6g} MSE: {mse_mean:.6g} GRAD: \n{grad_mean:.6g} CONN: {conn_mean:.6g}')
def parse_args():
    """Build and parse the CLI options for Composition-1k evaluation."""
    modify_args()
    parser = argparse.ArgumentParser(
        description='evaluate composition-1k prediction result')
    parser.add_argument('pred_root', help='Path to the predicted alpha matte folder')
    parser.add_argument('gt_root', help='Path to the ground truth alpha matte folder')
    parser.add_argument('--trimap-root',
                        help='Path to trimap folder. If not specified, results are calculated on the full image.')
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='Whether print result for each predicted alpha matte')
    parser.add_argument('--nproc', type=int, default=4, help='number of processers')
    return parser.parse_args()
def main():
    """Validate the CLI paths, then run the Composition-1k evaluation."""
    args = parse_args()
    for label, root in (('pred_root', args.pred_root), ('gt_root', args.gt_root)):
        if not osp.exists(root):
            raise FileNotFoundError(f'{label} {root} not found')
    evaluate(args.pred_root, args.gt_root, args.trimap_root, args.verbose, args.nproc)
def parse_args():
    """Build and parse the CLI options for the FLOPs counting script."""
    parser = argparse.ArgumentParser(description='Train a editor')
    parser.add_argument('config', help='train config file path')
    parser.add_argument('--shape', type=int, nargs='+', default=[250, 250],
                        help='input image size')
    return parser.parse_args()
def main():
    """Compute FLOPs/params of the configured model at a given input shape.

    Raises:
        ValueError: If --shape has an unsupported number of dimensions.
        NotImplementedError: If the model has no `forward_dummy` method.
    """
    args = parse_args()
    # Normalize --shape into a channel-first tuple: 1 value -> square CHW,
    # 2 values -> HW with 3 channels, 3/4 values -> used verbatim.
    if (len(args.shape) == 1):
        input_shape = (3, args.shape[0], args.shape[0])
    elif (len(args.shape) == 2):
        input_shape = ((3,) + tuple(args.shape))
    elif (len(args.shape) in [3, 4]):
        input_shape = tuple(args.shape)
    else:
        raise ValueError('invalid input shape')
    cfg = Config.fromfile(args.config)
    model = build_model(cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg).cuda()
    model.eval()
    # The complexity hook needs a plain single-tensor forward pass.
    if hasattr(model, 'forward_dummy'):
        model.forward = model.forward_dummy
    else:
        # BUG FIX: message previously read 'currently not currently supported'.
        raise NotImplementedError(f'FLOPs counter is currently not supported with {model.__class__.__name__}')
    (flops, params) = get_model_complexity_info(model, input_shape)
    split_line = ('=' * 30)
    print(f'{split_line}\nInput shape: {input_shape}\n'
          f'Flops: {flops}\nParams: {params}\n{split_line}')
    if (len(input_shape) == 4):
        print('!!!If your network computes N frames in one forward pass, you may want to divide the FLOPs by N to get the average FLOPs for each frame.')
    print('!!!Please be cautious if you use the results in papers. You may need to check if all ops are supported and verify that the flops computation is correct.')
def parse_args():
    """Build and parse the input/output checkpoint paths."""
    parser = argparse.ArgumentParser(
        description='Process a checkpoint to be published')
    parser.add_argument('in_file', help='input checkpoint filename')
    parser.add_argument('out_file', help='output checkpoint filename')
    return parser.parse_args()
def process_checkpoint(in_file, out_file):
    """Strip training state from a checkpoint and publish it with a hash tag.

    Loads `in_file`, drops the optimizer state, saves to `out_file`, then
    renames the result to `<out_file minus .pth>-<first 8 sha256 hex>.pth`.

    Args:
        in_file (str): Input checkpoint filename.
        out_file (str): Output checkpoint filename (rename target base).
    """
    import hashlib
    import os
    checkpoint = torch.load(in_file, map_location='cpu')
    # Training-only state is not needed for a published model.
    if ('optimizer' in checkpoint):
        del checkpoint['optimizer']
    try:
        # Keep the legacy (non-zipfile) serialization for compatibility with
        # older torch; the kwarg exists on torch >= 1.6.
        torch.save(checkpoint, out_file, _use_new_zipfile_serialization=False)
    except TypeError:
        # Older torch has no such kwarg and already uses the legacy format.
        torch.save(checkpoint, out_file)
    # Hash the saved file with hashlib instead of shelling out to the
    # platform-specific `sha256sum` binary.
    with open(out_file, 'rb') as f:
        sha = hashlib.sha256(f.read()).hexdigest()
    # BUG FIX: str.rstrip('.pth') strips a trailing run of the characters
    # {'.', 'p', 't', 'h'} (e.g. 'depth.pth' -> 'de'); remove the suffix
    # explicitly instead.
    base = out_file[:-len('.pth')] if out_file.endswith('.pth') else out_file
    final_file = base + f'-{sha[:8]}.pth'
    # BUG FIX: the previous subprocess.Popen(['mv', ...]) returned without
    # waiting for the rename; os.replace is synchronous and portable.
    os.replace(out_file, final_file)
def main():
    """CLI entry point: publish the checkpoint named on the command line."""
    cli = parse_args()
    process_checkpoint(cli.in_file, cli.out_file)
def parse_args():
    """Build and parse the testing CLI options; also export LOCAL_RANK."""
    parser = argparse.ArgumentParser(description='mmediting tester')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument('--seed', type=int, default=None, help='random seed')
    parser.add_argument('--deterministic', action='store_true',
                        help='whether to set deterministic options for CUDNN backend.')
    parser.add_argument('--out', help='output result pickle file')
    parser.add_argument('--gpu-collect', action='store_true',
                        help='whether to use gpu to collect results')
    parser.add_argument('--save-path', default=None, type=str,
                        help='path to store images and if not given, will not save image')
    parser.add_argument('--tmpdir', help='tmp dir for writing some results')
    parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'],
                        default='none', help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    parsed = parser.parse_args()
    # torch.distributed launch utilities read the rank from the environment;
    # only set it when the launcher has not already done so.
    os.environ.setdefault('LOCAL_RANK', str(parsed.local_rank))
    return parsed
def main():
    """Entry point for (optionally distributed) model testing."""
    args = parse_args()
    cfg = mmcv.Config.fromfile(args.config)
    # Configure multiprocessing/threading knobs before anything else runs.
    setup_multi_processes(cfg)
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # Weights come from the checkpoint; drop any pretrained path in the config.
    cfg.model.pretrained = None
    if (args.launcher == 'none'):
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
    (rank, _) = get_dist_info()
    if (args.seed is not None):
        # Only rank 0 logs; all ranks seed.
        if (rank == 0):
            print('set random seed to', args.seed)
        set_random_seed(args.seed, deterministic=args.deterministic)
    dataset = build_dataset(cfg.data.test)
    # Dataloader settings: shared keys from cfg.data, then fixed test-time
    # options, then any explicit `test_dataloader` overrides.
    loader_cfg = {**dict(((k, cfg.data[k]) for k in ['workers_per_gpu'] if (k in cfg.data))), **dict(samples_per_gpu=1, drop_last=False, shuffle=False, dist=distributed), **cfg.data.get('test_dataloader', {})}
    data_loader = build_dataloader(dataset, **loader_cfg)
    model = build_model(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    # Save output images only when a destination path was given.
    args.save_image = (args.save_path is not None)
    empty_cache = cfg.get('empty_cache', False)
    if (not distributed):
        _ = load_checkpoint(model, args.checkpoint, map_location='cpu')
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader, save_path=args.save_path, save_image=args.save_image)
    else:
        find_unused_parameters = cfg.get('find_unused_parameters', False)
        model = DistributedDataParallelWrapper(model, device_ids=[torch.cuda.current_device()], broadcast_buffers=False, find_unused_parameters=find_unused_parameters)
        device_id = torch.cuda.current_device()
        # Map checkpoint tensors straight onto this rank's GPU.
        _ = load_checkpoint(model, args.checkpoint, map_location=(lambda storage, loc: storage.cuda(device_id)))
        outputs = multi_gpu_test(model, data_loader, args.tmpdir, args.gpu_collect, save_path=args.save_path, save_image=args.save_image, empty_cache=empty_cache)
    # Only rank 0 aggregates/prints metrics, and only when results carry them.
    if ((rank == 0) and ('eval_result' in outputs[0])):
        print('')
        stats = dataset.evaluate(outputs)
        for stat in stats:
            print('Eval-{}: {}'.format(stat, stats[stat]))
        if args.out:
            print('writing results to {}'.format(args.out))
            mmcv.dump(outputs, args.out)
def parse_args():
    """Build and parse the training CLI options; also export LOCAL_RANK."""
    parser = argparse.ArgumentParser(description='Train an editor')
    parser.add_argument('config', help='train config file path')
    parser.add_argument('--work-dir', help='the dir to save logs and models')
    parser.add_argument('--resume-from', help='the checkpoint file to resume from')
    parser.add_argument('--no-validate', action='store_true',
                        help='whether not to evaluate the checkpoint during training')
    parser.add_argument('--gpus', type=int, default=1,
                        help='number of gpus to use (only applicable to non-distributed training)')
    parser.add_argument('--seed', type=int, default=None, help='random seed')
    parser.add_argument('--diff_seed', action='store_true',
                        help='Whether or not set different seeds for different ranks')
    parser.add_argument('--deterministic', action='store_true',
                        help='whether to set deterministic options for CUDNN backend.')
    parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'],
                        default='none', help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    parser.add_argument('--autoscale-lr', action='store_true',
                        help='automatically scale lr with the number of gpus')
    parsed = parser.parse_args()
    # torch.distributed launch utilities read the rank from the environment;
    # only set it when the launcher has not already done so.
    os.environ.setdefault('LOCAL_RANK', str(parsed.local_rank))
    return parsed
def main():
    """Entry point for (optionally distributed) training."""
    args = parse_args()
    cfg = Config.fromfile(args.config)
    setup_multi_processes(cfg)
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # CLI overrides for work dir / resume checkpoint.
    if (args.work_dir is not None):
        cfg.work_dir = args.work_dir
    if (args.resume_from is not None):
        cfg.resume_from = args.resume_from
    cfg.gpus = args.gpus
    if args.autoscale_lr:
        # Linear LR scaling rule relative to an 8-GPU baseline.
        cfg.optimizer['lr'] = ((cfg.optimizer['lr'] * cfg.gpus) / 8)
    if (args.launcher == 'none'):
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
    # Timestamped log file inside the work dir.
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
    # Record the environment and config for reproducibility.
    env_info_dict = collect_env.collect_env()
    env_info = '\n'.join([f'{k}: {v}' for (k, v) in env_info_dict.items()])
    dash_line = (('-' * 60) + '\n')
    logger.info((((('Environment info:\n' + dash_line) + env_info) + '\n') + dash_line))
    logger.info('Distributed training: {}'.format(distributed))
    logger.info('mmedit Version: {}'.format(__version__))
    logger.info('Config:\n{}'.format(cfg.text))
    seed = init_random_seed(args.seed)
    # Optionally decorrelate ranks by offsetting the seed with the rank id.
    seed = ((seed + dist.get_rank()) if args.diff_seed else seed)
    logger.info('Set random seed to {}, deterministic: {}'.format(seed, args.deterministic))
    set_random_seed(seed, deterministic=args.deterministic)
    cfg.seed = seed
    model = build_model(cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
    datasets = [build_dataset(cfg.data.train)]
    # A two-stage workflow means validation reuses the training pipeline.
    if (len(cfg.workflow) == 2):
        val_dataset = copy.deepcopy(cfg.data.val)
        val_dataset.pipeline = cfg.data.train.pipeline
        datasets.append(build_dataset(val_dataset))
    if (cfg.checkpoint_config is not None):
        # Stamp every checkpoint with the mmedit version and full config.
        cfg.checkpoint_config.meta = dict(mmedit_version=__version__, config=cfg.text)
    meta = dict()
    if (cfg.get('exp_name', None) is None):
        # Default the experiment name to the work dir's basename.
        cfg['exp_name'] = osp.splitext(osp.basename(cfg.work_dir))[0]
    meta['exp_name'] = cfg.exp_name
    meta['mmedit Version'] = __version__
    meta['seed'] = seed
    meta['env_info'] = env_info
    train_model(model, datasets, cfg, distributed=distributed, validate=(not args.no_validate), timestamp=timestamp, meta=meta)
def cal_psnr(original, compressed):
    """Return the PSNR in dB between two images; np.inf if they are identical.

    Relies on the module-level `max_pixel_square` constant as the peak value.
    """
    err = np.mean((original - compressed) ** 2)
    if err == 0:
        # Identical inputs: PSNR is unbounded.
        return np.inf
    return 10 * log10(max_pixel_square / err)
def main():
    """Compute per-video and average RGB-PSNR between GT and enhanced frames."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-gt-dir', default='../mmediting/data/ldv_v2/test_gt')
    parser.add_argument('-enh-dir', default='../mmediting/data/ldv_v2/test_lq')
    # JSON mapping of video name -> list of frame indices to skip.
    parser.add_argument('-ignored-frms', type=json.loads, default='{"002":[0]}', help='{"002":[0,]} will leads to error!')
    parser.add_argument('-save-dir', default='log')
    args = parser.parse_args()
    if (not osp.exists(args.save_dir)):
        os.makedirs(args.save_dir)
    # Each sub-directory of gt_dir is one video.
    vid_list = glob(osp.join(args.gt_dir, '*/'))
    vid_name_list = sorted([vid_path.split('/')[(- 2)] for vid_path in vid_list])
    # Pass 1: per-frame PSNR for every video -> <save_dir>/<vid>.csv.
    for vid_name in vid_name_list:
        gt_vid_dir = osp.join(args.gt_dir, vid_name)
        enh_vid_dir = osp.join(args.enh_dir, vid_name)
        img_list = glob(osp.join(gt_vid_dir, '*.png'))
        img_name_list = sorted([img_path.split('/')[(- 1)] for img_path in img_list])
        psnr_list = []
        bar_ = tqdm(total=len(img_name_list))
        for img_name in img_name_list:
            img_gt = cv2.imread(osp.join(gt_vid_dir, img_name)).astype(float)
            img_enh = cv2.imread(osp.join(enh_vid_dir, img_name)).astype(float)
            psnr = cal_psnr(img_enh, img_gt)
            psnr_list.append(psnr)
            bar_.update(1)
        bar_.close()
        df = pd.DataFrame(dict(psnr=psnr_list))
        csv_path = osp.join(args.save_dir, f'{vid_name}.csv')
        df.to_csv(csv_path)
        print(f'saved to: {csv_path}')
    # Pass 2: per-video averages, skipping ignored frames. NOTE: inf frames
    # are counted separately but still enter the mean unless also ignored.
    ave_psnr_list = []
    inf_num_list = []
    ignore_num_list = []
    for vid_name in vid_name_list:
        df = pd.read_csv(osp.join(args.save_dir, f'{vid_name}.csv'))
        valid_psnr_list = []
        inf_num = 0
        ignore_num = 0
        for (idx_psnr, psnr) in enumerate(df['psnr']):
            if (psnr == np.inf):
                inf_num += 1
            if ((vid_name in args.ignored_frms) and (idx_psnr in args.ignored_frms[vid_name])):
                ignore_num += 1
            else:
                valid_psnr_list.append(psnr)
        inf_num_list.append(inf_num)
        ignore_num_list.append(ignore_num)
        ave_psnr_list.append(np.mean(valid_psnr_list))
    df = pd.DataFrame(dict(vid_name=vid_name_list, psnr=ave_psnr_list, inf_num=inf_num_list, ignore_num=ignore_num_list))
    csv_path = osp.join(args.save_dir, 'ave.csv')
    df.to_csv(csv_path)
    print(f'saved to: \n{csv_path}')
def return_y_from_bgr(img_bgr):
    """Return the luma (Y) channel of a BGR image via mmcv's YCbCr conversion."""
    return mmcv.bgr2ycbcr(img_bgr, y_only=True)
def cal_psnr(original, compressed):
    """Peak signal-to-noise ratio in dB; np.inf when the inputs match exactly.

    Uses the module-level `max_pixel_square` constant as the peak value.
    """
    mean_sq_err = np.mean((original - compressed) ** 2)
    return np.inf if mean_sq_err == 0 else 10 * log10(max_pixel_square / mean_sq_err)
def main():
    """Compute per-video and average Y-channel PSNR between GT and enhanced frames."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-gt-dir', default='../mmediting/data/mfqe_v2/test_gt')
    parser.add_argument('-enh-dir', default='../mmediting/data/mfqe_v2/test_lq')
    parser.add_argument('-save-dir', default='log')
    # JSON mapping of video name -> list of frame indices to skip.
    parser.add_argument('-ignored-frms', type=json.loads, default='{"002":[0]}', help='{"002":[0,]} will leads to error!')
    parser.add_argument('-order', action='store_true')
    parser.add_argument('-use-other', action='store_true', help='if not exists, use images at another dir')
    parser.add_argument('-other-dir', default='../mmediting/data/mfqe_v2/test_lq')
    args = parser.parse_args()
    if (not osp.exists(args.save_dir)):
        os.makedirs(args.save_dir)
    if args.order:
        # Fixed MFQEv2 test-sequence order.
        vid_name_list = ['Kimono_1920x1080_240', 'ParkScene_1920x1080_240', 'Cactus_1920x1080_500', 'BQTerrace_1920x1080_600', 'BasketballDrive_1920x1080_500', 'RaceHorses_832x480_300', 'BQMall_832x480_600', 'PartyScene_832x480_500', 'BasketballDrill_832x480_500', 'RaceHorses_416x240_300', 'BQSquare_416x240_600', 'BlowingBubbles_416x240_500', 'BasketballPass_416x240_500', 'FourPeople_1280x720_600', 'Johnny_1280x720_600', 'KristenAndSara_1280x720_600']
    else:
        vid_list = glob(osp.join(args.gt_dir, '*/'))
        vid_name_list = sorted([vid_path.split('/')[(- 2)] for vid_path in vid_list])
    # Pass 1: per-frame PSNR per video -> <save_dir>/<vid>.csv.
    for vid_name in vid_name_list:
        gt_vid_dir = osp.join(args.gt_dir, vid_name)
        enh_vid_dir = osp.join(args.enh_dir, vid_name)
        # Fall back to other-dir when the enhanced clip is missing.
        if ((not osp.exists(enh_vid_dir)) and args.use_other):
            enh_vid_dir = osp.join(args.other_dir, vid_name)
        img_list = glob(osp.join(gt_vid_dir, '*.png'))
        img_name_list = sorted([img_path.split('/')[(- 1)] for img_path in img_list])
        psnr_list = []
        bar_ = tqdm(total=len(img_name_list))
        for img_name in img_name_list:
            img_gt = cv2.imread(osp.join(gt_vid_dir, img_name))
            img_enh = cv2.imread(osp.join(enh_vid_dir, img_name))
            # Compare on the luma (Y) channel only.
            img_gt = return_y_from_bgr(img_gt).astype(float)
            img_enh = return_y_from_bgr(img_enh).astype(float)
            psnr = cal_psnr(img_enh, img_gt)
            psnr_list.append(psnr)
            bar_.update(1)
        bar_.close()
        df = pd.DataFrame(dict(psnr=psnr_list))
        csv_path = osp.join(args.save_dir, f'{vid_name}.csv')
        df.to_csv(csv_path)
        print(f'saved to: {csv_path}')
    # Pass 2: per-video averages, skipping ignored frames. NOTE: inf frames
    # are counted separately but still enter the mean unless also ignored.
    ave_psnr_list = []
    inf_num_list = []
    ignore_num_list = []
    for vid_name in vid_name_list:
        df = pd.read_csv(osp.join(args.save_dir, f'{vid_name}.csv'))
        valid_psnr_list = []
        inf_num = 0
        ignore_num = 0
        for (idx_psnr, psnr) in enumerate(df['psnr']):
            if (psnr == np.inf):
                inf_num += 1
            if ((vid_name in args.ignored_frms) and (idx_psnr in args.ignored_frms[vid_name])):
                ignore_num += 1
            else:
                valid_psnr_list.append(psnr)
        inf_num_list.append(inf_num)
        ignore_num_list.append(ignore_num)
        ave_psnr_list.append(np.mean(valid_psnr_list))
    df = pd.DataFrame(dict(vid_name=vid_name_list, psnr=ave_psnr_list, inf_num=inf_num_list, ignore_num=ignore_num_list))
    csv_path = osp.join(args.save_dir, 'ave.csv')
    df.to_csv(csv_path)
    print(f'saved to: {csv_path}')
def return_y_from_bgr(img_bgr):
    """Extract only the Y (luma) plane from a BGR image."""
    y_plane = mmcv.bgr2ycbcr(img_bgr, y_only=True)
    return y_plane
def cal_psnr(original, compressed):
    """Compute PSNR in dB; identical inputs yield np.inf.

    Uses the module-level `max_pixel_square` constant as the peak value.
    """
    diff = original - compressed
    mse_val = np.mean(diff ** 2)
    if mse_val == 0:
        return np.inf
    return 10 * log10(max_pixel_square / mse_val)
def main():
    """Compute per-frame PSNR for each test video and save CSV reports.

    Variant of the evaluation script where the enhanced frame is NOT converted
    to Y via BGR->YCbCr; instead its first stored channel is used directly.
    Writes one ``<vid_name>.csv`` per video and an aggregated ``ave.csv``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-gt-dir', default='../mmediting/data/mfqe_v2/test_gt')
    parser.add_argument('-enh-dir', default='../mmediting/data/mfqe_v2/test_lq')
    parser.add_argument('-save-dir', default='log')
    # Parsed as JSON: maps video name -> list of frame indices excluded from
    # the average (trailing commas are invalid JSON, hence the help note).
    parser.add_argument('-ignored-frms', type=json.loads, default='{"002":[0]}', help='{"002":[0,]} will leads to error!')
    parser.add_argument('-order', action='store_true')
    parser.add_argument('-use-other', action='store_true', help='if not exists, use images at another dir')
    parser.add_argument('-other-dir', default='../mmediting/data/mfqe_v2/test_lq')
    args = parser.parse_args()
    if (not osp.exists(args.save_dir)):
        os.makedirs(args.save_dir)
    if args.order:
        # Fixed MFQEv2 test-sequence order (name_resolution_framecount).
        vid_name_list = ['Kimono_1920x1080_240', 'ParkScene_1920x1080_240', 'Cactus_1920x1080_500', 'BQTerrace_1920x1080_600', 'BasketballDrive_1920x1080_500', 'RaceHorses_832x480_300', 'BQMall_832x480_600', 'PartyScene_832x480_500', 'BasketballDrill_832x480_500', 'RaceHorses_416x240_300', 'BQSquare_416x240_600', 'BlowingBubbles_416x240_500', 'BasketballPass_416x240_500', 'FourPeople_1280x720_600', 'Johnny_1280x720_600', 'KristenAndSara_1280x720_600']
    else:
        vid_list = glob(osp.join(args.gt_dir, '*/'))
        vid_name_list = sorted([vid_path.split('/')[(- 2)] for vid_path in vid_list])
    for vid_name in vid_name_list:
        gt_vid_dir = osp.join(args.gt_dir, vid_name)
        enh_vid_dir = osp.join(args.enh_dir, vid_name)
        if ((not osp.exists(enh_vid_dir)) and args.use_other):
            enh_vid_dir = osp.join(args.other_dir, vid_name)
        img_list = glob(osp.join(gt_vid_dir, '*.png'))
        img_name_list = sorted([img_path.split('/')[(- 1)] for img_path in img_list])
        psnr_list = []
        bar_ = tqdm(total=len(img_name_list))
        for img_name in img_name_list:
            img_gt = cv2.imread(osp.join(gt_vid_dir, img_name))
            img_enh = cv2.imread(osp.join(enh_vid_dir, img_name))
            img_gt = return_y_from_bgr(img_gt).astype(float)
            # Takes channel 0 of the cv2-loaded (BGR-ordered) enhanced frame.
            # NOTE(review): presumably the enhanced outputs are single-channel
            # (Y-only) images replicated across channels, so channel 0 equals Y;
            # confirm against the enhancement pipeline — for true color frames
            # this would compare the blue channel against ground-truth luma.
            img_enh = img_enh[(..., 0)]
            psnr = cal_psnr(img_enh, img_gt)
            psnr_list.append(psnr)
            bar_.update(1)
        bar_.close()
        df = pd.DataFrame(dict(psnr=psnr_list))
        csv_path = osp.join(args.save_dir, f'{vid_name}.csv')
        df.to_csv(csv_path)
        print(f'saved to: {csv_path}')
    # Second pass: re-read the per-video CSVs and build the summary table.
    ave_psnr_list = []
    inf_num_list = []
    ignore_num_list = []
    for vid_name in vid_name_list:
        df = pd.read_csv(osp.join(args.save_dir, f'{vid_name}.csv'))
        valid_psnr_list = []
        inf_num = 0
        ignore_num = 0
        for (idx_psnr, psnr) in enumerate(df['psnr']):
            if (psnr == np.inf):
                inf_num += 1
            if ((vid_name in args.ignored_frms) and (idx_psnr in args.ignored_frms[vid_name])):
                ignore_num += 1
            else:
                valid_psnr_list.append(psnr)
        inf_num_list.append(inf_num)
        ignore_num_list.append(ignore_num)
        ave_psnr_list.append(np.mean(valid_psnr_list))
    df = pd.DataFrame(dict(vid_name=vid_name_list, psnr=ave_psnr_list, inf_num=inf_num_list, ignore_num=ignore_num_list))
    csv_path = osp.join(args.save_dir, 'ave.csv')
    df.to_csv(csv_path)
    print(f'saved to: {csv_path}')
class DataLoader(object):
    """Load a ranking result file plus advantaged/disadvantaged group membership.

    Reads a tab-separated rank file (must contain a ``uid`` column), adds a
    ``q`` selection-indicator column initialised to 1, and splits the rows
    into two group dataframes based on the uids listed in two group files.
    """

    def __init__(self, path, sep='\t', seq_sep=',', label='label', rank_file=RANK_FILE, group_1_file=GROUP_1_FILE, group_2_file=GROUP_2_FILE):
        """
        :param path: directory containing the rank file and the group files.
        :param sep: field separator (kept for interface; reads use tab).
        :param seq_sep: sequence separator (unused here, kept for interface).
        :param label: name of the label column.
        :param rank_file: file name of the ranking results.
        :param group_1_file: file name listing group-1 (advantaged) uids.
        :param group_2_file: file name listing group-2 (disadvantaged) uids.
        """
        self.rank_df = None
        self.path = path
        self.sep = sep
        self.seq_sep = seq_sep
        self.label = label
        self.rank_file = rank_file
        self.group_1_file = group_1_file
        self.group_2_file = group_2_file
        self._load_data()
        (self.g1_df, self.g2_df) = self._load_groups()

    def _load_data(self):
        """Read the rank file into ``self.rank_df`` (only once) and validate it.

        :raises FileNotFoundError: if the rank file is missing.
        :raises ValueError: if the file has no ``uid`` column.
        """
        rank_file = os.path.join(self.path, self.rank_file)
        if os.path.exists(rank_file):
            if (self.rank_df is None):
                logging.info('load rank csv...')
                self.rank_df = pd.read_csv(rank_file, sep='\t')
                # 'q' marks whether an item is selected into the top-K list;
                # initialise everything to selected.
                self.rank_df['q'] = 1
                if ('uid' not in self.rank_df):
                    raise ValueError('missing uid in header.')
                logging.info(('size of rank file: %d' % len(self.rank_df)))
        else:
            raise FileNotFoundError('No rank file found.')

    def _load_groups(self):
        """
        Load advantaged/disadvantaged group info file and split the all data dataframe
        into two group-dataframes.

        :return: group 1 dataframe (advantaged), group 2 dataframe (disadvantaged)
        :raises FileNotFoundError: if either group file is missing.
        :raises ValueError: if a group file has no ``uid`` column.
        """
        if (self.rank_df is None):
            self._load_data()
        group_1_file = os.path.join(self.path, self.group_1_file)
        group_2_file = os.path.join(self.path, self.group_2_file)
        if os.path.exists(group_1_file):
            logging.info('load group 1 info txt...')
            g1_df = pd.read_csv(group_1_file, sep='\t')
        else:
            raise FileNotFoundError('No Group 1 file found.')
        if os.path.exists(group_2_file):
            logging.info('load group 2 info txt...')
            g2_df = pd.read_csv(group_2_file, sep='\t')
        else:
            raise FileNotFoundError('No Group 2 file found.')
        if (('uid' in g1_df) and ('uid' in g2_df)):
            # De-duplicate uids before the isin() filters below.
            g1_user_list = list(set(g1_df['uid'].tolist()))
            g2_user_list = list(set(g2_df['uid'].tolist()))
        else:
            raise ValueError('No uid found in the group dataframe.')
        group_1_df = self.rank_df[self.rank_df['uid'].isin(g1_user_list)]
        group_2_df = self.rank_df[self.rank_df['uid'].isin(g2_user_list)]
        return (group_1_df, group_2_df)
class UGF(object):
    """User-oriented Group Fairness re-ranking via a Gurobi integer program.

    Selects a top-K list per user (binary variables) that maximises the sum
    of selected scores subject to the two user groups' average ranking metric
    differing by at most ``epsilon``.
    """

    def __init__(self, data_loader, k, eval_metric_list, fairness_metric='f1', epsilon=0.05, logger=None, model_name='', group_name=''):
        """
        Train fairness model.

        :param data_loader: Dataloader object
        :param k: k for top-K number of items to be selected from the entire list
        :param eval_metric_list: a list contains all the metrics to report
        :param fairness_metric: a string, the metric used for fairness constraint, default='f1'
        :param epsilon: the upper bound for the difference between two groups scores
        :param logger: logger for logging info
        :param model_name: name of the base recommender (for logging only)
        :param group_name: name of the group split (for logging only)
        """
        self.data_loader = data_loader
        # Last path component is used as the dataset label in log lines.
        self.dataset_name = data_loader.path.split('/')[(- 1)]
        self.k = k
        self.eval_metric_list = eval_metric_list
        self.fairness_metric = fairness_metric
        self.epsilon = epsilon
        self.model_name = model_name
        self.group_name = group_name
        if (logger is None):
            self.logger = create_logger()
        else:
            self.logger = logger

    @staticmethod
    def _check_df_format(df):
        """
        Check if the input dataframe contains all the necessary columns.

        :raises KeyError: when a required column is absent.
        :return: None
        """
        expected_columns = ['uid', 'iid', 'score', 'label', 'q']
        for c in expected_columns:
            if (c not in df.columns):
                raise KeyError(('Missing column ' + c))

    @staticmethod
    def _build_fairness_optimizer(group_df_list, k, metric, name='UGF'):
        """
        Use Gurobi to build fairness optimizer.

        :param group_df_list: a list contains dataframes from two groups
        :param k: an integer for the length of top-K list
        :param metric: the metric string for fairness constraint. e.g. 'f1', 'recall', 'precision'
        :param name: a string which is the name of this optimizer
        :return: the Gurobi model, a list of Qi*Si for obj function, a list of two group metric
            (linear expressions; one averaged metric expression per group)
        """
        try:
            m = gp.Model(name)
            var_score_list = []
            metric_list = []
            for df in group_df_list:
                df_group = df.groupby('uid')
                tmp_metric_list = []
                tmp_var_score_list = []
                for (uid, group) in df_group:
                    tmp_var_list = []
                    tmp_var_label_list = []
                    score_list = group['score'].tolist()
                    label_list = group['label'].tolist()
                    item_list = group['iid'].tolist()
                    for i in range(len(item_list)):
                        # One binary selection variable per (user, item) pair,
                        # named "uid_iid" so _format_result can parse it back.
                        var_name = ((str(uid) + '_') + str(item_list[i]))
                        v = m.addVar(vtype=GRB.BINARY, name=var_name)
                        tmp_var_list.append(v)
                        tmp_var_score_list.append((score_list[i] * v))
                        tmp_var_label_list.append((label_list[i] * v))
                    # Exactly k items are selected for every user.
                    m.addConstr((gp.quicksum(tmp_var_list) == k))
                    # Users with no positive labels contribute nothing to the
                    # group metric (their selection is still constrained above).
                    if (group['label'].sum() == 0):
                        continue
                    if (metric == 'recall'):
                        tmp_metric_list.append((gp.quicksum(tmp_var_label_list) / group['label'].sum()))
                    elif (metric == 'precision'):
                        tmp_metric_list.append((gp.quicksum(tmp_var_label_list) / k))
                    elif (metric == 'f1'):
                        f1 = ((2 * gp.quicksum(tmp_var_label_list)) / (group['label'].sum() + k))
                        tmp_metric_list.append(f1)
                    else:
                        raise ValueError('Unknown metric for optimizer building.')
                # Group metric = mean of its users' per-user metric expressions.
                metric_list.append((gp.quicksum(tmp_metric_list) / len(tmp_metric_list)))
                var_score_list.extend(tmp_var_score_list)
            m.update()
            return (m, var_score_list, metric_list)
        except gp.GurobiError as e:
            print(((('Error code ' + str(e.errno)) + ': ') + str(e)))
        except AttributeError:
            print('Encountered an attribute error')
            pass

    @staticmethod
    def _format_result(model, df):
        """
        Format the gurobi results to dataframe.

        :param model: optimized gurobi model
        :param df: the pandas dataframe to add the optimized results into
        :return: None
        """
        for v in model.getVars():
            # Variable names were built as "uid_iid" in _build_fairness_optimizer.
            # NOTE(review): this assumes uid and iid are integers without '_'.
            v_s = v.varName.split('_')
            uid = int(v_s[0])
            iid = int(v_s[1])
            df.loc[(((df['uid'] == uid) & (df['iid'] == iid)), 'q')] = int(v.x)

    def _print_metrics(self, df, metrics, message='metric scores'):
        """
        Print out evaluation scores.

        :param df: the dataframe contains the data for evaluation
        :param metrics: a list, contains the metrics to report
        :param message: a string, for print message
        :return: None
        """
        results = evaluation_methods(df, metrics=metrics)
        r_string = ''
        for i in range(len(metrics)):
            r_string = ((((r_string + metrics[i]) + '=') + '{:.4f}'.format(results[i])) + ' ')
        print(((message + ': ') + r_string))
        self.logger.info(((message + ': ') + r_string))

    def train(self):
        """
        Train fairness model: report metrics before optimisation, solve the
        fairness-constrained selection, write the solution back into the
        dataframes, and report metrics after optimisation.
        """
        all_df = self.data_loader.rank_df.copy(deep=True)
        self._check_df_format(all_df)
        group_df_list = [self.data_loader.g1_df.copy(deep=True), self.data_loader.g2_df.copy(deep=True)]
        self.logger.info('Model:{} | Dataset:{} | Group:{} | Epsilon={} | K={} | GRU_metric={}'.format(self.model_name, self.dataset_name, self.group_name, self.epsilon, self.k, self.fairness_metric))
        self._print_metrics(all_df, self.eval_metric_list, 'Before optimization overall scores ')
        self._print_metrics(group_df_list[0], self.eval_metric_list, 'Before optimization group 1 (active) scores ')
        self._print_metrics(group_df_list[1], self.eval_metric_list, 'Before optimization group 2 (inactive) scores')
        (m, var_score_list, metric_list) = self._build_fairness_optimizer(group_df_list, self.k, metric=self.fairness_metric, name='UGF_f1')
        # |metric1 - metric2| <= epsilon, linearised as two one-sided constraints.
        m.addConstr(((metric_list[0] - metric_list[1]) <= self.epsilon))
        m.addConstr(((metric_list[1] - metric_list[0]) <= self.epsilon))
        # Objective: maximise the total score of the selected items.
        m.setObjective(gp.quicksum(var_score_list), GRB.MAXIMIZE)
        m.optimize()
        self._format_result(m, all_df)
        # Refresh the per-group 'q' column from the optimised all_df.
        group_df_list[0].drop(columns=['q'], inplace=True)
        group_df_list[0] = pd.merge(group_df_list[0], all_df, on=['uid', 'iid', 'score', 'label'], how='left')
        group_df_list[1].drop(columns=['q'], inplace=True)
        group_df_list[1] = pd.merge(group_df_list[1], all_df, on=['uid', 'iid', 'score', 'label'], how='left')
        self._print_metrics(all_df, self.eval_metric_list, 'After optimization overall metric scores ')
        self._print_metrics(group_df_list[0], self.eval_metric_list, 'After optimization group 1 (active) scores ')
        self._print_metrics(group_df_list[1], self.eval_metric_list, 'After optimization group 2 (inactive) scores ')
        self.logger.info('\n\n')
def mean_reciprocal_rank(rs):
    """Mean reciprocal rank over an iterator of relevance-score lists.

    Relevance is binary (nonzero is relevant); ranks are 1-based, and a list
    with no relevant item contributes 0.

    >>> mean_reciprocal_rank([[0, 0, 1], [0, 1, 0], [1, 0, 0]])
    0.61111111111111105
    """
    reciprocals = []
    for r in rs:
        hit_positions = np.asarray(r).nonzero()[0]
        if hit_positions.size:
            reciprocals.append(1.0 / (hit_positions[0] + 1))
        else:
            reciprocals.append(0.0)
    return np.mean(reciprocals)
def r_precision(r):
    """R-Precision: precision at the rank of the last relevant document.

    Relevance is binary (nonzero is relevant); returns 0.0 when nothing is
    relevant.

    >>> r_precision([1, 0, 0])
    1.0
    """
    relevant = np.asarray(r) != 0
    hit_positions = relevant.nonzero()[0]
    if hit_positions.size == 0:
        return 0.0
    cutoff = hit_positions[-1] + 1
    return np.mean(relevant[:cutoff])
def precision_at_k(r, k):
    """Precision over the first k relevance scores (binary relevance).

    :param r: relevance scores in rank order (first element is rank 1).
    :param k: number of top results to consider; must be >= 1.
    :raises ValueError: when len(r) < k.
    """
    assert (k >= 1)
    top_k = np.asarray(r)[:k]
    relevant = top_k != 0
    if relevant.size != k:
        raise ValueError('Relevance score length < k')
    return np.mean(relevant)
def average_precision(r):
    """Average precision (area under the PR curve) for binary relevance.

    Averages precision_at_k over every rank that holds a relevant item;
    returns 0.0 when no item is relevant.
    """
    relevant = np.asarray(r) != 0
    precisions = [precision_at_k(relevant, rank + 1)
                  for rank in range(relevant.size) if relevant[rank]]
    if not precisions:
        return 0.0
    return np.mean(precisions)
def mean_average_precision(rs):
    """Mean of average_precision over an iterator of relevance-score lists."""
    per_query = [average_precision(scores) for scores in rs]
    return np.mean(per_query)
def dcg_at_k(r, k, method=0):
    """Discounted cumulative gain at rank k.

    Relevance can be positive reals or binary.

    :param r: relevance scores in rank order (first element is rank 1).
    :param k: number of results to consider.
    :param method: 0 -> weights [1.0, 1.0, 0.6309, 0.5, ...];
                   1 -> weights [1.0, 0.6309, 0.5, ...].
    :raises ValueError: for any other method.
    :return: DCG value (0.0 for an empty prefix).

    >>> dcg_at_k([3, 2, 3, 0, 0, 1, 2, 2, 3, 0], 1)
    3.0
    """
    # np.asfarray was deprecated and removed in NumPy 2.0; asarray with an
    # explicit float dtype is the documented equivalent.
    r = np.asarray(r, dtype=float)[:k]
    if r.size:
        if (method == 0):
            # Rank 1 is undiscounted; ranks >= 2 are discounted by log2(rank).
            return (r[0] + np.sum((r[1:] / np.log2(np.arange(2, (r.size + 1))))))
        elif (method == 1):
            return np.sum((r / np.log2(np.arange(2, (r.size + 2)))))
        else:
            raise ValueError('method must be 0 or 1.')
    return 0.0
def ndcg_at_k(r, k, method=0):
    """Normalized DCG at rank k: DCG divided by the ideal (sorted) DCG.

    Returns 0.0 when the ideal DCG is zero (no relevant items at all).
    """
    ideal = dcg_at_k(sorted(r, reverse=True), k, method)
    if not ideal:
        return 0.0
    return dcg_at_k(r, k, method) / ideal
def create_logger(name='result_logger', path='results.log'):
    """Return a message-only file logger for `name`, creating it at most once.

    :param name: logger name passed to logging.getLogger.
    :param path: file the logger writes to.
    :return: the configured logging.Logger.
    """
    logger = logging.getLogger(name)
    logger.setLevel(logging.INFO)
    # logging.getLogger returns the same object for the same name; the
    # original attached a fresh FileHandler on every call, so repeated calls
    # duplicated every log line.  Only attach a handler the first time.
    if not logger.handlers:
        file_handler = logging.FileHandler(path)
        formatter = logging.Formatter('%(message)s')
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
    return logger
def evaluation_methods(df, metrics):
    """
    Generate evaluation scores for top-k ranking metrics.

    :param df: dataframe with uid/iid/score/label/q columns, where q is the
        0/1 selection indicator produced by the optimizer.
    :param metrics: list of strings like 'ndcg@10'; the integer after '@' is k.
    :return: list of metric values in the same order as `metrics`.
    """
    evaluations = []
    data_df = df.copy(deep=True)
    # Rank by q * score so that unselected items (q == 0) sink to the bottom
    # of every user's list before the per-user top-k slices below.
    data_df['q*s'] = (data_df['q'] * data_df['score'])
    for metric in metrics:
        k = int(metric.split('@')[(- 1)])
        tmp_df = data_df.sort_values(by='q*s', ascending=False, ignore_index=True)
        # groupby preserves the sorted order inside each user's group.
        df_group = tmp_df.groupby('uid')
        if metric.startswith('ndcg@'):
            ndcgs = []
            for (uid, group) in df_group:
                ndcgs.append(ndcg_at_k(group['label'].tolist()[:k], k=k, method=1))
            evaluations.append(np.average(ndcgs))
        elif metric.startswith('hit@'):
            hits = []
            for (uid, group) in df_group:
                # Hit = at least one positive label in the top k.
                hits.append(int((np.sum(group['label'][:k]) > 0)))
            evaluations.append(np.average(hits))
        elif metric.startswith('precision@'):
            precisions = []
            for (uid, group) in df_group:
                if (len(group['label'].tolist()) < k):
                    # Debug aid: a user with fewer than k items makes
                    # precision_at_k raise ValueError below.
                    print(group)
                    print(uid)
                precisions.append(precision_at_k(group['label'].tolist()[:k], k=k))
            evaluations.append(np.average(precisions))
        elif metric.startswith('recall@'):
            recalls = []
            for (uid, group) in df_group:
                # Users with no positives are skipped (recall undefined).
                if (np.sum(group['label']) == 0):
                    continue
                recalls.append(((1.0 * np.sum(group['label'][:k])) / np.sum(group['label'])))
            evaluations.append(np.average(recalls))
        elif metric.startswith('f1@'):
            f1 = []
            for (uid, group) in df_group:
                if (np.sum(group['label']) == 0):
                    continue
                # F1 = 2 * hits / (positives + k): harmonic mean of
                # precision (hits / k) and recall (hits / positives).
                f1.append(((2 * np.sum(group['label'][:k])) / (np.sum(group['label']) + k)))
            evaluations.append(np.average(f1))
    return evaluations
def get_args():
    """Parse and log the command-line options for the preprocessing script."""
    parser = argparse.ArgumentParser()
    options = [
        (('-m', '--batch-size'),
         dict(help='The size of the mini batches', default=8, required=False, type=int)),
        (('--language',),
         dict(help='The language to use', required=True, type=str)),
        (('--ud-path',),
         dict(help='The path to raw ud data', default='data/ud/ud-treebanks-v2.5/', required=False, type=str)),
        (('--output-path',),
         dict(help='The path to save processed data', default='data/processed/', required=False, type=str)),
    ]
    for flags, spec in options:
        parser.add_argument(*flags, **spec)
    parsed = parser.parse_args()
    logging.info(parsed)
    return parsed
def get_ud_file_base(ud_path, language):
    """Join the UD root directory with the language's treebank file template."""
    treebank = UD_LIST[language]
    return os.path.join(ud_path, treebank)
def get_data_file_base(output_path, language):
    """Create <output_path>/<language> and return the pickle filename template."""
    lang_dir = os.path.join(output_path, language)
    util.mkdir(lang_dir)
    return os.path.join(lang_dir, '%s--%s.pickle.bz2')
def load_bert(bert_name):
    """Load a pretrained BERT tokenizer and model, in eval mode on the configured device."""
    tokenizer = BertTokenizer.from_pretrained(bert_name)
    model = BertModel.from_pretrained(bert_name).to(device=constants.device)
    model.eval()
    return (tokenizer, model)
def tokenize_ud(file_name, bert_tokenizer):
    """Read a CoNLL-U treebank and align UD tokens with BERT word pieces.

    :param file_name: path to a .conllu file.
    :param bert_tokenizer: BertTokenizer used to split UD tokens into pieces.
    :return: (all_ud_tokens, all_bert_tokens, all_bert2target_map, all_ud_data)
        where all_bert2target_map[i][j] is the number of BERT pieces produced
        by the j-th UD token of sentence i.  Sentences longer than 510 BERT
        pieces (the 512 limit minus [CLS]/[SEP]) are skipped entirely.
    """
    all_ud_tokens = []
    all_bert_tokens = []
    all_bert2target_map = []
    all_ud_data = []
    with open(file_name, 'r', encoding='utf-8') as file:
        for token_list in parse_incr(file):
            ud_tokens = [t['form'] for t in token_list]
            ud_data = [{'word': t['form'], 'pos': t['upostag'], 'head': t['head'], 'rel': t['deprel']}
                       for t in token_list]
            ud2bert_mapping = []
            bert_tokens = []
            for token in ud_tokens:
                bert_decomposition = bert_tokenizer.tokenize(token)
                if not bert_decomposition:
                    # Some characters tokenize to nothing; map them to [UNK]
                    # so every UD token keeps at least one BERT piece and the
                    # alignment stays one-to-many.
                    bert_decomposition = ['[UNK]']
                bert_tokens += bert_decomposition
                ud2bert_mapping.append(len(bert_decomposition))
            if len(bert_tokens) <= 510:
                all_ud_tokens.append(ud_tokens)
                all_bert2target_map.append(ud2bert_mapping)
                all_bert_tokens.append(bert_tokens)
                all_ud_data.append(ud_data)
    return (all_ud_tokens, all_bert_tokens, all_bert2target_map, all_ud_data)
def embed_bert(all_bert_tokens, batch_size, model, bert_tokenizer):
    """Run BERT over pre-tokenized sentences in padded mini-batches.

    :param all_bert_tokens: list of sentences, each a list of BERT word pieces.
    :param batch_size: number of sentences per forward pass.
    :param model: BERT model already on the target device.
    :param bert_tokenizer: tokenizer used only for token->id conversion.
    :return: one numpy array per sentence containing last-layer hidden states
        for the real word pieces only ([CLS], [SEP] and [PAD] are stripped).
    """
    all_bert_embeddings = []
    batch_num = 0
    for batch_start in range(0, len(all_bert_tokens), batch_size):
        batch_num += 1
        if ((batch_num % 10) == 0):
            logging.info('Processing batch {} to embeddings'.format(batch_num))
        batch_end = (batch_start + batch_size)
        batch = all_bert_tokens[batch_start:batch_end]
        # +2 accounts for the [CLS] and [SEP] tokens added below.
        lengths = [(len(sentence) + 2) for sentence in batch]
        longest_sent = max(lengths)
        padded_line = (['[PAD]'] * longest_sent)
        attention_off = ([0] * longest_sent)
        attention_on = ([1] * longest_sent)
        padded_batch = []
        attention_mask = []
        for sentence in range(len(batch)):
            sentence_len = lengths[sentence]
            # [CLS] <pieces> [SEP] followed by padding up to the batch maximum.
            padded_batch.append(bert_tokenizer.convert_tokens_to_ids((((['[CLS]'] + batch[sentence]) + ['[SEP]']) + padded_line[sentence_len:])))
            attention_mask.append((attention_on[:sentence_len] + attention_off[sentence_len:]))
        input_ids = torch.tensor(padded_batch).to(device=constants.device)
        attention_mask_tensor = torch.tensor(attention_mask).to(device=constants.device)
        # NOTE(review): assumes the model returns a (sequence_output, pooled)
        # tuple, i.e. a pre-4.x transformers interface — confirm library version.
        (outputs, _) = model(input_ids, attention_mask=attention_mask_tensor)
        # Keep indices 1 .. length-2: drop [CLS], [SEP] and any padding.
        last_hidden_states = [x[1:(lengths[i] - 1)] for (i, x) in enumerate(outputs.cpu().numpy())]
        all_bert_embeddings += last_hidden_states
    return all_bert_embeddings
def process_bert_token(token):
    """Strip the '##' word-piece continuation marker from a BERT token, if present."""
    return token[2:] if token.startswith('##') else token
def check_bert_word(word, bert_tokens, target_tokens):
    """Verify that BERT word pieces re-concatenate to the original word.

    :param word: the original UD token string.
    :param bert_tokens: the BERT pieces produced for that token.
    :param target_tokens: the full sentence's tokens, used only for the warning.
    :return: True when joining the pieces (minus '##' markers) reproduces
        `word`; otherwise logs a warning with sentence context and returns False.
    """
    word_bert = ''.join([process_bert_token(token) for token in bert_tokens])
    if (word_bert == word):
        return True
    logging.warning("Failed to embed '{}' from BERT tokens {} in sentence {}".format(word, '+'.join(bert_tokens), '+'.join(target_tokens)))
    # Removed dead local `saved = []` left over from an earlier refactor.
    return False
def combine_bert(all_target_token, all_bert2target_map, all_bert_tokens, all_bert_embeddings):
    """Merge per-word-piece BERT embeddings back into per-UD-token embeddings.

    Each UD token's embedding is the mean of its word pieces' embeddings.

    :param all_target_token: per-sentence lists of UD token strings.
    :param all_bert2target_map: per-sentence lists giving the number of BERT
        pieces for each UD token.
    :param all_bert_tokens: per-sentence BERT pieces (kept for interface
        compatibility; not needed for the merge itself).
    :param all_bert_embeddings: per-sentence arrays of piece embeddings.
    :return: (output_embeddings, output_words) — per-sentence lists of token
        embeddings and the corresponding token strings.
    """
    output_embeddings = []
    output_words = []
    sentence_num = 0
    for sentence in range(len(all_target_token)):
        sentence_num += 1
        if ((sentence_num % 10000) == 0):
            logging.info('Re-merging and saving sentence {}'.format(sentence_num))
        target_tokens = all_target_token[sentence]
        target_tokens_mapping = all_bert2target_map[sentence]
        last_hidden_states = all_bert_embeddings[sentence]
        # The mapping must assign a piece count to every UD token (hoisted out
        # of the inner loop; it is loop-invariant there).
        assert (len(target_tokens_mapping) == len(target_tokens))
        saved = []
        words = []
        (bert_index_start, bert_index_end) = (0, 0)
        for target_index in range(0, len(target_tokens)):
            num_of_bert_toks = target_tokens_mapping[target_index]
            word = target_tokens[target_index]
            bert_index_end += num_of_bert_toks
            # Mean-pool the contiguous run of pieces belonging to this token.
            embedding = last_hidden_states[bert_index_start:bert_index_end].mean(axis=0)
            saved.append(embedding)
            words.append(word)
            bert_index_start = bert_index_end
        output_embeddings.append(saved)
        output_words.append(words)
    return (output_embeddings, output_words)
def load_fasttext(language):
    """Load (downloading and caching if needed) the fastText model for a language.

    Downloads cc.<lang>.300.bin on first use, moves it (and its .gz archive)
    into data/fasttext/, and loads it from there on subsequent calls.
    """
    lang = constants.LANGUAGE_CODES[language]
    ft_path = 'data/fasttext'
    ft_fname = os.path.join(ft_path, ('cc.%s.300.bin' % lang))
    if (not os.path.exists(ft_fname)):
        logging.info('Downloading fasttext model')
        temp_fname = fasttext.util.download_model(lang, if_exists='ignore')
        util.mkdir(ft_path)
        # Move both the extracted model and its archive into the cache dir.
        os.rename(temp_fname, ft_fname)
        os.rename((temp_fname + '.gz'), (ft_fname + '.gz'))
    logging.info('Loading fasttext model')
    return fasttext.load_model(ft_fname)
def get_fasttext(fasttext_model, words):
    """Look up a fastText vector for every word, sentence by sentence.

    :param fasttext_model: mapping-like object supporting ``model[word]``.
    :param words: per-sentence lists of token strings.
    :return: per-sentence lists of word vectors, aligned with `words`.
    """
    embeddings = []
    for sentence in words:
        embeddings.append([fasttext_model[word] for word in sentence])
    return embeddings
def process_file(bert_model, bert_tokenizer, fasttext_model, batch_size, language, ud_file, output_file):
    """Run the full embedding pipeline for one UD split and write three pickles.

    Produces '%s'-formatted outputs for 'ud' (annotations), 'fast' (fastText
    embeddings) and 'bert' (BERT embeddings), each paired with the token
    strings.  Sentences whose BERT embedding list is empty are dropped from
    all three outputs so they stay aligned.
    """
    logging.info('Processing file {}'.format(ud_file))
    logging.info('PHASE ONE: reading file and tokenizing')
    (all_target_tokens, all_bert_tokens, all_bert2target_map, all_ud) = tokenize_ud(ud_file, bert_tokenizer)
    logging.info('PHASE TWO: padding, batching, and embedding for bert')
    all_bert_embeddings = embed_bert(all_bert_tokens, batch_size, bert_model, bert_tokenizer)
    logging.info('PHASE THREE: re-merging BERT tokens')
    (bert_embeddings, words) = combine_bert(all_target_tokens, all_bert2target_map, all_bert_tokens, all_bert_embeddings)
    # Free large intermediates as soon as possible to limit peak memory.
    del all_target_tokens, all_bert2target_map, all_bert_tokens, all_bert_embeddings
    logging.info('PHASE FOUR: getting fasttext embeddings')
    fast_embeddings = get_fasttext(fasttext_model, words)
    logging.info('PHASE FIVE: saving')
    output_data_raw = list(zip(bert_embeddings, fast_embeddings, all_ud, words))
    del bert_embeddings, fast_embeddings, all_ud, words
    # Keep only sentences that actually received BERT embeddings.
    output_data = [(bert_embs, fast_embs, ud, words) for (bert_embs, fast_embs, ud, words) in output_data_raw if (bert_embs != [])]
    del output_data_raw
    output_ud = [(ud, words) for (_, _, ud, words) in output_data]
    output_bert = [(bert_embs, words) for (bert_embs, _, _, words) in output_data]
    output_fast = [(fast_embs, words) for (_, fast_embs, _, words) in output_data]
    del output_data
    util.write_data((output_file % 'ud'), output_ud)
    del output_ud
    util.write_data((output_file % 'fast'), output_fast)
    del output_fast
    util.write_data((output_file % 'bert'), output_bert)
    del output_bert
    logging.info('Completed {}'.format(ud_file))
def process(language, ud_path, batch_size, bert_name, output_path):
    """Embed one language's UD train/dev/test splits with BERT and fastText."""
    logging.info('Loading pre-trained BERT network')
    bert_tokenizer, bert_model = load_bert(bert_name)
    fasttext_model = load_fasttext(language)
    logging.info('Precessing language %s' % language)
    # Both bases are '%s' templates: one for the split name, one for the
    # representation suffix filled in later by process_file.
    ud_file_base = get_ud_file_base(ud_path, language)
    output_file_base = get_data_file_base(output_path, language)
    for mode in ('train', 'dev', 'test'):
        ud_file = ud_file_base % mode
        output_file = output_file_base % (mode, '%s')
        process_file(bert_model, bert_tokenizer, fasttext_model, batch_size, language, ud_file, output_file)
    logging.info('Process finished')
def main():
    """Entry point: configure logging, parse args, and run preprocessing without gradients."""
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(processName)s : %(message)s', level=logging.INFO)
    args = get_args()
    bert_name = 'bert-base-multilingual-cased'
    # Inference only — disable autograd bookkeeping for the whole pipeline.
    with torch.no_grad():
        process(args.language, args.ud_path, args.batch_size, bert_name, args.output_path)
def generate_batch(batch):
    """Collate a list of (x, y) pairs into stacked batch tensors on constants.device."""
    # torch.stack(ts, dim=0) is equivalent to cat-ing unsqueezed tensors.
    xs = torch.stack([item[0] for item in batch], dim=0)
    ys = torch.stack([item[1] for item in batch], dim=0)
    return (xs.to(device=constants.device), ys.to(device=constants.device))
def get_data_cls(task):
    """Map a task name to its Dataset class; None for an unknown task."""
    registry = {
        'pos_tag': PosTagDataset,
        'dep_label': DepLabelDataset,
    }
    return registry.get(task)
def get_data_loader(dataset_cls, data_path, language, representations, pca_size, mode, batch_size, shuffle, pca=None, classes=None, words=None):
    """Build one dataset split and wrap it in a DataLoader.

    Returns the loader along with the dataset's fitted pca, classes and words
    so train-split state can be reused for dev/test.
    """
    dataset = dataset_cls(data_path, language, representations, pca_size, mode, pca=pca, classes=classes, words=words)
    loader = DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, collate_fn=generate_batch)
    return (loader, dataset.pca, dataset.classes, dataset.words)
def get_data_loaders(data_path, task, language, representations, pca_size, batch_size):
    """Create train/dev/test loaders; dev and test reuse the train split's pca, classes and words."""
    dataset_cls = get_data_cls(task)
    common = (dataset_cls, data_path, language, representations, pca_size)
    # Train first: it fits pca/classes/words that the other splits must share.
    trainloader, pca, classes, words = get_data_loader(*common, 'train', batch_size=batch_size, shuffle=True)
    devloader, _, classes, words = get_data_loader(*common, 'dev', batch_size=batch_size, shuffle=False, pca=pca, classes=classes, words=words)
    testloader, _, classes, words = get_data_loader(*common, 'test', batch_size=batch_size, shuffle=False, pca=pca, classes=classes, words=words)
    return (trainloader, devloader, testloader, testloader.dataset.n_classes, testloader.dataset.n_words)
class DepLabelDataset(PosTagDataset):
    """Dataset of (dependent, head) word pairs labelled with UD dependency relations.

    Reuses PosTagDataset's loading machinery; each sample pairs a token with
    its syntactic head, either as concatenated embeddings or as index pairs.
    """

    def load_data_index(self):
        """Build raw (tail word, head word) string pairs and relation labels."""
        data_ud = util.read_data((self.input_name_base % (self.mode, 'ud')))
        (x_raw, y_raw) = ([], [])
        for (sentence_ud, words) in data_ud:
            for (i, token) in enumerate(sentence_ud):
                head = token['head']
                rel = token['rel']
                # Skip unannotated tokens and the root (which has no head word).
                if ((rel == '_') or (rel == 'root')):
                    continue
                x_raw_tail = words[i]
                # UD heads are 1-based (0 is reserved for the root).
                x_raw_head = words[(head - 1)]
                x_raw += [[x_raw_tail, x_raw_head]]
                y_raw += [rel]
        x_raw = np.array(x_raw)
        y_raw = np.array(y_raw)
        return (x_raw, y_raw)

    def load_index(self, x_raw, words=None):
        """Map word-pair strings to integer index pairs, extending `words` with unseen ones."""
        if (words is None):
            words = []
        # Sorted for a deterministic vocabulary order.
        new_words = sorted(list((set(np.unique(x_raw)) - set(words))))
        if new_words:
            words = np.concatenate([words, new_words])
        words_dict = {word: i for (i, word) in enumerate(words)}
        x = np.array([[words_dict[token] for token in tokens] for tokens in x_raw])
        self.x = torch.from_numpy(x)
        self.words = words
        self.n_words = len(words)

    def load_data(self):
        """Build concatenated (tail, head) embedding vectors and relation labels."""
        data_ud = util.read_data((self.input_name_base % (self.mode, 'ud')))
        data_embeddings = util.read_data((self.input_name_base % (self.mode, self.representation)))
        (x_raw, y_raw) = ([], [])
        for ((sentence_ud, words), (sentence_emb, _)) in zip(data_ud, data_embeddings):
            for (i, token) in enumerate(sentence_ud):
                head = token['head']
                rel = token['rel']
                if ((rel == '_') or (rel == 'root')):
                    continue
                x_raw_tail = sentence_emb[i]
                # UD heads are 1-based (0 is reserved for the root).
                x_raw_head = sentence_emb[(head - 1)]
                x_raw += [np.concatenate([x_raw_tail, x_raw_head])]
                y_raw += [rel]
        x_raw = np.array(x_raw)
        y_raw = np.array(y_raw)
        return (x_raw, y_raw)
class PosTagDataset(Dataset):
    """Token-level POS-tagging dataset over precomputed UD data and embeddings.

    Depending on `representation`, inputs are either embedding vectors
    ('bert'/'fast') or integer word indices (other values, e.g. one-hot /
    random-embedding setups).  Train mode fits classes/words; dev/test reuse
    the train split's state passed in via the constructor.
    """

    def __init__(self, data_path, language, representation, embedding_size, mode, pca=None, classes=None, words=None):
        """
        :param data_path: root of the preprocessed pickle files.
        :param language: language name used to locate the files.
        :param representation: 'bert', 'fast', or an index-based representation.
        :param embedding_size: expected embedding dimensionality.
        :param mode: 'train', 'dev' or 'test'.
        :param pca: fitted PCA from the train split (or None).
        :param classes: label vocabulary from the train split (or None).
        :param words: word vocabulary from the train split (or None).
        """
        self.data_path = data_path
        self.language = language
        self.mode = mode
        self.representation = representation
        self.embedding_size = embedding_size
        self.input_name_base = get_file_names(data_path, language)
        self.process(pca, classes, words)
        # Inputs and targets must stay aligned one-to-one.
        assert (self.x.shape[0] == self.y.shape[0])
        self.n_instances = self.x.shape[0]

    def process(self, pca, classes, words):
        """Dispatch to embedding-based or index-based loading by representation."""
        if (self.representation in ['bert', 'fast']):
            self._process(pca, classes)
            self.words = words
            self.n_words = None
        else:
            self._process_index(classes, words)
            self.pca = pca

    def _process_index(self, classes, words):
        """Load raw word strings and encode both inputs and labels as indices."""
        (x_raw, y_raw) = self.load_data_index()
        self.load_index(x_raw, words=words)
        self.load_classes(y_raw, classes=classes)

    def load_data_index(self):
        """Collect (word, pos) pairs, skipping '_' and 'X' (unannotated/other) tags."""
        data_ud = util.read_data((self.input_name_base % (self.mode, 'ud')))
        (x_raw, y_raw) = ([], [])
        for (sentence_ud, words) in data_ud:
            for (i, token) in enumerate(sentence_ud):
                pos_tag = token['pos']
                if ((pos_tag == '_') or (pos_tag == 'X')):
                    continue
                x_raw += [words[i]]
                y_raw += [pos_tag]
        x_raw = np.array(x_raw)
        y_raw = np.array(y_raw)
        return (x_raw, y_raw)

    def load_index(self, x_raw, words=None):
        """Encode words as indices; fit a vocabulary in train mode, extend it otherwise."""
        if (words is None):
            (x, words) = pd.factorize(x_raw, sort=True)
        else:
            # Append previously unseen words so dev/test indices stay valid.
            new_words = (set(x_raw) - set(words))
            if new_words:
                words = np.concatenate([words, list(new_words)])
            words_dict = {word: i for (i, word) in enumerate(words)}
            x = np.array([words_dict[token] for token in x_raw])
        self.x = torch.from_numpy(x)
        self.words = words
        self.n_words = len(words)

    def _process(self, pca, classes):
        """Load embedding vectors and encode labels."""
        (x_raw, y_raw) = self.load_data()
        self.load_embeddings(x_raw, pca=pca)
        self.load_classes(y_raw, classes=classes)

    def load_data(self):
        """Collect (embedding, pos) pairs, skipping '_' and 'X' tags."""
        data_ud = util.read_data((self.input_name_base % (self.mode, 'ud')))
        data_embeddings = util.read_data((self.input_name_base % (self.mode, self.representation)))
        (x_raw, y_raw) = ([], [])
        for ((sentence_ud, words), (sentence_emb, _)) in zip(data_ud, data_embeddings):
            for (i, token) in enumerate(sentence_ud):
                pos_tag = token['pos']
                if ((pos_tag == '_') or (pos_tag == 'X')):
                    continue
                x_raw += [sentence_emb[i]]
                y_raw += [pos_tag]
        x_raw = np.array(x_raw)
        y_raw = np.array(y_raw)
        return (x_raw, y_raw)

    def load_embeddings(self, x_raw, pca=None):
        """Store embeddings as a tensor.  NOTE(review): despite the names, no
        PCA projection is applied here — `pca` is only stored for reuse."""
        pca_x = x_raw
        self.assert_size(pca_x)
        self.x = torch.from_numpy(pca_x)
        self.pca = pca

    def assert_size(self, x):
        """Sanity-check that vectors match the configured embedding size."""
        assert (len(x[0]) == self.embedding_size)

    def load_classes(self, y_raw, classes=None):
        """Encode labels; fit the class vocabulary in train mode, extend it otherwise."""
        if (self.mode != 'train'):
            # Dev/test must reuse the train split's classes.
            assert (classes is not None)
        if (classes is None):
            (y, classes) = pd.factorize(y_raw, sort=True)
        else:
            new_classes = (set(y_raw) - set(classes))
            if new_classes:
                classes = np.concatenate([classes, list(new_classes)])
            classes_dict = {pos_class: i for (i, pos_class) in enumerate(classes)}
            y = np.array([classes_dict[token] for token in y_raw])
        self.y = torch.from_numpy(y)
        self.classes = classes
        self.n_classes = classes.shape[0]

    def __len__(self):
        return self.n_instances

    def __getitem__(self, index):
        return (self.x[index], self.y[index])
class BaseModel(nn.Module, ABC):
    """Abstract base for probe models: best-state tracking plus (de)serialization."""
    name = 'base'

    def __init__(self):
        super().__init__()
        # Snapshot of the best-performing weights; populated by set_best().
        self.best_state_dict = None

    def set_best(self):
        """Remember the current weights as the best seen so far."""
        self.best_state_dict = copy.deepcopy(self.state_dict())

    def recover_best(self):
        """Restore the weights saved by the last set_best() call."""
        self.load_state_dict(self.best_state_dict)

    def save(self, path):
        """Serialize constructor kwargs and weights to <path>/model.tch."""
        fname = self.get_name(path)
        torch.save({'kwargs': self.get_args(), 'model_state_dict': self.state_dict()}, fname)

    @abstractmethod
    def get_args(self):
        """Return the kwargs needed to reconstruct this model via cls(**kwargs)."""
        pass

    @classmethod
    def load(cls, path):
        """Rebuild a model instance from a checkpoint written by save()."""
        checkpoints = cls.load_checkpoint(path)
        model = cls(**checkpoints['kwargs'])
        model.load_state_dict(checkpoints['model_state_dict'])
        return model

    @classmethod
    def load_checkpoint(cls, path):
        """Load the raw checkpoint dict, mapping tensors onto the configured device."""
        fname = cls.get_name(path)
        return torch.load(fname, map_location=constants.device)

    @classmethod
    def get_name(cls, path):
        """Checkpoint filename inside `path`."""
        return ('%s/model.tch' % path)
class TransparentDataParallel(nn.DataParallel):
    """DataParallel wrapper that forwards this project's model API.

    nn.DataParallel only proxies forward(); the custom methods used by the
    training loop (train_batch, eval_batch, best-state handling, save) are
    delegated explicitly to the wrapped module.
    """

    def train_batch(self, *args, **kwargs):
        """Delegate one optimization step to the wrapped model."""
        return self.module.train_batch(*args, **kwargs)

    def eval_batch(self, *args, **kwargs):
        """Delegate batch evaluation to the wrapped model."""
        return self.module.eval_batch(*args, **kwargs)

    def set_best(self, *args, **kwargs):
        """Delegate best-weights snapshotting to the wrapped model."""
        return self.module.set_best(*args, **kwargs)

    def recover_best(self, *args, **kwargs):
        """Delegate best-weights restoration to the wrapped model."""
        return self.module.recover_best(*args, **kwargs)

    def save(self, *args, **kwargs):
        """Delegate checkpoint serialization to the wrapped model."""
        return self.module.save(*args, **kwargs)
class MLP(BaseModel):
    """MLP probe mapping a word representation to POS/dep-label logits.

    Reported losses are in bits (cross-entropy divided by log 2).
    """

    name = 'mlp'

    def __init__(self, task, embedding_size=768, n_classes=3, hidden_size=5, nlayers=1, dropout=0.1, representation=None, n_words=None):
        super().__init__()
        self.dropout_p = dropout
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.nlayers = nlayers
        self.n_classes = n_classes
        self.representation = representation
        self.n_words = n_words
        self.task = task
        # Index-based representations need a learned (or frozen random)
        # embedding table; 'bert'/'fast' inputs arrive as dense vectors.
        if (self.representation in ['onehot', 'random']):
            self.build_embeddings(n_words, embedding_size)
        self.mlp = self.build_mlp()
        # build_mlp() sets self.final_hidden_size as a side effect.
        self.out = nn.Linear(self.final_hidden_size, n_classes)
        self.dropout = nn.Dropout(dropout)
        self.criterion = nn.CrossEntropyLoss()

    def build_embeddings(self, n_words, embedding_size):
        """Create the word-embedding table.

        For the 'dep_label' task each input is a word pair, so each word gets
        half the embedding width (self.embedding_size is rounded down to an
        even concatenated size).
        """
        if (self.task == 'dep_label'):
            self.embedding_size = (int((embedding_size / 2)) * 2)
            self.embedding = nn.Embedding(n_words, int((embedding_size / 2)))
        else:
            self.embedding = nn.Embedding(n_words, embedding_size)
        if (self.representation == 'random'):
            # 'random' baseline: keep the randomly initialized table frozen.
            self.embedding.weight.requires_grad = False

    def build_mlp(self):
        """Stack Linear/ReLU/Dropout layers, halving the width each layer."""
        src_size = self.embedding_size
        tgt_size = self.hidden_size
        mlp = []
        for layer in range(self.nlayers):
            mlp += [nn.Linear(src_size, tgt_size)]
            mlp += [nn.ReLU()]
            mlp += [nn.Dropout(self.dropout_p)]
            (src_size, tgt_size) = (tgt_size, int((tgt_size / 2)))
        # Record the width that feeds the output projection.
        self.final_hidden_size = src_size
        return nn.Sequential(*mlp)

    def forward(self, x):
        """Return unnormalized class logits for a batch of inputs."""
        if (self.representation in ['onehot', 'random']):
            x = self.get_embeddings(x)
        x_emb = self.dropout(x)
        x = self.mlp(x_emb)
        logits = self.out(x)
        return logits

    def get_embeddings(self, x):
        """Look up embeddings; flatten per instance when x holds word pairs."""
        x_emb = self.embedding(x)
        if (len(x.shape) > 1):
            x_emb = x_emb.reshape(x.shape[0], (- 1))
        return x_emb

    def train_batch(self, data, target, optimizer, criterion):
        """Run one optimization step; return the mean batch loss in bits.

        NOTE(review): the `criterion` argument is ignored — self.criterion is
        used instead. Confirm whether the parameter can be dropped.
        """
        optimizer.zero_grad()
        mlp_out = self(data)
        loss = self.criterion(mlp_out, target)
        loss.backward()
        optimizer.step()
        return (loss.item() / math.log(2))

    def eval_batch(self, data, target):
        """Return (summed loss in bits, summed correct count) for the batch."""
        mlp_out = self(data)
        loss = (self.criterion(mlp_out, target) / math.log(2))
        accuracy = (mlp_out.argmax(dim=(- 1)) == target).float().detach().sum()
        # Scale the mean loss back to a per-batch sum for later averaging.
        loss = (loss.item() * data.shape[0])
        return (loss, accuracy)

    def get_args(self):
        """Constructor kwargs needed to rebuild this model from a checkpoint."""
        return {'nlayers': self.nlayers, 'hidden_size': self.hidden_size, 'embedding_size': self.embedding_size, 'dropout': self.dropout_p, 'n_classes': self.n_classes, 'representation': self.representation, 'n_words': self.n_words, 'task': self.task}
def args2list(args):
    """Serialize the shared CLI options back into an argv-style flag list."""
    options = [
        ('--data-path', args.data_path),
        ('--task', args.task),
        ('--language', args.language),
        ('--batch-size', args.batch_size),
        ('--representation', args.representation),
        ('--eval-batches', args.eval_batches),
        ('--wait-epochs', args.wait_epochs),
        ('--checkpoint-path', args.checkpoint_path),
        ('--seed', args.seed),
    ]
    argv = []
    for flag, value in options:
        argv.append(flag)
        argv.append(str(value))
    return argv
def get_hyperparameters(search):
    """Convert one sampled hyperparameter tuple into CLI flag/value pairs."""
    flags = ('--hidden-size', '--nlayers', '--dropout', '--pca-size')
    return dict2list(dict(zip(flags, search)))
def get_hyperparameters_search(n_runs, representation):
    """Sample n_runs hyperparameter tuples uniformly from a fixed grid.

    Each tuple is (hidden_size, nlayers, dropout, pca_size); the pca_size
    candidates depend on the representation.
    """
    # Log-spaced candidate widths (sets drop duplicate rounded values).
    onehot_sizes = list({int(2 ** x) for x in np.arange(5.6, 8.2, 0.01)})
    hidden_size = list({int(2 ** x) for x in np.arange(2, 9, 0.01)})
    nlayers = [1, 2, 3]
    dropout = list(np.arange(0.0, 0.51, 0.01))
    if representation in ['onehot', 'random']:
        pca_size = onehot_sizes
    elif representation == 'fast':
        pca_size = list([300])
    elif representation == 'bert':
        pca_size = list([768])
    else:
        raise ValueError('Invalid representation %s' % representation)
    grid = list(itertools.product(hidden_size, nlayers, dropout, pca_size))
    return random.sample(grid, n_runs)
def dict2list(data):
    """Flatten a mapping into [key, str(value), key, str(value), ...]."""
    flat = []
    for key, value in data.items():
        flat.extend([key, str(value)])
    return flat
def write_done(done_fname):
    """Drop a marker file signalling that training completed."""
    with open(done_fname, 'w') as marker:
        marker.write('done training\n')
def append_result(fname, values):
    """Append one comma-separated row to *fname*, creating it if missing."""
    row = ','.join(values) + '\n'
    with open(fname, 'a+') as sink:
        sink.write(row)
def get_results(out, err):
    """Parse final train/dev/test loss and accuracy from a run's stdout.

    Expects the subprocess to end with a 'Final loss.' line followed by a
    'Final acc.' line (then a trailing newline). Returns the six metric
    strings in order: losses then accuracies.

    Raises ValueError (with the subprocess stderr) when the output does not
    match the expected format.
    """
    # Raw strings with escaped dots: the original used plain strings and an
    # unescaped '.', which matched any character.
    loss_pattern = r'^Final loss\. Train: (\d\.\d+) Dev: (\d\.\d+) Test: (\d\.\d+)$'
    acc_pattern = r'^Final acc\. Train: (\d\.\d+) Dev: (\d\.\d+) Test: (\d\.\d+)$'
    output = out.decode().split('\n')
    try:
        # [-3]/[-2] because the output ends with a trailing empty string.
        m = re.match(loss_pattern, output[(- 3)])
        (train_loss, dev_loss, test_loss) = m.groups()
        m = re.match(acc_pattern, output[(- 2)])
        (train_acc, dev_acc, test_acc) = m.groups()
    except (AttributeError, IndexError):
        # AttributeError: a pattern did not match (m is None);
        # IndexError: fewer than three output lines.
        # The original bare `except:` hid unrelated failures.
        print('Output:', output)
        raise ValueError('Error in subprocess: %s' % err.decode())
    return [train_loss, dev_loss, test_loss, train_acc, dev_acc, test_acc]
def main():
    """Random-search driver: launch train.py once per sampled config.

    Results accumulate in all_results.txt; on restart the search resumes
    from the number of rows already written.
    """
    args = get_args()
    n_runs = 50
    # Fixed typo: local variable was misspelled 'ouput_path'.
    output_path = os.path.join(args.checkpoint_path, args.task, args.language, args.representation)
    results_fname = os.path.join(output_path, 'all_results.txt')
    done_fname = os.path.join(output_path, 'finished.txt')
    # file_len returns 0 for a missing file, so -1 means "no results yet".
    curr_iter = (util.file_len(results_fname) - 1)
    util.mkdir(output_path)
    if (curr_iter == (- 1)):
        res_columns = ['hidden_size', 'nlayers', 'dropout', 'pca_size', 'train_loss', 'dev_loss', 'test_loss', 'train_acc', 'dev_acc', 'test_acc']
        append_result(results_fname, res_columns)
        curr_iter = 0
    # NOTE(review): the grid is re-sampled on every invocation, so resuming
    # only continues the same search if the RNG seed state matches — confirm.
    search = get_hyperparameters_search(n_runs, args.representation)
    for hyper in tqdm(search[curr_iter:], initial=curr_iter, total=n_runs):
        hyperparameters = get_hyperparameters(hyper)
        my_env = os.environ.copy()
        cmd = ((['python', 'src/h02_learn/train.py'] + args2list(args)) + hyperparameters)
        tqdm.write(str(hyperparameters))
        # shell=False (list argv) — no shell-injection surface.
        process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=my_env)
        (out, err) = process.communicate()
        results = get_results(out, err)
        append_result(results_fname, ([str(x) for x in hyper] + results))
    write_done(done_fname)
def get_model_name(args):
    """Build the checkpoint folder name from architecture hyperparameters."""
    return 'nl_%d-es_%d-hs_%d-d_%.4f' % (
        args.nlayers, args.pca_size, args.hidden_size, args.dropout)
def get_args():
    """Parse CLI arguments for training one probe and derive runtime paths."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--data-path', type=str, required=True)
    parser.add_argument('--language', type=str, required=True)
    parser.add_argument('--task', type=str, required=True)
    parser.add_argument('--batch-size', type=int, default=64)
    parser.add_argument('--representation', type=str, required=True)
    parser.add_argument('--nlayers', type=int, default=2)
    parser.add_argument('--pca-size', type=int, default=300)
    parser.add_argument('--hidden-size', type=int, default=128)
    parser.add_argument('--dropout', type=float, default=0.33)
    parser.add_argument('--eval-batches', type=int, default=100)
    parser.add_argument('--wait-epochs', type=int, default=20)
    parser.add_argument('--checkpoint-path', type=str, required=True)
    parser.add_argument('--seed', type=int, default=20)
    args = parser.parse_args()
    # Early-stopping patience measured in batches.
    args.wait_iterations = (args.wait_epochs * args.eval_batches)
    # NOTE(review): save_path embeds pca_size via get_model_name BEFORE the
    # overrides below — the folder name reflects the CLI value, not the
    # effective one. Confirm this is intentional.
    args.save_path = ('%s/%s/%s/%s/%s' % (args.checkpoint_path, args.task, args.language, args.representation, get_model_name(args)))
    # Seed all RNGs for reproducibility.
    util.config(args.seed)
    print(args)
    # Dense representations have fixed widths, overriding --pca-size.
    if (args.representation == 'bert'):
        args.pca_size = 768
    elif (args.representation == 'fast'):
        args.pca_size = 300
    if (args.task == 'dep_label'):
        # Dependency-label probes concatenate two word vectors.
        args.pca_size = (args.pca_size * 2)
    return args
def get_model(n_classes, n_words, args):
    """Instantiate the MLP probe, wrapped for multi-GPU when available."""
    model = MLP(args.task,
                embedding_size=args.pca_size,
                n_classes=n_classes,
                hidden_size=args.hidden_size,
                nlayers=args.nlayers,
                dropout=args.dropout,
                representation=args.representation,
                n_words=n_words)
    if torch.cuda.device_count() > 1:
        model = TransparentDataParallel(model)
    return model.to(device=constants.device)
def _evaluate(evalloader, model): criterion = nn.CrossEntropyLoss().to(device=constants.device) (dev_loss, dev_acc) = (0, 0) for (x, y) in evalloader: (loss, acc) = model.eval_batch(x, y) dev_loss += loss dev_acc += acc n_instances = len(evalloader.dataset) return {'loss': (dev_loss / n_instances), 'acc': (dev_acc / n_instances)}
def evaluate(evalloader, model):
    """Evaluate with dropout disabled and no gradient tracking.

    Temporarily switches the model to eval mode, runs _evaluate, then
    restores training mode.
    """
    model.eval()
    with torch.no_grad():
        metrics = _evaluate(evalloader, model)
    model.train()
    return metrics
def train_epoch(trainloader, devloader, model, optimizer, criterion, train_info):
    """Run one pass over trainloader, evaluating on dev every eval_batches.

    Returns early (without finishing the epoch) once the early-stopping
    budget is exhausted.
    """
    for (x, y) in trainloader:
        loss = model.train_batch(x, y, optimizer, criterion)
        train_info.new_batch(loss)
        if train_info.eval:
            dev_results = evaluate(devloader, model)
            if train_info.is_best(dev_results):
                model.set_best()
        elif train_info.finish:
            # Early-stopping budget exhausted: report and bail out mid-epoch.
            train_info.print_progress(dev_results)
            return
    # NOTE(review): both print_progress calls rely on dev_results having been
    # bound by an earlier eval in THIS call. If no batch index in this epoch
    # hits a multiple of eval_batches (short loader), this raises NameError —
    # confirm whether that case can occur with the configured batch counts.
    train_info.print_progress(dev_results)
def train(trainloader, devloader, model, eval_batches, wait_iterations):
    """Optimize until dev loss stops improving for wait_iterations batches.

    Restores the best-on-dev weights into the model before returning.
    """
    optimizer = optim.Adam(model.parameters())
    criterion = nn.CrossEntropyLoss().to(device=constants.device)
    with tqdm(total=wait_iterations) as pbar:
        progress = TrainInfo(pbar, wait_iterations, eval_batches)
        while not progress.finish:
            train_epoch(trainloader, devloader, model,
                        optimizer, criterion, progress)
    model.recover_best()
def eval_all(model, trainloader, devloader, testloader):
    """Evaluate on all three splits, print the summary lines, return results."""
    results = [evaluate(loader, model)
               for loader in (trainloader, devloader, testloader)]
    (train_results, dev_results, test_results) = results
    print('Final loss. Train: %.4f Dev: %.4f Test: %.4f'
          % tuple(r['loss'] for r in results))
    print('Final acc. Train: %.4f Dev: %.4f Test: %.4f'
          % tuple(r['acc'] for r in results))
    return (train_results, dev_results, test_results)
def save_results(model, train_results, dev_results, test_results, results_fname):
    """Write a header row plus one row of hyperparameters and final metrics."""
    header = ['n_classes', 'embedding_size', 'hidden_size', 'nlayers',
              'dropout_p', 'train_loss', 'dev_loss', 'test_loss',
              'train_acc', 'dev_acc', 'test_acc']
    row = [model.n_classes, model.embedding_size, model.hidden_size,
           model.nlayers, model.dropout_p,
           train_results['loss'], dev_results['loss'], test_results['loss'],
           train_results['acc'], dev_results['acc'], test_results['acc']]
    util.write_csv(results_fname, [header, row])
def save_checkpoints(model, train_results, dev_results, test_results, save_path):
    """Persist the model weights and final metrics under save_path."""
    util.mkdir(save_path)
    model.save(save_path)
    save_results(model, train_results, dev_results, test_results,
                 save_path + '/results.csv')
def main():
    """Train one probe with the CLI hyperparameters, then save checkpoints."""
    args = get_args()
    loaders = get_data_loaders(args.data_path, args.task, args.language,
                               args.representation, args.pca_size,
                               args.batch_size)
    (trainloader, devloader, testloader, n_classes, n_words) = loaders
    print('Language: %s Train size: %d Dev size: %d Test size: %d'
          % (args.language, len(trainloader.dataset),
             len(devloader.dataset), len(testloader.dataset)))
    print(args)
    model = get_model(n_classes, n_words, args)
    train(trainloader, devloader, model, args.eval_batches,
          args.wait_iterations)
    all_results = eval_all(model, trainloader, devloader, testloader)
    (train_results, dev_results, test_results) = all_results
    save_checkpoints(model, train_results, dev_results, test_results,
                     args.save_path)
class TrainInfo():
    """Tracks batch count, running loss, and early-stopping state.

    Bug fix: the original kept batch_id/running_loss/best_loss/best_batch as
    CLASS attributes; `self.running_loss += [loss]` mutated the shared class
    list, leaking losses across instances. All state is now per-instance.
    """

    def __init__(self, pbar, wait_iterations, eval_batches):
        # pbar: tqdm-like progress bar (update/set_description/total).
        self.pbar = pbar
        # Stop after this many batches without a dev-loss improvement.
        self.wait_iterations = wait_iterations
        # Evaluate on dev every eval_batches batches.
        self.eval_batches = eval_batches
        self.batch_id = 0
        self.running_loss = []
        self.best_loss = float('inf')
        self.best_batch = 0

    @property
    def finish(self):
        """True once the patience budget since the best batch is exhausted."""
        return ((self.batch_id - self.best_batch) >= self.wait_iterations)

    @property
    def eval(self):
        """True on batches where a dev evaluation is due."""
        return ((self.batch_id % self.eval_batches) == 0)

    @property
    def max_epochs(self):
        """Latest batch index at which training could still stop."""
        return (self.best_batch + self.wait_iterations)

    @property
    def avg_loss(self):
        """Mean training loss since the last reset_loss()."""
        return (sum(self.running_loss) / len(self.running_loss))

    def new_batch(self, loss):
        """Record one training batch and advance the progress bar."""
        self.batch_id += 1
        self.pbar.update(1)
        self.running_loss += [loss]

    def is_best(self, dev_results):
        """Update best-loss bookkeeping; True if dev loss improved."""
        dev_loss = dev_results['loss']
        if (dev_loss < self.best_loss):
            self.best_loss = dev_loss
            self.best_batch = self.batch_id
            # Extend the bar so it reflects the refreshed patience window.
            self.pbar.total = self.max_epochs
            return True
        return False

    def reset_loss(self):
        """Clear the running-loss window."""
        self.running_loss = []

    def print_progress(self, dev_results):
        """Show current train/dev metrics on the bar and reset the window."""
        dev_loss = dev_results['loss']
        dev_acc = dev_results['acc']
        self.pbar.set_description(
            'Training loss: %.4f Dev loss: %.4f acc: %.4f'
            % (self.avg_loss, dev_loss, dev_acc))
        self.reset_loss()
def config(seed):
    """Seed every RNG the project uses (stdlib, NumPy, PyTorch)."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
def write_csv(filename, results):
    """Write the rows in *results* to *filename* as comma-separated UTF-8."""
    with io.open(filename, 'w', encoding='utf8') as sink:
        csv.writer(sink, delimiter=',').writerows(results)
def write_data(filename, data):
    """Pickle *data* to *filename* (binary mode)."""
    with open(filename, 'wb') as sink:
        pickle.dump(data, sink)
def read_data(filename):
    """Load and return a pickled object from *filename*."""
    with open(filename, 'rb') as source:
        return pickle.load(source)
def rmdir_if_exists(fdir):
    """Recursively delete *fdir* when present; silently no-op otherwise."""
    if not os.path.exists(fdir):
        return
    shutil.rmtree(fdir)
def file_len(fname):
    """Count the lines in *fname*; return 0 for a missing or empty file.

    Bug fix: the original counted with `for i, l in enumerate(f)` and then
    returned `i + 1`, which raised NameError on an empty file because `i`
    was never bound.
    """
    if not os.path.isfile(fname):
        return 0
    with open(fname, 'r') as f:
        return sum(1 for _ in f)
def mkdir(folder):
    """Create *folder* (including parents); do nothing if it already exists."""
    target = pathlib.Path(folder)
    target.mkdir(parents=True, exist_ok=True)
@dataclass
class MelConfig():
    """Mel-spectrogram extraction settings."""

    # Number of mel filterbank channels.
    n_mels: int = 128
    # Audio sample rate in Hz.
    sample_rate: int = 24000
    # STFT window length in samples.
    win_length: int = 1024
    # STFT hop length in samples.
    hop_length: int = 256
@dataclass
class DiffusionConfig():
    """Diffusion-vocoder hyperparameters.

    sample_rate/win_length/hop_length mirror MelConfig so the vocoder and
    feature extraction stay in sync.
    """

    # Input channels; matches the mel-channel count of the conditioning.
    in_channels: int = 128
    # Number of stacked residual blocks.
    residual_layers: int = 30
    # Channel width inside each residual block.
    residual_channels: int = 128
    # Dilation doubles for this many layers before cycling back to 1.
    dilation_cycle_length: int = 10
    # Length of the diffusion noise schedule.
    num_diffusion_steps: int = 50
    # Audio sample rate in Hz.
    sample_rate: int = 24000
    # STFT window length in samples.
    win_length: int = 1024
    # STFT hop length in samples.
    hop_length: int = 256
@dataclass
class GANConfig():
    """GAN-vocoder (HiFi-GAN-style) generator hyperparameters.

    sample_rate/win_length/hop_length mirror MelConfig so the vocoder and
    feature extraction stay in sync.
    """

    # Input channels; matches the mel-channel count of the conditioning.
    in_channels: int = 128
    # Channel width entering the upsampling stack.
    upsample_in_channels: int = 1536
    # Per-stage upsampling factors (product = hop_length = 1024? see NOTE).
    # NOTE(review): 4*4*2*2*2*2 = 256, matching hop_length — confirm.
    upsample_strides: List[int] = field(default_factory=(lambda : [4, 4, 2, 2, 2, 2]))
    # Kernel sizes of the parallel residual blocks.
    resblock_kernel_sizes: List[int] = field(default_factory=(lambda : [3, 7, 11]))
    # Dilations per residual block, one inner list per kernel size.
    resblock_dilations: List[List[int]] = field(default_factory=(lambda : [[1, 3, 5], [1, 3, 5], [1, 3, 5]]))
    # Audio sample rate in Hz.
    sample_rate: int = 24000
    # STFT window length in samples.
    win_length: int = 1024
    # STFT hop length in samples.
    hop_length: int = 256