import argparse
import os
import os.path as osp
|
import mmengine
from mmengine.config import Config, DictAction
from mmengine.hooks import Hook
from mmengine.runner import Runner
|
|
def parse_args():
    parser = argparse.ArgumentParser(
        description='MMPose test (and eval) model')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument(
        '--work-dir', help='the directory to save evaluation results')
    parser.add_argument('--out', help='the file to save metric results')
    parser.add_argument(
        '--dump',
        type=str,
        help='dump predictions to a pickle file for offline evaluation')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        default={},
        help='override some settings in the used config; key-value pairs '
        'in xxx=yyy format will be merged into the config file. For example, '
        "'--cfg-options model.backbone.depth=18 model.backbone.with_cp=True'")
    parser.add_argument(
        '--show-dir',
        help='directory where the visualization images will be saved')
    parser.add_argument(
        '--show',
        action='store_true',
        help='whether to display the prediction results in a window')
    parser.add_argument(
        '--interval',
        type=int,
        default=1,
        help='the interval of samples to visualize')
    parser.add_argument(
        '--wait-time',
        type=float,
        default=1,
        help='display time (in seconds) of each window')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args()
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    return args

|
|
def merge_args(cfg, args):
    """Merge CLI arguments into the config."""
    cfg.launcher = args.launcher
    cfg.load_from = args.checkpoint
|
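    # -------------------- work directory --------------------
    # work_dir priority: CLI argument > value already set in the config
    # > default derived from the config filename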
    if args.work_dir is not None:
        # update the config according to the CLI argument
        cfg.work_dir = args.work_dir
    elif cfg.get('work_dir', None) is None:
        # fall back to ./work_dirs/<config filename>
        cfg.work_dir = osp.join('./work_dirs',
                                osp.splitext(osp.basename(args.config))[0])
|
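    # -------------------- visualization --------------------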
    if args.show or (args.show_dir is not None):
        assert 'visualization' in cfg.default_hooks, \
            'PoseVisualizationHook is not set in the ' \
            '`default_hooks` field of config. Please set ' \
            '`visualization=dict(type="PoseVisualizationHook")`'

        cfg.default_hooks.visualization.enable = True
        cfg.default_hooks.visualization.show = args.show
        if args.show:
            cfg.default_hooks.visualization.wait_time = args.wait_time
        cfg.default_hooks.visualization.out_dir = args.show_dir
        cfg.default_hooks.visualization.interval = args.interval
|
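    # -------------------- dump predictions --------------------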
    if args.dump is not None:
        assert args.dump.endswith(('.pkl', '.pickle')), \
            'The dump file must be a .pkl or .pickle file.'
        dump_metric = dict(type='DumpResults', out_file_path=args.dump)
        if isinstance(cfg.test_evaluator, (list, tuple)):
            cfg.test_evaluator = [*cfg.test_evaluator, dump_metric]
        else:
            cfg.test_evaluator = [cfg.test_evaluator, dump_metric]
|
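    # -------------------- other config overrides --------------------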
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)

    return cfg
|
|
def main():
    args = parse_args()
|
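    # load the config and merge CLI arguments into it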
    cfg = Config.fromfile(args.config)
    cfg = merge_args(cfg, args)
|
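    # build the runner from the merged config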
    runner = Runner.from_cfg(cfg)
|
    if args.out:

        class SaveMetricHook(Hook):
            """Dump evaluation metrics to ``args.out`` after the test epoch."""

            def after_test_epoch(self, _, metrics=None):
                if metrics is not None:
                    mmengine.dump(metrics, args.out)

        # 'LOWEST' priority ensures this hook runs after all other hooks
        # have finished processing the metrics
        runner.register_hook(SaveMetricHook(), 'LOWEST')
|
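    # start testing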
    runner.test()
|
|
if __name__ == '__main__':
    main()
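
# Example usage (the 'tools/test.py' path is an assumption; adjust it to this
# script's actual location):
#   python tools/test.py CONFIG_FILE CHECKPOINT_FILE \
#       --work-dir work_dirs/eval --out metrics.pkl --dump predictions.pkl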
|
|