| |
| |
| |
| |
| |
| |
| import argparse |
| import time |
| import torch |
| from mmcv import Config |
| from mmcv.parallel import MMDataParallel |
| from mmcv.runner import load_checkpoint, wrap_fp16_model |
|
|
| from mmdet3d.datasets import build_dataloader, build_dataset |
| from mmdet3d.models import build_detector |
import os
import sys
# Make the current working directory importable so that plugin modules
# referenced by the config (see the `plugin` handling in main) resolve.
sys.path.append('./')
def parse_args():
    """Parse command-line arguments for the benchmark script.

    Returns:
        argparse.Namespace: parsed arguments with fields ``config``,
        ``checkpoint``, ``samples`` and ``log_interval``.
    """
    parser = argparse.ArgumentParser(description='MMDet benchmark a model')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('--checkpoint', help='checkpoint file')
    # type=int is required: without it, values given on the command line are
    # strings, which breaks `(i + 1) % args.log_interval` (TypeError) and
    # `(i + 1) == args.samples` (never equal) in main().
    parser.add_argument(
        '--samples', default=300, type=int, help='samples to benchmark')
    parser.add_argument(
        '--log-interval', default=50, type=int, help='interval of logging')
    args = parser.parse_args()
    return args
|
|
|
|
def main():
    """Benchmark single-GPU inference speed (FPS) of a detector.

    Builds the test dataset/model from the config, loads the checkpoint if
    one is given, then times forward passes and reports images per second.
    """
    args = parse_args()

    cfg = Config.fromfile(args.config)
    # Let cudnn pick the fastest kernels when the config opts in
    # (beneficial for fixed input sizes).
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # Import the plugin package (custom models/datasets) if the config
    # declares one; the module path is derived from a filesystem path.
    if hasattr(cfg, 'plugin') and cfg.plugin:
        import importlib
        if hasattr(cfg, 'plugin_dir'):
            _module_dir = os.path.dirname(cfg.plugin_dir)
        else:
            # Fall back to the directory containing the config file.
            _module_dir = os.path.dirname(args.config)
        _module_path = _module_dir.replace('/', '.')
        print(_module_path)
        plg_lib = importlib.import_module(_module_path)

    # Build the test dataset and a single-sample, non-distributed loader.
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(
        dataset,
        samples_per_gpu=1,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=False,
        shuffle=False)

    # Build the model in test-only mode.
    cfg.model.train_cfg = None
    model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg'))

    # Wrap for fp16 if configured, then load the trained weights.
    # Without loading the checkpoint the benchmark would time a randomly
    # initialized model (args.checkpoint was previously parsed but unused).
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    if args.checkpoint is not None:
        load_checkpoint(model, args.checkpoint, map_location='cpu')

    model = MMDataParallel(model, device_ids=[0])
    model.eval()

    # Skip the first iterations so cuda/cudnn warm-up does not skew timing.
    num_warmup = 5
    pure_inf_time = 0

    for i, data in enumerate(data_loader):

        torch.cuda.synchronize()
        start_time = time.perf_counter()

        with torch.no_grad():
            model(return_loss=False, rescale=True, **data)

        torch.cuda.synchronize()
        elapsed = time.perf_counter() - start_time

        if i >= num_warmup:
            pure_inf_time += elapsed
            if (i + 1) % args.log_interval == 0:
                fps = (i + 1 - num_warmup) / pure_inf_time
                print(f'Done image [{i + 1:<3}/ {args.samples}], '
                      f'fps: {fps:.1f} img / s')

        if (i + 1) == args.samples:
            # NOTE: `elapsed` was already accumulated above; the original
            # added it a second time here, double-counting the final
            # iteration and deflating the reported overall FPS.
            fps = (i + 1 - num_warmup) / pure_inf_time
            print(f'Overall fps: {fps:.1f} img / s')
            break
|
|
|
|
# Script entry point: run the benchmark when executed directly.
if __name__ == '__main__':
    main()
|
|