code
stringlengths
17
6.64M
class DistEvalHook(EvalHook):
    """Distributed evaluation hook.

    Regularly evaluates the model at a given interval while training in a
    distributed environment. The BatchNorm buffers of rank 0 can be
    broadcast to all other ranks beforehand so every process evaluates the
    same statistics.

    Args:
        dataloader (DataLoader): A PyTorch dataloader whose dataset
            implements an ``evaluate`` method.
        start (int | None, optional): Evaluation starting epoch. Enables
            evaluation before training starts if ``start`` <= the resuming
            epoch. If None, evaluation is governed solely by ``interval``.
            Default: None.
        interval (int): Evaluation interval. Default: 1.
        by_epoch (bool): Evaluate per epoch (True) or per iteration (False).
            Default: True.
        save_best (str, optional): Metric key used to track the best
            checkpoint; best score/path are kept in
            ``runner.meta['hook_msgs']``. ``'auto'`` uses the first key of
            the returned result dict. Default: None.
        rule (str | None, optional): Comparison rule for the best score:
            'greater', 'less' or None (inferred from the key). Default: None.
        test_fn (callable, optional): Multi-GPU test function; defaults to
            ``mmcv.engine.multi_gpu_test`` when None.
        tmpdir (str | None): Temporary directory used to collect results
            from all processes. Default: None.
        gpu_collect (bool): Collect results on GPU instead of CPU.
            Default: False.
        broadcast_bn_buffer (bool): Broadcast rank-0 BN running stats
            (running_mean/running_var) before evaluation. Default: True.
        out_dir (str, optional): Root directory for checkpoints; defaults to
            ``runner.work_dir`` when unspecified.
        file_client_args (dict): Arguments to instantiate a FileClient.
            See :class:`mmcv.fileio.FileClient` for details. Default: None.
        **eval_kwargs: Extra keyword arguments passed to the dataset's
            ``evaluate`` method.
    """

    def __init__(self,
                 dataloader,
                 start=None,
                 interval=1,
                 by_epoch=True,
                 save_best=None,
                 rule=None,
                 test_fn=None,
                 greater_keys=None,
                 less_keys=None,
                 broadcast_bn_buffer=True,
                 tmpdir=None,
                 gpu_collect=False,
                 out_dir=None,
                 file_client_args=None,
                 **eval_kwargs):
        if test_fn is None:
            # Imported lazily so mmcv.engine is only touched when the caller
            # did not supply a test function of their own.
            from mmcv.engine import multi_gpu_test
            test_fn = multi_gpu_test
        super().__init__(
            dataloader,
            start=start,
            interval=interval,
            by_epoch=by_epoch,
            save_best=save_best,
            rule=rule,
            test_fn=test_fn,
            greater_keys=greater_keys,
            less_keys=less_keys,
            out_dir=out_dir,
            file_client_args=file_client_args,
            **eval_kwargs)
        self.broadcast_bn_buffer = broadcast_bn_buffer
        self.tmpdir = tmpdir
        self.gpu_collect = gpu_collect

    def _do_evaluate(self, runner):
        """Perform evaluation and, on rank 0, save the best checkpoint."""
        if self.broadcast_bn_buffer:
            # Sync BN running stats from rank 0 so every rank evaluates
            # identical model statistics.
            for _, mod in runner.model.named_modules():
                if isinstance(mod, _BatchNorm) and mod.track_running_stats:
                    dist.broadcast(mod.running_var, 0)
                    dist.broadcast(mod.running_mean, 0)

        collect_dir = self.tmpdir
        if collect_dir is None:
            collect_dir = osp.join(runner.work_dir, '.eval_hook')

        results = self.test_fn(
            runner.model,
            self.dataloader,
            tmpdir=collect_dir,
            gpu_collect=self.gpu_collect)
        if runner.rank == 0:
            print('\n')
            runner.log_buffer.output['eval_iter_num'] = len(self.dataloader)
            key_score = self.evaluate(runner, results)
            if self.save_best and key_score:
                self._save_ckpt(runner, key_score)
class Hook:
    """Base class for all runner hooks.

    Subclasses override any of the stage callbacks below. The generic
    ``before_epoch``/``after_epoch``/``before_iter``/``after_iter`` hooks
    fan out to both their train and val counterparts.
    """

    stages = ('before_run', 'before_train_epoch', 'before_train_iter',
              'after_train_iter', 'after_train_epoch', 'before_val_epoch',
              'before_val_iter', 'after_val_iter', 'after_val_epoch',
              'after_run')

    def before_run(self, runner):
        pass

    def after_run(self, runner):
        pass

    def before_epoch(self, runner):
        pass

    def after_epoch(self, runner):
        pass

    def before_iter(self, runner):
        pass

    def after_iter(self, runner):
        pass

    # Stage-specific callbacks delegate to the generic epoch/iter hooks by
    # default, so overriding e.g. ``before_epoch`` affects train and val.
    def before_train_epoch(self, runner):
        self.before_epoch(runner)

    def before_val_epoch(self, runner):
        self.before_epoch(runner)

    def after_train_epoch(self, runner):
        self.after_epoch(runner)

    def after_val_epoch(self, runner):
        self.after_epoch(runner)

    def before_train_iter(self, runner):
        self.before_iter(runner)

    def before_val_iter(self, runner):
        self.before_iter(runner)

    def after_train_iter(self, runner):
        self.after_iter(runner)

    def after_val_iter(self, runner):
        self.after_iter(runner)

    def every_n_epochs(self, runner, n):
        """True every ``n`` epochs (1-based); always False for ``n <= 0``."""
        if n <= 0:
            return False
        return (runner.epoch + 1) % n == 0

    def every_n_inner_iters(self, runner, n):
        """True every ``n`` inner iterations; always False for ``n <= 0``."""
        if n <= 0:
            return False
        return (runner.inner_iter + 1) % n == 0

    def every_n_iters(self, runner, n):
        """True every ``n`` global iterations; always False for ``n <= 0``."""
        if n <= 0:
            return False
        return (runner.iter + 1) % n == 0

    def end_of_epoch(self, runner):
        """True when the current inner iteration is the epoch's last one."""
        return runner.inner_iter + 1 == len(runner.data_loader)

    def is_last_epoch(self, runner):
        """True during the final training epoch."""
        return runner.epoch + 1 == runner._max_epochs

    def is_last_iter(self, runner):
        """True during the final training iteration."""
        return runner.iter + 1 == runner._max_iters

    def get_triggered_stages(self):
        """Return the stages this hook overrides, in ``Hook.stages`` order."""
        triggered = {
            stage
            for stage in Hook.stages
            if is_method_overridden(stage, Hook, self)
        }
        # A generic hook override implies both of its stage variants.
        composite_map = {
            'before_epoch': ['before_train_epoch', 'before_val_epoch'],
            'after_epoch': ['after_train_epoch', 'after_val_epoch'],
            'before_iter': ['before_train_iter', 'before_val_iter'],
            'after_iter': ['after_train_iter', 'after_val_iter'],
        }
        for method, mapped_stages in composite_map.items():
            if is_method_overridden(method, Hook, self):
                triggered.update(mapped_stages)
        return [stage for stage in Hook.stages if stage in triggered]
@HOOKS.register_module() class IterTimerHook(Hook): def before_epoch(self, runner): self.t = time.time() def before_iter(self, runner): runner.log_buffer.update({'data_time': (time.time() - self.t)}) def after_iter(self, runner): runner.log_buffer.update({'time': (time.time() - self.t)}) self.t = time.time()
class LoggerHook(Hook):
    """Base class for logger hooks.

    Args:
        interval (int): Logging interval (every k iterations). Default 10.
        ignore_last (bool): Ignore the log of last iterations in each epoch
            if less than `interval`. Default True.
        reset_flag (bool): Whether to clear the output buffer after logging.
            Default False.
        by_epoch (bool): Whether EpochBasedRunner is used. Default True.
    """

    # NOTE(review): ``__metaclass__`` is a Python 2 idiom and has no effect
    # on Python 3 (so ``@abstractmethod`` below is not actually enforced);
    # kept as-is for backward compatibility of the class attribute.
    __metaclass__ = ABCMeta

    def __init__(self,
                 interval=10,
                 ignore_last=True,
                 reset_flag=False,
                 by_epoch=True):
        self.interval = interval
        self.ignore_last = ignore_last
        self.reset_flag = reset_flag
        self.by_epoch = by_epoch

    @abstractmethod
    def log(self, runner):
        pass

    @staticmethod
    def is_scalar(val, include_np=True, include_torch=True):
        """Tell whether the input variable is a scalar.

        Args:
            val: Input variable.
            include_np (bool): Whether to include 0-d np.ndarray as a scalar.
            include_torch (bool): Whether to include 0-d (or single-element)
                torch.Tensor as a scalar.

        Returns:
            bool: True or False.
        """
        if isinstance(val, numbers.Number):
            return True
        elif include_np and isinstance(val, np.ndarray) and val.ndim == 0:
            return True
        elif (include_torch and isinstance(val, torch.Tensor)
              # BUGFIX: ``len(val)`` raises TypeError on a 0-d tensor even
              # though the docstring promises 0-d tensors are scalars.
              # Check ``ndim`` first; tensors whose first dim has size 1
              # (the old ``len(val) == 1`` condition) remain accepted for
              # backward compatibility.
              and (val.ndim == 0 or val.shape[0] == 1)):
            return True
        else:
            return False

    def get_mode(self, runner):
        """Return the logging mode, ``'train'`` or ``'val'``."""
        if runner.mode == 'train':
            # NOTE(review): presumably an evaluation pass leaves no 'time'
            # entry in the buffer, marking this as a val log — confirm
            # against the runner implementation.
            if 'time' in runner.log_buffer.output:
                mode = 'train'
            else:
                mode = 'val'
        elif runner.mode == 'val':
            mode = 'val'
        else:
            raise ValueError(f"runner mode should be 'train' or 'val', but got {runner.mode}")
        return mode

    def get_epoch(self, runner):
        """Return the 1-based epoch number for logging."""
        if runner.mode == 'train':
            epoch = runner.epoch + 1
        elif runner.mode == 'val':
            # NOTE(review): no +1 here — presumably the epoch counter has
            # already been advanced before validation; verify in the runner.
            epoch = runner.epoch
        else:
            raise ValueError(f"runner mode should be 'train' or 'val', but got {runner.mode}")
        return epoch

    def get_iter(self, runner, inner_iter=False):
        """Get the current training iteration step."""
        if self.by_epoch and inner_iter:
            current_iter = runner.inner_iter + 1
        else:
            current_iter = runner.iter + 1
        return current_iter

    def get_lr_tags(self, runner):
        """Collect current learning rates as loggable tags."""
        tags = {}
        lrs = runner.current_lr()
        if isinstance(lrs, dict):
            # One entry per optimizer; log the first param group's lr.
            for name, value in lrs.items():
                tags[f'learning_rate/{name}'] = value[0]
        else:
            tags['learning_rate'] = lrs[0]
        return tags

    def get_momentum_tags(self, runner):
        """Collect current momentums as loggable tags."""
        tags = {}
        momentums = runner.current_momentum()
        if isinstance(momentums, dict):
            for name, value in momentums.items():
                tags[f'momentum/{name}'] = value[0]
        else:
            tags['momentum'] = momentums[0]
        return tags

    def get_loggable_tags(self,
                          runner,
                          allow_scalar=True,
                          allow_text=False,
                          add_mode=True,
                          tags_to_skip=('time', 'data_time')):
        """Filter the log buffer into a dict of tags suitable for logging."""
        tags = {}
        for var, val in runner.log_buffer.output.items():
            if var in tags_to_skip:
                continue
            if self.is_scalar(val) and not allow_scalar:
                continue
            if isinstance(val, str) and not allow_text:
                continue
            if add_mode:
                var = f'{self.get_mode(runner)}/{var}'
            tags[var] = val
        tags.update(self.get_lr_tags(runner))
        tags.update(self.get_momentum_tags(runner))
        return tags

    def before_run(self, runner):
        # Only the last registered logger hook keeps ``reset_flag=True`` so
        # the output buffer is cleared exactly once per logging round.
        for hook in runner.hooks[::-1]:
            if isinstance(hook, LoggerHook):
                hook.reset_flag = True
                break

    def before_epoch(self, runner):
        runner.log_buffer.clear()

    def after_train_iter(self, runner):
        if self.by_epoch and self.every_n_inner_iters(runner, self.interval):
            runner.log_buffer.average(self.interval)
        elif not self.by_epoch and self.every_n_iters(runner, self.interval):
            runner.log_buffer.average(self.interval)
        elif self.end_of_epoch(runner) and not self.ignore_last:
            # Average the shorter-than-interval tail of the epoch as well.
            runner.log_buffer.average(self.interval)
        if runner.log_buffer.ready:
            self.log(runner)
            if self.reset_flag:
                runner.log_buffer.clear_output()

    def after_train_epoch(self, runner):
        if runner.log_buffer.ready:
            self.log(runner)
            if self.reset_flag:
                runner.log_buffer.clear_output()

    def after_val_epoch(self, runner):
        runner.log_buffer.average()
        self.log(runner)
        if self.reset_flag:
            runner.log_buffer.clear_output()
@HOOKS.register_module() class DvcliveLoggerHook(LoggerHook): 'Class to log metrics with dvclive.\n\n It requires `dvclive`_ to be installed.\n\n Args:\n model_file (str): Default None. If not None, after each epoch the\n model will be saved to {model_file}.\n interval (int): Logging interval (every k iterations). Default 10.\n ignore_last (bool): Ignore the log of last iterations in each epoch\n if less than `interval`. Default: True.\n reset_flag (bool): Whether to clear the output buffer after logging.\n Default: False.\n by_epoch (bool): Whether EpochBasedRunner is used. Default: True.\n kwargs: Arguments for instantiating `Live`_.\n\n .. _dvclive:\n https://dvc.org/doc/dvclive\n\n .. _Live:\n https://dvc.org/doc/dvclive/api-reference/live#parameters\n ' def __init__(self, model_file=None, interval=10, ignore_last=True, reset_flag=False, by_epoch=True, **kwargs): super().__init__(interval, ignore_last, reset_flag, by_epoch) self.model_file = model_file self.import_dvclive(**kwargs) def import_dvclive(self, **kwargs): try: from dvclive import Live except ImportError: raise ImportError('Please run "pip install dvclive" to install dvclive') self.dvclive = Live(**kwargs) @master_only def log(self, runner): tags = self.get_loggable_tags(runner) if tags: self.dvclive.set_step(self.get_iter(runner)) for (k, v) in tags.items(): self.dvclive.log(k, v) @master_only def after_train_epoch(self, runner): super().after_train_epoch(runner) if (self.model_file is not None): runner.save_checkpoint(Path(self.model_file).parent, filename_tmpl=Path(self.model_file).name, create_symlink=False)
@HOOKS.register_module()
class MlflowLoggerHook(LoggerHook):
    """Log metrics and (optionally) the trained model to MLflow.

    Requires `MLflow`_ to be installed.

    Args:
        exp_name (str, optional): Name of the experiment to activate;
            created on demand if it does not exist. Default: None.
        tags (Dict[str], optional): Tags set on the current run.
            Default: None.
        log_model (bool, optional): Whether to log ``runner.model`` as an
            MLflow artifact for the current run. Default: True.
        interval (int): Logging interval (every k iterations). Default: 10.
        ignore_last (bool): Ignore the log of last iterations in each epoch
            if less than `interval`. Default: True.
        reset_flag (bool): Whether to clear the output buffer after logging.
            Default: False.
        by_epoch (bool): Whether EpochBasedRunner is used. Default: True.

    .. _MLflow:
        https://www.mlflow.org/docs/latest/index.html
    """

    def __init__(self,
                 exp_name=None,
                 tags=None,
                 log_model=True,
                 interval=10,
                 ignore_last=True,
                 reset_flag=False,
                 by_epoch=True):
        super(MlflowLoggerHook, self).__init__(interval, ignore_last,
                                               reset_flag, by_epoch)
        self.import_mlflow()
        self.exp_name = exp_name
        self.tags = tags
        self.log_model = log_model

    def import_mlflow(self):
        """Bind the mlflow modules, erroring helpfully if missing."""
        try:
            import mlflow
            import mlflow.pytorch as mlflow_pytorch
        except ImportError:
            raise ImportError('Please run "pip install mlflow" to install mlflow')
        self.mlflow = mlflow
        self.mlflow_pytorch = mlflow_pytorch

    @master_only
    def before_run(self, runner):
        super(MlflowLoggerHook, self).before_run(runner)
        if self.exp_name is not None:
            self.mlflow.set_experiment(self.exp_name)
        if self.tags is not None:
            self.mlflow.set_tags(self.tags)

    @master_only
    def log(self, runner):
        metrics = self.get_loggable_tags(runner)
        if metrics:
            self.mlflow.log_metrics(metrics, step=self.get_iter(runner))

    @master_only
    def after_run(self, runner):
        if self.log_model:
            # Pin the torch version so the logged model's environment
            # matches the one used for training.
            self.mlflow_pytorch.log_model(
                runner.model,
                'models',
                pip_requirements=[f'torch=={TORCH_VERSION}'])
@HOOKS.register_module()
class NeptuneLoggerHook(LoggerHook):
    """Class to log metrics to NeptuneAI.

    It requires `Neptune`_ to be installed.

    Args:
        init_kwargs (dict): a dict contains the initialization keys as below:

            - project (str): Name of a project in a form of
              namespace/project_name. If None, the value of NEPTUNE_PROJECT
              environment variable will be taken.
            - api_token (str): User's API token. If None, the value of
              NEPTUNE_API_TOKEN environment variable will be taken. Note: It
              is strongly recommended to use NEPTUNE_API_TOKEN environment
              variable rather than placing your API token in plain text in
              your source code.
            - name (str, optional, default is 'Untitled'): Editable name of
              the run. Name is displayed in the run's Details and in Runs
              table as a column.

            Check https://docs.neptune.ai/api-reference/neptune#init for
            more init arguments.
        interval (int): Logging interval (every k iterations). Default: 10.
        ignore_last (bool): Ignore the log of last iterations in each epoch
            if less than ``interval``. Default: True.
        reset_flag (bool): Whether to clear the output buffer after logging.
            Default: True.
        with_step (bool): If True, the step will be logged from
            ``self.get_iters``. Otherwise, step will not be logged.
            Default: True.
        by_epoch (bool): Whether EpochBasedRunner is used. Default: True.

    .. _Neptune:
        https://docs.neptune.ai
    """

    def __init__(self,
                 init_kwargs=None,
                 interval=10,
                 ignore_last=True,
                 reset_flag=True,
                 with_step=True,
                 by_epoch=True):
        super(NeptuneLoggerHook, self).__init__(interval, ignore_last,
                                                reset_flag, by_epoch)
        self.import_neptune()
        self.init_kwargs = init_kwargs
        self.with_step = with_step

    def import_neptune(self):
        """Bind the neptune client, erroring helpfully if missing."""
        try:
            import neptune.new as neptune
        except ImportError:
            raise ImportError('Please run "pip install neptune-client" to install neptune')
        self.neptune = neptune
        self.run = None

    @master_only
    def before_run(self, runner):
        if self.init_kwargs:
            self.run = self.neptune.init(**self.init_kwargs)
        else:
            self.run = self.neptune.init()

    @master_only
    def log(self, runner):
        tags = self.get_loggable_tags(runner)
        if tags:
            # BUGFIX: 'global_step' used to be inserted into ``tags`` inside
            # the ``for ... in tags.items()`` loop, which raises
            # ``RuntimeError: dictionary changed size during iteration``
            # whenever ``with_step`` is False. Insert it up front instead.
            if not self.with_step:
                tags['global_step'] = self.get_iter(runner)
            for tag_name, tag_value in tags.items():
                if self.with_step:
                    self.run[tag_name].log(
                        tag_value, step=self.get_iter(runner))
                else:
                    self.run[tag_name].log(tags)

    @master_only
    def after_run(self, runner):
        self.run.stop()
@HOOKS.register_module()
class PaviLoggerHook(LoggerHook):
    """Class to visual model, log metrics (for internal use).

    Args:
        init_kwargs (dict): A dict contains the initialization keys.
        add_graph (bool): Whether to visual model. Default: False.
        add_last_ckpt (bool): Whether to save checkpoint after run.
            Default: False.
        interval (int): Logging interval (every k iterations). Default: True.
        ignore_last (bool): Ignore the log of last iterations in each epoch
            if less than `interval`. Default: True.
        reset_flag (bool): Whether to clear the output buffer after logging.
            Default: False.
        by_epoch (bool): Whether EpochBasedRunner is used. Default: True.
        img_key (string): Get image data from Dataset. Default: 'img_info'.
    """

    def __init__(self,
                 init_kwargs=None,
                 add_graph=False,
                 add_last_ckpt=False,
                 interval=10,
                 ignore_last=True,
                 reset_flag=False,
                 by_epoch=True,
                 img_key='img_info'):
        super(PaviLoggerHook, self).__init__(interval, ignore_last,
                                             reset_flag, by_epoch)
        self.init_kwargs = init_kwargs
        self.add_graph = add_graph
        self.add_last_ckpt = add_last_ckpt
        self.img_key = img_key

    @master_only
    def before_run(self, runner):
        super(PaviLoggerHook, self).before_run(runner)
        try:
            from pavi import SummaryWriter
        except ImportError:
            raise ImportError('Please run "pip install pavi" to install pavi.')
        self.run_name = runner.work_dir.split('/')[-1]
        if not self.init_kwargs:
            self.init_kwargs = dict()
        self.init_kwargs['name'] = self.run_name
        self.init_kwargs['model'] = runner._model_name
        if runner.meta is not None:
            if 'config_dict' in runner.meta:
                config_dict = runner.meta['config_dict']
                assert isinstance(config_dict, dict), f'meta["config_dict"] has to be of a dict, but got {type(config_dict)}'
            elif 'config_file' in runner.meta:
                config_file = runner.meta['config_file']
                config_dict = dict(mmcv.Config.fromfile(config_file))
            else:
                config_dict = None
            if config_dict is not None:
                config_dict = config_dict.copy()
                config_dict.setdefault('max_iter', runner.max_iters)
                # Round-trip through JSON to drop values that cannot be
                # serialized before dumping the session text as YAML.
                config_dict = json.loads(
                    mmcv.dump(config_dict, file_format='json'))
                session_text = yaml.dump(config_dict)
                self.init_kwargs['session_text'] = session_text
        self.writer = SummaryWriter(**self.init_kwargs)

    def get_step(self, runner):
        """Get the total training step/epoch."""
        if self.get_mode(runner) == 'val' and self.by_epoch:
            return self.get_epoch(runner)
        else:
            return self.get_iter(runner)

    @master_only
    def log(self, runner):
        tags = self.get_loggable_tags(runner, add_mode=False)
        if tags:
            self.writer.add_scalars(
                self.get_mode(runner), tags, self.get_step(runner))

    @master_only
    def after_run(self, runner):
        if self.add_last_ckpt:
            ckpt_path = osp.join(runner.work_dir, 'latest.pth')
            if osp.islink(ckpt_path):
                ckpt_path = osp.join(runner.work_dir, os.readlink(ckpt_path))
            if osp.isfile(ckpt_path):
                iteration = runner.epoch if self.by_epoch else runner.iter
                # BUGFIX: this call used to be ``return``-ed, which skipped
                # ``self.writer.close()`` below and leaked the pavi session
                # whenever the final checkpoint was uploaded.
                self.writer.add_snapshot_file(
                    tag=self.run_name,
                    snapshot_file_path=ckpt_path,
                    iteration=iteration)
        # Always flush and close the writer, snapshot or not.
        self.writer.close()

    @master_only
    def before_epoch(self, runner):
        # Visualize the model graph once, using the first training batch.
        if runner.epoch == 0 and self.add_graph:
            if is_module_wrapper(runner.model):
                _model = runner.model.module
            else:
                _model = runner.model
            device = next(_model.parameters()).device
            data = next(iter(runner.data_loader))
            image = data[self.img_key][0:1].to(device)
            with torch.no_grad():
                self.writer.add_graph(_model, image)
@HOOKS.register_module() class SegmindLoggerHook(LoggerHook): 'Class to log metrics to Segmind.\n\n It requires `Segmind`_ to be installed.\n\n Args:\n interval (int): Logging interval (every k iterations). Default: 10.\n ignore_last (bool): Ignore the log of last iterations in each epoch\n if less than `interval`. Default True.\n reset_flag (bool): Whether to clear the output buffer after logging.\n Default False.\n by_epoch (bool): Whether EpochBasedRunner is used. Default True.\n\n .. _Segmind:\n https://docs.segmind.com/python-library\n ' def __init__(self, interval=10, ignore_last=True, reset_flag=False, by_epoch=True): super(SegmindLoggerHook, self).__init__(interval, ignore_last, reset_flag, by_epoch) self.import_segmind() def import_segmind(self): try: import segmind except ImportError: raise ImportError("Please run 'pip install segmind' to install segmind") self.log_metrics = segmind.tracking.fluent.log_metrics self.mlflow_log = segmind.utils.logging_utils.try_mlflow_log @master_only def log(self, runner): tags = self.get_loggable_tags(runner) if tags: self.mlflow_log(self.log_metrics, tags, step=runner.epoch, epoch=runner.epoch)
@HOOKS.register_module() class TensorboardLoggerHook(LoggerHook): 'Class to log metrics to Tensorboard.\n\n Args:\n log_dir (string): Save directory location. Default: None. If default\n values are used, directory location is ``runner.work_dir``/tf_logs.\n interval (int): Logging interval (every k iterations). Default: True.\n ignore_last (bool): Ignore the log of last iterations in each epoch\n if less than `interval`. Default: True.\n reset_flag (bool): Whether to clear the output buffer after logging.\n Default: False.\n by_epoch (bool): Whether EpochBasedRunner is used. Default: True.\n ' def __init__(self, log_dir=None, interval=10, ignore_last=True, reset_flag=False, by_epoch=True): super(TensorboardLoggerHook, self).__init__(interval, ignore_last, reset_flag, by_epoch) self.log_dir = log_dir @master_only def before_run(self, runner): super(TensorboardLoggerHook, self).before_run(runner) if ((TORCH_VERSION == 'parrots') or (digit_version(TORCH_VERSION) < digit_version('1.1'))): try: from tensorboardX import SummaryWriter except ImportError: raise ImportError('Please install tensorboardX to use TensorboardLoggerHook.') else: try: from torch.utils.tensorboard import SummaryWriter except ImportError: raise ImportError('Please run "pip install future tensorboard" to install the dependencies to use torch.utils.tensorboard (applicable to PyTorch 1.1 or higher)') if (self.log_dir is None): self.log_dir = osp.join(runner.work_dir, 'tf_logs') self.writer = SummaryWriter(self.log_dir) @master_only def log(self, runner): tags = self.get_loggable_tags(runner, allow_text=True) for (tag, val) in tags.items(): if isinstance(val, str): self.writer.add_text(tag, val, self.get_iter(runner)) else: self.writer.add_scalar(tag, val, self.get_iter(runner)) @master_only def after_run(self, runner): self.writer.close()
@HOOKS.register_module()
class TextLoggerHook(LoggerHook):
    """Logger hook in text.

    In this logger hook, the information will be printed on terminal and
    saved in json file.

    Args:
        by_epoch (bool, optional): Whether EpochBasedRunner is used.
            Default: True.
        interval (int, optional): Logging interval (every k iterations).
            Default: 10.
        ignore_last (bool, optional): Ignore the log of last iterations in
            each epoch if less than :attr:`interval`. Default: True.
        reset_flag (bool, optional): Whether to clear the output buffer after
            logging. Default: False.
        interval_exp_name (int, optional): Logging interval for experiment
            name. This feature is to help users conveniently get the
            experiment information from screen or log file. Default: 1000.
        out_dir (str, optional): Logs are saved in ``runner.work_dir``
            default. If ``out_dir`` is specified, logs will be copied to a
            new directory which is the concatenation of ``out_dir`` and the
            last level directory of ``runner.work_dir``. Default: None.
            `New in version 1.3.16.`
        out_suffix (str or tuple[str], optional): Those filenames ending with
            ``out_suffix`` will be copied to ``out_dir``.
            Default: ('.log.json', '.log', '.py'). `New in version 1.3.16.`
        keep_local (bool, optional): Whether to keep local log when
            :attr:`out_dir` is specified. If False, the local log will be
            removed. Default: True. `New in version 1.3.16.`
        file_client_args (dict, optional): Arguments to instantiate a
            FileClient. See :class:`mmcv.fileio.FileClient` for details.
            Default: None. `New in version 1.3.16.`
    """

    def __init__(self,
                 by_epoch=True,
                 interval=10,
                 ignore_last=True,
                 reset_flag=False,
                 interval_exp_name=1000,
                 out_dir=None,
                 out_suffix=('.log.json', '.log', '.py'),
                 keep_local=True,
                 file_client_args=None):
        super(TextLoggerHook, self).__init__(interval, ignore_last,
                                             reset_flag, by_epoch)
        self.by_epoch = by_epoch
        self.time_sec_tot = 0
        self.interval_exp_name = interval_exp_name
        if out_dir is None and file_client_args is not None:
            # BUGFIX: message used to read "notspecified" (missing space).
            raise ValueError(
                'file_client_args should be "None" when `out_dir` is '
                'not specified.')
        self.out_dir = out_dir
        if not (out_dir is None or isinstance(out_dir, str)
                or is_tuple_of(out_dir, str)):
            # BUGFIX: this message was missing its f-prefix, so the actual
            # value of ``out_dir`` was never interpolated.
            raise TypeError(
                f'out_dir should be "None" or string or tuple of string, '
                f'but got {out_dir}')
        self.out_suffix = out_suffix
        self.keep_local = keep_local
        self.file_client_args = file_client_args
        if self.out_dir is not None:
            self.file_client = FileClient.infer_client(
                file_client_args, self.out_dir)

    def before_run(self, runner):
        super(TextLoggerHook, self).before_run(runner)
        if self.out_dir is not None:
            self.file_client = FileClient.infer_client(
                self.file_client_args, self.out_dir)
            # The final out_dir is out_dir joined with the last level
            # directory of runner.work_dir.
            basename = osp.basename(runner.work_dir.rstrip(osp.sep))
            self.out_dir = self.file_client.join_path(self.out_dir, basename)
            runner.logger.info(
                f'Text logs will be saved to {self.out_dir} by '
                f'{self.file_client.name} after the training process.')
        self.start_iter = runner.iter
        self.json_log_path = osp.join(runner.work_dir,
                                      f'{runner.timestamp}.log.json')
        if runner.meta is not None:
            self._dump_log(runner.meta, runner)

    def _get_max_memory(self, runner):
        """Return the max GPU memory (MB) allocated, reduced across ranks."""
        device = getattr(runner.model, 'output_device', None)
        mem = torch.cuda.max_memory_allocated(device=device)
        mem_mb = torch.tensor([int(mem) // (1024 * 1024)],
                              dtype=torch.int,
                              device=device)
        if runner.world_size > 1:
            dist.reduce(mem_mb, 0, op=dist.ReduceOp.MAX)
        return mem_mb.item()

    def _log_info(self, log_dict, runner):
        """Format ``log_dict`` into a human-readable line and log it."""
        # Print the experiment name periodically and at each epoch end so
        # users can tell experiments apart in the log stream.
        if runner.meta is not None and 'exp_name' in runner.meta:
            if (self.every_n_iters(runner, self.interval_exp_name)
                    or (self.by_epoch and self.end_of_epoch(runner))):
                exp_info = f"Exp name: {runner.meta['exp_name']}"
                runner.logger.info(exp_info)
        if log_dict['mode'] == 'train':
            if isinstance(log_dict['lr'], dict):
                lr_str = []
                for k, val in log_dict['lr'].items():
                    lr_str.append(f'lr_{k}: {val:.3e}')
                lr_str = ' '.join(lr_str)
            else:
                lr_str = f"lr: {log_dict['lr']:.3e}"
            if self.by_epoch:
                log_str = f"Epoch [{log_dict['epoch']}][{log_dict['iter']}/{len(runner.data_loader)}] "
            else:
                log_str = f"Iter [{log_dict['iter']}/{runner.max_iters}] "
            log_str += f'{lr_str}, '
            if 'time' in log_dict.keys():
                # ETA from the running average of per-iteration time.
                self.time_sec_tot += (log_dict['time'] * self.interval)
                time_sec_avg = self.time_sec_tot / (
                    runner.iter - self.start_iter + 1)
                eta_sec = time_sec_avg * (runner.max_iters - runner.iter - 1)
                eta_str = str(datetime.timedelta(seconds=int(eta_sec)))
                log_str += f'eta: {eta_str}, '
                log_str += f"time: {log_dict['time']:.3f}, data_time: {log_dict['data_time']:.3f}, "
                if torch.cuda.is_available():
                    log_str += f"memory: {log_dict['memory']}, "
        elif self.by_epoch:
            log_str = f"Epoch({log_dict['mode']}) [{log_dict['epoch']}][{log_dict['iter']}] "
        else:
            log_str = f"Iter({log_dict['mode']}) [{log_dict['iter']}] "
        log_items = []
        for name, val in log_dict.items():
            # These items are already included in log_str above.
            if name in ['mode', 'Epoch', 'iter', 'lr', 'time', 'data_time',
                        'memory', 'epoch']:
                continue
            if isinstance(val, float):
                val = f'{val:.4f}'
            log_items.append(f'{name}: {val}')
        log_str += ', '.join(log_items)
        runner.logger.info(log_str)

    def _dump_log(self, log_dict, runner):
        """Append ``log_dict`` to the json log file (rank 0 only)."""
        json_log = OrderedDict()
        for k, v in log_dict.items():
            json_log[k] = self._round_float(v)
        if runner.rank == 0:
            with open(self.json_log_path, 'a+') as f:
                mmcv.dump(json_log, f, file_format='json')
                f.write('\n')

    def _round_float(self, items):
        """Round floats (recursively inside lists) to 5 decimal places."""
        if isinstance(items, list):
            return [self._round_float(item) for item in items]
        elif isinstance(items, float):
            return round(items, 5)
        else:
            return items

    def log(self, runner):
        if 'eval_iter_num' in runner.log_buffer.output:
            # Evaluation passes report their own iteration counter.
            cur_iter = runner.log_buffer.output.pop('eval_iter_num')
        else:
            cur_iter = self.get_iter(runner, inner_iter=True)
        log_dict = OrderedDict(
            mode=self.get_mode(runner),
            epoch=self.get_epoch(runner),
            iter=cur_iter)
        # The learning rate is a flat list or a per-optimizer dict of lists;
        # only the first param group's value is logged.
        cur_lr = runner.current_lr()
        if isinstance(cur_lr, list):
            log_dict['lr'] = cur_lr[0]
        else:
            assert isinstance(cur_lr, dict)
            log_dict['lr'] = {}
            for k, lr_ in cur_lr.items():
                assert isinstance(lr_, list)
                log_dict['lr'].update({k: lr_[0]})
        if 'time' in runner.log_buffer.output:
            # Record peak GPU memory statistics for train iterations.
            if torch.cuda.is_available():
                log_dict['memory'] = self._get_max_memory(runner)
        log_dict = dict(log_dict, **runner.log_buffer.output)
        self._log_info(log_dict, runner)
        self._dump_log(log_dict, runner)
        return log_dict

    def after_run(self, runner):
        # Copy the collected logs to ``self.out_dir`` via the file client,
        # optionally removing the local copies afterwards.
        if self.out_dir is not None:
            for filename in scandir(runner.work_dir, self.out_suffix, True):
                local_filepath = osp.join(runner.work_dir, filename)
                out_filepath = self.file_client.join_path(
                    self.out_dir, filename)
                with open(local_filepath, 'r') as f:
                    self.file_client.put_text(f.read(), out_filepath)
                runner.logger.info(
                    f'The file {local_filepath} has been uploaded to '
                    f'{out_filepath}.')
                if not self.keep_local:
                    os.remove(local_filepath)
                    runner.logger.info(
                        f'{local_filepath} was removed due to the '
                        f'`self.keep_local=False`')
@HOOKS.register_module()
class WandbLoggerHook(LoggerHook):
    """Log metrics (and optionally artifacts) with wandb.

    Requires `wandb`_ to be installed.

    Args:
        init_kwargs (dict): A dict contains the initialization keys. Check
            https://docs.wandb.ai/ref/python/init for more init arguments.
        interval (int): Logging interval (every k iterations). Default 10.
        ignore_last (bool): Ignore the log of last iterations in each epoch
            if less than `interval`. Default: True.
        reset_flag (bool): Whether to clear the output buffer after logging.
            Default: False.
        commit (bool): Save the metrics dict to the wandb server and
            increment the step. If false ``wandb.log`` just updates the
            current metrics dict with the row argument and metrics won't be
            saved until ``wandb.log`` is called with ``commit=True``.
            Default: True.
        by_epoch (bool): Whether EpochBasedRunner is used. Default: True.
        with_step (bool): If True, the step will be logged from
            ``self.get_iters``. Otherwise, step will not be logged.
            Default: True.
        log_artifact (bool): If True, artifacts in {work_dir} will be
            uploaded to wandb after training ends. Default: True.
            `New in version 1.4.3.`
        out_suffix (str or tuple[str], optional): Those filenames ending
            with ``out_suffix`` will be uploaded to wandb.
            Default: ('.log.json', '.log', '.py'). `New in version 1.4.3.`

    .. _wandb:
        https://docs.wandb.ai
    """

    def __init__(self,
                 init_kwargs=None,
                 interval=10,
                 ignore_last=True,
                 reset_flag=False,
                 commit=True,
                 by_epoch=True,
                 with_step=True,
                 log_artifact=True,
                 out_suffix=('.log.json', '.log', '.py')):
        super(WandbLoggerHook, self).__init__(interval, ignore_last,
                                              reset_flag, by_epoch)
        self.import_wandb()
        self.init_kwargs = init_kwargs
        self.commit = commit
        self.with_step = with_step
        self.log_artifact = log_artifact
        self.out_suffix = out_suffix

    def import_wandb(self):
        """Bind the wandb module, erroring helpfully if missing."""
        try:
            import wandb
        except ImportError:
            raise ImportError('Please run "pip install wandb" to install wandb')
        self.wandb = wandb

    @master_only
    def before_run(self, runner):
        super(WandbLoggerHook, self).before_run(runner)
        if self.wandb is None:
            self.import_wandb()
        # An empty/None init_kwargs means a plain ``wandb.init()``.
        kwargs = self.init_kwargs if self.init_kwargs else {}
        self.wandb.init(**kwargs)

    @master_only
    def log(self, runner):
        tags = self.get_loggable_tags(runner)
        if not tags:
            return
        if self.with_step:
            self.wandb.log(
                tags, step=self.get_iter(runner), commit=self.commit)
        else:
            tags['global_step'] = self.get_iter(runner)
            self.wandb.log(tags, commit=self.commit)

    @master_only
    def after_run(self, runner):
        if self.log_artifact:
            # Bundle the run's logs/configs into a single wandb artifact.
            artifact = self.wandb.Artifact(name='artifacts', type='model')
            for filename in scandir(runner.work_dir, self.out_suffix, True):
                artifact.add_file(osp.join(runner.work_dir, filename))
            self.wandb.log_artifact(artifact)
        self.wandb.join()
class LrUpdaterHook(Hook):
    """LR Scheduler in MMCV.

    Subclasses implement :meth:`get_lr` to define the schedule; this base
    class handles warmup, per-param-group bookkeeping, and writing the
    computed values into the optimizer(s).

    Args:
        by_epoch (bool): LR changes epoch by epoch.
        warmup (string): Type of warmup used. It can be None (use no warmup),
            'constant', 'linear' or 'exp'.
        warmup_iters (int): The number of iterations or epochs that warmup
            lasts.
        warmup_ratio (float): LR used at the beginning of warmup equals to
            ``warmup_ratio * initial_lr``.
        warmup_by_epoch (bool): When True, ``warmup_iters`` means the number
            of epochs that warmup lasts, otherwise it means the number of
            iterations that warmup lasts.
    """

    def __init__(self,
                 by_epoch=True,
                 warmup=None,
                 warmup_iters=0,
                 warmup_ratio=0.1,
                 warmup_by_epoch=False):
        # Validate warmup configuration up front so misconfiguration fails
        # early.  (Both checks only apply when warmup is enabled, so they
        # live under a single guard.)
        if warmup is not None:
            if warmup not in ['constant', 'linear', 'exp']:
                # BUGFIX: the original message omitted 'exp' even though the
                # membership test above accepts it.
                raise ValueError(
                    f'"{warmup}" is not a supported type for warming up, '
                    'valid types are "constant", "linear" and "exp"')
            assert warmup_iters > 0, \
                '"warmup_iters" must be a positive integer'
            assert 0 < warmup_ratio <= 1.0, \
                '"warmup_ratio" must be in range (0,1]'
        self.by_epoch = by_epoch
        self.warmup = warmup
        self.warmup_iters = warmup_iters
        self.warmup_ratio = warmup_ratio
        self.warmup_by_epoch = warmup_by_epoch
        if self.warmup_by_epoch:
            # ``warmup_iters`` was given in epochs; the true iteration count
            # is resolved in ``before_train_epoch`` once the epoch length
            # (len(runner.data_loader)) is known.
            self.warmup_epochs = self.warmup_iters
            self.warmup_iters = None
        else:
            self.warmup_epochs = None
        self.base_lr = []  # initial lr for all param groups
        self.regular_lr = []  # expected lr if no warmup were applied

    def _set_lr(self, runner, lr_groups):
        """Write ``lr_groups`` into every optimizer param group.

        Supports both a single optimizer and a dict of named optimizers.
        """
        if isinstance(runner.optimizer, dict):
            for k, optim in runner.optimizer.items():
                for param_group, lr in zip(optim.param_groups, lr_groups[k]):
                    param_group['lr'] = lr
        else:
            for param_group, lr in zip(runner.optimizer.param_groups,
                                       lr_groups):
                param_group['lr'] = lr

    def get_lr(self, runner, base_lr):
        """Compute the current lr for one param group. Subclass hook."""
        raise NotImplementedError

    def get_regular_lr(self, runner):
        """Return the schedule lr (no warmup) for every param group."""
        if isinstance(runner.optimizer, dict):
            lr_groups = {}
            for k in runner.optimizer.keys():
                _lr_group = [
                    self.get_lr(runner, _base_lr)
                    for _base_lr in self.base_lr[k]
                ]
                lr_groups.update({k: _lr_group})
            return lr_groups
        else:
            return [self.get_lr(runner, _base_lr)
                    for _base_lr in self.base_lr]

    def get_warmup_lr(self, cur_iters):
        """Scale the regular lr according to the warmup policy."""

        def _get_warmup_lr(cur_iters, regular_lr):
            # ``warmup`` was validated in __init__, so exactly one branch
            # matches here.
            if self.warmup == 'constant':
                warmup_lr = [_lr * self.warmup_ratio for _lr in regular_lr]
            elif self.warmup == 'linear':
                k = (1 - cur_iters / self.warmup_iters) * \
                    (1 - self.warmup_ratio)
                warmup_lr = [_lr * (1 - k) for _lr in regular_lr]
            elif self.warmup == 'exp':
                k = self.warmup_ratio**(1 - cur_iters / self.warmup_iters)
                warmup_lr = [_lr * k for _lr in regular_lr]
            return warmup_lr

        if isinstance(self.regular_lr, dict):
            lr_groups = {}
            for key, regular_lr in self.regular_lr.items():
                lr_groups[key] = _get_warmup_lr(cur_iters, regular_lr)
            return lr_groups
        else:
            return _get_warmup_lr(cur_iters, self.regular_lr)

    def before_run(self, runner):
        # NOTE: when resuming from a checkpoint, if 'initial_lr' is not
        # saved, it will be set according to the optimizer params.
        if isinstance(runner.optimizer, dict):
            self.base_lr = {}
            for k, optim in runner.optimizer.items():
                for group in optim.param_groups:
                    group.setdefault('initial_lr', group['lr'])
                _base_lr = [
                    group['initial_lr'] for group in optim.param_groups
                ]
                self.base_lr.update({k: _base_lr})
        else:
            for group in runner.optimizer.param_groups:
                group.setdefault('initial_lr', group['lr'])
            self.base_lr = [
                group['initial_lr'] for group in runner.optimizer.param_groups
            ]

    def before_train_epoch(self, runner):
        if self.warmup_iters is None:
            # Epoch-based warmup: convert warmup_epochs to iterations now
            # that the dataloader length is available.
            epoch_len = len(runner.data_loader)
            self.warmup_iters = self.warmup_epochs * epoch_len
        if not self.by_epoch:
            return
        self.regular_lr = self.get_regular_lr(runner)
        self._set_lr(runner, self.regular_lr)

    def before_train_iter(self, runner):
        cur_iter = runner.iter
        if not self.by_epoch:
            self.regular_lr = self.get_regular_lr(runner)
            if self.warmup is None or cur_iter >= self.warmup_iters:
                self._set_lr(runner, self.regular_lr)
            else:
                warmup_lr = self.get_warmup_lr(cur_iter)
                self._set_lr(runner, warmup_lr)
        elif self.by_epoch:
            # Epoch-based schedule: only the warmup portion is handled per
            # iteration; past warmup the epoch hook owns the lr.
            if self.warmup is None or cur_iter > self.warmup_iters:
                return
            elif cur_iter == self.warmup_iters:
                self._set_lr(runner, self.regular_lr)
            else:
                warmup_lr = self.get_warmup_lr(cur_iter)
                self._set_lr(runner, warmup_lr)
@HOOKS.register_module() class FixedLrUpdaterHook(LrUpdaterHook): def __init__(self, **kwargs): super(FixedLrUpdaterHook, self).__init__(**kwargs) def get_lr(self, runner, base_lr): return base_lr
@HOOKS.register_module() class StepLrUpdaterHook(LrUpdaterHook): "Step LR scheduler with min_lr clipping.\n\n Args:\n step (int | list[int]): Step to decay the LR. If an int value is given,\n regard it as the decay interval. If a list is given, decay LR at\n these steps.\n gamma (float, optional): Decay LR ratio. Default: 0.1.\n min_lr (float, optional): Minimum LR value to keep. If LR after decay\n is lower than `min_lr`, it will be clipped to this value. If None\n is given, we don't perform lr clipping. Default: None.\n " def __init__(self, step, gamma=0.1, min_lr=None, **kwargs): if isinstance(step, list): assert mmcv.is_list_of(step, int) assert all([(s > 0) for s in step]) elif isinstance(step, int): assert (step > 0) else: raise TypeError('"step" must be a list or integer') self.step = step self.gamma = gamma self.min_lr = min_lr super(StepLrUpdaterHook, self).__init__(**kwargs) def get_lr(self, runner, base_lr): progress = (runner.epoch if self.by_epoch else runner.iter) if isinstance(self.step, int): exp = (progress // self.step) else: exp = len(self.step) for (i, s) in enumerate(self.step): if (progress < s): exp = i break lr = (base_lr * (self.gamma ** exp)) if (self.min_lr is not None): lr = max(lr, self.min_lr) return lr
@HOOKS.register_module() class ExpLrUpdaterHook(LrUpdaterHook): def __init__(self, gamma, **kwargs): self.gamma = gamma super(ExpLrUpdaterHook, self).__init__(**kwargs) def get_lr(self, runner, base_lr): progress = (runner.epoch if self.by_epoch else runner.iter) return (base_lr * (self.gamma ** progress))
@HOOKS.register_module() class PolyLrUpdaterHook(LrUpdaterHook): def __init__(self, power=1.0, min_lr=0.0, **kwargs): self.power = power self.min_lr = min_lr super(PolyLrUpdaterHook, self).__init__(**kwargs) def get_lr(self, runner, base_lr): if self.by_epoch: progress = runner.epoch max_progress = runner.max_epochs else: progress = runner.iter max_progress = runner.max_iters coeff = ((1 - (progress / max_progress)) ** self.power) return (((base_lr - self.min_lr) * coeff) + self.min_lr)
@HOOKS.register_module() class InvLrUpdaterHook(LrUpdaterHook): def __init__(self, gamma, power=1.0, **kwargs): self.gamma = gamma self.power = power super(InvLrUpdaterHook, self).__init__(**kwargs) def get_lr(self, runner, base_lr): progress = (runner.epoch if self.by_epoch else runner.iter) return (base_lr * ((1 + (self.gamma * progress)) ** (- self.power)))
@HOOKS.register_module() class CosineAnnealingLrUpdaterHook(LrUpdaterHook): def __init__(self, min_lr=None, min_lr_ratio=None, **kwargs): assert ((min_lr is None) ^ (min_lr_ratio is None)) self.min_lr = min_lr self.min_lr_ratio = min_lr_ratio super(CosineAnnealingLrUpdaterHook, self).__init__(**kwargs) def get_lr(self, runner, base_lr): if self.by_epoch: progress = runner.epoch max_progress = runner.max_epochs else: progress = runner.iter max_progress = runner.max_iters if (self.min_lr_ratio is not None): target_lr = (base_lr * self.min_lr_ratio) else: target_lr = self.min_lr return annealing_cos(base_lr, target_lr, (progress / max_progress))
@HOOKS.register_module() class FlatCosineAnnealingLrUpdaterHook(LrUpdaterHook): 'Flat + Cosine lr schedule.\n\n Modified from https://github.com/fastai/fastai/blob/master/fastai/callback/schedule.py#L128 # noqa: E501\n\n Args:\n start_percent (float): When to start annealing the learning rate\n after the percentage of the total training steps.\n The value should be in range [0, 1).\n Default: 0.75\n min_lr (float, optional): The minimum lr. Default: None.\n min_lr_ratio (float, optional): The ratio of minimum lr to the base lr.\n Either `min_lr` or `min_lr_ratio` should be specified.\n Default: None.\n ' def __init__(self, start_percent=0.75, min_lr=None, min_lr_ratio=None, **kwargs): assert ((min_lr is None) ^ (min_lr_ratio is None)) if ((start_percent < 0) or (start_percent > 1) or (not isinstance(start_percent, float))): raise ValueError(f'expected float between 0 and 1 start_percent, but got {start_percent}') self.start_percent = start_percent self.min_lr = min_lr self.min_lr_ratio = min_lr_ratio super(FlatCosineAnnealingLrUpdaterHook, self).__init__(**kwargs) def get_lr(self, runner, base_lr): if self.by_epoch: start = round((runner.max_epochs * self.start_percent)) progress = (runner.epoch - start) max_progress = (runner.max_epochs - start) else: start = round((runner.max_iters * self.start_percent)) progress = (runner.iter - start) max_progress = (runner.max_iters - start) if (self.min_lr_ratio is not None): target_lr = (base_lr * self.min_lr_ratio) else: target_lr = self.min_lr if (progress < 0): return base_lr else: return annealing_cos(base_lr, target_lr, (progress / max_progress))
@HOOKS.register_module() class CosineRestartLrUpdaterHook(LrUpdaterHook): 'Cosine annealing with restarts learning rate scheme.\n\n Args:\n periods (list[int]): Periods for each cosine anneling cycle.\n restart_weights (list[float], optional): Restart weights at each\n restart iteration. Default: [1].\n min_lr (float, optional): The minimum lr. Default: None.\n min_lr_ratio (float, optional): The ratio of minimum lr to the base lr.\n Either `min_lr` or `min_lr_ratio` should be specified.\n Default: None.\n ' def __init__(self, periods, restart_weights=[1], min_lr=None, min_lr_ratio=None, **kwargs): assert ((min_lr is None) ^ (min_lr_ratio is None)) self.periods = periods self.min_lr = min_lr self.min_lr_ratio = min_lr_ratio self.restart_weights = restart_weights assert (len(self.periods) == len(self.restart_weights)), 'periods and restart_weights should have the same length.' super(CosineRestartLrUpdaterHook, self).__init__(**kwargs) self.cumulative_periods = [sum(self.periods[0:(i + 1)]) for i in range(0, len(self.periods))] def get_lr(self, runner, base_lr): if self.by_epoch: progress = runner.epoch else: progress = runner.iter if (self.min_lr_ratio is not None): target_lr = (base_lr * self.min_lr_ratio) else: target_lr = self.min_lr idx = get_position_from_periods(progress, self.cumulative_periods) current_weight = self.restart_weights[idx] nearest_restart = (0 if (idx == 0) else self.cumulative_periods[(idx - 1)]) current_periods = self.periods[idx] alpha = min(((progress - nearest_restart) / current_periods), 1) return annealing_cos(base_lr, target_lr, alpha, current_weight)
def get_position_from_periods(iteration, cumulative_periods): 'Get the position from a period list.\n\n It will return the index of the right-closest number in the period list.\n For example, the cumulative_periods = [100, 200, 300, 400],\n if iteration == 50, return 0;\n if iteration == 210, return 2;\n if iteration == 300, return 3.\n\n Args:\n iteration (int): Current iteration.\n cumulative_periods (list[int]): Cumulative period list.\n\n Returns:\n int: The position of the right-closest number in the period list.\n ' for (i, period) in enumerate(cumulative_periods): if (iteration < period): return i raise ValueError(f'Current iteration {iteration} exceeds cumulative_periods {cumulative_periods}')
@HOOKS.register_module()
class CyclicLrUpdaterHook(LrUpdaterHook):
    """Cyclic LR Scheduler.

    Implements the cyclical learning rate policy (CLR) described in
    https://arxiv.org/pdf/1506.01186.pdf

    Different from the original paper, cosine annealing rather than the
    triangular policy is used inside a cycle, which improves performance in
    the 3D detection area.

    Args:
        by_epoch (bool, optional): Whether to update LR by epoch. Only
            ``False`` is currently supported.
        target_ratio (tuple[float], optional): Relative ratio of the highest
            LR and the lowest LR to the initial LR.
        cyclic_times (int, optional): Number of cycles during training.
        step_ratio_up (float, optional): The ratio of the increasing process
            of LR in the total cycle.
        anneal_strategy (str, optional): {'cos', 'linear'}. Annealing
            strategy: 'cos' for cosine annealing, 'linear' for linear
            annealing. Default: 'cos'.
        gamma (float, optional): Cycle decay ratio in (0, 1]. Values below 1
            shrink the lr amplitude every cycle. Default: 1.
            `New in version 1.4.4.`
    """

    def __init__(self, by_epoch=False, target_ratio=(10, 0.0001),
                 cyclic_times=1, step_ratio_up=0.4, anneal_strategy='cos',
                 gamma=1, **kwargs):
        # Normalize target_ratio to a (up_ratio, down_ratio) pair; a scalar
        # or 1-tuple gets a down ratio of up / 1e5.
        if isinstance(target_ratio, float):
            target_ratio = (target_ratio, (target_ratio / 100000.0))
        elif isinstance(target_ratio, tuple):
            target_ratio = ((target_ratio[0], (target_ratio[0] / 100000.0))
                            if (len(target_ratio) == 1) else target_ratio)
        else:
            raise ValueError(f'target_ratio should be either float or tuple, '
                             f'got {type(target_ratio)}')
        assert (len(target_ratio) == 2), \
            '"target_ratio" must be list or tuple of two floats'
        assert (0 <= step_ratio_up < 1.0), \
            '"step_ratio_up" must be in range [0,1)'
        assert (0 < gamma <= 1), '"gamma" must be in range (0, 1]'
        self.target_ratio = target_ratio
        self.cyclic_times = cyclic_times
        self.step_ratio_up = step_ratio_up
        self.gamma = gamma
        # Resolved in before_run once runner.max_iters is known.
        self.max_iter_per_phase = None
        # Each entry: [start_iter, end_iter, start_ratio, end_ratio].
        self.lr_phases = []
        if (anneal_strategy not in ['cos', 'linear']):
            raise ValueError(f'anneal_strategy must be one of "cos" or '
                             f'"linear", instead got {anneal_strategy}')
        elif (anneal_strategy == 'cos'):
            self.anneal_func = annealing_cos
        elif (anneal_strategy == 'linear'):
            self.anneal_func = annealing_linear
        assert (not by_epoch), 'currently only support "by_epoch" = False'
        super(CyclicLrUpdaterHook, self).__init__(by_epoch, **kwargs)

    def before_run(self, runner):
        super(CyclicLrUpdaterHook, self).before_run(runner)
        # Split training into cycles, each with an "up" then a "down" phase.
        self.max_iter_per_phase = (runner.max_iters // self.cyclic_times)
        iter_up_phase = int((self.step_ratio_up * self.max_iter_per_phase))
        self.lr_phases.append([0, iter_up_phase, 1, self.target_ratio[0]])
        self.lr_phases.append([iter_up_phase, self.max_iter_per_phase,
                               self.target_ratio[0], self.target_ratio[1]])

    def get_lr(self, runner, base_lr):
        # Position within the current cycle, and the cycle index (used for
        # gamma-based amplitude decay).
        curr_iter = (runner.iter % self.max_iter_per_phase)
        curr_cycle = (runner.iter // self.max_iter_per_phase)
        scale = (self.gamma ** curr_cycle)
        for (start_iter, end_iter, start_ratio, end_ratio) in self.lr_phases:
            if (start_iter <= curr_iter < end_iter):
                # Decay the peak ratio towards 1 by ``scale``; in the up
                # phase the end ratio is the peak, in the down phase the
                # start ratio is.
                if (start_iter == 0):
                    end_ratio = ((1 - scale) + (end_ratio * scale))
                else:
                    start_ratio = ((1 - scale) + (start_ratio * scale))
                progress = (curr_iter - start_iter)
                return self.anneal_func((base_lr * start_ratio),
                                        (base_lr * end_ratio),
                                        (progress / (end_iter - start_iter)))
@HOOKS.register_module()
class OneCycleLrUpdaterHook(LrUpdaterHook):
    """One Cycle LR Scheduler.

    The 1cycle learning rate policy changes the learning rate after every
    batch, as described in https://arxiv.org/pdf/1708.07120.pdf

    Args:
        max_lr (float or list): Upper learning rate boundaries in the cycle
            for each parameter group.
        total_steps (int, optional): The total number of steps in the cycle.
            If not provided, the runner's max_iters is used. Default: None.
        pct_start (float): The percentage of the cycle (in number of steps)
            spent increasing the learning rate. Default: 0.3.
        anneal_strategy (str): {'cos', 'linear'}. Annealing strategy: 'cos'
            for cosine annealing, 'linear' for linear annealing.
            Default: 'cos'.
        div_factor (float): Determines the initial learning rate via
            ``initial_lr = max_lr / div_factor``. Default: 25.
        final_div_factor (float): Determines the minimum learning rate via
            ``min_lr = initial_lr / final_div_factor``. Default: 1e4.
        three_phase (bool): If True, use a third phase of the schedule to
            annihilate the learning rate according to ``final_div_factor``
            instead of modifying the second phase (the first two phases will
            be symmetrical about the step indicated by ``pct_start``).
            Default: False.
    """

    def __init__(self, max_lr, total_steps=None, pct_start=0.3,
                 anneal_strategy='cos', div_factor=25,
                 final_div_factor=10000.0, three_phase=False, **kwargs):
        # This scheduler is iteration-based only.
        if ('by_epoch' not in kwargs):
            kwargs['by_epoch'] = False
        else:
            assert (not kwargs['by_epoch']), \
                'currently only support "by_epoch" = False'
        if (not isinstance(max_lr, (numbers.Number, list, dict))):
            raise ValueError(f'the type of max_lr must be the one of list or '
                             f'dict, but got {type(max_lr)}')
        self._max_lr = max_lr
        if (total_steps is not None):
            if (not isinstance(total_steps, int)):
                # NOTE(review): message has a typo ('butgot'); left unchanged
                # here since a doc-only pass must not alter runtime strings.
                raise ValueError(f'the type of total_steps must be int, but'
                                 f'got {type(total_steps)}')
            # Only set when given; before_run uses hasattr() to detect it.
            self.total_steps = total_steps
        if ((pct_start < 0) or (pct_start > 1)
                or (not isinstance(pct_start, float))):
            raise ValueError(f'expected float between 0 and 1 pct_start, but '
                             f'got {pct_start}')
        self.pct_start = pct_start
        if (anneal_strategy not in ['cos', 'linear']):
            raise ValueError(f'anneal_strategy must be one of "cos" or '
                             f'"linear", instead got {anneal_strategy}')
        elif (anneal_strategy == 'cos'):
            self.anneal_func = annealing_cos
        elif (anneal_strategy == 'linear'):
            self.anneal_func = annealing_linear
        self.div_factor = div_factor
        self.final_div_factor = final_div_factor
        self.three_phase = three_phase
        # Each entry: [end_iter, start_lr_factor, end_lr_factor].
        self.lr_phases = []
        super(OneCycleLrUpdaterHook, self).__init__(**kwargs)

    def before_run(self, runner):
        if hasattr(self, 'total_steps'):
            total_steps = self.total_steps
        else:
            total_steps = runner.max_iters
        if (total_steps < runner.max_iters):
            raise ValueError(f'The total steps must be greater than or equal '
                             f'to max iterations {runner.max_iters} of '
                             f'runner, but total steps is {total_steps}.')
        # Derive the initial lr (= max_lr / div_factor) per param group and
        # record it as 'initial_lr' on each group.
        if isinstance(runner.optimizer, dict):
            self.base_lr = {}
            for (k, optim) in runner.optimizer.items():
                _max_lr = format_param(k, optim, self._max_lr)
                self.base_lr[k] = [(lr / self.div_factor) for lr in _max_lr]
                for (group, lr) in zip(optim.param_groups, self.base_lr[k]):
                    group.setdefault('initial_lr', lr)
        else:
            k = type(runner.optimizer).__name__
            _max_lr = format_param(k, runner.optimizer, self._max_lr)
            self.base_lr = [(lr / self.div_factor) for lr in _max_lr]
            for (group, lr) in zip(runner.optimizer.param_groups,
                                   self.base_lr):
                group.setdefault('initial_lr', lr)
        # Build the phase boundaries (end step, start factor, end factor);
        # factors are multiples of the initial lr.
        if self.three_phase:
            self.lr_phases.append([(float((self.pct_start * total_steps)) - 1),
                                   1, self.div_factor])
            self.lr_phases.append(
                [(float(((2 * self.pct_start) * total_steps)) - 2),
                 self.div_factor, 1])
            self.lr_phases.append([(total_steps - 1), 1,
                                   (1 / self.final_div_factor)])
        else:
            self.lr_phases.append([(float((self.pct_start * total_steps)) - 1),
                                   1, self.div_factor])
            self.lr_phases.append([(total_steps - 1), self.div_factor,
                                   (1 / self.final_div_factor)])

    def get_lr(self, runner, base_lr):
        curr_iter = runner.iter
        start_iter = 0
        # Find the phase containing the current iteration and anneal within
        # it; phases are consumed in order so start_iter tracks the previous
        # phase's end.
        for (i, (end_iter, start_lr, end_lr)) in enumerate(self.lr_phases):
            if (curr_iter <= end_iter):
                pct = ((curr_iter - start_iter) / (end_iter - start_iter))
                lr = self.anneal_func((base_lr * start_lr),
                                      (base_lr * end_lr), pct)
                break
            start_iter = end_iter
        return lr
def annealing_cos(start, end, factor, weight=1): 'Calculate annealing cos learning rate.\n\n Cosine anneal from `weight * start + (1 - weight) * end` to `end` as\n percentage goes from 0.0 to 1.0.\n\n Args:\n start (float): The starting learning rate of the cosine annealing.\n end (float): The ending learing rate of the cosine annealing.\n factor (float): The coefficient of `pi` when calculating the current\n percentage. Range from 0.0 to 1.0.\n weight (float, optional): The combination factor of `start` and `end`\n when calculating the actual starting learning rate. Default to 1.\n ' cos_out = (cos((pi * factor)) + 1) return (end + (((0.5 * weight) * (start - end)) * cos_out))
def annealing_linear(start, end, factor): 'Calculate annealing linear learning rate.\n\n Linear anneal from `start` to `end` as percentage goes from 0.0 to 1.0.\n\n Args:\n start (float): The starting learning rate of the linear annealing.\n end (float): The ending learing rate of the linear annealing.\n factor (float): The coefficient of `pi` when calculating the current\n percentage. Range from 0.0 to 1.0.\n ' return (start + ((end - start) * factor))
def format_param(name, optim, param): if isinstance(param, numbers.Number): return ([param] * len(optim.param_groups)) elif isinstance(param, (list, tuple)): if (len(param) != len(optim.param_groups)): raise ValueError(f'expected {len(optim.param_groups)} values for {name}, got {len(param)}') return param else: if (name not in param): raise KeyError(f'{name} is not found in {param.keys()}') return param[name]
@HOOKS.register_module() class EmptyCacheHook(Hook): def __init__(self, before_epoch=False, after_epoch=True, after_iter=False): self._before_epoch = before_epoch self._after_epoch = after_epoch self._after_iter = after_iter def after_iter(self, runner): if self._after_iter: torch.cuda.empty_cache() def before_epoch(self, runner): if self._before_epoch: torch.cuda.empty_cache() def after_epoch(self, runner): if self._after_epoch: torch.cuda.empty_cache()
class MomentumUpdaterHook(Hook):
    """Momentum scheduler base class.

    Mirrors :class:`LrUpdaterHook` but updates the optimizer momentum (or
    the first Adam beta) instead of the learning rate. Subclasses implement
    :meth:`get_momentum`.

    Args:
        by_epoch (bool): Momentum changes epoch by epoch.
        warmup (string): Type of warmup used. It can be None (use no warmup),
            'constant', 'linear' or 'exp'.
        warmup_iters (int): The number of iterations that warmup lasts.
        warmup_ratio (float): Momentum used at the beginning of warmup equals
            to ``warmup_ratio * initial_momentum``.
    """

    def __init__(self,
                 by_epoch=True,
                 warmup=None,
                 warmup_iters=0,
                 warmup_ratio=0.9):
        # Validate warmup configuration up front (single guard instead of
        # two identical ones).
        if warmup is not None:
            if warmup not in ['constant', 'linear', 'exp']:
                # BUGFIX: the original message omitted 'exp' even though the
                # membership test above accepts it.
                raise ValueError(
                    f'"{warmup}" is not a supported type for warming up, '
                    'valid types are "constant", "linear" and "exp"')
            assert warmup_iters > 0, \
                '"warmup_iters" must be a positive integer'
            # BUGFIX: the assert message referred to "warmup_momentum",
            # which is not a parameter of this hook; the checked value is
            # ``warmup_ratio``.
            assert 0 < warmup_ratio <= 1.0, \
                '"warmup_ratio" must be in range (0,1]'
        self.by_epoch = by_epoch
        self.warmup = warmup
        self.warmup_iters = warmup_iters
        self.warmup_ratio = warmup_ratio
        self.base_momentum = []  # initial momentum for all param groups
        self.regular_momentum = []  # scheduled momentum if no warmup applied

    def _set_momentum(self, runner, momentum_groups):
        """Write momentum values into every optimizer param group.

        SGD-style optimizers expose 'momentum'; Adam-style ones expose
        'betas', whose first element plays the momentum role.
        """
        if isinstance(runner.optimizer, dict):
            for k, optim in runner.optimizer.items():
                for param_group, mom in zip(optim.param_groups,
                                            momentum_groups[k]):
                    if 'momentum' in param_group.keys():
                        param_group['momentum'] = mom
                    elif 'betas' in param_group.keys():
                        param_group['betas'] = (mom, param_group['betas'][1])
        else:
            for param_group, mom in zip(runner.optimizer.param_groups,
                                        momentum_groups):
                if 'momentum' in param_group.keys():
                    param_group['momentum'] = mom
                elif 'betas' in param_group.keys():
                    param_group['betas'] = (mom, param_group['betas'][1])

    def get_momentum(self, runner, base_momentum):
        """Compute the current momentum for one param group. Subclass hook."""
        raise NotImplementedError

    def get_regular_momentum(self, runner):
        """Return the scheduled momentum for every param group."""
        if isinstance(runner.optimizer, dict):
            momentum_groups = {}
            for k in runner.optimizer.keys():
                _momentum_group = [
                    self.get_momentum(runner, _base_momentum)
                    for _base_momentum in self.base_momentum[k]
                ]
                momentum_groups.update({k: _momentum_group})
            return momentum_groups
        else:
            return [
                self.get_momentum(runner, _base_momentum)
                for _base_momentum in self.base_momentum
            ]

    def get_warmup_momentum(self, cur_iters):
        """Scale the regular momentum according to the warmup policy.

        Note the division: momentum warms up *downwards* from a value above
        the regular one (the inverse of lr warmup).
        """

        def _get_warmup_momentum(cur_iters, regular_momentum):
            if self.warmup == 'constant':
                warmup_momentum = [
                    _momentum / self.warmup_ratio
                    for _momentum in regular_momentum
                ]
            elif self.warmup == 'linear':
                k = (1 - cur_iters / self.warmup_iters) * \
                    (1 - self.warmup_ratio)
                warmup_momentum = [
                    _momentum / (1 - k) for _momentum in regular_momentum
                ]
            elif self.warmup == 'exp':
                k = self.warmup_ratio**(1 - cur_iters / self.warmup_iters)
                warmup_momentum = [
                    _momentum / k for _momentum in regular_momentum
                ]
            return warmup_momentum

        if isinstance(self.regular_momentum, dict):
            momentum_groups = {}
            for key, regular_momentum in self.regular_momentum.items():
                momentum_groups[key] = _get_warmup_momentum(
                    cur_iters, regular_momentum)
            return momentum_groups
        else:
            return _get_warmup_momentum(cur_iters, self.regular_momentum)

    def before_run(self, runner):
        # Record each group's initial momentum (or beta1) under
        # 'initial_momentum' so resuming keeps the schedule consistent.
        if isinstance(runner.optimizer, dict):
            self.base_momentum = {}
            for k, optim in runner.optimizer.items():
                for group in optim.param_groups:
                    if 'momentum' in group.keys():
                        group.setdefault('initial_momentum', group['momentum'])
                    else:
                        group.setdefault('initial_momentum', group['betas'][0])
                _base_momentum = [
                    group['initial_momentum'] for group in optim.param_groups
                ]
                self.base_momentum.update({k: _base_momentum})
        else:
            for group in runner.optimizer.param_groups:
                if 'momentum' in group.keys():
                    group.setdefault('initial_momentum', group['momentum'])
                else:
                    group.setdefault('initial_momentum', group['betas'][0])
            self.base_momentum = [
                group['initial_momentum']
                for group in runner.optimizer.param_groups
            ]

    def before_train_epoch(self, runner):
        if not self.by_epoch:
            return
        self.regular_momentum = self.get_regular_momentum(runner)
        self._set_momentum(runner, self.regular_momentum)

    def before_train_iter(self, runner):
        cur_iter = runner.iter
        if not self.by_epoch:
            self.regular_momentum = self.get_regular_momentum(runner)
            if self.warmup is None or cur_iter >= self.warmup_iters:
                self._set_momentum(runner, self.regular_momentum)
            else:
                warmup_momentum = self.get_warmup_momentum(cur_iter)
                self._set_momentum(runner, warmup_momentum)
        elif self.by_epoch:
            # Epoch-based schedule: only the warmup portion is handled per
            # iteration.
            if self.warmup is None or cur_iter > self.warmup_iters:
                return
            elif cur_iter == self.warmup_iters:
                self._set_momentum(runner, self.regular_momentum)
            else:
                warmup_momentum = self.get_warmup_momentum(cur_iter)
                self._set_momentum(runner, warmup_momentum)
@HOOKS.register_module() class StepMomentumUpdaterHook(MomentumUpdaterHook): "Step momentum scheduler with min value clipping.\n\n Args:\n step (int | list[int]): Step to decay the momentum. If an int value is\n given, regard it as the decay interval. If a list is given, decay\n momentum at these steps.\n gamma (float, optional): Decay momentum ratio. Default: 0.5.\n min_momentum (float, optional): Minimum momentum value to keep. If\n momentum after decay is lower than this value, it will be clipped\n accordingly. If None is given, we don't perform lr clipping.\n Default: None.\n " def __init__(self, step, gamma=0.5, min_momentum=None, **kwargs): if isinstance(step, list): assert mmcv.is_list_of(step, int) assert all([(s > 0) for s in step]) elif isinstance(step, int): assert (step > 0) else: raise TypeError('"step" must be a list or integer') self.step = step self.gamma = gamma self.min_momentum = min_momentum super(StepMomentumUpdaterHook, self).__init__(**kwargs) def get_momentum(self, runner, base_momentum): progress = (runner.epoch if self.by_epoch else runner.iter) if isinstance(self.step, int): exp = (progress // self.step) else: exp = len(self.step) for (i, s) in enumerate(self.step): if (progress < s): exp = i break momentum = (base_momentum * (self.gamma ** exp)) if (self.min_momentum is not None): momentum = max(momentum, self.min_momentum) return momentum
@HOOKS.register_module() class CosineAnnealingMomentumUpdaterHook(MomentumUpdaterHook): def __init__(self, min_momentum=None, min_momentum_ratio=None, **kwargs): assert ((min_momentum is None) ^ (min_momentum_ratio is None)) self.min_momentum = min_momentum self.min_momentum_ratio = min_momentum_ratio super(CosineAnnealingMomentumUpdaterHook, self).__init__(**kwargs) def get_momentum(self, runner, base_momentum): if self.by_epoch: progress = runner.epoch max_progress = runner.max_epochs else: progress = runner.iter max_progress = runner.max_iters if (self.min_momentum_ratio is not None): target_momentum = (base_momentum * self.min_momentum_ratio) else: target_momentum = self.min_momentum return annealing_cos(base_momentum, target_momentum, (progress / max_progress))
@HOOKS.register_module()
class CyclicMomentumUpdaterHook(MomentumUpdaterHook):
    """Cyclic momentum Scheduler.

    Implements the cyclical momentum scheduler policy described in
    https://arxiv.org/pdf/1708.07120.pdf

    This momentum scheduler is usually used together with CyclicLrUpdater
    to improve performance in the 3D detection area.

    Args:
        target_ratio (tuple[float]): Relative ratio of the lowest momentum
            and the highest momentum to the initial momentum.
        cyclic_times (int): Number of cycles during training.
        step_ratio_up (float): The ratio of the increasing process of
            momentum in the total cycle.
        by_epoch (bool): Whether to update momentum by epoch. Only ``False``
            is currently supported.
        anneal_strategy (str, optional): {'cos', 'linear'}. Annealing
            strategy: 'cos' for cosine annealing, 'linear' for linear
            annealing. Default: 'cos'.
        gamma (float, optional): Cycle decay ratio in (0, 1]. Default: 1.
            `New in version 1.4.4.`
    """

    def __init__(self, by_epoch=False, target_ratio=((0.85 / 0.95), 1),
                 cyclic_times=1, step_ratio_up=0.4, anneal_strategy='cos',
                 gamma=1, **kwargs):
        # Normalize target_ratio to a 2-tuple; a scalar or 1-tuple gets a
        # second element of first / 1e5.
        if isinstance(target_ratio, float):
            target_ratio = (target_ratio, (target_ratio / 100000.0))
        elif isinstance(target_ratio, tuple):
            target_ratio = ((target_ratio[0], (target_ratio[0] / 100000.0))
                            if (len(target_ratio) == 1) else target_ratio)
        else:
            raise ValueError(f'target_ratio should be either float or tuple, '
                             f'got {type(target_ratio)}')
        assert (len(target_ratio) == 2), \
            '"target_ratio" must be list or tuple of two floats'
        assert (0 <= step_ratio_up < 1.0), \
            '"step_ratio_up" must be in range [0,1)'
        self.target_ratio = target_ratio
        self.cyclic_times = cyclic_times
        self.step_ratio_up = step_ratio_up
        self.gamma = gamma
        # Each entry: [start_iter, end_iter, start_ratio, end_ratio].
        self.momentum_phases = []
        if (anneal_strategy not in ['cos', 'linear']):
            raise ValueError(f'anneal_strategy must be one of "cos" or '
                             f'"linear", instead got {anneal_strategy}')
        elif (anneal_strategy == 'cos'):
            self.anneal_func = annealing_cos
        elif (anneal_strategy == 'linear'):
            self.anneal_func = annealing_linear
        assert (not by_epoch), 'currently only support "by_epoch" = False'
        super(CyclicMomentumUpdaterHook, self).__init__(by_epoch, **kwargs)

    def before_run(self, runner):
        super(CyclicMomentumUpdaterHook, self).before_run(runner)
        # Split training into cycles, each with an "up" then a "down" phase.
        max_iter_per_phase = (runner.max_iters // self.cyclic_times)
        iter_up_phase = int((self.step_ratio_up * max_iter_per_phase))
        self.max_iter_per_phase = max_iter_per_phase
        self.momentum_phases.append([0, iter_up_phase, 1,
                                     self.target_ratio[0]])
        self.momentum_phases.append([iter_up_phase, max_iter_per_phase,
                                     self.target_ratio[0],
                                     self.target_ratio[1]])

    def get_momentum(self, runner, base_momentum):
        # Position within the current cycle, and the cycle index (used for
        # gamma-based amplitude decay).
        curr_iter = (runner.iter % self.max_iter_per_phase)
        curr_cycle = (runner.iter // self.max_iter_per_phase)
        scale = (self.gamma ** curr_cycle)
        for (start_iter, end_iter, start_ratio,
             end_ratio) in self.momentum_phases:
            if (start_iter <= curr_iter < end_iter):
                # Decay the extreme ratio towards 1 by ``scale``; in the up
                # phase the end ratio is the extreme, in the down phase the
                # start ratio is.
                if (start_iter == 0):
                    end_ratio = ((1 - scale) + (end_ratio * scale))
                else:
                    start_ratio = ((1 - scale) + (start_ratio * scale))
                progress = (curr_iter - start_iter)
                return self.anneal_func((base_momentum * start_ratio),
                                        (base_momentum * end_ratio),
                                        (progress / (end_iter - start_iter)))
@HOOKS.register_module()
class OneCycleMomentumUpdaterHook(MomentumUpdaterHook):
    """OneCycle momentum scheduler.

    Usually used together with the OneCycleLrUpdater to improve performance:
    momentum is cycled inversely to the learning rate.

    Args:
        base_momentum (float | list | dict): Lower momentum boundaries in the
            cycle for each parameter group. At the peak of a cycle, momentum
            is 'base_momentum' and learning rate is 'max_lr'. Default: 0.85.
        max_momentum (float | list | dict): Upper momentum boundaries in the
            cycle for each parameter group. Functionally it defines the cycle
            amplitude (max_momentum - base_momentum). At the start of a
            cycle, momentum is 'max_momentum' and learning rate is 'base_lr'.
            Default: 0.95.
        pct_start (float): The percentage of the cycle (in number of steps)
            spent increasing the learning rate. Default: 0.3.
        anneal_strategy (str): {'cos', 'linear'} Specifies the annealing
            strategy: 'cos' for cosine annealing, 'linear' for linear
            annealing. Default: 'cos'.
        three_phase (bool): If True, use a third phase of the schedule to
            annihilate the learning rate according to final_div_factor
            instead of modifying the second phase (the first two phases will
            be symmetrical about the step indicated by pct_start).
            Default: False.
    """

    def __init__(self,
                 base_momentum=0.85,
                 max_momentum=0.95,
                 pct_start=0.3,
                 anneal_strategy='cos',
                 three_phase=False,
                 **kwargs):
        # This scheduler is iteration-based only.
        if 'by_epoch' not in kwargs:
            kwargs['by_epoch'] = False
        else:
            assert not kwargs['by_epoch'], \
                'currently only support "by_epoch" = False'
        if not isinstance(base_momentum, (float, list, dict)):
            # message fix: original read 'must be the type among of float,list'
            raise ValueError('base_momentum must be float, list or dict, '
                             f'but got {type(base_momentum)}')
        self._base_momentum = base_momentum
        if not isinstance(max_momentum, (float, list, dict)):
            raise ValueError('max_momentum must be float, list or dict, '
                             f'but got {type(max_momentum)}')
        self._max_momentum = max_momentum
        if pct_start < 0 or pct_start > 1 or not isinstance(pct_start, float):
            raise ValueError('Expected float between 0 and 1 pct_start, '
                             f'but got {pct_start}')
        self.pct_start = pct_start
        if anneal_strategy not in ['cos', 'linear']:
            # typo fix: original message read 'must by one of'
            raise ValueError('anneal_strategy must be one of "cos" or '
                             f'"linear", instead got {anneal_strategy}')
        elif anneal_strategy == 'cos':
            self.anneal_func = annealing_cos
        elif anneal_strategy == 'linear':
            self.anneal_func = annealing_linear
        self.three_phase = three_phase
        # Filled in ``before_run``; each entry maps an end iteration to the
        # param-group keys used as interpolation endpoints.
        self.momentum_phases = []
        super(OneCycleMomentumUpdaterHook, self).__init__(**kwargs)

    def _init_optimizer_momentum(self, name, optim):
        """Write base/max momentum into every param group of ``optim``.

        Extracted helper: the original duplicated this setup for the dict
        and non-dict optimizer cases.
        """
        if ('momentum' not in optim.defaults
                and 'betas' not in optim.defaults):
            # typo fix: original message read 'momentum withoption enabled'
            raise ValueError(
                'optimizer must support momentum with option enabled')
        # Adam-style optimizers store momentum as the first beta.
        self.use_beta1 = 'betas' in optim.defaults
        _base_momentum = format_param(name, optim, self._base_momentum)
        _max_momentum = format_param(name, optim, self._max_momentum)
        for group, b_momentum, m_momentum in zip(
                optim.param_groups, _base_momentum, _max_momentum):
            if self.use_beta1:
                _, beta2 = group['betas']
                group['betas'] = (m_momentum, beta2)
            else:
                group['momentum'] = m_momentum
            group['base_momentum'] = b_momentum
            group['max_momentum'] = m_momentum

    def before_run(self, runner):
        """Initialize optimizer param groups and build the phase table."""
        if isinstance(runner.optimizer, dict):
            for k, optim in runner.optimizer.items():
                self._init_optimizer_momentum(k, optim)
        else:
            optim = runner.optimizer
            self._init_optimizer_momentum(type(optim).__name__, optim)
        max_iters = runner.max_iters
        if self.three_phase:
            # Warm-up, symmetric cool-down, then a final annihilation phase.
            self.momentum_phases.append({
                'end_iter': float(self.pct_start * max_iters) - 1,
                'start_momentum': 'max_momentum',
                'end_momentum': 'base_momentum'})
            self.momentum_phases.append({
                'end_iter': float((2 * self.pct_start) * max_iters) - 2,
                'start_momentum': 'base_momentum',
                'end_momentum': 'max_momentum'})
            self.momentum_phases.append({
                'end_iter': max_iters - 1,
                'start_momentum': 'max_momentum',
                'end_momentum': 'max_momentum'})
        else:
            self.momentum_phases.append({
                'end_iter': float(self.pct_start * max_iters) - 1,
                'start_momentum': 'max_momentum',
                'end_momentum': 'base_momentum'})
            self.momentum_phases.append({
                'end_iter': max_iters - 1,
                'start_momentum': 'base_momentum',
                'end_momentum': 'max_momentum'})

    def _set_momentum(self, runner, momentum_groups):
        """Push the computed momentum values into the optimizer(s)."""
        if isinstance(runner.optimizer, dict):
            for k, optim in runner.optimizer.items():
                for param_group, mom in zip(optim.param_groups,
                                            momentum_groups[k]):
                    if 'momentum' in param_group.keys():
                        param_group['momentum'] = mom
                    elif 'betas' in param_group.keys():
                        param_group['betas'] = (mom, param_group['betas'][1])
        else:
            for param_group, mom in zip(runner.optimizer.param_groups,
                                        momentum_groups):
                if 'momentum' in param_group.keys():
                    param_group['momentum'] = mom
                elif 'betas' in param_group.keys():
                    param_group['betas'] = (mom, param_group['betas'][1])

    def get_momentum(self, runner, param_group):
        """Interpolate the momentum for ``param_group`` at the current iter."""
        curr_iter = runner.iter
        start_iter = 0
        for i, phase in enumerate(self.momentum_phases):
            end_iter = phase['end_iter']
            # Fall through to the last phase for any trailing iterations.
            if curr_iter <= end_iter or i == len(self.momentum_phases) - 1:
                pct = (curr_iter - start_iter) / (end_iter - start_iter)
                momentum = self.anneal_func(
                    param_group[phase['start_momentum']],
                    param_group[phase['end_momentum']], pct)
                break
            start_iter = end_iter
        return momentum

    def get_regular_momentum(self, runner):
        """Compute momentum for every param group of every optimizer."""
        if isinstance(runner.optimizer, dict):
            momentum_groups = {}
            for k, optim in runner.optimizer.items():
                momentum_groups[k] = [
                    self.get_momentum(runner, param_group)
                    for param_group in optim.param_groups
                ]
            return momentum_groups
        else:
            return [
                self.get_momentum(runner, param_group)
                for param_group in runner.optimizer.param_groups
            ]
@HOOKS.register_module() class OptimizerHook(Hook): 'A hook contains custom operations for the optimizer.\n\n Args:\n grad_clip (dict, optional): A config dict to control the clip_grad.\n Default: None.\n detect_anomalous_params (bool): This option is only used for\n debugging which will slow down the training speed.\n Detect anomalous parameters that are not included in\n the computational graph with `loss` as the root.\n There are two cases\n\n - Parameters were not used during\n forward pass.\n - Parameters were not used to produce\n loss.\n Default: False.\n ' def __init__(self, grad_clip=None, detect_anomalous_params=False): self.grad_clip = grad_clip self.detect_anomalous_params = detect_anomalous_params def clip_grads(self, params): params = list(filter((lambda p: (p.requires_grad and (p.grad is not None))), params)) if (len(params) > 0): return clip_grad.clip_grad_norm_(params, **self.grad_clip) def after_train_iter(self, runner): runner.optimizer.zero_grad() if self.detect_anomalous_params: self.detect_anomalous_parameters(runner.outputs['loss'], runner) runner.outputs['loss'].backward() if (self.grad_clip is not None): grad_norm = self.clip_grads(runner.model.parameters()) if (grad_norm is not None): runner.log_buffer.update({'grad_norm': float(grad_norm)}, runner.outputs['num_samples']) runner.optimizer.step() def detect_anomalous_parameters(self, loss, runner): logger = runner.logger parameters_in_graph = set() visited = set() def traverse(grad_fn): if (grad_fn is None): return if (grad_fn not in visited): visited.add(grad_fn) if hasattr(grad_fn, 'variable'): parameters_in_graph.add(grad_fn.variable) parents = grad_fn.next_functions if (parents is not None): for parent in parents: grad_fn = parent[0] traverse(grad_fn) traverse(loss.grad_fn) for (n, p) in runner.model.named_parameters(): if ((p not in parameters_in_graph) and p.requires_grad): logger.log(level=logging.ERROR, msg=f'''{n} with shape {p.size()} is not in the computational graph ''')
@HOOKS.register_module()
class GradientCumulativeOptimizerHook(OptimizerHook):
    """Optimizer Hook implements multi-iters gradient cumulating.

    Args:
        cumulative_iters (int, optional): Num of gradient cumulative iters.
            The optimizer will step every `cumulative_iters` iters.
            Defaults to 1.

    Examples:
        >>> # Use cumulative_iters to simulate a large batch size
        >>> # It is helpful when the hardware cannot handle a large batch
        >>> # size.
        >>> loader = DataLoader(data, batch_size=64)
        >>> optim_hook = GradientCumulativeOptimizerHook(cumulative_iters=4)
        >>> # almost equals to
        >>> loader = DataLoader(data, batch_size=256)
        >>> optim_hook = OptimizerHook()
    """

    def __init__(self, cumulative_iters=1, **kwargs):
        super(GradientCumulativeOptimizerHook, self).__init__(**kwargs)
        assert (isinstance(cumulative_iters, int) and (cumulative_iters > 0)), f'cumulative_iters only accepts positive int, but got {type(cumulative_iters)} instead.'
        self.cumulative_iters = cumulative_iters
        # Number of iters (from the start/resume point) that form complete
        # accumulation windows, and the leftover tail; set lazily in _init.
        self.divisible_iters = 0
        self.remainder_iters = 0
        self.initialized = False

    def has_batch_norm(self, module):
        """Return True if ``module`` contains any BatchNorm layer."""
        if isinstance(module, _BatchNorm):
            return True
        for m in module.children():
            if self.has_batch_norm(m):
                return True
        return False

    def _init(self, runner):
        # Lazy initialization so that runner.iter reflects a possible resume.
        if ((runner.iter % self.cumulative_iters) != 0):
            runner.logger.warning('Resume iter number is not divisible by cumulative_iters in GradientCumulativeOptimizerHook, which means the gradient of some iters is lost and the result may be influenced slightly.')
        if (self.has_batch_norm(runner.model) and (self.cumulative_iters > 1)):
            runner.logger.warning('GradientCumulativeOptimizerHook may slightly decrease performance if the model has BatchNorm layers.')
        residual_iters = (runner.max_iters - runner.iter)
        self.divisible_iters = ((residual_iters // self.cumulative_iters) * self.cumulative_iters)
        self.remainder_iters = (residual_iters - self.divisible_iters)
        self.initialized = True

    def after_train_iter(self, runner):
        if (not self.initialized):
            self._init(runner)
        # NOTE(review): ``divisible_iters`` is computed relative to the
        # resume iter, but compared here against the absolute runner.iter;
        # when resuming at iter > 0 the two scales differ — confirm.
        # NOTE(review): if remainder_iters == 0 and this branch is taken,
        # loss_factor would be 0 (division by zero) — confirm unreachable.
        if (runner.iter < self.divisible_iters):
            loss_factor = self.cumulative_iters
        else:
            loss_factor = self.remainder_iters
        # Scale the loss so accumulated gradients average over the window.
        loss = runner.outputs['loss']
        loss = (loss / loss_factor)
        loss.backward()
        # Step and reset only at window boundaries (or at the very end).
        if (self.every_n_iters(runner, self.cumulative_iters) or self.is_last_iter(runner)):
            if (self.grad_clip is not None):
                grad_norm = self.clip_grads(runner.model.parameters())
                if (grad_norm is not None):
                    runner.log_buffer.update({'grad_norm': float(grad_norm)}, runner.outputs['num_samples'])
            runner.optimizer.step()
            runner.optimizer.zero_grad()
@HOOKS.register_module()
class ProfilerHook(Hook):
    """Profiler to analyze performance during training.

    PyTorch Profiler is a tool that allows the collection of performance
    metrics during training. More details on Profiler can be found at
    https://pytorch.org/docs/1.8.1/profiler.html#torch.profiler.profile

    Args:
        by_epoch (bool): Profile performance by epoch or by iteration.
            Default: True.
        profile_iters (int): Number of iterations for profiling. If
            ``by_epoch=True``, profile_iters indicates the first
            profile_iters epochs at the beginning of the training, otherwise
            the first profile_iters iterations. Default: 1.
        activities (list[str], optional): List of activity groups (CPU, CUDA)
            to use in profiling. Defaults to ``['cpu', 'cuda']`` when None.
        schedule (dict, optional): Config of generating the callable
            schedule. If schedule is None, profiler will not add step markers
            into the trace and table view. Default: None.
        on_trace_ready (callable | dict): Either a handler or a dict used to
            generate a handler (``type='log_trace'`` or ``type='tb_trace'``).
            Default: None.
        record_shapes (bool): Save information about operator's input shapes.
            Default: False.
        profile_memory (bool): Track tensor memory allocation/deallocation.
            Default: False.
        with_stack (bool): Record source information (file and line number)
            for the ops. Default: False.
        with_flops (bool): Use formula to estimate the FLOPS of specific
            operators (matrix multiplication and 2D convolution).
            Default: False.
        json_trace_path (str, optional): Exports the collected trace in
            Chrome JSON format. Default: None.

    Example:
        >>> runner = ...  # instantiate a Runner
        >>> # tensorboard trace
        >>> trace_config = dict(type='tb_trace', dir_name='work_dir')
        >>> profiler_config = dict(on_trace_ready=trace_config)
        >>> runner.register_profiler_hook(profiler_config)
        >>> runner.run(data_loaders=[trainloader], workflow=[('train', 1)])
    """

    def __init__(self,
                 by_epoch: bool = True,
                 profile_iters: int = 1,
                 activities: Optional[List[str]] = None,
                 schedule: Optional[dict] = None,
                 on_trace_ready: Optional[Union[Callable, dict]] = None,
                 record_shapes: bool = False,
                 profile_memory: bool = False,
                 with_stack: bool = False,
                 with_flops: bool = False,
                 json_trace_path: Optional[str] = None) -> None:
        try:
            from torch import profiler  # torch version >= 1.8.1
        except ImportError:
            raise ImportError(f'profiler is the new feature of torch1.8.1, but your version is {torch.__version__}')

        assert isinstance(by_epoch, bool), '``by_epoch`` should be a boolean.'
        self.by_epoch = by_epoch

        if profile_iters < 1:
            raise ValueError(f'profile_iters should be greater than 0, but got {profile_iters}')
        self.profile_iters = profile_iters

        # Fix: avoid a mutable default argument; None stands for the
        # default ['cpu', 'cuda'].
        if activities is None:
            activities = ['cpu', 'cuda']
        if not isinstance(activities, list):
            raise ValueError(f'activities should be list, but got {type(activities)}')
        self.activities = []
        for activity in activities:
            activity = activity.lower()
            if activity == 'cpu':
                self.activities.append(profiler.ProfilerActivity.CPU)
            elif activity == 'cuda':
                self.activities.append(profiler.ProfilerActivity.CUDA)
            else:
                raise ValueError(f'activity should be "cpu" or "cuda", but got {activity}')

        if schedule is not None:
            self.schedule = profiler.schedule(**schedule)
        else:
            self.schedule = None

        self.on_trace_ready = on_trace_ready
        self.record_shapes = record_shapes
        self.profile_memory = profile_memory
        self.with_stack = with_stack
        self.with_flops = with_flops
        self.json_trace_path = json_trace_path

    @master_only
    def before_run(self, runner):
        """Resolve the trace handler and start the profiler."""
        if self.by_epoch and runner.max_epochs < self.profile_iters:
            raise ValueError(f'self.profile_iters should not be greater than {runner.max_epochs}')
        if not self.by_epoch and runner.max_iters < self.profile_iters:
            raise ValueError(f'self.profile_iters should not be greater than {runner.max_iters}')

        if callable(self.on_trace_ready):  # handler given directly
            _on_trace_ready = self.on_trace_ready
        elif isinstance(self.on_trace_ready, dict):  # build from config
            trace_cfg = self.on_trace_ready.copy()
            trace_type = trace_cfg.pop('type')
            if trace_type == 'log_trace':

                def _log_handler(prof):
                    print(prof.key_averages().table(**trace_cfg))

                _on_trace_ready = _log_handler
            elif trace_type == 'tb_trace':
                try:
                    import torch_tb_profiler  # noqa: F401
                except ImportError:
                    raise ImportError('please run "pip install torch-tb-profiler" to install torch_tb_profiler')
                _on_trace_ready = torch.profiler.tensorboard_trace_handler(**trace_cfg)
            else:
                raise ValueError(f'trace_type should be "log_trace" or "tb_trace", but got {trace_type}')
        elif self.on_trace_ready is None:
            _on_trace_ready = None
        else:
            raise ValueError(f'on_trace_ready should be handler, dict or None, but got {type(self.on_trace_ready)}')

        if self.by_epoch and runner.max_epochs > 1:
            warnings.warn(f'profiler will profile {runner.max_epochs} epochs instead of 1 epoch. Since profiler will slow down the training, it is recommended to train 1 epoch with ProfilerHook and adjust your setting according to the profiler summary. During normal training (epoch > 1), you may disable the ProfilerHook.')

        self.profiler = torch.profiler.profile(
            activities=self.activities,
            schedule=self.schedule,
            on_trace_ready=_on_trace_ready,
            record_shapes=self.record_shapes,
            profile_memory=self.profile_memory,
            with_stack=self.with_stack,
            with_flops=self.with_flops)
        self.profiler.__enter__()
        runner.logger.info('profiler is profiling...')

    @master_only
    def after_train_epoch(self, runner):
        """Stop profiling (and export the trace) after the last epoch."""
        if self.by_epoch and runner.epoch == self.profile_iters - 1:
            runner.logger.info('profiler may take a few minutes...')
            self.profiler.__exit__(None, None, None)
            if self.json_trace_path is not None:
                self.profiler.export_chrome_trace(self.json_trace_path)

    @master_only
    def after_train_iter(self, runner):
        """Advance the profiler; stop after the last profiled iteration."""
        self.profiler.step()
        if not self.by_epoch and runner.iter == self.profile_iters - 1:
            runner.logger.info('profiler may take a few minutes...')
            self.profiler.__exit__(None, None, None)
            if self.json_trace_path is not None:
                self.profiler.export_chrome_trace(self.json_trace_path)
@HOOKS.register_module() class DistSamplerSeedHook(Hook): 'Data-loading sampler for distributed training.\n\n When distributed training, it is only useful in conjunction with\n :obj:`EpochBasedRunner`, while :obj:`IterBasedRunner` achieves the same\n purpose with :obj:`IterLoader`.\n ' def before_epoch(self, runner): if hasattr(runner.data_loader.sampler, 'set_epoch'): runner.data_loader.sampler.set_epoch(runner.epoch) elif hasattr(runner.data_loader.batch_sampler.sampler, 'set_epoch'): runner.data_loader.batch_sampler.sampler.set_epoch(runner.epoch)
@HOOKS.register_module() class SyncBuffersHook(Hook): 'Synchronize model buffers such as running_mean and running_var in BN at\n the end of each epoch.\n\n Args:\n distributed (bool): Whether distributed training is used. It is\n effective only for distributed training. Defaults to True.\n ' def __init__(self, distributed=True): self.distributed = distributed def after_epoch(self, runner): 'All-reduce model buffers at the end of each epoch.' if self.distributed: allreduce_params(runner.model.buffers())
class IterLoader(): def __init__(self, dataloader): self._dataloader = dataloader self.iter_loader = iter(self._dataloader) self._epoch = 0 @property def epoch(self): return self._epoch def __next__(self): try: data = next(self.iter_loader) except StopIteration: self._epoch += 1 if hasattr(self._dataloader.sampler, 'set_epoch'): self._dataloader.sampler.set_epoch(self._epoch) time.sleep(2) self.iter_loader = iter(self._dataloader) data = next(self.iter_loader) return data def __len__(self): return len(self._dataloader)
@RUNNERS.register_module()
class IterBasedRunner(BaseRunner):
    """Iteration-based Runner.

    This runner train models iteration by iteration.
    """

    def train(self, data_loader, **kwargs):
        """Run one training iteration drawn from ``data_loader``."""
        self.model.train()
        self.mode = 'train'
        self.data_loader = data_loader
        # Keep the runner's epoch counter in sync with the IterLoader.
        self._epoch = data_loader.epoch
        data_batch = next(data_loader)
        self.call_hook('before_train_iter')
        outputs = self.model.train_step(data_batch, self.optimizer, **kwargs)
        if (not isinstance(outputs, dict)):
            raise TypeError('model.train_step() must return a dict')
        if ('log_vars' in outputs):
            self.log_buffer.update(outputs['log_vars'], outputs['num_samples'])
        self.outputs = outputs
        self.call_hook('after_train_iter')
        self._inner_iter += 1
        self._iter += 1

    @torch.no_grad()
    def val(self, data_loader, **kwargs):
        """Run one validation iteration drawn from ``data_loader``."""
        self.model.eval()
        self.mode = 'val'
        self.data_loader = data_loader
        data_batch = next(data_loader)
        self.call_hook('before_val_iter')
        outputs = self.model.val_step(data_batch, **kwargs)
        if (not isinstance(outputs, dict)):
            raise TypeError('model.val_step() must return a dict')
        if ('log_vars' in outputs):
            self.log_buffer.update(outputs['log_vars'], outputs['num_samples'])
        self.outputs = outputs
        self.call_hook('after_val_iter')
        # Note: validation advances only the inner iter, not the global one.
        self._inner_iter += 1

    def run(self, data_loaders, workflow, max_iters=None, **kwargs):
        """Start running.

        Args:
            data_loaders (list[:obj:`DataLoader`]): Dataloaders for training
                and validation.
            workflow (list[tuple]): A list of (phase, iters) to specify the
                running order and iterations. E.g, [('train', 10000),
                ('val', 1000)] means running 10000 iterations for training
                and 1000 iterations for validation, iteratively.
        """
        assert isinstance(data_loaders, list)
        assert mmcv.is_list_of(workflow, tuple)
        assert (len(data_loaders) == len(workflow))
        if (max_iters is not None):
            warnings.warn('setting max_iters in run is deprecated, please set max_iters in runner_config', DeprecationWarning)
            self._max_iters = max_iters
        assert (self._max_iters is not None), 'max_iters must be specified during instantiation'
        work_dir = (self.work_dir if (self.work_dir is not None) else 'NONE')
        self.logger.info('Start running, host: %s, work_dir: %s', get_host_info(), work_dir)
        self.logger.info('Hooks will be executed in the following order:\n%s', self.get_hook_info())
        self.logger.info('workflow: %s, max: %d iters', workflow, self._max_iters)
        self.call_hook('before_run')
        iter_loaders = [IterLoader(x) for x in data_loaders]
        self.call_hook('before_epoch')
        # Cycle through the workflow phases until max_iters is reached.
        while (self.iter < self._max_iters):
            for (i, flow) in enumerate(workflow):
                self._inner_iter = 0
                (mode, iters) = flow
                if ((not isinstance(mode, str)) or (not hasattr(self, mode))):
                    raise ValueError('runner has no method named "{}" to run a workflow'.format(mode))
                iter_runner = getattr(self, mode)
                for _ in range(iters):
                    if ((mode == 'train') and (self.iter >= self._max_iters)):
                        break
                    iter_runner(iter_loaders[i], **kwargs)
        # NOTE(review): sleep presumably lets logger/worker threads flush
        # before the final hooks — confirm.
        time.sleep(1)
        self.call_hook('after_epoch')
        self.call_hook('after_run')

    def resume(self, checkpoint, resume_optimizer=True, map_location='default'):
        """Resume model from checkpoint.

        Args:
            checkpoint (str): Checkpoint to resume from.
            resume_optimizer (bool, optional): Whether resume the
                optimizer(s) if the checkpoint file includes optimizer(s).
                Default to True.
            map_location (str, optional): Same as :func:`torch.load`.
                Default to 'default'.
        """
        if (map_location == 'default'):
            # Map tensors onto the current CUDA device.
            device_id = torch.cuda.current_device()
            checkpoint = self.load_checkpoint(checkpoint, map_location=(lambda storage, loc: storage.cuda(device_id)))
        else:
            checkpoint = self.load_checkpoint(checkpoint, map_location=map_location)
        self._epoch = checkpoint['meta']['epoch']
        self._iter = checkpoint['meta']['iter']
        self._inner_iter = checkpoint['meta']['iter']
        if (('optimizer' in checkpoint) and resume_optimizer):
            if isinstance(self.optimizer, Optimizer):
                self.optimizer.load_state_dict(checkpoint['optimizer'])
            elif isinstance(self.optimizer, dict):
                for k in self.optimizer.keys():
                    self.optimizer[k].load_state_dict(checkpoint['optimizer'][k])
            else:
                raise TypeError(f'Optimizer should be dict or torch.optim.Optimizer but got {type(self.optimizer)}')
        self.logger.info(f'resumed from epoch: {self.epoch}, iter {self.iter}')

    def save_checkpoint(self, out_dir, filename_tmpl='iter_{}.pth', meta=None, save_optimizer=True, create_symlink=True):
        """Save checkpoint to file.

        Args:
            out_dir (str): Directory to save checkpoint files.
            filename_tmpl (str, optional): Checkpoint file template.
                Defaults to 'iter_{}.pth'.
            meta (dict, optional): Metadata to be saved in checkpoint.
                Defaults to None.
            save_optimizer (bool, optional): Whether save optimizer.
                Defaults to True.
            create_symlink (bool, optional): Whether create symlink to the
                latest checkpoint file. Defaults to True.
        """
        if (meta is None):
            meta = {}
        elif (not isinstance(meta, dict)):
            raise TypeError(f'meta should be a dict or None, but got {type(meta)}')
        if (self.meta is not None):
            meta.update(self.meta)
        # Counters are saved 1-based relative to the completed iteration.
        meta.update(epoch=(self.epoch + 1), iter=self.iter)
        filename = filename_tmpl.format((self.iter + 1))
        filepath = osp.join(out_dir, filename)
        optimizer = (self.optimizer if save_optimizer else None)
        save_checkpoint(self.model, filepath, optimizer=optimizer, meta=meta)
        if create_symlink:
            dst_file = osp.join(out_dir, 'latest.pth')
            # Windows has no reliable symlink support; copy instead.
            if (platform.system() != 'Windows'):
                mmcv.symlink(filename, dst_file)
            else:
                shutil.copy(filepath, dst_file)

    def register_training_hooks(self, lr_config, optimizer_config=None, checkpoint_config=None, log_config=None, momentum_config=None, custom_hooks_config=None):
        """Register default hooks for iter-based training.

        Checkpoint hook, optimizer stepper hook and logger hooks will be set
        to `by_epoch=False` by default.

        Default hooks include:

        +----------------------+-------------------------+
        | Hooks                | Priority                |
        +======================+=========================+
        | LrUpdaterHook        | VERY_HIGH (10)          |
        +----------------------+-------------------------+
        | MomentumUpdaterHook  | HIGH (30)               |
        +----------------------+-------------------------+
        | OptimizerStepperHook | ABOVE_NORMAL (40)       |
        +----------------------+-------------------------+
        | CheckpointSaverHook  | NORMAL (50)             |
        +----------------------+-------------------------+
        | IterTimerHook        | LOW (70)                |
        +----------------------+-------------------------+
        | LoggerHook(s)        | VERY_LOW (90)           |
        +----------------------+-------------------------+
        | CustomHook(s)        | defaults to NORMAL (50) |
        +----------------------+-------------------------+

        If custom hooks have same priority with default hooks, custom hooks
        will be triggered after default hooks.
        """
        if (checkpoint_config is not None):
            checkpoint_config.setdefault('by_epoch', False)
        if (lr_config is not None):
            lr_config.setdefault('by_epoch', False)
        if (log_config is not None):
            for info in log_config['hooks']:
                info.setdefault('by_epoch', False)
        super(IterBasedRunner, self).register_training_hooks(lr_config=lr_config, momentum_config=momentum_config, optimizer_config=optimizer_config, checkpoint_config=checkpoint_config, log_config=log_config, timer_config=IterTimerHook(), custom_hooks_config=custom_hooks_config)
class LogBuffer(): def __init__(self): self.val_history = OrderedDict() self.n_history = OrderedDict() self.output = OrderedDict() self.ready = False def clear(self): self.val_history.clear() self.n_history.clear() self.clear_output() def clear_output(self): self.output.clear() self.ready = False def update(self, vars, count=1): assert isinstance(vars, dict) for (key, var) in vars.items(): if (key not in self.val_history): self.val_history[key] = [] self.n_history[key] = [] self.val_history[key].append(var) self.n_history[key].append(count) def average(self, n=0): 'Average latest n values or all values.' assert (n >= 0) for key in self.val_history: values = np.array(self.val_history[key][(- n):]) nums = np.array(self.n_history[key][(- n):]) avg = (np.sum((values * nums)) / np.sum(nums)) self.output[key] = avg self.ready = True
def register_torch_optimizers(): torch_optimizers = [] for module_name in dir(torch.optim): if module_name.startswith('__'): continue _optim = getattr(torch.optim, module_name) if (inspect.isclass(_optim) and issubclass(_optim, torch.optim.Optimizer)): OPTIMIZERS.register_module()(_optim) torch_optimizers.append(module_name) return torch_optimizers
def build_optimizer_constructor(cfg): return build_from_cfg(cfg, OPTIMIZER_BUILDERS)
def build_optimizer(model, cfg): optimizer_cfg = copy.deepcopy(cfg) constructor_type = optimizer_cfg.pop('constructor', 'DefaultOptimizerConstructor') paramwise_cfg = optimizer_cfg.pop('paramwise_cfg', None) optim_constructor = build_optimizer_constructor(dict(type=constructor_type, optimizer_cfg=optimizer_cfg, paramwise_cfg=paramwise_cfg)) optimizer = optim_constructor(model) return optimizer
@OPTIMIZER_BUILDERS.register_module()
class DefaultOptimizerConstructor():
    """Default constructor for optimizers.

    By default each parameter shares the same optimizer settings; the
    ``paramwise_cfg`` dict specifies parameter-wise settings. It may contain:

    - ``custom_keys`` (dict): Per-parameter settings keyed by name
      substrings. The longest key that is a substring of a parameter's name
      wins (ties broken by alphabetical order); its ``lr_mult`` /
      ``decay_mult`` then override all other multipliers for that parameter.
    - ``bias_lr_mult`` (float): Multiplies the lr of all bias parameters
      (except those in normalization layers and DCN offset layers).
    - ``bias_decay_mult`` (float): Multiplies the weight decay of all bias
      parameters (except those in normalization layers, depthwise conv
      layers and DCN offset layers).
    - ``norm_decay_mult`` (float): Multiplies the weight decay of
      normalization-layer weights and biases.
    - ``dwconv_decay_mult`` (float): Multiplies the weight decay of
      depthwise-conv weights and biases.
    - ``dcn_offset_lr_mult`` (float): Multiplies the lr of DCN
      ``conv_offset`` parameters. Note it overrides ``bias_lr_mult`` for
      the offset bias and applies to every DCN layer in the model.
    - ``bypass_duplicate`` (bool): If True, duplicate parameters are not
      added to the optimizer. Default: False.

    Args:
        optimizer_cfg (dict): The optimizer config; ``type`` is the
            optimizer class name, remaining keys are its arguments
            (lr, weight_decay, momentum, ...).
        paramwise_cfg (dict, optional): Parameter-wise options as above.

    Example:
        >>> model = torch.nn.modules.Conv1d(1, 1, 1)
        >>> optimizer_cfg = dict(type='SGD', lr=0.01, momentum=0.9,
        >>>                      weight_decay=0.0001)
        >>> paramwise_cfg = dict(norm_decay_mult=0.)
        >>> optim_builder = DefaultOptimizerConstructor(
        >>>     optimizer_cfg, paramwise_cfg)
        >>> optimizer = optim_builder(model)
    """

    def __init__(self, optimizer_cfg, paramwise_cfg=None):
        if (not isinstance(optimizer_cfg, dict)):
            # NOTE(review): two-argument TypeError produces a tuple message
            # rather than a concatenated string — confirm intended.
            raise TypeError('optimizer_cfg should be a dict', f'but got {type(optimizer_cfg)}')
        self.optimizer_cfg = optimizer_cfg
        self.paramwise_cfg = ({} if (paramwise_cfg is None) else paramwise_cfg)
        # Base lr/wd are used when applying the per-parameter multipliers.
        self.base_lr = optimizer_cfg.get('lr', None)
        self.base_wd = optimizer_cfg.get('weight_decay', None)
        self._validate_cfg()

    def _validate_cfg(self):
        """Reject paramwise_cfg combinations that cannot be honored."""
        if (not isinstance(self.paramwise_cfg, dict)):
            raise TypeError(f'paramwise_cfg should be None or a dict, but got {type(self.paramwise_cfg)}')
        if ('custom_keys' in self.paramwise_cfg):
            if (not isinstance(self.paramwise_cfg['custom_keys'], dict)):
                raise TypeError(f"If specified, custom_keys must be a dict, but got {type(self.paramwise_cfg['custom_keys'])}")
            if (self.base_wd is None):
                # decay_mult needs a base weight decay to multiply.
                for key in self.paramwise_cfg['custom_keys']:
                    if ('decay_mult' in self.paramwise_cfg['custom_keys'][key]):
                        raise ValueError('base_wd should not be None')
        if (('bias_decay_mult' in self.paramwise_cfg) or ('norm_decay_mult' in self.paramwise_cfg) or ('dwconv_decay_mult' in self.paramwise_cfg)):
            if (self.base_wd is None):
                raise ValueError('base_wd should not be None')

    def _is_in(self, param_group, param_group_list):
        """Return True if any param of ``param_group`` already appears in
        ``param_group_list``."""
        assert is_list_of(param_group_list, dict)
        param = set(param_group['params'])
        param_set = set()
        for group in param_group_list:
            param_set.update(set(group['params']))
        return (not param.isdisjoint(param_set))

    def add_params(self, params, module, prefix='', is_dcn_module=None):
        """Add all parameters of module to the params list.

        The parameters of the given module will be added to the list of
        param groups, with specific rules defined by paramwise_cfg.

        Args:
            params (list[dict]): A list of param groups, it will be modified
                in place.
            module (nn.Module): The module to be added.
            prefix (str): The prefix of the module.
            is_dcn_module (int|float|None): If the current module is a
                submodule of DCN, `is_dcn_module` will be passed to control
                conv_offset layer's learning rate. Defaults to None.
        """
        custom_keys = self.paramwise_cfg.get('custom_keys', {})
        # Longest key first; equal lengths resolved alphabetically.
        sorted_keys = sorted(sorted(custom_keys.keys()), key=len, reverse=True)
        bias_lr_mult = self.paramwise_cfg.get('bias_lr_mult', 1.0)
        bias_decay_mult = self.paramwise_cfg.get('bias_decay_mult', 1.0)
        norm_decay_mult = self.paramwise_cfg.get('norm_decay_mult', 1.0)
        dwconv_decay_mult = self.paramwise_cfg.get('dwconv_decay_mult', 1.0)
        bypass_duplicate = self.paramwise_cfg.get('bypass_duplicate', False)
        dcn_offset_lr_mult = self.paramwise_cfg.get('dcn_offset_lr_mult', 1.0)
        is_norm = isinstance(module, (_BatchNorm, _InstanceNorm, GroupNorm, LayerNorm))
        # Depthwise conv: groups equals in_channels.
        is_dwconv = (isinstance(module, torch.nn.Conv2d) and (module.in_channels == module.groups))
        for (name, param) in module.named_parameters(recurse=False):
            param_group = {'params': [param]}
            if (not param.requires_grad):
                params.append(param_group)
                continue
            if (bypass_duplicate and self._is_in(param_group, params)):
                warnings.warn(f'{prefix} is duplicate. It is skipped since bypass_duplicate={bypass_duplicate}')
                continue
            # A custom_keys match overrides every other multiplier.
            is_custom = False
            for key in sorted_keys:
                if (key in f'{prefix}.{name}'):
                    is_custom = True
                    lr_mult = custom_keys[key].get('lr_mult', 1.0)
                    param_group['lr'] = (self.base_lr * lr_mult)
                    if (self.base_wd is not None):
                        decay_mult = custom_keys[key].get('decay_mult', 1.0)
                        param_group['weight_decay'] = (self.base_wd * decay_mult)
                    break
            if (not is_custom):
                # bias_lr_mult affects biases outside norm/DCN modules only.
                if ((name == 'bias') and (not (is_norm or is_dcn_module))):
                    param_group['lr'] = (self.base_lr * bias_lr_mult)
                # DCN conv_offset lr override (takes precedence over the
                # bias rule above for offset layers).
                if ((prefix.find('conv_offset') != (- 1)) and is_dcn_module and isinstance(module, torch.nn.Conv2d)):
                    param_group['lr'] = (self.base_lr * dcn_offset_lr_mult)
                if (self.base_wd is not None):
                    if is_norm:
                        param_group['weight_decay'] = (self.base_wd * norm_decay_mult)
                    elif is_dwconv:
                        param_group['weight_decay'] = (self.base_wd * dwconv_decay_mult)
                    elif ((name == 'bias') and (not is_dcn_module)):
                        param_group['weight_decay'] = (self.base_wd * bias_decay_mult)
            params.append(param_group)
        if check_ops_exist():
            from mmcv.ops import DeformConv2d, ModulatedDeformConv2d
            is_dcn_module = isinstance(module, (DeformConv2d, ModulatedDeformConv2d))
        else:
            is_dcn_module = False
        for (child_name, child_mod) in module.named_children():
            child_prefix = (f'{prefix}.{child_name}' if prefix else child_name)
            self.add_params(params, child_mod, prefix=child_prefix, is_dcn_module=is_dcn_module)

    def __call__(self, model):
        """Build and return the optimizer for ``model``."""
        if hasattr(model, 'module'):
            # Unwrap (Distributed)DataParallel-style wrappers.
            model = model.module
        optimizer_cfg = self.optimizer_cfg.copy()
        # Fast path: no paramwise options means one global param group.
        if (not self.paramwise_cfg):
            optimizer_cfg['params'] = model.parameters()
            return build_from_cfg(optimizer_cfg, OPTIMIZERS)
        params = []
        self.add_params(params, model)
        optimizer_cfg['params'] = params
        return build_from_cfg(optimizer_cfg, OPTIMIZERS)
class Priority(Enum):
    """Hook priority levels.

    Smaller numeric values correspond to higher priority levels:

    ============== =======
    Level          Value
    ============== =======
    HIGHEST        0
    VERY_HIGH      10
    HIGH           30
    ABOVE_NORMAL   40
    NORMAL         50
    BELOW_NORMAL   60
    LOW            70
    VERY_LOW       90
    LOWEST         100
    ============== =======
    """

    HIGHEST = 0
    VERY_HIGH = 10
    HIGH = 30
    ABOVE_NORMAL = 40
    NORMAL = 50
    BELOW_NORMAL = 60
    LOW = 70
    VERY_LOW = 90
    LOWEST = 100
def get_priority(priority): 'Get priority value.\n\n Args:\n priority (int or str or :obj:`Priority`): Priority.\n\n Returns:\n int: The priority value.\n ' if isinstance(priority, int): if ((priority < 0) or (priority > 100)): raise ValueError('priority must be between 0 and 100') return priority elif isinstance(priority, Priority): return priority.value elif isinstance(priority, str): return Priority[priority.upper()].value else: raise TypeError('priority must be an integer or Priority enum value')
def get_host_info():
    """Get hostname and username as ``user@host``.

    Returns an empty string if an exception is raised, e.g.
    ``getpass.getuser()`` will lead to error in a docker container.

    Returns:
        str: ``'<user>@<hostname>'``, or ``''`` on failure.
    """
    host = ''
    try:
        host = f'{getuser()}@{gethostname()}'
    except Exception as e:
        warnings.warn(f'Host or user not found: {str(e)}')
    # BUG FIX: the original returned from a ``finally`` block, which silently
    # swallows *any* in-flight exception, including KeyboardInterrupt and
    # SystemExit (flake8-bugbear B012). Returning after the try/except keeps
    # the best-effort behaviour for ordinary errors without masking
    # BaseExceptions.
    return host
def get_time_str():
    """Return the current local time formatted as ``YYYYmmdd_HHMMSS``."""
    # strftime falls back to the current local time when no time tuple is
    # supplied, so an explicit time.localtime() call is unnecessary.
    return time.strftime('%Y%m%d_%H%M%S')
def obj_from_dict(info, parent=None, default_args=None):
    """Initialize an object from dict.

    The dict must contain the key "type", which indicates the object type, it
    can be either a string or type, such as "list" or ``list``. Remaining
    fields are treated as the arguments for constructing the object.

    Args:
        info (dict): Object types and arguments.
        parent (:class:`module`): Module which may containing expected object
            classes.
        default_args (dict, optional): Default arguments for initializing the
            object.

    Returns:
        any type: Object built from the dict.
    """
    assert isinstance(info, dict) and 'type' in info
    assert isinstance(default_args, dict) or default_args is None
    kwargs = info.copy()
    obj_type = kwargs.pop('type')
    if mmcv.is_str(obj_type):
        # Resolve the name either as an attribute of ``parent`` or as an
        # already-imported module.
        if parent is not None:
            obj_type = getattr(parent, obj_type)
        else:
            obj_type = sys.modules[obj_type]
    elif not isinstance(obj_type, type):
        raise TypeError(
            f'type must be a str or valid type, but got {type(obj_type)}')
    if default_args is not None:
        for name, value in default_args.items():
            kwargs.setdefault(name, value)
    return obj_type(**kwargs)
def set_random_seed(seed, deterministic=False, use_rank_shift=False):
    """Seed python, numpy and torch (CPU and CUDA) random generators.

    Args:
        seed (int): Seed to be used.
        deterministic (bool): Whether to set the deterministic option for
            CUDNN backend, i.e., set ``torch.backends.cudnn.deterministic``
            to True and ``torch.backends.cudnn.benchmark`` to False.
            Default: False.
        use_rank_shift (bool): Whether to add rank number to the random seed
            to have different random seed in different threads.
            Default: False.
    """
    if use_rank_shift:
        # Shift the seed by the process rank so workers diverge.
        rank = mmcv.runner.get_dist_info()[0]
        seed += rank
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Hash randomization can only be pinned through the environment.
    os.environ['PYTHONHASHSEED'] = str(seed)
    if deterministic:
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
def is_tensorrt_available():
    """Check whether the ``tensorrt`` package can be imported.

    Returns:
        bool: True if ``import tensorrt`` succeeds, False otherwise.
    """
    try:
        import tensorrt  # noqa: F401
    except ModuleNotFoundError:
        return False
    # Only a presence probe -- drop the local binding immediately.
    del tensorrt
    return True
def get_tensorrt_op_path():
    """Get TensorRT plugins library path.

    Returns:
        str: Path of the first matching ``_ext_trt`` shared library, or an
            empty string when none is found.
    """
    # Colored deprecation banner; byte-identical to the historical message.
    msg = ('\x1b[107m\x1b[1m\x1b[31m'
           'DeprecationWarning: This function will be deprecated in future. '
           '\x1b[34m'
           'Welcome to use the unified model deployment toolbox '
           'MMDeploy: https://github.com/open-mmlab/mmdeploy'
           '\x1b[0m')
    warnings.warn(msg)
    # The compiled plugin lives one directory above this module's directory.
    pkg_root = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
    candidates = glob.glob(os.path.join(pkg_root, '_ext_trt.*.so'))
    return candidates[0] if candidates else ''
def is_tensorrt_plugin_loaded():
    """Check if TensorRT plugins library is loaded or not.

    Returns:
        bool: plugin_is_loaded flag
    """
    # Colored deprecation banner; byte-identical to the historical message.
    msg = ('\x1b[107m\x1b[1m\x1b[31m'
           'DeprecationWarning: This function will be deprecated in future. '
           '\x1b[34m'
           'Welcome to use the unified model deployment toolbox '
           'MMDeploy: https://github.com/open-mmlab/mmdeploy'
           '\x1b[0m')
    warnings.warn(msg)
    # Module-level flag maintained by load_tensorrt_plugin().
    global plugin_is_loaded
    return plugin_is_loaded
def load_tensorrt_plugin():
    """load TensorRT plugins library."""
    # Colored deprecation banner; byte-identical to the historical message.
    msg = ('\x1b[107m\x1b[1m\x1b[31m'
           'DeprecationWarning: This function will be deprecated in future. '
           '\x1b[34m'
           'Welcome to use the unified model deployment toolbox '
           'MMDeploy: https://github.com/open-mmlab/mmdeploy'
           '\x1b[0m')
    warnings.warn(msg)
    global plugin_is_loaded
    lib_path = get_tensorrt_op_path()
    # Load the shared object at most once, and only when it exists on disk.
    if not plugin_is_loaded and os.path.exists(lib_path):
        ctypes.CDLL(lib_path)
        plugin_is_loaded = True
def preprocess_onnx(onnx_model):
    """Modify onnx model to match with TensorRT plugins in mmcv.

    There are some conflict between onnx node definition and TensorRT limit.
    This function perform preprocess on the onnx model to solve the
    conflicts. For example, onnx `attribute` is loaded in TensorRT on host
    and onnx `input` is loaded on device. The shape inference is performed on
    host, so any `input` related to shape (such as
    `max_output_boxes_per_class` in NonMaxSuppression) should be transformed
    to `attribute` before conversion.

    Arguments:
        onnx_model (onnx.ModelProto): Input onnx model.

    Returns:
        onnx.ModelProto: Modified onnx model.
    """
    # Colored deprecation banner pointing users at MMDeploy.
    (bright_style, reset_style) = ('\x1b[1m', '\x1b[0m')
    (red_text, blue_text) = ('\x1b[31m', '\x1b[34m')
    white_background = '\x1b[107m'
    msg = ((white_background + bright_style) + red_text)
    msg += 'DeprecationWarning: This function will be deprecated in future. '
    msg += (blue_text + 'Welcome to use the unified model deployment toolbox ')
    msg += 'MMDeploy: https://github.com/open-mmlab/mmdeploy'
    msg += reset_style
    warnings.warn(msg)
    graph = onnx_model.graph
    nodes = graph.node
    initializers = graph.initializer
    # Map each (non-empty) output tensor name to the node producing it.
    node_dict = {}
    for node in nodes:
        node_outputs = node.output
        for output in node_outputs:
            if (len(output) > 0):
                node_dict[output] = node
    # Map each initializer name to its TensorProto.
    init_dict = {_.name: _ for _ in initializers}
    # Names of producer nodes that become dead once their output is folded
    # into an attribute; removed from the graph at the end.
    nodes_name_to_remove = set()

    def is_node_without_output(name):
        # True if no *surviving* node consumes the tensor ``name``.
        for (node_name, node) in node_dict.items():
            if (node_name not in nodes_name_to_remove):
                if (name in node.input):
                    return False
        return True

    def mark_nodes_to_remove(name):
        # Recursively mark the producer of ``name`` and any of its inputs
        # that become dead as a consequence.
        node = node_dict[name]
        nodes_name_to_remove.add(name)
        for input_node_name in node.input:
            if is_node_without_output(input_node_name):
                mark_nodes_to_remove(input_node_name)

    def parse_data(name, typ, default_value=0):
        # Read the scalar value behind tensor ``name``: either the raw data
        # of a Constant node (which is then marked dead) or an initializer.
        # A non-Constant producer cannot be folded on host, so the default
        # is returned instead.
        if (name in node_dict):
            node = node_dict[name]
            if (node.op_type == 'Constant'):
                raw_data = node.attribute[0].t.raw_data
            else:
                mark_nodes_to_remove(name)
                return default_value
        elif (name in init_dict):
            raw_data = init_dict[name].raw_data
        else:
            # NOTE(review): 'initilizer' typo is preserved -- this is a
            # runtime error message.
            raise ValueError(f'{name} not found in node or initilizer.')
        return np.frombuffer(raw_data, typ).item()

    nrof_node = len(nodes)
    for idx in range(nrof_node):
        node = nodes[idx]
        node_attributes = node.attribute
        node_inputs = node.input
        node_outputs = node.output
        node_name = node.name
        if (node.op_type == 'NonMaxSuppression'):
            # Defaults used when the optional inputs/attributes are absent.
            center_point_box = 0
            max_output_boxes_per_class = 1000000
            iou_threshold = 0.3
            score_threshold = 0.0
            offset = 0
            for attribute in node_attributes:
                if (attribute.name == 'center_point_box'):
                    center_point_box = attribute.i
                elif (attribute.name == 'offset'):
                    offset = attribute.i
            # Fold the optional scalar *inputs* (3rd..5th) into node
            # *attributes*, because TensorRT needs them on host for shape
            # inference; their producer nodes become dead.
            if (len(node_inputs) >= 3):
                max_output_boxes_per_class = parse_data(node_inputs[2], np.int64, max_output_boxes_per_class)
                mark_nodes_to_remove(node_inputs[2])
            if (len(node_inputs) >= 4):
                iou_threshold = parse_data(node_inputs[3], np.float32, iou_threshold)
                mark_nodes_to_remove(node_inputs[3])
            if (len(node_inputs) >= 5):
                score_threshold = parse_data(node_inputs[4], np.float32)
                mark_nodes_to_remove(node_inputs[4])
            # Rebuild the node keeping only the boxes/scores inputs; all the
            # folded values travel as attributes now.
            new_node = onnx.helper.make_node('NonMaxSuppression', node_inputs[:2], node_outputs, name=node_name, center_point_box=center_point_box, max_output_boxes_per_class=max_output_boxes_per_class, iou_threshold=iou_threshold, score_threshold=score_threshold, offset=offset)
            # Re-point the producer map at the replacement node.
            for output in node_outputs:
                if (output in node_dict):
                    node_dict[output] = new_node
            # Insert the replacement at the same position, then drop the
            # original (insert-before-remove keeps graph order stable).
            nodes.insert(idx, new_node)
            nodes.remove(node)
        elif (node.op_type == 'InstanceNormalization'):
            # Route through mmcv's TensorRT plugin implementation.
            node.op_type = 'MMCVInstanceNormalization'
    # Finally drop every producer node that was folded away above.
    for node_name in nodes_name_to_remove:
        nodes.remove(node_dict[node_name])
    return onnx_model
def onnx2trt(onnx_model, opt_shape_dict, log_level=trt.Logger.ERROR, fp16_mode=False, max_workspace_size=0, device_id=0):
    """Convert onnx model to tensorrt engine.

    Arguments:
        onnx_model (str or onnx.ModelProto): the onnx model to convert from
        opt_shape_dict (dict): the min/opt/max shape of each input
        log_level (TensorRT log level): the log level of TensorRT
        fp16_mode (bool): enable fp16 mode
        max_workspace_size (int): set max workspace size of TensorRT engine.
            some tactic and layers need large workspace.
        device_id (int): choice the device to create engine.

    Returns:
        tensorrt.ICudaEngine: the TensorRT engine created from onnx_model

    Example:
        >>> engine = onnx2trt(
        >>>             "onnx_model.onnx",
        >>>             {'input': [[1, 3, 160, 160],
        >>>                        [1, 3, 320, 320],
        >>>                        [1, 3, 640, 640]]},
        >>>             log_level=trt.Logger.WARNING,
        >>>             fp16_mode=True,
        >>>             max_workspace_size=1 << 30,
        >>>             device_id=0)
        >>>             })
    """
    # Colored deprecation banner pointing users at MMDeploy.
    (bright_style, reset_style) = ('\x1b[1m', '\x1b[0m')
    (red_text, blue_text) = ('\x1b[31m', '\x1b[34m')
    white_background = '\x1b[107m'
    msg = ((white_background + bright_style) + red_text)
    msg += 'DeprecationWarning: This function will be deprecated in future. '
    msg += (blue_text + 'Welcome to use the unified model deployment toolbox ')
    msg += 'MMDeploy: https://github.com/open-mmlab/mmdeploy'
    msg += reset_style
    warnings.warn(msg)
    device = torch.device('cuda:{}'.format(device_id))
    logger = trt.Logger(log_level)
    builder = trt.Builder(logger)
    # Explicit-batch network: the batch dimension is part of each tensor
    # shape rather than an implicit builder-level setting.
    EXPLICIT_BATCH = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
    network = builder.create_network(EXPLICIT_BATCH)
    parser = trt.OnnxParser(network, logger)
    if isinstance(onnx_model, str):
        onnx_model = onnx.load(onnx_model)
    # Fold shape-related NMS inputs into attributes etc. before parsing.
    onnx_model = preprocess_onnx(onnx_model)
    if (not parser.parse(onnx_model.SerializeToString())):
        # Collect every parser error into one message.
        error_msgs = ''
        for error in range(parser.num_errors):
            error_msgs += f'''{parser.get_error(error)}
'''
        raise RuntimeError(f'''parse onnx failed:
{error_msgs}''')
    # NOTE(review): the workspace size is set both on the builder (older
    # TensorRT API) and on the builder config (newer API) -- presumably for
    # cross-version compatibility; confirm against the targeted TRT version.
    builder.max_workspace_size = max_workspace_size
    config = builder.create_builder_config()
    config.max_workspace_size = max_workspace_size
    # One optimization profile carrying the min/opt/max shape per input.
    profile = builder.create_optimization_profile()
    for (input_name, param) in opt_shape_dict.items():
        min_shape = tuple(param[0][:])
        opt_shape = tuple(param[1][:])
        max_shape = tuple(param[2][:])
        profile.set_shape(input_name, min_shape, opt_shape, max_shape)
    config.add_optimization_profile(profile)
    if fp16_mode:
        # Same old/new API duplication as for the workspace size above.
        builder.fp16_mode = fp16_mode
        config.set_flag(trt.BuilderFlag.FP16)
    # Build on the requested CUDA device.
    with torch.cuda.device(device):
        engine = builder.build_engine(network, config)
    return engine
def save_trt_engine(engine, path):
    """Serialize TensorRT engine to disk.

    Arguments:
        engine (tensorrt.ICudaEngine): TensorRT engine to serialize
        path (str): disk path to write the engine
    """
    # Colored deprecation banner; byte-identical to the historical message.
    msg = ('\x1b[107m\x1b[1m\x1b[31m'
           'DeprecationWarning: This function will be deprecated in future. '
           '\x1b[34m'
           'Welcome to use the unified model deployment toolbox '
           'MMDeploy: https://github.com/open-mmlab/mmdeploy'
           '\x1b[0m')
    warnings.warn(msg)
    serialized = bytearray(engine.serialize())
    with open(path, mode='wb') as f:
        f.write(serialized)
def load_trt_engine(path):
    """Deserialize TensorRT engine from disk.

    Arguments:
        path (str): disk path to read the engine

    Returns:
        tensorrt.ICudaEngine: the TensorRT engine loaded from disk
    """
    # Colored deprecation banner; byte-identical to the historical message.
    msg = ('\x1b[107m\x1b[1m\x1b[31m'
           'DeprecationWarning: This function will be deprecated in future. '
           '\x1b[34m'
           'Welcome to use the unified model deployment toolbox '
           'MMDeploy: https://github.com/open-mmlab/mmdeploy'
           '\x1b[0m')
    warnings.warn(msg)
    with trt.Logger() as logger, trt.Runtime(logger) as runtime:
        with open(path, mode='rb') as f:
            engine_bytes = f.read()
        engine = runtime.deserialize_cuda_engine(engine_bytes)
    return engine
def torch_dtype_from_trt(dtype):
    """Map a TensorRT dtype to the corresponding PyTorch dtype.

    Raises:
        TypeError: If ``dtype`` has no PyTorch equivalent.
    """
    # Comparison uses ==, matching the original if/elif chain semantics.
    pairs = (
        (trt.bool, torch.bool),
        (trt.int8, torch.int8),
        (trt.int32, torch.int32),
        (trt.float16, torch.float16),
        (trt.float32, torch.float32),
    )
    for trt_dtype, torch_dtype in pairs:
        if dtype == trt_dtype:
            return torch_dtype
    raise TypeError('%s is not supported by torch' % dtype)
def torch_device_from_trt(device):
    """Map a TensorRT tensor location to the corresponding PyTorch device.

    Args:
        device (trt.TensorLocation): Tensor location reported by TensorRT.

    Returns:
        torch.device: ``cuda`` for ``TensorLocation.DEVICE``, ``cpu`` for
            ``TensorLocation.HOST``.

    Raises:
        TypeError: If ``device`` is any other location.
    """
    if device == trt.TensorLocation.DEVICE:
        return torch.device('cuda')
    elif device == trt.TensorLocation.HOST:
        return torch.device('cpu')
    else:
        # BUG FIX: the original ``return``ed the TypeError instance instead
        # of raising it, handing callers an exception object as if it were
        # a valid device.
        raise TypeError('%s is not supported by torch' % device)
class TRTWrapper(torch.nn.Module):
    """TensorRT engine Wrapper.

    Arguments:
        engine (tensorrt.ICudaEngine): TensorRT engine to wrap
        input_names (list[str]): names of each inputs
        output_names (list[str]): names of each outputs

    Note:
        If the engine is converted from onnx model. The input_names and
        output_names should be the same as onnx model.
    """

    def __init__(self, engine, input_names=None, output_names=None):
        # Colored deprecation banner pointing users at MMDeploy.
        (bright_style, reset_style) = ('\x1b[1m', '\x1b[0m')
        (red_text, blue_text) = ('\x1b[31m', '\x1b[34m')
        white_background = '\x1b[107m'
        msg = ((white_background + bright_style) + red_text)
        msg += 'DeprecationWarning: This tool will be deprecated in future. '
        msg += (blue_text + 'Welcome to use the unified model deployment toolbox ')
        msg += 'MMDeploy: https://github.com/open-mmlab/mmdeploy'
        msg += reset_style
        warnings.warn(msg)
        super(TRTWrapper, self).__init__()
        self.engine = engine
        # A string argument is treated as a path to a serialized engine.
        if isinstance(self.engine, str):
            self.engine = load_trt_engine(engine)
        if (not isinstance(self.engine, trt.ICudaEngine)):
            raise TypeError('engine should be str or trt.ICudaEngine')
        # Serialize the engine into state_dict so the module can be saved
        # with torch.save like any other nn.Module.
        self._register_state_dict_hook(TRTWrapper._on_state_dict)
        self.context = self.engine.create_execution_context()
        if ((input_names is None) or (output_names is None)):
            # Derive binding names from the engine itself: inputs are the
            # bindings flagged as such, outputs are the remainder.
            names = [_ for _ in self.engine]
            input_names = list(filter(self.engine.binding_is_input, names))
            output_names = list((set(names) - set(input_names)))
        self.input_names = input_names
        self.output_names = output_names

    def _on_state_dict(self, state_dict, prefix, local_metadata):
        # state_dict hook: store the serialized engine plus binding names.
        state_dict[(prefix + 'engine')] = bytearray(self.engine.serialize())
        state_dict[(prefix + 'input_names')] = self.input_names
        state_dict[(prefix + 'output_names')] = self.output_names

    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
        # Counterpart of _on_state_dict: rebuild engine, context and names.
        engine_bytes = state_dict[(prefix + 'engine')]
        with trt.Logger() as logger, trt.Runtime(logger) as runtime:
            self.engine = runtime.deserialize_cuda_engine(engine_bytes)
            self.context = self.engine.create_execution_context()
        self.input_names = state_dict[(prefix + 'input_names')]
        self.output_names = state_dict[(prefix + 'output_names')]

    def forward(self, inputs):
        """
        Arguments:
            inputs (dict): dict of input name-tensors pair

        Return:
            dict: dict of output name-tensors pair
        """
        assert (self.input_names is not None)
        assert (self.output_names is not None)
        # One binding slot (device pointer) per engine binding, indexed by
        # the engine's own binding order.
        bindings = ([None] * (len(self.input_names) + len(self.output_names)))
        for (input_name, input_tensor) in inputs.items():
            idx = self.engine.get_binding_index(input_name)
            # TensorRT has no int64 support; downcast long tensors.
            if (input_tensor.dtype == torch.long):
                input_tensor = input_tensor.int()
            self.context.set_binding_shape(idx, tuple(input_tensor.shape))
            # Bindings must point at contiguous memory.
            bindings[idx] = input_tensor.contiguous().data_ptr()
        # Allocate an output tensor per output binding, with dtype/shape/
        # device queried from the engine and execution context.
        outputs = {}
        for (i, output_name) in enumerate(self.output_names):
            idx = self.engine.get_binding_index(output_name)
            dtype = torch_dtype_from_trt(self.engine.get_binding_dtype(idx))
            shape = tuple(self.context.get_binding_shape(idx))
            device = torch_device_from_trt(self.engine.get_location(idx))
            output = torch.empty(size=shape, dtype=dtype, device=device)
            outputs[output_name] = output
            bindings[idx] = output.data_ptr()
        # Enqueue asynchronously on the current torch CUDA stream.
        self.context.execute_async_v2(bindings, torch.cuda.current_stream().cuda_stream)
        return outputs
class TRTWraper(TRTWrapper):
    """Deprecated alias of :class:`TRTWrapper`.

    Kept only for backward compatibility with the historical (misspelled)
    class name; emits a ``DeprecationWarning`` after initialization.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        warnings.warn('TRTWraper will be deprecated in future. Please use TRTWrapper instead', DeprecationWarning)
class ConfigDict(Dict):
    """Attribute-style dict in which missing keys raise instead of being
    silently auto-created.

    NOTE(review): assumes ``Dict`` is ``addict.Dict`` -- confirm at the
    import site.
    """

    def __missing__(self, name):
        # Disable the base class's implicit creation of missing keys.
        raise KeyError(name)

    def __getattr__(self, name):
        try:
            value = super().__getattr__(name)
        except KeyError:
            # Translate the KeyError from __missing__ into the AttributeError
            # that attribute access is expected to raise.
            ex = AttributeError(f"'{self.__class__.__name__}' object has no attribute '{name}'")
        except Exception as e:
            ex = e
        else:
            return value
        # Raised outside the except block so no exception chaining ("During
        # handling of the above exception...") leaks into tracebacks.
        raise ex
def add_args(parser, cfg, prefix=''):
    """Recursively register a CLI option on ``parser`` for every ``cfg`` entry.

    Each key ``k`` becomes ``--<prefix><k>``; nested dicts recurse with a
    dotted prefix.

    Args:
        parser (argparse.ArgumentParser): Parser to add the arguments to.
        cfg (dict): Configuration whose entries define the options.
        prefix (str): Dotted prefix applied to nested keys. Default: ''.

    Returns:
        argparse.ArgumentParser: The same ``parser``, for chaining.
    """
    for k, v in cfg.items():
        arg_name = '--' + prefix + k
        if isinstance(v, str):
            parser.add_argument(arg_name)
        # BUG FIX: bool must be tested *before* int because ``bool`` is a
        # subclass of ``int`` -- with the original ordering a boolean value
        # always hit the int branch and the ``store_true`` branch was
        # unreachable dead code.
        elif isinstance(v, bool):
            parser.add_argument(arg_name, action='store_true')
        elif isinstance(v, int):
            parser.add_argument(arg_name, type=int)
        elif isinstance(v, float):
            parser.add_argument(arg_name, type=float)
        elif isinstance(v, dict):
            add_args(parser, v, prefix + k + '.')
        elif isinstance(v, abc.Iterable):
            # NOTE(review): assumes ``v`` is indexable and non-empty; a set
            # or an empty sequence raises here -- same as the original.
            parser.add_argument(arg_name, type=type(v[0]), nargs='+')
        else:
            print(f'cannot parse key {prefix + k} of type {type(v)}')
    return parser
class Config():
    """A facility for config and config files.

    It supports common file formats as configs: python/json/yaml. The
    interface is the same as a dict object and also allows access config
    values as attributes.

    Example:
        >>> cfg = Config(dict(a=1, b=dict(b1=[0, 1])))
        >>> cfg.a
        1
        >>> cfg.b
        {'b1': [0, 1]}
        >>> cfg.b.b1
        [0, 1]
        >>> cfg = Config.fromfile('tests/data/config/a.py')
        >>> cfg.filename
        "/home/kchen/projects/mmcv/tests/data/config/a.py"
        >>> cfg.item4
        'test'
        >>> cfg
        "Config [path: /home/kchen/projects/mmcv/tests/data/config/a.py]: "
        "{'item1': [1, 2], 'item2': {'a': 0}, 'item3': True, 'item4': 'test'}"
    """

    @staticmethod
    def _validate_py_syntax(filename):
        # Parse the file with ast only to surface syntax errors early, with
        # a friendlier message than import_module would give.
        with open(filename, 'r', encoding='utf-8') as f:
            content = f.read()
        try:
            ast.parse(content)
        except SyntaxError as e:
            # NOTE(review): '(unknown)' looks like a lost ``{filename}``
            # placeholder -- confirm against upstream before relying on it.
            raise SyntaxError(f'There are syntax errors in config file (unknown): {e}')

    @staticmethod
    def _substitute_predefined_vars(filename, temp_config_name):
        # Expand {{ fileDirname }}, {{ fileBasename }}, etc. templates in the
        # config text and write the result to ``temp_config_name``.
        file_dirname = osp.dirname(filename)
        file_basename = osp.basename(filename)
        file_basename_no_extension = osp.splitext(file_basename)[0]
        file_extname = osp.splitext(filename)[1]
        support_templates = dict(fileDirname=file_dirname, fileBasename=file_basename, fileBasenameNoExtension=file_basename_no_extension, fileExtname=file_extname)
        with open(filename, 'r', encoding='utf-8') as f:
            config_file = f.read()
        for (key, value) in support_templates.items():
            regexp = (('\\{\\{\\s*' + str(key)) + '\\s*\\}\\}')
            # Normalize Windows path separators so the substitution stays a
            # valid Python string literal.
            value = value.replace('\\', '/')
            config_file = re.sub(regexp, value, config_file)
        with open(temp_config_name, 'w', encoding='utf-8') as tmp_config_file:
            tmp_config_file.write(config_file)

    @staticmethod
    def _pre_substitute_base_vars(filename, temp_config_name):
        """Substitute base variable placeholders with unique string tokens so
        that parsing would work; returns the token -> variable-path map."""
        with open(filename, 'r', encoding='utf-8') as f:
            config_file = f.read()
        base_var_dict = {}
        # Matches e.g. {{ _base_.model.backbone }} (BASE_KEY == '_base_').
        regexp = (('\\{\\{\\s*' + BASE_KEY) + '\\.([\\w\\.]+)\\s*\\}\\}')
        base_vars = set(re.findall(regexp, config_file))
        for base_var in base_vars:
            # Random suffix prevents collisions between repeated runs.
            randstr = f'_{base_var}_{uuid.uuid4().hex.lower()[:6]}'
            base_var_dict[randstr] = base_var
            regexp = (((('\\{\\{\\s*' + BASE_KEY) + '\\.') + base_var) + '\\s*\\}\\}')
            config_file = re.sub(regexp, f'"{randstr}"', config_file)
        with open(temp_config_name, 'w', encoding='utf-8') as tmp_config_file:
            tmp_config_file.write(config_file)
        return base_var_dict

    @staticmethod
    def _substitute_base_vars(cfg, base_var_dict, base_cfg):
        """Substitute base-variable placeholder strings (from
        ``_pre_substitute_base_vars``) with their actual values from
        ``base_cfg``, recursing through dicts, lists and tuples."""
        cfg = copy.deepcopy(cfg)
        if isinstance(cfg, dict):
            for (k, v) in cfg.items():
                if (isinstance(v, str) and (v in base_var_dict)):
                    # Walk the dotted path into the base config.
                    new_v = base_cfg
                    for new_k in base_var_dict[v].split('.'):
                        new_v = new_v[new_k]
                    cfg[k] = new_v
                elif isinstance(v, (list, tuple, dict)):
                    cfg[k] = Config._substitute_base_vars(v, base_var_dict, base_cfg)
        elif isinstance(cfg, tuple):
            cfg = tuple((Config._substitute_base_vars(c, base_var_dict, base_cfg) for c in cfg))
        elif isinstance(cfg, list):
            cfg = [Config._substitute_base_vars(c, base_var_dict, base_cfg) for c in cfg]
        elif (isinstance(cfg, str) and (cfg in base_var_dict)):
            new_v = base_cfg
            for new_k in base_var_dict[cfg].split('.'):
                new_v = new_v[new_k]
            cfg = new_v
        return cfg

    @staticmethod
    def _file2dict(filename, use_predefined_variables=True):
        # Load ``filename`` into (cfg_dict, cfg_text), resolving predefined
        # variables, ``_base_`` inheritance and base-variable references.
        filename = osp.abspath(osp.expanduser(filename))
        check_file_exist(filename)
        fileExtname = osp.splitext(filename)[1]
        if (fileExtname not in ['.py', '.json', '.yaml', '.yml']):
            raise IOError('Only py/yml/yaml/json type are supported now!')
        # All substitution happens on a temp copy so the original file is
        # never modified.
        with tempfile.TemporaryDirectory() as temp_config_dir:
            temp_config_file = tempfile.NamedTemporaryFile(dir=temp_config_dir, suffix=fileExtname)
            if (platform.system() == 'Windows'):
                # Windows cannot reopen an already-open NamedTemporaryFile.
                temp_config_file.close()
            temp_config_name = osp.basename(temp_config_file.name)
            if use_predefined_variables:
                Config._substitute_predefined_vars(filename, temp_config_file.name)
            else:
                shutil.copyfile(filename, temp_config_file.name)
            base_var_dict = Config._pre_substitute_base_vars(temp_config_file.name, temp_config_file.name)
            if filename.endswith('.py'):
                # Import the temp copy as a throwaway module and harvest its
                # non-dunder globals as the config dict.
                temp_module_name = osp.splitext(temp_config_name)[0]
                sys.path.insert(0, temp_config_dir)
                Config._validate_py_syntax(filename)
                mod = import_module(temp_module_name)
                sys.path.pop(0)
                cfg_dict = {name: value for (name, value) in mod.__dict__.items() if (not name.startswith('__'))}
                # Drop the module so repeated loads never hit a stale cache.
                del sys.modules[temp_module_name]
            elif filename.endswith(('.yml', '.yaml', '.json')):
                import mmcv
                cfg_dict = mmcv.load(temp_config_file.name)
            temp_config_file.close()
        if (DEPRECATION_KEY in cfg_dict):
            # Config files may declare themselves deprecated and point to a
            # replacement and/or reference URL.
            deprecation_info = cfg_dict.pop(DEPRECATION_KEY)
            warning_msg = f'The config file (unknown) will be deprecated in the future.'
            if ('expected' in deprecation_info):
                warning_msg += f" Please use {deprecation_info['expected']} instead."
            if ('reference' in deprecation_info):
                warning_msg += f" More information can be found at {deprecation_info['reference']}"
            warnings.warn(warning_msg, DeprecationWarning)
        cfg_text = (filename + '\n')
        with open(filename, 'r', encoding='utf-8') as f:
            cfg_text += f.read()
        if (BASE_KEY in cfg_dict):
            # Recursively load every ``_base_`` file and merge this config
            # on top of their union.
            cfg_dir = osp.dirname(filename)
            base_filename = cfg_dict.pop(BASE_KEY)
            base_filename = (base_filename if isinstance(base_filename, list) else [base_filename])
            cfg_dict_list = list()
            cfg_text_list = list()
            for f in base_filename:
                (_cfg_dict, _cfg_text) = Config._file2dict(osp.join(cfg_dir, f))
                cfg_dict_list.append(_cfg_dict)
                cfg_text_list.append(_cfg_text)
            base_cfg_dict = dict()
            for c in cfg_dict_list:
                # Sibling base files must not define the same top-level key.
                duplicate_keys = (base_cfg_dict.keys() & c.keys())
                if (len(duplicate_keys) > 0):
                    raise KeyError(f'Duplicate key is not allowed among bases. Duplicate keys: {duplicate_keys}')
                base_cfg_dict.update(c)
            # Resolve base-variable tokens now that the base dict is known,
            # then overlay the child config on the merged bases.
            cfg_dict = Config._substitute_base_vars(cfg_dict, base_var_dict, base_cfg_dict)
            base_cfg_dict = Config._merge_a_into_b(cfg_dict, base_cfg_dict)
            cfg_dict = base_cfg_dict
            cfg_text_list.append(cfg_text)
            cfg_text = '\n'.join(cfg_text_list)
        return (cfg_dict, cfg_text)

    @staticmethod
    def _merge_a_into_b(a, b, allow_list_keys=False):
        """merge dict ``a`` into dict ``b`` (non-inplace).

        Values in ``a`` will overwrite ``b``. ``b`` is copied first to avoid
        in-place modifications.

        Args:
            a (dict): The source dict to be merged into ``b``.
            b (dict): The origin dict to be fetch keys from ``a``.
            allow_list_keys (bool): If True, int string keys (e.g. '0', '1')
                are allowed in source ``a`` and will replace the element of
                the corresponding index in b if b is a list. Default: False.

        Returns:
            dict: The modified dict of ``b`` using ``a``.

        Examples:
            # Normally merge a into b.
            >>> Config._merge_a_into_b(
            ...     dict(obj=dict(a=2)), dict(obj=dict(a=1)))
            {'obj': {'a': 2}}

            # Delete b first and merge a into b.
            >>> Config._merge_a_into_b(
            ...     dict(obj=dict(_delete_=True, a=2)), dict(obj=dict(a=1)))
            {'obj': {'a': 2}}

            # b is a list
            >>> Config._merge_a_into_b(
            ...     {'0': dict(a=2)}, [dict(a=1), dict(b=2)], True)
            [{'a': 2}, {'b': 2}]
        """
        b = b.copy()
        for (k, v) in a.items():
            if (allow_list_keys and k.isdigit() and isinstance(b, list)):
                # Digit string key addresses an element of a base list.
                k = int(k)
                if (len(b) <= k):
                    raise KeyError(f'Index {k} exceeds the length of list {b}')
                b[k] = Config._merge_a_into_b(v, b[k], allow_list_keys)
            elif isinstance(v, dict):
                # ``_delete_=True`` in the child replaces the base subtree
                # instead of merging into it.
                if ((k in b) and (not v.pop(DELETE_KEY, False))):
                    allowed_types = ((dict, list) if allow_list_keys else dict)
                    if (not isinstance(b[k], allowed_types)):
                        raise TypeError(f'{k}={v} in child config cannot inherit from base because {k} is a dict in the child config but is of type {type(b[k])} in base config. You may set `{DELETE_KEY}=True` to ignore the base config.')
                    b[k] = Config._merge_a_into_b(v, b[k], allow_list_keys)
                else:
                    b[k] = ConfigDict(v)
            else:
                b[k] = v
        return b

    @staticmethod
    def fromfile(filename, use_predefined_variables=True, import_custom_modules=True):
        # Build a Config from a py/json/yaml file; optionally import the
        # modules listed under ``custom_imports`` for registry side effects.
        (cfg_dict, cfg_text) = Config._file2dict(filename, use_predefined_variables)
        if (import_custom_modules and cfg_dict.get('custom_imports', None)):
            import_modules_from_strings(**cfg_dict['custom_imports'])
        return Config(cfg_dict, cfg_text=cfg_text, filename=filename)

    @staticmethod
    def fromstring(cfg_str, file_format):
        """Generate config from config str.

        Args:
            cfg_str (str): Config str.
            file_format (str): Config file format corresponding to the
                config str. Only py/yml/yaml/json type are supported now!

        Returns:
            :obj:`Config`: Config obj.
        """
        if (file_format not in ['.py', '.json', '.yaml', '.yml']):
            raise IOError('Only py/yml/yaml/json type are supported now!')
        if ((file_format != '.py') and ('dict(' in cfg_str)):
            # ``dict(...)`` literals strongly suggest a Python config.
            warnings.warn('Please check "file_format", the file format may be .py')
        # delete=False so the file survives the ``with`` and can be read by
        # fromfile (required on Windows); removed manually afterwards.
        with tempfile.NamedTemporaryFile('w', encoding='utf-8', suffix=file_format, delete=False) as temp_file:
            temp_file.write(cfg_str)
        cfg = Config.fromfile(temp_file.name)
        os.remove(temp_file.name)
        return cfg

    @staticmethod
    def auto_argparser(description=None):
        """Generate argparser from config file automatically (experimental)"""
        # First pass: only the config path, so the file can be loaded.
        partial_parser = ArgumentParser(description=description)
        partial_parser.add_argument('config', help='config file path')
        cfg_file = partial_parser.parse_known_args()[0].config
        cfg = Config.fromfile(cfg_file)
        # Second pass: one CLI option per config entry.
        parser = ArgumentParser(description=description)
        parser.add_argument('config', help='config file path')
        add_args(parser, cfg)
        return (parser, cfg)

    def __init__(self, cfg_dict=None, cfg_text=None, filename=None):
        if (cfg_dict is None):
            cfg_dict = dict()
        elif (not isinstance(cfg_dict, dict)):
            raise TypeError(f'cfg_dict must be a dict, but got {type(cfg_dict)}')
        for key in cfg_dict:
            if (key in RESERVED_KEYS):
                raise KeyError(f'{key} is reserved for config file')
        # Internal attributes are set via object.__setattr__ because this
        # class redirects normal attribute writes into _cfg_dict.
        super(Config, self).__setattr__('_cfg_dict', ConfigDict(cfg_dict))
        super(Config, self).__setattr__('_filename', filename)
        if cfg_text:
            text = cfg_text
        elif filename:
            with open(filename, 'r') as f:
                text = f.read()
        else:
            text = ''
        super(Config, self).__setattr__('_text', text)

    @property
    def filename(self):
        # Path the config was loaded from, or None for in-memory configs.
        return self._filename

    @property
    def text(self):
        # Raw text of the config file(s), as captured at load time.
        return self._text

    @property
    def pretty_text(self):
        # Render the config as formatted Python source (yapf-normalized).
        indent = 4

        def _indent(s_, num_spaces):
            # Indent every line except the first by ``num_spaces``.
            s = s_.split('\n')
            if (len(s) == 1):
                return s_
            first = s.pop(0)
            s = [((num_spaces * ' ') + line) for line in s]
            s = '\n'.join(s)
            s = ((first + '\n') + s)
            return s

        def _format_basic_types(k, v, use_mapping=False):
            # Scalar entry as either ``k=v`` or ``'k': v`` (mapping form is
            # used when some key is not a valid identifier).
            if isinstance(v, str):
                v_str = f"'{v}'"
            else:
                v_str = str(v)
            if use_mapping:
                k_str = (f"'{k}'" if isinstance(k, str) else str(k))
                attr_str = f'{k_str}: {v_str}'
            else:
                attr_str = f'{str(k)}={v_str}'
            attr_str = _indent(attr_str, indent)
            return attr_str

        def _format_list(k, v, use_mapping=False):
            # A list of dicts is expanded element-per-line as dict(...) calls;
            # any other list falls back to its repr via the scalar path.
            if all((isinstance(_, dict) for _ in v)):
                v_str = '[\n'
                v_str += '\n'.join((f'dict({_indent(_format_dict(v_), indent)}),' for v_ in v)).rstrip(',')
                if use_mapping:
                    k_str = (f"'{k}'" if isinstance(k, str) else str(k))
                    attr_str = f'{k_str}: {v_str}'
                else:
                    attr_str = f'{str(k)}={v_str}'
                attr_str = (_indent(attr_str, indent) + ']')
            else:
                attr_str = _format_basic_types(k, v, use_mapping)
            return attr_str

        def _contain_invalid_identifier(dict_str):
            # True if any key cannot be written as a Python identifier.
            contain_invalid_identifier = False
            for key_name in dict_str:
                contain_invalid_identifier |= (not str(key_name).isidentifier())
            return contain_invalid_identifier

        def _format_dict(input_dict, outest_level=False):
            r = ''
            s = []
            use_mapping = _contain_invalid_identifier(input_dict)
            if use_mapping:
                r += '{'
            for (idx, (k, v)) in enumerate(input_dict.items()):
                is_last = (idx >= (len(input_dict) - 1))
                end = ('' if (outest_level or is_last) else ',')
                if isinstance(v, dict):
                    v_str = ('\n' + _format_dict(v))
                    if use_mapping:
                        k_str = (f"'{k}'" if isinstance(k, str) else str(k))
                        attr_str = f'{k_str}: dict({v_str}'
                    else:
                        attr_str = f'{str(k)}=dict({v_str}'
                    attr_str = ((_indent(attr_str, indent) + ')') + end)
                elif isinstance(v, list):
                    attr_str = (_format_list(k, v, use_mapping) + end)
                else:
                    attr_str = (_format_basic_types(k, v, use_mapping) + end)
                s.append(attr_str)
            r += '\n'.join(s)
            if use_mapping:
                r += '}'
            return r

        cfg_dict = self._cfg_dict.to_dict()
        text = _format_dict(cfg_dict, outest_level=True)
        # Normalize the generated source with yapf for stable output.
        yapf_style = dict(based_on_style='pep8', blank_line_before_nested_class_or_def=True, split_before_expression_after_opening_paren=True)
        (text, _) = FormatCode(text, style_config=yapf_style, verify=True)
        return text

    def __repr__(self):
        return f'Config (path: {self.filename}): {self._cfg_dict.__repr__()}'

    def __len__(self):
        return len(self._cfg_dict)

    def __getattr__(self, name):
        # Delegate unknown attributes to the underlying ConfigDict.
        return getattr(self._cfg_dict, name)

    def __getitem__(self, name):
        return self._cfg_dict.__getitem__(name)

    def __setattr__(self, name, value):
        # Attribute writes land in _cfg_dict; plain dicts are wrapped so
        # nested attribute access keeps working.
        if isinstance(value, dict):
            value = ConfigDict(value)
        self._cfg_dict.__setattr__(name, value)

    def __setitem__(self, name, value):
        if isinstance(value, dict):
            value = ConfigDict(value)
        self._cfg_dict.__setitem__(name, value)

    def __iter__(self):
        return iter(self._cfg_dict)

    def __getstate__(self):
        # Pickle support: state is the (dict, filename, text) triple.
        return (self._cfg_dict, self._filename, self._text)

    def __copy__(self):
        # Shallow copy shares the underlying _cfg_dict.
        cls = self.__class__
        other = cls.__new__(cls)
        other.__dict__.update(self.__dict__)
        return other

    def __deepcopy__(self, memo):
        cls = self.__class__
        other = cls.__new__(cls)
        memo[id(self)] = other
        for (key, value) in self.__dict__.items():
            # object.__setattr__ bypasses the _cfg_dict redirection above.
            super(Config, other).__setattr__(key, copy.deepcopy(value, memo))
        return other

    def __setstate__(self, state):
        (_cfg_dict, _filename, _text) = state
        super(Config, self).__setattr__('_cfg_dict', _cfg_dict)
        super(Config, self).__setattr__('_filename', _filename)
        super(Config, self).__setattr__('_text', _text)

    def dump(self, file=None):
        # Serialize the config: Python configs via pretty_text, other
        # formats via mmcv.dump. Returns the text when ``file`` is None.
        cfg_dict = super(Config, self).__getattribute__('_cfg_dict').to_dict()
        if self.filename.endswith('.py'):
            if (file is None):
                return self.pretty_text
            else:
                with open(file, 'w', encoding='utf-8') as f:
                    f.write(self.pretty_text)
        else:
            import mmcv
            if (file is None):
                file_format = self.filename.split('.')[(- 1)]
                return mmcv.dump(cfg_dict, file_format=file_format)
            else:
                mmcv.dump(cfg_dict, file)

    def merge_from_dict(self, options, allow_list_keys=True):
        """Merge list into cfg_dict.

        Merge the dict parsed by MultipleKVAction into this cfg.

        Examples:
            >>> options = {'model.backbone.depth': 50,
            ...            'model.backbone.with_cp':True}
            >>> cfg = Config(dict(model=dict(backbone=dict(type='ResNet'))))
            >>> cfg.merge_from_dict(options)
            >>> cfg_dict = super(Config, self).__getattribute__('_cfg_dict')
            >>> assert cfg_dict == dict(
            ...     model=dict(backbone=dict(depth=50, with_cp=True)))

            >>> # Merge list element
            >>> cfg = Config(dict(pipeline=[
            ...     dict(type='LoadImage'), dict(type='LoadAnnotations')]))
            >>> options = dict(pipeline={'0': dict(type='SelfLoadImage')})
            >>> cfg.merge_from_dict(options, allow_list_keys=True)
            >>> cfg_dict = super(Config, self).__getattribute__('_cfg_dict')
            >>> assert cfg_dict == dict(pipeline=[
            ...     dict(type='SelfLoadImage'), dict(type='LoadAnnotations')])

        Args:
            options (dict): dict of configs to merge from.
            allow_list_keys (bool): If True, int string keys (e.g. '0', '1')
                are allowed in ``options`` and will replace the element of
                the corresponding index in the config if the config is a
                list. Default: True.
        """
        # Expand dotted keys ('a.b.c') into nested ConfigDicts first.
        option_cfg_dict = {}
        for (full_key, v) in options.items():
            d = option_cfg_dict
            key_list = full_key.split('.')
            for subkey in key_list[:(- 1)]:
                d.setdefault(subkey, ConfigDict())
                d = d[subkey]
            subkey = key_list[(- 1)]
            d[subkey] = v
        cfg_dict = super(Config, self).__getattribute__('_cfg_dict')
        super(Config, self).__setattr__('_cfg_dict', Config._merge_a_into_b(option_cfg_dict, cfg_dict, allow_list_keys=allow_list_keys))
class DictAction(Action):
    """argparse action that splits each ``KEY=VALUE`` argument on the first
    ``=`` and collects the pairs into a dict.

    List options may be passed as comma separated values, i.e
    'KEY=V1,V2,V3', or with explicit brackets, i.e. 'KEY=[V1,V2,V3]'.
    Nested brackets build list/tuple values, e.g. 'KEY=[(V1,V2),(V3,V4)]'.
    """

    @staticmethod
    def _parse_int_float_bool(val):
        """Best-effort conversion of a string to int, float or bool;
        unparseable strings are returned unchanged."""
        for cast in (int, float):
            try:
                return cast(val)
            except ValueError:
                pass
        lowered = val.lower()
        if lowered in ('true', 'false'):
            return lowered == 'true'
        return val

    @staticmethod
    def _parse_iterable(val):
        """Recursively expand a string into (possibly nested) list/tuple
        values.

        All elements inside '()' or '[]' are treated as iterable values.

        Args:
            val (str): Value string.

        Returns:
            list | tuple: The expanded list or tuple from the string.

        Examples:
            >>> DictAction._parse_iterable('1,2,3')
            [1, 2, 3]
            >>> DictAction._parse_iterable('[a, b, c]')
            ['a', 'b', 'c']
            >>> DictAction._parse_iterable('[(1, 2, 3), [a, b], c]')
            [(1, 2, 3), ['a', 'b'], 'c']
        """

        def find_next_comma(string):
            # Index of the first comma that sits outside any '()'/'[]'
            # nesting; len(string) when there is no such comma.
            assert (string.count('(') == string.count(')')) and (
                string.count('[') == string.count(']')), \
                f'Imbalanced brackets exist in {string}'
            for idx, char in enumerate(string):
                prefix = string[:idx]
                if (char == ','
                        and prefix.count('(') == prefix.count(')')
                        and prefix.count('[') == prefix.count(']')):
                    return idx
            return len(string)

        # Drop surrounding quotes and all spaces, then peel one bracket
        # layer if the whole value is parenthesized/bracketed.
        val = val.strip('\'"').replace(' ', '')
        is_tuple = val.startswith('(') and val.endswith(')')
        if is_tuple or (val.startswith('[') and val.endswith(']')):
            val = val[1:-1]
        elif ',' not in val:
            # A bare scalar: no iteration needed.
            return DictAction._parse_int_float_bool(val)
        values = []
        while val:
            comma_idx = find_next_comma(val)
            values.append(DictAction._parse_iterable(val[:comma_idx]))
            val = val[comma_idx + 1:]
        return tuple(values) if is_tuple else values

    def __call__(self, parser, namespace, values, option_string=None):
        """Parse every ``KEY=VALUE`` token in ``values`` and store the
        resulting dict on ``namespace`` under ``self.dest``."""
        options = {}
        for kv in values:
            key, val = kv.split('=', maxsplit=1)
            options[key] = self._parse_iterable(val)
        setattr(namespace, self.dest, options)
def collect_env():
    """Collect the information of the running environments.

    Returns:
        dict: The environment information. The following fields are contained.

            - sys.platform: The variable of ``sys.platform``.
            - Python: Python version.
            - CUDA available: Bool, indicating if CUDA is available.
            - GPU devices: Device type of each GPU.
            - CUDA_HOME (optional): The env var ``CUDA_HOME``.
            - NVCC (optional): NVCC version.
            - GCC: GCC version, "n/a" if GCC is not installed.
            - PyTorch: PyTorch version.
            - PyTorch compiling details: The output of
              ``torch.__config__.show()``.
            - TorchVision (optional): TorchVision version.
            - OpenCV: OpenCV version.
            - MMCV: MMCV version.
            - MMCV Compiler: The GCC version for compiling MMCV ops.
            - MMCV CUDA Compiler: The CUDA version for compiling MMCV ops.
    """
    env_info = {}
    env_info['sys.platform'] = sys.platform
    env_info['Python'] = sys.version.replace('\n', '')
    cuda_available = torch.cuda.is_available()
    env_info['CUDA available'] = cuda_available
    if cuda_available:
        # Group visible GPUs by device name, e.g. {'Tesla V100': ['0', '1']}
        # becomes the entry 'GPU 0,1': 'Tesla V100'.
        devices = defaultdict(list)
        for k in range(torch.cuda.device_count()):
            devices[torch.cuda.get_device_name(k)].append(str(k))
        for (name, device_ids) in devices.items():
            env_info[('GPU ' + ','.join(device_ids))] = name
        from mmcv.utils.parrots_wrapper import _get_cuda_home
        CUDA_HOME = _get_cuda_home()
        env_info['CUDA_HOME'] = CUDA_HOME
        if ((CUDA_HOME is not None) and osp.isdir(CUDA_HOME)):
            try:
                # NOTE(review): relies on a POSIX shell pipeline ('tail');
                # presumably falls into 'Not Available' on Windows — confirm.
                nvcc = osp.join(CUDA_HOME, 'bin/nvcc')
                nvcc = subprocess.check_output(f'"{nvcc}" -V | tail -n1', shell=True)
                nvcc = nvcc.decode('utf-8').strip()
            except subprocess.SubprocessError:
                nvcc = 'Not Available'
            env_info['NVCC'] = nvcc
    try:
        # 'head -n1' keeps only the version banner line.
        gcc = subprocess.check_output('gcc --version | head -n1', shell=True)
        gcc = gcc.decode('utf-8').strip()
        env_info['GCC'] = gcc
    except subprocess.CalledProcessError:
        # gcc not installed (the shell exits non-zero).
        env_info['GCC'] = 'n/a'
    env_info['PyTorch'] = torch.__version__
    env_info['PyTorch compiling details'] = get_build_config()
    try:
        import torchvision
        env_info['TorchVision'] = torchvision.__version__
    except ModuleNotFoundError:
        # torchvision is optional; omit the field when absent.
        pass
    env_info['OpenCV'] = cv2.__version__
    env_info['MMCV'] = mmcv.__version__
    try:
        from mmcv.ops import get_compiler_version, get_compiling_cuda_version
    except ModuleNotFoundError:
        # Compiled mmcv ops are optional; report 'n/a' when the C extension
        # is not built.
        env_info['MMCV Compiler'] = 'n/a'
        env_info['MMCV CUDA Compiler'] = 'n/a'
    else:
        env_info['MMCV Compiler'] = get_compiler_version()
        env_info['MMCV CUDA Compiler'] = get_compiling_cuda_version()
    return env_info
def check_ops_exist(): ext_loader = pkgutil.find_loader('mmcv._ext') return (ext_loader is not None)
def get_logger(name, log_file=None, log_level=logging.INFO, file_mode='w'): 'Initialize and get a logger by name.\n\n If the logger has not been initialized, this method will initialize the\n logger by adding one or two handlers, otherwise the initialized logger will\n be directly returned. During initialization, a StreamHandler will always be\n added. If `log_file` is specified and the process rank is 0, a FileHandler\n will also be added.\n\n Args:\n name (str): Logger name.\n log_file (str | None): The log filename. If specified, a FileHandler\n will be added to the logger.\n log_level (int): The logger level. Note that only the process of\n rank 0 is affected, and other processes will set the level to\n "Error" thus be silent most of the time.\n file_mode (str): The file mode used in opening log file.\n Defaults to \'w\'.\n\n Returns:\n logging.Logger: The expected logger.\n ' logger = logging.getLogger(name) if (name in logger_initialized): return logger for logger_name in logger_initialized: if name.startswith(logger_name): return logger for handler in logger.root.handlers: if (type(handler) is logging.StreamHandler): handler.setLevel(logging.ERROR) stream_handler = logging.StreamHandler() handlers = [stream_handler] if (dist.is_available() and dist.is_initialized()): rank = dist.get_rank() else: rank = 0 if ((rank == 0) and (log_file is not None)): file_handler = logging.FileHandler(log_file, file_mode) handlers.append(file_handler) formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') for handler in handlers: handler.setFormatter(formatter) handler.setLevel(log_level) logger.addHandler(handler) if (rank == 0): logger.setLevel(log_level) else: logger.setLevel(logging.ERROR) logger_initialized[name] = True return logger
def print_log(msg, logger=None, level=logging.INFO): 'Print a log message.\n\n Args:\n msg (str): The message to be logged.\n logger (logging.Logger | str | None): The logger to be used.\n Some special loggers are:\n - "silent": no message will be printed.\n - other str: the logger obtained with `get_root_logger(logger)`.\n - None: The `print()` method will be used to print log messages.\n level (int): Logging level. Only available when `logger` is a Logger\n object or "root".\n ' if (logger is None): print(msg) elif isinstance(logger, logging.Logger): logger.log(level, msg) elif (logger == 'silent'): pass elif isinstance(logger, str): _logger = get_logger(logger) _logger.log(level, msg) else: raise TypeError(f'logger should be either a logging.Logger object, str, "silent" or None, but got {type(logger)}')
def _ntuple(n): def parse(x): if isinstance(x, collections.abc.Iterable): return x return tuple(repeat(x, n)) return parse
def is_str(x): 'Whether the input is an string instance.\n\n Note: This method is deprecated since python 2 is no longer supported.\n ' return isinstance(x, str)
def import_modules_from_strings(imports, allow_failed_imports=False): "Import modules from the given list of strings.\n\n Args:\n imports (list | str | None): The given module names to be imported.\n allow_failed_imports (bool): If True, the failed imports will return\n None. Otherwise, an ImportError is raise. Default: False.\n\n Returns:\n list[module] | module | None: The imported modules.\n\n Examples:\n >>> osp, sys = import_modules_from_strings(\n ... ['os.path', 'sys'])\n >>> import os.path as osp_\n >>> import sys as sys_\n >>> assert osp == osp_\n >>> assert sys == sys_\n " if (not imports): return single_import = False if isinstance(imports, str): single_import = True imports = [imports] if (not isinstance(imports, list)): raise TypeError(f'custom_imports must be a list but got type {type(imports)}') imported = [] for imp in imports: if (not isinstance(imp, str)): raise TypeError(f'{imp} is of type {type(imp)} and cannot be imported.') try: imported_tmp = import_module(imp) except ImportError: if allow_failed_imports: warnings.warn(f'{imp} failed to import and is ignored.', UserWarning) imported_tmp = None else: raise ImportError imported.append(imported_tmp) if single_import: imported = imported[0] return imported
def iter_cast(inputs, dst_type, return_type=None): 'Cast elements of an iterable object into some type.\n\n Args:\n inputs (Iterable): The input object.\n dst_type (type): Destination type.\n return_type (type, optional): If specified, the output object will be\n converted to this type, otherwise an iterator.\n\n Returns:\n iterator or specified type: The converted object.\n ' if (not isinstance(inputs, abc.Iterable)): raise TypeError('inputs must be an iterable object') if (not isinstance(dst_type, type)): raise TypeError('"dst_type" must be a valid type') out_iterable = map(dst_type, inputs) if (return_type is None): return out_iterable else: return return_type(out_iterable)
def list_cast(inputs, dst_type): 'Cast elements of an iterable object into a list of some type.\n\n A partial method of :func:`iter_cast`.\n ' return iter_cast(inputs, dst_type, return_type=list)
def tuple_cast(inputs, dst_type): 'Cast elements of an iterable object into a tuple of some type.\n\n A partial method of :func:`iter_cast`.\n ' return iter_cast(inputs, dst_type, return_type=tuple)
def is_seq_of(seq, expected_type, seq_type=None): 'Check whether it is a sequence of some type.\n\n Args:\n seq (Sequence): The sequence to be checked.\n expected_type (type): Expected type of sequence items.\n seq_type (type, optional): Expected sequence type.\n\n Returns:\n bool: Whether the sequence is valid.\n ' if (seq_type is None): exp_seq_type = abc.Sequence else: assert isinstance(seq_type, type) exp_seq_type = seq_type if (not isinstance(seq, exp_seq_type)): return False for item in seq: if (not isinstance(item, expected_type)): return False return True
def is_list_of(seq, expected_type): 'Check whether it is a list of some type.\n\n A partial method of :func:`is_seq_of`.\n ' return is_seq_of(seq, expected_type, seq_type=list)
def is_tuple_of(seq, expected_type): 'Check whether it is a tuple of some type.\n\n A partial method of :func:`is_seq_of`.\n ' return is_seq_of(seq, expected_type, seq_type=tuple)
def slice_list(in_list, lens): 'Slice a list into several sub lists by a list of given length.\n\n Args:\n in_list (list): The list to be sliced.\n lens(int or list): The expected length of each out list.\n\n Returns:\n list: A list of sliced list.\n ' if isinstance(lens, int): assert ((len(in_list) % lens) == 0) lens = ([lens] * int((len(in_list) / lens))) if (not isinstance(lens, list)): raise TypeError('"indices" must be an integer or a list of integers') elif (sum(lens) != len(in_list)): raise ValueError(f'sum of lens and list length does not match: {sum(lens)} != {len(in_list)}') out_list = [] idx = 0 for i in range(len(lens)): out_list.append(in_list[idx:(idx + lens[i])]) idx += lens[i] return out_list
def concat_list(in_list): 'Concatenate a list of list into a single list.\n\n Args:\n in_list (list): The list of list to be merged.\n\n Returns:\n list: The concatenated flat list.\n ' return list(itertools.chain(*in_list))
def check_prerequisites(prerequisites, checker, msg_tmpl='Prerequisites "{}" are required in method "{}" but not found, please install them first.'): 'A decorator factory to check if prerequisites are satisfied.\n\n Args:\n prerequisites (str of list[str]): Prerequisites to be checked.\n checker (callable): The checker method that returns True if a\n prerequisite is meet, False otherwise.\n msg_tmpl (str): The message template with two variables.\n\n Returns:\n decorator: A specific decorator.\n ' def wrap(func): @functools.wraps(func) def wrapped_func(*args, **kwargs): requirements = ([prerequisites] if isinstance(prerequisites, str) else prerequisites) missing = [] for item in requirements: if (not checker(item)): missing.append(item) if missing: print(msg_tmpl.format(', '.join(missing), func.__name__)) raise RuntimeError('Prerequisites not meet.') else: return func(*args, **kwargs) return wrapped_func return wrap
def _check_py_package(package): try: import_module(package) except ImportError: return False else: return True
def _check_executable(cmd): if (subprocess.call(f'which {cmd}', shell=True) != 0): return False else: return True
def requires_package(prerequisites): "A decorator to check if some python packages are installed.\n\n Example:\n >>> @requires_package('numpy')\n >>> func(arg1, args):\n >>> return numpy.zeros(1)\n array([0.])\n >>> @requires_package(['numpy', 'non_package'])\n >>> func(arg1, args):\n >>> return numpy.zeros(1)\n ImportError\n " return check_prerequisites(prerequisites, checker=_check_py_package)
def requires_executable(prerequisites): "A decorator to check if some executable files are installed.\n\n Example:\n >>> @requires_executable('ffmpeg')\n >>> func(arg1, args):\n >>> print(1)\n 1\n " return check_prerequisites(prerequisites, checker=_check_executable)
def deprecated_api_warning(name_dict, cls_name=None): 'A decorator to check if some arguments are deprecate and try to replace\n deprecate src_arg_name to dst_arg_name.\n\n Args:\n name_dict(dict):\n key (str): Deprecate argument names.\n val (str): Expected argument names.\n\n Returns:\n func: New function.\n ' def api_warning_wrapper(old_func): @functools.wraps(old_func) def new_func(*args, **kwargs): args_info = getfullargspec(old_func) func_name = old_func.__name__ if (cls_name is not None): func_name = f'{cls_name}.{func_name}' if args: arg_names = args_info.args[:len(args)] for (src_arg_name, dst_arg_name) in name_dict.items(): if (src_arg_name in arg_names): warnings.warn(f'"{src_arg_name}" is deprecated in `{func_name}`, please use "{dst_arg_name}" instead', DeprecationWarning) arg_names[arg_names.index(src_arg_name)] = dst_arg_name if kwargs: for (src_arg_name, dst_arg_name) in name_dict.items(): if (src_arg_name in kwargs): assert (dst_arg_name not in kwargs), f'The expected behavior is to replace the deprecated key `{src_arg_name}` to new key `{dst_arg_name}`, but got them in the arguments at the same time, which is confusing. `{src_arg_name} will be deprecated in the future, please use `{dst_arg_name}` instead.' warnings.warn(f'"{src_arg_name}" is deprecated in `{func_name}`, please use "{dst_arg_name}" instead', DeprecationWarning) kwargs[dst_arg_name] = kwargs.pop(src_arg_name) output = old_func(*args, **kwargs) return output return new_func return api_warning_wrapper
def is_method_overridden(method, base_class, derived_class): 'Check if a method of base class is overridden in derived class.\n\n Args:\n method (str): the method name to check.\n base_class (type): the class of the base class.\n derived_class (type | Any): the class or instance of the derived class.\n ' assert isinstance(base_class, type), "base_class doesn't accept instance, Please pass class instead." if (not isinstance(derived_class, type)): derived_class = derived_class.__class__ base_method = getattr(base_class, method) derived_method = getattr(derived_class, method) return (derived_method != base_method)
def has_method(obj: object, method: str) -> bool: 'Check whether the object has a method.\n\n Args:\n method (str): The method name to check.\n obj (object): The object to check.\n\n Returns:\n bool: True if the object has the method else False.\n ' return (hasattr(obj, method) and callable(getattr(obj, method)))
def is_rocm_pytorch() -> bool: is_rocm = False if (TORCH_VERSION != 'parrots'): try: from torch.utils.cpp_extension import ROCM_HOME is_rocm = (True if ((torch.version.hip is not None) and (ROCM_HOME is not None)) else False) except ImportError: pass return is_rocm
def _get_cuda_home(): if (TORCH_VERSION == 'parrots'): from parrots.utils.build_extension import CUDA_HOME elif is_rocm_pytorch(): from torch.utils.cpp_extension import ROCM_HOME CUDA_HOME = ROCM_HOME else: from torch.utils.cpp_extension import CUDA_HOME return CUDA_HOME
def get_build_config(): if (TORCH_VERSION == 'parrots'): from parrots.config import get_build_info return get_build_info() else: return torch.__config__.show()
def _get_conv(): if (TORCH_VERSION == 'parrots'): from parrots.nn.modules.conv import _ConvNd, _ConvTransposeMixin else: from torch.nn.modules.conv import _ConvNd, _ConvTransposeMixin return (_ConvNd, _ConvTransposeMixin)
def _get_dataloader(): if (TORCH_VERSION == 'parrots'): from torch.utils.data import DataLoader, PoolDataLoader else: from torch.utils.data import DataLoader PoolDataLoader = DataLoader return (DataLoader, PoolDataLoader)
def _get_extension(): if (TORCH_VERSION == 'parrots'): from parrots.utils.build_extension import BuildExtension, Extension CppExtension = partial(Extension, cuda=False) CUDAExtension = partial(Extension, cuda=True) else: from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension return (BuildExtension, CppExtension, CUDAExtension)
def _get_pool(): if (TORCH_VERSION == 'parrots'): from parrots.nn.modules.pool import _AdaptiveAvgPoolNd, _AdaptiveMaxPoolNd, _AvgPoolNd, _MaxPoolNd else: from torch.nn.modules.pooling import _AdaptiveAvgPoolNd, _AdaptiveMaxPoolNd, _AvgPoolNd, _MaxPoolNd return (_AdaptiveAvgPoolNd, _AdaptiveMaxPoolNd, _AvgPoolNd, _MaxPoolNd)
def _get_norm(): if (TORCH_VERSION == 'parrots'): from parrots.nn.modules.batchnorm import _BatchNorm, _InstanceNorm SyncBatchNorm_ = torch.nn.SyncBatchNorm2d else: from torch.nn.modules.batchnorm import _BatchNorm from torch.nn.modules.instancenorm import _InstanceNorm SyncBatchNorm_ = torch.nn.SyncBatchNorm return (_BatchNorm, _InstanceNorm, SyncBatchNorm_)
class SyncBatchNorm(SyncBatchNorm_): def _check_input_dim(self, input): if (TORCH_VERSION == 'parrots'): if (input.dim() < 2): raise ValueError(f'expected at least 2D input (got {input.dim()}D input)') else: super()._check_input_dim(input)
def is_filepath(x): return (is_str(x) or isinstance(x, Path))