code
stringlengths
17
6.64M
class ModuleList(BaseModule, nn.ModuleList):
    """ModuleList in openmmlab.

    A drop-in replacement for ``nn.ModuleList`` that additionally carries
    the ``init_cfg`` machinery of :class:`BaseModule`.

    Args:
        modules (iterable, optional): an iterable of modules to add.
        init_cfg (dict, optional): Initialization config dict.
    """

    def __init__(self, modules=None, init_cfg=None):
        # Initialize both bases explicitly: BaseModule sets up init_cfg
        # bookkeeping, nn.ModuleList stores the submodules.
        BaseModule.__init__(self, init_cfg)
        nn.ModuleList.__init__(self, modules)
class ModuleDict(BaseModule, nn.ModuleDict):
    """ModuleDict in openmmlab.

    A drop-in replacement for ``nn.ModuleDict`` that additionally carries
    the ``init_cfg`` machinery of :class:`BaseModule`.

    Args:
        modules (dict, optional): a mapping (dictionary) of (string: module)
            or an iterable of key-value pairs of type (string, module).
        init_cfg (dict, optional): Initialization config dict.
    """

    def __init__(self, modules=None, init_cfg=None):
        # Initialize both bases explicitly: BaseModule sets up init_cfg
        # bookkeeping, nn.ModuleDict stores the submodules.
        BaseModule.__init__(self, init_cfg)
        nn.ModuleDict.__init__(self, modules)
class BaseRunner(metaclass=ABCMeta):
    """The base class of Runner, a training helper for PyTorch.

    All subclasses should implement the following APIs:

    - ``run()``
    - ``train()``
    - ``val()``
    - ``save_checkpoint()``

    Args:
        model (:obj:`torch.nn.Module`): The model to be run.
        batch_processor (callable): A callable method that process a data
            batch. The interface of this method should be
            ``batch_processor(model, data, train_mode) -> dict``.
        optimizer (dict or :obj:`torch.optim.Optimizer`): It can be either an
            optimizer (in most cases) or a dict of optimizers (in models that
            requires more than one optimizer, e.g., GAN).
        work_dir (str, optional): The working directory to save checkpoints
            and logs. Defaults to None.
        logger (:obj:`logging.Logger`): Logger used during training.
            Defaults to None. (The default value is just for backward
            compatibility)
        meta (dict | None): A dict records some import information such as
            environment info and seed, which will be logged in logger hook.
            Defaults to None.
        max_epochs (int, optional): Total training epochs.
        max_iters (int, optional): Total training iterations.
    """

    def __init__(self,
                 model,
                 batch_processor=None,
                 optimizer=None,
                 work_dir=None,
                 logger=None,
                 meta=None,
                 max_iters=None,
                 max_epochs=None):
        if batch_processor is not None:
            # batch_processor is a deprecated interface; the model should
            # provide train_step()/val_step() instead, and the two styles
            # are mutually exclusive.
            if not callable(batch_processor):
                raise TypeError('batch_processor must be callable, '
                                f'but got {type(batch_processor)}')
            warnings.warn(
                'batch_processor is deprecated, please implement '
                'train_step() and val_step() in the model instead.',
                DeprecationWarning)
            # Unwrap DP/DDP-style wrappers before probing for train_step.
            if is_module_wrapper(model):
                _model = model.module
            else:
                _model = model
            if hasattr(_model, 'train_step') or hasattr(_model, 'val_step'):
                raise RuntimeError(
                    'batch_processor and model.train_step()/model.val_step() '
                    'cannot be both available.')
        else:
            assert hasattr(model, 'train_step')

        # check the type of `optimizer`
        if isinstance(optimizer, dict):
            for name, optim in optimizer.items():
                if not isinstance(optim, Optimizer):
                    raise TypeError(
                        'optimizer must be a dict of torch.optim.Optimizers, '
                        f'but optimizer["{name}"] is a {type(optim)}')
        elif not isinstance(optimizer, Optimizer) and optimizer is not None:
            raise TypeError(
                'optimizer must be a torch.optim.Optimizer object or dict or '
                f'None, but got {type(optimizer)}')

        # check the type of `logger`
        if not isinstance(logger, logging.Logger):
            raise TypeError('logger must be a logging.Logger object, '
                            f'but got {type(logger)}')

        # check the type of `meta`
        if meta is not None and not isinstance(meta, dict):
            raise TypeError(
                f'meta must be a dict or None, but got {type(meta)}')

        self.model = model
        self.batch_processor = batch_processor
        self.optimizer = optimizer
        self.logger = logger
        self.meta = meta

        # create work_dir
        if mmcv.is_str(work_dir):
            self.work_dir = osp.abspath(work_dir)
            mmcv.mkdir_or_exist(self.work_dir)
        elif work_dir is None:
            self.work_dir = None
        else:
            raise TypeError('"work_dir" must be a str or None')

        # get model name from the model class
        if hasattr(self.model, 'module'):
            self._model_name = self.model.module.__class__.__name__
        else:
            self._model_name = self.model.__class__.__name__

        self._rank, self._world_size = get_dist_info()
        self.timestamp = get_time_str()
        self.mode = None
        self._hooks = []
        self._epoch = 0
        self._iter = 0
        self._inner_iter = 0

        # Epoch-based and iteration-based training are mutually exclusive.
        if max_epochs is not None and max_iters is not None:
            raise ValueError(
                'Only one of `max_epochs` or `max_iters` can be set.')
        self._max_epochs = max_epochs
        self._max_iters = max_iters
        # TODO: Redesign LogBuffer, it is not flexible and elegant enough
        self.log_buffer = LogBuffer()

    @property
    def model_name(self):
        """str: Name of the model, usually the module class name."""
        return self._model_name

    @property
    def rank(self):
        """int: Rank of current process. (distributed training)"""
        return self._rank

    @property
    def world_size(self):
        """int: Number of processes participating in the job.
        (distributed training)"""
        return self._world_size

    @property
    def hooks(self):
        """list[:obj:`Hook`]: A list of registered hooks."""
        return self._hooks

    @property
    def epoch(self):
        """int: Current epoch."""
        return self._epoch

    @property
    def iter(self):
        """int: Current iteration."""
        return self._iter

    @property
    def inner_iter(self):
        """int: Iteration in an epoch."""
        return self._inner_iter

    @property
    def max_epochs(self):
        """int: Maximum training epochs."""
        return self._max_epochs

    @property
    def max_iters(self):
        """int: Maximum training iterations."""
        return self._max_iters

    @abstractmethod
    def train(self):
        pass

    @abstractmethod
    def val(self):
        pass

    @abstractmethod
    def run(self, data_loaders, workflow, **kwargs):
        pass

    @abstractmethod
    def save_checkpoint(self,
                        out_dir,
                        filename_tmpl,
                        save_optimizer=True,
                        meta=None,
                        create_symlink=True):
        pass

    def current_lr(self):
        """Get current learning rates.

        Returns:
            list[float] | dict[str, list[float]]: Current learning rates of
            all param groups. If the runner has a dict of optimizers, this
            method will return a dict.
        """
        if isinstance(self.optimizer, torch.optim.Optimizer):
            lr = [group['lr'] for group in self.optimizer.param_groups]
        elif isinstance(self.optimizer, dict):
            lr = dict()
            for name, optim in self.optimizer.items():
                lr[name] = [group['lr'] for group in optim.param_groups]
        else:
            raise RuntimeError(
                'lr is not applicable because optimizer does not exist.')
        return lr

    def current_momentum(self):
        """Get current momentums.

        Returns:
            list[float] | dict[str, list[float]]: Current momentums of all
            param groups. If the runner has a dict of optimizers, this method
            will return a dict.
        """

        def _get_momentum(optimizer):
            # SGD-style optimizers expose 'momentum'; Adam-style expose
            # 'betas' whose first element plays the same role; 0 otherwise.
            momentums = []
            for group in optimizer.param_groups:
                if 'momentum' in group.keys():
                    momentums.append(group['momentum'])
                elif 'betas' in group.keys():
                    momentums.append(group['betas'][0])
                else:
                    momentums.append(0)
            return momentums

        if self.optimizer is None:
            raise RuntimeError(
                'momentum is not applicable because optimizer does not exist.')
        elif isinstance(self.optimizer, torch.optim.Optimizer):
            momentums = _get_momentum(self.optimizer)
        elif isinstance(self.optimizer, dict):
            momentums = dict()
            for name, optim in self.optimizer.items():
                momentums[name] = _get_momentum(optim)
        return momentums

    def register_hook(self, hook, priority='NORMAL'):
        """Register a hook into the hook list.

        The hook will be inserted into a priority queue, with the specified
        priority (See :class:`Priority` for details of priorities).
        For hooks with the same priority, they will be triggered in the same
        order as they are registered.

        Args:
            hook (:obj:`Hook`): The hook to be registered.
            priority (int or str or :obj:`Priority`): Hook priority.
                Lower value means higher priority.
        """
        assert isinstance(hook, Hook)
        if hasattr(hook, 'priority'):
            raise ValueError('"priority" is a reserved attribute for hooks')
        priority = get_priority(priority)
        hook.priority = priority
        # insert the hook to a sorted list: scan from the end so equal
        # priorities keep registration order.
        inserted = False
        for i in range(len(self._hooks) - 1, -1, -1):
            if priority >= self._hooks[i].priority:
                self._hooks.insert(i + 1, hook)
                inserted = True
                break
        if not inserted:
            self._hooks.insert(0, hook)

    def register_hook_from_cfg(self, hook_cfg):
        """Register a hook from its cfg.

        Args:
            hook_cfg (dict): Hook config. It should have at least keys 'type'
              and 'priority' indicating its type and priority.

        Note:
            The specific hook class to register should not use 'type' and
            'priority' arguments during initialization.
        """
        hook_cfg = hook_cfg.copy()
        priority = hook_cfg.pop('priority', 'NORMAL')
        hook = mmcv.build_from_cfg(hook_cfg, HOOKS)
        self.register_hook(hook, priority=priority)

    def call_hook(self, fn_name):
        """Call all hooks.

        Args:
            fn_name (str): The function name in each hook to be called, such
                as "before_train_epoch".
        """
        for hook in self._hooks:
            getattr(hook, fn_name)(self)

    def get_hook_info(self):
        """Return a human-readable summary of hooks grouped by trigger
        stage."""
        # Get hooks info in each stage
        stage_hook_map = {stage: [] for stage in Hook.stages}
        for hook in self.hooks:
            try:
                priority = Priority(hook.priority).name
            except ValueError:
                priority = hook.priority
            classname = hook.__class__.__name__
            hook_info = f'({priority:<12}) {classname:<35}'
            for trigger_stage in hook.get_triggered_stages():
                stage_hook_map[trigger_stage].append(hook_info)

        stage_hook_infos = []
        for stage in Hook.stages:
            hook_infos = stage_hook_map[stage]
            if len(hook_infos) > 0:
                info = f'{stage}:\n'
                info += '\n'.join(hook_infos)
                info += '\n -------------------- '
                stage_hook_infos.append(info)
        return '\n'.join(stage_hook_infos)

    def load_checkpoint(self,
                        filename,
                        map_location='cpu',
                        strict=False,
                        revise_keys=(('^module\\.', ''),)):
        # NOTE: the default used to be the mutable list [('^module.', '')];
        # it is now an immutable tuple (no shared-state surprises) and the
        # regex dot is escaped so only a literal 'module.' prefix is
        # stripped, consistent with the module-level load_checkpoint().
        return load_checkpoint(
            self.model,
            filename,
            map_location,
            strict,
            self.logger,
            revise_keys=revise_keys)

    def resume(self,
               checkpoint,
               resume_optimizer=True,
               map_location='default'):
        """Resume model, optimizer and bookkeeping state from a checkpoint."""
        if map_location == 'default':
            if torch.cuda.is_available():
                # Load directly onto the current CUDA device.
                device_id = torch.cuda.current_device()
                checkpoint = self.load_checkpoint(
                    checkpoint,
                    map_location=lambda storage, loc: storage.cuda(device_id))
            else:
                checkpoint = self.load_checkpoint(checkpoint)
        else:
            checkpoint = self.load_checkpoint(
                checkpoint, map_location=map_location)

        self._epoch = checkpoint['meta']['epoch']
        self._iter = checkpoint['meta']['iter']
        if self.meta is None:
            self.meta = {}
        self.meta.setdefault('hook_msgs', {})
        # load `last_ckpt`, `best_score`, `best_ckpt`, etc. for hook messages
        self.meta['hook_msgs'].update(checkpoint['meta'].get('hook_msgs', {}))

        # Re-calculate the number of iterations when resuming models with a
        # different number of GPUs.
        if 'config' in checkpoint['meta']:
            config = mmcv.Config.fromstring(
                checkpoint['meta']['config'], file_format='.py')
            previous_gpu_ids = config.get('gpu_ids', None)
            if (previous_gpu_ids and len(previous_gpu_ids) > 0
                    and len(previous_gpu_ids) != self.world_size):
                self._iter = int(self._iter * len(previous_gpu_ids) /
                                 self.world_size)
                self.logger.info('the iteration number is changed due to '
                                 'change of GPU number')

        # resume meta information
        self.meta = checkpoint['meta']

        if 'optimizer' in checkpoint and resume_optimizer:
            if isinstance(self.optimizer, Optimizer):
                self.optimizer.load_state_dict(checkpoint['optimizer'])
            elif isinstance(self.optimizer, dict):
                for k in self.optimizer.keys():
                    self.optimizer[k].load_state_dict(
                        checkpoint['optimizer'][k])
            else:
                raise TypeError(
                    'Optimizer should be dict or torch.optim.Optimizer '
                    f'but got {type(self.optimizer)}')

        self.logger.info('resumed epoch %d, iter %d', self.epoch, self.iter)

    def register_lr_hook(self, lr_config):
        """Register an LR updater hook built from config (or given
        directly)."""
        if lr_config is None:
            return
        elif isinstance(lr_config, dict):
            assert 'policy' in lr_config
            policy_type = lr_config.pop('policy')
            # If the type of policy is all in lower case, e.g., 'cyclic',
            # then its first letter will be capitalized, e.g., to be 'Cyclic'.
            # This is for the convenient usage of Lr updater.
            if policy_type == policy_type.lower():
                policy_type = policy_type.title()
            hook_type = policy_type + 'LrUpdaterHook'
            lr_config['type'] = hook_type
            hook = mmcv.build_from_cfg(lr_config, HOOKS)
        else:
            hook = lr_config
        self.register_hook(hook, priority='VERY_HIGH')

    def register_momentum_hook(self, momentum_config):
        """Register a momentum updater hook built from config (or given
        directly)."""
        if momentum_config is None:
            return
        if isinstance(momentum_config, dict):
            assert 'policy' in momentum_config
            policy_type = momentum_config.pop('policy')
            # Same lower-case -> Title convention as register_lr_hook.
            if policy_type == policy_type.lower():
                policy_type = policy_type.title()
            hook_type = policy_type + 'MomentumUpdaterHook'
            momentum_config['type'] = hook_type
            hook = mmcv.build_from_cfg(momentum_config, HOOKS)
        else:
            hook = momentum_config
        self.register_hook(hook, priority='HIGH')

    def register_optimizer_hook(self, optimizer_config):
        """Register an optimizer hook built from config (or given
        directly)."""
        if optimizer_config is None:
            return
        if isinstance(optimizer_config, dict):
            optimizer_config.setdefault('type', 'OptimizerHook')
            hook = mmcv.build_from_cfg(optimizer_config, HOOKS)
        else:
            hook = optimizer_config
        self.register_hook(hook, priority='ABOVE_NORMAL')

    def register_checkpoint_hook(self, checkpoint_config):
        """Register a checkpoint hook built from config (or given
        directly)."""
        if checkpoint_config is None:
            return
        if isinstance(checkpoint_config, dict):
            checkpoint_config.setdefault('type', 'CheckpointHook')
            hook = mmcv.build_from_cfg(checkpoint_config, HOOKS)
        else:
            hook = checkpoint_config
        self.register_hook(hook, priority='NORMAL')

    def register_logger_hooks(self, log_config):
        """Register one logger hook per entry in ``log_config['hooks']``."""
        if log_config is None:
            return
        log_interval = log_config['interval']
        for info in log_config['hooks']:
            logger_hook = mmcv.build_from_cfg(
                info, HOOKS, default_args=dict(interval=log_interval))
            self.register_hook(logger_hook, priority='VERY_LOW')

    def register_timer_hook(self, timer_config):
        """Register an iteration timer hook built from config (or given
        directly)."""
        if timer_config is None:
            return
        if isinstance(timer_config, dict):
            # Deep-copy so the caller's (possibly shared default) dict is
            # never mutated by build_from_cfg.
            timer_config_ = copy.deepcopy(timer_config)
            hook = mmcv.build_from_cfg(timer_config_, HOOKS)
        else:
            hook = timer_config
        self.register_hook(hook, priority='LOW')

    def register_custom_hooks(self, custom_config):
        """Register user-defined hooks (a single item or a list of items)."""
        if custom_config is None:
            return
        if not isinstance(custom_config, list):
            custom_config = [custom_config]
        for item in custom_config:
            if isinstance(item, dict):
                self.register_hook_from_cfg(item)
            else:
                self.register_hook(item, priority='NORMAL')

    def register_profiler_hook(self, profiler_config):
        """Register a profiler hook built from config (or given directly)."""
        if profiler_config is None:
            return
        if isinstance(profiler_config, dict):
            profiler_config.setdefault('type', 'ProfilerHook')
            hook = mmcv.build_from_cfg(profiler_config, HOOKS)
        else:
            hook = profiler_config
        self.register_hook(hook)

    def register_training_hooks(self,
                                lr_config,
                                optimizer_config=None,
                                checkpoint_config=None,
                                log_config=None,
                                momentum_config=None,
                                timer_config=dict(type='IterTimerHook'),
                                custom_hooks_config=None):
        """Register default and custom hooks for training.

        Default and custom hooks include:

        +----------------------+-------------------------+
        | Hooks                | Priority                |
        +======================+=========================+
        | LrUpdaterHook        | VERY_HIGH (10)          |
        +----------------------+-------------------------+
        | MomentumUpdaterHook  | HIGH (30)               |
        +----------------------+-------------------------+
        | OptimizerStepperHook | ABOVE_NORMAL (40)       |
        +----------------------+-------------------------+
        | CheckpointSaverHook  | NORMAL (50)             |
        +----------------------+-------------------------+
        | IterTimerHook        | LOW (70)                |
        +----------------------+-------------------------+
        | LoggerHook(s)        | VERY_LOW (90)           |
        +----------------------+-------------------------+
        | CustomHook(s)        | defaults to NORMAL (50) |
        +----------------------+-------------------------+

        If custom hooks have same priority with default hooks, custom hooks
        will be triggered after default hooks.
        """
        # NOTE: the dict default for timer_config is intentional and safe —
        # register_timer_hook deep-copies it before building the hook.
        self.register_lr_hook(lr_config)
        self.register_momentum_hook(momentum_config)
        self.register_optimizer_hook(optimizer_config)
        self.register_checkpoint_hook(checkpoint_config)
        self.register_timer_hook(timer_config)
        self.register_logger_hooks(log_config)
        self.register_custom_hooks(custom_hooks_config)
def build_runner_constructor(cfg):
    """Build a runner constructor from the given config via the
    ``RUNNER_BUILDERS`` registry."""
    return RUNNER_BUILDERS.build(cfg)
def build_runner(cfg, default_args=None):
    """Build a runner from a config dict.

    The (deep-copied) config may carry a 'constructor' key selecting the
    runner-constructor type; it defaults to 'DefaultRunnerConstructor'.
    """
    runner_cfg = copy.deepcopy(cfg)
    constructor_type = runner_cfg.pop('constructor',
                                      'DefaultRunnerConstructor')
    runner_constructor = build_runner_constructor(
        dict(
            type=constructor_type,
            runner_cfg=runner_cfg,
            default_args=default_args))
    return runner_constructor()
def _get_mmcv_home():
    """Return the mmcv cache directory, creating it if necessary.

    Resolution order: $ENV_MMCV_HOME, then $ENV_XDG_CACHE_HOME (or the
    default cache dir) joined with 'mmcv'.
    """
    fallback = os.path.join(
        os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR), 'mmcv')
    mmcv_home = os.path.expanduser(os.getenv(ENV_MMCV_HOME, fallback))
    mkdir_or_exist(mmcv_home)
    return mmcv_home
def load_state_dict(module, state_dict, strict=False, logger=None):
    """Load state_dict to a module.

    This method is modified from :meth:`torch.nn.Module.load_state_dict`.
    Default value for ``strict`` is set to ``False`` and the message for
    param mismatch will be shown even if strict is False.

    Args:
        module (Module): Module that receives the state_dict.
        state_dict (OrderedDict): Weights.
        strict (bool): whether to strictly enforce that the keys
            in :attr:`state_dict` match the keys returned by this module's
            :meth:`~torch.nn.Module.state_dict` function. Default: ``False``.
        logger (:obj:`logging.Logger`, optional): Logger to log the error
            message. If not specified, print function will be used.
    """
    unexpected_keys = []
    all_missing_keys = []
    err_msg = []

    metadata = getattr(state_dict, '_metadata', None)
    state_dict = state_dict.copy()
    if metadata is not None:
        state_dict._metadata = metadata

    def load(module, prefix=''):
        # Recursively check parallel module in case that the model has a
        # complicated structure, e.g., nn.Module(nn.Module(DDP)).
        if is_module_wrapper(module):
            module = module.module
        local_metadata = ({} if metadata is None else
                          metadata.get(prefix[:-1], {}))
        module._load_from_state_dict(state_dict, prefix, local_metadata, True,
                                     all_missing_keys, unexpected_keys,
                                     err_msg)
        for name, child in module._modules.items():
            if child is not None:
                load(child, prefix + name + '.')

    load(module)
    # Break load->load reference cycle so the closure can be collected.
    load = None

    # ignore "num_batches_tracked" of BN layers
    missing_keys = [
        key for key in all_missing_keys if 'num_batches_tracked' not in key
    ]

    if unexpected_keys:
        err_msg.append('unexpected key in source state_dict: '
                       f"{', '.join(unexpected_keys)}\n")
    if missing_keys:
        err_msg.append('missing keys in source state_dict: '
                       f"{', '.join(missing_keys)}\n")

    rank, _ = get_dist_info()
    # Only rank 0 reports mismatches to avoid duplicated logs.
    if len(err_msg) > 0 and rank == 0:
        err_msg.insert(
            0, 'The model and loaded state dict do not match exactly\n')
        err_msg = '\n'.join(err_msg)
        if strict:
            raise RuntimeError(err_msg)
        elif logger is not None:
            logger.warning(err_msg)
        else:
            print(err_msg)
def get_torchvision_models():
    """Collect the ``model_urls`` dicts from all non-package torchvision
    model modules into one mapping of model name -> URL."""
    model_urls = dict()
    for _, name, ispkg in pkgutil.walk_packages(torchvision.models.__path__):
        if ispkg:
            # sub-packages (e.g. detection) carry no top-level model_urls
            continue
        zoo_module = import_module(f'torchvision.models.{name}')
        if hasattr(zoo_module, 'model_urls'):
            model_urls.update(getattr(zoo_module, 'model_urls'))
    return model_urls
def get_external_models():
    """Load the open_mmlab model-zoo URL mapping.

    The default JSON shipped with mmcv is loaded first; a user-provided
    ``open_mmlab.json`` under the mmcv home directory overrides entries.
    """
    mmcv_home = _get_mmcv_home()
    default_json_path = osp.join(mmcv.__path__[0],
                                 'model_zoo/open_mmlab.json')
    default_urls = load_file(default_json_path)
    assert isinstance(default_urls, dict)
    external_json_path = osp.join(mmcv_home, 'open_mmlab.json')
    if osp.exists(external_json_path):
        external_urls = load_file(external_json_path)
        assert isinstance(external_urls, dict)
        # user entries take precedence over the bundled defaults
        default_urls.update(external_urls)
    return default_urls
def get_mmcls_models():
    """Load the mmcls model-zoo URL mapping bundled with mmcv."""
    mmcls_json_path = osp.join(mmcv.__path__[0], 'model_zoo/mmcls.json')
    return load_file(mmcls_json_path)
def get_deprecated_model_names():
    """Load the deprecated-model-name mapping (old name -> new name)
    bundled with mmcv."""
    deprecate_json_path = osp.join(mmcv.__path__[0],
                                   'model_zoo/deprecated.json')
    deprecate_urls = load_file(deprecate_json_path)
    assert isinstance(deprecate_urls, dict)
    return deprecate_urls
def _process_mmcls_checkpoint(checkpoint): if ('state_dict' in checkpoint): state_dict = checkpoint['state_dict'] else: state_dict = checkpoint new_state_dict = OrderedDict() for (k, v) in state_dict.items(): if k.startswith('backbone.'): new_state_dict[k[9:]] = v new_checkpoint = dict(state_dict=new_state_dict) return new_checkpoint
class CheckpointLoader:
    """A general checkpoint loader to manage all schemes."""

    # mapping of URI-prefix pattern -> loader callable, kept sorted so that
    # longer (more specific) prefixes are matched first
    _schemes = {}

    @classmethod
    def _register_scheme(cls, prefixes, loader, force=False):
        # Internal worker: normalize prefixes, install the loader, re-sort.
        if isinstance(prefixes, str):
            prefixes = [prefixes]
        else:
            assert isinstance(prefixes, (list, tuple))
        for prefix in prefixes:
            if prefix in cls._schemes and not force:
                raise KeyError(
                    f'{prefix} is already registered as a loader backend, '
                    'add "force=True" if you want to override it')
            cls._schemes[prefix] = loader
        # sort, longer prefixes take priority
        cls._schemes = OrderedDict(
            sorted(cls._schemes.items(), key=lambda t: t[0], reverse=True))

    @classmethod
    def register_scheme(cls, prefixes, loader=None, force=False):
        """Register a loader to CheckpointLoader.

        This method can be used as a normal class method or a decorator.

        Args:
            prefixes (str or list[str] or tuple[str]):
                The prefix of the registered loader.
            loader (function, optional): The loader function to be registered.
                When this method is used as a decorator, loader is None.
                Defaults to None.
            force (bool, optional): Whether to override the loader
                if the prefix has already been registered. Defaults to False.
        """
        if loader is not None:
            # plain class-method usage
            cls._register_scheme(prefixes, loader, force=force)
            return

        # decorator usage
        def _register(loader_cls):
            cls._register_scheme(prefixes, loader_cls, force=force)
            return loader_cls

        return _register

    @classmethod
    def _get_checkpoint_loader(cls, path):
        """Finds a loader that supports the given path. Falls back to the
        local loader if no other loader is found.

        Args:
            path (str): checkpoint path

        Returns:
            callable: checkpoint loader
        """
        # first match wins; _schemes is sorted longest-prefix-first
        for pattern in cls._schemes:
            if re.match(pattern, path) is not None:
                return cls._schemes[pattern]

    @classmethod
    def load_checkpoint(cls, filename, map_location=None, logger=None):
        """load checkpoint through URL scheme path.

        Args:
            filename (str): checkpoint file name with given prefix
            map_location (str, optional): Same as :func:`torch.load`.
                Default: None
            logger (:mod:`logging.Logger`, optional): The logger for message.
                Default: None

        Returns:
            dict or OrderedDict: The loaded checkpoint.
        """
        checkpoint_loader = cls._get_checkpoint_loader(filename)
        class_name = checkpoint_loader.__name__
        mmcv.print_log(
            f'load checkpoint from {class_name[10:]} path: (unknown)', logger)
        return checkpoint_loader(filename, map_location)
@CheckpointLoader.register_scheme(prefixes='')
def load_from_local(filename, map_location):
    """load checkpoint by local file path.

    Args:
        filename (str): local checkpoint file path
        map_location (str, optional): Same as :func:`torch.load`.

    Returns:
        dict or OrderedDict: The loaded checkpoint.
    """
    filename = osp.expanduser(filename)
    if not osp.isfile(filename):
        raise FileNotFoundError(f'(unknown) can not be found.')
    return torch.load(filename, map_location=map_location)
@CheckpointLoader.register_scheme(prefixes=('http://', 'https://'))
def load_from_http(filename, map_location=None, model_dir=None):
    """load checkpoint through HTTP or HTTPS scheme path. In distributed
    setting, this function only download checkpoint at local rank 0.

    Args:
        filename (str): checkpoint file path with modelzoo or
            torchvision prefix
        map_location (str, optional): Same as :func:`torch.load`.
        model_dir (string, optional): directory in which to save the object,
            Default: None

    Returns:
        dict or OrderedDict: The loaded checkpoint.
    """
    rank, world_size = get_dist_info()
    # rank 0 downloads first so the file lands in the shared cache ...
    if rank == 0:
        checkpoint = load_url(
            filename, model_dir=model_dir, map_location=map_location)
    if world_size > 1:
        # ... then the other ranks load it after the barrier.
        torch.distributed.barrier()
        if rank > 0:
            checkpoint = load_url(
                filename, model_dir=model_dir, map_location=map_location)
    return checkpoint
@CheckpointLoader.register_scheme(prefixes='pavi://')
def load_from_pavi(filename, map_location=None):
    """load checkpoint through the file path prefixed with pavi. In
    distributed setting, this function download ckpt at all ranks to
    different temporary directories.

    Args:
        filename (str): checkpoint file path with pavi prefix
        map_location (str, optional): Same as :func:`torch.load`.
            Default: None

    Returns:
        dict or OrderedDict: The loaded checkpoint.
    """
    assert filename.startswith('pavi://'), \
        f'Expected filename startswith `pavi://`, but get (unknown)'
    model_path = filename[7:]

    try:
        from pavi import modelcloud
    except ImportError:
        raise ImportError(
            'Please install pavi to load checkpoint from modelcloud.')

    model = modelcloud.get(model_path)
    # download into a throwaway directory, load, then clean up
    with TemporaryDirectory() as tmp_dir:
        downloaded_file = osp.join(tmp_dir, model.name)
        model.download(downloaded_file)
        checkpoint = torch.load(downloaded_file, map_location=map_location)
    return checkpoint
@CheckpointLoader.register_scheme(prefixes='(\\S+\\:)?s3://')
def load_from_ceph(filename, map_location=None, backend='petrel'):
    """load checkpoint through the file path prefixed with s3. In distributed
    setting, this function download ckpt at all ranks to different temporary
    directories.

    Note:
        Since v1.4.1, the registered scheme prefixes have been enhanced to
        support bucket names in the path prefix, e.g. 's3://xx.xx/xx.path',
        'bucket1:s3://xx.xx/xx.path'.

    Args:
        filename (str): checkpoint file path with s3 prefix
        map_location (str, optional): Same as :func:`torch.load`.
        backend (str, optional): The storage backend type. Options are 'ceph',
            'petrel'. Default: 'petrel'.

    .. warning::
        :class:`mmcv.fileio.file_client.CephBackend` will be deprecated,
        please use :class:`mmcv.fileio.file_client.PetrelBackend` instead.

    Returns:
        dict or OrderedDict: The loaded checkpoint.
    """
    allowed_backends = ['ceph', 'petrel']
    if backend not in allowed_backends:
        raise ValueError(f'Load from Backend {backend} is not supported.')
    if backend == 'ceph':
        warnings.warn(
            'CephBackend will be deprecated, please use PetrelBackend instead',
            DeprecationWarning)

    # If the requested backend's client library is missing, fall back to the
    # other allowed backend.
    try:
        file_client = FileClient(backend=backend)
    except ImportError:
        allowed_backends.remove(backend)
        file_client = FileClient(backend=allowed_backends[0])

    with io.BytesIO(file_client.get(filename)) as buffer:
        checkpoint = torch.load(buffer, map_location=map_location)
    return checkpoint
@CheckpointLoader.register_scheme(prefixes=('modelzoo://', 'torchvision://'))
def load_from_torchvision(filename, map_location=None):
    """load checkpoint through the file path prefixed with modelzoo or
    torchvision.

    Args:
        filename (str): checkpoint file path with modelzoo or
            torchvision prefix
        map_location (str, optional): Same as :func:`torch.load`.

    Returns:
        dict or OrderedDict: The loaded checkpoint.
    """
    model_urls = get_torchvision_models()
    if filename.startswith('modelzoo://'):
        warnings.warn(
            'The URL scheme of "modelzoo://" is deprecated, please '
            'use "torchvision://" instead', DeprecationWarning)
        model_name = filename[11:]  # len('modelzoo://')
    else:
        model_name = filename[14:]  # len('torchvision://')
    return load_from_http(model_urls[model_name], map_location=map_location)
@CheckpointLoader.register_scheme(prefixes=('open-mmlab://', 'openmmlab://'))
def load_from_openmmlab(filename, map_location=None):
    """load checkpoint through the file path prefixed with open-mmlab or
    openmmlab.

    Args:
        filename (str): checkpoint file path with open-mmlab or
            openmmlab prefix
        map_location (str, optional): Same as :func:`torch.load`.
            Default: None

    Returns:
        dict or OrderedDict: The loaded checkpoint.
    """
    model_urls = get_external_models()
    prefix_str = 'open-mmlab://'
    if filename.startswith(prefix_str):
        model_name = filename[13:]  # len('open-mmlab://')
    else:
        model_name = filename[12:]  # len('openmmlab://')
        prefix_str = 'openmmlab://'

    # redirect deprecated names to their replacements
    deprecated_urls = get_deprecated_model_names()
    if model_name in deprecated_urls:
        warnings.warn(
            f'{prefix_str}{model_name} is deprecated in favor of '
            f'{prefix_str}{deprecated_urls[model_name]}', DeprecationWarning)
        model_name = deprecated_urls[model_name]

    model_url = model_urls[model_name]
    # the zoo entry is either a URL or a path relative to the mmcv home
    if model_url.startswith(('http://', 'https://')):
        checkpoint = load_from_http(model_url, map_location=map_location)
    else:
        filename = osp.join(_get_mmcv_home(), model_url)
        if not osp.isfile(filename):
            raise FileNotFoundError(f'(unknown) can not be found.')
        checkpoint = torch.load(filename, map_location=map_location)
    return checkpoint
@CheckpointLoader.register_scheme(prefixes='mmcls://')
def load_from_mmcls(filename, map_location=None):
    """load checkpoint through the file path prefixed with mmcls.

    Args:
        filename (str): checkpoint file path with mmcls prefix
        map_location (str, optional): Same as :func:`torch.load`.

    Returns:
        dict or OrderedDict: The loaded checkpoint.
    """
    model_urls = get_mmcls_models()
    model_name = filename[8:]  # len('mmcls://')
    checkpoint = load_from_http(
        model_urls[model_name], map_location=map_location)
    # strip the 'backbone.' prefix so the weights load into a bare backbone
    return _process_mmcls_checkpoint(checkpoint)
def _load_checkpoint(filename, map_location=None, logger=None):
    """Load checkpoint from somewhere (modelzoo, file, url).

    Args:
        filename (str): Accept local filepath, URL, ``torchvision://xxx``,
            ``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for
            details.
        map_location (str, optional): Same as :func:`torch.load`.
            Default: None.
        logger (:mod:`logging.Logger`, optional): The logger for error
            message. Default: None

    Returns:
        dict or OrderedDict: The loaded checkpoint. It can be either an
        OrderedDict storing model weights or a dict containing other
        information, which depends on the checkpoint.
    """
    # Dispatch to whichever scheme loader matches the filename prefix.
    return CheckpointLoader.load_checkpoint(filename, map_location, logger)
def _load_checkpoint_with_prefix(prefix, filename, map_location=None):
    """Load partial pretrained model with specific prefix.

    Args:
        prefix (str): The prefix of sub-module.
        filename (str): Accept local filepath, URL, ``torchvision://xxx``,
            ``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for
            details.
        map_location (str | None): Same as :func:`torch.load`. Default: None.

    Returns:
        dict or OrderedDict: The loaded checkpoint.
    """
    checkpoint = _load_checkpoint(filename, map_location=map_location)

    if 'state_dict' in checkpoint:
        state_dict = checkpoint['state_dict']
    else:
        state_dict = checkpoint

    # normalize the prefix to always end with a dot before stripping it
    if not prefix.endswith('.'):
        prefix += '.'
    prefix_len = len(prefix)
    state_dict = {
        key[prefix_len:]: value
        for key, value in state_dict.items() if key.startswith(prefix)
    }

    assert state_dict, f'{prefix} is not in the pretrained model'
    return state_dict
def load_checkpoint(model,
                    filename,
                    map_location=None,
                    strict=False,
                    logger=None,
                    revise_keys=(('^module\\.', ''),)):
    """Load checkpoint from a file or URI.

    Args:
        model (Module): Module to load checkpoint.
        filename (str): Accept local filepath, URL, ``torchvision://xxx``,
            ``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for
            details.
        map_location (str): Same as :func:`torch.load`.
        strict (bool): Whether to allow different params for the model and
            checkpoint.
        logger (:mod:`logging.Logger` or None): The logger for error message.
        revise_keys (list): A list of customized keywords to modify the
            state_dict in checkpoint. Each item is a (pattern, replacement)
            pair of the regular expression operations. Default: strip
            the prefix 'module.' by [(r'^module\\.', '')].

    Returns:
        dict or OrderedDict: The loaded checkpoint.
    """
    # NOTE: the default was the mutable list [('^module\\.', '')]; a shared
    # mutable default is a footgun (one caller mutating it would change the
    # behavior for every later call), so it is now an equivalent tuple.
    checkpoint = _load_checkpoint(filename, map_location, logger)
    # OrderedDict is a subclass of dict
    if not isinstance(checkpoint, dict):
        raise RuntimeError(
            f'No state_dict found in checkpoint file (unknown)')
    # get state_dict from checkpoint
    if 'state_dict' in checkpoint:
        state_dict = checkpoint['state_dict']
    else:
        state_dict = checkpoint

    # strip prefix of state_dict while preserving its _metadata
    metadata = getattr(state_dict, '_metadata', OrderedDict())
    for p, r in revise_keys:
        state_dict = OrderedDict(
            {re.sub(p, r, k): v
             for k, v in state_dict.items()})
    # Keep metadata in state_dict
    state_dict._metadata = metadata

    # load state_dict
    load_state_dict(model, state_dict, strict, logger)
    return checkpoint
def weights_to_cpu(state_dict):
    """Copy a model state_dict to cpu.

    Args:
        state_dict (OrderedDict): Model weights on GPU.

    Returns:
        OrderedDict: Model weights on CPU, with ``_metadata`` carried over.
    """
    state_dict_cpu = OrderedDict(
        (key, val.cpu()) for key, val in state_dict.items())
    # _metadata is attached as an attribute, not a key, so copy it explicitly
    state_dict_cpu._metadata = getattr(state_dict, '_metadata', OrderedDict())
    return state_dict_cpu
def _save_to_state_dict(module, destination, prefix, keep_vars): 'Saves module state to `destination` dictionary.\n\n This method is modified from :meth:`torch.nn.Module._save_to_state_dict`.\n\n Args:\n module (nn.Module): The module to generate state_dict.\n destination (dict): A dict where state will be stored.\n prefix (str): The prefix for parameters and buffers used in this\n module.\n ' for (name, param) in module._parameters.items(): if (param is not None): destination[(prefix + name)] = (param if keep_vars else param.detach()) for (name, buf) in module._buffers.items(): if (buf is not None): destination[(prefix + name)] = (buf if keep_vars else buf.detach())
def get_state_dict(module, destination=None, prefix='', keep_vars=False):
    """Returns a dictionary containing a whole state of the module.

    Both parameters and persistent buffers (e.g. running averages) are
    included. Keys are corresponding parameter and buffer names.

    This method is modified from :meth:`torch.nn.Module.state_dict` to
    recursively check parallel module in case that the model has a
    complicated structure, e.g., nn.Module(nn.Module(DDP)).

    Args:
        module (nn.Module): The module to generate state_dict.
        destination (OrderedDict): Returned dict for the state of the
            module.
        prefix (str): Prefix of the key.
        keep_vars (bool): Whether to keep the variable property of the
            parameters. Default: False.

    Returns:
        dict: A dictionary containing a whole state of the module.
    """
    # recursively check parallel module in case that the model has a
    # complicated structure, e.g., nn.Module(nn.Module(DDP))
    if is_module_wrapper(module):
        module = module.module

    # below is the same as torch.nn.Module.state_dict()
    if destination is None:
        destination = OrderedDict()
        destination._metadata = OrderedDict()
    destination._metadata[prefix[:-1]] = local_metadata = dict(
        version=module._version)
    _save_to_state_dict(module, destination, prefix, keep_vars)
    for name, child in module._modules.items():
        if child is not None:
            get_state_dict(
                child, destination, prefix + name + '.', keep_vars=keep_vars)
    # state-dict hooks may replace the destination wholesale
    for hook in module._state_dict_hooks.values():
        result = hook(module, destination, prefix, local_metadata)
        if result is not None:
            destination = result
    return destination
def save_checkpoint(model, filename, optimizer=None, meta=None, file_client_args=None):
    """Save checkpoint to file.

    The checkpoint will have 3 fields: ``meta``, ``state_dict`` and
    ``optimizer``. By default ``meta`` will contain version and time info.

    Args:
        model (Module): Module whose params are to be saved.
        filename (str): Checkpoint filename. A ``pavi://`` prefix routes the
            checkpoint to pavi modelcloud instead of a FileClient backend.
        optimizer (:obj:`Optimizer`, optional): Optimizer to be saved.
        meta (dict, optional): Metadata to be saved in checkpoint.
        file_client_args (dict, optional): Arguments to instantiate a
            FileClient. See :class:`mmcv.fileio.FileClient` for details.
            Default: None.
            `New in version 1.3.16.`
    """
    if meta is None:
        meta = {}
    elif not isinstance(meta, dict):
        raise TypeError(f'meta must be a dict or None, but got {type(meta)}')
    # Always record the mmcv version and wall-clock save time.
    meta.update(mmcv_version=mmcv.__version__, time=time.asctime())
    # Unwrap DataParallel/DDP so saved keys have no 'module.' prefix.
    if is_module_wrapper(model):
        model = model.module
    # Persist the class names so downstream tools can label predictions.
    if hasattr(model, 'CLASSES') and model.CLASSES is not None:
        meta.update(CLASSES=model.CLASSES)
    # Move weights to CPU so the checkpoint loads on machines without GPUs.
    checkpoint = {'meta': meta, 'state_dict': weights_to_cpu(get_state_dict(model))}
    if isinstance(optimizer, Optimizer):
        checkpoint['optimizer'] = optimizer.state_dict()
    elif isinstance(optimizer, dict):
        # Multiple optimizers (e.g. GANs): save one state dict per name.
        checkpoint['optimizer'] = {}
        for name, optim in optimizer.items():
            checkpoint['optimizer'][name] = optim.state_dict()
    if filename.startswith('pavi://'):
        # pavi manages its own storage, so a FileClient config is invalid here.
        # NOTE(review): message below lacks a space between 'with' and the
        # quoted prefix -- left unchanged (runtime string).
        if file_client_args is not None:
            raise ValueError(f'file_client_args should be "None" if filename starts with"pavi://", but got {file_client_args}')
        try:
            from pavi import exception, modelcloud
        except ImportError:
            # NOTE(review): message says 'load' although this is the save
            # path -- left unchanged (runtime string).
            raise ImportError('Please install pavi to load checkpoint from modelcloud.')
        model_path = filename[7:]  # strip the 'pavi://' scheme
        root = modelcloud.Folder()
        model_dir, model_name = osp.split(model_path)
        try:
            model = modelcloud.get(model_dir)
        except exception.NodeNotFoundError:
            # Target folder does not exist yet; create it on the fly.
            model = root.create_training_model(model_dir)
        # Serialize locally first, then upload; pavi has no stream upload here.
        with TemporaryDirectory() as tmp_dir:
            checkpoint_file = osp.join(tmp_dir, model_name)
            with open(checkpoint_file, 'wb') as f:
                torch.save(checkpoint, f)
                f.flush()
            model.create_file(checkpoint_file, name=model_name)
    else:
        # Generic backends (disk, ceph, ...): serialize to memory and hand
        # the bytes to the inferred FileClient.
        file_client = FileClient.infer_client(file_client_args, filename)
        with io.BytesIO() as f:
            torch.save(checkpoint, f)
            file_client.put(f.getvalue(), filename)
@RUNNER_BUILDERS.register_module()
class DefaultRunnerConstructor:
    """Default constructor for runners.

    Customize an existing `Runner` like `EpochBasedRunner` through a
    `RunnerConstructor`. For example, we can inject some new properties
    and functions for `Runner`.

    Example:
        >>> from mmcv.runner import RUNNER_BUILDERS, build_runner
        >>> # Define a new RunnerConstructor
        >>> @RUNNER_BUILDERS.register_module()
        >>> class MyRunnerConstructor:
        ...     def __init__(self, runner_cfg, default_args=None):
        ...         if not isinstance(runner_cfg, dict):
        ...             raise TypeError(f'runner_cfg should be a dict, '
        ...                             f'but got {type(runner_cfg)}')
        ...         self.runner_cfg = runner_cfg
        ...         self.default_args = default_args
        ...
        ...     def __call__(self):
        ...         runner = RUNNERS.build(self.runner_cfg,
        ...                                default_args=self.default_args)
        ...         # Add new properties for existing runner
        ...         runner.my_name = 'my_runner'
        ...         runner.my_function = lambda self: print(self.my_name)
        ...         ...
        >>> # build your runner
        >>> runner_cfg = dict(type='EpochBasedRunner', max_epochs=40,
        ...                   constructor='MyRunnerConstructor')
        >>> runner = build_runner(runner_cfg)

    Args:
        runner_cfg (dict): Config used to build the runner.
        default_args (dict, optional): Extra kwargs forwarded to the runner
            registry at build time. Defaults to None.
    """

    def __init__(self, runner_cfg, default_args=None):
        if not isinstance(runner_cfg, dict):
            # Bug fix: the original passed two arguments to TypeError, which
            # renders as a tuple instead of a readable sentence.
            raise TypeError(f'runner_cfg should be a dict, but got {type(runner_cfg)}')
        self.runner_cfg = runner_cfg
        self.default_args = default_args

    def __call__(self):
        return RUNNERS.build(self.runner_cfg, default_args=self.default_args)
def init_dist(launcher, backend='nccl', **kwargs):
    """Initialize a distributed environment for the given launcher.

    Args:
        launcher (str): One of 'pytorch', 'mpi' or 'slurm'.
        backend (str): torch.distributed backend name. Default: 'nccl'.
        **kwargs: Forwarded to the launcher-specific initializer.

    Raises:
        ValueError: If ``launcher`` is not a supported launcher type.
    """
    # Force the CUDA-safe 'spawn' start method before any process-group setup.
    if mp.get_start_method(allow_none=True) is None:
        mp.set_start_method('spawn')
    if launcher == 'slurm':
        _init_dist_slurm(backend, **kwargs)
    elif launcher == 'mpi':
        _init_dist_mpi(backend, **kwargs)
    elif launcher == 'pytorch':
        _init_dist_pytorch(backend, **kwargs)
    else:
        raise ValueError(f'Invalid launcher type: {launcher}')
def _init_dist_pytorch(backend, **kwargs):
    """Join a process group launched via torch.distributed.launch/torchrun.

    Reads the global rank from the ``RANK`` environment variable and binds
    this process to a local GPU before initializing the group.
    """
    rank = int(os.environ['RANK'])
    # Round-robin over local GPUs so each rank on a node gets its own device.
    torch.cuda.set_device(rank % torch.cuda.device_count())
    dist.init_process_group(backend=backend, **kwargs)
def _init_dist_mpi(backend, **kwargs):
    """Join a process group launched via OpenMPI (mpirun).

    Reads the global rank from ``OMPI_COMM_WORLD_RANK`` and binds this
    process to a local GPU before initializing the group.
    """
    rank = int(os.environ['OMPI_COMM_WORLD_RANK'])
    # Round-robin over local GPUs so each rank on a node gets its own device.
    torch.cuda.set_device(rank % torch.cuda.device_count())
    dist.init_process_group(backend=backend, **kwargs)
def _init_dist_slurm(backend, port=None): 'Initialize slurm distributed training environment.\n\n If argument ``port`` is not specified, then the master port will be system\n environment variable ``MASTER_PORT``. If ``MASTER_PORT`` is not in system\n environment variable, then a default port ``29500`` will be used.\n\n Args:\n backend (str): Backend of torch.distributed.\n port (int, optional): Master port. Defaults to None.\n ' proc_id = int(os.environ['SLURM_PROCID']) ntasks = int(os.environ['SLURM_NTASKS']) node_list = os.environ['SLURM_NODELIST'] num_gpus = torch.cuda.device_count() torch.cuda.set_device((proc_id % num_gpus)) addr = subprocess.getoutput(f'scontrol show hostname {node_list} | head -n1') if (port is not None): os.environ['MASTER_PORT'] = str(port) elif ('MASTER_PORT' in os.environ): pass else: os.environ['MASTER_PORT'] = '29500' if ('MASTER_ADDR' not in os.environ): os.environ['MASTER_ADDR'] = addr os.environ['WORLD_SIZE'] = str(ntasks) os.environ['LOCAL_RANK'] = str((proc_id % num_gpus)) os.environ['RANK'] = str(proc_id) dist.init_process_group(backend=backend)
def get_dist_info():
    """Return ``(rank, world_size)`` of the default process group.

    Falls back to ``(0, 1)`` when torch.distributed is unavailable or the
    process group has not been initialized (single-process runs).
    """
    if not (dist.is_available() and dist.is_initialized()):
        return 0, 1
    return dist.get_rank(), dist.get_world_size()
def master_only(func):
    """Decorator that executes ``func`` only on rank 0.

    On any other rank the call is a no-op returning ``None``, which confines
    side effects such as checkpoint writing or logging to a single process.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        rank, _ = get_dist_info()
        if rank != 0:
            return None
        return func(*args, **kwargs)

    return wrapper
def allreduce_params(params, coalesce=True, bucket_size_mb=-1):
    """Allreduce (average) parameters or buffers in-place across all ranks.

    Args:
        params (list[torch.Parameters]): List of parameters or buffers of a
            model.
        coalesce (bool, optional): Whether allreduce parameters as a whole.
            Defaults to True.
        bucket_size_mb (int, optional): Size of bucket, the unit is MB.
            Defaults to -1.
    """
    _, world_size = get_dist_info()
    # Nothing to synchronize in single-process runs.
    if world_size == 1:
        return
    tensors = [p.data for p in params]
    if coalesce:
        _allreduce_coalesced(tensors, world_size, bucket_size_mb)
    else:
        for tensor in tensors:
            dist.all_reduce(tensor.div_(world_size))
def allreduce_grads(params, coalesce=True, bucket_size_mb=-1):
    """Allreduce (average) gradients in-place across all ranks.

    Args:
        params (list[torch.Parameters]): List of parameters of a model.
        coalesce (bool, optional): Whether allreduce parameters as a whole.
            Defaults to True.
        bucket_size_mb (int, optional): Size of bucket, the unit is MB.
            Defaults to -1.
    """
    # Only parameters that actually received a gradient participate.
    grads = [
        p.grad.data for p in params
        if p.requires_grad and p.grad is not None
    ]
    _, world_size = get_dist_info()
    if world_size == 1:
        return
    if coalesce:
        _allreduce_coalesced(grads, world_size, bucket_size_mb)
    else:
        for grad in grads:
            dist.all_reduce(grad.div_(world_size))
def _allreduce_coalesced(tensors, world_size, bucket_size_mb=-1):
    """Allreduce tensors bucket-by-bucket to amortize communication cost.

    Tensors are grouped either by a byte budget (``bucket_size_mb > 0``) or
    by dtype, flattened into one contiguous blob per bucket, reduced once,
    averaged by ``world_size``, and copied back into the originals in-place.
    """
    if bucket_size_mb > 0:
        buckets = _take_tensors(tensors, bucket_size_mb * 1024 * 1024)
    else:
        # Group by dtype: dense flatten/unflatten needs homogeneous types.
        by_type = OrderedDict()
        for tensor in tensors:
            by_type.setdefault(tensor.type(), []).append(tensor)
        buckets = by_type.values()
    for bucket in buckets:
        flat = _flatten_dense_tensors(bucket)
        dist.all_reduce(flat)
        flat.div_(world_size)
        for original, synced in zip(bucket, _unflatten_dense_tensors(flat, bucket)):
            original.copy_(synced)
@RUNNERS.register_module()
class EpochBasedRunner(BaseRunner):
    """Epoch-based Runner.

    This runner train models epoch by epoch.
    """

    def run_iter(self, data_batch, train_mode, **kwargs):
        """Run a single train/val step on one batch.

        Dispatch priority: a legacy ``batch_processor`` callable first, then
        ``model.train_step``/``model.val_step``. The result dict is stored on
        ``self.outputs`` and its ``log_vars`` accumulated into the log buffer.
        """
        if self.batch_processor is not None:
            outputs = self.batch_processor(self.model, data_batch, train_mode=train_mode, **kwargs)
        elif train_mode:
            outputs = self.model.train_step(data_batch, self.optimizer, **kwargs)
        else:
            outputs = self.model.val_step(data_batch, self.optimizer, **kwargs)
        if not isinstance(outputs, dict):
            # NOTE(review): message is missing a space before 'and' -- left
            # byte-identical here (runtime string).
            raise TypeError('"batch_processor()" or "model.train_step()"and "model.val_step()" must return a dict')
        if 'log_vars' in outputs:
            self.log_buffer.update(outputs['log_vars'], outputs['num_samples'])
        self.outputs = outputs

    def train(self, data_loader, **kwargs):
        """Run one training epoch over ``data_loader``, advancing the global
        iteration and epoch counters."""
        self.model.train()
        self.mode = 'train'
        self.data_loader = data_loader
        # Refresh the total-iteration budget in case the loader length changed.
        self._max_iters = self._max_epochs * len(self.data_loader)
        self.call_hook('before_train_epoch')
        time.sleep(2)  # presumably to let hooks/worker threads settle -- TODO confirm
        for i, data_batch in enumerate(self.data_loader):
            self._inner_iter = i
            self.call_hook('before_train_iter')
            self.run_iter(data_batch, train_mode=True, **kwargs)
            self.call_hook('after_train_iter')
            self._iter += 1
        self.call_hook('after_train_epoch')
        self._epoch += 1

    @torch.no_grad()
    def val(self, data_loader, **kwargs):
        """Run one validation epoch over ``data_loader``.

        Runs under ``torch.no_grad()`` and does NOT advance the global
        iteration or epoch counters.
        """
        self.model.eval()
        self.mode = 'val'
        self.data_loader = data_loader
        self.call_hook('before_val_epoch')
        time.sleep(2)  # presumably to let hooks/worker threads settle -- TODO confirm
        for i, data_batch in enumerate(self.data_loader):
            self._inner_iter = i
            self.call_hook('before_val_iter')
            self.run_iter(data_batch, train_mode=False)
            self.call_hook('after_val_iter')
        self.call_hook('after_val_epoch')

    def run(self, data_loaders, workflow, max_epochs=None, **kwargs):
        """Start running.

        Args:
            data_loaders (list[:obj:`DataLoader`]): Dataloaders for training
                and validation.
            workflow (list[tuple]): A list of (phase, epochs) to specify the
                running order and epochs. E.g, [('train', 2), ('val', 1)]
                means running 2 epochs for training and 1 epoch for
                validation, iteratively.
            max_epochs (int, optional): Deprecated; set max_epochs on the
                runner config instead.
        """
        assert isinstance(data_loaders, list)
        assert mmcv.is_list_of(workflow, tuple)
        assert len(data_loaders) == len(workflow)
        if max_epochs is not None:
            warnings.warn('setting max_epochs in run is deprecated, please set max_epochs in runner_config', DeprecationWarning)
            self._max_epochs = max_epochs
        assert self._max_epochs is not None, 'max_epochs must be specified during instantiation'
        # Derive the global iteration budget from the first 'train' phase.
        for i, flow in enumerate(workflow):
            mode, epochs = flow
            if mode == 'train':
                self._max_iters = self._max_epochs * len(data_loaders[i])
                break
        work_dir = self.work_dir if self.work_dir is not None else 'NONE'
        self.logger.info('Start running, host: %s, work_dir: %s', get_host_info(), work_dir)
        self.logger.info('Hooks will be executed in the following order:\n%s', self.get_hook_info())
        self.logger.info('workflow: %s, max: %d epochs', workflow, self._max_epochs)
        self.call_hook('before_run')
        while self.epoch < self._max_epochs:
            for i, flow in enumerate(workflow):
                mode, epochs = flow
                # Each phase name must name a runner method (train/val) that
                # consumes exactly one epoch per call.
                if isinstance(mode, str):
                    if not hasattr(self, mode):
                        raise ValueError(f'runner has no method named "{mode}" to run an epoch')
                    epoch_runner = getattr(self, mode)
                else:
                    raise TypeError('mode in workflow must be a str, but got {}'.format(type(mode)))
                for _ in range(epochs):
                    # Stop mid-phase once the epoch budget is exhausted.
                    if mode == 'train' and self.epoch >= self._max_epochs:
                        break
                    epoch_runner(data_loaders[i], **kwargs)
        time.sleep(1)  # presumably to let hooks such as loggers flush -- TODO confirm
        self.call_hook('after_run')

    def save_checkpoint(self, out_dir, filename_tmpl='epoch_{}.pth', save_optimizer=True, meta=None, create_symlink=True):
        """Save the checkpoint.

        Args:
            out_dir (str): The directory that checkpoints are saved.
            filename_tmpl (str, optional): The checkpoint filename template,
                which contains a placeholder for the epoch number.
                Defaults to 'epoch_{}.pth'.
            save_optimizer (bool, optional): Whether to save the optimizer to
                the checkpoint. Defaults to True.
            meta (dict, optional): The meta information to be saved in the
                checkpoint. Defaults to None.
            create_symlink (bool, optional): Whether to create a symlink
                "latest.pth" to point to the latest checkpoint.
                Defaults to True.
        """
        if meta is None:
            meta = {}
        elif not isinstance(meta, dict):
            raise TypeError(f'meta should be a dict or None, but got {type(meta)}')
        if self.meta is not None:
            meta.update(self.meta)
        # Epoch is stored 1-based so that resuming continues after it.
        meta.update(epoch=self.epoch + 1, iter=self.iter)
        filename = filename_tmpl.format(self.epoch + 1)
        filepath = osp.join(out_dir, filename)
        optimizer = self.optimizer if save_optimizer else None
        save_checkpoint(self.model, filepath, optimizer=optimizer, meta=meta)
        if create_symlink:
            dst_file = osp.join(out_dir, 'latest.pth')
            # Windows has no reliable symlink support; copy instead.
            if platform.system() != 'Windows':
                mmcv.symlink(filename, dst_file)
            else:
                shutil.copy(filepath, dst_file)
@RUNNERS.register_module()
class Runner(EpochBasedRunner):
    """Deprecated alias of :class:`EpochBasedRunner`."""

    def __init__(self, *args, **kwargs):
        # Keep the historical name importable while steering users away.
        warnings.warn('Runner was deprecated, please use EpochBasedRunner instead', DeprecationWarning)
        super().__init__(*args, **kwargs)
def cast_tensor_type(inputs, src_type, dst_type):
    """Recursively convert Tensors in ``inputs`` from ``src_type`` to ``dst_type``.

    Only tensors whose dtype equals ``src_type`` are converted; every other
    tensor, and every non-tensor leaf, is returned unchanged. Mappings and
    iterables are rebuilt with their original container type.

    Note:
        In v1.4.4 and later, ``cast_tensor_type`` will only convert the
        torch.Tensor which is consistent with ``src_type`` to the
        ``dst_type``. Before v1.4.4, it ignored the ``src_type`` argument,
        converting e.g. integer tensors to half as well, which was not
        expected.

    Args:
        inputs: Inputs that to be casted.
        src_type (torch.dtype): Source type.
        dst_type (torch.dtype): Destination type.

    Returns:
        The same type with inputs, but all contained Tensors have been cast.
    """
    # Atom cases that must never be recursed into: modules are returned as-is,
    # and str/ndarray are iterable but treated as leaves.
    if isinstance(inputs, (nn.Module, str, np.ndarray)):
        return inputs
    if isinstance(inputs, torch.Tensor):
        return inputs.to(dst_type) if inputs.dtype == src_type else inputs
    if isinstance(inputs, abc.Mapping):
        return type(inputs)({key: cast_tensor_type(value, src_type, dst_type)
                             for key, value in inputs.items()})
    if isinstance(inputs, abc.Iterable):
        return type(inputs)(cast_tensor_type(item, src_type, dst_type)
                            for item in inputs)
    return inputs
def auto_fp16(apply_to=None, out_fp32=False):
    """Decorator to enable fp16 training automatically.

    This decorator is useful when you write custom modules and want to
    support mixed precision training. If inputs arguments are fp32 tensors,
    they will be converted to fp16 automatically. Arguments other than fp32
    tensors are ignored. If you are using PyTorch >= 1.6, torch.cuda.amp is
    used as the backend, otherwise, original mmcv implementation will be
    adopted.

    Args:
        apply_to (Iterable, optional): The argument names to be converted.
            `None` indicates all arguments.
        out_fp32 (bool): Whether to convert the output back to fp32.

    Example:

        >>> import torch.nn as nn
        >>> class MyModule1(nn.Module):
        >>>
        >>>     # Convert x and y to fp16
        >>>     @auto_fp16()
        >>>     def forward(self, x, y):
        >>>         pass

        >>> import torch.nn as nn
        >>> class MyModule2(nn.Module):
        >>>
        >>>     # convert pred to fp16
        >>>     @auto_fp16(apply_to=('pred', ))
        >>>     def do_something(self, pred, others):
        >>>         pass
    """
    def auto_fp16_wrapper(old_func):
        @functools.wraps(old_func)
        def new_func(*args, **kwargs):
            # args[0] must be `self`: the decorator relies on the module's
            # fp16_enabled flag to decide whether casting is active.
            if not isinstance(args[0], torch.nn.Module):
                raise TypeError('@auto_fp16 can only be used to decorate the method of nn.Module')
            if not (hasattr(args[0], 'fp16_enabled') and args[0].fp16_enabled):
                return old_func(*args, **kwargs)
            # Map positional arguments to their declared names so only the
            # requested (or all) arguments are cast fp32 -> fp16.
            args_info = getfullargspec(old_func)
            args_to_cast = args_info.args if apply_to is None else apply_to
            new_args = []
            if args:
                arg_names = args_info.args[:len(args)]
                for i, arg_name in enumerate(arg_names):
                    if arg_name in args_to_cast:
                        new_args.append(cast_tensor_type(args[i], torch.float, torch.half))
                    else:
                        new_args.append(args[i])
            new_kwargs = {}
            if kwargs:
                for arg_name, arg_value in kwargs.items():
                    if arg_name in args_to_cast:
                        new_kwargs[arg_name] = cast_tensor_type(arg_value, torch.float, torch.half)
                    else:
                        new_kwargs[arg_name] = arg_value
            # PyTorch >= 1.6: let torch.cuda.amp autocast handle op-level
            # precision; older versions rely purely on the casts above.
            if (TORCH_VERSION != 'parrots') and (digit_version(TORCH_VERSION) >= digit_version('1.6.0')):
                with autocast(enabled=True):
                    output = old_func(*new_args, **new_kwargs)
            else:
                output = old_func(*new_args, **new_kwargs)
            # Optionally restore fp32 outputs for numerically sensitive callers.
            if out_fp32:
                output = cast_tensor_type(output, torch.half, torch.float)
            return output
        return new_func
    return auto_fp16_wrapper
def force_fp32(apply_to=None, out_fp16=False):
    """Decorator to convert input arguments to fp32 in force.

    This decorator is useful when you write custom modules and want to
    support mixed precision training. If there are some inputs that must be
    processed in fp32 mode, then this decorator can handle it. If inputs
    arguments are fp16 tensors, they will be converted to fp32
    automatically. Arguments other than fp16 tensors are ignored. If you
    are using PyTorch >= 1.6, torch.cuda.amp is used as the backend,
    otherwise, original mmcv implementation will be adopted.

    Args:
        apply_to (Iterable, optional): The argument names to be converted.
            `None` indicates all arguments.
        out_fp16 (bool): Whether to convert the output back to fp16.

    Example:

        >>> import torch.nn as nn
        >>> class MyModule1(nn.Module):
        >>>
        >>>     # Convert x and y to fp32
        >>>     @force_fp32()
        >>>     def loss(self, x, y):
        >>>         pass

        >>> import torch.nn as nn
        >>> class MyModule2(nn.Module):
        >>>
        >>>     # convert pred to fp32
        >>>     @force_fp32(apply_to=('pred', ))
        >>>     def post_process(self, pred, others):
        >>>         pass
    """
    def force_fp32_wrapper(old_func):
        @functools.wraps(old_func)
        def new_func(*args, **kwargs):
            # args[0] must be `self`: casting only activates when the owning
            # module has opted into fp16 via its fp16_enabled flag.
            if not isinstance(args[0], torch.nn.Module):
                raise TypeError('@force_fp32 can only be used to decorate the method of nn.Module')
            if not (hasattr(args[0], 'fp16_enabled') and args[0].fp16_enabled):
                return old_func(*args, **kwargs)
            # Map positional arguments to their declared names so only the
            # requested (or all) arguments are cast fp16 -> fp32.
            args_info = getfullargspec(old_func)
            args_to_cast = args_info.args if apply_to is None else apply_to
            new_args = []
            if args:
                arg_names = args_info.args[:len(args)]
                for i, arg_name in enumerate(arg_names):
                    if arg_name in args_to_cast:
                        new_args.append(cast_tensor_type(args[i], torch.half, torch.float))
                    else:
                        new_args.append(args[i])
            new_kwargs = dict()
            if kwargs:
                for arg_name, arg_value in kwargs.items():
                    if arg_name in args_to_cast:
                        new_kwargs[arg_name] = cast_tensor_type(arg_value, torch.half, torch.float)
                    else:
                        new_kwargs[arg_name] = arg_value
            # PyTorch >= 1.6: disable autocast inside so the body really runs
            # in fp32; older versions rely purely on the casts above.
            if (TORCH_VERSION != 'parrots') and (digit_version(TORCH_VERSION) >= digit_version('1.6.0')):
                with autocast(enabled=False):
                    output = old_func(*new_args, **new_kwargs)
            else:
                output = old_func(*new_args, **new_kwargs)
            # Optionally hand fp16 back to the (mixed-precision) caller.
            if out_fp16:
                output = cast_tensor_type(output, torch.float, torch.half)
            return output
        return new_func
    return force_fp32_wrapper
def allreduce_grads(params, coalesce=True, bucket_size_mb=-1):
    """Deprecated wrapper around :func:`mmcv.runner.allreduce_grads`.

    Kept for backward compatibility of the old ``fp16_utils`` import path;
    forwards all arguments unchanged after emitting a deprecation warning.

    Args:
        params (list[torch.Parameters]): List of parameters of a model.
        coalesce (bool, optional): Whether allreduce parameters as a whole.
            Defaults to True.
        bucket_size_mb (int, optional): Size of bucket, the unit is MB.
            Defaults to -1.
    """
    # Bug fix: the original called ``warnings.warning``, which does not exist
    # (AttributeError at runtime); the correct API is ``warnings.warn``. The
    # unbalanced quote in the message is also closed.
    warnings.warn(
        '"mmcv.runner.fp16_utils.allreduce_grads" is deprecated, and will '
        'be removed in v2.8. Please switch to "mmcv.runner.allreduce_grads"',
        DeprecationWarning)
    _allreduce_grads(params, coalesce=coalesce, bucket_size_mb=bucket_size_mb)
def wrap_fp16_model(model):
    """Wrap the FP32 model to FP16.

    If you are using PyTorch >= 1.6, torch.cuda.amp is used as the
    backend, otherwise, original mmcv implementation will be adopted.

    For PyTorch >= 1.6, this function will
    1. Set fp16 flag inside the model to True.

    Otherwise:
    1. Convert FP32 model to FP16.
    2. Remain some necessary layers to be FP32, e.g., normalization layers.
    3. Set `fp16_enabled` flag inside the model to True.

    Args:
        model (nn.Module): Model in FP32.
    """
    amp_missing = (TORCH_VERSION == 'parrots'
                   or digit_version(TORCH_VERSION) < digit_version('1.6.0'))
    if amp_missing:
        # No torch.cuda.amp available: convert weights manually and keep
        # normalization layers in FP32 for numerical stability.
        model.half()
        patch_norm_fp32(model)
    # Flag every opted-in submodule so @auto_fp16/@force_fp32 become active.
    for m in model.modules():
        if hasattr(m, 'fp16_enabled'):
            m.fp16_enabled = True
def patch_norm_fp32(module): 'Recursively convert normalization layers from FP16 to FP32.\n\n Args:\n module (nn.Module): The modules to be converted in FP16.\n\n Returns:\n nn.Module: The converted module, the normalization layers have been\n converted to FP32.\n ' if isinstance(module, (nn.modules.batchnorm._BatchNorm, nn.GroupNorm)): module.float() if (isinstance(module, nn.GroupNorm) or (torch.__version__ < '1.3')): module.forward = patch_forward_method(module.forward, torch.half, torch.float) for child in module.children(): patch_norm_fp32(child) return module
def patch_forward_method(func, src_type, dst_type, convert_output=True):
    """Patch the forward method of a module.

    Args:
        func (callable): The original forward method.
        src_type (torch.dtype): Type of input arguments to be converted from.
        dst_type (torch.dtype): Type of input arguments to be converted to.
        convert_output (bool): Whether to convert the output back to
            src_type.

    Returns:
        callable: The patched forward method.
    """
    def new_forward(*args, **kwargs):
        # Cast matching tensors in the inputs, run the wrapped forward,
        # then optionally cast the result back to the caller's dtype.
        casted_args = cast_tensor_type(args, src_type, dst_type)
        casted_kwargs = cast_tensor_type(kwargs, src_type, dst_type)
        result = func(*casted_args, **casted_kwargs)
        if convert_output:
            result = cast_tensor_type(result, dst_type, src_type)
        return result

    return new_forward
class LossScaler:
    """Manage loss scaling for mixed precision training.

    Supports both 'static' and 'dynamic' modes. The implementation refers to
    https://github.com/NVIDIA/apex/blob/master/apex/fp16_utils/loss_scaler.py.

    Loss scaling combats gradient underflow in fp16 training. Dynamic
    scaling starts from a very large scale, which may cause gradients to
    OVERflow; whenever overflow is detected the update step is skipped and
    the scale is lowered. After ``scale_window`` consecutive overflow-free
    iterations the scale is raised again, riding the edge of the largest
    usable value.

    Args:
        init_scale (float): Initial loss scale value, default: 2**32.
        scale_factor (float): Factor used when adjusting the loss scale.
            Default: 2.
        mode (str): Loss scaling mode. 'dynamic' or 'static'
        scale_window (int): Number of consecutive iterations without an
            overflow to wait before increasing the loss scale.
            Default: 1000.
    """

    def __init__(self, init_scale=2**32, mode='dynamic', scale_factor=2., scale_window=1000):
        assert mode in ('dynamic', 'static'), 'mode can only be dynamic or static'
        self.cur_scale = init_scale
        self.cur_iter = 0
        self.mode = mode
        self.last_overflow_iter = -1
        self.scale_factor = scale_factor
        self.scale_window = scale_window

    def has_overflow(self, params):
        """Check if params contain overflow."""
        # Static mode never skips steps, so overflow is never reported.
        if self.mode != 'dynamic':
            return False
        return any(
            p.grad is not None and LossScaler._has_inf_or_nan(p.grad.data)
            for p in params)

    @staticmethod
    def _has_inf_or_nan(x):
        """Check if ``x`` contains NaN or +/-inf."""
        try:
            # Summing propagates inf/nan; converting to a Python float can
            # itself fail for exotic values, which also signals overflow.
            cpu_sum = float(x.float().sum())
        except RuntimeError as instance:
            if 'value cannot be converted' not in instance.args[0]:
                raise
            return True
        # nan != nan is the classic NaN self-inequality test.
        return (cpu_sum == float('inf') or cpu_sum == -float('inf')
                or cpu_sum != cpu_sum)

    def update_scale(self, overflow):
        """Update the current loss scale value when overflow happens."""
        if self.mode != 'dynamic':
            return
        if overflow:
            # Back off (never below 1) and remember when it happened.
            self.cur_scale = max(self.cur_scale / self.scale_factor, 1)
            self.last_overflow_iter = self.cur_iter
        else:
            stable_iters = self.cur_iter - self.last_overflow_iter
            if stable_iters % self.scale_window == 0:
                self.cur_scale *= self.scale_factor
        self.cur_iter += 1

    def state_dict(self):
        """Returns the state of the scaler as a :class:`dict`."""
        return dict(
            cur_scale=self.cur_scale,
            cur_iter=self.cur_iter,
            mode=self.mode,
            last_overflow_iter=self.last_overflow_iter,
            scale_factor=self.scale_factor,
            scale_window=self.scale_window)

    def load_state_dict(self, state_dict):
        """Loads the loss_scaler state dict.

        Args:
            state_dict (dict): scaler state.
        """
        for key in ('cur_scale', 'cur_iter', 'mode', 'last_overflow_iter',
                    'scale_factor', 'scale_window'):
            setattr(self, key, state_dict[key])

    @property
    def loss_scale(self):
        return self.cur_scale
@HOOKS.register_module()
class CheckpointHook(Hook):
    """Save checkpoints periodically.

    Args:
        interval (int): The saving period. If ``by_epoch=True``, interval
            indicates epochs, otherwise it indicates iterations.
            Default: -1, which means "never".
        by_epoch (bool): Saving checkpoints by epoch or by iteration.
            Default: True.
        save_optimizer (bool): Whether to save optimizer state_dict in the
            checkpoint. It is usually used for resuming experiments.
            Default: True.
        out_dir (str, optional): The root directory to save checkpoints. If
            not specified, ``runner.work_dir`` will be used by default. If
            specified, the ``out_dir`` will be the concatenation of
            ``out_dir`` and the last level directory of ``runner.work_dir``.
            `Changed in version 1.3.16.`
        max_keep_ckpts (int, optional): The maximum checkpoints to keep.
            In some cases we want only the latest few checkpoints and would
            like to delete old ones to save the disk space.
            Default: -1, which means unlimited.
        save_last (bool, optional): Whether to force the last checkpoint to
            be saved regardless of interval. Default: True.
        sync_buffer (bool, optional): Whether to synchronize buffers in
            different gpus. Default: False.
        file_client_args (dict, optional): Arguments to instantiate a
            FileClient. See :class:`mmcv.fileio.FileClient` for details.
            Default: None.
            `New in version 1.3.16.`
        **kwargs: Extra arguments (e.g. ``filename_tmpl``,
            ``create_symlink``) forwarded to ``runner.save_checkpoint()``.

    .. warning::
        Before v1.3.16, the ``out_dir`` argument indicates the path where
        the checkpoint is stored. However, since v1.3.16, ``out_dir``
        indicates the root directory and the final path to save checkpoint
        is the concatenation of ``out_dir`` and the last level directory of
        ``runner.work_dir``. Suppose the value of ``out_dir`` is
        "/path/of/A" and the value of ``runner.work_dir`` is "/path/of/B",
        then the final path will be "/path/of/A/B".
    """

    def __init__(self, interval=-1, by_epoch=True, save_optimizer=True,
                 out_dir=None, max_keep_ckpts=-1, save_last=True,
                 sync_buffer=False, file_client_args=None, **kwargs):
        self.interval = interval
        self.by_epoch = by_epoch
        self.save_optimizer = save_optimizer
        self.out_dir = out_dir
        self.max_keep_ckpts = max_keep_ckpts
        self.save_last = save_last
        # Remaining kwargs are passed through to runner.save_checkpoint().
        self.args = kwargs
        self.sync_buffer = sync_buffer
        self.file_client_args = file_client_args

    def before_run(self, runner):
        # Fall back to the runner's work_dir when no out_dir was given.
        if not self.out_dir:
            self.out_dir = runner.work_dir
        self.file_client = FileClient.infer_client(self.file_client_args, self.out_dir)
        # Since v1.3.16 out_dir is a *root*: the final directory is out_dir
        # joined with the basename of runner.work_dir (see class docstring).
        if self.out_dir != runner.work_dir:
            basename = osp.basename(runner.work_dir.rstrip(osp.sep))
            self.out_dir = self.file_client.join_path(self.out_dir, basename)
        runner.logger.info(f'Checkpoints will be saved to {self.out_dir} by {self.file_client.name}.')
        # Symlinks are only possible on backends that support them; downgrade
        # an explicit request with a warning, otherwise follow the backend.
        if 'create_symlink' in self.args:
            if self.args['create_symlink'] and not self.file_client.allow_symlink:
                self.args['create_symlink'] = False
                warnings.warn(f'create_symlink is set as True by the user but is changedto be False because creating symbolic link is not allowed in {self.file_client.name}')
        else:
            self.args['create_symlink'] = self.file_client.allow_symlink

    def after_train_epoch(self, runner):
        if not self.by_epoch:
            return
        # Save every `interval` epochs, plus the final epoch when save_last.
        if self.every_n_epochs(runner, self.interval) or (self.save_last and self.is_last_epoch(runner)):
            runner.logger.info(f'Saving checkpoint at {(runner.epoch + 1)} epochs')
            if self.sync_buffer:
                # Make buffers (e.g. BN running stats) identical across GPUs
                # before rank 0 writes them out.
                allreduce_params(runner.model.buffers())
            self._save_checkpoint(runner)

    @master_only
    def _save_checkpoint(self, runner):
        """Save the current checkpoint and delete unwanted checkpoint."""
        runner.save_checkpoint(self.out_dir, save_optimizer=self.save_optimizer, **self.args)
        # Record the latest checkpoint path so resuming can locate it.
        if runner.meta is not None:
            if self.by_epoch:
                cur_ckpt_filename = self.args.get('filename_tmpl', 'epoch_{}.pth').format(runner.epoch + 1)
            else:
                cur_ckpt_filename = self.args.get('filename_tmpl', 'iter_{}.pth').format(runner.iter + 1)
            runner.meta.setdefault('hook_msgs', dict())
            runner.meta['hook_msgs']['last_ckpt'] = self.file_client.join_path(self.out_dir, cur_ckpt_filename)
        # Keep only the newest max_keep_ckpts checkpoints: walk backwards in
        # steps of `interval` and stop at the first file that is absent.
        if self.max_keep_ckpts > 0:
            if self.by_epoch:
                name = 'epoch_{}.pth'
                current_ckpt = runner.epoch + 1
            else:
                name = 'iter_{}.pth'
                current_ckpt = runner.iter + 1
            redundant_ckpts = range(current_ckpt - self.max_keep_ckpts * self.interval, 0, -self.interval)
            filename_tmpl = self.args.get('filename_tmpl', name)
            for _step in redundant_ckpts:
                ckpt_path = self.file_client.join_path(self.out_dir, filename_tmpl.format(_step))
                if self.file_client.isfile(ckpt_path):
                    self.file_client.remove(ckpt_path)
                else:
                    break

    def after_train_iter(self, runner):
        if self.by_epoch:
            return
        # Save every `interval` iterations, plus the final one when save_last.
        if self.every_n_iters(runner, self.interval) or (self.save_last and self.is_last_iter(runner)):
            runner.logger.info(f'Saving checkpoint at {(runner.iter + 1)} iterations')
            if self.sync_buffer:
                allreduce_params(runner.model.buffers())
            self._save_checkpoint(runner)
@HOOKS.register_module()
class ClosureHook(Hook):
    """Hook that installs a user-provided callable as one of its triggers.

    Args:
        fn_name (str): Name of an existing Hook trigger point, e.g.
            'after_train_iter'; must already be an attribute of Hook.
        fn (callable): Callable bound under that name; it receives the same
            arguments the runner passes to the trigger.
    """

    def __init__(self, fn_name, fn):
        assert hasattr(self, fn_name)
        assert callable(fn)
        # Shadow the base-class no-op trigger with the user's callable.
        setattr(self, fn_name, fn)
@HOOKS.register_module()
class EMAHook(Hook):
    r"""Exponential Moving Average Hook.

    Use Exponential Moving Average on all parameters of model in training
    process. All parameters have a ema backup, which update by the formula
    as below. EMAHook takes priority over EvalHook and CheckpointSaverHook.

    .. math::

        Xema\_{t+1} = (1 - \text{momentum}) \times
            Xema\_{t} +  \text{momentum} \times X_t

    Args:
        momentum (float): The momentum used for updating ema parameter.
            Defaults to 0.0002.
        interval (int): Update ema parameter every interval iteration.
            Defaults to 1.
        warm_up (int): During first warm_up steps, we may use smaller momentum
            to update ema parameters more slowly. Defaults to 100.
        resume_from (str): The checkpoint path. Defaults to None.
    """

    def __init__(self, momentum=0.0002, interval=1, warm_up=100, resume_from=None):
        assert isinstance(interval, int) and interval > 0
        self.warm_up = warm_up
        self.interval = interval
        assert 0 < momentum < 1
        # Compound the per-iteration momentum over `interval` iterations so
        # that skipped steps are accounted for.
        self.momentum = momentum ** interval
        self.checkpoint = resume_from

    def before_run(self, runner):
        """To resume model with it's ema parameters more friendly.

        Register ema parameter as ``named_buffer`` to model.
        """
        model = runner.model
        if is_module_wrapper(model):
            model = model.module
        self.param_ema_buffer = {}
        self.model_parameters = dict(model.named_parameters(recurse=True))
        for name, value in self.model_parameters.items():
            # Buffer names cannot contain '.', so flatten the parameter path.
            buffer_name = f"ema_{name.replace('.', '_')}"
            self.param_ema_buffer[name] = buffer_name
            model.register_buffer(buffer_name, value.data.clone())
        self.model_buffers = dict(model.named_buffers(recurse=True))
        if self.checkpoint is not None:
            runner.resume(self.checkpoint)

    def after_train_iter(self, runner):
        """Update ema parameter every self.interval iterations."""
        curr_step = runner.iter
        # Warm-up schedule; NOTE(review): (1+t)/(warm_up+t) only lowers the
        # effective momentum when self.momentum exceeds it -- confirm intent.
        momentum = min(self.momentum, (1 + curr_step) / (self.warm_up + curr_step))
        if curr_step % self.interval != 0:
            return
        for name, parameter in self.model_parameters.items():
            buffer_parameter = self.model_buffers[self.param_ema_buffer[name]]
            # Bug fix: `Tensor.add_(scalar, tensor)` is a deprecated overload
            # that newer PyTorch releases reject; use the `alpha=` keyword.
            buffer_parameter.mul_(1 - momentum).add_(parameter.data, alpha=momentum)

    def after_train_epoch(self, runner):
        """We load parameter values from ema backup to model before the
        EvalHook."""
        self._swap_ema_parameters()

    def before_train_epoch(self, runner):
        """We recover model's parameter from ema backup after last epoch's
        EvalHook."""
        self._swap_ema_parameters()

    def _swap_ema_parameters(self):
        """Swap the parameter of model with parameter in ema_buffer."""
        for name, value in self.model_parameters.items():
            temp = value.data.clone()
            ema_buffer = self.model_buffers[self.param_ema_buffer[name]]
            value.data.copy_(ema_buffer.data)
            ema_buffer.data.copy_(temp)
class EvalHook(Hook):
    """Non-Distributed evaluation hook.

    This hook will regularly perform evaluation in a given interval when
    performing in non-distributed environment.

    Args:
        dataloader (DataLoader): A PyTorch dataloader, whose dataset has
            implemented ``evaluate`` function.
        start (int | None, optional): Evaluation starting epoch. It enables
            evaluation before the training starts if ``start`` <= the
            resuming epoch. If None, whether to evaluate is merely decided
            by ``interval``. Default: None.
        interval (int): Evaluation interval. Default: 1.
        by_epoch (bool): Determine perform evaluation by epoch or by
            iteration. If set to True, it will perform by epoch. Otherwise,
            by iteration. Default: True.
        save_best (str, optional): If a metric is specified, it would measure
            the best checkpoint during evaluation. The information about best
            checkpoint would be saved in ``runner.meta['hook_msgs']`` to keep
            best score value and best checkpoint path, which will be also
            loaded when resume checkpoint. Options are the evaluation metrics
            on the test dataset, e.g. ``bbox_mAP``, ``segm_mAP``, ``AR@100``.
            If ``save_best`` is ``auto``, the first key of the returned
            ``OrderedDict`` result will be used. Default: None.
        rule (str | None, optional): Comparison rule for best score. If set
            to None, it will infer a reasonable rule. Keys such as 'acc',
            'top' etc. will be inferred by 'greater' rule. Keys containing
            'loss' will be inferred by 'less' rule. Options are 'greater',
            'less', None. Default: None.
        test_fn (callable, optional): test a model with samples from a
            dataloader, and return the test results. If ``None``, the
            default test function ``mmcv.engine.single_gpu_test`` will be
            used. Default: None.
        greater_keys (List[str] | None, optional): Metric keys that will be
            inferred by 'greater' comparison rule. If ``None``,
            ``_default_greater_keys`` will be used. Default: None.
        less_keys (List[str] | None, optional): Metric keys that will be
            inferred by 'less' comparison rule. If ``None``,
            ``_default_less_keys`` will be used. Default: None.
        out_dir (str, optional): The root directory to save checkpoints. If
            not specified, ``runner.work_dir`` will be used by default. If
            specified, the ``out_dir`` will be the concatenation of
            ``out_dir`` and the last level directory of ``runner.work_dir``.
            `New in version 1.3.16.`
        file_client_args (dict): Arguments to instantiate a FileClient.
            See :class:`mmcv.fileio.FileClient` for details. Default: None.
            `New in version 1.3.16.`
        **eval_kwargs: Evaluation arguments fed into the evaluate function
            of the dataset.

    Note:
        If new arguments are added for EvalHook, tools/test.py,
        tools/eval_metric.py may be affected.
    """

    # Comparison functions used to decide whether a new score beats the
    # current best one.
    rule_map = {'greater': (lambda x, y: (x > y)), 'less': (lambda x, y: (x < y))}
    # Initial "best" value before any evaluation has been run.
    init_value_map = {'greater': (- inf), 'less': inf}
    # Substrings used to infer the comparison rule from a metric name.
    _default_greater_keys = ['acc', 'top', 'AR@', 'auc', 'precision', 'mAP', 'mDice', 'mIoU', 'mAcc', 'aAcc']
    _default_less_keys = ['loss']

    def __init__(self,
                 dataloader,
                 start=None,
                 interval=1,
                 by_epoch=True,
                 save_best=None,
                 rule=None,
                 test_fn=None,
                 greater_keys=None,
                 less_keys=None,
                 out_dir=None,
                 file_client_args=None,
                 **eval_kwargs):
        if (not isinstance(dataloader, DataLoader)):
            raise TypeError(f'dataloader must be a pytorch DataLoader, but got {type(dataloader)}')
        if (interval <= 0):
            raise ValueError(f'interval must be a positive number, but got {interval}')
        assert isinstance(by_epoch, bool), '``by_epoch`` should be a boolean'
        if ((start is not None) and (start < 0)):
            raise ValueError(f'The evaluation start epoch {start} is smaller than 0')
        self.dataloader = dataloader
        self.interval = interval
        self.start = start
        self.by_epoch = by_epoch
        assert (isinstance(save_best, str) or (save_best is None)), f'""save_best"" should be a str or None rather than {type(save_best)}'
        self.save_best = save_best
        self.eval_kwargs = eval_kwargs
        # True until the first before_train_epoch/iter fires; used to allow
        # an evaluation before training resumes (see before_train_*).
        self.initial_flag = True
        if (test_fn is None):
            # Imported lazily to avoid a circular import at module load time.
            from mmcv.engine import single_gpu_test
            self.test_fn = single_gpu_test
        else:
            self.test_fn = test_fn
        if (greater_keys is None):
            self.greater_keys = self._default_greater_keys
        else:
            if (not isinstance(greater_keys, (list, tuple))):
                greater_keys = (greater_keys,)
            assert is_seq_of(greater_keys, str)
            self.greater_keys = greater_keys
        if (less_keys is None):
            self.less_keys = self._default_less_keys
        else:
            if (not isinstance(less_keys, (list, tuple))):
                less_keys = (less_keys,)
            assert is_seq_of(less_keys, str)
            self.less_keys = less_keys
        if (self.save_best is not None):
            self.best_ckpt_path = None
            self._init_rule(rule, self.save_best)
        self.out_dir = out_dir
        self.file_client_args = file_client_args

    def _init_rule(self, rule, key_indicator):
        """Initialize rule, key_indicator, comparison_func, and best score.

        Here is the rule to determine which rule is used for key indicator
        when the rule is not specific (note that the key indicator matching
        is case-insensitive):

        1. If the key indicator is in ``self.greater_keys``, the rule will
           be specified as 'greater'.
        2. Or if the key indicator is in ``self.less_keys``, the rule will
           be specified as 'less'.
        3. Or if the key indicator is equal to the substring in any one item
           in ``self.greater_keys``, the rule will be specified as 'greater'.
        4. Or if the key indicator is equal to the substring in any one item
           in ``self.less_keys``, the rule will be specified as 'less'.

        Args:
            rule (str | None): Comparison rule for best score.
            key_indicator (str | None): Key indicator to determine the
                comparison rule.
        """
        if ((rule not in self.rule_map) and (rule is not None)):
            raise KeyError(f'rule must be greater, less or None, but got {rule}.')
        if (rule is None):
            # When save_best is 'auto', rule inference is deferred until the
            # first evaluation result is available (see ``evaluate``).
            if (key_indicator != 'auto'):
                key_indicator_lc = key_indicator.lower()
                greater_keys = [key.lower() for key in self.greater_keys]
                less_keys = [key.lower() for key in self.less_keys]
                if (key_indicator_lc in greater_keys):
                    rule = 'greater'
                elif (key_indicator_lc in less_keys):
                    rule = 'less'
                elif any(((key in key_indicator_lc) for key in greater_keys)):
                    rule = 'greater'
                elif any(((key in key_indicator_lc) for key in less_keys)):
                    rule = 'less'
                else:
                    raise ValueError(f'Cannot infer the rule for key {key_indicator}, thus a specific rule must be specified.')
        self.rule = rule
        self.key_indicator = key_indicator
        if (self.rule is not None):
            self.compare_func = self.rule_map[self.rule]

    def before_run(self, runner):
        # Resolve the checkpoint output directory and restore the best
        # checkpoint path from a resumed run, if any.
        if (not self.out_dir):
            self.out_dir = runner.work_dir
        self.file_client = FileClient.infer_client(self.file_client_args, self.out_dir)
        if (self.out_dir != runner.work_dir):
            # Append the last level of work_dir so runs do not collide.
            basename = osp.basename(runner.work_dir.rstrip(osp.sep))
            self.out_dir = self.file_client.join_path(self.out_dir, basename)
            runner.logger.info(f'The best checkpoint will be saved to {self.out_dir} by {self.file_client.name}')
        if (self.save_best is not None):
            if (runner.meta is None):
                warnings.warn('runner.meta is None. Creating an empty one.')
                runner.meta = dict()
            runner.meta.setdefault('hook_msgs', dict())
            self.best_ckpt_path = runner.meta['hook_msgs'].get('best_ckpt', None)

    def before_train_iter(self, runner):
        """Evaluate the model only at the start of training by iteration."""
        if (self.by_epoch or (not self.initial_flag)):
            return
        if ((self.start is not None) and (runner.iter >= self.start)):
            # When resuming past ``start``, evaluate once before training.
            self.after_train_iter(runner)
        self.initial_flag = False

    def before_train_epoch(self, runner):
        """Evaluate the model only at the start of training by epoch."""
        if (not (self.by_epoch and self.initial_flag)):
            return
        if ((self.start is not None) and (runner.epoch >= self.start)):
            self.after_train_epoch(runner)
        self.initial_flag = False

    def after_train_iter(self, runner):
        """Called after every training iter to evaluate the results."""
        if ((not self.by_epoch) and self._should_evaluate(runner)):
            # Flush the pending training log through the logger hooks first,
            # otherwise training and evaluation logs would be mixed in
            # IterBasedRunner.
            for hook in runner._hooks:
                if isinstance(hook, LoggerHook):
                    hook.after_train_iter(runner)
            runner.log_buffer.clear()
            self._do_evaluate(runner)

    def after_train_epoch(self, runner):
        """Called after every training epoch to evaluate the results."""
        if (self.by_epoch and self._should_evaluate(runner)):
            self._do_evaluate(runner)

    def _do_evaluate(self, runner):
        """perform evaluation and save ckpt."""
        results = self.test_fn(runner.model, self.dataloader)
        runner.log_buffer.output['eval_iter_num'] = len(self.dataloader)
        key_score = self.evaluate(runner, results)
        # NOTE(review): a key_score of 0 is falsy and would skip saving the
        # checkpoint even when save_best is set — confirm whether scores can
        # legitimately be 0 for the chosen metric.
        if (self.save_best and key_score):
            self._save_ckpt(runner, key_score)

    def _should_evaluate(self, runner):
        """Judge whether to perform evaluation.

        Here is the rule to judge whether to perform evaluation:

        1. It will not perform evaluation during the epoch/iteration
           interval, which is determined by ``self.interval``.
        2. It will not perform evaluation if the start time is larger than
           current time.
        3. It will not perform evaluation when current time is larger than
           the start time but during epoch/iteration interval.

        Returns:
            bool: The flag indicating whether to perform evaluation.
        """
        if self.by_epoch:
            current = runner.epoch
            check_time = self.every_n_epochs
        else:
            current = runner.iter
            check_time = self.every_n_iters
        if (self.start is None):
            if (not check_time(runner, self.interval)):
                # No evaluation during the interval.
                return False
        elif ((current + 1) < self.start):
            # No evaluation before ``start``.
            return False
        elif (((current + 1) - self.start) % self.interval):
            # After ``start``, evaluate only every ``interval`` steps.
            return False
        return True

    def _save_ckpt(self, runner, key_score):
        """Save the best checkpoint.

        It will compare the score according to the compare function, write
        related information (best score, best checkpoint path) and save the
        best checkpoint into ``work_dir``.
        """
        if self.by_epoch:
            current = f'epoch_{(runner.epoch + 1)}'
            (cur_type, cur_time) = ('epoch', (runner.epoch + 1))
        else:
            current = f'iter_{(runner.iter + 1)}'
            (cur_type, cur_time) = ('iter', (runner.iter + 1))
        best_score = runner.meta['hook_msgs'].get('best_score', self.init_value_map[self.rule])
        if self.compare_func(key_score, best_score):
            best_score = key_score
            runner.meta['hook_msgs']['best_score'] = best_score
            # Remove the previous best checkpoint so only one is kept.
            if (self.best_ckpt_path and self.file_client.isfile(self.best_ckpt_path)):
                self.file_client.remove(self.best_ckpt_path)
                runner.logger.info(f'The previous best checkpoint {self.best_ckpt_path} was removed')
            best_ckpt_name = f'best_{self.key_indicator}_{current}.pth'
            self.best_ckpt_path = self.file_client.join_path(self.out_dir, best_ckpt_name)
            runner.meta['hook_msgs']['best_ckpt'] = self.best_ckpt_path
            runner.save_checkpoint(self.out_dir, filename_tmpl=best_ckpt_name, create_symlink=False)
            runner.logger.info(f'Now best checkpoint is saved as {best_ckpt_name}.')
            runner.logger.info(f'Best {self.key_indicator} is {best_score:0.4f} at {cur_time} {cur_type}.')

    def evaluate(self, runner, results):
        """Evaluate the results.

        Args:
            runner (:obj:`mmcv.Runner`): The underlined training runner.
            results (list): Output results.

        Returns:
            float | None: The key score used to rank checkpoints when
            ``save_best`` is set, otherwise None.
        """
        eval_res = self.dataloader.dataset.evaluate(results, logger=runner.logger, **self.eval_kwargs)
        for (name, val) in eval_res.items():
            runner.log_buffer.output[name] = val
        runner.log_buffer.ready = True
        if (self.save_best is not None):
            if (not eval_res):
                warnings.warn('Since `eval_res` is an empty dict, the behavior to save the best checkpoint will be skipped in this evaluation.')
                return None
            if (self.key_indicator == 'auto'):
                # The first metric in the result dict decides the rule.
                self._init_rule(self.rule, list(eval_res.keys())[0])
            return eval_res[self.key_indicator]
        return None
class DistEvalHook(EvalHook):
    """Distributed evaluation hook.

    This hook will regularly perform evaluation in a given interval when
    performing in distributed environment.

    Args:
        dataloader (DataLoader): A PyTorch dataloader, whose dataset has
            implemented ``evaluate`` function.
        start (int | None, optional): Evaluation starting epoch. It enables
            evaluation before the training starts if ``start`` <= the
            resuming epoch. If None, whether to evaluate is merely decided
            by ``interval``. Default: None.
        interval (int): Evaluation interval. Default: 1.
        by_epoch (bool): Determine perform evaluation by epoch or by
            iteration. If set to True, it will perform by epoch. Otherwise,
            by iteration. Default: True.
        save_best (str, optional): If a metric is specified, it would measure
            the best checkpoint during evaluation. The information about best
            checkpoint would be saved in ``runner.meta['hook_msgs']`` to keep
            best score value and best checkpoint path, which will be also
            loaded when resume checkpoint. Options are the evaluation metrics
            on the test dataset. If ``save_best`` is ``auto``, the first key
            of the returned ``OrderedDict`` result will be used.
            Default: None.
        rule (str | None, optional): Comparison rule for best score. If set
            to None, it will infer a reasonable rule. Options are 'greater',
            'less', None. Default: None.
        test_fn (callable, optional): test a model with samples from a
            dataloader in a multi-gpu manner, and return the test results.
            If ``None``, the default test function
            ``mmcv.engine.multi_gpu_test`` will be used. Default: None.
        tmpdir (str | None): Temporary directory to save the results of all
            processes. Default: None.
        gpu_collect (bool): Whether to use gpu or cpu to collect results.
            Default: False.
        broadcast_bn_buffer (bool): Whether to broadcast the
            buffer(running_mean and running_var) of rank 0 to other rank
            before evaluation. Default: True.
        out_dir (str, optional): The root directory to save checkpoints. If
            not specified, ``runner.work_dir`` will be used by default. If
            specified, the ``out_dir`` will be the concatenation of
            ``out_dir`` and the last level directory of ``runner.work_dir``.
        file_client_args (dict): Arguments to instantiate a FileClient.
            See :class:`mmcv.fileio.FileClient` for details. Default: None.
        **eval_kwargs: Evaluation arguments fed into the evaluate function
            of the dataset.
    """

    def __init__(self,
                 dataloader,
                 start=None,
                 interval=1,
                 by_epoch=True,
                 save_best=None,
                 rule=None,
                 test_fn=None,
                 greater_keys=None,
                 less_keys=None,
                 broadcast_bn_buffer=True,
                 tmpdir=None,
                 gpu_collect=False,
                 out_dir=None,
                 file_client_args=None,
                 **eval_kwargs):
        if (test_fn is None):
            # Imported lazily to avoid a circular import at module load time.
            from mmcv.engine import multi_gpu_test
            test_fn = multi_gpu_test
        super().__init__(dataloader, start=start, interval=interval, by_epoch=by_epoch, save_best=save_best, rule=rule, test_fn=test_fn, greater_keys=greater_keys, less_keys=less_keys, out_dir=out_dir, file_client_args=file_client_args, **eval_kwargs)
        self.broadcast_bn_buffer = broadcast_bn_buffer
        self.tmpdir = tmpdir
        self.gpu_collect = gpu_collect

    def _do_evaluate(self, runner):
        """perform evaluation and save ckpt."""
        # Synchronize BN statistics across ranks so every process evaluates
        # the same model. Only running_mean/running_var need broadcasting;
        # learnable parameters are already synchronized by DDP.
        if self.broadcast_bn_buffer:
            model = runner.model
            for (name, module) in model.named_modules():
                if (isinstance(module, _BatchNorm) and module.track_running_stats):
                    dist.broadcast(module.running_var, 0)
                    dist.broadcast(module.running_mean, 0)
        tmpdir = self.tmpdir
        if (tmpdir is None):
            # Hidden directory under work_dir used for cross-rank result
            # collection when gpu_collect is False.
            tmpdir = osp.join(runner.work_dir, '.eval_hook')
        results = self.test_fn(runner.model, self.dataloader, tmpdir=tmpdir, gpu_collect=self.gpu_collect)
        # Only rank 0 receives the gathered results; it alone evaluates and
        # (optionally) saves the best checkpoint.
        if (runner.rank == 0):
            print('\n')
            runner.log_buffer.output['eval_iter_num'] = len(self.dataloader)
            key_score = self.evaluate(runner, results)
            # NOTE(review): a key_score of 0 is falsy and would skip saving
            # the checkpoint — confirm whether that is intended.
            if (self.save_best and key_score):
                self._save_ckpt(runner, key_score)
class Hook:
    """Base class of all runner hooks.

    A hook receives the runner at fixed stages of the training/validation
    loop and may read or mutate its state.  Subclasses override any subset
    of the stage callbacks below; the generic ``*_epoch``/``*_iter``
    callbacks fan out to both their train and val specializations.
    """

    stages = ('before_run', 'before_train_epoch', 'before_train_iter',
              'after_train_iter', 'after_train_epoch', 'before_val_epoch',
              'before_val_iter', 'after_val_iter', 'after_val_epoch',
              'after_run')

    # ------ no-op callbacks, meant to be overridden by subclasses ------

    def before_run(self, runner):
        pass

    def after_run(self, runner):
        pass

    def before_epoch(self, runner):
        pass

    def after_epoch(self, runner):
        pass

    def before_iter(self, runner):
        pass

    def after_iter(self, runner):
        pass

    # ------ train/val specializations delegate to the generic hooks ------

    def before_train_epoch(self, runner):
        self.before_epoch(runner)

    def before_val_epoch(self, runner):
        self.before_epoch(runner)

    def after_train_epoch(self, runner):
        self.after_epoch(runner)

    def after_val_epoch(self, runner):
        self.after_epoch(runner)

    def before_train_iter(self, runner):
        self.before_iter(runner)

    def before_val_iter(self, runner):
        self.before_iter(runner)

    def after_train_iter(self, runner):
        self.after_iter(runner)

    def after_val_iter(self, runner):
        self.after_iter(runner)

    # ------ scheduling helpers ------

    def every_n_epochs(self, runner, n):
        """Return True when the (1-based) epoch count is a multiple of n."""
        if n <= 0:
            return False
        return (runner.epoch + 1) % n == 0

    def every_n_inner_iters(self, runner, n):
        """Return True when the (1-based) inner iter is a multiple of n."""
        if n <= 0:
            return False
        return (runner.inner_iter + 1) % n == 0

    def every_n_iters(self, runner, n):
        """Return True when the (1-based) global iter is a multiple of n."""
        if n <= 0:
            return False
        return (runner.iter + 1) % n == 0

    def end_of_epoch(self, runner):
        """Return True on the last iteration of the current epoch."""
        return runner.inner_iter + 1 == len(runner.data_loader)

    def is_last_epoch(self, runner):
        """Return True during the final training epoch."""
        return runner.epoch + 1 == runner._max_epochs

    def is_last_iter(self, runner):
        """Return True during the final training iteration."""
        return runner.iter + 1 == runner._max_iters

    def get_triggered_stages(self):
        """List the stages this hook actually overrides, in stage order."""
        triggered = {
            stage
            for stage in Hook.stages
            if is_method_overridden(stage, Hook, self)
        }
        # An overridden generic callback implies both of its specializations.
        fanout = {
            'before_epoch': ('before_train_epoch', 'before_val_epoch'),
            'after_epoch': ('after_train_epoch', 'after_val_epoch'),
            'before_iter': ('before_train_iter', 'before_val_iter'),
            'after_iter': ('after_train_iter', 'after_val_iter'),
        }
        for generic, specific in fanout.items():
            if is_method_overridden(generic, Hook, self):
                triggered.update(specific)
        return [stage for stage in Hook.stages if stage in triggered]
@HOOKS.register_module() class IterTimerHook(Hook): def before_epoch(self, runner): self.t = time.time() def before_iter(self, runner): runner.log_buffer.update({'data_time': (time.time() - self.t)}) def after_iter(self, runner): runner.log_buffer.update({'time': (time.time() - self.t)}) self.t = time.time()
class LoggerHook(Hook):
    """Base class for logger hooks.

    Args:
        interval (int): Logging interval (every k iterations). Default 10.
        ignore_last (bool): Ignore the log of last iterations in each epoch
            if less than `interval`. Default True.
        reset_flag (bool): Whether to clear the output buffer after logging.
            Default False.
        by_epoch (bool): Whether EpochBasedRunner is used. Default True.
    """

    # Kept for backward compatibility; has no effect on Python 3.
    __metaclass__ = ABCMeta

    def __init__(self, interval=10, ignore_last=True, reset_flag=False,
                 by_epoch=True):
        self.interval = interval
        self.ignore_last = ignore_last
        self.reset_flag = reset_flag
        self.by_epoch = by_epoch

    @abstractmethod
    def log(self, runner):
        """Write the aggregated log buffer to the concrete backend."""
        pass

    @staticmethod
    def is_scalar(val, include_np=True, include_torch=True):
        """Tell the input variable is a scalar or not.

        Args:
            val: Input variable.
            include_np (bool): Whether include 0-d np.ndarray as a scalar.
            include_torch (bool): Whether include 0-d torch.Tensor as a
                scalar.

        Returns:
            bool: True or False.
        """
        if isinstance(val, numbers.Number):
            return True
        elif include_np and isinstance(val, np.ndarray) and val.ndim == 0:
            return True
        elif include_torch and isinstance(val, torch.Tensor) and \
                val.numel() == 1:
            # BUGFIX: the previous check ``len(val) == 1`` raises TypeError
            # for 0-d tensors (which the docstring promises to accept).
            # ``numel() == 1`` covers both 0-d tensors and single-element
            # tensors, preserving all previously-True cases.
            return True
        else:
            return False

    def get_mode(self, runner):
        """Infer 'train' or 'val' mode from the runner state."""
        if runner.mode == 'train':
            # 'time' is only present in the buffer during actual training
            # iterations; its absence means we are logging evaluation output.
            if 'time' in runner.log_buffer.output:
                mode = 'train'
            else:
                mode = 'val'
        elif runner.mode == 'val':
            mode = 'val'
        else:
            raise ValueError(
                f"runner mode should be 'train' or 'val', but got {runner.mode}")
        return mode

    def get_epoch(self, runner):
        """Get the 1-based epoch number for display."""
        if runner.mode == 'train':
            epoch = runner.epoch + 1
        elif runner.mode == 'val':
            # Validation runs after the train epoch's counter increment.
            epoch = runner.epoch
        else:
            raise ValueError(
                f"runner mode should be 'train' or 'val', but got {runner.mode}")
        return epoch

    def get_iter(self, runner, inner_iter=False):
        """Get the current training iteration step."""
        if self.by_epoch and inner_iter:
            current_iter = runner.inner_iter + 1
        else:
            current_iter = runner.iter + 1
        return current_iter

    def get_lr_tags(self, runner):
        """Collect current learning rate(s) as loggable tags."""
        tags = {}
        lrs = runner.current_lr()
        if isinstance(lrs, dict):
            # One lr list per optimizer; log the first param group's value.
            for name, value in lrs.items():
                tags[f'learning_rate/{name}'] = value[0]
        else:
            tags['learning_rate'] = lrs[0]
        return tags

    def get_momentum_tags(self, runner):
        """Collect current momentum value(s) as loggable tags."""
        tags = {}
        momentums = runner.current_momentum()
        if isinstance(momentums, dict):
            for name, value in momentums.items():
                tags[f'momentum/{name}'] = value[0]
        else:
            tags['momentum'] = momentums[0]
        return tags

    def get_loggable_tags(self,
                          runner,
                          allow_scalar=True,
                          allow_text=False,
                          add_mode=True,
                          tags_to_skip=('time', 'data_time')):
        """Filter the log buffer into a dict of tags suitable for logging.

        Args:
            runner: The current runner.
            allow_scalar (bool): Keep scalar values. Default: True.
            allow_text (bool): Keep string values. Default: False.
            add_mode (bool): Prefix tags with 'train/' or 'val/'.
            tags_to_skip (tuple): Buffer keys never logged as tags.
        """
        tags = {}
        for var, val in runner.log_buffer.output.items():
            if var in tags_to_skip:
                continue
            if self.is_scalar(val) and not allow_scalar:
                continue
            if isinstance(val, str) and not allow_text:
                continue
            if add_mode:
                var = f'{self.get_mode(runner)}/{var}'
            tags[var] = val
        tags.update(self.get_lr_tags(runner))
        tags.update(self.get_momentum_tags(runner))
        return tags

    def before_run(self, runner):
        # Only the last registered logger hook resets the buffer, so all
        # logger hooks see the same data within one logging step.
        for hook in runner.hooks[::-1]:
            if isinstance(hook, LoggerHook):
                hook.reset_flag = True
                break

    def before_epoch(self, runner):
        runner.log_buffer.clear()

    def after_train_iter(self, runner):
        # Average the buffered values over the last ``interval`` iterations
        # (or the trailing partial window at epoch end, unless ignored).
        if self.by_epoch and self.every_n_inner_iters(runner, self.interval):
            runner.log_buffer.average(self.interval)
        elif not self.by_epoch and self.every_n_iters(runner, self.interval):
            runner.log_buffer.average(self.interval)
        elif self.end_of_epoch(runner) and not self.ignore_last:
            # not precise but more stable
            runner.log_buffer.average(self.interval)
        if runner.log_buffer.ready:
            self.log(runner)
            if self.reset_flag:
                runner.log_buffer.clear_output()

    def after_train_epoch(self, runner):
        if runner.log_buffer.ready:
            self.log(runner)
            if self.reset_flag:
                runner.log_buffer.clear_output()

    def after_val_epoch(self, runner):
        runner.log_buffer.average()
        self.log(runner)
        if self.reset_flag:
            runner.log_buffer.clear_output()
@HOOKS.register_module() class DvcliveLoggerHook(LoggerHook): 'Class to log metrics with dvclive.\n\n It requires `dvclive`_ to be installed.\n\n Args:\n model_file (str): Default None. If not None, after each epoch the\n model will be saved to {model_file}.\n interval (int): Logging interval (every k iterations). Default 10.\n ignore_last (bool): Ignore the log of last iterations in each epoch\n if less than `interval`. Default: True.\n reset_flag (bool): Whether to clear the output buffer after logging.\n Default: False.\n by_epoch (bool): Whether EpochBasedRunner is used. Default: True.\n kwargs: Arguments for instantiating `Live`_.\n\n .. _dvclive:\n https://dvc.org/doc/dvclive\n\n .. _Live:\n https://dvc.org/doc/dvclive/api-reference/live#parameters\n ' def __init__(self, model_file=None, interval=10, ignore_last=True, reset_flag=False, by_epoch=True, **kwargs): super().__init__(interval, ignore_last, reset_flag, by_epoch) self.model_file = model_file self.import_dvclive(**kwargs) def import_dvclive(self, **kwargs): try: from dvclive import Live except ImportError: raise ImportError('Please run "pip install dvclive" to install dvclive') self.dvclive = Live(**kwargs) @master_only def log(self, runner): tags = self.get_loggable_tags(runner) if tags: self.dvclive.set_step(self.get_iter(runner)) for (k, v) in tags.items(): self.dvclive.log(k, v) @master_only def after_train_epoch(self, runner): super().after_train_epoch(runner) if (self.model_file is not None): runner.save_checkpoint(Path(self.model_file).parent, filename_tmpl=Path(self.model_file).name, create_symlink=False)
@HOOKS.register_module()
class MlflowLoggerHook(LoggerHook):
    """Logger hook that reports metrics (and optionally the trained model)
    to `MLflow`_.

    Args:
        exp_name (str, optional): Name of the experiment to be used.
            Default None. If not None, set the active experiment; the
            experiment is created if it does not exist.
        tags (Dict[str], optional): Tags for the current run.
            Default None. If not None, set tags for the current run.
        log_model (bool, optional): Whether to log an MLflow artifact.
            Default True. If True, log runner.model as an MLflow artifact
            for the current run.
        interval (int): Logging interval (every k iterations). Default: 10.
        ignore_last (bool): Ignore the log of last iterations in each epoch
            if less than `interval`. Default: True.
        reset_flag (bool): Whether to clear the output buffer after logging.
            Default: False.
        by_epoch (bool): Whether EpochBasedRunner is used. Default: True.

    .. _MLflow:
        https://www.mlflow.org/docs/latest/index.html
    """

    def __init__(self,
                 exp_name=None,
                 tags=None,
                 log_model=True,
                 interval=10,
                 ignore_last=True,
                 reset_flag=False,
                 by_epoch=True):
        super().__init__(interval, ignore_last, reset_flag, by_epoch)
        self.import_mlflow()
        self.exp_name = exp_name
        self.tags = tags
        self.log_model = log_model

    def import_mlflow(self):
        # Import lazily so mmcv does not hard-depend on mlflow.
        try:
            import mlflow
            import mlflow.pytorch as mlflow_pytorch
        except ImportError:
            raise ImportError(
                'Please run "pip install mlflow" to install mlflow')
        self.mlflow = mlflow
        self.mlflow_pytorch = mlflow_pytorch

    @master_only
    def before_run(self, runner):
        super().before_run(runner)
        if self.exp_name is not None:
            self.mlflow.set_experiment(self.exp_name)
        if self.tags is not None:
            self.mlflow.set_tags(self.tags)

    @master_only
    def log(self, runner):
        metrics = self.get_loggable_tags(runner)
        if not metrics:
            return
        self.mlflow.log_metrics(metrics, step=self.get_iter(runner))

    @master_only
    def after_run(self, runner):
        if not self.log_model:
            return
        # Pin the torch version so the logged model is reproducible.
        self.mlflow_pytorch.log_model(
            runner.model,
            'models',
            pip_requirements=[f'torch=={TORCH_VERSION}'])
@HOOKS.register_module()
class NeptuneLoggerHook(LoggerHook):
    """Class to log metrics to NeptuneAI.

    It requires `Neptune`_ to be installed.

    Args:
        init_kwargs (dict): a dict contains the initialization keys as
            below:

            - project (str): Name of a project in a form of
              namespace/project_name. If None, the value of NEPTUNE_PROJECT
              environment variable will be taken.
            - api_token (str): User's API token. If None, the value of
              NEPTUNE_API_TOKEN environment variable will be taken. Note: It
              is strongly recommended to use NEPTUNE_API_TOKEN environment
              variable rather than placing your API token in plain text in
              your source code.
            - name (str, optional, default is 'Untitled'): Editable name of
              the run. Name is displayed in the run's Details and in Runs
              table as a column.

            Check https://docs.neptune.ai/api-reference/neptune#init for
            more init arguments.
        interval (int): Logging interval (every k iterations). Default: 10.
        ignore_last (bool): Ignore the log of last iterations in each epoch
            if less than ``interval``. Default: True.
        reset_flag (bool): Whether to clear the output buffer after logging.
            Default: True.
        with_step (bool): If True, the step will be logged from
            ``self.get_iters``. Otherwise, step will not be logged.
            Default: True.
        by_epoch (bool): Whether EpochBasedRunner is used. Default: True.

    .. _Neptune:
        https://docs.neptune.ai
    """

    def __init__(self,
                 init_kwargs=None,
                 interval=10,
                 ignore_last=True,
                 reset_flag=True,
                 with_step=True,
                 by_epoch=True):
        super(NeptuneLoggerHook, self).__init__(interval, ignore_last,
                                                reset_flag, by_epoch)
        self.import_neptune()
        self.init_kwargs = init_kwargs
        self.with_step = with_step

    def import_neptune(self):
        # Import lazily so mmcv does not hard-depend on neptune-client.
        try:
            import neptune.new as neptune
        except ImportError:
            raise ImportError(
                'Please run "pip install neptune-client" to install neptune')
        self.neptune = neptune
        self.run = None

    @master_only
    def before_run(self, runner):
        if self.init_kwargs:
            self.run = self.neptune.init(**self.init_kwargs)
        else:
            self.run = self.neptune.init()

    @master_only
    def log(self, runner):
        tags = self.get_loggable_tags(runner)
        if not tags:
            return
        if self.with_step:
            step = self.get_iter(runner)
            for tag_name, tag_value in tags.items():
                self.run[tag_name].log(tag_value, step=step)
        else:
            # BUGFIX: the original inserted 'global_step' into ``tags``
            # *inside* the ``tags.items()`` loop, raising ``RuntimeError:
            # dictionary changed size during iteration`` as soon as there
            # was more than one tag, and logged the entire dict under every
            # tag name. Record the step once, then log each value.
            tags['global_step'] = self.get_iter(runner)
            for tag_name, tag_value in tags.items():
                self.run[tag_name].log(tag_value)

    @master_only
    def after_run(self, runner):
        self.run.stop()
@HOOKS.register_module()
class PaviLoggerHook(LoggerHook):
    """Class to visual model, log metrics (for internal use).

    Args:
        init_kwargs (dict): A dict contains the initialization keys.
        add_graph (bool): Whether to visual model. Default: False.
        add_last_ckpt (bool): Whether to save checkpoint after run.
            Default: False.
        interval (int): Logging interval (every k iterations). Default: True.
        ignore_last (bool): Ignore the log of last iterations in each epoch
            if less than `interval`. Default: True.
        reset_flag (bool): Whether to clear the output buffer after logging.
            Default: False.
        by_epoch (bool): Whether EpochBasedRunner is used. Default: True.
        img_key (string): Get image data from Dataset. Default: 'img_info'.
    """

    def __init__(self,
                 init_kwargs=None,
                 add_graph=False,
                 add_last_ckpt=False,
                 interval=10,
                 ignore_last=True,
                 reset_flag=False,
                 by_epoch=True,
                 img_key='img_info'):
        super(PaviLoggerHook, self).__init__(interval, ignore_last,
                                             reset_flag, by_epoch)
        self.init_kwargs = init_kwargs
        self.add_graph = add_graph
        self.add_last_ckpt = add_last_ckpt
        self.img_key = img_key

    @master_only
    def before_run(self, runner):
        super(PaviLoggerHook, self).before_run(runner)
        # Import lazily so mmcv does not hard-depend on pavi.
        try:
            from pavi import SummaryWriter
        except ImportError:
            raise ImportError('Please run "pip install pavi" to install pavi.')
        self.run_name = runner.work_dir.split('/')[-1]
        if not self.init_kwargs:
            self.init_kwargs = dict()
        self.init_kwargs['name'] = self.run_name
        self.init_kwargs['model'] = runner._model_name
        if runner.meta is not None:
            # Attach the training config (if any) as session text.
            if 'config_dict' in runner.meta:
                config_dict = runner.meta['config_dict']
                assert isinstance(
                    config_dict, dict
                ), f'meta["config_dict"] has to be of a dict, but got {type(config_dict)}'
            elif 'config_file' in runner.meta:
                config_file = runner.meta['config_file']
                config_dict = dict(mmcv.Config.fromfile(config_file))
            else:
                config_dict = None
            if config_dict is not None:
                # Copy before mutating so runner.meta stays untouched.
                config_dict = config_dict.copy()
                config_dict.setdefault('max_iter', runner.max_iters)
                # Round-trip through JSON to drop non-serializable values
                # before dumping to YAML for pavi.
                config_dict = json.loads(
                    mmcv.dump(config_dict, file_format='json'))
                session_text = yaml.dump(config_dict)
                self.init_kwargs['session_text'] = session_text
        self.writer = SummaryWriter(**self.init_kwargs)

    def get_step(self, runner):
        """Get the total training step/epoch."""
        if self.get_mode(runner) == 'val' and self.by_epoch:
            return self.get_epoch(runner)
        else:
            return self.get_iter(runner)

    @master_only
    def log(self, runner):
        tags = self.get_loggable_tags(runner, add_mode=False)
        if tags:
            self.writer.add_scalars(
                self.get_mode(runner), tags, self.get_step(runner))

    @master_only
    def after_run(self, runner):
        try:
            if self.add_last_ckpt:
                ckpt_path = osp.join(runner.work_dir, 'latest.pth')
                if osp.islink(ckpt_path):
                    ckpt_path = osp.join(runner.work_dir,
                                         os.readlink(ckpt_path))
                if osp.isfile(ckpt_path):
                    iteration = runner.epoch if self.by_epoch else runner.iter
                    # BUGFIX: the original ``return``ed the result of
                    # ``add_snapshot_file`` here, so ``writer.close()`` below
                    # was never reached when a snapshot was uploaded.
                    self.writer.add_snapshot_file(
                        tag=self.run_name,
                        snapshot_file_path=ckpt_path,
                        iteration=iteration)
        finally:
            # Always flush and close the writer, even if the snapshot
            # upload above raises.
            self.writer.close()

    @master_only
    def before_epoch(self, runner):
        # Visualize the model graph once, using the first batch of epoch 0.
        if runner.epoch == 0 and self.add_graph:
            if is_module_wrapper(runner.model):
                _model = runner.model.module
            else:
                _model = runner.model
            device = next(_model.parameters()).device
            data = next(iter(runner.data_loader))
            image = data[self.img_key][0:1].to(device)
            with torch.no_grad():
                self.writer.add_graph(_model, image)
@HOOKS.register_module() class SegmindLoggerHook(LoggerHook): 'Class to log metrics to Segmind.\n\n It requires `Segmind`_ to be installed.\n\n Args:\n interval (int): Logging interval (every k iterations). Default: 10.\n ignore_last (bool): Ignore the log of last iterations in each epoch\n if less than `interval`. Default True.\n reset_flag (bool): Whether to clear the output buffer after logging.\n Default False.\n by_epoch (bool): Whether EpochBasedRunner is used. Default True.\n\n .. _Segmind:\n https://docs.segmind.com/python-library\n ' def __init__(self, interval=10, ignore_last=True, reset_flag=False, by_epoch=True): super(SegmindLoggerHook, self).__init__(interval, ignore_last, reset_flag, by_epoch) self.import_segmind() def import_segmind(self): try: import segmind except ImportError: raise ImportError("Please run 'pip install segmind' to install segmind") self.log_metrics = segmind.tracking.fluent.log_metrics self.mlflow_log = segmind.utils.logging_utils.try_mlflow_log @master_only def log(self, runner): tags = self.get_loggable_tags(runner) if tags: self.mlflow_log(self.log_metrics, tags, step=runner.epoch, epoch=runner.epoch)
@HOOKS.register_module()
class TensorboardLoggerHook(LoggerHook):
    """Log metrics to Tensorboard.

    Args:
        log_dir (string): Save directory location. Default: None. If default
            values are used, directory location is ``runner.work_dir``/tf_logs.
        interval (int): Logging interval (every k iterations). Default: True.
        ignore_last (bool): Ignore the log of last iterations in each epoch
            if less than ``interval``. Default: True.
        reset_flag (bool): Whether to clear the output buffer after logging.
            Default: False.
        by_epoch (bool): Whether EpochBasedRunner is used. Default: True.
    """

    def __init__(self,
                 log_dir=None,
                 interval=10,
                 ignore_last=True,
                 reset_flag=False,
                 by_epoch=True):
        super(TensorboardLoggerHook, self).__init__(interval, ignore_last,
                                                    reset_flag, by_epoch)
        self.log_dir = log_dir

    @master_only
    def before_run(self, runner):
        super(TensorboardLoggerHook, self).before_run(runner)
        # parrots and PyTorch < 1.1 lack torch.utils.tensorboard, so fall
        # back to the external tensorboardX package there.
        needs_tensorboardx = (TORCH_VERSION == 'parrots'
                              or digit_version(TORCH_VERSION) <
                              digit_version('1.1'))
        if needs_tensorboardx:
            try:
                from tensorboardX import SummaryWriter
            except ImportError:
                raise ImportError(
                    'Please install tensorboardX to use TensorboardLoggerHook.'
                )
        else:
            try:
                from torch.utils.tensorboard import SummaryWriter
            except ImportError:
                raise ImportError(
                    'Please run "pip install future tensorboard" to install the dependencies to use torch.utils.tensorboard (applicable to PyTorch 1.1 or higher)'
                )
        if self.log_dir is None:
            self.log_dir = osp.join(runner.work_dir, 'tf_logs')
        self.writer = SummaryWriter(self.log_dir)

    @master_only
    def log(self, runner):
        # Strings become text entries, everything else a scalar curve.
        for tag, val in self.get_loggable_tags(runner, allow_text=True).items():
            step = self.get_iter(runner)
            if isinstance(val, str):
                self.writer.add_text(tag, val, step)
            else:
                self.writer.add_scalar(tag, val, step)

    @master_only
    def after_run(self, runner):
        self.writer.close()
@HOOKS.register_module()
class TextLoggerHook(LoggerHook):
    """Logger hook in text.

    In this logger hook, the information will be printed on terminal and
    saved in json file.

    Args:
        by_epoch (bool, optional): Whether EpochBasedRunner is used.
            Default: True.
        interval (int, optional): Logging interval (every k iterations).
            Default: 10.
        ignore_last (bool, optional): Ignore the log of last iterations in
            each epoch if less than :attr:`interval`. Default: True.
        reset_flag (bool, optional): Whether to clear the output buffer after
            logging. Default: False.
        interval_exp_name (int, optional): Logging interval for experiment
            name. Default: 1000.
        out_dir (str, optional): Logs are saved in ``runner.work_dir`` by
            default. If ``out_dir`` is specified, logs will be copied to a
            new directory which is the concatenation of ``out_dir`` and the
            last level directory of ``runner.work_dir``. Default: None.
        out_suffix (str or tuple[str], optional): Those filenames ending with
            ``out_suffix`` will be copied to ``out_dir``.
            Default: ('.log.json', '.log', '.py').
        keep_local (bool, optional): Whether to keep local log when
            :attr:`out_dir` is specified. If False, the local log will be
            removed. Default: True.
        file_client_args (dict, optional): Arguments to instantiate a
            FileClient. See :class:`mmcv.fileio.FileClient` for details.
            Default: None.
    """

    def __init__(self,
                 by_epoch=True,
                 interval=10,
                 ignore_last=True,
                 reset_flag=False,
                 interval_exp_name=1000,
                 out_dir=None,
                 out_suffix=('.log.json', '.log', '.py'),
                 keep_local=True,
                 file_client_args=None):
        super(TextLoggerHook, self).__init__(interval, ignore_last, reset_flag,
                                             by_epoch)
        self.by_epoch = by_epoch
        self.time_sec_tot = 0
        self.interval_exp_name = interval_exp_name
        if out_dir is None and file_client_args is not None:
            # Fixed message: previously read "... is notspecified."
            raise ValueError('file_client_args should be "None" when '
                             '`out_dir` is not specified.')
        self.out_dir = out_dir
        if not (out_dir is None or isinstance(out_dir, str)
                or is_tuple_of(out_dir, str)):
            # Fixed: the original message was not an f-string, so the literal
            # text "{out_dir}" was printed instead of the value.
            raise TypeError('out_dir should be "None" or string or tuple of '
                            f'string, but got {out_dir}')
        self.out_suffix = out_suffix
        self.keep_local = keep_local
        self.file_client_args = file_client_args
        if self.out_dir is not None:
            self.file_client = FileClient.infer_client(file_client_args,
                                                       self.out_dir)

    def before_run(self, runner):
        super(TextLoggerHook, self).before_run(runner)
        if self.out_dir is not None:
            # Re-infer: runner.work_dir is only known now.
            self.file_client = FileClient.infer_client(self.file_client_args,
                                                       self.out_dir)
            basename = osp.basename(runner.work_dir.rstrip(osp.sep))
            self.out_dir = self.file_client.join_path(self.out_dir, basename)
            runner.logger.info(
                f'Text logs will be saved to {self.out_dir} by '
                f'{self.file_client.name} after the training process.')
        self.start_iter = runner.iter
        self.json_log_path = osp.join(runner.work_dir,
                                      f'{runner.timestamp}.log.json')
        if runner.meta is not None:
            self._dump_log(runner.meta, runner)

    def _get_max_memory(self, runner):
        """Return peak GPU memory (MB); reduced over ranks with MAX."""
        device = getattr(runner.model, 'output_device', None)
        mem = torch.cuda.max_memory_allocated(device=device)
        mem_mb = torch.tensor([int(mem) // (1024 * 1024)],
                              dtype=torch.int,
                              device=device)
        if runner.world_size > 1:
            dist.reduce(mem_mb, 0, op=dist.ReduceOp.MAX)
        return mem_mb.item()

    def _log_info(self, log_dict, runner):
        """Format one log record and emit it through ``runner.logger``."""
        # Print experiment name periodically so it is easy to find in logs.
        if runner.meta is not None and 'exp_name' in runner.meta:
            if (self.every_n_iters(runner, self.interval_exp_name)
                    or (self.by_epoch and self.end_of_epoch(runner))):
                exp_info = f"Exp name: {runner.meta['exp_name']}"
                runner.logger.info(exp_info)
        if log_dict['mode'] == 'train':
            if isinstance(log_dict['lr'], dict):
                lr_str = []
                for k, val in log_dict['lr'].items():
                    lr_str.append(f'lr_{k}: {val:.3e}')
                lr_str = ' '.join(lr_str)
            else:
                lr_str = f"lr: {log_dict['lr']:.3e}"
            if self.by_epoch:
                log_str = (f"Epoch [{log_dict['epoch']}]"
                           f"[{log_dict['iter']}/{len(runner.data_loader)}] ")
            else:
                log_str = f"Iter [{log_dict['iter']}/{runner.max_iters}] "
            log_str += f'{lr_str}, '
            if 'time' in log_dict.keys():
                # ETA is estimated from the running average iteration time.
                self.time_sec_tot += (log_dict['time'] * self.interval)
                time_sec_avg = self.time_sec_tot / (
                    runner.iter - self.start_iter + 1)
                eta_sec = time_sec_avg * (runner.max_iters - runner.iter - 1)
                eta_str = str(datetime.timedelta(seconds=int(eta_sec)))
                log_str += f'eta: {eta_str}, '
                log_str += (f"time: {log_dict['time']:.3f}, "
                            f"data_time: {log_dict['data_time']:.3f}, ")
                if torch.cuda.is_available():
                    log_str += f"memory: {log_dict['memory']}, "
        elif self.by_epoch:
            log_str = (f"Epoch({log_dict['mode']}) "
                       f"[{log_dict['epoch']}][{log_dict['iter']}] ")
        else:
            log_str = f"Iter({log_dict['mode']}) [{log_dict['iter']}] "
        log_items = []
        for name, val in log_dict.items():
            # Skip keys that were already rendered above.
            if name in [
                    'mode', 'Epoch', 'iter', 'lr', 'time', 'data_time',
                    'memory', 'epoch'
            ]:
                continue
            if isinstance(val, float):
                val = f'{val:.4f}'
            log_items.append(f'{name}: {val}')
        log_str += ', '.join(log_items)
        runner.logger.info(log_str)

    def _dump_log(self, log_dict, runner):
        """Append one JSON record per line to the json log (rank 0 only)."""
        json_log = OrderedDict()
        for k, v in log_dict.items():
            json_log[k] = self._round_float(v)
        if runner.rank == 0:
            with open(self.json_log_path, 'a+') as f:
                mmcv.dump(json_log, f, file_format='json')
                f.write('\n')

    def _round_float(self, items):
        """Recursively round floats to 5 decimals for compact JSON."""
        if isinstance(items, list):
            return [self._round_float(item) for item in items]
        elif isinstance(items, float):
            return round(items, 5)
        else:
            return items

    def log(self, runner):
        if 'eval_iter_num' in runner.log_buffer.output:
            # During evaluation the inner iter counter is meaningless;
            # an evaluation hook provides the real one.
            cur_iter = runner.log_buffer.output.pop('eval_iter_num')
        else:
            cur_iter = self.get_iter(runner, inner_iter=True)
        log_dict = OrderedDict(
            mode=self.get_mode(runner),
            epoch=self.get_epoch(runner),
            iter=cur_iter)
        cur_lr = runner.current_lr()
        if isinstance(cur_lr, list):
            log_dict['lr'] = cur_lr[0]
        else:
            assert isinstance(cur_lr, dict)
            log_dict['lr'] = {}
            for k, lr_ in cur_lr.items():
                assert isinstance(lr_, list)
                log_dict['lr'].update({k: lr_[0]})
        if 'time' in runner.log_buffer.output:
            if torch.cuda.is_available():
                log_dict['memory'] = self._get_max_memory(runner)
        log_dict = dict(log_dict, **runner.log_buffer.output)
        self._log_info(log_dict, runner)
        self._dump_log(log_dict, runner)
        return log_dict

    def after_run(self, runner):
        # Optionally upload matching log files to out_dir after training.
        if self.out_dir is not None:
            for filename in scandir(runner.work_dir, self.out_suffix, True):
                local_filepath = osp.join(runner.work_dir, filename)
                out_filepath = self.file_client.join_path(
                    self.out_dir, filename)
                with open(local_filepath, 'r') as f:
                    self.file_client.put_text(f.read(), out_filepath)
                runner.logger.info(f'The file {local_filepath} has been '
                                   f'uploaded to {out_filepath}.')
                if not self.keep_local:
                    os.remove(local_filepath)
                    runner.logger.info(f'{local_filepath} was removed due to '
                                       'the `self.keep_local=False`')
@HOOKS.register_module()
class WandbLoggerHook(LoggerHook):
    """Log metrics (and optionally artifacts) with wandb.

    Requires `wandb`_ to be installed.

    Args:
        init_kwargs (dict): Keys passed to ``wandb.init``. See
            https://docs.wandb.ai/ref/python/init for the accepted arguments.
        interval (int): Logging interval (every k iterations). Default: 10.
        ignore_last (bool): Ignore the log of last iterations in each epoch
            if less than ``interval``. Default: True.
        reset_flag (bool): Whether to clear the output buffer after logging.
            Default: False.
        commit (bool): Save the metrics dict to the wandb server and increment
            the step. If false, ``wandb.log`` just updates the current metrics
            dict and nothing is saved until ``wandb.log`` is called with
            ``commit=True``. Default: True.
        by_epoch (bool): Whether EpochBasedRunner is used. Default: True.
        with_step (bool): If True, the step will be logged from
            ``self.get_iters``. Otherwise, step will not be logged.
            Default: True.
        log_artifact (bool): If True, artifacts in {work_dir} will be uploaded
            to wandb after training ends. Default: True.
        out_suffix (str or tuple[str], optional): Those filenames ending with
            ``out_suffix`` will be uploaded to wandb.
            Default: ('.log.json', '.log', '.py').

    .. _wandb:
        https://docs.wandb.ai
    """

    def __init__(self,
                 init_kwargs=None,
                 interval=10,
                 ignore_last=True,
                 reset_flag=False,
                 commit=True,
                 by_epoch=True,
                 with_step=True,
                 log_artifact=True,
                 out_suffix=('.log.json', '.log', '.py')):
        super(WandbLoggerHook, self).__init__(interval, ignore_last,
                                              reset_flag, by_epoch)
        self.import_wandb()
        self.init_kwargs = init_kwargs
        self.commit = commit
        self.with_step = with_step
        self.log_artifact = log_artifact
        self.out_suffix = out_suffix

    def import_wandb(self):
        """Import wandb and keep the module handle on ``self.wandb``."""
        try:
            import wandb
        except ImportError:
            raise ImportError(
                'Please run "pip install wandb" to install wandb')
        self.wandb = wandb

    @master_only
    def before_run(self, runner):
        super(WandbLoggerHook, self).before_run(runner)
        if self.wandb is None:
            self.import_wandb()
        # wandb.init() with no kwargs is equivalent to unpacking an empty
        # dict, so both configured and default startup share one call site.
        init_kwargs = self.init_kwargs if self.init_kwargs else {}
        self.wandb.init(**init_kwargs)

    @master_only
    def log(self, runner):
        tags = self.get_loggable_tags(runner)
        if not tags:
            return
        if self.with_step:
            self.wandb.log(
                tags, step=self.get_iter(runner), commit=self.commit)
        else:
            # Record the step as an ordinary metric instead.
            tags['global_step'] = self.get_iter(runner)
            self.wandb.log(tags, commit=self.commit)

    @master_only
    def after_run(self, runner):
        if self.log_artifact:
            wandb_artifact = self.wandb.Artifact(name='artifacts', type='model')
            for fname in scandir(runner.work_dir, self.out_suffix, True):
                wandb_artifact.add_file(osp.join(runner.work_dir, fname))
            self.wandb.log_artifact(wandb_artifact)
        self.wandb.join()
class LrUpdaterHook(Hook):
    """LR Scheduler in MMCV.

    Args:
        by_epoch (bool): LR changes epoch by epoch.
        warmup (string): Type of warmup used. It can be None (use no warmup),
            'constant', 'linear' or 'exp'.
        warmup_iters (int): The number of iterations or epochs that warmup
            lasts.
        warmup_ratio (float): LR used at the beginning of warmup equals to
            warmup_ratio * initial_lr.
        warmup_by_epoch (bool): When warmup_by_epoch == True, warmup_iters
            means the number of epochs that warmup lasts, otherwise means the
            number of iterations that warmup lasts.
    """

    def __init__(self,
                 by_epoch=True,
                 warmup=None,
                 warmup_iters=0,
                 warmup_ratio=0.1,
                 warmup_by_epoch=False):
        if warmup is not None:
            if warmup not in ['constant', 'linear', 'exp']:
                # Fixed message: 'exp' is a supported warmup type but the
                # original error text only mentioned 'constant' and 'linear'.
                raise ValueError(
                    f'"{warmup}" is not a supported type for warming up, '
                    'valid types are "constant", "linear" and "exp"')
            assert warmup_iters > 0, \
                '"warmup_iters" must be a positive integer'
            assert 0 < warmup_ratio <= 1.0, \
                '"warmup_ratio" must be in range (0,1]'
        self.by_epoch = by_epoch
        self.warmup = warmup
        self.warmup_iters = warmup_iters
        self.warmup_ratio = warmup_ratio
        self.warmup_by_epoch = warmup_by_epoch
        if self.warmup_by_epoch:
            # Translated to iterations in before_train_epoch, once the
            # epoch length is known.
            self.warmup_epochs = self.warmup_iters
            self.warmup_iters = None
        else:
            self.warmup_epochs = None
        self.base_lr = []  # initial lr for all param groups
        self.regular_lr = []  # expected lr if no warmup is applied

    def _set_lr(self, runner, lr_groups):
        """Write ``lr_groups`` into the optimizer(s)' param groups."""
        if isinstance(runner.optimizer, dict):
            for k, optim in runner.optimizer.items():
                for param_group, lr in zip(optim.param_groups, lr_groups[k]):
                    param_group['lr'] = lr
        else:
            for param_group, lr in zip(runner.optimizer.param_groups,
                                       lr_groups):
                param_group['lr'] = lr

    def get_lr(self, runner, base_lr):
        """Compute the current lr for one base lr; subclass hook."""
        raise NotImplementedError

    def get_regular_lr(self, runner):
        """Compute the scheduled (non-warmup) lr for every param group."""
        if isinstance(runner.optimizer, dict):
            lr_groups = {}
            for k in runner.optimizer.keys():
                _lr_group = [
                    self.get_lr(runner, _base_lr)
                    for _base_lr in self.base_lr[k]
                ]
                lr_groups.update({k: _lr_group})
            return lr_groups
        else:
            return [self.get_lr(runner, _base_lr) for _base_lr in self.base_lr]

    def get_warmup_lr(self, cur_iters):
        """Compute the warmup lr for the current iteration."""

        def _get_warmup_lr(cur_iters, regular_lr):
            if self.warmup == 'constant':
                warmup_lr = [_lr * self.warmup_ratio for _lr in regular_lr]
            elif self.warmup == 'linear':
                # Linearly ramp from warmup_ratio * lr up to lr.
                k = (1 - cur_iters / self.warmup_iters) * (1 -
                                                           self.warmup_ratio)
                warmup_lr = [_lr * (1 - k) for _lr in regular_lr]
            elif self.warmup == 'exp':
                k = self.warmup_ratio**(1 - cur_iters / self.warmup_iters)
                warmup_lr = [_lr * k for _lr in regular_lr]
            return warmup_lr

        if isinstance(self.regular_lr, dict):
            lr_groups = {}
            for key, regular_lr in self.regular_lr.items():
                lr_groups[key] = _get_warmup_lr(cur_iters, regular_lr)
            return lr_groups
        else:
            return _get_warmup_lr(cur_iters, self.regular_lr)

    def before_run(self, runner):
        # NOTE: when resuming from a checkpoint, if 'initial_lr' is not
        # saved, it will be set according to the optimizer params.
        if isinstance(runner.optimizer, dict):
            self.base_lr = {}
            for k, optim in runner.optimizer.items():
                for group in optim.param_groups:
                    group.setdefault('initial_lr', group['lr'])
                _base_lr = [
                    group['initial_lr'] for group in optim.param_groups
                ]
                self.base_lr.update({k: _base_lr})
        else:
            for group in runner.optimizer.param_groups:
                group.setdefault('initial_lr', group['lr'])
            self.base_lr = [
                group['initial_lr'] for group in runner.optimizer.param_groups
            ]

    def before_train_epoch(self, runner):
        if self.warmup_iters is None:
            # warmup_by_epoch: convert epochs to iterations now that the
            # dataloader length is available.
            epoch_len = len(runner.data_loader)
            self.warmup_iters = self.warmup_epochs * epoch_len
        if not self.by_epoch:
            return
        self.regular_lr = self.get_regular_lr(runner)
        self._set_lr(runner, self.regular_lr)

    def before_train_iter(self, runner):
        cur_iter = runner.iter
        if not self.by_epoch:
            self.regular_lr = self.get_regular_lr(runner)
            if self.warmup is None or cur_iter >= self.warmup_iters:
                self._set_lr(runner, self.regular_lr)
            else:
                warmup_lr = self.get_warmup_lr(cur_iter)
                self._set_lr(runner, warmup_lr)
        else:
            # Epoch-based schedule: only warmup needs per-iter updates.
            if self.warmup is None or cur_iter > self.warmup_iters:
                return
            elif cur_iter == self.warmup_iters:
                self._set_lr(runner, self.regular_lr)
            else:
                warmup_lr = self.get_warmup_lr(cur_iter)
                self._set_lr(runner, warmup_lr)
@HOOKS.register_module()
class FixedLrUpdaterHook(LrUpdaterHook):
    """Schedule that keeps the learning rate fixed at its initial value."""

    def __init__(self, **kwargs):
        super(FixedLrUpdaterHook, self).__init__(**kwargs)

    def get_lr(self, runner, base_lr):
        # A fixed schedule never modifies the base learning rate.
        return base_lr
@HOOKS.register_module()
class StepLrUpdaterHook(LrUpdaterHook):
    """Step LR scheduler with min_lr clipping.

    Args:
        step (int | list[int]): Step to decay the LR. If an int value is
            given, regard it as the decay interval. If a list is given, decay
            LR at these steps.
        gamma (float, optional): Decay LR ratio. Default: 0.1.
        min_lr (float, optional): Minimum LR value to keep. If LR after decay
            is lower than `min_lr`, it will be clipped to this value. If None
            is given, we don't perform lr clipping. Default: None.
    """

    def __init__(self, step, gamma=0.1, min_lr=None, **kwargs):
        if isinstance(step, list):
            assert mmcv.is_list_of(step, int)
            assert all(s > 0 for s in step)
        elif isinstance(step, int):
            assert step > 0
        else:
            raise TypeError('"step" must be a list or integer')
        self.step = step
        self.gamma = gamma
        self.min_lr = min_lr
        super(StepLrUpdaterHook, self).__init__(**kwargs)

    def get_lr(self, runner, base_lr):
        progress = runner.epoch if self.by_epoch else runner.iter

        # ``exp`` counts how many decay milestones have passed.
        if isinstance(self.step, int):
            exp = progress // self.step
        else:
            exp = len(self.step)
            for index, milestone in enumerate(self.step):
                if progress < milestone:
                    exp = index
                    break

        lr = base_lr * self.gamma**exp
        return lr if self.min_lr is None else max(lr, self.min_lr)
@HOOKS.register_module()
class ExpLrUpdaterHook(LrUpdaterHook):
    """Exponential decay: lr = base_lr * gamma**progress."""

    def __init__(self, gamma, **kwargs):
        self.gamma = gamma
        super(ExpLrUpdaterHook, self).__init__(**kwargs)

    def get_lr(self, runner, base_lr):
        # Progress is measured in epochs or iterations depending on by_epoch.
        if self.by_epoch:
            progress = runner.epoch
        else:
            progress = runner.iter
        return base_lr * self.gamma**progress
@HOOKS.register_module()
class PolyLrUpdaterHook(LrUpdaterHook):
    """Polynomial decay towards ``min_lr`` with exponent ``power``."""

    def __init__(self, power=1.0, min_lr=0.0, **kwargs):
        self.power = power
        self.min_lr = min_lr
        super(PolyLrUpdaterHook, self).__init__(**kwargs)

    def get_lr(self, runner, base_lr):
        if self.by_epoch:
            progress, max_progress = runner.epoch, runner.max_epochs
        else:
            progress, max_progress = runner.iter, runner.max_iters
        # coeff shrinks from 1 to 0 as training progresses.
        coeff = (1 - progress / max_progress)**self.power
        return (base_lr - self.min_lr) * coeff + self.min_lr
@HOOKS.register_module()
class InvLrUpdaterHook(LrUpdaterHook):
    """Inverse decay: lr = base_lr * (1 + gamma * progress)**(-power)."""

    def __init__(self, gamma, power=1.0, **kwargs):
        self.gamma = gamma
        self.power = power
        super(InvLrUpdaterHook, self).__init__(**kwargs)

    def get_lr(self, runner, base_lr):
        if self.by_epoch:
            progress = runner.epoch
        else:
            progress = runner.iter
        return base_lr * (1 + self.gamma * progress)**(-self.power)
@HOOKS.register_module()
class CosineAnnealingLrUpdaterHook(LrUpdaterHook):
    """Cosine annealing towards ``min_lr`` or ``min_lr_ratio * base_lr``.

    Exactly one of ``min_lr`` and ``min_lr_ratio`` must be given.
    """

    def __init__(self, min_lr=None, min_lr_ratio=None, **kwargs):
        # XOR: exactly one of the two target specifications is allowed.
        assert (min_lr is None) ^ (min_lr_ratio is None)
        self.min_lr = min_lr
        self.min_lr_ratio = min_lr_ratio
        super(CosineAnnealingLrUpdaterHook, self).__init__(**kwargs)

    def get_lr(self, runner, base_lr):
        if self.by_epoch:
            progress, max_progress = runner.epoch, runner.max_epochs
        else:
            progress, max_progress = runner.iter, runner.max_iters
        if self.min_lr_ratio is not None:
            target_lr = base_lr * self.min_lr_ratio
        else:
            target_lr = self.min_lr
        return annealing_cos(base_lr, target_lr, progress / max_progress)
@HOOKS.register_module()
class FlatCosineAnnealingLrUpdaterHook(LrUpdaterHook):
    """Flat + Cosine lr schedule.

    Modified from https://github.com/fastai/fastai/blob/master/fastai/callback/schedule.py#L128 # noqa: E501

    Args:
        start_percent (float): When to start annealing the learning rate
            after the percentage of the total training steps.
            The value should be in range [0, 1).
            Default: 0.75
        min_lr (float, optional): The minimum lr. Default: None.
        min_lr_ratio (float, optional): The ratio of minimum lr to the base
            lr. Either `min_lr` or `min_lr_ratio` should be specified.
            Default: None.
    """

    def __init__(self, start_percent=0.75, min_lr=None, min_lr_ratio=None,
                 **kwargs):
        assert (min_lr is None) ^ (min_lr_ratio is None)
        if (start_percent < 0 or start_percent > 1
                or not isinstance(start_percent, float)):
            raise ValueError('expected float between 0 and 1 start_percent, '
                             f'but got {start_percent}')
        self.start_percent = start_percent
        self.min_lr = min_lr
        self.min_lr_ratio = min_lr_ratio
        super(FlatCosineAnnealingLrUpdaterHook, self).__init__(**kwargs)

    def get_lr(self, runner, base_lr):
        # The annealing phase begins ``start_percent`` of the way through.
        if self.by_epoch:
            start = round(runner.max_epochs * self.start_percent)
            progress = runner.epoch - start
            max_progress = runner.max_epochs - start
        else:
            start = round(runner.max_iters * self.start_percent)
            progress = runner.iter - start
            max_progress = runner.max_iters - start

        if self.min_lr_ratio is not None:
            target_lr = base_lr * self.min_lr_ratio
        else:
            target_lr = self.min_lr

        # Before the annealing phase the lr stays flat at base_lr.
        if progress < 0:
            return base_lr
        return annealing_cos(base_lr, target_lr, progress / max_progress)
@HOOKS.register_module()
class CosineRestartLrUpdaterHook(LrUpdaterHook):
    """Cosine annealing with restarts learning rate scheme.

    Args:
        periods (list[int]): Periods for each cosine annealing cycle.
        restart_weights (tuple[float] | list[float], optional): Restart
            weights at each restart iteration. Default: (1, ).
        min_lr (float, optional): The minimum lr. Default: None.
        min_lr_ratio (float, optional): The ratio of minimum lr to the base
            lr. Either `min_lr` or `min_lr_ratio` should be specified.
            Default: None.
    """

    def __init__(self,
                 periods,
                 restart_weights=(1, ),  # was a mutable default ([1])
                 min_lr=None,
                 min_lr_ratio=None,
                 **kwargs):
        assert (min_lr is None) ^ (min_lr_ratio is None)
        self.periods = periods
        self.min_lr = min_lr
        self.min_lr_ratio = min_lr_ratio
        self.restart_weights = restart_weights
        assert len(self.periods) == len(self.restart_weights), \
            'periods and restart_weights should have the same length.'
        super(CosineRestartLrUpdaterHook, self).__init__(**kwargs)
        # Prefix sums of the periods, used to locate the current cycle.
        self.cumulative_periods = [
            sum(self.periods[0:i + 1]) for i in range(0, len(self.periods))
        ]

    def get_lr(self, runner, base_lr):
        if self.by_epoch:
            progress = runner.epoch
        else:
            progress = runner.iter

        if self.min_lr_ratio is not None:
            target_lr = base_lr * self.min_lr_ratio
        else:
            target_lr = self.min_lr

        idx = get_position_from_periods(progress, self.cumulative_periods)
        current_weight = self.restart_weights[idx]
        # Start of the current cycle in absolute progress units.
        nearest_restart = 0 if idx == 0 else self.cumulative_periods[idx - 1]
        current_periods = self.periods[idx]

        alpha = min((progress - nearest_restart) / current_periods, 1)
        return annealing_cos(base_lr, target_lr, alpha, current_weight)
def get_position_from_periods(iteration, cumulative_periods):
    """Get the position from a period list.

    It will return the index of the right-closest number in the period list.
    For example, the cumulative_periods = [100, 200, 300, 400],
    if iteration == 50, return 0;
    if iteration == 210, return 2;
    if iteration == 300, return 3.

    Args:
        iteration (int): Current iteration.
        cumulative_periods (list[int]): Cumulative period list.

    Returns:
        int: The position of the right-closest number in the period list.
    """
    for index, cumulative in enumerate(cumulative_periods):
        if iteration < cumulative:
            return index
    raise ValueError(f'Current iteration {iteration} exceeds '
                     f'cumulative_periods {cumulative_periods}')
@HOOKS.register_module()
class CyclicLrUpdaterHook(LrUpdaterHook):
    """Cyclic LR Scheduler.

    Implement the cyclical learning rate policy (CLR) described in
    https://arxiv.org/pdf/1506.01186.pdf

    Different from the original paper, we use cosine annealing rather than
    triangular policy inside a cycle. This improves the performance in the
    3D detection area.

    Args:
        by_epoch (bool, optional): Whether to update LR by epoch.
        target_ratio (tuple[float], optional): Relative ratio of the highest
            LR and the lowest LR to the initial LR.
        cyclic_times (int, optional): Number of cycles during training.
        step_ratio_up (float, optional): The ratio of the increasing process
            of LR in the total cycle.
        anneal_strategy (str, optional): {'cos', 'linear'}
            Specifies the annealing strategy: 'cos' for cosine annealing,
            'linear' for linear annealing. Default: 'cos'.
        gamma (float, optional): Cycle decay ratio. Default: 1.
            It takes values in the range (0, 1]. The difference between the
            maximum learning rate and the minimum learning rate decreases
            periodically when it is less than 1. `New in version 1.4.4.`
    """

    def __init__(self, by_epoch=False, target_ratio=(10, 0.0001), cyclic_times=1, step_ratio_up=0.4, anneal_strategy='cos', gamma=1, **kwargs):
        # Normalize target_ratio to a (up_ratio, down_ratio) pair; a scalar
        # or 1-tuple gets a derived down ratio of up_ratio / 1e5.
        if isinstance(target_ratio, float):
            target_ratio = (target_ratio, (target_ratio / 100000.0))
        elif isinstance(target_ratio, tuple):
            target_ratio = ((target_ratio[0], (target_ratio[0] / 100000.0)) if (len(target_ratio) == 1) else target_ratio)
        else:
            raise ValueError(f'target_ratio should be either float or tuple, got {type(target_ratio)}')
        assert (len(target_ratio) == 2), '"target_ratio" must be list or tuple of two floats'
        assert (0 <= step_ratio_up < 1.0), '"step_ratio_up" must be in range [0,1)'
        assert (0 < gamma <= 1), '"gamma" must be in range (0, 1]'
        self.target_ratio = target_ratio
        self.cyclic_times = cyclic_times
        self.step_ratio_up = step_ratio_up
        self.gamma = gamma
        # Filled in before_run, once runner.max_iters is known.
        self.max_iter_per_phase = None
        self.lr_phases = []  # init lr_phases
        if (anneal_strategy not in ['cos', 'linear']):
            raise ValueError(f'anneal_strategy must be one of "cos" or "linear", instead got {anneal_strategy}')
        elif (anneal_strategy == 'cos'):
            self.anneal_func = annealing_cos
        elif (anneal_strategy == 'linear'):
            self.anneal_func = annealing_linear
        assert (not by_epoch), 'currently only support "by_epoch" = False'
        super(CyclicLrUpdaterHook, self).__init__(by_epoch, **kwargs)

    def before_run(self, runner):
        super(CyclicLrUpdaterHook, self).before_run(runner)
        # Split training into equal cycles and each cycle into an "up"
        # phase followed by a "down" phase; phases are stored as
        # [start_iter, end_iter, start_ratio, end_ratio].
        self.max_iter_per_phase = (runner.max_iters // self.cyclic_times)
        iter_up_phase = int((self.step_ratio_up * self.max_iter_per_phase))
        self.lr_phases.append([0, iter_up_phase, 1, self.target_ratio[0]])
        self.lr_phases.append([iter_up_phase, self.max_iter_per_phase, self.target_ratio[0], self.target_ratio[1]])

    def get_lr(self, runner, base_lr):
        # Position within the current cycle and index of the cycle itself.
        curr_iter = (runner.iter % self.max_iter_per_phase)
        curr_cycle = (runner.iter // self.max_iter_per_phase)
        # gamma < 1 shrinks the peak ratio toward 1 on each later cycle.
        scale = (self.gamma ** curr_cycle)
        for (start_iter, end_iter, start_ratio, end_ratio) in self.lr_phases:
            if (start_iter <= curr_iter < end_iter):
                # Only the peak endpoint of each phase is decayed: the end
                # of the "up" phase and the start of the "down" phase.
                if (start_iter == 0):
                    end_ratio = ((1 - scale) + (end_ratio * scale))
                else:
                    start_ratio = ((1 - scale) + (start_ratio * scale))
                progress = (curr_iter - start_iter)
                return self.anneal_func((base_lr * start_ratio), (base_lr * end_ratio), (progress / (end_iter - start_iter)))
@HOOKS.register_module()
class OneCycleLrUpdaterHook(LrUpdaterHook):
    """One Cycle LR Scheduler.

    The 1cycle learning rate policy changes the learning rate after every
    batch. The one cycle learning rate policy is described in
    https://arxiv.org/pdf/1708.07120.pdf

    Args:
        max_lr (float or list): Upper learning rate boundaries in the cycle
            for each parameter group.
        total_steps (int, optional): The total number of steps in the cycle.
            Note that if a value is not provided here, it will be the
            max_iter of runner. Default: None.
        pct_start (float): The percentage of the cycle (in number of steps)
            spent increasing the learning rate. Default: 0.3
        anneal_strategy (str): {'cos', 'linear'}
            Specifies the annealing strategy: 'cos' for cosine annealing,
            'linear' for linear annealing. Default: 'cos'
        div_factor (float): Determines the initial learning rate via
            initial_lr = max_lr/div_factor. Default: 25
        final_div_factor (float): Determines the minimum learning rate via
            min_lr = initial_lr/final_div_factor. Default: 1e4
        three_phase (bool): If three_phase is True, use a third phase of the
            schedule to annihilate the learning rate according to
            final_div_factor instead of modifying the second phase (the
            first two phases will be symmetrical about the step indicated by
            pct_start). Default: False
    """

    def __init__(self,
                 max_lr,
                 total_steps=None,
                 pct_start=0.3,
                 anneal_strategy='cos',
                 div_factor=25,
                 final_div_factor=10000.0,
                 three_phase=False,
                 **kwargs):
        # This scheduler only makes sense per-iteration.
        if 'by_epoch' not in kwargs:
            kwargs['by_epoch'] = False
        else:
            assert not kwargs['by_epoch'], \
                'currently only support "by_epoch" = False'
        if not isinstance(max_lr, (numbers.Number, list, dict)):
            # Fixed message: the accepted number type was missing from the
            # original error text.
            raise ValueError('the type of max_lr must be the one of number, '
                             f'list or dict, but got {type(max_lr)}')
        self._max_lr = max_lr
        if total_steps is not None:
            if not isinstance(total_steps, int):
                # Fixed message: original read "butgot".
                raise ValueError('the type of total_steps must be int, '
                                 f'but got {type(total_steps)}')
            self.total_steps = total_steps
        if pct_start < 0 or pct_start > 1 or not isinstance(pct_start, float):
            raise ValueError('expected float between 0 and 1 pct_start, '
                             f'but got {pct_start}')
        self.pct_start = pct_start
        if anneal_strategy not in ['cos', 'linear']:
            raise ValueError('anneal_strategy must be one of "cos" or '
                             f'"linear", instead got {anneal_strategy}')
        elif anneal_strategy == 'cos':
            self.anneal_func = annealing_cos
        elif anneal_strategy == 'linear':
            self.anneal_func = annealing_linear
        self.div_factor = div_factor
        self.final_div_factor = final_div_factor
        self.three_phase = three_phase
        self.lr_phases = []  # [end_iter, start_lr_factor, end_lr_factor]
        super(OneCycleLrUpdaterHook, self).__init__(**kwargs)

    def before_run(self, runner):
        if hasattr(self, 'total_steps'):
            total_steps = self.total_steps
        else:
            total_steps = runner.max_iters
        if total_steps < runner.max_iters:
            raise ValueError(
                'The total steps must be greater than or equal to max '
                f'iterations {runner.max_iters} of runner, but total steps '
                f'is {total_steps}.')

        # Record the starting lr (max_lr / div_factor) per param group.
        if isinstance(runner.optimizer, dict):
            self.base_lr = {}
            for k, optim in runner.optimizer.items():
                _max_lr = format_param(k, optim, self._max_lr)
                self.base_lr[k] = [lr / self.div_factor for lr in _max_lr]
                for group, lr in zip(optim.param_groups, self.base_lr[k]):
                    group.setdefault('initial_lr', lr)
        else:
            k = type(runner.optimizer).__name__
            _max_lr = format_param(k, runner.optimizer, self._max_lr)
            self.base_lr = [lr / self.div_factor for lr in _max_lr]
            for group, lr in zip(runner.optimizer.param_groups, self.base_lr):
                group.setdefault('initial_lr', lr)

        # Phase boundaries mirror torch.optim.lr_scheduler.OneCycleLR.
        if self.three_phase:
            self.lr_phases.append(
                [float(self.pct_start * total_steps) - 1, 1, self.div_factor])
            self.lr_phases.append([
                float(2 * self.pct_start * total_steps) - 2, self.div_factor,
                1
            ])
            self.lr_phases.append(
                [total_steps - 1, 1, 1 / self.final_div_factor])
        else:
            self.lr_phases.append(
                [float(self.pct_start * total_steps) - 1, 1, self.div_factor])
            self.lr_phases.append([
                total_steps - 1, self.div_factor, 1 / self.final_div_factor
            ])

    def get_lr(self, runner, base_lr):
        curr_iter = runner.iter
        start_iter = 0
        for i, (end_iter, start_lr, end_lr) in enumerate(self.lr_phases):
            if curr_iter <= end_iter:
                pct = (curr_iter - start_iter) / (end_iter - start_iter)
                lr = self.anneal_func(base_lr * start_lr, base_lr * end_lr,
                                      pct)
                break
            start_iter = end_iter
        return lr
def annealing_cos(start, end, factor, weight=1):
    """Calculate annealing cos learning rate.

    Cosine anneal from ``weight * start + (1 - weight) * end`` to ``end`` as
    percentage goes from 0.0 to 1.0.

    Args:
        start (float): The starting learning rate of the cosine annealing.
        end (float): The ending learning rate of the cosine annealing.
        factor (float): The coefficient of `pi` when calculating the current
            percentage. Range from 0.0 to 1.0.
        weight (float, optional): The combination factor of `start` and `end`
            when calculating the actual starting learning rate. Default to 1.
    """
    # cos_out runs from 2 (factor=0) down to 0 (factor=1).
    cos_out = cos(pi * factor) + 1
    return end + 0.5 * weight * (start - end) * cos_out
def annealing_linear(start, end, factor):
    """Calculate annealing linear learning rate.

    Linear anneal from ``start`` to ``end`` as percentage goes from 0.0
    to 1.0.

    Args:
        start (float): The starting learning rate of the linear annealing.
        end (float): The ending learning rate of the linear annealing.
        factor (float): The current annealing percentage. Range from 0.0
            to 1.0.
    """
    # Plain linear interpolation between the two endpoints.
    return (1 - factor) * start + factor * end
def format_param(name, optim, param):
    """Broadcast/validate a per-group hyperparameter for an optimizer.

    A scalar is replicated once per param group; a list/tuple must match the
    number of param groups; a dict is indexed by ``name``.
    """
    if isinstance(param, numbers.Number):
        return [param] * len(optim.param_groups)
    if isinstance(param, (list, tuple)):
        if len(param) != len(optim.param_groups):
            raise ValueError(f'expected {len(optim.param_groups)} values '
                             f'for {name}, got {len(param)}')
        return param
    # Otherwise treat it as a mapping keyed by the optimizer/group name.
    if name not in param:
        raise KeyError(f'{name} is not found in {param.keys()}')
    return param[name]
@HOOKS.register_module()
class EmptyCacheHook(Hook):
    """Release unoccupied cached GPU memory at configurable hook points.

    Args:
        before_epoch (bool): Empty the CUDA cache before each epoch.
            Default: False.
        after_epoch (bool): Empty the CUDA cache after each epoch.
            Default: True.
        after_iter (bool): Empty the CUDA cache after each iteration.
            Default: False.
    """

    def __init__(self, before_epoch=False, after_epoch=True, after_iter=False):
        # Remember at which hook points the CUDA cache should be released.
        self._before_epoch = before_epoch
        self._after_epoch = after_epoch
        self._after_iter = after_iter

    @staticmethod
    def _empty_cache():
        torch.cuda.empty_cache()

    def before_epoch(self, runner):
        if self._before_epoch:
            self._empty_cache()

    def after_epoch(self, runner):
        if self._after_epoch:
            self._empty_cache()

    def after_iter(self, runner):
        if self._after_iter:
            self._empty_cache()
class MomentumUpdaterHook(Hook):
    """Base hook that adjusts optimizer momentum during training.

    Subclasses implement :meth:`get_momentum` to define the schedule.
    Plain ``momentum`` optimizers (e.g. SGD) are updated via the
    ``momentum`` key; Adam-style optimizers via the first ``betas`` entry.

    Args:
        by_epoch (bool): Whether to update momentum by epoch. Default: True.
        warmup (str, optional): Warmup type: None (no warmup), 'constant',
            'linear' or 'exp'. Default: None.
        warmup_iters (int): Number of iterations warmup lasts. Default: 0.
        warmup_ratio (float): Momentum used at the start of warmup equals
            ``warmup_ratio * initial_momentum``. Default: 0.9.
    """

    def __init__(self, by_epoch=True, warmup=None, warmup_iters=0, warmup_ratio=0.9):
        if warmup is not None:
            if warmup not in ['constant', 'linear', 'exp']:
                # BUGFIX: old message omitted the supported "exp" mode.
                raise ValueError(
                    f'"{warmup}" is not a supported type for warming up, valid types are "constant", "linear" and "exp"')
            assert warmup_iters > 0, '"warmup_iters" must be a positive integer'
            # BUGFIX: old assert message referred to "warmup_momentum",
            # but the checked parameter is ``warmup_ratio``.
            assert 0 < warmup_ratio <= 1.0, '"warmup_ratio" must be in range (0,1]'
        self.by_epoch = by_epoch
        self.warmup = warmup
        self.warmup_iters = warmup_iters
        self.warmup_ratio = warmup_ratio
        # Initial momentum of every param group: a list, or a dict of lists
        # when the runner holds a dict of optimizers.
        self.base_momentum = []
        # Momentum computed by the regular (non-warmup) schedule.
        self.regular_momentum = []

    def _set_momentum(self, runner, momentum_groups):
        """Write ``momentum_groups`` into the optimizer(s)' param groups."""
        if isinstance(runner.optimizer, dict):
            for k, optim in runner.optimizer.items():
                for param_group, mom in zip(optim.param_groups,
                                            momentum_groups[k]):
                    if 'momentum' in param_group.keys():
                        param_group['momentum'] = mom
                    elif 'betas' in param_group.keys():
                        param_group['betas'] = (mom, param_group['betas'][1])
        else:
            for param_group, mom in zip(runner.optimizer.param_groups,
                                        momentum_groups):
                if 'momentum' in param_group.keys():
                    param_group['momentum'] = mom
                elif 'betas' in param_group.keys():
                    param_group['betas'] = (mom, param_group['betas'][1])

    def get_momentum(self, runner, base_momentum):
        """Compute the momentum for one param group. Must be overridden."""
        raise NotImplementedError

    def get_regular_momentum(self, runner):
        """Compute the scheduled momentum for every param group."""
        if isinstance(runner.optimizer, dict):
            momentum_groups = {}
            for k in runner.optimizer.keys():
                _momentum_group = [
                    self.get_momentum(runner, _base_momentum)
                    for _base_momentum in self.base_momentum[k]
                ]
                momentum_groups.update({k: _momentum_group})
            return momentum_groups
        else:
            return [
                self.get_momentum(runner, _base_momentum)
                for _base_momentum in self.base_momentum
            ]

    def get_warmup_momentum(self, cur_iters):
        """Compute the warmup momentum at iteration ``cur_iters``.

        Note that, unlike lr warmup which scales lr *down*, momentum warmup
        divides the regular momentum, i.e. scales it *up* towards the start.
        """

        def _get_warmup_momentum(cur_iters, regular_momentum):
            if self.warmup == 'constant':
                warmup_momentum = [
                    _momentum / self.warmup_ratio
                    for _momentum in regular_momentum
                ]
            elif self.warmup == 'linear':
                # k decreases linearly to 0 as cur_iters -> warmup_iters.
                k = (1 - cur_iters / self.warmup_iters) * (1 - self.warmup_ratio)
                warmup_momentum = [
                    _momentum / (1 - k) for _momentum in regular_momentum
                ]
            elif self.warmup == 'exp':
                k = self.warmup_ratio**(1 - cur_iters / self.warmup_iters)
                warmup_momentum = [
                    _momentum / k for _momentum in regular_momentum
                ]
            return warmup_momentum

        if isinstance(self.regular_momentum, dict):
            momentum_groups = {}
            for key, regular_momentum in self.regular_momentum.items():
                momentum_groups[key] = _get_warmup_momentum(
                    cur_iters, regular_momentum)
            return momentum_groups
        else:
            return _get_warmup_momentum(cur_iters, self.regular_momentum)

    def before_run(self, runner):
        # Snapshot the initial momentum of every param group so that
        # resuming training does not compound scheduled updates.
        if isinstance(runner.optimizer, dict):
            self.base_momentum = {}
            for k, optim in runner.optimizer.items():
                for group in optim.param_groups:
                    if 'momentum' in group.keys():
                        group.setdefault('initial_momentum', group['momentum'])
                    else:
                        group.setdefault('initial_momentum', group['betas'][0])
                _base_momentum = [
                    group['initial_momentum'] for group in optim.param_groups
                ]
                self.base_momentum.update({k: _base_momentum})
        else:
            for group in runner.optimizer.param_groups:
                if 'momentum' in group.keys():
                    group.setdefault('initial_momentum', group['momentum'])
                else:
                    group.setdefault('initial_momentum', group['betas'][0])
            self.base_momentum = [
                group['initial_momentum']
                for group in runner.optimizer.param_groups
            ]

    def before_train_epoch(self, runner):
        if not self.by_epoch:
            return
        self.regular_momentum = self.get_regular_momentum(runner)
        self._set_momentum(runner, self.regular_momentum)

    def before_train_iter(self, runner):
        cur_iter = runner.iter
        if not self.by_epoch:
            self.regular_momentum = self.get_regular_momentum(runner)
            if self.warmup is None or cur_iter >= self.warmup_iters:
                self._set_momentum(runner, self.regular_momentum)
            else:
                warmup_momentum = self.get_warmup_momentum(cur_iter)
                self._set_momentum(runner, warmup_momentum)
        elif self.by_epoch:
            # Epoch-wise schedule: only the warmup is applied per-iter.
            if self.warmup is None or cur_iter > self.warmup_iters:
                return
            elif cur_iter == self.warmup_iters:
                self._set_momentum(runner, self.regular_momentum)
            else:
                warmup_momentum = self.get_warmup_momentum(cur_iter)
                self._set_momentum(runner, warmup_momentum)
@HOOKS.register_module()
class StepMomentumUpdaterHook(MomentumUpdaterHook):
    """Step momentum scheduler with min value clipping.

    Args:
        step (int | list[int]): Step to decay the momentum. If an int value is
            given, regard it as the decay interval. If a list is given, decay
            momentum at these steps.
        gamma (float, optional): Decay momentum ratio. Default: 0.5.
        min_momentum (float, optional): Minimum momentum value to keep. If
            momentum after decay is lower than this value, it will be clipped
            accordingly. If None is given, we don't perform momentum clipping.
            (BUGFIX: the old docstring said "lr clipping".)
            Default: None.
    """

    def __init__(self, step, gamma=0.5, min_momentum=None, **kwargs):
        if isinstance(step, list):
            assert mmcv.is_list_of(step, int)
            assert all(s > 0 for s in step)
        elif isinstance(step, int):
            assert step > 0
        else:
            raise TypeError('"step" must be a list or integer')
        self.step = step
        self.gamma = gamma
        self.min_momentum = min_momentum
        super(StepMomentumUpdaterHook, self).__init__(**kwargs)

    def get_momentum(self, runner, base_momentum):
        progress = runner.epoch if self.by_epoch else runner.iter

        # ``exp`` is the number of decays applied so far.
        if isinstance(self.step, int):
            exp = progress // self.step
        else:
            # ``self.step`` is an increasing list of milestones; the exponent
            # is the index of the first milestone not yet reached.
            exp = len(self.step)
            for i, s in enumerate(self.step):
                if progress < s:
                    exp = i
                    break

        momentum = base_momentum * (self.gamma**exp)
        if self.min_momentum is not None:
            # Clip to the floor value instead of decaying below it.
            momentum = max(momentum, self.min_momentum)
        return momentum
@HOOKS.register_module()
class CosineAnnealingMomentumUpdaterHook(MomentumUpdaterHook):
    """Cosine-anneal momentum from its initial value to a target value.

    Exactly one of ``min_momentum`` and ``min_momentum_ratio`` must be
    specified.

    Args:
        min_momentum (float, optional): The target (final) momentum.
        min_momentum_ratio (float, optional): The target momentum expressed
            as a ratio of the initial momentum.
    """

    def __init__(self, min_momentum=None, min_momentum_ratio=None, **kwargs):
        # XOR: exactly one of the two target specifications is allowed.
        assert (min_momentum is None) ^ (min_momentum_ratio is None)
        self.min_momentum = min_momentum
        self.min_momentum_ratio = min_momentum_ratio
        super(CosineAnnealingMomentumUpdaterHook, self).__init__(**kwargs)

    def get_momentum(self, runner, base_momentum):
        # Progress is measured in epochs or iterations depending on config.
        if self.by_epoch:
            progress, max_progress = runner.epoch, runner.max_epochs
        else:
            progress, max_progress = runner.iter, runner.max_iters

        if self.min_momentum_ratio is not None:
            target_momentum = base_momentum * self.min_momentum_ratio
        else:
            target_momentum = self.min_momentum
        return annealing_cos(base_momentum, target_momentum,
                             progress / max_progress)
@HOOKS.register_module()
class CyclicMomentumUpdaterHook(MomentumUpdaterHook):
    """Cyclic momentum Scheduler.

    Implement the cyclical momentum scheduler policy described in
    https://arxiv.org/pdf/1708.07120.pdf

    This momentum scheduler usually used together with the CyclicLRUpdater
    to improve the performance in the 3D detection area.

    Args:
        target_ratio (tuple[float]): Relative ratio of the lowest momentum and
            the highest momentum to the initial momentum.
        cyclic_times (int): Number of cycles during training
        step_ratio_up (float): The ratio of the increasing process of momentum
            in the total cycle.
        by_epoch (bool): Whether to update momentum by epoch.
        anneal_strategy (str, optional): {'cos', 'linear'}
            Specifies the annealing strategy: 'cos' for cosine annealing,
            'linear' for linear annealing. Default: 'cos'.
        gamma (float, optional): Cycle decay ratio. Default: 1.
            It takes values in the range (0, 1]. The difference between the
            maximum learning rate and the minimum learning rate decreases
            periodically when it is less than 1. `New in version 1.4.4.`
    """

    def __init__(self,
                 by_epoch=False,
                 target_ratio=((0.85 / 0.95), 1),
                 cyclic_times=1,
                 step_ratio_up=0.4,
                 anneal_strategy='cos',
                 gamma=1,
                 **kwargs):
        # Normalize ``target_ratio`` to a 2-tuple; a scalar (or 1-tuple) gets
        # a tiny second entry so the down-phase anneals towards ~0.
        if isinstance(target_ratio, float):
            target_ratio = (target_ratio, (target_ratio / 100000.0))
        elif isinstance(target_ratio, tuple):
            target_ratio = ((target_ratio[0], (target_ratio[0] / 100000.0))
                            if (len(target_ratio) == 1) else target_ratio)
        else:
            raise ValueError(f'target_ratio should be either float or tuple, got {type(target_ratio)}')
        assert (len(target_ratio) == 2), '"target_ratio" must be list or tuple of two floats'
        assert (0 <= step_ratio_up < 1.0), '"step_ratio_up" must be in range [0,1)'
        self.target_ratio = target_ratio
        self.cyclic_times = cyclic_times
        self.step_ratio_up = step_ratio_up
        self.gamma = gamma
        # Filled in ``before_run`` once ``runner.max_iters`` is known.
        self.momentum_phases = []
        if (anneal_strategy not in ['cos', 'linear']):
            raise ValueError(f'anneal_strategy must be one of "cos" or "linear", instead got {anneal_strategy}')
        elif (anneal_strategy == 'cos'):
            self.anneal_func = annealing_cos
        elif (anneal_strategy == 'linear'):
            self.anneal_func = annealing_linear
        assert (not by_epoch), 'currently only support "by_epoch" = False'
        super(CyclicMomentumUpdaterHook, self).__init__(by_epoch, **kwargs)

    def before_run(self, runner):
        super(CyclicMomentumUpdaterHook, self).before_run(runner)
        # Split each cycle into an increasing phase (up to ``iter_up_phase``)
        # and a decreasing phase (the remainder of the cycle).
        max_iter_per_phase = (runner.max_iters // self.cyclic_times)
        iter_up_phase = int((self.step_ratio_up * max_iter_per_phase))
        self.max_iter_per_phase = max_iter_per_phase
        # Each entry: [start_iter, end_iter, start_ratio, end_ratio].
        self.momentum_phases.append(
            [0, iter_up_phase, 1, self.target_ratio[0]])
        self.momentum_phases.append(
            [iter_up_phase, max_iter_per_phase,
             self.target_ratio[0], self.target_ratio[1]])

    def get_momentum(self, runner, base_momentum):
        # Position within the current cycle and the index of the cycle.
        curr_iter = (runner.iter % self.max_iter_per_phase)
        curr_cycle = (runner.iter // self.max_iter_per_phase)
        # ``gamma`` shrinks the swing of later cycles towards ratio 1.
        scale = (self.gamma ** curr_cycle)
        for (start_iter, end_iter, start_ratio, end_ratio) in self.momentum_phases:
            if (start_iter <= curr_iter < end_iter):
                # Rescale only the endpoint that deviates from ratio 1:
                # the up-phase's end, or the down-phase's start.
                if (start_iter == 0):
                    end_ratio = ((1 - scale) + (end_ratio * scale))
                else:
                    start_ratio = ((1 - scale) + (start_ratio * scale))
                progress = (curr_iter - start_iter)
                return self.anneal_func((base_momentum * start_ratio),
                                        (base_momentum * end_ratio),
                                        (progress / (end_iter - start_iter)))
@HOOKS.register_module()
class OneCycleMomentumUpdaterHook(MomentumUpdaterHook):
    """OneCycle momentum Scheduler.

    This momentum scheduler usually used together with the OneCycleLrUpdater
    to improve the performance.

    Args:
        base_momentum (float or list): Lower momentum boundaries in the cycle
            for each parameter group. Note that momentum is cycled inversely
            to learning rate; at the peak of a cycle, momentum is
            'base_momentum' and learning rate is 'max_lr'.
            Default: 0.85
        max_momentum (float or list): Upper momentum boundaries in the cycle
            for each parameter group. Functionally,
            it defines the cycle amplitude (max_momentum - base_momentum).
            Note that momentum is cycled inversely
            to learning rate; at the start of a cycle, momentum is
            'max_momentum' and learning rate is 'base_lr'
            Default: 0.95
        pct_start (float): The percentage of the cycle (in number of steps)
            spent increasing the learning rate.
            Default: 0.3
        anneal_strategy (str): {'cos', 'linear'}
            Specifies the annealing strategy: 'cos' for cosine annealing,
            'linear' for linear annealing.
            Default: 'cos'
        three_phase (bool): If three_phase is True, use a third phase of the
            schedule to annihilate the learning rate according to
            final_div_factor instead of modifying the second phase (the first
            two phases will be symmetrical about the step indicated by
            pct_start).
            Default: False
    """

    def __init__(self,
                 base_momentum=0.85,
                 max_momentum=0.95,
                 pct_start=0.3,
                 anneal_strategy='cos',
                 three_phase=False,
                 **kwargs):
        # This scheduler is iteration-based only.
        if 'by_epoch' not in kwargs:
            kwargs['by_epoch'] = False
        else:
            assert not kwargs['by_epoch'], 'currently only support "by_epoch" = False'
        if not isinstance(base_momentum, (float, list, dict)):
            raise ValueError('base_momentum must be the type among of float,list or dict.')
        self._base_momentum = base_momentum
        if not isinstance(max_momentum, (float, list, dict)):
            raise ValueError('max_momentum must be the type among of float,list or dict.')
        self._max_momentum = max_momentum
        if pct_start < 0 or pct_start > 1 or not isinstance(pct_start, float):
            raise ValueError(f'Expected float between 0 and 1 pct_start, but got {pct_start}')
        self.pct_start = pct_start
        if anneal_strategy not in ['cos', 'linear']:
            # BUGFIX: "must by one of" -> "must be one of".
            raise ValueError(f'anneal_strategy must be one of "cos" or "linear", instead got {anneal_strategy}')
        elif anneal_strategy == 'cos':
            self.anneal_func = annealing_cos
        elif anneal_strategy == 'linear':
            self.anneal_func = annealing_linear
        self.three_phase = three_phase
        # Filled in ``before_run`` once ``runner.max_iters`` is known.
        self.momentum_phases = []
        super(OneCycleMomentumUpdaterHook, self).__init__(**kwargs)

    def before_run(self, runner):
        # Install per-group base/max momentum bounds and build the phases.
        if isinstance(runner.optimizer, dict):
            for k, optim in runner.optimizer.items():
                if ('momentum' not in optim.defaults
                        and 'betas' not in optim.defaults):
                    # BUGFIX: "withoption" -> "with option" in the message.
                    raise ValueError('optimizer must support momentum with option enabled')
                self.use_beta1 = 'betas' in optim.defaults
                _base_momentum = format_param(k, optim, self._base_momentum)
                _max_momentum = format_param(k, optim, self._max_momentum)
                for group, b_momentum, m_momentum in zip(
                        optim.param_groups, _base_momentum, _max_momentum):
                    if self.use_beta1:
                        _, beta2 = group['betas']
                        group['betas'] = (m_momentum, beta2)
                    else:
                        group['momentum'] = m_momentum
                    group['base_momentum'] = b_momentum
                    group['max_momentum'] = m_momentum
        else:
            optim = runner.optimizer
            if ('momentum' not in optim.defaults
                    and 'betas' not in optim.defaults):
                raise ValueError('optimizer must support momentum with option enabled')
            self.use_beta1 = 'betas' in optim.defaults
            k = type(optim).__name__
            _base_momentum = format_param(k, optim, self._base_momentum)
            _max_momentum = format_param(k, optim, self._max_momentum)
            for group, b_momentum, m_momentum in zip(
                    optim.param_groups, _base_momentum, _max_momentum):
                if self.use_beta1:
                    _, beta2 = group['betas']
                    group['betas'] = (m_momentum, beta2)
                else:
                    group['momentum'] = m_momentum
                group['base_momentum'] = b_momentum
                group['max_momentum'] = m_momentum

        # Each phase anneals between the per-group bounds named below.
        if self.three_phase:
            self.momentum_phases.append({
                'end_iter': float(self.pct_start * runner.max_iters) - 1,
                'start_momentum': 'max_momentum',
                'end_momentum': 'base_momentum'
            })
            self.momentum_phases.append({
                'end_iter': float(2 * self.pct_start * runner.max_iters) - 2,
                'start_momentum': 'base_momentum',
                'end_momentum': 'max_momentum'
            })
            self.momentum_phases.append({
                'end_iter': runner.max_iters - 1,
                'start_momentum': 'max_momentum',
                'end_momentum': 'max_momentum'
            })
        else:
            self.momentum_phases.append({
                'end_iter': float(self.pct_start * runner.max_iters) - 1,
                'start_momentum': 'max_momentum',
                'end_momentum': 'base_momentum'
            })
            self.momentum_phases.append({
                'end_iter': runner.max_iters - 1,
                'start_momentum': 'base_momentum',
                'end_momentum': 'max_momentum'
            })

    def _set_momentum(self, runner, momentum_groups):
        """Write ``momentum_groups`` into the optimizer(s)' param groups."""
        if isinstance(runner.optimizer, dict):
            for k, optim in runner.optimizer.items():
                for param_group, mom in zip(optim.param_groups,
                                            momentum_groups[k]):
                    if 'momentum' in param_group.keys():
                        param_group['momentum'] = mom
                    elif 'betas' in param_group.keys():
                        param_group['betas'] = (mom, param_group['betas'][1])
        else:
            for param_group, mom in zip(runner.optimizer.param_groups,
                                        momentum_groups):
                if 'momentum' in param_group.keys():
                    param_group['momentum'] = mom
                elif 'betas' in param_group.keys():
                    param_group['betas'] = (mom, param_group['betas'][1])

    def get_momentum(self, runner, param_group):
        """Anneal within the phase containing the current iteration."""
        curr_iter = runner.iter
        start_iter = 0
        for i, phase in enumerate(self.momentum_phases):
            end_iter = phase['end_iter']
            # The last phase catches any residual iterations.
            if curr_iter <= end_iter or i == len(self.momentum_phases) - 1:
                pct = (curr_iter - start_iter) / (end_iter - start_iter)
                momentum = self.anneal_func(
                    param_group[phase['start_momentum']],
                    param_group[phase['end_momentum']], pct)
                break
            start_iter = end_iter
        return momentum

    def get_regular_momentum(self, runner):
        if isinstance(runner.optimizer, dict):
            momentum_groups = {}
            for k, optim in runner.optimizer.items():
                _momentum_group = [
                    self.get_momentum(runner, param_group)
                    for param_group in optim.param_groups
                ]
                momentum_groups.update({k: _momentum_group})
            return momentum_groups
        else:
            momentum_groups = []
            for param_group in runner.optimizer.param_groups:
                momentum_groups.append(self.get_momentum(runner, param_group))
            return momentum_groups
@HOOKS.register_module()
class OptimizerHook(Hook):
    """A hook contains custom operations for the optimizer.

    Args:
        grad_clip (dict, optional): A config dict to control the clip_grad.
            Default: None.
        detect_anomalous_params (bool): This option is only used for
            debugging which will slow down the training speed.
            Detect anomalous parameters that are not included in
            the computational graph with `loss` as the root.
            There are two cases

            - Parameters were not used during
              forward pass.
            - Parameters were not used to produce
              loss.
            Default: False.
    """

    def __init__(self, grad_clip=None, detect_anomalous_params=False):
        self.grad_clip = grad_clip
        self.detect_anomalous_params = detect_anomalous_params

    def clip_grads(self, params):
        # Only parameters that actually received gradients can be clipped.
        params = [p for p in params if p.requires_grad and p.grad is not None]
        if len(params) > 0:
            return clip_grad.clip_grad_norm_(params, **self.grad_clip)

    def after_train_iter(self, runner):
        runner.optimizer.zero_grad()
        if self.detect_anomalous_params:
            self.detect_anomalous_parameters(runner.outputs['loss'], runner)
        runner.outputs['loss'].backward()

        if self.grad_clip is not None:
            grad_norm = self.clip_grads(runner.model.parameters())
            if grad_norm is not None:
                # Record the gradient norm so logger hooks can report it.
                runner.log_buffer.update({'grad_norm': float(grad_norm)},
                                         runner.outputs['num_samples'])
        runner.optimizer.step()

    def detect_anomalous_parameters(self, loss, runner):
        """Log every trainable parameter that is absent from ``loss``'s graph."""
        logger = runner.logger
        parameters_in_graph = set()
        visited = set()

        def traverse(grad_fn):
            # Depth-first walk of the autograd graph rooted at ``loss``.
            if grad_fn is None or grad_fn in visited:
                return
            visited.add(grad_fn)
            if hasattr(grad_fn, 'variable'):
                parameters_in_graph.add(grad_fn.variable)
            parents = grad_fn.next_functions
            if parents is not None:
                for parent in parents:
                    traverse(parent[0])

        traverse(loss.grad_fn)
        for n, p in runner.model.named_parameters():
            if p not in parameters_in_graph and p.requires_grad:
                logger.log(
                    level=logging.ERROR,
                    msg=f'{n} with shape {p.size()} is not in the computational graph \n')
@HOOKS.register_module()
class GradientCumulativeOptimizerHook(OptimizerHook):
    """Optimizer Hook implements multi-iters gradient cumulating.

    Args:
        cumulative_iters (int, optional): Num of gradient cumulative iters.
            The optimizer will step every `cumulative_iters` iters.
            Defaults to 1.

    Examples:
        >>> # Use cumulative_iters to simulate a large batch size
        >>> # It is helpful when the hardware cannot handle a large batch size.
        >>> loader = DataLoader(data, batch_size=64)
        >>> optim_hook = GradientCumulativeOptimizerHook(cumulative_iters=4)
        >>> # almost equals to
        >>> loader = DataLoader(data, batch_size=256)
        >>> optim_hook = OptimizerHook()
    """

    def __init__(self, cumulative_iters=1, **kwargs):
        super(GradientCumulativeOptimizerHook, self).__init__(**kwargs)
        assert (isinstance(cumulative_iters, int) and (cumulative_iters > 0)), f'cumulative_iters only accepts positive int, but got {type(cumulative_iters)} instead.'
        self.cumulative_iters = cumulative_iters
        # Computed lazily in ``_init`` from the remaining iterations.
        self.divisible_iters = 0
        self.remainder_iters = 0
        self.initialized = False

    def has_batch_norm(self, module):
        # Recursively check whether ``module`` contains any BatchNorm layer;
        # BN statistics are updated every forward, so accumulation changes
        # results slightly.
        if isinstance(module, _BatchNorm):
            return True
        for m in module.children():
            if self.has_batch_norm(m):
                return True
        return False

    def _init(self, runner):
        if ((runner.iter % self.cumulative_iters) != 0):
            runner.logger.warning('Resume iter number is not divisible by cumulative_iters in GradientCumulativeOptimizerHook, which means the gradient of some iters is lost and the result may be influenced slightly.')
        if (self.has_batch_norm(runner.model) and (self.cumulative_iters > 1)):
            runner.logger.warning('GradientCumulativeOptimizerHook may slightly decrease performance if the model has BatchNorm layers.')
        # Split the remaining iterations into full accumulation windows plus
        # a (possibly shorter) trailing window.
        residual_iters = (runner.max_iters - runner.iter)
        self.divisible_iters = ((residual_iters // self.cumulative_iters) * self.cumulative_iters)
        self.remainder_iters = (residual_iters - self.divisible_iters)
        self.initialized = True

    def after_train_iter(self, runner):
        if (not self.initialized):
            self._init(runner)
        # Average the loss over the size of the current accumulation window
        # so the summed gradient matches a single large-batch step.
        if (runner.iter < self.divisible_iters):
            loss_factor = self.cumulative_iters
        else:
            loss_factor = self.remainder_iters
        loss = runner.outputs['loss']
        loss = (loss / loss_factor)
        loss.backward()
        # Step and reset only at window boundaries (or at the very last iter).
        if (self.every_n_iters(runner, self.cumulative_iters) or self.is_last_iter(runner)):
            if (self.grad_clip is not None):
                grad_norm = self.clip_grads(runner.model.parameters())
                if (grad_norm is not None):
                    runner.log_buffer.update({'grad_norm': float(grad_norm)}, runner.outputs['num_samples'])
            runner.optimizer.step()
            runner.optimizer.zero_grad()
@HOOKS.register_module()
class ProfilerHook(Hook):
    """Profiler to analyze performance during training.

    PyTorch Profiler is a tool that allows the collection of the performance
    metrics during the training. More details on Profiler can be found at
    https://pytorch.org/docs/1.8.1/profiler.html#torch.profiler.profile

    Args:
        by_epoch (bool): Profile performance by epoch or by iteration.
            Default: True.
        profile_iters (int): Number of iterations for profiling.
            If ``by_epoch=True``, profile_iters indicates that they are the
            first profile_iters epochs at the beginning of the
            training, otherwise it indicates the first profile_iters
            iterations. Default: 1.
        activities (list[str]): List of activity groups (CPU, CUDA) to use in
            profiling. Default: ['cpu', 'cuda'].
        schedule (dict, optional): Config of generating the callable schedule.
            if schedule is None, profiler will not add step markers into the
            trace and table view. Default: None.
        on_trace_ready (callable, dict): Either a handler or a dict of generate
            handler. Default: None.
        record_shapes (bool): Save information about operator's input shapes.
            Default: False.
        profile_memory (bool): Track tensor memory allocation/deallocation.
            Default: False.
        with_stack (bool): Record source information (file and line number)
            for the ops. Default: False.
        with_flops (bool): Use formula to estimate the FLOPS of specific
            operators (matrix multiplication and 2D convolution).
            Default: False.
        json_trace_path (str, optional): Exports the collected trace in Chrome
            JSON format. Default: None.

    Example:
        >>> runner = ...  # instantiate a Runner
        >>> # tensorboard trace
        >>> trace_config = dict(type='tb_trace', dir_name='work_dir')
        >>> profiler_config = dict(on_trace_ready=trace_config)
        >>> runner.register_profiler_hook(profiler_config)
        >>> runner.run(data_loaders=[trainloader], workflow=[('train', 1)])
    """

    def __init__(self,
                 by_epoch: bool = True,
                 profile_iters: int = 1,
                 activities: List[str] = ['cpu', 'cuda'],
                 schedule: Optional[dict] = None,
                 on_trace_ready: Optional[Union[(Callable, dict)]] = None,
                 record_shapes: bool = False,
                 profile_memory: bool = False,
                 with_stack: bool = False,
                 with_flops: bool = False,
                 json_trace_path: Optional[str] = None) -> None:
        # ``torch.profiler`` only exists from torch 1.8.1 onwards.
        try:
            from torch import profiler
        except ImportError:
            raise ImportError(f'profiler is the new feature of torch1.8.1, but your version is {torch.__version__}')
        assert isinstance(by_epoch, bool), '``by_epoch`` should be a boolean.'
        self.by_epoch = by_epoch
        if (profile_iters < 1):
            raise ValueError(f'profile_iters should be greater than 0, but got {profile_iters}')
        self.profile_iters = profile_iters
        if (not isinstance(activities, list)):
            raise ValueError(f'activities should be list, but got {type(activities)}')
        # Translate the string names into ProfilerActivity enum members.
        self.activities = []
        for activity in activities:
            activity = activity.lower()
            if (activity == 'cpu'):
                self.activities.append(profiler.ProfilerActivity.CPU)
            elif (activity == 'cuda'):
                self.activities.append(profiler.ProfilerActivity.CUDA)
            else:
                raise ValueError(f'activity should be "cpu" or "cuda", but got {activity}')
        if (schedule is not None):
            self.schedule = profiler.schedule(**schedule)
        else:
            self.schedule = None
        self.on_trace_ready = on_trace_ready
        self.record_shapes = record_shapes
        self.profile_memory = profile_memory
        self.with_stack = with_stack
        self.with_flops = with_flops
        self.json_trace_path = json_trace_path

    @master_only
    def before_run(self, runner):
        if (self.by_epoch and (runner.max_epochs < self.profile_iters)):
            raise ValueError(f'self.profile_iters should not be greater than {runner.max_epochs}')
        if ((not self.by_epoch) and (runner.max_iters < self.profile_iters)):
            raise ValueError(f'self.profile_iters should not be greater than {runner.max_iters}')
        # Resolve ``on_trace_ready`` into a concrete callable handler.
        if callable(self.on_trace_ready):
            _on_trace_ready = self.on_trace_ready
        elif isinstance(self.on_trace_ready, dict):
            trace_cfg = self.on_trace_ready.copy()
            trace_type = trace_cfg.pop('type')
            if (trace_type == 'log_trace'):
                # Print an aggregated table of the profiled ops.
                def _log_handler(prof):
                    print(prof.key_averages().table(**trace_cfg))
                _on_trace_ready = _log_handler
            elif (trace_type == 'tb_trace'):
                try:
                    import torch_tb_profiler
                except ImportError:
                    raise ImportError('please run "pip install torch-tb-profiler" to install torch_tb_profiler')
                _on_trace_ready = torch.profiler.tensorboard_trace_handler(**trace_cfg)
            else:
                raise ValueError(f'trace_type should be "log_trace" or "tb_trace", but got {trace_type}')
        elif (self.on_trace_ready is None):
            _on_trace_ready = None
        else:
            raise ValueError(f'on_trace_ready should be handler, dict or None, but got {type(self.on_trace_ready)}')
        if (self.by_epoch and (runner.max_epochs > 1)):
            warnings.warn(f'profiler will profile {runner.max_epochs} epochs instead of 1 epoch. Since profiler will slow down the training, it is recommended to train 1 epoch with ProfilerHook and adjust your setting according to the profiler summary. \nDuring normal training (epoch > 1), you may disable the ProfilerHook.')
        self.profiler = torch.profiler.profile(
            activities=self.activities,
            schedule=self.schedule,
            on_trace_ready=_on_trace_ready,
            record_shapes=self.record_shapes,
            profile_memory=self.profile_memory,
            with_stack=self.with_stack,
            with_flops=self.with_flops)
        # Enter the context manager manually; the matching ``__exit__`` runs
        # in ``after_train_epoch`` / ``after_train_iter`` below.
        self.profiler.__enter__()
        runner.logger.info('profiler is profiling...')

    @master_only
    def after_train_epoch(self, runner):
        # Stop profiling after the configured number of epochs.
        if (self.by_epoch and (runner.epoch == (self.profile_iters - 1))):
            runner.logger.info('profiler may take a few minutes...')
            self.profiler.__exit__(None, None, None)
            if (self.json_trace_path is not None):
                self.profiler.export_chrome_trace(self.json_trace_path)

    @master_only
    def after_train_iter(self, runner):
        self.profiler.step()
        # Stop profiling after the configured number of iterations.
        if ((not self.by_epoch) and (runner.iter == (self.profile_iters - 1))):
            runner.logger.info('profiler may take a few minutes...')
            self.profiler.__exit__(None, None, None)
            if (self.json_trace_path is not None):
                self.profiler.export_chrome_trace(self.json_trace_path)
@HOOKS.register_module()
class DistSamplerSeedHook(Hook):
    """Data-loading sampler for distributed training.

    When distributed training, it is only useful in conjunction with
    :obj:`EpochBasedRunner`, while :obj:`IterBasedRunner` achieves the same
    purpose with :obj:`IterLoader`.
    """

    def before_epoch(self, runner):
        sampler = runner.data_loader.sampler
        if hasattr(sampler, 'set_epoch'):
            # In the case of a distributed sampler, reseed shuffling per epoch.
            sampler.set_epoch(runner.epoch)
        elif hasattr(runner.data_loader.batch_sampler.sampler, 'set_epoch'):
            # The sampler may be wrapped inside a batch sampler instead.
            runner.data_loader.batch_sampler.sampler.set_epoch(runner.epoch)
@HOOKS.register_module()
class SyncBuffersHook(Hook):
    """Synchronize model buffers such as running_mean and running_var in BN at
    the end of each epoch.

    Args:
        distributed (bool): Whether distributed training is used. It is
            effective only for distributed training. Defaults to True.
    """

    def __init__(self, distributed=True):
        self.distributed = distributed

    def after_epoch(self, runner):
        """All-reduce model buffers at the end of each epoch."""
        if not self.distributed:
            return
        allreduce_params(runner.model.buffers())
class IterLoader:
    """Wrap a dataloader so it can be consumed as an endless iterator.

    When the underlying loader is exhausted, the epoch counter is advanced,
    the sampler (if any) is reseeded, and iteration restarts transparently.
    """

    def __init__(self, dataloader):
        self._dataloader = dataloader
        self.iter_loader = iter(self._dataloader)
        self._epoch = 0

    @property
    def epoch(self):
        # Number of times the underlying loader has been exhausted.
        return self._epoch

    def __next__(self):
        try:
            return next(self.iter_loader)
        except StopIteration:
            pass
        # The epoch finished: bump the counter, reseed distributed shuffling
        # and restart iteration with a fresh iterator.
        self._epoch += 1
        if hasattr(self._dataloader.sampler, 'set_epoch'):
            self._dataloader.sampler.set_epoch(self._epoch)
        # Give dataloader worker processes a moment to shut down.
        time.sleep(2)
        self.iter_loader = iter(self._dataloader)
        return next(self.iter_loader)

    def __len__(self):
        return len(self._dataloader)
@RUNNERS.register_module()
class IterBasedRunner(BaseRunner):
    """Iteration-based Runner.

    This runner train models iteration by iteration.
    """

    def train(self, data_loader, **kwargs):
        # Run exactly one training iteration pulled from ``data_loader``.
        self.model.train()
        self.mode = 'train'
        self.data_loader = data_loader
        # Keep the runner's epoch in sync with the wrapping IterLoader.
        self._epoch = data_loader.epoch
        data_batch = next(data_loader)
        self.call_hook('before_train_iter')
        outputs = self.model.train_step(data_batch, self.optimizer, **kwargs)
        if (not isinstance(outputs, dict)):
            raise TypeError('model.train_step() must return a dict')
        if ('log_vars' in outputs):
            self.log_buffer.update(outputs['log_vars'], outputs['num_samples'])
        self.outputs = outputs
        self.call_hook('after_train_iter')
        self._inner_iter += 1
        self._iter += 1

    @torch.no_grad()
    def val(self, data_loader, **kwargs):
        # Run exactly one validation iteration (gradients disabled).
        self.model.eval()
        self.mode = 'val'
        self.data_loader = data_loader
        data_batch = next(data_loader)
        self.call_hook('before_val_iter')
        outputs = self.model.val_step(data_batch, **kwargs)
        if (not isinstance(outputs, dict)):
            raise TypeError('model.val_step() must return a dict')
        if ('log_vars' in outputs):
            self.log_buffer.update(outputs['log_vars'], outputs['num_samples'])
        self.outputs = outputs
        self.call_hook('after_val_iter')
        self._inner_iter += 1

    def run(self, data_loaders, workflow, max_iters=None, **kwargs):
        """Start running.

        Args:
            data_loaders (list[:obj:`DataLoader`]): Dataloaders for training
                and validation.
            workflow (list[tuple]): A list of (phase, iters) to specify the
                running order and iterations. E.g, [('train', 10000),
                ('val', 1000)] means running 10000 iterations for training and
                1000 iterations for validation, iteratively.
        """
        assert isinstance(data_loaders, list)
        assert mmcv.is_list_of(workflow, tuple)
        assert (len(data_loaders) == len(workflow))
        if (max_iters is not None):
            warnings.warn('setting max_iters in run is deprecated, please set max_iters in runner_config', DeprecationWarning)
            self._max_iters = max_iters
        assert (self._max_iters is not None), 'max_iters must be specified during instantiation'
        work_dir = (self.work_dir if (self.work_dir is not None) else 'NONE')
        self.logger.info('Start running, host: %s, work_dir: %s', get_host_info(), work_dir)
        self.logger.info('Hooks will be executed in the following order:\n%s', self.get_hook_info())
        self.logger.info('workflow: %s, max: %d iters', workflow, self._max_iters)
        self.call_hook('before_run')
        # Wrap each dataloader so exhaustion restarts it transparently.
        iter_loaders = [IterLoader(x) for x in data_loaders]
        self.call_hook('before_epoch')
        # Cycle through the workflow phases until max_iters is reached.
        while (self.iter < self._max_iters):
            for (i, flow) in enumerate(workflow):
                self._inner_iter = 0
                (mode, iters) = flow
                if ((not isinstance(mode, str)) or (not hasattr(self, mode))):
                    raise ValueError('runner has no method named "{}" to run a workflow'.format(mode))
                # ``mode`` names one of the phase methods, e.g. train/val.
                iter_runner = getattr(self, mode)
                for _ in range(iters):
                    if ((mode == 'train') and (self.iter >= self._max_iters)):
                        break
                    iter_runner(iter_loaders[i], **kwargs)
        # Brief pause so asynchronous hooks (e.g. loggers) can finish.
        time.sleep(1)
        self.call_hook('after_epoch')
        self.call_hook('after_run')

    def resume(self, checkpoint, resume_optimizer=True, map_location='default'):
        """Resume model from checkpoint.

        Args:
            checkpoint (str): Checkpoint to resume from.
            resume_optimizer (bool, optional): Whether resume the optimizer(s)
                if the checkpoint file includes optimizer(s).
                Default to True.
            map_location (str, optional): Same as :func:`torch.load`.
                Default to 'default'.
        """
        if (map_location == 'default'):
            # Map checkpoint tensors onto this process's current GPU.
            device_id = torch.cuda.current_device()
            checkpoint = self.load_checkpoint(checkpoint, map_location=(lambda storage, loc: storage.cuda(device_id)))
        else:
            checkpoint = self.load_checkpoint(checkpoint, map_location=map_location)
        self._epoch = checkpoint['meta']['epoch']
        self._iter = checkpoint['meta']['iter']
        self._inner_iter = checkpoint['meta']['iter']
        if (('optimizer' in checkpoint) and resume_optimizer):
            if isinstance(self.optimizer, Optimizer):
                self.optimizer.load_state_dict(checkpoint['optimizer'])
            elif isinstance(self.optimizer, dict):
                for k in self.optimizer.keys():
                    self.optimizer[k].load_state_dict(checkpoint['optimizer'][k])
            else:
                raise TypeError(f'Optimizer should be dict or torch.optim.Optimizer but got {type(self.optimizer)}')
        self.logger.info(f'resumed from epoch: {self.epoch}, iter {self.iter}')

    def save_checkpoint(self, out_dir, filename_tmpl='iter_{}.pth', meta=None, save_optimizer=True, create_symlink=True):
        """Save checkpoint to file.

        Args:
            out_dir (str): Directory to save checkpoint files.
            filename_tmpl (str, optional): Checkpoint file template.
                Defaults to 'iter_{}.pth'.
            meta (dict, optional): Metadata to be saved in checkpoint.
                Defaults to None.
            save_optimizer (bool, optional): Whether save optimizer.
                Defaults to True.
            create_symlink (bool, optional): Whether create symlink to the
                latest checkpoint file. Defaults to True.
        """
        if (meta is None):
            meta = {}
        elif (not isinstance(meta, dict)):
            raise TypeError(f'meta should be a dict or None, but got {type(meta)}')
        if (self.meta is not None):
            meta.update(self.meta)
        # Counters are saved 1-based so resuming starts at the next step.
        meta.update(epoch=(self.epoch + 1), iter=self.iter)
        filename = filename_tmpl.format((self.iter + 1))
        filepath = osp.join(out_dir, filename)
        optimizer = (self.optimizer if save_optimizer else None)
        save_checkpoint(self.model, filepath, optimizer=optimizer, meta=meta)
        if create_symlink:
            dst_file = osp.join(out_dir, 'latest.pth')
            # Windows restricts symlink creation; copy the file instead.
            if (platform.system() != 'Windows'):
                mmcv.symlink(filename, dst_file)
            else:
                shutil.copy(filepath, dst_file)

    def register_training_hooks(self, lr_config, optimizer_config=None, checkpoint_config=None, log_config=None, momentum_config=None, custom_hooks_config=None):
        """Register default hooks for iter-based training.

        Checkpoint hook, optimizer stepper hook and logger hooks will be set to
        `by_epoch=False` by default.

        Default hooks include:

        +----------------------+-------------------------+
        | Hooks                | Priority                |
        +======================+=========================+
        | LrUpdaterHook        | VERY_HIGH (10)          |
        +----------------------+-------------------------+
        | MomentumUpdaterHook  | HIGH (30)               |
        +----------------------+-------------------------+
        | OptimizerStepperHook | ABOVE_NORMAL (40)       |
        +----------------------+-------------------------+
        | CheckpointSaverHook  | NORMAL (50)             |
        +----------------------+-------------------------+
        | IterTimerHook        | LOW (70)                |
        +----------------------+-------------------------+
        | LoggerHook(s)        | VERY_LOW (90)           |
        +----------------------+-------------------------+
        | CustomHook(s)        | defaults to NORMAL (50) |
        +----------------------+-------------------------+

        If custom hooks have same priority with default hooks, custom hooks
        will be triggered after default hooks.
        """
        # Force iteration-based behavior on the configs that default to epoch.
        if (checkpoint_config is not None):
            checkpoint_config.setdefault('by_epoch', False)
        if (lr_config is not None):
            lr_config.setdefault('by_epoch', False)
        if (log_config is not None):
            for info in log_config['hooks']:
                info.setdefault('by_epoch', False)
        super(IterBasedRunner, self).register_training_hooks(
            lr_config=lr_config,
            momentum_config=momentum_config,
            optimizer_config=optimizer_config,
            checkpoint_config=checkpoint_config,
            log_config=log_config,
            timer_config=IterTimerHook(),
            custom_hooks_config=custom_hooks_config)
class LogBuffer():
    """Accumulate scalar logs and compute (weighted) running averages.

    ``val_history`` keeps every value pushed for each key; ``n_history``
    keeps the matching sample counts so averages can be weighted.
    ``output`` holds the most recently computed averages and ``ready``
    flags whether ``output`` is up to date.
    """

    def __init__(self):
        self.val_history = OrderedDict()
        self.n_history = OrderedDict()
        self.output = OrderedDict()
        self.ready = False

    def clear(self):
        """Drop all history and any computed output."""
        self.val_history.clear()
        self.n_history.clear()
        self.clear_output()

    def clear_output(self):
        """Drop computed averages only; history is kept."""
        self.output.clear()
        self.ready = False

    def update(self, vars, count=1):
        """Append a dict of scalar values, each weighted by ``count``."""
        assert isinstance(vars, dict)
        for key, value in vars.items():
            # Create both parallel histories lazily on first sight of a key.
            self.val_history.setdefault(key, []).append(value)
            self.n_history.setdefault(key, []).append(count)

    def average(self, n=0):
        """Average latest n values or all values."""
        assert n >= 0
        for key, history in self.val_history.items():
            # n == 0 slices the whole history ([-0:] == [0:]).
            values = np.array(history[-n:])
            counts = np.array(self.n_history[key][-n:])
            self.output[key] = np.sum(values * counts) / np.sum(counts)
        self.ready = True
def register_torch_optimizers():
    """Register every optimizer class found in ``torch.optim``.

    Scans ``torch.optim`` for subclasses of ``torch.optim.Optimizer`` and
    registers each one into the ``OPTIMIZERS`` registry.

    Returns:
        list[str]: Names of the registered optimizer classes.
    """
    registered = []
    for attr_name in dir(torch.optim):
        if attr_name.startswith('__'):
            # Skip dunder attributes of the module.
            continue
        candidate = getattr(torch.optim, attr_name)
        if not inspect.isclass(candidate):
            continue
        if issubclass(candidate, torch.optim.Optimizer):
            OPTIMIZERS.register_module()(candidate)
            registered.append(attr_name)
    return registered
def build_optimizer_constructor(cfg):
    """Build an optimizer constructor from ``cfg`` via the
    ``OPTIMIZER_BUILDERS`` registry."""
    constructor = build_from_cfg(cfg, OPTIMIZER_BUILDERS)
    return constructor
def build_optimizer(model, cfg):
    """Build an optimizer for ``model`` from a config dict.

    ``cfg`` is deep-copied so the caller's dict is never mutated. The
    optional ``constructor`` and ``paramwise_cfg`` entries are popped off
    and the remainder is forwarded as the optimizer config.
    """
    cfg_copy = copy.deepcopy(cfg)
    constructor_type = cfg_copy.pop('constructor',
                                    'DefaultOptimizerConstructor')
    paramwise_cfg = cfg_copy.pop('paramwise_cfg', None)
    optim_constructor = build_optimizer_constructor(
        dict(
            type=constructor_type,
            optimizer_cfg=cfg_copy,
            paramwise_cfg=paramwise_cfg))
    return optim_constructor(model)
@OPTIMIZER_BUILDERS.register_module()
class DefaultOptimizerConstructor():
    """Default constructor for optimizers.

    By default every parameter shares the same optimizer settings; the
    ``paramwise_cfg`` argument specifies parameter-wise overrides. It is a
    dict and may contain the following fields:

    - ``custom_keys`` (dict): Parameter-wise settings selected by key. If a
      key in ``custom_keys`` is a substring of a parameter's name, that
      parameter's setting comes from ``custom_keys[key]`` and the other
      options below (``bias_lr_mult`` etc.) are ignored for it. The matched
      key is the longest such substring; among equal-length matches the
      alphabetically smallest key wins. ``custom_keys[key]`` is a dict that
      may contain ``lr_mult`` and ``decay_mult``.
    - ``bias_lr_mult`` (float): Multiplier on the learning rate of all bias
      parameters (except those of normalization layers and of DCN offset
      layers).
    - ``bias_decay_mult`` (float): Multiplier on the weight decay of all
      bias parameters (except those of normalization layers, depthwise conv
      layers and DCN offset layers).
    - ``norm_decay_mult`` (float): Multiplier on the weight decay of
      normalization-layer weights and biases.
    - ``dwconv_decay_mult`` (float): Multiplier on the weight decay of
      depthwise conv layers.
    - ``dcn_offset_lr_mult`` (float): Multiplier on the learning rate of
      the offset layers of deformable convs. NOTE: when used, it overrides
      ``bias_lr_mult`` on the offset layer's bias, and it applies to every
      DCN layer in the model.
    - ``bypass_duplicate`` (bool): If True, parameters already present in
      the param list are not added again. Default: False.

    Args:
        optimizer_cfg (dict): The config dict of the optimizer. The
            required field is ``type`` (class name of the optimizer); any
            other field (lr, weight_decay, momentum, ...) is forwarded to
            that optimizer.
        paramwise_cfg (dict, optional): Parameter-wise options as above.

    Example:
        >>> model = torch.nn.modules.Conv1d(1, 1, 1)
        >>> optimizer_cfg = dict(type='SGD', lr=0.01, momentum=0.9,
        >>>                      weight_decay=0.0001)
        >>> paramwise_cfg = dict(norm_decay_mult=0.)
        >>> optim_builder = DefaultOptimizerConstructor(
        >>>     optimizer_cfg, paramwise_cfg)
        >>> optimizer = optim_builder(model)
    """

    def __init__(self, optimizer_cfg, paramwise_cfg=None):
        if not isinstance(optimizer_cfg, dict):
            # BUG FIX: the original passed two separate arguments to
            # TypeError, so the message rendered as a tuple; join them into
            # one readable string instead.
            raise TypeError('optimizer_cfg should be a dict, '
                            f'but got {type(optimizer_cfg)}')
        self.optimizer_cfg = optimizer_cfg
        self.paramwise_cfg = {} if paramwise_cfg is None else paramwise_cfg
        # Base hyper-parameters; None when the optimizer does not set them.
        self.base_lr = optimizer_cfg.get('lr', None)
        self.base_wd = optimizer_cfg.get('weight_decay', None)
        self._validate_cfg()

    def _validate_cfg(self):
        """Sanity-check ``paramwise_cfg`` before any parameter is processed."""
        if not isinstance(self.paramwise_cfg, dict):
            raise TypeError('paramwise_cfg should be None or a dict, '
                            f'but got {type(self.paramwise_cfg)}')

        if 'custom_keys' in self.paramwise_cfg:
            if not isinstance(self.paramwise_cfg['custom_keys'], dict):
                raise TypeError(
                    'If specified, custom_keys must be a dict, '
                    f"but got {type(self.paramwise_cfg['custom_keys'])}")
            # decay_mult is meaningless without a base weight decay.
            if self.base_wd is None:
                for key in self.paramwise_cfg['custom_keys']:
                    if 'decay_mult' in self.paramwise_cfg['custom_keys'][key]:
                        raise ValueError('base_wd should not be None')

        # Same rule for the global decay multipliers.
        if ('bias_decay_mult' in self.paramwise_cfg
                or 'norm_decay_mult' in self.paramwise_cfg
                or 'dwconv_decay_mult' in self.paramwise_cfg):
            if self.base_wd is None:
                raise ValueError('base_wd should not be None')

    def _is_in(self, param_group, param_group_list):
        """Return True if any tensor of ``param_group`` already appears in
        ``param_group_list``."""
        assert is_list_of(param_group_list, dict)
        param = set(param_group['params'])
        param_set = set()
        for group in param_group_list:
            param_set.update(set(group['params']))
        return not param.isdisjoint(param_set)

    def add_params(self, params, module, prefix='', is_dcn_module=None):
        """Add all parameters of module to the params list.

        The parameters of the given module are appended to ``params`` (a
        list of param-group dicts, modified in place) following the rules
        defined by ``paramwise_cfg``; the method then recurses into child
        modules.

        Args:
            params (list[dict]): A list of param groups, it will be modified
                in place.
            module (nn.Module): The module to be added.
            prefix (str): The prefix of the module.
            is_dcn_module (int|float|None): If the current module is a
                submodule of DCN, ``is_dcn_module`` will be passed to
                control the conv_offset layer's learning rate.
                Defaults to None.
        """
        custom_keys = self.paramwise_cfg.get('custom_keys', {})
        # Longest key first; equal-length keys stay in alphabetical order
        # because the outer (stable) sort preserves the inner sort's order.
        sorted_keys = sorted(sorted(custom_keys.keys()), key=len, reverse=True)

        bias_lr_mult = self.paramwise_cfg.get('bias_lr_mult', 1.0)
        bias_decay_mult = self.paramwise_cfg.get('bias_decay_mult', 1.0)
        norm_decay_mult = self.paramwise_cfg.get('norm_decay_mult', 1.0)
        dwconv_decay_mult = self.paramwise_cfg.get('dwconv_decay_mult', 1.0)
        bypass_duplicate = self.paramwise_cfg.get('bypass_duplicate', False)
        dcn_offset_lr_mult = self.paramwise_cfg.get('dcn_offset_lr_mult', 1.0)

        is_norm = isinstance(module,
                             (_BatchNorm, _InstanceNorm, GroupNorm, LayerNorm))
        # A conv whose group count equals its input channels is depthwise.
        is_dwconv = (isinstance(module, torch.nn.Conv2d)
                     and module.in_channels == module.groups)

        for name, param in module.named_parameters(recurse=False):
            param_group = {'params': [param]}
            if not param.requires_grad:
                # Frozen parameters are added as-is, with no overrides.
                params.append(param_group)
                continue
            if bypass_duplicate and self._is_in(param_group, params):
                warnings.warn(f'{prefix} is duplicate. It is skipped since '
                              f'bypass_duplicate={bypass_duplicate}')
                continue

            # custom_keys takes precedence over every other rule.
            is_custom = False
            for key in sorted_keys:
                if key in f'{prefix}.{name}':
                    is_custom = True
                    lr_mult = custom_keys[key].get('lr_mult', 1.0)
                    param_group['lr'] = self.base_lr * lr_mult
                    if self.base_wd is not None:
                        decay_mult = custom_keys[key].get('decay_mult', 1.0)
                        param_group['weight_decay'] = self.base_wd * decay_mult
                    break

            if not is_custom:
                # bias_lr_mult affects all bias parameters except those of
                # norm layers and DCN modules.
                if name == 'bias' and not (is_norm or is_dcn_module):
                    param_group['lr'] = self.base_lr * bias_lr_mult

                if (prefix.find('conv_offset') != -1 and is_dcn_module
                        and isinstance(module, torch.nn.Conv2d)):
                    # Deliberately overrides bias_lr_mult on the DCN offset
                    # layer (see the class docstring).
                    param_group['lr'] = self.base_lr * dcn_offset_lr_mult

                # Apply weight-decay multipliers (norm > dwconv > bias).
                if self.base_wd is not None:
                    if is_norm:
                        param_group['weight_decay'] = \
                            self.base_wd * norm_decay_mult
                    elif is_dwconv:
                        param_group['weight_decay'] = \
                            self.base_wd * dwconv_decay_mult
                    elif name == 'bias' and not is_dcn_module:
                        param_group['weight_decay'] = \
                            self.base_wd * bias_decay_mult
            params.append(param_group)

        # Recurse into children, flagging DCN modules so their conv_offset
        # sub-layers can receive the special learning rate above.
        if check_ops_exist():
            from mmcv.ops import DeformConv2d, ModulatedDeformConv2d
            is_dcn_module = isinstance(module,
                                       (DeformConv2d, ModulatedDeformConv2d))
        else:
            is_dcn_module = False
        for child_name, child_mod in module.named_children():
            child_prefix = f'{prefix}.{child_name}' if prefix else child_name
            self.add_params(
                params,
                child_mod,
                prefix=child_prefix,
                is_dcn_module=is_dcn_module)

    def __call__(self, model):
        # Unwrap (Distributed)DataParallel-style wrappers.
        if hasattr(model, 'module'):
            model = model.module

        optimizer_cfg = self.optimizer_cfg.copy()
        # Without paramwise options, the global settings apply to all params.
        if not self.paramwise_cfg:
            optimizer_cfg['params'] = model.parameters()
            return build_from_cfg(optimizer_cfg, OPTIMIZERS)

        # Otherwise build the param groups recursively.
        params = []
        self.add_params(params, model)
        optimizer_cfg['params'] = params

        return build_from_cfg(optimizer_cfg, OPTIMIZERS)
class Priority(Enum):
    """Hook priority levels.

    Smaller numeric values denote higher priority (``HIGHEST`` is 0,
    ``LOWEST`` is 100).

    +--------------+------------+
    | Level        | Value      |
    +==============+============+
    | HIGHEST      | 0          |
    +--------------+------------+
    | VERY_HIGH    | 10         |
    +--------------+------------+
    | HIGH         | 30         |
    +--------------+------------+
    | ABOVE_NORMAL | 40         |
    +--------------+------------+
    | NORMAL       | 50         |
    +--------------+------------+
    | BELOW_NORMAL | 60         |
    +--------------+------------+
    | LOW          | 70         |
    +--------------+------------+
    | VERY_LOW     | 90         |
    +--------------+------------+
    | LOWEST       | 100        |
    +--------------+------------+
    """

    HIGHEST = 0
    VERY_HIGH = 10
    HIGH = 30
    ABOVE_NORMAL = 40
    NORMAL = 50
    BELOW_NORMAL = 60
    LOW = 70
    VERY_LOW = 90
    LOWEST = 100
def get_priority(priority):
    """Get priority value.

    Args:
        priority (int or str or :obj:`Priority`): Priority. An int must lie
            in [0, 100]; a str is looked up (case-insensitively) as a
            :class:`Priority` member name.

    Returns:
        int: The priority value.

    Raises:
        ValueError: If an int priority is outside [0, 100].
        KeyError: If a str does not name a :class:`Priority` member.
        TypeError: If ``priority`` is of any other type.
    """
    if isinstance(priority, int):
        if priority < 0 or priority > 100:
            raise ValueError('priority must be between 0 and 100')
        return priority
    elif isinstance(priority, Priority):
        return priority.value
    elif isinstance(priority, str):
        # Member lookup by name, e.g. 'normal' -> Priority.NORMAL -> 50.
        return Priority[priority.upper()].value
    else:
        # BUG FIX: the original message omitted that str names are accepted.
        raise TypeError(
            'priority must be an integer, a Priority enum value or its name')
def get_host_info():
    """Return ``user@hostname``, or an empty string on failure.

    ``getpass.getuser()`` can raise inside docker containers without a
    proper user database, hence the broad guard; the ``finally`` return
    guarantees a string is always produced.
    """
    info = ''
    try:
        info = f'{getuser()}@{gethostname()}'
    except Exception as exc:
        warnings.warn(f'Host or user not found: {str(exc)}')
    finally:
        return info
def get_time_str():
    """Return the current local time formatted as ``YYYYmmdd_HHMMSS``."""
    fmt = '%Y%m%d_%H%M%S'
    return time.strftime(fmt, time.localtime())