code
stringlengths
17
6.64M
def annealing_cos(start, end, factor, weight=1):
    """Calculate annealing cos learning rate.

    Cosine anneal from ``weight * start + (1 - weight) * end`` to ``end`` as
    percentage goes from 0.0 to 1.0.

    Args:
        start (float): The starting learning rate of the cosine annealing.
        end (float): The ending learing rate of the cosine annealing.
        factor (float): The coefficient of ``pi`` when calculating the current
            percentage. Range from 0.0 to 1.0.
        weight (float, optional): The combination factor of ``start`` and
            ``end`` when calculating the actual starting learning rate.
            Default to 1.
    """
    # cos(pi * factor) sweeps from 1 down to -1, so this term goes 2 -> 0.
    cosine_term = cos(pi * factor) + 1
    amplitude = 0.5 * weight * (start - end)
    return end + amplitude * cosine_term
def annealing_linear(start, end, factor):
    """Calculate annealing linear learning rate.

    Linear anneal from ``start`` to ``end`` as percentage goes from 0.0
    to 1.0.

    Args:
        start (float): The starting learning rate of the linear annealing.
        end (float): The ending learing rate of the linear annealing.
        factor (float): The coefficient of ``pi`` when calculating the current
            percentage. Range from 0.0 to 1.0.
    """
    # Interpolate: factor == 0 yields `start`, factor == 1 yields `end`.
    delta = end - start
    return start + delta * factor
def format_param(name, optim, param):
    """Normalize a hyper-parameter spec into one value per param group.

    Args:
        name (str): Name of the hyper-parameter (used in error messages and
            as the lookup key when ``param`` is a dict).
        optim: Optimizer whose ``param_groups`` determine the expected length.
        param (Number | list | tuple | dict): A scalar is broadcast to every
            param group; a list/tuple must match the number of param groups;
            otherwise ``param`` is treated as a mapping keyed by ``name``.

    Returns:
        list | tuple | object: Per-group values (or the dict entry).

    Raises:
        ValueError: If a list/tuple length mismatches the param groups.
        KeyError: If ``name`` is missing from a dict ``param``.
    """
    num_groups = len(optim.param_groups)
    if isinstance(param, numbers.Number):
        # Broadcast a single scalar to every param group.
        return num_groups * [param]
    if isinstance(param, (list, tuple)):
        if len(param) != num_groups:
            raise ValueError(f'expected {len(optim.param_groups)} values for {name}, got {len(param)}')
        return param
    # Fall through: treat `param` as a mapping keyed by the parameter name.
    if name not in param:
        raise KeyError(f'{name} is not found in {param.keys()}')
    return param[name]
@HOOKS.register_module()
class EmptyCacheHook(Hook):
    """Release the CUDA caching allocator at configurable training points.

    Args:
        before_epoch (bool): Empty the cache before each epoch.
            Default: False.
        after_epoch (bool): Empty the cache after each epoch. Default: True.
        after_iter (bool): Empty the cache after each iteration.
            Default: False.
    """

    def __init__(self, before_epoch=False, after_epoch=True, after_iter=False):
        self._before_epoch = before_epoch
        self._after_epoch = after_epoch
        self._after_iter = after_iter

    def _release(self, enabled):
        # Free cached GPU memory only when the corresponding trigger is on.
        if enabled:
            torch.cuda.empty_cache()

    def after_iter(self, runner):
        self._release(self._after_iter)

    def before_epoch(self, runner):
        self._release(self._before_epoch)

    def after_epoch(self, runner):
        self._release(self._after_epoch)
class MomentumUpdaterHook(Hook):
    """Base hook that adjusts optimizer momentum during training.

    Supports an optional warmup phase ('constant', 'linear' or 'exp') and
    works with either a single optimizer or a dict of optimizers. For
    optimizers that use ``betas`` (e.g. Adam-style), the first beta is
    treated as the momentum.

    Args:
        by_epoch (bool): Update momentum by epoch (True) or by iteration
            (False). Default: True.
        warmup (str, optional): Warmup type; one of 'constant', 'linear',
            'exp', or None for no warmup. Default: None.
        warmup_iters (int): Number of warmup iterations. Default: 0.
        warmup_ratio (float): Momentum used at the start of warmup relative
            to the regular momentum; must be in (0, 1]. Default: 0.9.
    """

    def __init__(self, by_epoch=True, warmup=None, warmup_iters=0, warmup_ratio=0.9):
        if warmup is not None:
            # NOTE(review): 'exp' is accepted above but the message only
            # mentions "constant" and "linear" — message looks stale.
            if warmup not in ['constant', 'linear', 'exp']:
                raise ValueError(f'"{warmup}" is not a supported type for warming up, valid types are "constant" and "linear"')
        if warmup is not None:
            assert warmup_iters > 0, '"warmup_iters" must be a positive integer'
            assert 0 < warmup_ratio <= 1.0, '"warmup_momentum" must be in range (0,1]'
        self.by_epoch = by_epoch
        self.warmup = warmup
        self.warmup_iters = warmup_iters
        self.warmup_ratio = warmup_ratio
        # Initial momentum per param group; becomes a dict when the runner
        # holds a dict of optimizers (see before_run).
        self.base_momentum = []
        # Momentum expected at the current progress, ignoring warmup.
        self.regular_momentum = []

    def _set_momentum(self, runner, momentum_groups):
        # Write the given momenta into the optimizer(s)' param groups.
        # 'betas'-style optimizers get the momentum as beta1 (beta2 kept).
        if isinstance(runner.optimizer, dict):
            for (k, optim) in runner.optimizer.items():
                for (param_group, mom) in zip(optim.param_groups, momentum_groups[k]):
                    if 'momentum' in param_group.keys():
                        param_group['momentum'] = mom
                    elif 'betas' in param_group.keys():
                        param_group['betas'] = (mom, param_group['betas'][1])
        else:
            for (param_group, mom) in zip(runner.optimizer.param_groups, momentum_groups):
                if 'momentum' in param_group.keys():
                    param_group['momentum'] = mom
                elif 'betas' in param_group.keys():
                    param_group['betas'] = (mom, param_group['betas'][1])

    def get_momentum(self, runner, base_momentum):
        # Subclasses implement the actual schedule.
        raise NotImplementedError

    def get_regular_momentum(self, runner):
        # Compute the scheduled momentum for every param group (per
        # optimizer when a dict of optimizers is used).
        if isinstance(runner.optimizer, dict):
            momentum_groups = {}
            for k in runner.optimizer.keys():
                _momentum_group = [
                    self.get_momentum(runner, _base_momentum)
                    for _base_momentum in self.base_momentum[k]
                ]
                momentum_groups.update({k: _momentum_group})
            return momentum_groups
        else:
            return [
                self.get_momentum(runner, _base_momentum)
                for _base_momentum in self.base_momentum
            ]

    def get_warmup_momentum(self, cur_iters):
        # Momentum during warmup. Note momentum warms up by *division*
        # (i.e. it starts higher than the regular momentum), mirroring how
        # learning-rate warmup multiplies.

        def _get_warmup_momentum(cur_iters, regular_momentum):
            if self.warmup == 'constant':
                warmup_momentum = [
                    _momentum / self.warmup_ratio
                    for _momentum in regular_momentum
                ]
            elif self.warmup == 'linear':
                # k goes from (1 - warmup_ratio) down to 0 over the warmup.
                k = (1 - cur_iters / self.warmup_iters) * (1 - self.warmup_ratio)
                warmup_momentum = [
                    _momentum / (1 - k) for _momentum in regular_momentum
                ]
            elif self.warmup == 'exp':
                k = self.warmup_ratio**(1 - cur_iters / self.warmup_iters)
                warmup_momentum = [
                    _momentum / k for _momentum in regular_momentum
                ]
            return warmup_momentum

        if isinstance(self.regular_momentum, dict):
            momentum_groups = {}
            for (key, regular_momentum) in self.regular_momentum.items():
                momentum_groups[key] = _get_warmup_momentum(cur_iters, regular_momentum)
            return momentum_groups
        else:
            return _get_warmup_momentum(cur_iters, self.regular_momentum)

    def before_run(self, runner):
        # Snapshot the initial momentum of every param group so resuming
        # does not re-read an already-modified value ('initial_momentum' is
        # only set if absent).
        if isinstance(runner.optimizer, dict):
            self.base_momentum = {}
            for (k, optim) in runner.optimizer.items():
                for group in optim.param_groups:
                    if 'momentum' in group.keys():
                        group.setdefault('initial_momentum', group['momentum'])
                    else:
                        group.setdefault('initial_momentum', group['betas'][0])
                _base_momentum = [
                    group['initial_momentum'] for group in optim.param_groups
                ]
                self.base_momentum.update({k: _base_momentum})
        else:
            for group in runner.optimizer.param_groups:
                if 'momentum' in group.keys():
                    group.setdefault('initial_momentum', group['momentum'])
                else:
                    group.setdefault('initial_momentum', group['betas'][0])
            self.base_momentum = [
                group['initial_momentum']
                for group in runner.optimizer.param_groups
            ]

    def before_train_epoch(self, runner):
        if not self.by_epoch:
            return
        self.regular_momentum = self.get_regular_momentum(runner)
        self._set_momentum(runner, self.regular_momentum)

    def before_train_iter(self, runner):
        cur_iter = runner.iter
        if not self.by_epoch:
            # Iter-based schedule: recompute every iteration, then apply
            # either the regular or the warmup momentum.
            self.regular_momentum = self.get_regular_momentum(runner)
            if self.warmup is None or cur_iter >= self.warmup_iters:
                self._set_momentum(runner, self.regular_momentum)
            else:
                warmup_momentum = self.get_warmup_momentum(cur_iter)
                self._set_momentum(runner, warmup_momentum)
        elif self.by_epoch:
            # Epoch-based schedule: only the warmup is handled per-iter;
            # after warmup the epoch hook owns the momentum.
            if self.warmup is None or cur_iter > self.warmup_iters:
                return
            elif cur_iter == self.warmup_iters:
                self._set_momentum(runner, self.regular_momentum)
            else:
                warmup_momentum = self.get_warmup_momentum(cur_iter)
                self._set_momentum(runner, warmup_momentum)
@HOOKS.register_module()
class StepMomentumUpdaterHook(MomentumUpdaterHook):
    """Step momentum scheduler with min value clipping.

    Args:
        step (int | list[int]): Step to decay the momentum. If an int value
            is given, regard it as the decay interval. If a list is given,
            decay momentum at these steps.
        gamma (float, optional): Decay momentum ratio. Default: 0.5.
        min_momentum (float, optional): Minimum momentum value to keep. If
            momentum after decay is lower than this value, it will be
            clipped accordingly. If None is given, we don't perform lr
            clipping. Default: None.
    """

    def __init__(self, step, gamma=0.5, min_momentum=None, **kwargs):
        # Validate `step`: either a list of positive ints or a positive int.
        if isinstance(step, list):
            assert mmcv.is_list_of(step, int)
            assert all([s > 0 for s in step])
        elif isinstance(step, int):
            assert step > 0
        else:
            raise TypeError('"step" must be a list or integer')
        self.step = step
        self.gamma = gamma
        self.min_momentum = min_momentum
        super(StepMomentumUpdaterHook, self).__init__(**kwargs)

    def get_momentum(self, runner, base_momentum):
        progress = runner.epoch if self.by_epoch else runner.iter
        # Count how many decay milestones have been passed.
        if isinstance(self.step, int):
            num_decays = progress // self.step
        else:
            num_decays = len(self.step)
            for idx, milestone in enumerate(self.step):
                if progress < milestone:
                    num_decays = idx
                    break
        decayed = base_momentum * self.gamma**num_decays
        if self.min_momentum is None:
            return decayed
        # Clip so the momentum never drops below the configured floor.
        return max(decayed, self.min_momentum)
@HOOKS.register_module()
class CosineAnnealingMomentumUpdaterHook(MomentumUpdaterHook):
    """Cosine-anneal momentum from its base value down to a target.

    Exactly one of ``min_momentum`` / ``min_momentum_ratio`` must be given:
    either an absolute target value, or a ratio of the base momentum.
    """

    def __init__(self, min_momentum=None, min_momentum_ratio=None, **kwargs):
        # XOR: exactly one of the two target specifications is allowed.
        assert (min_momentum is None) ^ (min_momentum_ratio is None)
        self.min_momentum = min_momentum
        self.min_momentum_ratio = min_momentum_ratio
        super(CosineAnnealingMomentumUpdaterHook, self).__init__(**kwargs)

    def get_momentum(self, runner, base_momentum):
        # Progress is measured in epochs or iterations per `by_epoch`.
        if self.by_epoch:
            progress, max_progress = runner.epoch, runner.max_epochs
        else:
            progress, max_progress = runner.iter, runner.max_iters
        if self.min_momentum_ratio is not None:
            target = base_momentum * self.min_momentum_ratio
        else:
            target = self.min_momentum
        return annealing_cos(base_momentum, target, progress / max_progress)
@HOOKS.register_module()
class CyclicMomentumUpdaterHook(MomentumUpdaterHook):
    """Cyclic momentum Scheduler.

    Implement the cyclical momentum scheduler policy described in
    https://arxiv.org/pdf/1708.07120.pdf

    This momentum scheduler usually used together with the CyclicLRUpdater
    to improve the performance in the 3D detection area.

    Args:
        target_ratio (tuple[float]): Relative ratio of the lowest momentum
            and the highest momentum to the initial momentum.
        cyclic_times (int): Number of cycles during training
        step_ratio_up (float): The ratio of the increasing process of
            momentum in the total cycle.
        by_epoch (bool): Whether to update momentum by epoch.
        anneal_strategy (str, optional): {'cos', 'linear'}
            Specifies the annealing strategy: 'cos' for cosine annealing,
            'linear' for linear annealing. Default: 'cos'.
        gamma (float, optional): Cycle decay ratio. Default: 1.
            It takes values in the range (0, 1]. The difference between the
            maximum learning rate and the minimum learning rate decreases
            periodically when it is less than 1. `New in version 1.4.4.`
    """

    def __init__(self, by_epoch=False, target_ratio=(0.85 / 0.95, 1), cyclic_times=1, step_ratio_up=0.4, anneal_strategy='cos', gamma=1, **kwargs):
        # Normalize `target_ratio` to a 2-tuple (up-ratio, down-ratio).
        if isinstance(target_ratio, float):
            target_ratio = (target_ratio, target_ratio / 100000.0)
        elif isinstance(target_ratio, tuple):
            target_ratio = ((target_ratio[0], target_ratio[0] / 100000.0)
                            if len(target_ratio) == 1 else target_ratio)
        else:
            raise ValueError(f'target_ratio should be either float or tuple, got {type(target_ratio)}')
        assert len(target_ratio) == 2, '"target_ratio" must be list or tuple of two floats'
        assert 0 <= step_ratio_up < 1.0, '"step_ratio_up" must be in range [0,1)'
        self.target_ratio = target_ratio
        self.cyclic_times = cyclic_times
        self.step_ratio_up = step_ratio_up
        self.gamma = gamma
        # Filled in before_run: [start_iter, end_iter, start_ratio, end_ratio]
        self.momentum_phases = []
        if anneal_strategy not in ['cos', 'linear']:
            raise ValueError(f'anneal_strategy must be one of "cos" or "linear", instead got {anneal_strategy}')
        elif anneal_strategy == 'cos':
            self.anneal_func = annealing_cos
        elif anneal_strategy == 'linear':
            self.anneal_func = annealing_linear
        assert not by_epoch, 'currently only support "by_epoch" = False'
        super(CyclicMomentumUpdaterHook, self).__init__(by_epoch, **kwargs)

    def before_run(self, runner):
        super(CyclicMomentumUpdaterHook, self).before_run(runner)
        # Split each cycle into an "up" phase followed by a "down" phase.
        max_iter_per_phase = runner.max_iters // self.cyclic_times
        iter_up_phase = int(self.step_ratio_up * max_iter_per_phase)
        self.max_iter_per_phase = max_iter_per_phase
        self.momentum_phases.append([0, iter_up_phase, 1, self.target_ratio[0]])
        self.momentum_phases.append([iter_up_phase, max_iter_per_phase, self.target_ratio[0], self.target_ratio[1]])

    def get_momentum(self, runner, base_momentum):
        # Position inside the current cycle, and which cycle we are in.
        curr_iter = runner.iter % self.max_iter_per_phase
        curr_cycle = runner.iter // self.max_iter_per_phase
        # gamma < 1 shrinks the cycle amplitude on each successive cycle.
        scale = self.gamma**curr_cycle
        for (start_iter, end_iter, start_ratio, end_ratio) in self.momentum_phases:
            if start_iter <= curr_iter < end_iter:
                # Pull the far end of the phase toward ratio 1 by `scale`;
                # the up phase scales its end, the down phase its start.
                if start_iter == 0:
                    end_ratio = (1 - scale) + end_ratio * scale
                else:
                    start_ratio = (1 - scale) + start_ratio * scale
                progress = curr_iter - start_iter
                return self.anneal_func(base_momentum * start_ratio,
                                        base_momentum * end_ratio,
                                        progress / (end_iter - start_iter))
@HOOKS.register_module()
class OneCycleMomentumUpdaterHook(MomentumUpdaterHook):
    """OneCycle momentum Scheduler.

    This momentum scheduler usually used together with the OneCycleLrUpdater
    to improve the performance.

    Args:
        base_momentum (float or list): Lower momentum boundaries in the
            cycle for each parameter group. Note that momentum is cycled
            inversely to learning rate; at the peak of a cycle, momentum is
            'base_momentum' and learning rate is 'max_lr'.
            Default: 0.85
        max_momentum (float or list): Upper momentum boundaries in the cycle
            for each parameter group. Functionally, it defines the cycle
            amplitude (max_momentum - base_momentum). Note that momentum is
            cycled inversely to learning rate; at the start of a cycle,
            momentum is 'max_momentum' and learning rate is 'base_lr'
            Default: 0.95
        pct_start (float): The percentage of the cycle (in number of steps)
            spent increasing the learning rate.
            Default: 0.3
        anneal_strategy (str): {'cos', 'linear'}
            Specifies the annealing strategy: 'cos' for cosine annealing,
            'linear' for linear annealing.
            Default: 'cos'
        three_phase (bool): If three_phase is True, use a third phase of the
            schedule to annihilate the learning rate according to
            final_div_factor instead of modifying the second phase (the
            first two phases will be symmetrical about the step indicated by
            pct_start).
            Default: False
    """

    def __init__(self, base_momentum=0.85, max_momentum=0.95, pct_start=0.3, anneal_strategy='cos', three_phase=False, **kwargs):
        # OneCycle only works iteration-wise; by_epoch must stay False.
        if 'by_epoch' not in kwargs:
            kwargs['by_epoch'] = False
        else:
            assert not kwargs['by_epoch'], 'currently only support "by_epoch" = False'
        if not isinstance(base_momentum, (float, list, dict)):
            raise ValueError('base_momentum must be the type among of float,list or dict.')
        self._base_momentum = base_momentum
        if not isinstance(max_momentum, (float, list, dict)):
            raise ValueError('max_momentum must be the type among of float,list or dict.')
        self._max_momentum = max_momentum
        if pct_start < 0 or pct_start > 1 or not isinstance(pct_start, float):
            raise ValueError(f'Expected float between 0 and 1 pct_start, but got {pct_start}')
        self.pct_start = pct_start
        if anneal_strategy not in ['cos', 'linear']:
            raise ValueError(f'anneal_strategy must by one of "cos" or "linear", instead got {anneal_strategy}')
        elif anneal_strategy == 'cos':
            self.anneal_func = annealing_cos
        elif anneal_strategy == 'linear':
            self.anneal_func = annealing_linear
        self.three_phase = three_phase
        # Filled in before_run with dicts describing each schedule phase.
        self.momentum_phases = []
        super(OneCycleMomentumUpdaterHook, self).__init__(**kwargs)

    def before_run(self, runner):
        # Seed each param group with base/max momentum and start the cycle
        # at max momentum (momentum is cycled inversely to the LR).
        if isinstance(runner.optimizer, dict):
            for (k, optim) in runner.optimizer.items():
                # NOTE(review): message text looks garbled ("withoption").
                if ('momentum' not in optim.defaults) and ('betas' not in optim.defaults):
                    raise ValueError('optimizer must support momentum withoption enabled')
                self.use_beta1 = 'betas' in optim.defaults
                _base_momentum = format_param(k, optim, self._base_momentum)
                _max_momentum = format_param(k, optim, self._max_momentum)
                for (group, b_momentum, m_momentum) in zip(optim.param_groups, _base_momentum, _max_momentum):
                    if self.use_beta1:
                        (_, beta2) = group['betas']
                        group['betas'] = (m_momentum, beta2)
                    else:
                        group['momentum'] = m_momentum
                    group['base_momentum'] = b_momentum
                    group['max_momentum'] = m_momentum
        else:
            optim = runner.optimizer
            if ('momentum' not in optim.defaults) and ('betas' not in optim.defaults):
                raise ValueError('optimizer must support momentum withoption enabled')
            self.use_beta1 = 'betas' in optim.defaults
            k = type(optim).__name__
            _base_momentum = format_param(k, optim, self._base_momentum)
            _max_momentum = format_param(k, optim, self._max_momentum)
            for (group, b_momentum, m_momentum) in zip(optim.param_groups, _base_momentum, _max_momentum):
                if self.use_beta1:
                    (_, beta2) = group['betas']
                    group['betas'] = (m_momentum, beta2)
                else:
                    group['momentum'] = m_momentum
                group['base_momentum'] = b_momentum
                group['max_momentum'] = m_momentum
        # Build the phase table: max -> base (warm-up of LR), then back to
        # max; with three_phase, the last stretch holds at max momentum.
        if self.three_phase:
            self.momentum_phases.append({
                'end_iter': float(self.pct_start * runner.max_iters) - 1,
                'start_momentum': 'max_momentum',
                'end_momentum': 'base_momentum'
            })
            self.momentum_phases.append({
                'end_iter': float(2 * self.pct_start * runner.max_iters) - 2,
                'start_momentum': 'base_momentum',
                'end_momentum': 'max_momentum'
            })
            self.momentum_phases.append({
                'end_iter': runner.max_iters - 1,
                'start_momentum': 'max_momentum',
                'end_momentum': 'max_momentum'
            })
        else:
            self.momentum_phases.append({
                'end_iter': float(self.pct_start * runner.max_iters) - 1,
                'start_momentum': 'max_momentum',
                'end_momentum': 'base_momentum'
            })
            self.momentum_phases.append({
                'end_iter': runner.max_iters - 1,
                'start_momentum': 'base_momentum',
                'end_momentum': 'max_momentum'
            })

    def _set_momentum(self, runner, momentum_groups):
        # Same write-back logic as the base class: 'momentum' directly, or
        # beta1 for 'betas'-style optimizers.
        if isinstance(runner.optimizer, dict):
            for (k, optim) in runner.optimizer.items():
                for (param_group, mom) in zip(optim.param_groups, momentum_groups[k]):
                    if 'momentum' in param_group.keys():
                        param_group['momentum'] = mom
                    elif 'betas' in param_group.keys():
                        param_group['betas'] = (mom, param_group['betas'][1])
        else:
            for (param_group, mom) in zip(runner.optimizer.param_groups, momentum_groups):
                if 'momentum' in param_group.keys():
                    param_group['momentum'] = mom
                elif 'betas' in param_group.keys():
                    param_group['betas'] = (mom, param_group['betas'][1])

    def get_momentum(self, runner, param_group):
        # Locate the phase containing the current iter and anneal within it.
        curr_iter = runner.iter
        start_iter = 0
        for (i, phase) in enumerate(self.momentum_phases):
            end_iter = phase['end_iter']
            # The last phase also absorbs any iters past its nominal end.
            if curr_iter <= end_iter or i == len(self.momentum_phases) - 1:
                pct = (curr_iter - start_iter) / (end_iter - start_iter)
                momentum = self.anneal_func(param_group[phase['start_momentum']],
                                            param_group[phase['end_momentum']],
                                            pct)
                break
            start_iter = end_iter
        return momentum

    def get_regular_momentum(self, runner):
        # Unlike the base class, the schedule is driven by each param
        # group's stored 'base_momentum'/'max_momentum' keys.
        if isinstance(runner.optimizer, dict):
            momentum_groups = {}
            for (k, optim) in runner.optimizer.items():
                _momentum_group = [
                    self.get_momentum(runner, param_group)
                    for param_group in optim.param_groups
                ]
                momentum_groups.update({k: _momentum_group})
            return momentum_groups
        else:
            momentum_groups = []
            for param_group in runner.optimizer.param_groups:
                momentum_groups.append(self.get_momentum(runner, param_group))
            return momentum_groups
@HOOKS.register_module()
class OptimizerHook(Hook):
    """A hook contains custom operations for the optimizer.

    Args:
        grad_clip (dict, optional): A config dict to control the clip_grad.
            Default: None.
        detect_anomalous_params (bool): This option is only used for
            debugging which will slow down the training speed.
            Detect anomalous parameters that are not included in
            the computational graph with `loss` as the root.
            There are two cases

                - Parameters were not used during
                  forward pass.
                - Parameters were not used to produce
                  loss.

            Default: False.
    """

    def __init__(self, grad_clip=None, detect_anomalous_params=False):
        self.grad_clip = grad_clip
        self.detect_anomalous_params = detect_anomalous_params

    def clip_grads(self, params):
        # Only clip parameters that actually received gradients; returns
        # the total norm, or None when there was nothing to clip.
        params = list(
            filter(lambda p: p.requires_grad and p.grad is not None, params))
        if len(params) > 0:
            return clip_grad.clip_grad_norm_(params, **self.grad_clip)

    def after_train_iter(self, runner):
        # Standard optimizer step: zero grads, backward, optional clipping
        # (with the grad norm logged), then step.
        runner.optimizer.zero_grad()
        if self.detect_anomalous_params:
            self.detect_anomalous_parameters(runner.outputs['loss'], runner)
        runner.outputs['loss'].backward()
        if self.grad_clip is not None:
            grad_norm = self.clip_grads(runner.model.parameters())
            if grad_norm is not None:
                # Add grad norm to the logger
                runner.log_buffer.update({'grad_norm': float(grad_norm)},
                                         runner.outputs['num_samples'])
        runner.optimizer.step()

    def detect_anomalous_parameters(self, loss, runner):
        # Walk the autograd graph rooted at `loss` and collect every
        # parameter reachable from it; any model parameter requiring grad
        # that is *not* reachable is reported as anomalous.
        logger = runner.logger
        parameters_in_graph = set()
        visited = set()

        def traverse(grad_fn):
            if grad_fn is None:
                return
            if grad_fn not in visited:
                visited.add(grad_fn)
                # Leaf accumulators expose the parameter via `.variable`.
                if hasattr(grad_fn, 'variable'):
                    parameters_in_graph.add(grad_fn.variable)
                parents = grad_fn.next_functions
                if parents is not None:
                    for parent in parents:
                        grad_fn = parent[0]
                        traverse(grad_fn)

        traverse(loss.grad_fn)
        for (n, p) in runner.model.named_parameters():
            if (p not in parameters_in_graph) and p.requires_grad:
                logger.log(
                    level=logging.ERROR,
                    msg=f'''{n} with shape {p.size()} is not in the computational graph
''')
@HOOKS.register_module()
class GradientCumulativeOptimizerHook(OptimizerHook):
    """Optimizer Hook implements multi-iters gradient cumulating.

    Args:
        cumulative_iters (int, optional): Num of gradient cumulative iters.
            The optimizer will step every `cumulative_iters` iters.
            Defaults to 1.

    Examples:
        >>> # Use cumulative_iters to simulate a large batch size
        >>> # It is helpful when the hardware cannot handle a large batch size.
        >>> loader = DataLoader(data, batch_size=64)
        >>> optim_hook = GradientCumulativeOptimizerHook(cumulative_iters=4)
        >>> # almost equals to
        >>> loader = DataLoader(data, batch_size=256)
        >>> optim_hook = OptimizerHook()
    """

    def __init__(self, cumulative_iters=1, **kwargs):
        super(GradientCumulativeOptimizerHook, self).__init__(**kwargs)
        assert isinstance(cumulative_iters, int) and cumulative_iters > 0, f'cumulative_iters only accepts positive int, but got {type(cumulative_iters)} instead.'
        self.cumulative_iters = cumulative_iters
        # Computed lazily on the first training iter (see _init).
        self.divisible_iters = 0
        self.remainder_iters = 0
        self.initialized = False

    def has_batch_norm(self, module):
        # Recursively check whether any submodule is a BatchNorm layer;
        # BN statistics are batch-size dependent, so cumulating hurts them.
        if isinstance(module, _BatchNorm):
            return True
        for m in module.children():
            if self.has_batch_norm(m):
                return True
        return False

    def _init(self, runner):
        # Warn about resume points that drop partial gradients, and about
        # BatchNorm interacting badly with gradient accumulation.
        if runner.iter % self.cumulative_iters != 0:
            runner.logger.warning('Resume iter number is not divisible by cumulative_iters in GradientCumulativeOptimizerHook, which means the gradient of some iters is lost and the result may be influenced slightly.')
        if self.has_batch_norm(runner.model) and self.cumulative_iters > 1:
            runner.logger.warning('GradientCumulativeOptimizerHook may slightly decrease performance if the model has BatchNorm layers.')
        # Split the remaining iters into full accumulation windows plus a
        # trailing remainder that uses its own (smaller) loss factor.
        residual_iters = runner.max_iters - runner.iter
        self.divisible_iters = (
            residual_iters // self.cumulative_iters * self.cumulative_iters)
        self.remainder_iters = residual_iters - self.divisible_iters
        self.initialized = True

    def after_train_iter(self, runner):
        if not self.initialized:
            self._init(runner)
        # Average the loss over the accumulation window so the effective
        # gradient matches a single large batch.
        if runner.iter < self.divisible_iters:
            loss_factor = self.cumulative_iters
        else:
            loss_factor = self.remainder_iters
        loss = runner.outputs['loss']
        loss = loss / loss_factor
        loss.backward()
        # Step (and reset grads) only at window boundaries or the last iter.
        if (self.every_n_iters(runner, self.cumulative_iters)
                or self.is_last_iter(runner)):
            if self.grad_clip is not None:
                grad_norm = self.clip_grads(runner.model.parameters())
                if grad_norm is not None:
                    # Add grad norm to the logger
                    runner.log_buffer.update({'grad_norm': float(grad_norm)},
                                             runner.outputs['num_samples'])
            runner.optimizer.step()
            runner.optimizer.zero_grad()
@HOOKS.register_module()
class ProfilerHook(Hook):
    """Profiler to analyze performance during training.

    PyTorch Profiler is a tool that allows the collection of the performance
    metrics during the training. More details on Profiler can be found at
    https://pytorch.org/docs/1.8.1/profiler.html#torch.profiler.profile

    Args:
        by_epoch (bool): Profile performance by epoch or by iteration.
            Default: True.
        profile_iters (int): Number of iterations for profiling.
            If ``by_epoch=True``, profile_iters indicates that they are the
            first profile_iters epochs at the beginning of the
            training, otherwise it indicates the first profile_iters
            iterations. Default: 1.
        activities (list[str]): List of activity groups (CPU, CUDA) to use
            in profiling. Default: ['cpu', 'cuda'].
        schedule (dict, optional): Config of generating the callable
            schedule. if schedule is None, profiler will not add step
            markers into the trace and table view. Default: None.
        on_trace_ready (callable, dict): Either a handler or a dict of
            generate handler. Default: None.
        record_shapes (bool): Save information about operator's input
            shapes. Default: False.
        profile_memory (bool): Track tensor memory allocation/deallocation.
            Default: False.
        with_stack (bool): Record source information (file and line number)
            for the ops. Default: False.
        with_flops (bool): Use formula to estimate the FLOPS of specific
            operators (matrix multiplication and 2D convolution).
            Default: False.
        json_trace_path (str, optional): Exports the collected trace in
            Chrome JSON format. Default: None.

    Example:
        >>> runner = ...  # instantiate a Runner
        >>> # tensorboard trace
        >>> trace_config = dict(type='tb_trace', dir_name='work_dir')
        >>> profiler_config = dict(on_trace_ready=trace_config)
        >>> runner.register_profiler_hook(profiler_config)
        >>> runner.run(data_loaders=[trainloader], workflow=[('train', 1)])
    """

    def __init__(self,
                 by_epoch: bool = True,
                 profile_iters: int = 1,
                 activities: List[str] = ['cpu', 'cuda'],
                 schedule: Optional[dict] = None,
                 on_trace_ready: Optional[Union[Callable, dict]] = None,
                 record_shapes: bool = False,
                 profile_memory: bool = False,
                 with_stack: bool = False,
                 with_flops: bool = False,
                 json_trace_path: Optional[str] = None) -> None:
        # torch.profiler only exists in torch >= 1.8.1.
        try:
            from torch import profiler
        except ImportError:
            raise ImportError(f'profiler is the new feature of torch1.8.1, but your version is {torch.__version__}')
        assert isinstance(by_epoch, bool), '``by_epoch`` should be a boolean.'
        self.by_epoch = by_epoch
        if profile_iters < 1:
            raise ValueError(f'profile_iters should be greater than 0, but got {profile_iters}')
        self.profile_iters = profile_iters
        if not isinstance(activities, list):
            raise ValueError(f'activities should be list, but got {type(activities)}')
        # Map string names to torch.profiler activity enums.
        self.activities = []
        for activity in activities:
            activity = activity.lower()
            if activity == 'cpu':
                self.activities.append(profiler.ProfilerActivity.CPU)
            elif activity == 'cuda':
                self.activities.append(profiler.ProfilerActivity.CUDA)
            else:
                raise ValueError(f'activity should be "cpu" or "cuda", but got {activity}')
        if schedule is not None:
            self.schedule = profiler.schedule(**schedule)
        else:
            self.schedule = None
        self.on_trace_ready = on_trace_ready
        self.record_shapes = record_shapes
        self.profile_memory = profile_memory
        self.with_stack = with_stack
        self.with_flops = with_flops
        self.json_trace_path = json_trace_path

    @master_only
    def before_run(self, runner):
        # Validate profile_iters against the actual training length.
        if self.by_epoch and runner.max_epochs < self.profile_iters:
            raise ValueError(f'self.profile_iters should not be greater than {runner.max_epochs}')
        if not self.by_epoch and runner.max_iters < self.profile_iters:
            raise ValueError(f'self.profile_iters should not be greater than {runner.max_iters}')
        # Resolve `on_trace_ready` into a callable handler: pass a callable
        # through, or build one from a {'type': ...} config dict.
        if callable(self.on_trace_ready):  # handler
            _on_trace_ready = self.on_trace_ready
        elif isinstance(self.on_trace_ready, dict):  # config of handler
            trace_cfg = self.on_trace_ready.copy()
            trace_type = trace_cfg.pop('type')  # log_trace handler
            if trace_type == 'log_trace':

                def _log_handler(prof):
                    print(prof.key_averages().table(**trace_cfg))

                _on_trace_ready = _log_handler
            elif trace_type == 'tb_trace':  # tensorboard_trace handler
                try:
                    import torch_tb_profiler  # noqa: F401
                except ImportError:
                    raise ImportError('please run "pip install torch-tb-profiler" to install torch_tb_profiler')
                _on_trace_ready = torch.profiler.tensorboard_trace_handler(**trace_cfg)
            else:
                raise ValueError(f'trace_type should be "log_trace" or "tb_trace", but got {trace_type}')
        elif self.on_trace_ready is None:
            _on_trace_ready = None
        else:
            raise ValueError(f'on_trace_ready should be handler, dict or None, but got {type(self.on_trace_ready)}')
        if self.by_epoch and runner.max_epochs > 1:
            warnings.warn(f'profiler will profile {runner.max_epochs} epochs instead of 1 epoch. Since profiler will slow down the training, it is recommended to train 1 epoch with ProfilerHook and adjust your setting according to the profiler summary. During normal training (epoch > 1), you may disable the ProfilerHook.')
        # Enter the profiler context manually; it is exited in the matching
        # after_train_epoch / after_train_iter hook below.
        self.profiler = torch.profiler.profile(
            activities=self.activities,
            schedule=self.schedule,
            on_trace_ready=_on_trace_ready,
            record_shapes=self.record_shapes,
            profile_memory=self.profile_memory,
            with_stack=self.with_stack,
            with_flops=self.with_flops)
        self.profiler.__enter__()
        runner.logger.info('profiler is profiling...')

    @master_only
    def after_train_epoch(self, runner):
        # Epoch mode: stop profiling after the configured number of epochs.
        if self.by_epoch and runner.epoch == self.profile_iters - 1:
            runner.logger.info('profiler may take a few minutes...')
            self.profiler.__exit__(None, None, None)
            if self.json_trace_path is not None:
                self.profiler.export_chrome_trace(self.json_trace_path)

    @master_only
    def after_train_iter(self, runner):
        self.profiler.step()
        # Iter mode: stop profiling after the configured number of iters.
        if not self.by_epoch and runner.iter == self.profile_iters - 1:
            runner.logger.info('profiler may take a few minutes...')
            self.profiler.__exit__(None, None, None)
            if self.json_trace_path is not None:
                self.profiler.export_chrome_trace(self.json_trace_path)
@HOOKS.register_module()
class DistSamplerSeedHook(Hook):
    """Data-loading sampler for distributed training.

    When distributed training, it is only useful in conjunction with
    :obj:`EpochBasedRunner`, while :obj:`IterBasedRunner` achieves the same
    purpose with :obj:`IterLoader`.
    """

    def before_epoch(self, runner):
        # Prefer the loader's own sampler; fall back to the sampler nested
        # inside the batch sampler.
        loader = runner.data_loader
        if hasattr(loader.sampler, 'set_epoch'):
            # In case the` sampler has `set_epoch` method
            loader.sampler.set_epoch(runner.epoch)
        elif hasattr(loader.batch_sampler.sampler, 'set_epoch'):
            # In case the batch sampler wraps a sampler with `set_epoch`
            loader.batch_sampler.sampler.set_epoch(runner.epoch)
@HOOKS.register_module()
class SyncBuffersHook(Hook):
    """Synchronize model buffers such as running_mean and running_var in BN
    at the end of each epoch.

    Args:
        distributed (bool): Whether distributed training is used. It is
            effective only for distributed training. Defaults to True.
    """

    def __init__(self, distributed=True):
        self.distributed = distributed

    def after_epoch(self, runner):
        """All-reduce model buffers at the end of each epoch."""
        if not self.distributed:
            return
        allreduce_params(runner.model.buffers())
class IterLoader:
    """Endlessly iterate a dataloader, restarting it on exhaustion.

    Each time the underlying dataloader is exhausted, the epoch counter is
    incremented, the sampler (if it supports ``set_epoch``) is reseeded,
    and a fresh iterator is created.
    """

    def __init__(self, dataloader):
        self._dataloader = dataloader
        self.iter_loader = iter(self._dataloader)
        self._epoch = 0

    @property
    def epoch(self):
        # Number of times the underlying dataloader has been exhausted.
        return self._epoch

    def __next__(self):
        try:
            batch = next(self.iter_loader)
        except StopIteration:
            # Roll over to a new epoch and reseed the sampler if possible.
            self._epoch += 1
            if hasattr(self._dataloader.sampler, 'set_epoch'):
                self._dataloader.sampler.set_epoch(self._epoch)
            time.sleep(2)  # Prevent possible deadlock during epoch transition
            self.iter_loader = iter(self._dataloader)
            batch = next(self.iter_loader)
        return batch

    def __len__(self):
        return len(self._dataloader)
@RUNNERS.register_module()
class IterBasedRunner(BaseRunner):
    """Iteration-based Runner.

    This runner trains models iteration by iteration: ``run`` repeatedly
    pulls single batches from :obj:`IterLoader` wrappers rather than
    looping over whole epochs.
    """

    def train(self, data_loader, **kwargs):
        # Run one training iteration on a single batch. `data_loader` is an
        # IterLoader, so `next()` below never raises StopIteration.
        self.model.train()
        self.mode = 'train'
        self.data_loader = data_loader
        # Mirror the loader's epoch counter for logging/checkpoint metadata.
        self._epoch = data_loader.epoch
        data_batch = next(data_loader)
        self.call_hook('before_train_iter')
        outputs = self.model.train_step(data_batch, self.optimizer, **kwargs)
        if (not isinstance(outputs, dict)):
            raise TypeError('model.train_step() must return a dict')
        if ('log_vars' in outputs):
            self.log_buffer.update(outputs['log_vars'], outputs['num_samples'])
        self.outputs = outputs
        self.call_hook('after_train_iter')
        self._inner_iter += 1
        self._iter += 1

    @torch.no_grad()
    def val(self, data_loader, **kwargs):
        # Run one validation iteration. Gradients are disabled; only
        # `_inner_iter` advances (the global `_iter` counts training only).
        self.model.eval()
        self.mode = 'val'
        self.data_loader = data_loader
        data_batch = next(data_loader)
        self.call_hook('before_val_iter')
        outputs = self.model.val_step(data_batch, **kwargs)
        if (not isinstance(outputs, dict)):
            raise TypeError('model.val_step() must return a dict')
        if ('log_vars' in outputs):
            self.log_buffer.update(outputs['log_vars'], outputs['num_samples'])
        self.outputs = outputs
        self.call_hook('after_val_iter')
        self._inner_iter += 1

    def run(self, data_loaders, workflow, max_iters=None, **kwargs):
        """Start running.

        Args:
            data_loaders (list[:obj:`DataLoader`]): Dataloaders for training
                and validation.
            workflow (list[tuple]): A list of (phase, iters) to specify the
                running order and iterations. E.g, [('train', 10000),
                ('val', 1000)] means running 10000 iterations for training
                and 1000 iterations for validation, iteratively.
            max_iters (int, optional): Deprecated; specify ``max_iters``
                via the runner config at instantiation instead.
        """
        assert isinstance(data_loaders, list)
        assert mmcv.is_list_of(workflow, tuple)
        assert (len(data_loaders) == len(workflow))
        if (max_iters is not None):
            warnings.warn('setting max_iters in run is deprecated, please set max_iters in runner_config', DeprecationWarning)
            self._max_iters = max_iters
        assert (self._max_iters is not None), 'max_iters must be specified during instantiation'
        work_dir = (self.work_dir if (self.work_dir is not None) else 'NONE')
        self.logger.info('Start running, host: %s, work_dir: %s', get_host_info(), work_dir)
        self.logger.info('Hooks will be executed in the following order:\n%s', self.get_hook_info())
        self.logger.info('workflow: %s, max: %d iters', workflow, self._max_iters)
        self.call_hook('before_run')
        iter_loaders = [IterLoader(x) for x in data_loaders]
        self.call_hook('before_epoch')
        while (self.iter < self._max_iters):
            for (i, flow) in enumerate(workflow):
                self._inner_iter = 0
                (mode, iters) = flow
                if ((not isinstance(mode, str)) or (not hasattr(self, mode))):
                    raise ValueError('runner has no method named "{}" to run a workflow'.format(mode))
                iter_runner = getattr(self, mode)
                for _ in range(iters):
                    # Only the 'train' phase is bounded by max_iters; a
                    # 'val' phase always runs its configured iterations.
                    if ((mode == 'train') and (self.iter >= self._max_iters)):
                        break
                    iter_runner(iter_loaders[i], **kwargs)
        # Short pause so logger hooks can flush before the final hooks fire.
        time.sleep(1)
        self.call_hook('after_epoch')
        self.call_hook('after_run')

    def resume(self, checkpoint, resume_optimizer=True, map_location='default'):
        """Resume model from checkpoint.

        Args:
            checkpoint (str): Checkpoint to resume from.
            resume_optimizer (bool, optional): Whether resume the
                optimizer(s) if the checkpoint file includes optimizer(s).
                Default to True.
            map_location (str, optional): Same as :func:`torch.load`.
                Default to 'default'.
        """
        if (map_location == 'default'):
            # Map tensors onto this process's current CUDA device.
            device_id = torch.cuda.current_device()
            checkpoint = self.load_checkpoint(checkpoint, map_location=(lambda storage, loc: storage.cuda(device_id)))
        else:
            checkpoint = self.load_checkpoint(checkpoint, map_location=map_location)
        self._epoch = checkpoint['meta']['epoch']
        self._iter = checkpoint['meta']['iter']
        # NOTE(review): _inner_iter is restored from the *global* iteration
        # count rather than reset to 0 — confirm this is intended.
        self._inner_iter = checkpoint['meta']['iter']
        if (('optimizer' in checkpoint) and resume_optimizer):
            if isinstance(self.optimizer, Optimizer):
                self.optimizer.load_state_dict(checkpoint['optimizer'])
            elif isinstance(self.optimizer, dict):
                # Multiple optimizers stored under their config keys.
                for k in self.optimizer.keys():
                    self.optimizer[k].load_state_dict(checkpoint['optimizer'][k])
            else:
                raise TypeError(f'Optimizer should be dict or torch.optim.Optimizer but got {type(self.optimizer)}')
        self.logger.info(f'resumed from epoch: {self.epoch}, iter {self.iter}')

    def save_checkpoint(self, out_dir, filename_tmpl='iter_{}.pth', meta=None, save_optimizer=True, create_symlink=True):
        """Save checkpoint to file.

        Args:
            out_dir (str): Directory to save checkpoint files.
            filename_tmpl (str, optional): Checkpoint file template.
                Defaults to 'iter_{}.pth'.
            meta (dict, optional): Metadata to be saved in checkpoint.
                Defaults to None.
            save_optimizer (bool, optional): Whether save optimizer.
                Defaults to True.
            create_symlink (bool, optional): Whether create symlink to the
                latest checkpoint file. Defaults to True.
        """
        if (meta is None):
            meta = {}
        elif (not isinstance(meta, dict)):
            raise TypeError(f'meta should be a dict or None, but got {type(meta)}')
        if (self.meta is not None):
            meta.update(self.meta)
        # Counters are stored 1-based so a resumed run picks up after them.
        meta.update(epoch=(self.epoch + 1), iter=self.iter)
        filename = filename_tmpl.format((self.iter + 1))
        filepath = osp.join(out_dir, filename)
        optimizer = (self.optimizer if save_optimizer else None)
        save_checkpoint(self.model, filepath, optimizer=optimizer, meta=meta)
        if create_symlink:
            dst_file = osp.join(out_dir, 'latest.pth')
            if (platform.system() != 'Windows'):
                mmcv.symlink(filename, dst_file)
            else:
                # Windows may lack symlink permission; copy instead.
                shutil.copy(filepath, dst_file)

    def register_training_hooks(self, lr_config, optimizer_config=None, checkpoint_config=None, log_config=None, momentum_config=None, custom_hooks_config=None):
        """Register default hooks for iter-based training.

        Checkpoint hook, optimizer stepper hook and logger hooks will be
        set to ``by_epoch=False`` by default.

        Default hooks and their priorities: LrUpdaterHook VERY_HIGH (10),
        MomentumUpdaterHook HIGH (30), OptimizerStepperHook ABOVE_NORMAL
        (40), CheckpointSaverHook NORMAL (50), IterTimerHook LOW (70),
        LoggerHook(s) VERY_LOW (90); custom hooks default to NORMAL (50).
        Custom hooks with the same priority as a default hook are
        triggered after the default hook.
        """
        if (checkpoint_config is not None):
            checkpoint_config.setdefault('by_epoch', False)
        if (lr_config is not None):
            lr_config.setdefault('by_epoch', False)
        if (log_config is not None):
            for info in log_config['hooks']:
                info.setdefault('by_epoch', False)
        super(IterBasedRunner, self).register_training_hooks(lr_config=lr_config, momentum_config=momentum_config, optimizer_config=optimizer_config, checkpoint_config=checkpoint_config, log_config=log_config, timer_config=IterTimerHook(), custom_hooks_config=custom_hooks_config)
class LogBuffer():
    """Accumulate per-key scalar logs and compute weighted running averages."""

    def __init__(self):
        # Raw history: key -> list of values / list of sample counts.
        self.val_history = OrderedDict()
        self.n_history = OrderedDict()
        # Computed averages and a flag marking them as up to date.
        self.output = OrderedDict()
        self.ready = False

    def clear(self):
        """Drop both the raw history and any computed averages."""
        self.val_history.clear()
        self.n_history.clear()
        self.clear_output()

    def clear_output(self):
        """Drop computed averages only; the raw history is kept."""
        self.output.clear()
        self.ready = False

    def update(self, vars, count=1):
        """Append one value per key, remembering its sample count."""
        assert isinstance(vars, dict)
        for key, var in vars.items():
            self.val_history.setdefault(key, []).append(var)
            self.n_history.setdefault(key, []).append(count)

    def average(self, n=0):
        """Average latest n values or all values."""
        assert n >= 0
        for key in self.val_history:
            # n == 0 slices the whole history; otherwise the last n entries.
            values = np.array(self.val_history[key][-n:])
            nums = np.array(self.n_history[key][-n:])
            self.output[key] = np.sum(values * nums) / np.sum(nums)
        self.ready = True
def register_torch_optimizers():
    """Register every optimizer class in ``torch.optim`` with OPTIMIZERS.

    Returns:
        list[str]: Names of the optimizer classes that were registered.
    """
    registered = []
    for attr_name in dir(torch.optim):
        if attr_name.startswith('__'):
            continue
        candidate = getattr(torch.optim, attr_name)
        # Only concrete Optimizer subclasses are eligible.
        if inspect.isclass(candidate) and issubclass(candidate, torch.optim.Optimizer):
            OPTIMIZERS.register_module()(candidate)
            registered.append(attr_name)
    return registered
def build_optimizer_constructor(cfg):
    # Build an optimizer-constructor object (e.g. DefaultOptimizerConstructor)
    # from a config dict via the OPTIMIZER_BUILDERS registry.
    return build_from_cfg(cfg, OPTIMIZER_BUILDERS)
def build_optimizer(model, cfg):
    """Build an optimizer for ``model`` from a config dict.

    The config may carry a ``constructor`` key naming an optimizer
    constructor class (default ``'DefaultOptimizerConstructor'``) and an
    optional ``paramwise_cfg`` with parameter-wise options; the remaining
    keys configure the optimizer itself.
    """
    # Deep-copy so popping keys does not mutate the caller's config.
    cfg_copy = copy.deepcopy(cfg)
    constructor_type = cfg_copy.pop('constructor', 'DefaultOptimizerConstructor')
    paramwise_cfg = cfg_copy.pop('paramwise_cfg', None)
    constructor = build_optimizer_constructor(
        dict(
            type=constructor_type,
            optimizer_cfg=cfg_copy,
            paramwise_cfg=paramwise_cfg))
    return constructor(model)
@OPTIMIZER_BUILDERS.register_module()
class DefaultOptimizerConstructor():
    """Default constructor for optimizers.

    By default each parameter shares the same optimizer settings; the
    ``paramwise_cfg`` argument specifies parameter-wise overrides. It is a
    dict and may contain the following fields:

    - ``custom_keys`` (dict): Specified parameters-wise settings by keys.
      If one of the keys in ``custom_keys`` is a substring of the name of
      one parameter, then the setting of the parameter will be specified
      by ``custom_keys[key]`` and other settings like ``bias_lr_mult``
      etc. will be ignored. The matched key is the longest key that is a
      substring of the parameter's name; equal-length matches resolve to
      the alphabetically smaller key. ``custom_keys[key]`` should be a
      dict and may contain fields ``lr_mult`` and ``decay_mult``.
    - ``bias_lr_mult`` (float): Multiplied to the learning rate for all
      bias parameters (except those in normalization layers and offset
      layers of DCN).
    - ``bias_decay_mult`` (float): Multiplied to the weight decay for all
      bias parameters (except those in normalization layers, depthwise
      conv layers, offset layers of DCN).
    - ``norm_decay_mult`` (float): Multiplied to the weight decay for all
      weight and bias parameters of normalization layers.
    - ``dwconv_decay_mult`` (float): Multiplied to the weight decay for
      all weight and bias parameters of depthwise conv layers.
    - ``dcn_offset_lr_mult`` (float): Multiplied to the learning rate for
      parameters of offset layers in the deformable convs of a model.
    - ``bypass_duplicate`` (bool): If true, duplicate parameters are not
      added to the optimizer. Default: False.

    Note:
        1. If ``dcn_offset_lr_mult`` is used, it overrides the effect of
           ``bias_lr_mult`` on the bias of the offset layer. To apply both
           to the offset layer, set ``dcn_offset_lr_mult`` to the original
           ``dcn_offset_lr_mult`` * ``bias_lr_mult``.
        2. ``dcn_offset_lr_mult`` applies to all DCN layers in the model,
           not only those in the backbone.

    Args:
        optimizer_cfg (dict): The config dict of the optimizer. Required
            field ``type`` (optimizer class name); any further fields are
            forwarded to the optimizer (lr, weight_decay, momentum, ...).
        paramwise_cfg (dict, optional): Parameter-wise options (above).

    Example:
        >>> model = torch.nn.modules.Conv1d(1, 1, 1)
        >>> optimizer_cfg = dict(type='SGD', lr=0.01, momentum=0.9,
        >>>                      weight_decay=0.0001)
        >>> paramwise_cfg = dict(norm_decay_mult=0.)
        >>> optim_builder = DefaultOptimizerConstructor(
        >>>     optimizer_cfg, paramwise_cfg)
        >>> optimizer = optim_builder(model)
    """

    def __init__(self, optimizer_cfg, paramwise_cfg=None):
        if not isinstance(optimizer_cfg, dict):
            # Bug fix: the two message parts were previously passed as two
            # separate TypeError arguments (stray comma), yielding a
            # tuple-style message. Build a single formatted string instead.
            raise TypeError('optimizer_cfg should be a dict, '
                            f'but got {type(optimizer_cfg)}')
        self.optimizer_cfg = optimizer_cfg
        self.paramwise_cfg = {} if paramwise_cfg is None else paramwise_cfg
        self.base_lr = optimizer_cfg.get('lr', None)
        self.base_wd = optimizer_cfg.get('weight_decay', None)
        self._validate_cfg()

    def _validate_cfg(self):
        """Sanity-check ``paramwise_cfg`` against the optimizer settings."""
        if not isinstance(self.paramwise_cfg, dict):
            raise TypeError('paramwise_cfg should be None or a dict, '
                            f'but got {type(self.paramwise_cfg)}')
        if 'custom_keys' in self.paramwise_cfg:
            if not isinstance(self.paramwise_cfg['custom_keys'], dict):
                raise TypeError(
                    'If specified, custom_keys must be a dict, '
                    f"but got {type(self.paramwise_cfg['custom_keys'])}")
            # A decay multiplier is meaningless without a base weight decay.
            if self.base_wd is None:
                for key in self.paramwise_cfg['custom_keys']:
                    if 'decay_mult' in self.paramwise_cfg['custom_keys'][key]:
                        raise ValueError('base_wd should not be None')
        if ('bias_decay_mult' in self.paramwise_cfg
                or 'norm_decay_mult' in self.paramwise_cfg
                or 'dwconv_decay_mult' in self.paramwise_cfg):
            if self.base_wd is None:
                raise ValueError('base_wd should not be None')

    def _is_in(self, param_group, param_group_list):
        """Return True if any parameter of ``param_group`` already appears
        in ``param_group_list``."""
        assert is_list_of(param_group_list, dict)
        param = set(param_group['params'])
        param_set = set()
        for group in param_group_list:
            param_set.update(set(group['params']))
        return not param.isdisjoint(param_set)

    def add_params(self, params, module, prefix='', is_dcn_module=None):
        """Add all parameters of module to the params list.

        The parameters of the given module will be added to the list of
        param groups, with specific rules defined by paramwise_cfg.

        Args:
            params (list[dict]): A list of param groups, it will be
                modified in place.
            module (nn.Module): The module to be added.
            prefix (str): The prefix of the module.
            is_dcn_module (int|float|None): If the current module is a
                submodule of DCN, `is_dcn_module` will be passed to
                control conv_offset layer's learning rate. Defaults to
                None.
        """
        custom_keys = self.paramwise_cfg.get('custom_keys', {})
        # Longest keys first; ties resolve to the alphabetically smaller key
        # (the inner sort is stable under the outer length sort).
        sorted_keys = sorted(sorted(custom_keys.keys()), key=len, reverse=True)
        bias_lr_mult = self.paramwise_cfg.get('bias_lr_mult', 1.0)
        bias_decay_mult = self.paramwise_cfg.get('bias_decay_mult', 1.0)
        norm_decay_mult = self.paramwise_cfg.get('norm_decay_mult', 1.0)
        dwconv_decay_mult = self.paramwise_cfg.get('dwconv_decay_mult', 1.0)
        bypass_duplicate = self.paramwise_cfg.get('bypass_duplicate', False)
        dcn_offset_lr_mult = self.paramwise_cfg.get('dcn_offset_lr_mult', 1.0)
        is_norm = isinstance(module,
                             (_BatchNorm, _InstanceNorm, GroupNorm, LayerNorm))
        # A conv whose group count equals its input channel count is depthwise.
        is_dwconv = (isinstance(module, torch.nn.Conv2d)
                     and module.in_channels == module.groups)
        for name, param in module.named_parameters(recurse=False):
            param_group = {'params': [param]}
            if not param.requires_grad:
                params.append(param_group)
                continue
            if bypass_duplicate and self._is_in(param_group, params):
                warnings.warn(f'{prefix} is duplicate. It is skipped since bypass_duplicate={bypass_duplicate}')
                continue
            # custom_keys overrides every other paramwise rule.
            is_custom = False
            for key in sorted_keys:
                if key in f'{prefix}.{name}':
                    is_custom = True
                    lr_mult = custom_keys[key].get('lr_mult', 1.0)
                    param_group['lr'] = self.base_lr * lr_mult
                    if self.base_wd is not None:
                        decay_mult = custom_keys[key].get('decay_mult', 1.0)
                        param_group['weight_decay'] = self.base_wd * decay_mult
                    break
            if not is_custom:
                # bias_lr_mult affects all biases except norm layers and
                # DCN modules (their offset layer is handled just below).
                if name == 'bias' and not (is_norm or is_dcn_module):
                    param_group['lr'] = self.base_lr * bias_lr_mult
                if (prefix.find('conv_offset') != -1 and is_dcn_module
                        and isinstance(module, torch.nn.Conv2d)):
                    # Dedicated lr for deformable-conv offset layers; this
                    # deliberately overrides bias_lr_mult set above.
                    param_group['lr'] = self.base_lr * dcn_offset_lr_mult
                if self.base_wd is not None:
                    if is_norm:
                        param_group['weight_decay'] = self.base_wd * norm_decay_mult
                    elif is_dwconv:
                        param_group['weight_decay'] = self.base_wd * dwconv_decay_mult
                    elif name == 'bias' and not is_dcn_module:
                        param_group['weight_decay'] = self.base_wd * bias_decay_mult
            params.append(param_group)
        if check_ops_exist():
            from mmcv.ops import DeformConv2d, ModulatedDeformConv2d
            is_dcn_module = isinstance(module,
                                       (DeformConv2d, ModulatedDeformConv2d))
        else:
            is_dcn_module = False
        for child_name, child_mod in module.named_children():
            child_prefix = f'{prefix}.{child_name}' if prefix else child_name
            self.add_params(params, child_mod, prefix=child_prefix,
                            is_dcn_module=is_dcn_module)

    def __call__(self, model):
        # Unwrap (Distributed)DataParallel-style containers.
        if hasattr(model, 'module'):
            model = model.module
        optimizer_cfg = self.optimizer_cfg.copy()
        # Fast path: no paramwise options, hand all parameters over as-is.
        if not self.paramwise_cfg:
            optimizer_cfg['params'] = model.parameters()
            return build_from_cfg(optimizer_cfg, OPTIMIZERS)
        # Recursively build per-parameter groups honoring paramwise_cfg.
        params = []
        self.add_params(params, model)
        optimizer_cfg['params'] = params
        return build_from_cfg(optimizer_cfg, OPTIMIZERS)
class Priority(Enum):
    """Hook priority levels.

    Lower values run earlier; custom hooks default to NORMAL (50).

    +--------------+------------+
    | Level        | Value      |
    +==============+============+
    | HIGHEST      | 0          |
    +--------------+------------+
    | VERY_HIGH    | 10         |
    +--------------+------------+
    | HIGH         | 30         |
    +--------------+------------+
    | ABOVE_NORMAL | 40         |
    +--------------+------------+
    | NORMAL       | 50         |
    +--------------+------------+
    | BELOW_NORMAL | 60         |
    +--------------+------------+
    | LOW          | 70         |
    +--------------+------------+
    | VERY_LOW     | 90         |
    +--------------+------------+
    | LOWEST       | 100        |
    +--------------+------------+
    """
    HIGHEST = 0
    VERY_HIGH = 10
    HIGH = 30
    ABOVE_NORMAL = 40
    NORMAL = 50
    BELOW_NORMAL = 60
    LOW = 70
    VERY_LOW = 90
    LOWEST = 100
def get_priority(priority):
    """Resolve a priority given as int, str or :obj:`Priority` to its int value.

    Args:
        priority (int or str or :obj:`Priority`): Priority.

    Returns:
        int: The priority value.
    """
    if isinstance(priority, int):
        if not 0 <= priority <= 100:
            raise ValueError('priority must be between 0 and 100')
        return priority
    if isinstance(priority, Priority):
        return priority.value
    if isinstance(priority, str):
        # Names are case-insensitive; unknown names raise KeyError.
        return Priority[priority.upper()].value
    raise TypeError('priority must be an integer or Priority enum value')
def get_host_info():
    """Return ``user@hostname``, or an empty string if it cannot be determined.

    ``getpass.getuser()`` can raise inside containers without a user entry,
    hence the broad catch and the empty-string fallback.
    """
    info = ''
    try:
        user = getuser()
        node = gethostname()
        info = f'{user}@{node}'
    except Exception as exc:
        warnings.warn(f'Host or user not found: {str(exc)}')
    finally:
        # Deliberately swallow any failure and return whatever we built.
        return info
def get_time_str():
    """Return the current local time formatted as ``YYYYmmdd_HHMMSS``."""
    now = time.localtime()
    return time.strftime('%Y%m%d_%H%M%S', now)
def obj_from_dict(info, parent=None, default_args=None):
    """Initialize an object from dict.

    The dict must contain the key "type", which indicates the object type;
    it can be either a string or a type, such as "list" or ``list``. The
    remaining fields are treated as the arguments for constructing the
    object.

    Args:
        info (dict): Object types and arguments.
        parent (:class:`module`): Module which may contain expected object
            classes.
        default_args (dict, optional): Default arguments for initializing
            the object.

    Returns:
        any type: Object built from the dict.
    """
    assert isinstance(info, dict) and 'type' in info
    assert isinstance(default_args, dict) or default_args is None
    kwargs = info.copy()
    obj_type = kwargs.pop('type')
    if mmcv.is_str(obj_type):
        # Resolve the name on `parent` if given, else as a loaded module.
        if parent is not None:
            obj_type = getattr(parent, obj_type)
        else:
            obj_type = sys.modules[obj_type]
    elif not isinstance(obj_type, type):
        raise TypeError(f'type must be a str or valid type, but got {type(obj_type)}')
    if default_args is not None:
        # Explicit fields in `info` win over defaults.
        for name, value in default_args.items():
            kwargs.setdefault(name, value)
    return obj_type(**kwargs)
def set_random_seed(seed, deterministic=False, use_rank_shift=False):
    """Set random seed.

    Args:
        seed (int): Seed to be used.
        deterministic (bool): Whether to set the deterministic option for
            CUDNN backend, i.e., set ``torch.backends.cudnn.deterministic``
            to True and ``torch.backends.cudnn.benchmark`` to False.
            Default: False.
        use_rank_shift (bool): Whether to add the rank number to the random
            seed so each process gets a different seed. Default: False.
    """
    if use_rank_shift:
        rank, _ = mmcv.runner.get_dist_info()
        seed += rank
    # Seed every RNG the training stack may draw from.
    for seeder in (random.seed, np.random.seed, torch.manual_seed,
                   torch.cuda.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    if deterministic:
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
def is_tensorrt_available():
    """Return True if the ``tensorrt`` package can be imported."""
    try:
        import tensorrt
    except ModuleNotFoundError:
        return False
    else:
        # Only probing availability; drop the local binding again.
        del tensorrt
        return True
def get_tensorrt_op_path():
    """Get TensorRT plugins library path.

    Returns:
        str: Path to the ``_ext_trt`` shared library, or '' if not found.
    """
    # Colorized deprecation banner (ANSI escapes), pointing users to MMDeploy.
    deprecation_msg = ''.join([
        '\x1b[107m',  # white background
        '\x1b[1m',    # bright
        '\x1b[31m',   # red
        'DeprecationWarning: This function will be deprecated in future. ',
        '\x1b[34m',   # blue
        'Welcome to use the unified model deployment toolbox ',
        'MMDeploy: https://github.com/open-mmlab/mmdeploy',
        '\x1b[0m',    # reset
    ])
    warnings.warn(deprecation_msg)
    ext_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
    wildcard = os.path.join(ext_dir, '_ext_trt.*.so')
    paths = glob.glob(wildcard)
    return paths[0] if paths else ''
def is_tensorrt_plugin_loaded():
    """Check if TensorRT plugins library is loaded or not.

    Returns:
        bool: plugin_is_loaded flag
    """
    # Colorized deprecation banner (ANSI escapes), pointing users to MMDeploy.
    deprecation_msg = ''.join([
        '\x1b[107m',  # white background
        '\x1b[1m',    # bright
        '\x1b[31m',   # red
        'DeprecationWarning: This function will be deprecated in future. ',
        '\x1b[34m',   # blue
        'Welcome to use the unified model deployment toolbox ',
        'MMDeploy: https://github.com/open-mmlab/mmdeploy',
        '\x1b[0m',    # reset
    ])
    warnings.warn(deprecation_msg)
    global plugin_is_loaded
    return plugin_is_loaded
def load_tensorrt_plugin():
    """Load the TensorRT plugins library if present and not yet loaded."""
    # Colorized deprecation banner (ANSI escapes), pointing users to MMDeploy.
    deprecation_msg = ''.join([
        '\x1b[107m',  # white background
        '\x1b[1m',    # bright
        '\x1b[31m',   # red
        'DeprecationWarning: This function will be deprecated in future. ',
        '\x1b[34m',   # blue
        'Welcome to use the unified model deployment toolbox ',
        'MMDeploy: https://github.com/open-mmlab/mmdeploy',
        '\x1b[0m',    # reset
    ])
    warnings.warn(deprecation_msg)
    global plugin_is_loaded
    lib_path = get_tensorrt_op_path()
    if not plugin_is_loaded and os.path.exists(lib_path):
        # Loading the shared object registers the plugins with TensorRT.
        ctypes.CDLL(lib_path)
        plugin_is_loaded = True
def preprocess_onnx(onnx_model):
    """Modify onnx model to match with TensorRT plugins in mmcv.

    There are some conflicts between onnx node definitions and TensorRT
    limits. This function preprocesses the onnx model to solve them. For
    example, an onnx `attribute` is loaded in TensorRT on host while an
    onnx `input` is loaded on device. Shape inference is performed on
    host, so any `input` related to shape (such as
    `max_output_boxes_per_class` in NonMaxSuppression) should be
    transformed to an `attribute` before conversion.

    Arguments:
        onnx_model (onnx.ModelProto): Input onnx model.

    Returns:
        onnx.ModelProto: Modified onnx model.
    """
    (bright_style, reset_style) = ('\x1b[1m', '\x1b[0m')
    (red_text, blue_text) = ('\x1b[31m', '\x1b[34m')
    white_background = '\x1b[107m'
    msg = ((white_background + bright_style) + red_text)
    msg += 'DeprecationWarning: This function will be deprecated in future. '
    msg += (blue_text + 'Welcome to use the unified model deployment toolbox ')
    msg += 'MMDeploy: https://github.com/open-mmlab/mmdeploy'
    msg += reset_style
    warnings.warn(msg)
    graph = onnx_model.graph
    nodes = graph.node
    initializers = graph.initializer
    # Map each (non-empty) output name to the node that produces it.
    node_dict = {}
    for node in nodes:
        node_outputs = node.output
        for output in node_outputs:
            if (len(output) > 0):
                node_dict[output] = node
    init_dict = {_.name: _ for _ in initializers}
    nodes_name_to_remove = set()

    def is_node_without_output(name):
        # True if no surviving node consumes `name` as an input.
        for (node_name, node) in node_dict.items():
            if (node_name not in nodes_name_to_remove):
                if (name in node.input):
                    return False
        return True

    def mark_nodes_to_remove(name):
        # Mark the producer of `name` for removal and recurse into its
        # inputs that become dead as a result.
        node = node_dict[name]
        nodes_name_to_remove.add(name)
        for input_node_name in node.input:
            if is_node_without_output(input_node_name):
                mark_nodes_to_remove(input_node_name)

    def parse_data(name, typ, default_value=0):
        # Read a scalar fed as an input: either a Constant node's raw data
        # or an initializer. Non-constant producers are pruned and the
        # default returned, since the value cannot be resolved on host.
        if (name in node_dict):
            node = node_dict[name]
            if (node.op_type == 'Constant'):
                raw_data = node.attribute[0].t.raw_data
            else:
                mark_nodes_to_remove(name)
                return default_value
        elif (name in init_dict):
            raw_data = init_dict[name].raw_data
        else:
            raise ValueError(f'{name} not found in node or initilizer.')
        return np.frombuffer(raw_data, typ).item()

    nrof_node = len(nodes)
    for idx in range(nrof_node):
        node = nodes[idx]
        node_attributes = node.attribute
        node_inputs = node.input
        node_outputs = node.output
        node_name = node.name
        if (node.op_type == 'NonMaxSuppression'):
            # Defaults used when the optional inputs/attributes are absent.
            center_point_box = 0
            max_output_boxes_per_class = 1000000
            iou_threshold = 0.3
            score_threshold = 0.0
            offset = 0
            for attribute in node_attributes:
                if (attribute.name == 'center_point_box'):
                    center_point_box = attribute.i
                elif (attribute.name == 'offset'):
                    offset = attribute.i
            # Fold the optional scalar *inputs* into *attributes* so that
            # TensorRT can evaluate them during host-side shape inference.
            if (len(node_inputs) >= 3):
                max_output_boxes_per_class = parse_data(node_inputs[2], np.int64, max_output_boxes_per_class)
                mark_nodes_to_remove(node_inputs[2])
            if (len(node_inputs) >= 4):
                iou_threshold = parse_data(node_inputs[3], np.float32, iou_threshold)
                mark_nodes_to_remove(node_inputs[3])
            if (len(node_inputs) >= 5):
                score_threshold = parse_data(node_inputs[4], np.float32)
                mark_nodes_to_remove(node_inputs[4])
            # Rebuild the node with only the two tensor inputs, everything
            # else encoded as attributes.
            new_node = onnx.helper.make_node('NonMaxSuppression', node_inputs[:2], node_outputs, name=node_name, center_point_box=center_point_box, max_output_boxes_per_class=max_output_boxes_per_class, iou_threshold=iou_threshold, score_threshold=score_threshold, offset=offset)
            for output in node_outputs:
                if (output in node_dict):
                    node_dict[output] = new_node
            # Replace in place, keeping graph order.
            nodes.insert(idx, new_node)
            nodes.remove(node)
        elif (node.op_type == 'InstanceNormalization'):
            # Route through the mmcv TensorRT plugin implementation.
            node.op_type = 'MMCVInstanceNormalization'
    for node_name in nodes_name_to_remove:
        nodes.remove(node_dict[node_name])
    return onnx_model
def onnx2trt(onnx_model, opt_shape_dict, log_level=trt.Logger.ERROR, fp16_mode=False, max_workspace_size=0, device_id=0):
    """Convert onnx model to tensorrt engine.

    Arguments:
        onnx_model (str or onnx.ModelProto): the onnx model to convert from
        opt_shape_dict (dict): the min/opt/max shape of each input
        log_level (TensorRT log level): the log level of TensorRT
        fp16_mode (bool): enable fp16 mode
        max_workspace_size (int): set max workspace size of TensorRT
            engine. some tactic and layers need large workspace.
        device_id (int): choice the device to create engine.

    Returns:
        tensorrt.ICudaEngine: the TensorRT engine created from onnx_model

    Example:
        >>> engine = onnx2trt(
        >>>     "onnx_model.onnx",
        >>>     {'input': [[1, 3, 160, 160],
        >>>                [1, 3, 320, 320],
        >>>                [1, 3, 640, 640]]},
        >>>     log_level=trt.Logger.WARNING,
        >>>     fp16_mode=True,
        >>>     max_workspace_size=1 << 30,
        >>>     device_id=0)
    """
    (bright_style, reset_style) = ('\x1b[1m', '\x1b[0m')
    (red_text, blue_text) = ('\x1b[31m', '\x1b[34m')
    white_background = '\x1b[107m'
    msg = ((white_background + bright_style) + red_text)
    msg += 'DeprecationWarning: This function will be deprecated in future. '
    msg += (blue_text + 'Welcome to use the unified model deployment toolbox ')
    msg += 'MMDeploy: https://github.com/open-mmlab/mmdeploy'
    msg += reset_style
    warnings.warn(msg)
    device = torch.device('cuda:{}'.format(device_id))
    logger = trt.Logger(log_level)
    builder = trt.Builder(logger)
    # Explicit-batch network definition is required for ONNX parsing.
    EXPLICIT_BATCH = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
    network = builder.create_network(EXPLICIT_BATCH)
    parser = trt.OnnxParser(network, logger)
    if isinstance(onnx_model, str):
        onnx_model = onnx.load(onnx_model)
    # Rewrite nodes that conflict with TensorRT limits (see preprocess_onnx).
    onnx_model = preprocess_onnx(onnx_model)
    if (not parser.parse(onnx_model.SerializeToString())):
        error_msgs = ''
        for error in range(parser.num_errors):
            error_msgs += f'''{parser.get_error(error)}
'''
        raise RuntimeError(f'''parse onnx failed:
{error_msgs}''')
    # Set on both builder and config for compatibility across TRT versions.
    builder.max_workspace_size = max_workspace_size
    config = builder.create_builder_config()
    config.max_workspace_size = max_workspace_size
    profile = builder.create_optimization_profile()
    for (input_name, param) in opt_shape_dict.items():
        # param is [min_shape, opt_shape, max_shape] for this input.
        min_shape = tuple(param[0][:])
        opt_shape = tuple(param[1][:])
        max_shape = tuple(param[2][:])
        profile.set_shape(input_name, min_shape, opt_shape, max_shape)
    config.add_optimization_profile(profile)
    if fp16_mode:
        builder.fp16_mode = fp16_mode
        config.set_flag(trt.BuilderFlag.FP16)
    # Build on the requested CUDA device.
    with torch.cuda.device(device):
        engine = builder.build_engine(network, config)
    return engine
def save_trt_engine(engine, path):
    """Serialize TensorRT engine to disk.

    Arguments:
        engine (tensorrt.ICudaEngine): TensorRT engine to serialize
        path (str): disk path to write the engine
    """
    # Colorized deprecation banner (ANSI escapes), pointing users to MMDeploy.
    deprecation_msg = ''.join([
        '\x1b[107m',  # white background
        '\x1b[1m',    # bright
        '\x1b[31m',   # red
        'DeprecationWarning: This function will be deprecated in future. ',
        '\x1b[34m',   # blue
        'Welcome to use the unified model deployment toolbox ',
        'MMDeploy: https://github.com/open-mmlab/mmdeploy',
        '\x1b[0m',    # reset
    ])
    warnings.warn(deprecation_msg)
    serialized = bytearray(engine.serialize())
    with open(path, mode='wb') as f:
        f.write(serialized)
def load_trt_engine(path):
    """Deserialize TensorRT engine from disk.

    Arguments:
        path (str): disk path to read the engine

    Returns:
        tensorrt.ICudaEngine: the TensorRT engine loaded from disk
    """
    # Colorized deprecation banner (ANSI escapes), pointing users to MMDeploy.
    deprecation_msg = ''.join([
        '\x1b[107m',  # white background
        '\x1b[1m',    # bright
        '\x1b[31m',   # red
        'DeprecationWarning: This function will be deprecated in future. ',
        '\x1b[34m',   # blue
        'Welcome to use the unified model deployment toolbox ',
        'MMDeploy: https://github.com/open-mmlab/mmdeploy',
        '\x1b[0m',    # reset
    ])
    warnings.warn(deprecation_msg)
    with trt.Logger() as trt_logger, trt.Runtime(trt_logger) as runtime:
        with open(path, mode='rb') as handle:
            serialized = handle.read()
        return runtime.deserialize_cuda_engine(serialized)
def torch_dtype_from_trt(dtype):
    """Convert a TensorRT dtype to the corresponding pytorch dtype.

    Raises:
        TypeError: If ``dtype`` has no pytorch equivalent.
    """
    if dtype == trt.bool:
        return torch.bool
    if dtype == trt.int8:
        return torch.int8
    if dtype == trt.int32:
        return torch.int32
    if dtype == trt.float16:
        return torch.float16
    if dtype == trt.float32:
        return torch.float32
    raise TypeError('%s is not supported by torch' % dtype)
def torch_device_from_trt(device):
    """Convert a TensorRT device location to the corresponding torch device.

    Args:
        device (tensorrt.TensorLocation): DEVICE or HOST location.

    Returns:
        torch.device: ``cuda`` for DEVICE, ``cpu`` for HOST.

    Raises:
        TypeError: If ``device`` is not a supported location.
    """
    if device == trt.TensorLocation.DEVICE:
        return torch.device('cuda')
    elif device == trt.TensorLocation.HOST:
        return torch.device('cpu')
    else:
        # Bug fix: the original *returned* the TypeError instance instead
        # of raising it, silently handing callers an exception object
        # (inconsistent with torch_dtype_from_trt, which raises).
        raise TypeError('%s is not supported by torch' % device)
class TRTWrapper(torch.nn.Module):
    """TensorRT engine Wrapper.

    Arguments:
        engine (tensorrt.ICudaEngine): TensorRT engine to wrap
        input_names (list[str]): names of each inputs
        output_names (list[str]): names of each outputs

    Note:
        If the engine is converted from onnx model. The input_names and
        output_names should be the same as onnx model.
    """

    def __init__(self, engine, input_names=None, output_names=None):
        # ANSI escape codes rendering the deprecation notice in bold
        # red/blue on a white background.
        (bright_style, reset_style) = ('\x1b[1m', '\x1b[0m')
        (red_text, blue_text) = ('\x1b[31m', '\x1b[34m')
        white_background = '\x1b[107m'
        msg = ((white_background + bright_style) + red_text)
        msg += 'DeprecationWarning: This tool will be deprecated in future. '
        msg += (blue_text + 'Welcome to use the unified model deployment toolbox ')
        msg += 'MMDeploy: https://github.com/open-mmlab/mmdeploy'
        msg += reset_style
        warnings.warn(msg)
        super(TRTWrapper, self).__init__()
        self.engine = engine
        # A string is treated as a path to a serialized engine on disk.
        if isinstance(self.engine, str):
            self.engine = load_trt_engine(engine)
        if (not isinstance(self.engine, trt.ICudaEngine)):
            raise TypeError('engine should be str or trt.ICudaEngine')
        # Hook so the serialized engine ends up in state_dict on save.
        self._register_state_dict_hook(TRTWrapper._on_state_dict)
        self.context = self.engine.create_execution_context()
        # If either name list is missing, derive both from the engine's
        # bindings: bindings flagged as inputs are inputs, the rest outputs.
        if ((input_names is None) or (output_names is None)):
            names = [_ for _ in self.engine]
            input_names = list(filter(self.engine.binding_is_input, names))
            output_names = list((set(names) - set(input_names)))
        self.input_names = input_names
        self.output_names = output_names

    def _on_state_dict(self, state_dict, prefix, local_metadata):
        # Persist the serialized engine plus binding names so the wrapper
        # can be rebuilt from a checkpoint.
        state_dict[(prefix + 'engine')] = bytearray(self.engine.serialize())
        state_dict[(prefix + 'input_names')] = self.input_names
        state_dict[(prefix + 'output_names')] = self.output_names

    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
                              missing_keys, unexpected_keys, error_msgs):
        # Counterpart of ``_on_state_dict``: deserialize the engine bytes
        # and recreate the execution context.
        engine_bytes = state_dict[(prefix + 'engine')]
        with trt.Logger() as logger, trt.Runtime(logger) as runtime:
            self.engine = runtime.deserialize_cuda_engine(engine_bytes)
            self.context = self.engine.create_execution_context()
        self.input_names = state_dict[(prefix + 'input_names')]
        self.output_names = state_dict[(prefix + 'output_names')]

    def forward(self, inputs):
        """
        Arguments:
            inputs (dict): dict of input name-tensors pair

        Return:
            dict: dict of output name-tensors pair
        """
        assert (self.input_names is not None)
        assert (self.output_names is not None)
        # One binding slot (device pointer) per engine input/output.
        bindings = ([None] * (len(self.input_names) + len(self.output_names)))
        for (input_name, input_tensor) in inputs.items():
            idx = self.engine.get_binding_index(input_name)
            # TensorRT bindings have no int64 type; downcast long tensors.
            if (input_tensor.dtype == torch.long):
                input_tensor = input_tensor.int()
            self.context.set_binding_shape(idx, tuple(input_tensor.shape))
            bindings[idx] = input_tensor.contiguous().data_ptr()
        # Allocate an output tensor per output binding and record its
        # pointer for the engine to write into.
        outputs = {}
        for (i, output_name) in enumerate(self.output_names):
            idx = self.engine.get_binding_index(output_name)
            dtype = torch_dtype_from_trt(self.engine.get_binding_dtype(idx))
            shape = tuple(self.context.get_binding_shape(idx))
            device = torch_device_from_trt(self.engine.get_location(idx))
            output = torch.empty(size=shape, dtype=dtype, device=device)
            outputs[output_name] = output
            bindings[idx] = output.data_ptr()
        # Enqueue asynchronously on the current CUDA stream.
        self.context.execute_async_v2(bindings, torch.cuda.current_stream().cuda_stream)
        return outputs
class TRTWraper(TRTWrapper):
    """Deprecated alias kept for the historical misspelling of TRTWrapper."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Emit the warning after construction, matching the original order.
        warnings.warn(
            'TRTWraper will be deprecated in future. Please use TRTWrapper instead',
            DeprecationWarning)
class ConfigDict(Dict):
    """Dict variant used for configs where missing keys fail loudly.

    Item misses raise ``KeyError`` instead of auto-creating entries;
    attribute misses raise ``AttributeError`` (or re-raise whatever the
    ``Dict`` base lookup raised).
    """

    def __missing__(self, name):
        # Never silently create an entry for a missing key.
        raise KeyError(name)

    def __getattr__(self, name):
        try:
            return super().__getattr__(name)
        except KeyError:
            # Translate the base class KeyError into attribute semantics.
            pending = AttributeError(
                f"'{self.__class__.__name__}' object has no attribute '{name}'")
        except Exception as e:
            pending = e
        raise pending
def add_args(parser, cfg, prefix=''):
    """Recursively register config entries as command-line options.

    Args:
        parser (argparse.ArgumentParser): Parser to extend in place.
        cfg (dict): Config mapping. Nested dicts become dotted option names
            (``--outer.inner``).
        prefix (str): Dotted prefix applied to every generated option name.

    Returns:
        argparse.ArgumentParser: The same parser, for chaining.
    """
    for k, v in cfg.items():
        if isinstance(v, str):
            parser.add_argument('--' + prefix + k)
        elif isinstance(v, bool):
            # Bug fix: bool must be tested before int because bool is an int
            # subclass; previously boolean values fell into the int branch
            # and were registered as ``--opt <int>`` instead of flags.
            parser.add_argument('--' + prefix + k, action='store_true')
        elif isinstance(v, int):
            parser.add_argument('--' + prefix + k, type=int)
        elif isinstance(v, float):
            parser.add_argument('--' + prefix + k, type=float)
        elif isinstance(v, dict):
            add_args(parser, v, prefix + k + '.')
        elif isinstance(v, abc.Iterable):
            # Element type is inferred from the first element.
            parser.add_argument('--' + prefix + k, type=type(v[0]), nargs='+')
        else:
            print(f'cannot parse key {prefix + k} of type {type(v)}')
    return parser
class Config():
    """A facility for config and config files.

    It supports common file formats as configs: python/json/yaml. The interface
    is the same as a dict object and also allows access config values as
    attributes.

    Example:
        >>> cfg = Config(dict(a=1, b=dict(b1=[0, 1])))
        >>> cfg.a
        1
        >>> cfg.b
        {'b1': [0, 1]}
        >>> cfg.b.b1
        [0, 1]
        >>> cfg = Config.fromfile('tests/data/config/a.py')
        >>> cfg.filename
        "/home/kchen/projects/mmcv/tests/data/config/a.py"
        >>> cfg.item4
        'test'
        >>> cfg
        "Config [path: /home/kchen/projects/mmcv/tests/data/config/a.py]: "
        "{'item1': [1, 2], 'item2': {'a': 0}, 'item3': True, 'item4': 'test'}"
    """

    @staticmethod
    def _validate_py_syntax(filename):
        # Parse the file with ast first so syntax errors surface with a
        # clear message before the config is actually imported.
        with open(filename, 'r', encoding='utf-8') as f:
            content = f.read()
        try:
            ast.parse(content)
        except SyntaxError as e:
            raise SyntaxError(f'There are syntax errors in config file (unknown): {e}')

    @staticmethod
    def _substitute_predefined_vars(filename, temp_config_name):
        # Replace ``{{ fileDirname }}``-style placeholders with values
        # derived from the config file's own path, writing the result to
        # ``temp_config_name``.
        file_dirname = osp.dirname(filename)
        file_basename = osp.basename(filename)
        file_basename_no_extension = osp.splitext(file_basename)[0]
        file_extname = osp.splitext(filename)[1]
        support_templates = dict(fileDirname=file_dirname, fileBasename=file_basename,
                                 fileBasenameNoExtension=file_basename_no_extension,
                                 fileExtname=file_extname)
        with open(filename, 'r', encoding='utf-8') as f:
            config_file = f.read()
        for (key, value) in support_templates.items():
            regexp = (('\\{\\{\\s*' + str(key)) + '\\s*\\}\\}')
            # Normalize Windows path separators so they survive re.sub.
            value = value.replace('\\', '/')
            config_file = re.sub(regexp, value, config_file)
        with open(temp_config_name, 'w', encoding='utf-8') as tmp_config_file:
            tmp_config_file.write(config_file)

    @staticmethod
    def _pre_substitute_base_vars(filename, temp_config_name):
        """Substitute base variable placehoders to string, so that parsing
        would work."""
        with open(filename, 'r', encoding='utf-8') as f:
            config_file = f.read()
        base_var_dict = {}
        # Matches ``{{ _base_.some.dotted.key }}`` placeholders.
        regexp = (('\\{\\{\\s*' + BASE_KEY) + '\\.([\\w\\.]+)\\s*\\}\\}')
        base_vars = set(re.findall(regexp, config_file))
        for base_var in base_vars:
            # Unique random token so it cannot collide with user text;
            # resolved to the real value later in _substitute_base_vars.
            randstr = f'_{base_var}_{uuid.uuid4().hex.lower()[:6]}'
            base_var_dict[randstr] = base_var
            regexp = (((('\\{\\{\\s*' + BASE_KEY) + '\\.') + base_var) + '\\s*\\}\\}')
            config_file = re.sub(regexp, f'"{randstr}"', config_file)
        with open(temp_config_name, 'w', encoding='utf-8') as tmp_config_file:
            tmp_config_file.write(config_file)
        return base_var_dict

    @staticmethod
    def _substitute_base_vars(cfg, base_var_dict, base_cfg):
        """Substitute variable strings to their actual values."""
        cfg = copy.deepcopy(cfg)
        if isinstance(cfg, dict):
            for (k, v) in cfg.items():
                if (isinstance(v, str) and (v in base_var_dict)):
                    # Walk the dotted path into the base config.
                    new_v = base_cfg
                    for new_k in base_var_dict[v].split('.'):
                        new_v = new_v[new_k]
                    cfg[k] = new_v
                elif isinstance(v, (list, tuple, dict)):
                    cfg[k] = Config._substitute_base_vars(v, base_var_dict, base_cfg)
        elif isinstance(cfg, tuple):
            cfg = tuple((Config._substitute_base_vars(c, base_var_dict, base_cfg) for c in cfg))
        elif isinstance(cfg, list):
            cfg = [Config._substitute_base_vars(c, base_var_dict, base_cfg) for c in cfg]
        elif (isinstance(cfg, str) and (cfg in base_var_dict)):
            new_v = base_cfg
            for new_k in base_var_dict[cfg].split('.'):
                new_v = new_v[new_k]
            cfg = new_v
        return cfg

    @staticmethod
    def _file2dict(filename, use_predefined_variables=True):
        # Load ``filename`` into (cfg_dict, cfg_text), resolving predefined
        # variables, ``_base_`` inheritance and base-variable references.
        filename = osp.abspath(osp.expanduser(filename))
        check_file_exist(filename)
        fileExtname = osp.splitext(filename)[1]
        if (fileExtname not in ['.py', '.json', '.yaml', '.yml']):
            raise IOError('Only py/yml/yaml/json type are supported now!')
        with tempfile.TemporaryDirectory() as temp_config_dir:
            temp_config_file = tempfile.NamedTemporaryFile(dir=temp_config_dir, suffix=fileExtname)
            # Windows cannot reopen an already-open NamedTemporaryFile.
            if (platform.system() == 'Windows'):
                temp_config_file.close()
            temp_config_name = osp.basename(temp_config_file.name)
            if use_predefined_variables:
                Config._substitute_predefined_vars(filename, temp_config_file.name)
            else:
                shutil.copyfile(filename, temp_config_file.name)
            base_var_dict = Config._pre_substitute_base_vars(temp_config_file.name, temp_config_file.name)
            if filename.endswith('.py'):
                # Import the temp copy as a module and harvest its
                # non-dunder globals as the config dict.
                temp_module_name = osp.splitext(temp_config_name)[0]
                sys.path.insert(0, temp_config_dir)
                Config._validate_py_syntax(filename)
                mod = import_module(temp_module_name)
                sys.path.pop(0)
                cfg_dict = {name: value for (name, value) in mod.__dict__.items() if (not name.startswith('__'))}
                # Drop the temp module so repeated loads re-execute it.
                del sys.modules[temp_module_name]
            elif filename.endswith(('.yml', '.yaml', '.json')):
                import mmcv
                cfg_dict = mmcv.load(temp_config_file.name)
            temp_config_file.close()
        if (DEPRECATION_KEY in cfg_dict):
            # Config files may declare themselves deprecated and point to a
            # replacement and/or a reference URL.
            deprecation_info = cfg_dict.pop(DEPRECATION_KEY)
            warning_msg = f'The config file (unknown) will be deprecated in the future.'
            if ('expected' in deprecation_info):
                warning_msg += f" Please use {deprecation_info['expected']} instead."
            if ('reference' in deprecation_info):
                warning_msg += f" More information can be found at {deprecation_info['reference']}"
            warnings.warn(warning_msg, DeprecationWarning)
        cfg_text = (filename + '\n')
        with open(filename, 'r', encoding='utf-8') as f:
            cfg_text += f.read()
        if (BASE_KEY in cfg_dict):
            # Recursively load every base config, forbid duplicate keys
            # among bases, then merge this file on top.
            cfg_dir = osp.dirname(filename)
            base_filename = cfg_dict.pop(BASE_KEY)
            base_filename = (base_filename if isinstance(base_filename, list) else [base_filename])
            cfg_dict_list = list()
            cfg_text_list = list()
            for f in base_filename:
                (_cfg_dict, _cfg_text) = Config._file2dict(osp.join(cfg_dir, f))
                cfg_dict_list.append(_cfg_dict)
                cfg_text_list.append(_cfg_text)
            base_cfg_dict = dict()
            for c in cfg_dict_list:
                duplicate_keys = (base_cfg_dict.keys() & c.keys())
                if (len(duplicate_keys) > 0):
                    raise KeyError(f'Duplicate key is not allowed among bases. Duplicate keys: {duplicate_keys}')
                base_cfg_dict.update(c)
            # Resolve ``{{ _base_.x }}`` placeholders against the merged base.
            cfg_dict = Config._substitute_base_vars(cfg_dict, base_var_dict, base_cfg_dict)
            base_cfg_dict = Config._merge_a_into_b(cfg_dict, base_cfg_dict)
            cfg_dict = base_cfg_dict
            cfg_text_list.append(cfg_text)
            cfg_text = '\n'.join(cfg_text_list)
        return (cfg_dict, cfg_text)

    @staticmethod
    def _merge_a_into_b(a, b, allow_list_keys=False):
        """merge dict ``a`` into dict ``b`` (non-inplace).

        Values in ``a`` will overwrite ``b``. ``b`` is copied first to avoid
        in-place modifications.

        Args:
            a (dict): The source dict to be merged into ``b``.
            b (dict): The origin dict to be fetch keys from ``a``.
            allow_list_keys (bool): If True, int string keys (e.g. '0', '1')
                are allowed in source ``a`` and will replace the element of the
                corresponding index in b if b is a list. Default: False.

        Returns:
            dict: The modified dict of ``b`` using ``a``.

        Examples:
            # Normally merge a into b.
            >>> Config._merge_a_into_b(
            ...     dict(obj=dict(a=2)), dict(obj=dict(a=1)))
            {'obj': {'a': 2}}

            # Delete b first and merge a into b.
            >>> Config._merge_a_into_b(
            ...     dict(obj=dict(_delete_=True, a=2)), dict(obj=dict(a=1)))
            {'obj': {'a': 2}}

            # b is a list
            >>> Config._merge_a_into_b(
            ...     {'0': dict(a=2)}, [dict(a=1), dict(b=2)], True)
            [{'a': 2}, {'b': 2}]
        """
        b = b.copy()
        for (k, v) in a.items():
            if (allow_list_keys and k.isdigit() and isinstance(b, list)):
                # Digit string keys index into a base list element.
                k = int(k)
                if (len(b) <= k):
                    raise KeyError(f'Index {k} exceeds the length of list {b}')
                b[k] = Config._merge_a_into_b(v, b[k], allow_list_keys)
            elif isinstance(v, dict):
                # ``_delete_=True`` in the child replaces the base value
                # entirely instead of merging into it.
                if ((k in b) and (not v.pop(DELETE_KEY, False))):
                    allowed_types = ((dict, list) if allow_list_keys else dict)
                    if (not isinstance(b[k], allowed_types)):
                        raise TypeError(f'{k}={v} in child config cannot inherit from base because {k} is a dict in the child config but is of type {type(b[k])} in base config. You may set `{DELETE_KEY}=True` to ignore the base config.')
                    b[k] = Config._merge_a_into_b(v, b[k], allow_list_keys)
                else:
                    b[k] = ConfigDict(v)
            else:
                b[k] = v
        return b

    @staticmethod
    def fromfile(filename, use_predefined_variables=True, import_custom_modules=True):
        # Build a Config from a py/json/yaml file, optionally importing the
        # modules listed under the ``custom_imports`` key first.
        (cfg_dict, cfg_text) = Config._file2dict(filename, use_predefined_variables)
        if (import_custom_modules and cfg_dict.get('custom_imports', None)):
            import_modules_from_strings(**cfg_dict['custom_imports'])
        return Config(cfg_dict, cfg_text=cfg_text, filename=filename)

    @staticmethod
    def fromstring(cfg_str, file_format):
        """Generate config from config str.

        Args:
            cfg_str (str): Config str.
            file_format (str): Config file format corresponding to the
                config str. Only py/yml/yaml/json type are supported now!

        Returns:
            :obj:`Config`: Config obj.
        """
        if (file_format not in ['.py', '.json', '.yaml', '.yml']):
            raise IOError('Only py/yml/yaml/json type are supported now!')
        if ((file_format != '.py') and ('dict(' in cfg_str)):
            warnings.warn('Please check "file_format", the file format may be .py')
        # delete=False so the file can be reopened by fromfile (Windows).
        with tempfile.NamedTemporaryFile('w', encoding='utf-8', suffix=file_format, delete=False) as temp_file:
            temp_file.write(cfg_str)
        cfg = Config.fromfile(temp_file.name)
        os.remove(temp_file.name)
        return cfg

    @staticmethod
    def auto_argparser(description=None):
        """Generate argparser from config file automatically (experimental)"""
        # First parse only the config path, then build the real parser from
        # the loaded config's keys.
        partial_parser = ArgumentParser(description=description)
        partial_parser.add_argument('config', help='config file path')
        cfg_file = partial_parser.parse_known_args()[0].config
        cfg = Config.fromfile(cfg_file)
        parser = ArgumentParser(description=description)
        parser.add_argument('config', help='config file path')
        add_args(parser, cfg)
        return (parser, cfg)

    def __init__(self, cfg_dict=None, cfg_text=None, filename=None):
        if (cfg_dict is None):
            cfg_dict = dict()
        elif (not isinstance(cfg_dict, dict)):
            raise TypeError(f'cfg_dict must be a dict, but got {type(cfg_dict)}')
        for key in cfg_dict:
            if (key in RESERVED_KEYS):
                raise KeyError(f'{key} is reserved for config file')
        # Bypass our own __setattr__, which forwards into _cfg_dict.
        super(Config, self).__setattr__('_cfg_dict', ConfigDict(cfg_dict))
        super(Config, self).__setattr__('_filename', filename)
        if cfg_text:
            text = cfg_text
        elif filename:
            with open(filename, 'r') as f:
                text = f.read()
        else:
            text = ''
        super(Config, self).__setattr__('_text', text)

    @property
    def filename(self):
        # Path of the config file this object was loaded from (may be None).
        return self._filename

    @property
    def text(self):
        # Raw text of the config file(s), bases included.
        return self._text

    @property
    def pretty_text(self):
        # Render the config as runnable python source, yapf-formatted.
        indent = 4

        def _indent(s_, num_spaces):
            # Indent every line but the first by ``num_spaces``.
            s = s_.split('\n')
            if (len(s) == 1):
                return s_
            first = s.pop(0)
            s = [((num_spaces * ' ') + line) for line in s]
            s = '\n'.join(s)
            s = ((first + '\n') + s)
            return s

        def _format_basic_types(k, v, use_mapping=False):
            # ``use_mapping`` switches between dict-literal ('k': v) and
            # keyword (k=v) syntax.
            if isinstance(v, str):
                v_str = f"'{v}'"
            else:
                v_str = str(v)
            if use_mapping:
                k_str = (f"'{k}'" if isinstance(k, str) else str(k))
                attr_str = f'{k_str}: {v_str}'
            else:
                attr_str = f'{str(k)}={v_str}'
            attr_str = _indent(attr_str, indent)
            return attr_str

        def _format_list(k, v, use_mapping=False):
            # Lists made entirely of dicts get one ``dict(...)`` per line.
            if all((isinstance(_, dict) for _ in v)):
                v_str = '[\n'
                v_str += '\n'.join((f'dict({_indent(_format_dict(v_), indent)}),' for v_ in v)).rstrip(',')
                if use_mapping:
                    k_str = (f"'{k}'" if isinstance(k, str) else str(k))
                    attr_str = f'{k_str}: {v_str}'
                else:
                    attr_str = f'{str(k)}={v_str}'
                attr_str = (_indent(attr_str, indent) + ']')
            else:
                attr_str = _format_basic_types(k, v, use_mapping)
            return attr_str

        def _contain_invalid_identifier(dict_str):
            # True if any key cannot be written as a python identifier,
            # forcing dict-literal syntax.
            contain_invalid_identifier = False
            for key_name in dict_str:
                contain_invalid_identifier |= (not str(key_name).isidentifier())
            return contain_invalid_identifier

        def _format_dict(input_dict, outest_level=False):
            r = ''
            s = []
            use_mapping = _contain_invalid_identifier(input_dict)
            if use_mapping:
                r += '{'
            for (idx, (k, v)) in enumerate(input_dict.items()):
                is_last = (idx >= (len(input_dict) - 1))
                end = ('' if (outest_level or is_last) else ',')
                if isinstance(v, dict):
                    v_str = ('\n' + _format_dict(v))
                    if use_mapping:
                        k_str = (f"'{k}'" if isinstance(k, str) else str(k))
                        attr_str = f'{k_str}: dict({v_str}'
                    else:
                        attr_str = f'{str(k)}=dict({v_str}'
                    attr_str = ((_indent(attr_str, indent) + ')') + end)
                elif isinstance(v, list):
                    attr_str = (_format_list(k, v, use_mapping) + end)
                else:
                    attr_str = (_format_basic_types(k, v, use_mapping) + end)
                s.append(attr_str)
            r += '\n'.join(s)
            if use_mapping:
                r += '}'
            return r

        cfg_dict = self._cfg_dict.to_dict()
        text = _format_dict(cfg_dict, outest_level=True)
        # Run yapf so the dumped python config is consistently formatted.
        yapf_style = dict(based_on_style='pep8', blank_line_before_nested_class_or_def=True, split_before_expression_after_opening_paren=True)
        (text, _) = FormatCode(text, style_config=yapf_style, verify=True)
        return text

    def __repr__(self):
        return f'Config (path: {self.filename}): {self._cfg_dict.__repr__()}'

    def __len__(self):
        return len(self._cfg_dict)

    def __getattr__(self, name):
        # Delegate attribute access to the underlying ConfigDict.
        return getattr(self._cfg_dict, name)

    def __getitem__(self, name):
        return self._cfg_dict.__getitem__(name)

    def __setattr__(self, name, value):
        # Plain dicts are wrapped so nested attribute access keeps working.
        if isinstance(value, dict):
            value = ConfigDict(value)
        self._cfg_dict.__setattr__(name, value)

    def __setitem__(self, name, value):
        if isinstance(value, dict):
            value = ConfigDict(value)
        self._cfg_dict.__setitem__(name, value)

    def __iter__(self):
        return iter(self._cfg_dict)

    def __getstate__(self):
        # Pickle support: only the dict, filename and text are state.
        return (self._cfg_dict, self._filename, self._text)

    def __copy__(self):
        cls = self.__class__
        other = cls.__new__(cls)
        other.__dict__.update(self.__dict__)
        return other

    def __deepcopy__(self, memo):
        cls = self.__class__
        other = cls.__new__(cls)
        memo[id(self)] = other
        for (key, value) in self.__dict__.items():
            # object.__setattr__ avoids the _cfg_dict redirection above.
            super(Config, other).__setattr__(key, copy.deepcopy(value, memo))
        return other

    def __setstate__(self, state):
        (_cfg_dict, _filename, _text) = state
        super(Config, self).__setattr__('_cfg_dict', _cfg_dict)
        super(Config, self).__setattr__('_filename', _filename)
        super(Config, self).__setattr__('_text', _text)

    def dump(self, file=None):
        # Fetch the raw dict without triggering __getattr__ delegation.
        cfg_dict = super(Config, self).__getattribute__('_cfg_dict').to_dict()
        if self.filename.endswith('.py'):
            if (file is None):
                return self.pretty_text
            else:
                with open(file, 'w', encoding='utf-8') as f:
                    f.write(self.pretty_text)
        else:
            import mmcv
            if (file is None):
                file_format = self.filename.split('.')[(- 1)]
                return mmcv.dump(cfg_dict, file_format=file_format)
            else:
                mmcv.dump(cfg_dict, file)

    def merge_from_dict(self, options, allow_list_keys=True):
        """Merge list into cfg_dict.

        Merge the dict parsed by MultipleKVAction into this cfg.

        Examples:
            >>> options = {'model.backbone.depth': 50,
            ...            'model.backbone.with_cp':True}
            >>> cfg = Config(dict(model=dict(backbone=dict(type='ResNet'))))
            >>> cfg.merge_from_dict(options)
            >>> cfg_dict = super(Config, self).__getattribute__('_cfg_dict')
            >>> assert cfg_dict == dict(
            ...     model=dict(backbone=dict(depth=50, with_cp=True)))

            >>> # Merge list element
            >>> cfg = Config(dict(pipeline=[
            ...     dict(type='LoadImage'), dict(type='LoadAnnotations')]))
            >>> options = dict(pipeline={'0': dict(type='SelfLoadImage')})
            >>> cfg.merge_from_dict(options, allow_list_keys=True)
            >>> cfg_dict = super(Config, self).__getattribute__('_cfg_dict')
            >>> assert cfg_dict == dict(pipeline=[
            ...     dict(type='SelfLoadImage'), dict(type='LoadAnnotations')])

        Args:
            options (dict): dict of configs to merge from.
            allow_list_keys (bool): If True, int string keys (e.g. '0', '1')
                are allowed in ``options`` and will replace the element of the
                corresponding index in the config if the config is a list.
                Default: True.
        """
        # Expand dotted keys ('a.b.c') into nested dicts before merging.
        option_cfg_dict = {}
        for (full_key, v) in options.items():
            d = option_cfg_dict
            key_list = full_key.split('.')
            for subkey in key_list[:(- 1)]:
                d.setdefault(subkey, ConfigDict())
                d = d[subkey]
            subkey = key_list[(- 1)]
            d[subkey] = v
        cfg_dict = super(Config, self).__getattribute__('_cfg_dict')
        super(Config, self).__setattr__('_cfg_dict', Config._merge_a_into_b(option_cfg_dict, cfg_dict, allow_list_keys=allow_list_keys))
class DictAction(Action):
    """argparse action that collects ``KEY=VALUE`` pairs into a dict.

    Each argument is split on the first ``=``. Values may be scalars
    (int/float/bool/str), comma-separated lists (``KEY=V1,V2,V3``),
    bracketed lists (``KEY=[V1,V2,V3]``) or nested list/tuple structures
    such as ``KEY=[(V1,V2),(V3,V4)]``.
    """

    @staticmethod
    def _parse_int_float_bool(val):
        """Cast ``val`` to int, then float, then bool; fall back to str."""
        for cast in (int, float):
            try:
                return cast(val)
            except ValueError:
                pass
        lowered = val.lower()
        if lowered in ('true', 'false'):
            return lowered == 'true'
        return val

    @staticmethod
    def _parse_iterable(val):
        """Recursively parse a (possibly nested) iterable value string.

        All elements inside ``()`` or ``[]`` are treated as iterable values.

        Args:
            val (str): Value string.

        Returns:
            list | tuple: The expanded list or tuple from the string.

        Examples:
            >>> DictAction._parse_iterable('1,2,3')
            [1, 2, 3]
            >>> DictAction._parse_iterable('[a, b, c]')
            ['a', 'b', 'c']
            >>> DictAction._parse_iterable('[(1, 2, 3), [a, b], c]')
            [(1, 2, 3), ['a', 'b'], 'c']
        """

        def _next_top_level_comma(text):
            """Index of the first comma outside any brackets, else len(text)."""
            assert (text.count('(') == text.count(')')) and \
                (text.count('[') == text.count(']')), \
                f'Imbalanced brackets exist in {text}'
            paren_depth = square_depth = 0
            for pos, ch in enumerate(text):
                if ch == '(':
                    paren_depth += 1
                elif ch == ')':
                    paren_depth -= 1
                elif ch == '[':
                    square_depth += 1
                elif ch == ']':
                    square_depth -= 1
                elif ch == ',' and paren_depth == 0 and square_depth == 0:
                    return pos
            return len(text)

        # Strip surrounding quotes and all spaces before structural parsing.
        val = val.strip('\'"').replace(' ', '')
        is_tuple = False
        if val.startswith('(') and val.endswith(')'):
            is_tuple = True
            val = val[1:-1]
        elif val.startswith('[') and val.endswith(']'):
            val = val[1:-1]
        elif ',' not in val:
            # Bare scalar: no brackets, no commas.
            return DictAction._parse_int_float_bool(val)
        parsed = []
        while val:
            cut = _next_top_level_comma(val)
            parsed.append(DictAction._parse_iterable(val[:cut]))
            val = val[cut + 1:]
        return tuple(parsed) if is_tuple else parsed

    def __call__(self, parser, namespace, values, option_string=None):
        parsed_options = {}
        for pair in values:
            key, raw = pair.split('=', maxsplit=1)
            parsed_options[key] = self._parse_iterable(raw)
        setattr(namespace, self.dest, parsed_options)
def collect_env():
    """Collect the information of the running environments.

    Returns:
        dict: The environment information. The following fields are contained.

            - sys.platform: The variable of ``sys.platform``.
            - Python: Python version.
            - CUDA available: Bool, indicating if CUDA is available.
            - GPU devices: Device type of each GPU.
            - CUDA_HOME (optional): The env var ``CUDA_HOME``.
            - NVCC (optional): NVCC version.
            - GCC: GCC version, "n/a" if GCC is not installed.
            - PyTorch: PyTorch version.
            - PyTorch compiling details: The output of ``torch.__config__.show()``.
            - TorchVision (optional): TorchVision version.
            - OpenCV: OpenCV version.
            - MMCV: MMCV version.
            - MMCV Compiler: The GCC version for compiling MMCV ops.
            - MMCV CUDA Compiler: The CUDA version for compiling MMCV ops.
    """
    env_info = {}
    env_info['sys.platform'] = sys.platform
    env_info['Python'] = sys.version.replace('\n', '')
    cuda_available = torch.cuda.is_available()
    env_info['CUDA available'] = cuda_available
    if cuda_available:
        # Group GPU indices by device name, e.g. {'Tesla V100': ['0', '1']}
        # becomes the entry "GPU 0,1: Tesla V100".
        devices = defaultdict(list)
        for k in range(torch.cuda.device_count()):
            devices[torch.cuda.get_device_name(k)].append(str(k))
        for (name, device_ids) in devices.items():
            env_info[('GPU ' + ','.join(device_ids))] = name
        from mmcv.utils.parrots_wrapper import _get_cuda_home
        CUDA_HOME = _get_cuda_home()
        env_info['CUDA_HOME'] = CUDA_HOME
        if ((CUDA_HOME is not None) and osp.isdir(CUDA_HOME)):
            # Query nvcc for its version line; shell pipeline keeps only
            # the last line of `nvcc -V`.
            try:
                nvcc = osp.join(CUDA_HOME, 'bin/nvcc')
                nvcc = subprocess.check_output(f'"{nvcc}" -V | tail -n1', shell=True)
                nvcc = nvcc.decode('utf-8').strip()
            except subprocess.SubprocessError:
                nvcc = 'Not Available'
            env_info['NVCC'] = nvcc
    try:
        gcc = subprocess.check_output('gcc --version | head -n1', shell=True)
        gcc = gcc.decode('utf-8').strip()
        env_info['GCC'] = gcc
    except subprocess.CalledProcessError:
        # gcc is not installed.
        env_info['GCC'] = 'n/a'
    env_info['PyTorch'] = torch.__version__
    env_info['PyTorch compiling details'] = get_build_config()
    try:
        import torchvision
        env_info['TorchVision'] = torchvision.__version__
    except ModuleNotFoundError:
        # torchvision is optional; omit the field when absent.
        pass
    env_info['OpenCV'] = cv2.__version__
    env_info['MMCV'] = mmcv.__version__
    try:
        from mmcv.ops import get_compiler_version, get_compiling_cuda_version
    except ModuleNotFoundError:
        # Compiled mmcv ops are unavailable (mmcv-lite install).
        env_info['MMCV Compiler'] = 'n/a'
        env_info['MMCV CUDA Compiler'] = 'n/a'
    else:
        env_info['MMCV Compiler'] = get_compiler_version()
        env_info['MMCV CUDA Compiler'] = get_compiling_cuda_version()
    return env_info
def check_ops_exist():
    """Return whether the compiled ``mmcv._ext`` op extension is available.

    Returns:
        bool: True if ``mmcv._ext`` can be located, False otherwise —
        including when ``mmcv`` itself is not installed, a case in which
        the previous ``pkgutil.find_loader`` call raised ``ImportError``
        instead of answering the question.
    """
    import importlib.util
    try:
        return importlib.util.find_spec('mmcv._ext') is not None
    except ImportError:
        # find_spec raises ModuleNotFoundError when the parent package
        # ('mmcv') is missing; treat that as "ops do not exist".
        return False
def get_logger(name, log_file=None, log_level=logging.INFO, file_mode='w'):
    """Initialize and get a logger by name.

    If the logger has not been initialized, this method will initialize the
    logger by adding one or two handlers, otherwise the initialized logger will
    be directly returned. During initialization, a StreamHandler will always be
    added. If `log_file` is specified and the process rank is 0, a FileHandler
    will also be added.

    Args:
        name (str): Logger name.
        log_file (str | None): The log filename. If specified, a FileHandler
            will be added to the logger.
        log_level (int): The logger level. Note that only the process of
            rank 0 is affected, and other processes will set the level to
            "Error" thus be silent most of the time.
        file_mode (str): The file mode used in opening log file.
            Defaults to 'w'.

    Returns:
        logging.Logger: The expected logger.
    """
    logger = logging.getLogger(name)
    if (name in logger_initialized):
        return logger
    # A child of an already-initialized logger inherits its handlers
    # through propagation, so return it untouched.
    for logger_name in logger_initialized:
        if name.startswith(logger_name):
            return logger
    # Quiet any StreamHandler already attached to the root logger so
    # messages are not duplicated on the console.
    for handler in logger.root.handlers:
        if (type(handler) is logging.StreamHandler):
            handler.setLevel(logging.ERROR)
    stream_handler = logging.StreamHandler()
    handlers = [stream_handler]
    # Rank of this process in distributed training; 0 when
    # torch.distributed is unavailable or uninitialized.
    if (dist.is_available() and dist.is_initialized()):
        rank = dist.get_rank()
    else:
        rank = 0
    # Only rank 0 writes the log file, avoiding concurrent writes from
    # multiple processes.
    if ((rank == 0) and (log_file is not None)):
        file_handler = logging.FileHandler(log_file, file_mode)
        handlers.append(file_handler)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    for handler in handlers:
        handler.setFormatter(formatter)
        handler.setLevel(log_level)
        logger.addHandler(handler)
    # Non-zero ranks only emit errors so the console stays readable.
    if (rank == 0):
        logger.setLevel(log_level)
    else:
        logger.setLevel(logging.ERROR)
    # Record initialization in the module-level registry.
    logger_initialized[name] = True
    return logger
def print_log(msg, logger=None, level=logging.INFO):
    """Print a log message.

    Args:
        msg (str): The message to be logged.
        logger (logging.Logger | str | None): The logger to be used.
            Some special loggers are:

            - "silent": no message will be printed.
            - other str: the logger obtained with `get_root_logger(logger)`.
            - None: The `print()` method will be used to print log messages.
        level (int): Logging level. Only available when `logger` is a Logger
            object or "root".
    """
    if isinstance(logger, logging.Logger):
        logger.log(level, msg)
    elif logger is None:
        print(msg)
    elif logger == 'silent':
        # Explicitly requested no-op.
        pass
    elif isinstance(logger, str):
        get_logger(logger).log(level, msg)
    else:
        raise TypeError(f'logger should be either a logging.Logger object, str, "silent" or None, but got {type(logger)}')
def _ntuple(n): def parse(x): if isinstance(x, collections.abc.Iterable): return x return tuple(repeat(x, n)) return parse
def is_str(x):
    """Return True if ``x`` is a ``str`` instance.

    Note: kept only for backward compatibility; it predates the drop of
    python 2 support, where the check was non-trivial.
    """
    return isinstance(x, str)
def import_modules_from_strings(imports, allow_failed_imports=False): "Import modules from the given list of strings.\n\n Args:\n imports (list | str | None): The given module names to be imported.\n allow_failed_imports (bool): If True, the failed imports will return\n None. Otherwise, an ImportError is raise. Default: False.\n\n Returns:\n list[module] | module | None: The imported modules.\n\n Examples:\n >>> osp, sys = import_modules_from_strings(\n ... ['os.path', 'sys'])\n >>> import os.path as osp_\n >>> import sys as sys_\n >>> assert osp == osp_\n >>> assert sys == sys_\n " if (not imports): return single_import = False if isinstance(imports, str): single_import = True imports = [imports] if (not isinstance(imports, list)): raise TypeError(f'custom_imports must be a list but got type {type(imports)}') imported = [] for imp in imports: if (not isinstance(imp, str)): raise TypeError(f'{imp} is of type {type(imp)} and cannot be imported.') try: imported_tmp = import_module(imp) except ImportError: if allow_failed_imports: warnings.warn(f'{imp} failed to import and is ignored.', UserWarning) imported_tmp = None else: raise ImportError imported.append(imported_tmp) if single_import: imported = imported[0] return imported
def iter_cast(inputs, dst_type, return_type=None):
    """Cast elements of an iterable object into some type.

    Args:
        inputs (Iterable): The input object.
        dst_type (type): Destination type.
        return_type (type, optional): If specified, the output object will be
            converted to this type, otherwise an iterator.

    Returns:
        iterator or specified type: The converted object.
    """
    if not isinstance(inputs, abc.Iterable):
        raise TypeError('inputs must be an iterable object')
    if not isinstance(dst_type, type):
        raise TypeError('"dst_type" must be a valid type')
    converted = map(dst_type, inputs)
    return converted if return_type is None else return_type(converted)
def list_cast(inputs, dst_type):
    """Cast every element of ``inputs`` to ``dst_type``, collected as a list.

    Thin convenience wrapper around :func:`iter_cast` with
    ``return_type=list``.
    """
    return iter_cast(inputs, dst_type, return_type=list)
def tuple_cast(inputs, dst_type):
    """Cast every element of ``inputs`` to ``dst_type``, collected as a tuple.

    Thin convenience wrapper around :func:`iter_cast` with
    ``return_type=tuple``.
    """
    return iter_cast(inputs, dst_type, return_type=tuple)
def is_seq_of(seq, expected_type, seq_type=None):
    """Check whether it is a sequence of some type.

    Args:
        seq (Sequence): The sequence to be checked.
        expected_type (type): Expected type of sequence items.
        seq_type (type, optional): Expected sequence type; any
            ``abc.Sequence`` is accepted when omitted.

    Returns:
        bool: Whether the sequence is valid.
    """
    if seq_type is None:
        exp_seq_type = abc.Sequence
    else:
        assert isinstance(seq_type, type)
        exp_seq_type = seq_type
    if not isinstance(seq, exp_seq_type):
        return False
    return all(isinstance(item, expected_type) for item in seq)
def is_list_of(seq, expected_type):
    """Check that ``seq`` is a ``list`` whose items are all ``expected_type``.

    Thin convenience wrapper around :func:`is_seq_of` with ``seq_type=list``.
    """
    return is_seq_of(seq, expected_type, seq_type=list)
def is_tuple_of(seq, expected_type):
    """Check that ``seq`` is a ``tuple`` whose items are all ``expected_type``.

    Thin convenience wrapper around :func:`is_seq_of` with ``seq_type=tuple``.
    """
    return is_seq_of(seq, expected_type, seq_type=tuple)
def slice_list(in_list, lens):
    """Slice a list into several sub lists by a list of given length.

    Args:
        in_list (list): The list to be sliced.
        lens (int or list): The expected length of each out list.

    Returns:
        list: A list of sliced list.

    Raises:
        TypeError: If ``lens`` is neither an int nor a list of ints.
        ValueError: If the lengths in ``lens`` do not sum to ``len(in_list)``.
    """
    if isinstance(lens, int):
        # An int means equal-sized chunks; the list length must divide evenly.
        assert len(in_list) % lens == 0
        lens = [lens] * int(len(in_list) / lens)
    if not isinstance(lens, list):
        # Fixed: the message previously referred to a nonexistent
        # "indices" argument instead of ``lens``.
        raise TypeError('"lens" must be an integer or a list of integers')
    elif sum(lens) != len(in_list):
        raise ValueError(f'sum of lens and list length does not match: {sum(lens)} != {len(in_list)}')
    out_list = []
    idx = 0
    for i in range(len(lens)):
        out_list.append(in_list[idx:(idx + lens[i])])
        idx += lens[i]
    return out_list
def concat_list(in_list):
    """Concatenate a list of list into a single list.

    Args:
        in_list (list): The list of list to be merged.

    Returns:
        list: The concatenated flat list.
    """
    return list(itertools.chain.from_iterable(in_list))
def check_prerequisites(prerequisites, checker, msg_tmpl='Prerequisites "{}" are required in method "{}" but not found, please install them first.'):
    """A decorator factory to check if prerequisites are satisfied.

    Args:
        prerequisites (str of list[str]): Prerequisites to be checked.
        checker (callable): The checker method that returns True if a
            prerequisite is meet, False otherwise.
        msg_tmpl (str): The message template with two variables.

    Returns:
        decorator: A specific decorator.
    """
    def wrap(func):

        @functools.wraps(func)
        def wrapped_func(*args, **kwargs):
            # Normalize a single prerequisite string to a list.
            requirements = [prerequisites] if isinstance(prerequisites, str) else prerequisites
            missing = [item for item in requirements if not checker(item)]
            if missing:
                print(msg_tmpl.format(', '.join(missing), func.__name__))
                raise RuntimeError('Prerequisites not meet.')
            return func(*args, **kwargs)
        return wrapped_func
    return wrap
def _check_py_package(package): try: import_module(package) except ImportError: return False else: return True
def _check_executable(cmd): if (subprocess.call(f'which {cmd}', shell=True) != 0): return False else: return True
def requires_package(prerequisites):
    """Decorator checking that the given python packages are importable.

    Example:
        >>> @requires_package('numpy')
        >>> func(arg1, args):
        >>>    return numpy.zeros(1)
        array([0.])
        >>> @requires_package(['numpy', 'non_package'])
        >>> func(arg1, args):
        >>>    return numpy.zeros(1)
        ImportError
    """
    # Delegates to the generic prerequisite checker with an import probe.
    return check_prerequisites(prerequisites, checker=_check_py_package)
def requires_executable(prerequisites):
    """Decorator checking that the given executables are available.

    Example:
        >>> @requires_executable('ffmpeg')
        >>> func(arg1, args):
        >>>    print(1)
        1
    """
    # Delegates to the generic prerequisite checker with a PATH probe.
    return check_prerequisites(prerequisites, checker=_check_executable)
def deprecated_api_warning(name_dict, cls_name=None):
    """Decorator factory that maps deprecated argument names to new ones.

    Args:
        name_dict (dict): Maps each deprecated argument name (key) to its
            expected replacement name (value).
        cls_name (str, optional): Class name prepended to the function name
            in warning messages. Default: None.

    Returns:
        func: Wrapped function that emits a ``DeprecationWarning`` whenever
        a deprecated name is used and forwards the call with the new names.
    """

    def api_warning_wrapper(old_func):

        @functools.wraps(old_func)
        def new_func(*args, **kwargs):
            # Introspect the wrapped function to learn its positional
            # parameter names.
            args_info = getfullargspec(old_func)
            func_name = old_func.__name__
            if (cls_name is not None):
                func_name = f'{cls_name}.{func_name}'
            if args:
                # Names of the parameters actually bound positionally.
                arg_names = args_info.args[:len(args)]
                for (src_arg_name, dst_arg_name) in name_dict.items():
                    if (src_arg_name in arg_names):
                        warnings.warn(f'"{src_arg_name}" is deprecated in `{func_name}`, please use "{dst_arg_name}" instead', DeprecationWarning)
                        # NOTE(review): this only renames the local list;
                        # positional values are forwarded unchanged below,
                        # so this line is effectively bookkeeping.
                        arg_names[arg_names.index(src_arg_name)] = dst_arg_name
            if kwargs:
                for (src_arg_name, dst_arg_name) in name_dict.items():
                    if (src_arg_name in kwargs):
                        # Refuse calls that pass both the deprecated and the
                        # new keyword at once — the intent is ambiguous.
                        assert (dst_arg_name not in kwargs), f'The expected behavior is to replace the deprecated key `{src_arg_name}` to new key `{dst_arg_name}`, but got them in the arguments at the same time, which is confusing. `{src_arg_name} will be deprecated in the future, please use `{dst_arg_name}` instead.'
                        warnings.warn(f'"{src_arg_name}" is deprecated in `{func_name}`, please use "{dst_arg_name}" instead', DeprecationWarning)
                        # Move the value under its new keyword name.
                        kwargs[dst_arg_name] = kwargs.pop(src_arg_name)
            output = old_func(*args, **kwargs)
            return output
        return new_func
    return api_warning_wrapper
def is_method_overridden(method, base_class, derived_class):
    """Tell whether ``derived_class`` overrides ``method`` of ``base_class``.

    Args:
        method (str): The method name to check.
        base_class (type): The base class.
        derived_class (type | Any): The derived class or an instance of it.
    """
    assert isinstance(base_class, type), "base_class doesn't accept instance, Please pass class instead."
    # Accept an instance by falling back to its class.
    if not isinstance(derived_class, type):
        derived_class = derived_class.__class__
    return getattr(derived_class, method) != getattr(base_class, method)
def has_method(obj: object, method: str) -> bool:
    """Return True when ``obj`` exposes a callable attribute named ``method``.

    Args:
        obj (object): The object to check.
        method (str): The attribute name to look up.

    Returns:
        bool: True if the attribute exists and is callable, else False.
    """
    candidate = getattr(obj, method, None)
    return callable(candidate)
def is_rocm_pytorch() -> bool:
    """Whether the installed PyTorch was built against ROCm (HIP)."""
    if TORCH_VERSION == 'parrots':
        return False
    try:
        from torch.utils.cpp_extension import ROCM_HOME
    except ImportError:
        return False
    # Both the HIP runtime version and a ROCm install must be present.
    return (torch.version.hip is not None) and (ROCM_HOME is not None)
def _get_cuda_home():
    """Return the CUDA (or ROCm) toolkit root for the active backend."""
    if TORCH_VERSION == 'parrots':
        from parrots.utils.build_extension import CUDA_HOME
        return CUDA_HOME
    if is_rocm_pytorch():
        # On ROCm builds the toolkit root lives under ROCM_HOME.
        from torch.utils.cpp_extension import ROCM_HOME
        return ROCM_HOME
    from torch.utils.cpp_extension import CUDA_HOME
    return CUDA_HOME
def get_build_config():
    """Return the build configuration string of the active backend."""
    if TORCH_VERSION != 'parrots':
        return torch.__config__.show()
    from parrots.config import get_build_info
    return get_build_info()
def _get_conv():
    """Return the ``(_ConvNd, _ConvTransposeMixin)`` bases for the backend."""
    if TORCH_VERSION != 'parrots':
        from torch.nn.modules.conv import _ConvNd, _ConvTransposeMixin
    else:
        from parrots.nn.modules.conv import _ConvNd, _ConvTransposeMixin
    return _ConvNd, _ConvTransposeMixin
def _get_dataloader():
    """Return ``(DataLoader, PoolDataLoader)`` for the backend.

    On plain PyTorch there is no PoolDataLoader, so DataLoader is returned
    for both slots.
    """
    if TORCH_VERSION != 'parrots':
        from torch.utils.data import DataLoader
        return DataLoader, DataLoader
    from torch.utils.data import DataLoader, PoolDataLoader
    return DataLoader, PoolDataLoader
def _get_extension():
    """Return ``(BuildExtension, CppExtension, CUDAExtension)`` helpers."""
    if TORCH_VERSION != 'parrots':
        from torch.utils.cpp_extension import (BuildExtension, CppExtension,
                                               CUDAExtension)
        return BuildExtension, CppExtension, CUDAExtension
    # parrots exposes a single Extension factory; specialise it for the
    # cpp / cuda variants.
    from parrots.utils.build_extension import BuildExtension, Extension
    return (BuildExtension, partial(Extension, cuda=False),
            partial(Extension, cuda=True))
def _get_pool():
    """Return the four pooling base classes for the backend."""
    if TORCH_VERSION != 'parrots':
        from torch.nn.modules.pooling import (_AdaptiveAvgPoolNd,
                                              _AdaptiveMaxPoolNd, _AvgPoolNd,
                                              _MaxPoolNd)
    else:
        from parrots.nn.modules.pool import (_AdaptiveAvgPoolNd,
                                             _AdaptiveMaxPoolNd, _AvgPoolNd,
                                             _MaxPoolNd)
    return _AdaptiveAvgPoolNd, _AdaptiveMaxPoolNd, _AvgPoolNd, _MaxPoolNd
def _get_norm():
    """Return ``(_BatchNorm, _InstanceNorm, SyncBatchNorm)`` for the backend."""
    if TORCH_VERSION != 'parrots':
        from torch.nn.modules.batchnorm import _BatchNorm
        from torch.nn.modules.instancenorm import _InstanceNorm
        return _BatchNorm, _InstanceNorm, torch.nn.SyncBatchNorm
    from parrots.nn.modules.batchnorm import _BatchNorm, _InstanceNorm
    return _BatchNorm, _InstanceNorm, torch.nn.SyncBatchNorm2d
class SyncBatchNorm(SyncBatchNorm_):
    """Backend-agnostic SyncBatchNorm adding the input-rank check that the
    parrots implementation lacks."""

    def _check_input_dim(self, input):
        if TORCH_VERSION != 'parrots':
            # Native PyTorch already validates the input rank itself.
            super()._check_input_dim(input)
        elif input.dim() < 2:
            raise ValueError(f'expected at least 2D input (got {input.dim()}D input)')
def is_filepath(x):
    """True for a string path or a :class:`pathlib.Path` object."""
    if isinstance(x, Path):
        return True
    return is_str(x)
def fopen(filepath, *args, **kwargs):
    """Open ``filepath`` (str or Path), forwarding extra args to ``open``.

    Raises:
        ValueError: If ``filepath`` is neither a string nor a Path.
    """
    if isinstance(filepath, Path):
        return filepath.open(*args, **kwargs)
    if is_str(filepath):
        return open(filepath, *args, **kwargs)
    raise ValueError('`filepath` should be a string or a Path')
def check_file_exist(filename, msg_tmpl='file "{}" does not exist'):
    """Raise ``FileNotFoundError`` unless ``filename`` is an existing file.

    Args:
        filename (str): Path to check.
        msg_tmpl (str): Message template with one ``{}`` slot for the path.
    """
    if osp.isfile(filename):
        return
    raise FileNotFoundError(msg_tmpl.format(filename))
def mkdir_or_exist(dir_name, mode=511):
    """Create ``dir_name`` (including parents) if needed.

    A no-op for the empty string or for an already-existing directory.
    Note: ``511`` is the decimal spelling of ``0o777``.
    """
    if dir_name == '':
        return
    expanded = osp.expanduser(dir_name)
    os.makedirs(expanded, mode=mode, exist_ok=True)
def symlink(src, dst, overwrite=True, **kwargs):
    """Create a symlink ``dst`` pointing at ``src``.

    An existing ``dst`` (even a dangling link, hence ``lexists``) is removed
    first when ``overwrite`` is True.
    """
    if overwrite and os.path.lexists(dst):
        os.remove(dst)
    os.symlink(src, dst, **kwargs)
def scandir(dir_path, suffix=None, recursive=False, case_sensitive=True):
    """Scan a directory to find the interested files.

    Args:
        dir_path (str | :obj:`Path`): Path of the directory.
        suffix (str | tuple(str), optional): File suffix that we are
            interested in. Default: None.
        recursive (bool, optional): If set to True, recursively scan the
            directory. Default: False.
        case_sensitive (bool, optional): If set to False, ignore the case of
            suffix. Default: True.

    Returns:
        A generator for all the interested files with relative paths.
    """
    if isinstance(dir_path, (str, Path)):
        dir_path = str(dir_path)
    else:
        raise TypeError('"dir_path" must be a string or Path object')
    if ((suffix is not None) and (not isinstance(suffix, (str, tuple)))):
        raise TypeError('"suffix" must be a string or tuple of strings')
    if ((suffix is not None) and (not case_sensitive)):
        # Lower-case the suffix once up front so the loop below only needs
        # to lower-case each candidate path.
        suffix = (suffix.lower() if isinstance(suffix, str) else tuple((item.lower() for item in suffix)))
    # Captured by the closure so recursive calls report paths relative to
    # the original root, not to the sub-directory being walked.
    root = dir_path

    def _scandir(dir_path, suffix, recursive, case_sensitive):
        for entry in os.scandir(dir_path):
            if ((not entry.name.startswith('.')) and entry.is_file()):
                rel_path = osp.relpath(entry.path, root)
                _rel_path = (rel_path if case_sensitive else rel_path.lower())
                # NOTE: the suffix is matched against the *relative path*,
                # not just the base name, so suffixes containing a path
                # separator also work.
                if ((suffix is None) or _rel_path.endswith(suffix)):
                    (yield rel_path)
            elif (recursive and os.path.isdir(entry.path)):
                # NOTE(review): hidden *files* are skipped above, but hidden
                # *directories* are still recursed into here — confirm this
                # asymmetry is intended.
                (yield from _scandir(entry.path, suffix, recursive, case_sensitive))
    return _scandir(dir_path, suffix, recursive, case_sensitive)
def find_vcs_root(path, markers=('.git',)):
    """Locate the closest ancestor of ``path`` (inclusive) holding a marker.

    Args:
        path (str): Path of a directory or file.
        markers (tuple[str], optional): Marker file/directory names.
            Default: ``('.git',)``.

    Returns:
        str | None: The directory containing one of the markers, or None if
        the filesystem root is reached without a match.
    """
    if osp.isfile(path):
        path = osp.dirname(path)
    cur = osp.abspath(osp.expanduser(path))
    prev = None
    # osp.split() of a filesystem root yields the root itself, so the walk
    # terminates once ``cur`` stops changing.
    while cur != prev:
        for marker in markers:
            if osp.exists(osp.join(cur, marker)):
                return cur
        prev, cur = cur, osp.split(cur)[0]
    return None
class ProgressBar():
    'A progress bar which can print the progress.'

    def __init__(self, task_num=0, bar_width=50, start=True, file=sys.stdout):
        # task_num == 0 means "total unknown", which switches the output to
        # a simple completed/elapsed counter instead of a bar.
        self.task_num = task_num
        self.bar_width = bar_width
        self.completed = 0
        self.file = file
        if start:
            self.start()

    @property
    def terminal_width(self):
        # Re-read on each access so resizing the terminal takes effect.
        (width, _) = get_terminal_size()
        return width

    def start(self):
        # Print the initial (empty) bar and start the elapsed-time clock.
        if (self.task_num > 0):
            self.file.write(f"[{(' ' * self.bar_width)}] 0/{self.task_num}, elapsed: 0s, ETA:")
        else:
            self.file.write('completed: 0, elapsed: 0s')
        self.file.flush()
        self.timer = Timer()

    def update(self, num_tasks=1):
        # Mark ``num_tasks`` more tasks as finished and redraw the bar.
        assert (num_tasks > 0)
        self.completed += num_tasks
        elapsed = self.timer.since_start()
        if (elapsed > 0):
            fps = (self.completed / elapsed)
        else:
            fps = float('inf')
        if (self.task_num > 0):
            percentage = (self.completed / float(self.task_num))
            # ETA extrapolated from the average rate so far.
            eta = int((((elapsed * (1 - percentage)) / percentage) + 0.5))
            msg = f'''
 [{{}}] {self.completed}/{self.task_num}, {fps:.1f} task/s, elapsed: {int((elapsed + 0.5))}s, ETA: {eta:5}s'''
            # Shrink the bar so the whole message fits the terminal width.
            bar_width = min(self.bar_width, (int((self.terminal_width - len(msg))) + 2), int((self.terminal_width * 0.6)))
            bar_width = max(2, bar_width)
            mark_width = int((bar_width * percentage))
            bar_chars = (('>' * mark_width) + (' ' * (bar_width - mark_width)))
            # ``msg`` keeps a literal {} placeholder that is filled with the
            # rendered bar here.
            self.file.write(msg.format(bar_chars))
        else:
            self.file.write(f'completed: {self.completed}, elapsed: {int((elapsed + 0.5))}s, {fps:.1f} tasks/s')
        self.file.flush()
def track_progress(func, tasks, bar_width=50, file=sys.stdout, **kwargs):
    """Apply ``func`` to each task sequentially while showing a progress bar.

    Args:
        func (callable): The function to be applied to each task.
        tasks (list or tuple[Iterable, int]): A list of tasks or
            ``(iterable, total num)``.
        bar_width (int): Width of the progress bar.

    Returns:
        list: The task results, in input order.
    """
    if isinstance(tasks, tuple):
        assert len(tasks) == 2
        assert isinstance(tasks[0], Iterable)
        assert isinstance(tasks[1], int)
        tasks, task_num = tasks[0], tasks[1]
    elif isinstance(tasks, Iterable):
        task_num = len(tasks)
    else:
        raise TypeError('"tasks" must be an iterable object or a (iterator, int) tuple')
    bar = ProgressBar(task_num, bar_width, file=file)
    results = []
    for task in tasks:
        results.append(func(task, **kwargs))
        bar.update()
    bar.file.write('\n')
    return results
def init_pool(process_num, initializer=None, initargs=None):
    """Create a :class:`multiprocessing.Pool`, validating ``initargs``.

    Raises:
        TypeError: If ``initargs`` is given but is not a tuple.
    """
    if initializer is None:
        return Pool(process_num)
    if initargs is None:
        return Pool(process_num, initializer)
    if not isinstance(initargs, tuple):
        raise TypeError('"initargs" must be a tuple')
    return Pool(process_num, initializer, initargs)
def track_parallel_progress(func, tasks, nproc, initializer=None, initargs=None, bar_width=50, chunksize=1, skip_first=False, keep_order=True, file=sys.stdout):
    """Track the progress of parallel task execution with a progress bar.

    The built-in :mod:`multiprocessing` module is used for process pools and
    tasks are done with :func:`Pool.map` or :func:`Pool.imap_unordered`.

    Args:
        func (callable): The function to be applied to each task.
        tasks (list or tuple[Iterable, int]): A list of tasks or
            (tasks, total num).
        nproc (int): Process (worker) number.
        initializer (None or callable): Refer to :class:`multiprocessing.Pool`
            for details.
        initargs (None or tuple): Refer to :class:`multiprocessing.Pool` for
            details.
        chunksize (int): Refer to :class:`multiprocessing.Pool` for details.
        bar_width (int): Width of progress bar.
        skip_first (bool): Whether to skip the first sample for each worker
            when estimating fps, since the initialization step may take
            longer.
        keep_order (bool): If True, :func:`Pool.imap` is used, otherwise
            :func:`Pool.imap_unordered` is used.

    Returns:
        list: The task results.
    """
    if isinstance(tasks, tuple):
        assert (len(tasks) == 2)
        assert isinstance(tasks[0], Iterable)
        assert isinstance(tasks[1], int)
        task_num = tasks[1]
        tasks = tasks[0]
    elif isinstance(tasks, Iterable):
        task_num = len(tasks)
    else:
        raise TypeError('"tasks" must be an iterable object or a (iterator, int) tuple')
    pool = init_pool(nproc, initializer, initargs)
    # When skipping the warm-up batch, the bar is started manually later and
    # its total excludes the first chunk of every worker.
    start = (not skip_first)
    task_num -= ((nproc * chunksize) * int(skip_first))
    prog_bar = ProgressBar(task_num, bar_width, start, file=file)
    results = []
    if keep_order:
        gen = pool.imap(func, tasks, chunksize)
    else:
        gen = pool.imap_unordered(func, tasks, chunksize)
    for result in gen:
        results.append(result)
        if skip_first:
            # Ignore the first nproc * chunksize results for fps estimation;
            # restart the bar exactly when the warm-up batch completes.
            if (len(results) < (nproc * chunksize)):
                continue
            elif (len(results) == (nproc * chunksize)):
                prog_bar.start()
                continue
        prog_bar.update()
    prog_bar.file.write('\n')
    pool.close()
    pool.join()
    return results
def track_iter_progress(tasks, bar_width=50, file=sys.stdout):
    """Yield tasks one by one, updating a progress bar after each is consumed.

    Args:
        tasks (list or tuple[Iterable, int]): A list of tasks or
            ``(iterable, total num)``.
        bar_width (int): Width of the progress bar.

    Yields:
        The tasks, unchanged.
    """
    if isinstance(tasks, tuple):
        assert len(tasks) == 2
        assert isinstance(tasks[0], Iterable)
        assert isinstance(tasks[1], int)
        tasks, task_num = tasks[0], tasks[1]
    elif isinstance(tasks, Iterable):
        task_num = len(tasks)
    else:
        raise TypeError('"tasks" must be an iterable object or a (iterator, int) tuple')
    bar = ProgressBar(task_num, bar_width, file=file)
    for task in tasks:
        yield task
        bar.update()
    bar.file.write('\n')
def build_from_cfg(cfg, registry, default_args=None):
    """Build a module from config dict.

    Args:
        cfg (dict): Config dict. It should at least contain the key "type".
        registry (:obj:`Registry`): The registry to search the type from.
        default_args (dict, optional): Default initialization arguments.

    Returns:
        object: The constructed object.
    """
    if (not isinstance(cfg, dict)):
        raise TypeError(f'cfg must be a dict, but got {type(cfg)}')
    if ('type' not in cfg):
        # "type" may alternatively be supplied through default_args.
        if ((default_args is None) or ('type' not in default_args)):
            raise KeyError(f'''`cfg` or `default_args` must contain the key "type", but got {cfg}
{default_args}''')
    if (not isinstance(registry, Registry)):
        raise TypeError(f'registry must be an mmcv.Registry object, but got {type(registry)}')
    if (not (isinstance(default_args, dict) or (default_args is None))):
        raise TypeError(f'default_args must be a dict or None, but got {type(default_args)}')
    # Work on a copy so the caller's cfg dict is left untouched.
    args = cfg.copy()
    if (default_args is not None):
        for (name, value) in default_args.items():
            # setdefault: values already in cfg win over the defaults.
            args.setdefault(name, value)
    obj_type = args.pop('type')
    if isinstance(obj_type, str):
        obj_cls = registry.get(obj_type)
        if (obj_cls is None):
            raise KeyError(f'{obj_type} is not in the {registry.name} registry')
    elif inspect.isclass(obj_type):
        # A class object may be passed directly instead of its registered
        # string name.
        obj_cls = obj_type
    else:
        raise TypeError(f'type must be a str or valid type, but got {type(obj_type)}')
    try:
        return obj_cls(**args)
    except Exception as e:
        # Prefix the class name so the failing component is identifiable.
        raise type(e)(f'{obj_cls.__name__}: {e}')
class Registry():
    """A registry to map strings to classes.

    Registered object could be built from registry.

    Example:
        >>> MODELS = Registry('models')
        >>> @MODELS.register_module()
        >>> class ResNet:
        >>>     pass
        >>> resnet = MODELS.build(dict(type='ResNet'))

    Please refer to
    https://mmcv.readthedocs.io/en/latest/understand_mmcv/registry.html for
    advanced usage.

    Args:
        name (str): Registry name.
        build_func (func, optional): Build function to construct instance
            from Registry. :func:`build_from_cfg` is used if neither
            ``parent`` nor ``build_func`` is specified. If ``parent`` is
            specified and ``build_func`` is not given, ``build_func`` will be
            inherited from ``parent``. Default: None.
        parent (Registry, optional): Parent registry. The class registered in
            children registry could be built from parent. Default: None.
        scope (str, optional): The scope of registry. It is the key to search
            for children registry. If not specified, scope will be the name
            of the package where class is defined, e.g. mmdet, mmcls, mmseg.
            Default: None.
    """

    def __init__(self, name, build_func=None, parent=None, scope=None):
        self._name = name
        self._module_dict = dict()
        self._children = dict()
        # Default the scope to the top-level package that instantiated this
        # Registry (e.g. 'mmdet').
        self._scope = (self.infer_scope() if (scope is None) else scope)
        # build_func priority: explicit argument > parent's > build_from_cfg.
        if (build_func is None):
            if (parent is not None):
                self.build_func = parent.build_func
            else:
                self.build_func = build_from_cfg
        else:
            self.build_func = build_func
        if (parent is not None):
            assert isinstance(parent, Registry)
            parent._add_children(self)
            self.parent = parent
        else:
            self.parent = None

    def __len__(self):
        return len(self._module_dict)

    def __contains__(self, key):
        return (self.get(key) is not None)

    def __repr__(self):
        format_str = (self.__class__.__name__ + f'(name={self._name}, items={self._module_dict})')
        return format_str

    @staticmethod
    def infer_scope():
        """Infer the scope of registry.

        The name of the package where registry is defined will be returned.

        Example:
            >>> # in mmdet/models/backbone/resnet.py
            >>> MODELS = Registry('models')
            >>> @MODELS.register_module()
            >>> class ResNet:
            >>>     pass
            The scope of ``ResNet`` will be ``mmdet``.

        Returns:
            str: The inferred scope name.
        """
        # stack()[2] is the frame that called Registry.__init__; its module
        # path's first component is taken as the scope.
        filename = inspect.getmodule(inspect.stack()[2][0]).__name__
        split_filename = filename.split('.')
        return split_filename[0]

    @staticmethod
    def split_scope_key(key):
        """Split scope and key.

        The first scope will be split from key.

        Examples:
            >>> Registry.split_scope_key('mmdet.ResNet')
            'mmdet', 'ResNet'
            >>> Registry.split_scope_key('ResNet')
            None, 'ResNet'

        Return:
            tuple[str | None, str]: The former element is the first scope of
            the key, which can be ``None``. The latter is the remaining key.
        """
        split_index = key.find('.')
        if (split_index != (- 1)):
            return (key[:split_index], key[(split_index + 1):])
        else:
            return (None, key)

    @property
    def name(self):
        return self._name

    @property
    def scope(self):
        return self._scope

    @property
    def module_dict(self):
        return self._module_dict

    @property
    def children(self):
        return self._children

    def get(self, key):
        """Get the registry record.

        Args:
            key (str): The class name in string format.

        Returns:
            class: The corresponding class.
        """
        (scope, real_key) = self.split_scope_key(key)
        if ((scope is None) or (scope == self._scope)):
            # Key targets this registry; returns None when absent.
            if (real_key in self._module_dict):
                return self._module_dict[real_key]
        elif (scope in self._children):
            # Delegate to the child registry owning that scope.
            return self._children[scope].get(real_key)
        else:
            # Unknown scope here: climb to the root registry and retry the
            # lookup from the top of the hierarchy.
            parent = self.parent
            while (parent.parent is not None):
                parent = parent.parent
            return parent.get(key)

    def build(self, *args, **kwargs):
        # Delegates construction to the configured build function (usually
        # build_from_cfg), passing this registry along.
        return self.build_func(*args, **kwargs, registry=self)

    def _add_children(self, registry):
        """Add children for a registry.

        The ``registry`` will be added as children based on its scope.
        The parent registry could build objects from children registry.

        Example:
            >>> models = Registry('models')
            >>> mmdet_models = Registry('models', parent=models)
            >>> @mmdet_models.register_module()
            >>> class ResNet:
            >>>     pass
            >>> resnet = models.build(dict(type='mmdet.ResNet'))
        """
        assert isinstance(registry, Registry)
        assert (registry.scope is not None)
        assert (registry.scope not in self.children), f'scope {registry.scope} exists in {self.name} registry'
        self.children[registry.scope] = registry

    def _register_module(self, module_class, module_name=None, force=False):
        # Core registration: maps one or more names to the class.
        if (not inspect.isclass(module_class)):
            raise TypeError(f'module must be a class, but got {type(module_class)}')
        if (module_name is None):
            module_name = module_class.__name__
        if isinstance(module_name, str):
            module_name = [module_name]
        for name in module_name:
            if ((not force) and (name in self._module_dict)):
                raise KeyError(f'{name} is already registered in {self.name}')
            self._module_dict[name] = module_class

    def deprecated_register_module(self, cls=None, force=False):
        # Backward-compatible shim for the old register_module(module, force)
        # calling convention.
        warnings.warn('The old API of register_module(module, force=False) is deprecated and will be removed, please use the new API register_module(name=None, force=False, module=None) instead.', DeprecationWarning)
        if (cls is None):
            return partial(self.deprecated_register_module, force=force)
        self._register_module(cls, force=force)
        return cls

    def register_module(self, name=None, force=False, module=None):
        """Register a module.

        A record will be added to `self._module_dict`, whose key is the class
        name or the specified name, and value is the class itself.
        It can be used as a decorator or a normal function.

        Example:
            >>> backbones = Registry('backbone')
            >>> @backbones.register_module()
            >>> class ResNet:
            >>>     pass

            >>> backbones = Registry('backbone')
            >>> @backbones.register_module(name='mnet')
            >>> class MobileNet:
            >>>     pass

            >>> backbones = Registry('backbone')
            >>> class ResNet:
            >>>     pass
            >>> backbones.register_module(ResNet)

        Args:
            name (str | None): The module name to be registered. If not
                specified, the class name will be used.
            force (bool, optional): Whether to override an existing class
                with the same name. Default: False.
            module (type): Module class to be registered.
        """
        if (not isinstance(force, bool)):
            raise TypeError(f'force must be a boolean, but got {type(force)}')
        # A class passed as the first positional argument means the caller is
        # using the deprecated register_module(module) form.
        if isinstance(name, type):
            return self.deprecated_register_module(name, force=force)
        if (not ((name is None) or isinstance(name, str) or is_seq_of(name, str))):
            raise TypeError(f'name must be either of None, an instance of str or a sequence of str, but got {type(name)}')
        # Used as a normal function: register_module(module=SomeClass, ...).
        if (module is not None):
            self._register_module(module_class=module, module_name=name, force=force)
            return module

        # Used as a decorator: return the registering closure.
        def _register(cls):
            self._register_module(module_class=cls, module_name=name, force=force)
            return cls
        return _register
def worker_init_fn(worker_id: int, num_workers: int, rank: int, seed: int):
    """Seed numpy/random/torch inside a dataloader worker.

    Each worker uses the seed ``num_workers * rank + worker_id + seed`` so
    every (rank, worker) pair draws an independent yet reproducible stream.

    Args:
        worker_id (int): Id for each worker.
        num_workers (int): Number of workers.
        rank (int): Rank in distributed training.
        seed (int): Random seed.
    """
    per_worker_seed = worker_id + (num_workers * rank) + seed
    np.random.seed(per_worker_seed)
    random.seed(per_worker_seed)
    torch.manual_seed(per_worker_seed)
def check_python_script(cmd):
    """Execute a python command line in-process under ``__main__``.

    Unlike ``os.system`` the script runs inside the current interpreter, so
    coverage tools can trace it. Two forms are accepted:

    - ./tests/data/scripts/hello.py zz
    - python tests/data/scripts/hello.py zz
    """
    argv = split(cmd)
    # Drop a leading "python" so argv[0] is always the script path.
    if argv[0] == 'python':
        argv = argv[1:]
    with patch.object(sys, 'argv', argv):
        run_path(argv[0], run_name='__main__')
def _any(judge_result): 'Since built-in ``any`` works only when the element of iterable is not\n iterable, implement the function.' if (not isinstance(judge_result, Iterable)): return judge_result try: for element in judge_result: if _any(element): return True except TypeError: if judge_result: return True return False
def assert_dict_contains_subset(dict_obj: Dict[Any, Any], expected_subset: Dict[Any, Any]) -> bool:
    """Check that every (key, value) pair of ``expected_subset`` occurs in
    ``dict_obj``.

    Args:
        dict_obj (Dict[Any, Any]): Dict object to be checked.
        expected_subset (Dict[Any, Any]): Subset expected to be contained in
            dict_obj.

    Returns:
        bool: Whether the dict_obj contains the expected_subset.
    """
    # _any handles element-wise comparison results (e.g. numpy arrays).
    return all(
        (key in dict_obj) and (not _any(dict_obj[key] != value))
        for (key, value) in expected_subset.items())
def assert_attrs_equal(obj: Any, expected_attrs: Dict[str, Any]) -> bool:
    """Check that ``obj`` carries each expected attribute with the expected
    value.

    Args:
        obj (object): Class object to be checked.
        expected_attrs (Dict[str, Any]): Dict of the expected attrs.

    Returns:
        bool: Whether the attribute of class object is correct.
    """
    for (attr, value) in expected_attrs.items():
        if not hasattr(obj, attr):
            return False
        # _any handles element-wise comparison results (e.g. tensors).
        if _any(getattr(obj, attr) != value):
            return False
    return True
def assert_dict_has_keys(obj: Dict[str, Any], expected_keys: List[str]) -> bool:
    """Check that every expected key is present in ``obj``.

    Args:
        obj (Dict[str, Any]): Object to be checked.
        expected_keys (List[str]): Keys expected to be contained in the keys
            of the obj.

    Returns:
        bool: Whether the obj has the expected keys.
    """
    return all(key in obj for key in expected_keys)
def assert_keys_equal(result_keys: List[str], target_keys: List[str]) -> bool:
    """Check that the two key collections are equal as sets (order ignored).

    Args:
        result_keys (List[str]): Result keys to be checked.
        target_keys (List[str]): Target keys to be checked.

    Returns:
        bool: Whether target_keys is equal to result_keys.
    """
    return not set(result_keys).symmetric_difference(target_keys)
def assert_is_norm_layer(module) -> bool:
    """Check whether ``module`` is one of the recognised normalization
    layers (batch/instance/group/layer norm).

    Args:
        module (nn.Module): The module to be checked.

    Returns:
        bool: Whether the module is a norm layer.
    """
    from torch.nn import GroupNorm, LayerNorm
    from .parrots_wrapper import _BatchNorm, _InstanceNorm
    candidates = (_BatchNorm, _InstanceNorm, GroupNorm, LayerNorm)
    return isinstance(module, candidates)
def assert_params_all_zeros(module) -> bool:
    """Check whether ``module``'s weight — and bias, when present — are all
    zeros.

    Args:
        module (nn.Module): The module to be checked.

    Returns:
        bool: Whether the parameters of the module are all zeros.
    """
    weight = module.weight.data
    if not weight.allclose(weight.new_zeros(weight.size())):
        return False
    bias = getattr(module, 'bias', None)
    if bias is None:
        # Bias-free modules only need the weight check.
        return True
    bias_data = bias.data
    return bias_data.allclose(bias_data.new_zeros(bias_data.size()))
class TimerError(Exception):
    """Raised when a :class:`Timer` operation is used while the timer is
    stopped."""

    def __init__(self, message):
        # Keep the message available as an attribute as well as in args.
        self.message = message
        super().__init__(message)
class Timer():
    """A flexible Timer class.

    Examples:
        >>> import time
        >>> import mmcv
        >>> with mmcv.Timer():
        >>>     # simulate a code block that will run for 1s
        >>>     time.sleep(1)
        1.000
        >>> with mmcv.Timer(print_tmpl='it takes {:.1f} seconds'):
        >>>     # simulate a code block that will run for 1s
        >>>     time.sleep(1)
        it takes 1.0 seconds
        >>> timer = mmcv.Timer()
        >>> time.sleep(0.5)
        >>> print(timer.since_start())
        0.500
        >>> time.sleep(0.5)
        >>> print(timer.since_last_check())
        0.500
        >>> print(timer.since_start())
        1.000
    """

    def __init__(self, start=True, print_tmpl=None):
        self._is_running = False
        # Template used by the context-manager exit message.
        self.print_tmpl = print_tmpl or '{:.3f}'
        if start:
            self.start()

    @property
    def is_running(self):
        """bool: indicate whether the timer is running"""
        return self._is_running

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, type, value, traceback):
        print(self.print_tmpl.format(self.since_last_check()))
        self._is_running = False

    def start(self):
        """Start the timer."""
        if not self._is_running:
            self._t_start = time()
            self._is_running = True
        # Every start also counts as a checking operation.
        self._t_last = time()

    def since_start(self):
        """Total time since the timer is started.

        Returns:
            float: Time in seconds.

        Raises:
            TimerError: If the timer is not running.
        """
        if not self._is_running:
            raise TimerError('timer is not running')
        self._t_last = time()
        return self._t_last - self._t_start

    def since_last_check(self):
        """Time since the last checking operation (either
        :meth:`since_start` or :meth:`since_last_check`).

        Returns:
            float: Time in seconds.

        Raises:
            TimerError: If the timer is not running.
        """
        if not self._is_running:
            raise TimerError('timer is not running')
        dur = time() - self._t_last
        self._t_last = time()
        return dur