# NOTE: this section is assembled from several fairseq/optim modules. The
# imports below cover the standard-library and third-party names used
# throughout; fairseq-internal names (utils, optim, logger, FairseqOptimizer,
# FairseqLRScheduler, register_optimizer, register_lr_scheduler, FusedAdamV2)
# are assumed to come from the surrounding fairseq package.
import math
import types
from itertools import chain

import numpy as np
import torch
import torch.distributed as dist


class Adafactor(torch.optim.Optimizer):
    """Implements Adafactor algorithm.

    This implementation is based on:
    `Adafactor: Adaptive Learning Rates with Sublinear Memory Cost`
    (see https://arxiv.org/abs/1804.04235)

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): external learning rate (default: None)
        eps (tuple[float, float]): regularization constants for square gradient
            and parameter scale respectively (default: (1e-30, 1e-3))
        clip_threshold (float): threshold of root mean square of
            final gradient update (default: 1.0)
        decay_rate (float): coefficient used to compute running averages of square
            gradient (default: -0.8)
        beta1 (float): coefficient used for computing running averages of gradient
            (default: None)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        scale_parameter (bool): if true, learning rate is scaled by root mean square of
            parameter (default: True)
        relative_step (bool): if true, time-dependent learning rate is computed
            instead of external learning rate (default: True)
        warmup_init (bool): time-dependent learning rate computation depends on
            whether warm-up initialization is being used (default: False)
    """

    def __init__(self, params, lr=None, eps=(1e-30, 1e-3), clip_threshold=1.0,
                 decay_rate=-0.8, beta1=None, weight_decay=0.0, scale_parameter=True,
                 relative_step=True, warmup_init=False):
        defaults = dict(
            lr=lr, eps=eps, clip_threshold=clip_threshold, decay_rate=decay_rate,
            beta1=beta1, weight_decay=weight_decay, scale_parameter=scale_parameter,
            relative_step=relative_step, warmup_init=warmup_init,
        )
        super(Adafactor, self).__init__(params, defaults)

    @property
    def supports_memory_efficient_fp16(self):
        return True

    @property
    def supports_flat_params(self):
        return False

    def _get_lr(self, param_group, param_state):
        rel_step_sz = param_group['lr']
        if param_group['relative_step']:
            min_step = 1e-6 * param_state['step'] if param_group['warmup_init'] else 1e-2
            rel_step_sz = min(min_step, 1.0 / math.sqrt(param_state['step']))
        param_scale = 1.0
        if param_group['scale_parameter']:
            param_scale = max(param_group['eps'][1], param_state['RMS'])
        return param_scale * rel_step_sz

    def _get_options(self, param_group, param_shape):
        factored = len(param_shape) >= 2
        use_first_moment = param_group['beta1'] is not None
        return factored, use_first_moment

    def _rms(self, tensor):
        return tensor.norm(2) / (tensor.numel() ** 0.5)

    def _approx_sq_grad(self, exp_avg_sq_row, exp_avg_sq_col, output):
        r_factor = (
            (exp_avg_sq_row / exp_avg_sq_row.mean(dim=-1).unsqueeze(-1))
            .rsqrt_()
            .unsqueeze(-1)
        )
        c_factor = exp_avg_sq_col.unsqueeze(-2).rsqrt()
        torch.mul(r_factor, c_factor, out=output)

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data.float()
                if grad.is_sparse:
                    raise RuntimeError('Adafactor does not support sparse gradients.')

                state = self.state[p]
                grad_shape = grad.shape

                factored, use_first_moment = self._get_options(group, grad_shape)
                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    if use_first_moment:
                        # Exponential moving average of gradient values
                        state['exp_avg'] = torch.zeros_like(grad)
                    if factored:
                        # factored second moment: one row vector and one column vector
                        state['exp_avg_sq_row'] = torch.zeros(grad_shape[:-1]).type_as(grad)
                        state['exp_avg_sq_col'] = torch.zeros(grad_shape[:-2] + grad_shape[-1:]).type_as(grad)
                    else:
                        state['exp_avg_sq'] = torch.zeros_like(grad)
                    state['RMS'] = 0
                else:
                    if use_first_moment:
                        state['exp_avg'] = state['exp_avg'].type_as(grad)
                    if factored:
                        state['exp_avg_sq_row'] = state['exp_avg_sq_row'].type_as(grad)
                        state['exp_avg_sq_col'] = state['exp_avg_sq_col'].type_as(grad)
                    else:
                        state['exp_avg_sq'] = state['exp_avg_sq'].type_as(grad)

                p_data_fp32 = p.data.float()

                state['step'] += 1
                state['RMS'] = self._rms(p_data_fp32)
                group['lr'] = self._get_lr(group, state)

                beta2t = 1.0 - math.pow(state['step'], group['decay_rate'])
                update = (grad ** 2) + group['eps'][0]
                if factored:
                    exp_avg_sq_row = state['exp_avg_sq_row']
                    exp_avg_sq_col = state['exp_avg_sq_col']

                    exp_avg_sq_row.mul_(beta2t).add_(1.0 - beta2t, update.mean(dim=-1))
                    exp_avg_sq_col.mul_(beta2t).add_(1.0 - beta2t, update.mean(dim=-2))

                    # Approximation of exponential moving average of square of gradient
                    self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col, update)
                    update.mul_(grad)
                else:
                    exp_avg_sq = state['exp_avg_sq']

                    exp_avg_sq.mul_(beta2t).add_(1.0 - beta2t, update)
                    torch.rsqrt(exp_avg_sq, out=update).mul_(grad)

                update.div_(max(1.0, self._rms(update) / group['clip_threshold']))
                update.mul_(group['lr'])

                if use_first_moment:
                    exp_avg = state['exp_avg']
                    exp_avg.mul_(group['beta1']).add_(1 - group['beta1'], update)
                    update = exp_avg

                if group['weight_decay'] != 0:
                    p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)

                p_data_fp32.add_(-update)

                p.data.copy_(p_data_fp32)

        return loss
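
# A minimal usage sketch for the Adafactor class above (the toy model and
# data are hypothetical, not part of fairseq). With relative_step=True the
# external lr is ignored and the time-dependent step size
# min(1e-2, 1/sqrt(step)) is used instead; 2-D parameters get the factored
# row/column second-moment statistics.
def _demo_adafactor():
    model = torch.nn.Linear(8, 2)
    optimizer = Adafactor(model.parameters(), lr=None,
                          relative_step=True, scale_parameter=True)
    loss = model(torch.randn(4, 8)).pow(2).mean()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()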
@register_optimizer('adagrad')
class Adagrad(FairseqOptimizer):

    def __init__(self, args, params):
        super().__init__(args)
        self._optimizer = torch.optim.Adagrad(params, **self.optimizer_config)

    @staticmethod
    def add_args(parser):
        """Add optimizer-specific arguments to the parser."""
        parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD',
                            help='weight decay')

    @property
    def optimizer_config(self):
        """
        Return a kwarg dictionary that will be used to override optimizer
        args stored in checkpoints. This allows us to load a checkpoint and
        resume training using a different set of optimizer args, e.g., with a
        different learning rate.
        """
        return {
            'lr': self.args.lr[0],
            'weight_decay': self.args.weight_decay,
        }

    @property
    def supports_flat_params(self):
        return True
@register_optimizer('adam')
class FairseqAdam(FairseqOptimizer):
    """Adam optimizer for fairseq.

    Important note: this optimizer corresponds to the "AdamW" variant of
    Adam in its weight decay behavior. As such, it is most closely
    analogous to torch.optim.AdamW from PyTorch.
    """

    def __init__(self, args, params):
        super().__init__(args)
        fused_adam_cls = get_fused_adam_class()
        use_fused_adam = (
            not getattr(args, 'use_old_adam', False)
            and fused_adam_cls is not None
            and torch.cuda.is_available()
        )
        if use_fused_adam:
            logger.info('using FusedAdam')
            self._optimizer = fused_adam_cls(params, **self.optimizer_config)
        else:
            self._optimizer = Adam(params, **self.optimizer_config)

    @staticmethod
    def add_args(parser):
        """Add optimizer-specific arguments to the parser."""
        parser.add_argument('--adam-betas', default='(0.9, 0.999)', metavar='B',
                            help='betas for Adam optimizer')
        parser.add_argument('--adam-eps', type=float, default=1e-8, metavar='D',
                            help='epsilon for Adam optimizer')
        parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD',
                            help='weight decay')
        parser.add_argument('--use-old-adam', action='store_true', default=False,
                            help='Use fairseq.optim.adam.Adam')

    @property
    def optimizer_config(self):
        """
        Return a kwarg dictionary that will be used to override optimizer
        args stored in checkpoints. This allows us to load a checkpoint and
        resume training using a different set of optimizer args, e.g., with a
        different learning rate.
        """
        return {
            'lr': self.args.lr[0],
            'betas': eval(self.args.adam_betas),
            'eps': self.args.adam_eps,
            'weight_decay': self.args.weight_decay,
        }

    def average_params(self):
        """Reduce Params is only used during BMUF distributed training."""
        state_dict = self.optimizer.state_dict()
        total_gpus = float(dist.get_world_size())

        for _, value in state_dict['state'].items():
            value['exp_avg'] /= total_gpus
            value['exp_avg_sq'] /= total_gpus
            dist.all_reduce(value['exp_avg'], op=dist.ReduceOp.SUM)
            dist.all_reduce(value['exp_avg_sq'], op=dist.ReduceOp.SUM)
class Adam(torch.optim.Optimizer):
    """Implements Adam algorithm.

    This implementation is modified from torch.optim.Adam based on:
    `Fixed Weight Decay Regularization in Adam`
    (see https://arxiv.org/abs/1711.05101)

    It has been proposed in `Adam: A Method for Stochastic Optimization`_.

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        amsgrad (boolean, optional): whether to use the AMSGrad variant of this
            algorithm from the paper `On the Convergence of Adam and Beyond`_

    .. _Adam\\: A Method for Stochastic Optimization:
        https://arxiv.org/abs/1412.6980
    .. _On the Convergence of Adam and Beyond:
        https://openreview.net/forum?id=ryQu7f-RZ
    """

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
                 weight_decay=0, amsgrad=False):
        defaults = dict(lr=lr, betas=betas, eps=eps,
                        weight_decay=weight_decay, amsgrad=amsgrad)
        super(Adam, self).__init__(params, defaults)

    @property
    def supports_memory_efficient_fp16(self):
        return True

    @property
    def supports_flat_params(self):
        return True

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data.float()
                if grad.is_sparse:
                    raise RuntimeError(
                        'Adam does not support sparse gradients, '
                        'please consider SparseAdam instead'
                    )
                amsgrad = group['amsgrad']

                p_data_fp32 = p.data.float()

                state = self.state[p]

                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p_data_fp32)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
                    if amsgrad:
                        # Maintains max of all exp. moving avg. of sq. grad. values
                        state['max_exp_avg_sq'] = torch.zeros_like(p_data_fp32)
                else:
                    state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
                    state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
                    if amsgrad:
                        state['max_exp_avg_sq'] = state['max_exp_avg_sq'].type_as(p_data_fp32)

                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                if amsgrad:
                    max_exp_avg_sq = state['max_exp_avg_sq']
                beta1, beta2 = group['betas']

                state['step'] += 1

                # Decay the first and second moment running average coefficient
                exp_avg.mul_(beta1).add_(1 - beta1, grad)
                exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)

                if amsgrad:
                    # Maintains the maximum of all 2nd moment running avg. until now
                    torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
                    # Use the max. for normalizing the running avg. of gradient
                    denom = max_exp_avg_sq.sqrt().add_(group['eps'])
                else:
                    denom = exp_avg_sq.sqrt().add_(group['eps'])

                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']
                step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1

                if group['weight_decay'] != 0:
                    # decoupled (AdamW-style) weight decay, applied to the params directly
                    p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)

                p_data_fp32.addcdiv_(-step_size, exp_avg, denom)

                p.data.copy_(p_data_fp32)

        return loss
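
# Sketch illustrating the decoupled ("AdamW"-style) weight decay used by the
# Adam class above: the decay is applied directly to the parameters, scaled
# by lr, rather than folded into the gradient. Toy numbers, illustration only.
def _demo_adamw_style_decay():
    p = torch.nn.Parameter(torch.ones(3))
    p.grad = torch.zeros(3)  # a zero gradient isolates the weight-decay term
    opt = Adam([p], lr=0.1, weight_decay=0.01)
    opt.step()
    # each parameter shrinks by lr * weight_decay * p = 0.001
    print(p.data)  # approx. tensor([0.9990, 0.9990, 0.9990])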
@register_optimizer('adamax')
class FairseqAdamax(FairseqOptimizer):

    def __init__(self, args, params):
        super().__init__(args)
        self._optimizer = Adamax(params, **self.optimizer_config)

    @staticmethod
    def add_args(parser):
        """Add optimizer-specific arguments to the parser."""
        parser.add_argument('--adamax-betas', default='(0.9, 0.999)', metavar='B',
                            help='betas for Adam optimizer')
        parser.add_argument('--adamax-eps', type=float, default=1e-8, metavar='D',
                            help='epsilon for Adam optimizer')
        parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD',
                            help='weight decay')
        parser.add_argument('--no-bias-correction', default=False, action='store_true',
                            help='disable bias correction')

    @property
    def optimizer_config(self):
        """
        Return a kwarg dictionary that will be used to override optimizer
        args stored in checkpoints. This allows us to load a checkpoint and
        resume training using a different set of optimizer args, e.g., with a
        different learning rate.
        """
        return {
            'lr': self.args.lr[0],
            'betas': eval(self.args.adamax_betas),
            'eps': self.args.adamax_eps,
            'weight_decay': self.args.weight_decay,
            'bias_correction': not self.args.no_bias_correction,
        }
class Adamax(torch.optim.Optimizer):
    """Implements Adamax algorithm (a variant of Adam based on infinity norm).

    It has been proposed in `Adam: A Method for Stochastic Optimization`__.

    Compared to the version in PyTorch, this version implements a fix for weight decay.

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 2e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        bias_correction (bool, optional): enable bias correction (default: True)

    __ https://arxiv.org/abs/1412.6980
    """

    def __init__(self, params, lr=2e-3, betas=(0.9, 0.999), eps=1e-8,
                 weight_decay=0, bias_correction=True):
        if not 0.0 <= lr:
            raise ValueError('Invalid learning rate: {}'.format(lr))
        if not 0.0 <= eps:
            raise ValueError('Invalid epsilon value: {}'.format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError('Invalid beta parameter at index 0: {}'.format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError('Invalid beta parameter at index 1: {}'.format(betas[1]))
        if not 0.0 <= weight_decay:
            raise ValueError('Invalid weight_decay value: {}'.format(weight_decay))

        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay,
                        bias_correction=bias_correction)
        super(Adamax, self).__init__(params, defaults)

    @property
    def supports_memory_efficient_fp16(self):
        return True

    @property
    def supports_flat_params(self):
        return True

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data.float()
                if grad.is_sparse:
                    raise RuntimeError('Adamax does not support sparse gradients')

                p_data_fp32 = p.data.float()

                state = self.state[p]

                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p_data_fp32)
                    state['exp_inf'] = torch.zeros_like(p_data_fp32)
                else:
                    state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
                    state['exp_inf'] = state['exp_inf'].type_as(p_data_fp32)

                exp_avg, exp_inf = state['exp_avg'], state['exp_inf']
                beta1, beta2 = group['betas']
                eps = group['eps']

                state['step'] += 1

                # Update biased first moment estimate
                exp_avg.mul_(beta1).add_(1 - beta1, grad)

                # Update the exponentially weighted infinity norm
                torch.max(exp_inf.mul_(beta2), grad.abs_(), out=exp_inf)

                step_size = group['lr']
                if group['bias_correction']:
                    bias_correction = 1 - beta1 ** state['step']
                    step_size /= bias_correction

                if group['weight_decay'] != 0:
                    # decoupled weight decay (the "fix" mentioned in the docstring)
                    p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)

                p_data_fp32.addcdiv_(-step_size, exp_avg, exp_inf.add(eps))

                p.data.copy_(p_data_fp32)

        return loss
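
# The distinguishing piece of Adamax is the infinity-norm second moment:
# exp_inf_t = max(beta2 * exp_inf_{t-1}, |g_t|). A tiny numeric check of that
# recurrence, independent of the optimizer class above (illustration only):
def _demo_adamax_inf_norm():
    beta2 = 0.999
    exp_inf = torch.zeros(3)
    for g in (torch.tensor([1.0, -2.0, 0.5]), torch.tensor([0.1, 0.1, 3.0])):
        exp_inf = torch.max(exp_inf * beta2, g.abs())
    print(exp_inf)  # tensor([0.9990, 1.9980, 3.0000])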
class FairseqBMUF(FairseqOptimizer):
    """
    Implements incremental block distributed data parallelism similar to
    https://ieeexplore.ieee.org/document/7472805

    Paper title: Scalable training of deep learning machines by incremental
    block training with intra-block parallel optimization and blockwise
    model-update filtering
    """

    def __init__(self, args, optimizer):
        super().__init__(args)
        self._optimizer = optimizer
        self._num_updates = 0
        self.sync_iter = self.args.global_sync_iter
        self.block_momentum = self.args.block_momentum
        self.block_lr = self.args.block_lr
        self._reset_local_data()
        self.warmup_iteration = self.args.warmup_iterations
        self.use_nbm = self.args.use_nbm
        self.initial_state = self._optimizer.state_dict()
        self.average_sync = self.args.average_sync

    @staticmethod
    def add_args(parser):
        """Add optimizer-specific arguments to the parser."""
        parser.add_argument('--block-lr', default=1, type=float,
                            help='block learning rate for bmuf')
        parser.add_argument('--block-momentum', default=0.875, type=float,
                            help='block momentum for bmuf')
        parser.add_argument('--global-sync-iter', default=50, type=int,
                            help='Iteration for syncing global model')
        parser.add_argument('--warmup-iterations', default=500, type=int,
                            help='warmup iterations for model to broadcast')
        parser.add_argument('--use-nbm', default=False, action='store_true',
                            help='Specify whether you want to use classical BM / Nesterov BM')
        parser.add_argument('--average-sync', default=False, action='store_true',
                            help='Specify whether you want to average the local momentum after each sync')

    @property
    def optimizer(self):
        return self._optimizer.optimizer

    @property
    def optimizer_config(self):
        return self._optimizer.optimizer_config

    def get_lr(self):
        return self._optimizer.get_lr()

    def set_lr(self, lr):
        self._optimizer.set_lr(lr)

    def state_dict(self):
        return self._optimizer.state_dict()

    def load_state_dict(self, state_dict, optimizer_overrides=None):
        self._optimizer.load_state_dict(state_dict, optimizer_overrides)
        self.initial_state = self._optimizer.state_dict()

    def multiply_grads(self, c):
        """Multiplies grads by a constant *c*."""
        self._optimizer.multiply_grads(c)

    def clip_grad_norm(self, max_norm):
        """Clips gradient norm."""
        return self._optimizer.clip_grad_norm(max_norm)

    def average_params(self):
        self._optimizer.average_params()

    def _block_sync(self):
        if self.block_momentum != 0:
            self._calc_grad()
        self._avg_grad_from_all_gpus()
        if self.block_momentum != 0:
            self._update_global_model()
        if self.average_sync:
            self.average_params()

    def _is_warmup_end(self):
        return self.get_num_updates() == self.warmup_iteration

    def _is_bmuf_iter(self):
        return (
            self.get_num_updates() > self.warmup_iteration
            and self.get_num_updates() % self.sync_iter == 0
        )

    def _warmup_sync(self, root_rank=0):
        # broadcast the root rank's model to all workers
        for param in self.params:
            dist.broadcast(param.data, src=root_rank)
        if self.average_sync:
            self._optimizer.average_params()
        else:
            self._optimizer.load_state_dict(self.initial_state)
        self._reset_local_data()

    def step(self, closure=None):
        """Performs a single optimization step."""
        self._optimizer.step(closure)
        self.set_num_updates(self.get_num_updates() + 1)
        if self._is_warmup_end():
            self._warmup_sync()
        elif self._is_bmuf_iter():
            self._block_sync()

    def zero_grad(self):
        """Clears the gradients of all optimized parameters."""
        self._optimizer.zero_grad()

    def get_num_updates(self):
        """Get the number of parameters updates."""
        return self._num_updates

    def set_num_updates(self, num_updates):
        """Set the number of parameters updates."""
        self._num_updates = num_updates

    @torch.no_grad()
    def _reset_local_data(self):
        self.global_params = [torch.zeros_like(p.data) for p in self.params]
        self.smoothed_grads = [p.data.new_zeros(p.data.size()) for p in self.params]
        self.grads = [p.data.new_zeros(p.data.size()) for p in self.params]
        # save the global model locally for computing the block gradient at sync
        for param, global_param in zip(self.params, self.global_params):
            global_param.copy_(param.data)

    @torch.no_grad()
    def _calc_grad(self):
        # the block "gradient" is the difference between the previously synced
        # global model and the current local model
        for index, (param, global_param) in enumerate(zip(self.params, self.global_params)):
            self.grads[index] = global_param - param.data

    def _avg_grad_from_all_gpus(self):
        for index, param in enumerate(self.params):
            sync_para = param.data if self.block_momentum == 0 else self.grads[index]
            sync_para /= float(dist.get_world_size())
            dist.all_reduce(sync_para, op=dist.ReduceOp.SUM)

    @torch.no_grad()
    def _update_global_model(self):
        for index, (param, global_param, smoothed_grad, grad) in enumerate(zip(
            self.params, self.global_params, self.smoothed_grads, self.grads
        )):
            # smoothed_grad = block_momentum * smoothed_grad + block_lr * grad
            smoothed_grad = self.block_momentum * smoothed_grad + self.block_lr * grad
            param.data.copy_(global_param - smoothed_grad)
            # Nesterov-style block momentum: take a partial extra step in the
            # direction of the smoothed block gradient
            if self.use_nbm:
                param.data.copy_(param.data - self.block_momentum * smoothed_grad)
            # back up for the next synchronization
            self.smoothed_grads[index] = smoothed_grad
            global_param.copy_(param.data)
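
# The core BMUF recurrences from _update_global_model above, written out for
# a single parameter tensor outside of any distributed setup (toy values,
# illustration only):
def _demo_bmuf_update(block_momentum=0.875, block_lr=1.0, use_nbm=True):
    global_param = torch.ones(4)        # model at the last sync point
    param = torch.full((4,), 0.9)       # averaged local model after this block
    smoothed_grad = torch.zeros(4)      # running block-level "gradient"
    grad = global_param - param         # block gradient, as in _calc_grad
    smoothed_grad = block_momentum * smoothed_grad + block_lr * grad
    param = global_param - smoothed_grad
    if use_nbm:  # Nesterov-style block momentum
        param = param - block_momentum * smoothed_grad
    print(param)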
class FairseqOptimizer(object):

    def __init__(self, args):
        super().__init__()
        self.args = args

    @staticmethod
    def add_args(parser):
        """Add optimizer-specific arguments to the parser."""
        pass

    @property
    def optimizer(self):
        """Return a torch.optim.optimizer.Optimizer instance."""
        if not hasattr(self, '_optimizer'):
            raise NotImplementedError
        if not isinstance(self._optimizer, torch.optim.Optimizer):
            raise ValueError('_optimizer must be an instance of torch.optim.Optimizer')
        return self._optimizer

    @property
    def optimizer_config(self):
        """
        Return a kwarg dictionary that will be used to override optimizer
        args stored in checkpoints. This allows us to load a checkpoint and
        resume training using a different set of optimizer args, e.g., with a
        different learning rate.
        """
        raise NotImplementedError

    @property
    def params(self):
        """Return an iterable of the parameters held by the optimizer."""
        for param_group in self.optimizer.param_groups:
            for p in param_group['params']:
                yield p

    def __getstate__(self):
        return self._optimizer.__getstate__()

    def get_lr(self):
        """Return the current learning rate."""
        return self.optimizer.param_groups[0]['lr']

    def set_lr(self, lr):
        """Set the learning rate."""
        for param_group in self.optimizer.param_groups:
            param_group['lr'] = lr

    def state_dict(self):
        """Return the optimizer's state dict."""
        return self.optimizer.state_dict()

    def load_state_dict(self, state_dict, optimizer_overrides=None):
        """Load an optimizer state dict.

        In general we should prefer the configuration of the existing optimizer
        instance (e.g., learning rate) over that found in the state_dict. This
        allows us to resume training from a checkpoint using a new set of
        optimizer args.
        """
        self.optimizer.load_state_dict(state_dict)

        if optimizer_overrides is not None and len(optimizer_overrides) > 0:
            # override learning rate, momentum, etc. with latest values
            for group in self.optimizer.param_groups:
                group.update(optimizer_overrides)

    def backward(self, loss):
        """Computes the sum of gradients of the given tensor w.r.t. graph leaves."""
        loss.backward()

    def multiply_grads(self, c):
        """Multiplies grads by a constant *c*."""
        for p in self.params:
            if p.grad is not None:
                p.grad.data.mul_(c)

    def clip_grad_norm(self, max_norm):
        """Clips gradient norm."""
        return utils.clip_grad_norm_(self.params, max_norm)

    def step(self, closure=None):
        """Performs a single optimization step."""
        self.optimizer.step(closure)

    def zero_grad(self):
        """Clears the gradients of all optimized parameters."""
        for p in self.params:
            p.grad = None
        self.optimizer.zero_grad()

    @property
    def supports_memory_efficient_fp16(self):
        if hasattr(self.optimizer, 'supports_memory_efficient_fp16'):
            return self.optimizer.supports_memory_efficient_fp16
        return False

    @property
    def supports_flat_params(self):
        """
        Whether the optimizer supports collapsing of the model
        parameters/gradients into a single contiguous Tensor.
        """
        if hasattr(self.optimizer, 'supports_flat_params'):
            return self.optimizer.supports_flat_params
        return False

    def average_params(self):
        pass
class DynamicLossScaler(object):

    def __init__(self, init_scale=2. ** 15, scale_factor=2., scale_window=2000,
                 tolerance=0.05, threshold=None):
        self.loss_scale = init_scale
        self.scale_factor = scale_factor
        self.scale_window = scale_window
        self.tolerance = tolerance
        self.threshold = threshold
        self._iter = 0
        self._last_overflow_iter = -1
        self._last_rescale_iter = -1
        self._overflows_since_rescale = 0

    def update_scale(self, overflow):
        iter_since_rescale = self._iter - self._last_rescale_iter
        if overflow:
            self._last_overflow_iter = self._iter
            self._overflows_since_rescale += 1
            pct_overflow = self._overflows_since_rescale / float(iter_since_rescale)
            if pct_overflow >= self.tolerance:
                self._decrease_loss_scale()
                self._last_rescale_iter = self._iter
                self._overflows_since_rescale = 0
        elif (self._iter - self._last_overflow_iter) % self.scale_window == 0:
            self.loss_scale *= self.scale_factor
            self._last_rescale_iter = self._iter
        self._iter += 1

    def _decrease_loss_scale(self):
        self.loss_scale /= self.scale_factor
        if self.threshold is not None:
            self.loss_scale = max(self.loss_scale, self.threshold)

    @staticmethod
    def has_overflow(grad_norm):
        # detect inf and nan
        if grad_norm == float('inf') or grad_norm != grad_norm:
            return True
        return False
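
# How the scaler reacts over time (illustration with a tiny scale_window):
# an overflow within the tolerance window halves the scale, and scale_window
# clean iterations after the last overflow double it back.
def _demo_loss_scaler():
    scaler = DynamicLossScaler(init_scale=2. ** 15, scale_window=4)
    for overflow in (False, True, False, False, False, False):
        scaler.update_scale(overflow)
    # halved to 16384 after the overflow, doubled back to 32768 after
    # four clean iterations
    print(scaler.loss_scale)  # 32768.0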
class _FP16OptimizerMixin(object):

    def __init__(self, *args, **kwargs):
        # forward __init__ call to the next class in MRO (method resolution order)
        super().__init__(*args, **kwargs)

    @property
    def has_flat_params(self):
        return torch.is_tensor(self.fp32_params)

    @classmethod
    def build_fp32_params(cls, params, flatten=True):
        # create an FP32 copy of parameters and grads
        if flatten:
            total_param_size = sum(p.data.numel() for p in params)
            fp32_params = torch.zeros(total_param_size, dtype=torch.float, device=params[0].device)
            offset = 0
            for p in params:
                numel = p.data.numel()
                fp32_params[offset:offset + numel].copy_(p.data.view(-1))
                offset += numel
            fp32_params = torch.nn.Parameter(fp32_params)
            fp32_params.grad = fp32_params.data.new(total_param_size)
            return fp32_params
        else:
            fp32_params = []
            for p in params:
                p32 = torch.nn.Parameter(p.data.float())
                p32.grad = torch.zeros_like(p32.data)
                fp32_params.append(p32)
            return fp32_params

    def state_dict(self):
        """Return the optimizer's state dict."""
        state_dict = self.fp32_optimizer.state_dict()
        state_dict['loss_scale'] = self.scaler.loss_scale
        return state_dict

    def load_state_dict(self, state_dict, optimizer_overrides=None):
        """Load an optimizer state dict.

        In general we should prefer the configuration of the existing optimizer
        instance (e.g., learning rate) over that found in the state_dict. This
        allows us to resume training from a checkpoint using a new set of
        optimizer args.
        """
        if 'loss_scale' in state_dict:
            self.scaler.loss_scale = state_dict['loss_scale']
        self.fp32_optimizer.load_state_dict(state_dict, optimizer_overrides)

    def backward(self, loss):
        """Computes the sum of gradients of the given tensor w.r.t. graph leaves.

        Compared to :func:`fairseq.optim.FairseqOptimizer.backward`, this
        function additionally dynamically scales the loss to avoid gradient
        underflow.
        """
        loss = loss * self.scaler.loss_scale
        loss.backward()
        self._needs_sync = True

    def _sync_fp16_grads_to_fp32(self, multiply_grads=1.):
        if self._needs_sync:
            # correct for dynamic loss scaler
            multiply_grads /= self.scaler.loss_scale

            # copy FP16 grads to FP32
            if self.has_flat_params:
                offset = 0
                for p in self.fp16_params:
                    if not p.requires_grad:
                        continue
                    grad_data = p.grad.data if p.grad is not None else p.data.new_zeros(p.data.shape)
                    numel = grad_data.numel()
                    self.fp32_params.grad.data[offset:offset + numel].copy_(grad_data.view(-1))
                    offset += numel
                self.fp32_params.grad.data.mul_(multiply_grads)
            else:
                for p, p32 in zip(self.fp16_params, self.fp32_params):
                    if not p.requires_grad:
                        continue
                    if p.grad is not None:
                        p32.grad.data.copy_(p.grad.data)
                        p32.grad.data.mul_(multiply_grads)
                    else:
                        p32.grad = torch.zeros_like(p.data, dtype=torch.float)

            self._needs_sync = False

    def multiply_grads(self, c):
        """Multiplies grads by a constant ``c``."""
        if self._needs_sync:
            self._sync_fp16_grads_to_fp32(c)
        elif self.has_flat_params:
            self.fp32_params.grad.data.mul_(c)
        else:
            for p32 in self.fp32_params:
                p32.grad.data.mul_(c)

    def clip_grad_norm(self, max_norm):
        """Clips gradient norm and updates dynamic loss scaler."""
        self._sync_fp16_grads_to_fp32()
        if self.has_flat_params:
            grad_norm = utils.clip_grad_norm_([self.fp32_params.grad.data], max_norm)
        else:
            grad_norm = utils.clip_grad_norm_(self.fp32_params, max_norm)

        # detect overflow and adjust loss scale
        overflow = DynamicLossScaler.has_overflow(grad_norm)
        self.scaler.update_scale(overflow)
        if overflow:
            if self.scaler.loss_scale <= self.min_loss_scale:
                raise FloatingPointError(
                    'Minimum loss scale reached ({}). Your loss is probably exploding. '
                    'Try lowering the learning rate, using gradient clipping or '
                    'increasing the batch size.'.format(self.min_loss_scale)
                )
            raise OverflowError('setting loss scale to: ' + str(self.scaler.loss_scale))
        return grad_norm

    def step(self, closure=None):
        """Performs a single optimization step."""
        self._sync_fp16_grads_to_fp32()
        self.fp32_optimizer.step(closure)

        # copy FP32 params back into FP16 model
        if self.has_flat_params:
            offset = 0
            for p in self.fp16_params:
                if not p.requires_grad:
                    continue
                numel = p.data.numel()
                p.data.copy_(self.fp32_params.data[offset:offset + numel].view_as(p.data))
                offset += numel
        else:
            for p, p32 in zip(self.fp16_params, self.fp32_params):
                if not p.requires_grad:
                    continue
                p.data.copy_(p32.data)

    def zero_grad(self):
        """Clears the gradients of all optimized parameters."""
        for p in self.fp16_params:
            p.grad = None
        if self.has_flat_params:
            self.fp32_params.grad.zero_()
        else:
            for p32 in self.fp32_params:
                p32.grad.zero_()
        self._needs_sync = False
class FP16Optimizer(_FP16OptimizerMixin, optim.FairseqOptimizer):
    """
    Wrap an *optimizer* to support FP16 (mixed precision) training.
    """

    def __init__(self, args, params, fp32_optimizer, fp32_params):
        super().__init__(args)
        self.fp16_params = params
        self.fp32_optimizer = fp32_optimizer
        self.fp32_params = fp32_params

        if getattr(args, 'fp16_scale_window', None) is None:
            if len(args.update_freq) > 1:
                raise ValueError(
                    '--fp16-scale-window must be given explicitly when using a '
                    'custom --update-freq schedule'
                )
            scale_window = int(2 ** 14 / args.distributed_world_size / args.update_freq[0])
        else:
            scale_window = args.fp16_scale_window

        self.scaler = DynamicLossScaler(
            init_scale=args.fp16_init_scale,
            scale_window=scale_window,
            tolerance=args.fp16_scale_tolerance,
            threshold=args.threshold_loss_scale,
        )
        self.min_loss_scale = self.args.min_loss_scale

    @classmethod
    def build_optimizer(cls, args, params):
        """
        Args:
            args (argparse.Namespace): fairseq args
            params (iterable): iterable of parameters to optimize
        """
        flatten = not getattr(args, 'fp16_no_flatten_grads', False)
        fp32_params = cls.build_fp32_params(params, flatten=flatten)
        if flatten:
            fp32_optimizer = optim.build_optimizer(args, [fp32_params])
        else:
            fp32_optimizer = optim.build_optimizer(args, fp32_params)
        if flatten and not fp32_optimizer.supports_flat_params:
            raise RuntimeError(
                'chosen optimizer does not support flat params, '
                'please set --fp16-no-flatten-grads'
            )
        return cls(args, params, fp32_optimizer, fp32_params)

    @property
    def optimizer(self):
        return self.fp32_optimizer.optimizer

    @property
    def optimizer_config(self):
        return self.fp32_optimizer.optimizer_config

    def get_lr(self):
        return self.fp32_optimizer.get_lr()

    def set_lr(self, lr):
        self.fp32_optimizer.set_lr(lr)
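
# The essence of the FP16 wrapper in a few lines (a toy re-derivation, not
# the fairseq API): keep fp16 params for forward/backward, an fp32 master
# copy for the actual update, and copy back after each step. This mirrors the
# unflattened path of _sync_fp16_grads_to_fp32 and step above.
def _demo_fp16_master_copy():
    p16 = torch.nn.Parameter(torch.randn(4, dtype=torch.half))
    p32 = torch.nn.Parameter(p16.data.float())
    p32.grad = torch.zeros_like(p32.data)
    opt = torch.optim.SGD([p32], lr=0.1)
    p16.grad = torch.randn_like(p16)    # stands in for a half-precision backward
    p32.grad.data.copy_(p16.grad.data)  # sync fp16 grads to the fp32 copy
    opt.step()                          # update happens in fp32
    p16.data.copy_(p32.data)            # sync updated weights back to fp16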
class _MemoryEfficientFP16OptimizerMixin(object):

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    @property
    def has_flat_params(self):
        return False

    def state_dict(self):
        """Return the optimizer's state dict."""
        state_dict = self.wrapped_optimizer.state_dict()
        state_dict['loss_scale'] = self.scaler.loss_scale
        return state_dict

    def load_state_dict(self, state_dict, optimizer_overrides=None):
        """Load an optimizer state dict.

        In general we should prefer the configuration of the existing optimizer
        instance (e.g., learning rate) over that found in the state_dict. This
        allows us to resume training from a checkpoint using a new set of
        optimizer args.
        """
        if 'loss_scale' in state_dict:
            self.scaler.loss_scale = state_dict['loss_scale']

        self.wrapped_optimizer.load_state_dict(state_dict, optimizer_overrides)

        # remap the saved optimizer state onto the current parameters by position
        groups = self.optimizer.param_groups
        saved_groups = state_dict['param_groups']
        id_map = {
            old_id: p
            for old_id, p in zip(
                chain(*(g['params'] for g in saved_groups)),
                chain(*(g['params'] for g in groups)),
            )
        }
        for k, v in state_dict['state'].items():
            if k in id_map:
                param = id_map[k]
                self.optimizer.state[param] = v

    def backward(self, loss):
        """Computes the sum of gradients of the given tensor w.r.t. graph leaves.

        Compared to :func:`fairseq.optim.FairseqOptimizer.backward`, this
        function additionally dynamically scales the loss to avoid gradient
        underflow.
        """
        loss = loss * self.scaler.loss_scale
        loss.backward()
        self._grads_are_scaled = True

    def _unscale_grads(self, multiply_grads=1.):
        if self._grads_are_scaled:
            self._grads_are_scaled = False
            # correct for dynamic loss scaler
            self.wrapped_optimizer.multiply_grads(multiply_grads / self.scaler.loss_scale)
        else:
            assert multiply_grads == 1.

    def multiply_grads(self, c):
        """Multiplies grads by a constant *c*."""
        if self._grads_are_scaled:
            self._unscale_grads(c)
        else:
            self.wrapped_optimizer.multiply_grads(c)

    def clip_grad_norm(self, max_norm):
        """Clips gradient norm and updates dynamic loss scaler."""
        self._unscale_grads()
        grad_norm = self.wrapped_optimizer.clip_grad_norm(max_norm)

        # detect overflow and adjust loss scale
        overflow = DynamicLossScaler.has_overflow(grad_norm)
        self.scaler.update_scale(overflow)
        if overflow:
            if self.scaler.loss_scale <= self.min_loss_scale:
                raise FloatingPointError(
                    'Minimum loss scale reached ({}). Your loss is probably exploding. '
                    'Try lowering the learning rate, using gradient clipping or '
                    'increasing the batch size.'.format(self.min_loss_scale)
                )
            raise OverflowError('setting loss scale to: ' + str(self.scaler.loss_scale))

        return grad_norm

    def step(self, closure=None):
        """Performs a single optimization step."""
        self._unscale_grads()
        self.wrapped_optimizer.step(closure)

    def zero_grad(self):
        """Clears the gradients of all optimized parameters."""
        self.wrapped_optimizer.zero_grad()
        self._grads_are_scaled = False
class MemoryEfficientFP16Optimizer(_MemoryEfficientFP16OptimizerMixin, optim.FairseqOptimizer):
    """
    Wrap an *optimizer* to support FP16 (mixed precision) training.

    Compared to :class:`fairseq.optim.FP16Optimizer`, this version does not
    maintain an FP32 copy of the model. We instead expect the optimizer to
    convert the gradients to FP32 internally and sync the results back to the
    FP16 model params. This significantly reduces memory usage but slightly
    increases the time spent in the optimizer.

    Since this wrapper depends on specific functionality in the wrapped
    optimizer (i.e., on-the-fly conversion of grads to FP32), only certain
    optimizers can be wrapped. This is determined by the
    *supports_memory_efficient_fp16* property.
    """

    def __init__(self, args, params, optimizer):
        if not optimizer.supports_memory_efficient_fp16:
            raise ValueError(
                'Unsupported optimizer: {}'.format(optimizer.__class__.__name__)
            )

        super().__init__(args)
        self.wrapped_optimizer = optimizer

        if getattr(args, 'fp16_scale_window', None) is None:
            if len(args.update_freq) > 1:
                raise ValueError(
                    '--fp16-scale-window must be given explicitly when using a '
                    'custom --update-freq schedule'
                )
            scale_window = 2 ** 14 / args.distributed_world_size / args.update_freq[0]
        else:
            scale_window = args.fp16_scale_window

        self.scaler = DynamicLossScaler(
            init_scale=args.fp16_init_scale,
            scale_window=scale_window,
            tolerance=args.fp16_scale_tolerance,
            threshold=args.threshold_loss_scale,
        )
        self.min_loss_scale = self.args.min_loss_scale

    @classmethod
    def build_optimizer(cls, args, params):
        """
        Args:
            args (argparse.Namespace): fairseq args
            params (iterable): iterable of parameters to optimize
        """
        fp16_optimizer = optim.build_optimizer(args, params)
        return cls(args, params, fp16_optimizer)

    @property
    def optimizer(self):
        return self.wrapped_optimizer.optimizer

    @property
    def optimizer_config(self):
        return self.wrapped_optimizer.optimizer_config

    def get_lr(self):
        return self.wrapped_optimizer.get_lr()

    def set_lr(self, lr):
        self.wrapped_optimizer.set_lr(lr)
def get_fused_adam_class():
    """
    Look for the FusedAdam optimizer from apex. We first try to load the
    "contrib" interface, which is a bit faster than the main interface,
    but is technically deprecated.
    """
    try:
        # The custom fused_adam_cuda extension installs its own interface
        global fused_adam_cuda
        import importlib
        fused_adam_cuda = importlib.import_module('fused_adam_cuda')
        return FusedAdamV1
    except ImportError:
        try:
            # fall back to the newer interface
            from apex.optimizers import FusedAdam as _FusedAdam  # noqa
            return FusedAdamV2
        except ImportError:
            pass
    return None
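
# Typical call pattern, mirroring FairseqAdam.__init__ above (the params and
# optimizer_config arguments here are hypothetical placeholders): fall back
# to the pure-PyTorch Adam when neither apex interface is importable.
def _demo_fused_adam_fallback(params, optimizer_config):
    fused_adam_cls = get_fused_adam_class()
    if fused_adam_cls is not None and torch.cuda.is_available():
        return fused_adam_cls(params, **optimizer_config)
    return Adam(params, **optimizer_config)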
class FusedAdamV1(torch.optim.Optimizer):
    """
    Implements Adam algorithm. Currently GPU-only. Requires Apex to be installed via
    ``python setup.py install --cuda_ext --cpp_ext``.

    It has been proposed in `Adam: A Method for Stochastic Optimization`_.

    Compared to the original version in Apex, the fairseq version casts grads
    and params to FP32 internally to support ``--memory-efficient-fp16``.

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups.
        lr (float, optional): learning rate. (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square. (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability. (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        amsgrad (boolean, optional): whether to use the AMSGrad variant of this
            algorithm from the paper `On the Convergence of Adam and Beyond`_
            (default: False) NOT SUPPORTED in FusedAdam!
        eps_inside_sqrt (boolean, optional): in the 'update parameters' step,
            adds eps to the bias-corrected second moment estimate before
            evaluating square root instead of adding it to the square root of
            second moment estimate as in the original paper. (default: False)

    .. _Adam: A Method for Stochastic Optimization:
        https://arxiv.org/abs/1412.6980
    .. _On the Convergence of Adam and Beyond:
        https://openreview.net/forum?id=ryQu7f-RZ
    """

    def __init__(self, params, lr=1e-3, bias_correction=True, betas=(0.9, 0.999),
                 eps=1e-8, eps_inside_sqrt=False, weight_decay=0., max_grad_norm=0.,
                 amsgrad=False):
        global fused_adam_cuda
        import importlib
        fused_adam_cuda = importlib.import_module('fused_adam_cuda')

        if amsgrad:
            raise RuntimeError('FusedAdam does not support the AMSGrad variant.')
        defaults = {
            'lr': lr,
            'bias_correction': bias_correction,
            'betas': betas,
            'eps': eps,
            'weight_decay': weight_decay,
            'max_grad_norm': max_grad_norm,
        }
        super().__init__(params, defaults)
        self.eps_mode = 0 if eps_inside_sqrt else 1

    @property
    def supports_memory_efficient_fp16(self):
        return True

    @property
    def supports_flat_params(self):
        return True

    def step(self, closure=None, grads=None, scale=1., grad_norms=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
            grads (list of tensors, optional): weight gradient to use for the
                optimizer update. If gradients have type torch.half, parameters
                are expected to be in type torch.float. (default: None)
            output params (list of tensors, optional): A reduced precision copy
                of the updated weights written out in addition to the regular
                updated weights. Have to be of same type as gradients. (default: None)
            scale (float, optional): factor to divide gradient tensor values
                by before applying to weights. (default: 1)
        """
        loss = None
        if closure is not None:
            loss = closure()

        if grads is None:
            grads_group = [None] * len(self.param_groups)
        # a list/generator of gradients means a single group
        elif isinstance(grads, types.GeneratorType):
            grads_group = [grads]
        elif type(grads[0]) != list:
            grads_group = [grads]
        else:
            grads_group = grads

        if grad_norms is None:
            grad_norms = [None] * len(self.param_groups)

        for group, grads_this_group, grad_norm in zip(self.param_groups, grads_group, grad_norms):
            if grads_this_group is None:
                grads_this_group = [None] * len(group['params'])

            # compute the combined scale factor for this group
            combined_scale = scale
            if group['max_grad_norm'] > 0:
                # note: grad_norm here is the norm of the *scaled* gradients
                clip = ((grad_norm / scale) + 1e-6) / group['max_grad_norm']
                if clip > 1:
                    combined_scale = clip * scale

            bias_correction = 1 if group['bias_correction'] else 0

            for p, grad in zip(group['params'], grads_this_group):
                # skip params that have no gradient from either source
                if p.grad is None and grad is None:
                    continue
                if grad is None:
                    grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError(
                        'FusedAdam does not support sparse gradients, '
                        'please consider SparseAdam instead'
                    )

                p_data_fp32 = p.data.float()

                state = self.state[p]

                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p_data_fp32)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
                else:
                    state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
                    state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)

                exp_avg = state['exp_avg']
                exp_avg_sq = state['exp_avg_sq']
                beta1, beta2 = group['betas']

                state['step'] += 1

                out_p = p.data
                with torch.cuda.device(p.device):
                    fused_adam_cuda.adam(
                        p_data_fp32, out_p, exp_avg, exp_avg_sq, grad,
                        group['lr'], beta1, beta2, group['eps'], combined_scale,
                        state['step'], self.eps_mode, bias_correction,
                        group['weight_decay'],
                    )

        return loss
@register_optimizer('lamb')
class FairseqLAMB(FairseqOptimizer):
    """LAMB optimizer."""

    def __init__(self, args, params):
        super().__init__(args)
        try:
            from apex.optimizers import FusedLAMB
            self._optimizer = FusedLAMB(params, **self.optimizer_config)
        except ImportError:
            raise ImportError('Please install apex to use LAMB optimizer')

    @staticmethod
    def add_args(parser):
        """Add optimizer-specific arguments to the parser."""
        parser.add_argument('--lamb-betas', default='(0.9, 0.999)', metavar='B',
                            help='betas for LAMB optimizer')
        parser.add_argument('--lamb-eps', type=float, default=1e-8, metavar='D',
                            help='epsilon for LAMB optimizer')
        parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD',
                            help='weight decay')

    @property
    def optimizer_config(self):
        """
        Return a kwarg dictionary that will be used to override optimizer
        args stored in checkpoints. This allows us to load a checkpoint and
        resume training using a different set of optimizer args, e.g., with a
        different learning rate.
        """
        return {
            'lr': self.args.lr[0],
            'betas': eval(self.args.lamb_betas),
            'eps': self.args.lamb_eps,
            'weight_decay': self.args.weight_decay,
        }

    @property
    def supports_flat_params(self):
        return False
@register_lr_scheduler('cosine')
class CosineSchedule(FairseqLRScheduler):
    """Assign LR based on a cyclical schedule that follows the cosine function.

    See https://arxiv.org/pdf/1608.03983.pdf for details.

    We also support a warmup phase where we linearly increase the learning rate
    from some initial learning rate (``--warmup-init-lr``) until the configured
    max learning rate (``--max-lr``).

    During warmup::

      lrs = torch.linspace(args.warmup_init_lr, args.lr, args.warmup_updates)
      lr = lrs[update_num]

    After warmup::

      lr = lr_min + 0.5*(lr_max - lr_min)*(1 + cos(pi * t_curr / t_i))

    where ``t_curr`` is the number of updates elapsed within the current period
    and ``t_i`` is the length of the current period, which is scaled by
    ``t_mult`` after every period.
    """

    def __init__(self, args, optimizer):
        super().__init__(args, optimizer)
        if len(args.lr) > 1:
            raise ValueError(
                'Cannot use a fixed learning rate schedule with cosine. '
                'Consider --lr-scheduler=fixed instead.'
            )

        warmup_end_lr = args.max_lr
        if args.warmup_init_lr < 0:
            args.warmup_init_lr = args.lr[0]

        self.min_lr = args.lr[0]
        self.max_lr = args.max_lr

        assert self.max_lr > self.min_lr, 'max_lr must be more than lr'

        self.t_mult = args.t_mult
        self.period = args.lr_period_updates

        if self.period <= 0:
            assert args.max_update >= 0, 'Either --max_update or --lr-period-updates must be set'
            self.period = args.max_update - args.warmup_updates

        if args.warmup_updates > 0:
            # linearly warmup for the first args.warmup_updates
            self.lr_step = (warmup_end_lr - args.warmup_init_lr) / args.warmup_updates
        else:
            self.lr_step = 1

        self.warmup_updates = args.warmup_updates
        self.lr_shrink = args.lr_shrink

        # initial learning rate
        self.lr = args.warmup_init_lr
        self.optimizer.set_lr(self.lr)

    @staticmethod
    def add_args(parser):
        """Add arguments to the parser for this LR scheduler."""
        parser.add_argument('--warmup-updates', default=0, type=int, metavar='N',
                            help='warmup the learning rate linearly for the first N updates')
        parser.add_argument('--warmup-init-lr', default=-1, type=float, metavar='LR',
                            help='initial learning rate during warmup phase; default is args.lr')
        parser.add_argument('--max-lr', type=float, metavar='LR',
                            help='max learning rate, must be more than args.lr')
        parser.add_argument('--t-mult', default=1, type=float, metavar='LR',
                            help='factor to grow the length of each period')
        parser.add_argument('--lr-period-updates', default=-1, type=float, metavar='LR',
                            help='initial number of updates per period')
        parser.add_argument('--lr-shrink', default=0.1, type=float, metavar='LS',
                            help='shrink factor for annealing')

    def step(self, epoch, val_loss=None):
        """Update the learning rate at the end of the given epoch."""
        super().step(epoch, val_loss)
        # we don't change the learning rate at epoch boundaries
        return self.optimizer.get_lr()

    def step_update(self, num_updates):
        """Update the learning rate after each update."""
        if num_updates < self.args.warmup_updates:
            self.lr = self.args.warmup_init_lr + num_updates * self.lr_step
        else:
            curr_updates = num_updates - self.args.warmup_updates
            if self.t_mult != 1:
                i = math.floor(math.log(1 - curr_updates / self.period * (1 - self.t_mult), self.t_mult))
                t_i = self.t_mult ** i * self.period
                t_curr = curr_updates - (1 - self.t_mult ** i) / (1 - self.t_mult) * self.period
            else:
                i = math.floor(curr_updates / self.period)
                t_i = self.period
                t_curr = curr_updates - (self.period * i)

            lr_shrink = self.lr_shrink ** i
            min_lr = self.min_lr * lr_shrink
            max_lr = self.max_lr * lr_shrink

            self.lr = min_lr + 0.5 * (max_lr - min_lr) * (1 + math.cos(math.pi * t_curr / t_i))

        self.optimizer.set_lr(self.lr)
        return self.lr

    def __repr__(self):
        class_name = self.__class__.__name__
        s = '{}'.format(class_name)
        if self.warmup_updates > 0:
            s += '\n \t LR changes from {} to {} in {} steps'.format(self.lr, self.max_lr, self.warmup_updates)
        s += '\n \t LR changes from {} to {} (period: {})'.format(self.max_lr, self.min_lr, self.period)
        return s
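
# The post-warmup cosine formula in isolation (illustration only): the lr
# sweeps from max_lr down to min_lr over one period of t_i updates.
def _demo_cosine_formula(min_lr=1e-5, max_lr=1e-3, t_i=1000):
    for t_curr in (0, 250, 500, 750, 1000):
        lr = min_lr + 0.5 * (max_lr - min_lr) * (1 + math.cos(math.pi * t_curr / t_i))
        print(t_curr, lr)  # 1e-3 at t_curr=0, midpoint at 500, 1e-5 at 1000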
class FairseqLRScheduler(object):

    def __init__(self, args, optimizer):
        super().__init__()
        if not isinstance(optimizer, FairseqOptimizer):
            raise ValueError('optimizer must be an instance of FairseqOptimizer')
        self.args = args
        self.optimizer = optimizer
        self.best = None

    @staticmethod
    def add_args(parser):
        """Add arguments to the parser for this LR scheduler."""
        pass

    def state_dict(self):
        """Return the LR scheduler state dict."""
        return {'best': self.best}

    def load_state_dict(self, state_dict):
        """Load an LR scheduler state dict."""
        self.best = state_dict['best']

    def step(self, epoch, val_loss=None):
        """Update the learning rate at the end of the given epoch."""
        if val_loss is not None:
            if self.best is None:
                self.best = val_loss
            else:
                self.best = min(self.best, val_loss)

    def step_update(self, num_updates):
        """Update the learning rate after each update."""
        return self.optimizer.get_lr()
@register_lr_scheduler('fixed')
class FixedSchedule(FairseqLRScheduler):
    """Decay the LR on a fixed schedule."""

    def __init__(self, args, optimizer):
        super().__init__(args, optimizer)

        # set defaults
        args.warmup_updates = getattr(args, 'warmup_updates', 0) or 0

        self.lr = args.lr[0]
        if args.warmup_updates > 0:
            self.warmup_factor = 1. / args.warmup_updates
        else:
            self.warmup_factor = 1

    @staticmethod
    def add_args(parser):
        """Add arguments to the parser for this LR scheduler."""
        parser.add_argument('--force-anneal', '--fa', type=int, metavar='N',
                            help='force annealing at specified epoch')
        parser.add_argument('--lr-shrink', default=0.1, type=float, metavar='LS',
                            help='shrink factor for annealing, lr_new = (lr * lr_shrink)')
        parser.add_argument('--warmup-updates', default=0, type=int, metavar='N',
                            help='warmup the learning rate linearly for the first N updates')

    def get_next_lr(self, epoch):
        lrs = self.args.lr
        if self.args.force_anneal is None or epoch < self.args.force_anneal:
            # use fixed LR schedule
            next_lr = lrs[min(epoch, len(lrs) - 1)]
        else:
            # anneal based on lr_shrink
            next_lr = lrs[-1] * self.args.lr_shrink ** (epoch + 1 - self.args.force_anneal)
        return next_lr

    def step(self, epoch, val_loss=None):
        """Update the learning rate at the end of the given epoch."""
        super().step(epoch, val_loss)
        self.lr = self.get_next_lr(epoch)
        self.optimizer.set_lr(self.warmup_factor * self.lr)
        return self.optimizer.get_lr()

    def step_update(self, num_updates):
        """Update the learning rate after each update."""
        if self.args.warmup_updates > 0 and num_updates < self.args.warmup_updates:
            self.warmup_factor = (num_updates + 1) / float(self.args.warmup_updates)
            self.optimizer.set_lr(self.warmup_factor * self.lr)
        return self.optimizer.get_lr()
@register_lr_scheduler('inverse_sqrt')
class InverseSquareRootSchedule(FairseqLRScheduler):
    """Decay the LR based on the inverse square root of the update number.

    We also support a warmup phase where we linearly increase the learning rate
    from some initial learning rate (``--warmup-init-lr``) until the configured
    learning rate (``--lr``). Thereafter we decay proportional to the number of
    updates, with a decay factor set to align with the configured learning rate.

    During warmup::

      lrs = torch.linspace(args.warmup_init_lr, args.lr, args.warmup_updates)
      lr = lrs[update_num]

    After warmup::

      decay_factor = args.lr * sqrt(args.warmup_updates)
      lr = decay_factor / sqrt(update_num)
    """

    def __init__(self, args, optimizer):
        super().__init__(args, optimizer)
        if len(args.lr) > 1:
            raise ValueError(
                'Cannot use a fixed learning rate schedule with inverse_sqrt. '
                'Consider --lr-scheduler=fixed instead.'
            )
        warmup_end_lr = args.lr[0]
        if args.warmup_init_lr < 0:
            args.warmup_init_lr = 0 if args.warmup_updates > 0 else warmup_end_lr

        # linearly warmup for the first args.warmup_updates
        self.lr_step = (warmup_end_lr - args.warmup_init_lr) / args.warmup_updates

        # then, decay proportionally to the inverse square root of the update number
        self.decay_factor = warmup_end_lr * args.warmup_updates ** 0.5

        # initial learning rate
        self.lr = args.warmup_init_lr
        self.optimizer.set_lr(self.lr)

    @staticmethod
    def add_args(parser):
        """Add arguments to the parser for this LR scheduler."""
        parser.add_argument('--warmup-updates', default=4000, type=int, metavar='N',
                            help='warmup the learning rate linearly for the first N updates')
        parser.add_argument('--warmup-init-lr', default=-1, type=float, metavar='LR',
                            help='initial learning rate during warmup phase; default is args.lr')

    def step(self, epoch, val_loss=None):
        """Update the learning rate at the end of the given epoch."""
        super().step(epoch, val_loss)
        # we don't change the learning rate at epoch boundaries
        return self.optimizer.get_lr()

    def step_update(self, num_updates):
        """Update the learning rate after each update."""
        if num_updates < self.args.warmup_updates:
            self.lr = self.args.warmup_init_lr + num_updates * self.lr_step
        else:
            self.lr = self.decay_factor * num_updates ** -0.5
        self.optimizer.set_lr(self.lr)
        return self.lr
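
# The schedule in closed form (illustration only): after linear warmup to lr,
# decay_factor / sqrt(n) equals lr * sqrt(warmup_updates / n), so the lr
# halves every time the update count quadruples.
def _demo_inverse_sqrt(lr=5e-4, warmup_updates=4000):
    decay_factor = lr * warmup_updates ** 0.5
    for n in (4000, 16000, 64000):
        print(n, decay_factor * n ** -0.5)  # 5e-4, 2.5e-4, 1.25e-4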
@register_lr_scheduler('linear')
class LinearSchedule(FairseqLRScheduler):
    """Decay the LR linearly based on the update number.

    We also support a warmup phase where we linearly increase the learning rate
    from some initial learning rate (``--warmup-init-lr``) until the configured
    learning rate (``--lr``). Thereafter the learning rate decays linearly back
    to ``--warmup-init-lr`` over the remaining updates.

    During warmup::

      lrs = torch.linspace(args.warmup_init_lr, args.lr, args.warmup_updates)
      lr = lrs[update_num]

    After warmup::

      lrs = torch.linspace(args.lr, args.warmup_init_lr, args.max_update - args.warmup_updates + 1)
      lr = lrs[update_num - args.warmup_updates]
    """

    def __init__(self, args, optimizer):
        super().__init__(args, optimizer)
        if len(args.lr) > 1:
            raise ValueError(
                'Cannot use a fixed learning rate schedule with linear. '
                'Consider --lr-scheduler=fixed instead.'
            )

        total_updates = args.max_update
        if total_updates <= 0:
            raise ValueError(
                'Cannot use linear scheduler with --max-update <= 0. '
                'Consider passing a value for --max-update, e.g., --max-update=300000.'
            )
        min_lr = args.warmup_init_lr
        if min_lr < 0:
            raise ValueError(
                'Cannot use linear scheduler with --warmup-init-lr < 0. '
                'Consider using a value >= 0; a good value is 1e-7.'
            )
        max_lr = args.lr[0]
        if max_lr <= 0:
            raise ValueError(
                'Cannot use linear scheduler with --lr <= 0. '
                'Consider using a positive value; a good value is 0.0014.'
            )

        # precompute the full LR schedule: linear warmup followed by linear decay
        lr_steps = []
        warm_up_steps = args.warmup_updates
        if warm_up_steps > 0:
            lr_steps = lr_steps + np.linspace(min_lr, max_lr, warm_up_steps).tolist()
        lr_steps = lr_steps + np.linspace(max_lr, min_lr, total_updates - warm_up_steps + 1).tolist()

        self.lr_steps = lr_steps
        self.max_lr_updates = len(self.lr_steps)
        self.warmup_steps = warm_up_steps
        self.remaining_steps = total_updates - args.warmup_updates + 1
        self.max_lr = max_lr
        self.min_lr = min_lr

        # initial learning rate
        self.lr = min_lr
        self.optimizer.set_lr(min_lr)

    @staticmethod
    def add_args(parser):
        """Add arguments to the parser for this LR scheduler."""
        parser.add_argument('--warmup-updates', default=8000, type=int, metavar='N',
                            help='warmup the learning rate linearly for the first N updates')
        parser.add_argument('--warmup-init-lr', default=1e-7, type=float, metavar='LR',
                            help='initial learning rate during warmup phase; default is args.lr')

    def step(self, epoch, val_loss=None):
        """Update the learning rate at the end of the given epoch."""
        super().step(epoch, val_loss)
        # we don't change the learning rate at epoch boundaries
        return self.optimizer.get_lr()

    def step_update(self, num_updates):
        """Update the learning rate after each update."""
        if num_updates < self.max_lr_updates:
            self.lr = self.lr_steps[num_updates]
        else:
            self.lr = self.min_lr
        self.optimizer.set_lr(self.lr)
        return self.lr

    def __repr__(self):
        class_name = self.__class__.__name__
        s = '{}'.format(class_name)
        if self.warmup_steps > 0:
            s += '\n \t LR changes from {} to {} in {} steps'.format(self.min_lr, self.max_lr, self.warmup_steps)
        s += '\n \t LR changes from {} to {} in {} steps'.format(self.max_lr, self.min_lr, self.remaining_steps)
        return s
@register_lr_scheduler('polynomial_decay')
class PolynomialDecaySchedule(FairseqLRScheduler):
    """Decay the LR on a fixed schedule."""

    def __init__(self, args, optimizer):
        super().__init__(args, optimizer)

        # set defaults
        args.warmup_updates = getattr(args, 'warmup_updates', 0) or 0

        self.lr = args.lr[0]
        if args.warmup_updates > 0:
            self.warmup_factor = 1. / args.warmup_updates
        else:
            self.warmup_factor = 1
        self.end_learning_rate = args.end_learning_rate
        self.total_num_update = args.total_num_update
        self.power = args.power
        self.optimizer.set_lr(self.warmup_factor * self.lr)

    @staticmethod
    def add_args(parser):
        """Add arguments to the parser for this LR scheduler."""
        parser.add_argument('--force-anneal', '--fa', type=int, metavar='N',
                            help='force annealing at specified epoch')
        parser.add_argument('--warmup-updates', default=0, type=int, metavar='N',
                            help='warmup the learning rate linearly for the first N updates')
        parser.add_argument('--end-learning-rate', default=0.0, type=float)
        parser.add_argument('--power', default=1.0, type=float)
        parser.add_argument('--total-num-update', default=1000000, type=int)

    def get_next_lr(self, epoch):
        lrs = self.args.lr
        if self.args.force_anneal is None or epoch < self.args.force_anneal:
            # use fixed LR schedule
            next_lr = lrs[min(epoch, len(lrs) - 1)]
        else:
            # anneal based on lr_shrink
            next_lr = self.optimizer.get_lr()
        return next_lr

    def step(self, epoch, val_loss=None):
        """Update the learning rate at the end of the given epoch."""
        super().step(epoch, val_loss)
        self.lr = self.get_next_lr(epoch)
        self.optimizer.set_lr(self.warmup_factor * self.lr)
        return self.optimizer.get_lr()

    def step_update(self, num_updates):
        """Update the learning rate after each update."""
        if self.args.warmup_updates > 0 and num_updates <= self.args.warmup_updates:
            self.warmup_factor = num_updates / float(self.args.warmup_updates)
            lr = self.warmup_factor * self.lr
        elif num_updates >= self.total_num_update:
            lr = self.end_learning_rate
        else:
            warmup = self.args.warmup_updates
            lr_range = self.lr - self.end_learning_rate
            pct_remaining = 1 - (num_updates - warmup) / (self.total_num_update - warmup)
            lr = lr_range * pct_remaining ** self.power + self.end_learning_rate
        self.optimizer.set_lr(lr)
        return self.optimizer.get_lr()
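
# The post-warmup polynomial decay in isolation (illustration only): with
# power=1.0 this is a straight line from lr down to end_learning_rate.
def _demo_polynomial_decay(lr=1e-4, end_lr=0.0, warmup=1000, total=10000, power=1.0):
    for n in (1000, 5500, 10000):
        pct_remaining = 1 - (n - warmup) / (total - warmup)
        print(n, (lr - end_lr) * pct_remaining ** power + end_lr)  # 1e-4, 5e-5, 0.0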
@register_lr_scheduler('reduce_lr_on_plateau')
class ReduceLROnPlateau(FairseqLRScheduler):
    """
    Decay the LR by a factor every time the validation loss plateaus.
    Also comes with an optional warmup phase, where we linearly increase the
    learning rate from some initial learning rate (``--warmup-init-lr``) until
    the configured learning rate (``--lr``). Thereafter the lr is adjusted
    according to the original reduce_on_plateau scheme.

    During warmup::

      lrs = torch.linspace(args.warmup_init_lr, args.lr, args.warmup_updates)
      lr = lrs[update_num]
    """

    def __init__(self, args, optimizer):
        super().__init__(args, optimizer)
        if len(args.lr) > 1:
            raise ValueError(
                'Cannot use a fixed learning rate schedule with reduce_lr_on_plateau. '
                'Consider --lr-scheduler=fixed instead.'
            )
        self.lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
            self.optimizer.optimizer, patience=0, factor=args.lr_shrink,
            threshold=args.lr_threshold,
        )
        warmup_end_lr = args.lr[0]
        # if no warm up, set initial lr to be args.lr[0]
        if args.warmup_init_lr < 0:
            args.warmup_init_lr = 0 if args.warmup_updates > 0 else warmup_end_lr

        # linearly warmup for the first args.warmup_updates
        if args.warmup_updates > 0:
            self.lr_step = (warmup_end_lr - args.warmup_init_lr) / args.warmup_updates

        # this flag is either set from arg when no warm up, or set by
        # step_update() when warmup finishes
        self.warmup_end = True if args.warmup_updates <= 0 else False

        # initial learning rate; this self.lr is used only during init
        # and/or the warm up period
        self.lr = args.warmup_init_lr
        self.optimizer.set_lr(self.lr)

    @staticmethod
    def add_args(parser):
        """Add arguments to the parser for this LR scheduler."""
        parser.add_argument('--lr-shrink', default=0.1, type=float, metavar='LS',
                            help='shrink factor for annealing, lr_new = (lr * lr_shrink)')
        parser.add_argument('--lr-threshold', default=1e-4, type=float, metavar='LT',
                            help='Threshold for measuring the new optimum, to only focus on significant changes')
        parser.add_argument('--warmup-updates', default=0, type=int, metavar='N',
                            help='warmup the learning rate linearly for the first N updates')
        parser.add_argument('--warmup-init-lr', default=-1, type=float, metavar='LR',
                            help='initial learning rate during warmup phase; default is args.lr')

    def state_dict(self):
        """Return the LR scheduler state dict."""
        return {
            'best': self.lr_scheduler.best,
            'last_epoch': self.lr_scheduler.last_epoch,
        }

    def load_state_dict(self, state_dict):
        """Load an LR scheduler state dict."""
        self.lr_scheduler.best = state_dict['best']
        if 'last_epoch' in state_dict:
            self.lr_scheduler.last_epoch = state_dict['last_epoch']

    def step(self, epoch, val_loss=None):
        """
        Update the learning rate at the end of the given epoch if warmup
        finishes; otherwise no update of lr on epoch boundaries.
        """
        if val_loss is not None and self.warmup_end is True:
            self.lr_scheduler.step(val_loss, epoch)
        else:
            self.lr_scheduler.last_epoch = epoch
        return self.optimizer.get_lr()

    def step_update(self, num_updates):
        """Update the learning rate after each update."""
        # if there is warmup
        if self.args.warmup_updates > 0:
            if num_updates <= self.args.warmup_updates:
                self.lr = self.args.warmup_init_lr + num_updates * self.lr_step
                self.optimizer.set_lr(self.lr)
            elif self.warmup_end is False:
                self.warmup_end = True
        # else do nothing
        return self.optimizer.get_lr()
@register_lr_scheduler('tri_stage') class TriStageLRSchedule(FairseqLRScheduler): 'Tri-stage learning rate scheduler.\n\n Implements the learning rate scheduler in https://arxiv.org/pdf/1904.08779.pdf\n\n Similar to the inverse_sqrt scheduler, but tri_stage employs\n three-stage LR scheduling:\n\n - warmup stage, starting from `lr` * `init_lr_scale`, linearly\n increased to `lr` in `warmup_steps` iterations\n\n - hold stage, after `warmup_steps`, keep the LR as `lr` for `hold_steps`\n iterations\n\n - decay stage, after the hold stage, decay LR exponentially to\n `lr` * `final_lr_scale` in `decay_steps`;\n after that the LR is kept at `final_lr_scale` * `lr`\n\n During warmup::\n\n init_lr = args.init_lr_scale * args.lr\n lrs = torch.linspace(init_lr, args.lr, args.warmup_steps)\n lr = lrs[update_num]\n\n During hold::\n\n lr = args.lr\n\n During decay::\n\n decay_factor = - math.log(args.final_lr_scale) / args.decay_steps\n lr = args.lr * exp(- (update_num - warmup_steps - hold_steps) * decay_factor)\n\n After that::\n\n lr = args.lr * args.final_lr_scale\n ' def __init__(self, args, optimizer): super().__init__(args, optimizer) if (len(args.lr) > 1): raise ValueError('Cannot use a fixed learning rate schedule with tri-stage lr. Consider --lr-scheduler=fixed instead.') self.peak_lr = args.lr[0] self.init_lr = (args.init_lr_scale * args.lr[0]) self.final_lr = (args.final_lr_scale * args.lr[0]) self.warmup_steps = args.warmup_steps self.hold_steps = args.hold_steps self.decay_steps = args.decay_steps self.warmup_rate = ((self.peak_lr - self.init_lr) / self.warmup_steps) self.decay_factor = ((- math.log(args.final_lr_scale)) / args.decay_steps) self.lr = self.init_lr self.optimizer.set_lr(self.lr) @staticmethod def add_args(parser): 'Add arguments to the parser for this LR scheduler.' parser.add_argument('--warmup-steps', default=4000, type=int, metavar='N', help='warmup the learning rate linearly for the first N updates') parser.add_argument('--hold-steps', default=20000, type=int, metavar='N', help='steps in the hold stage') parser.add_argument('--decay-steps', default=60000, type=int, metavar='N', help='steps in the decay stage') parser.add_argument('--init-lr-scale', default=0.01, type=float, help='initial learning rate scale during warmup phase; default is 0.01') parser.add_argument('--final-lr-scale', default=0.01, type=float, help='final learning rate scale; default is 0.01') def _decide_stage(self, update_step): '\n return stage, and the corresponding steps within the current stage\n ' if (update_step < self.warmup_steps): return (0, update_step) offset = self.warmup_steps if (update_step < (offset + self.hold_steps)): return (1, (update_step - offset)) offset += self.hold_steps if (update_step <= (offset + self.decay_steps)): return (2, (update_step - offset)) offset += self.decay_steps return (3, (update_step - offset)) def step(self, epoch, val_loss=None): 'Update the learning rate at the end of the given epoch.' super().step(epoch, val_loss) return self.optimizer.get_lr() def step_update(self, num_updates): 'Update the learning rate after each update.' (stage, steps_in_stage) = self._decide_stage(num_updates) if (stage == 0): self.lr = (self.init_lr + (self.warmup_rate * steps_in_stage)) elif (stage == 1): self.lr = self.peak_lr elif (stage == 2): self.lr = (self.peak_lr * math.exp(((- self.decay_factor) * steps_in_stage))) elif (stage == 3): self.lr = self.final_lr else: raise ValueError('Undefined stage') self.optimizer.set_lr(self.lr) return self.lr
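# Standalone sketch of the tri-stage arithmetic above; the step counts and
# lr values are illustrative, not fairseq defaults.
import math

def tri_stage_lr(step, peak_lr=1e-3, init_scale=0.01, final_scale=0.01,
                 warmup=100, hold=200, decay=300):
    init_lr, final_lr = peak_lr * init_scale, peak_lr * final_scale
    if step < warmup:                  # stage 0: linear warmup
        return init_lr + (peak_lr - init_lr) * step / warmup
    if step < warmup + hold:           # stage 1: hold at peak
        return peak_lr
    if step <= warmup + hold + decay:  # stage 2: exponential decay
        decay_factor = -math.log(final_scale) / decay
        return peak_lr * math.exp(-decay_factor * (step - warmup - hold))
    return final_lr                    # stage 3: constant floor

for s in (0, 100, 250, 450, 700):
    print(s, tri_stage_lr(s))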
@register_lr_scheduler('triangular') class TriangularSchedule(FairseqLRScheduler): 'Assign LR based on a triangular cyclical schedule.\n\n See https://arxiv.org/pdf/1506.01186.pdf for details.\n ' def __init__(self, args, optimizer): super().__init__(args, optimizer) if (len(args.lr) > 1): raise ValueError('Cannot use a fixed learning rate schedule with triangular. Consider --lr-scheduler=fixed instead.') lr = args.lr[0] assert (args.max_lr > lr), 'max_lr must be more than lr' self.min_lr = lr self.max_lr = args.max_lr self.stepsize = (args.lr_period_updates // 2) self.lr_shrink = args.lr_shrink self.shrink_min = args.shrink_min self.lr = self.min_lr self.optimizer.set_lr(self.lr) @staticmethod def add_args(parser): 'Add arguments to the parser for this LR scheduler.' parser.add_argument('--max-lr', required=True, type=float, metavar='LR', help='max learning rate, must be more than args.lr') parser.add_argument('--lr-period-updates', default=5000, type=float, metavar='LR', help='initial number of updates per period (cycle length)') parser.add_argument('--lr-shrink', default=0.1, type=float, metavar='LS', help='shrink factor for annealing') parser.add_argument('--shrink-min', action='store_true', help='if set, also shrinks min lr') def step(self, epoch, val_loss=None): 'Update the learning rate at the end of the given epoch.' super().step(epoch, val_loss) return self.optimizer.get_lr() def step_update(self, num_updates): 'Update the learning rate after each update.' cycle = math.floor((num_updates / (2 * self.stepsize))) lr_shrink = (self.lr_shrink ** cycle) max_lr = (self.max_lr * lr_shrink) if self.shrink_min: min_lr = (self.min_lr * lr_shrink) else: min_lr = self.min_lr x = abs((((num_updates / self.stepsize) - (2 * (cycle + 1))) + 1)) self.lr = (min_lr + ((max_lr - min_lr) * max(0, (1 - x)))) self.optimizer.set_lr(self.lr) return self.lr
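# Standalone sketch of the triangular cycle above (Smith, 2015); values are
# illustrative. Each cycle spans 2 * stepsize updates and the peak shrinks
# by lr_shrink per completed cycle.
import math

def triangular_lr(step, min_lr=1e-4, max_lr=1e-3, stepsize=2500, lr_shrink=0.1):
    cycle = math.floor(step / (2 * stepsize))
    max_lr_c = max_lr * (lr_shrink ** cycle)        # shrink the peak each cycle
    x = abs(step / stepsize - 2 * (cycle + 1) + 1)  # 1 -> 0 -> 1 within a cycle
    return min_lr + (max_lr_c - min_lr) * max(0, 1 - x)

for s in (0, 2500, 5000, 7500, 10000):
    print(s, triangular_lr(s))  # valley, peak, valley, shrunken peak, valley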
@register_optimizer('nag') class FairseqNAG(FairseqOptimizer): def __init__(self, args, params): super().__init__(args) self._optimizer = NAG(params, **self.optimizer_config) @staticmethod def add_args(parser): 'Add optimizer-specific arguments to the parser.' parser.add_argument('--momentum', default=0.99, type=float, metavar='M', help='momentum factor') parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD', help='weight decay') @property def optimizer_config(self): '\n Return a kwarg dictionary that will be used to override optimizer\n args stored in checkpoints. This allows us to load a checkpoint and\n resume training using a different set of optimizer args, e.g., with a\n different learning rate.\n ' return {'lr': self.args.lr[0], 'momentum': self.args.momentum, 'weight_decay': self.args.weight_decay}
class NAG(Optimizer): def __init__(self, params, lr=required, momentum=0, weight_decay=0): defaults = dict(lr=lr, lr_old=lr, momentum=momentum, weight_decay=weight_decay) super(NAG, self).__init__(params, defaults) @property def supports_memory_efficient_fp16(self): return True @property def supports_flat_params(self): return True def step(self, closure=None): 'Performs a single optimization step.\n\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n ' loss = None if (closure is not None): loss = closure() for group in self.param_groups: weight_decay = group['weight_decay'] momentum = group['momentum'] lr = group['lr'] lr_old = group.get('lr_old', lr) lr_correct = (lr / lr_old) for p in group['params']: if (p.grad is None): continue p_data_fp32 = p.data.float() d_p = p.grad.data.float() param_state = self.state[p] if ('momentum_buffer' not in param_state): param_state['momentum_buffer'] = torch.zeros_like(d_p) else: param_state['momentum_buffer'] = param_state['momentum_buffer'].type_as(d_p) buf = param_state['momentum_buffer'] if (weight_decay != 0): p_data_fp32.mul_((1 - (lr * weight_decay))) p_data_fp32.add_(buf, alpha=((momentum * momentum) * lr_correct)) p_data_fp32.add_(d_p, alpha=((- (1 + momentum)) * lr)) buf.mul_((momentum * lr_correct)).add_(d_p, alpha=(- lr)) p.data.copy_(p_data_fp32) group['lr_old'] = lr return loss
@register_optimizer('sgd') class SGD(FairseqOptimizer): def __init__(self, args, params): super().__init__(args) self._optimizer = torch.optim.SGD(params, **self.optimizer_config) @staticmethod def add_args(parser): 'Add optimizer-specific arguments to the parser.' parser.add_argument('--momentum', default=0.0, type=float, metavar='M', help='momentum factor') parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD', help='weight decay') @property def optimizer_config(self): '\n Return a kwarg dictionary that will be used to override optimizer\n args stored in checkpoints. This allows us to load a checkpoint and\n resume training using a different set of optimizer args, e.g., with a\n different learning rate.\n ' return {'lr': self.args.lr[0], 'momentum': self.args.momentum, 'weight_decay': self.args.weight_decay} @property def supports_flat_params(self): return True
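# Sketch of what optimizer_config enables: when resuming from a checkpoint,
# fairseq rebuilds the torch optimizer from these freshly parsed args instead
# of trusting the hyperparameters stored in the checkpoint. `ExampleArgs` is
# a stand-in, not a real fairseq namespace.
import torch

class ExampleArgs(object):
    lr = [0.05]          # fairseq stores --lr as a list; index 0 is used
    momentum = 0.9
    weight_decay = 1e-4

args = ExampleArgs()
params = [torch.nn.Parameter(torch.zeros(3))]
config = {'lr': args.lr[0], 'momentum': args.momentum,
          'weight_decay': args.weight_decay}
opt = torch.optim.SGD(params, **config)
print(opt.param_groups[0]['lr'])  # 0.05, taken from args rather than a checkpoint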
def get_preprocessing_parser(default_task='translation'): parser = get_parser('Preprocessing', default_task) add_preprocess_args(parser) return parser
def get_training_parser(default_task='translation'): parser = get_parser('Trainer', default_task) add_dataset_args(parser, train=True) add_distributed_training_args(parser) add_model_args(parser) add_optimization_args(parser) add_checkpoint_args(parser) return parser
def get_generation_parser(interactive=False, default_task='translation'): parser = get_parser('Generation', default_task) add_dataset_args(parser, gen=True) add_generation_args(parser) if interactive: add_interactive_args(parser) return parser
def get_interactive_generation_parser(default_task='translation'): return get_generation_parser(interactive=True, default_task=default_task)
def get_eval_lm_parser(default_task='language_modeling'): parser = get_parser('Evaluate Language Model', default_task) add_dataset_args(parser, gen=True) add_eval_lm_args(parser) return parser
def get_validation_parser(default_task=None): parser = get_parser('Validation', default_task) add_dataset_args(parser, train=True) group = parser.add_argument_group('Evaluation') add_common_eval_args(group) return parser
def eval_str_list(x, type=float): if (x is None): return None if isinstance(x, str): x = eval(x) try: return list(map(type, x)) except TypeError: return [type(x)]
def eval_bool(x, default=False): if (x is None): return default try: return bool(eval(x)) except TypeError: return default
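# Illustrative calls showing how the two eval helpers above behave; these
# asserts are examples, not part of fairseq.
assert eval_str_list('0.25', type=float) == [0.25]           # scalar string -> one-element list
assert eval_str_list('0.1,0.01', type=float) == [0.1, 0.01]  # eval() yields a tuple, then map()
assert eval_str_list(None) is None
assert eval_bool('True') is True
assert eval_bool(None, default=False) is False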
def parse_args_and_arch(parser: argparse.ArgumentParser, input_args: List[str]=None, parse_known: bool=False, suppress_defaults: bool=False, modify_parser: Optional[Callable[([argparse.ArgumentParser], None)]]=None): '\n Args:\n parser (ArgumentParser): the parser\n input_args (List[str]): strings to parse, defaults to sys.argv\n parse_known (bool): only parse known arguments, similar to\n `ArgumentParser.parse_known_args`\n suppress_defaults (bool): parse while ignoring all default values\n modify_parser (Optional[Callable[[ArgumentParser], None]]):\n function to modify the parser, e.g., to set default values\n ' if suppress_defaults: args = parse_args_and_arch(parser, input_args=input_args, parse_known=parse_known, suppress_defaults=False) suppressed_parser = argparse.ArgumentParser(add_help=False, parents=[parser]) suppressed_parser.set_defaults(**{k: None for (k, v) in vars(args).items()}) args = suppressed_parser.parse_args(input_args) return argparse.Namespace(**{k: v for (k, v) in vars(args).items() if (v is not None)}) from fairseq.models import ARCH_MODEL_REGISTRY, ARCH_CONFIG_REGISTRY if (modify_parser is not None): modify_parser(parser) (args, _) = parser.parse_known_args(input_args) if hasattr(args, 'arch'): model_specific_group = parser.add_argument_group('Model-specific configuration', argument_default=argparse.SUPPRESS) ARCH_MODEL_REGISTRY[args.arch].add_args(model_specific_group) from fairseq.registry import REGISTRIES for (registry_name, REGISTRY) in REGISTRIES.items(): choice = getattr(args, registry_name, None) if (choice is not None): cls = REGISTRY['registry'][choice] if hasattr(cls, 'add_args'): cls.add_args(parser) if hasattr(args, 'task'): from fairseq.tasks import TASK_REGISTRY TASK_REGISTRY[args.task].add_args(parser) if getattr(args, 'use_bmuf', False): from fairseq.optim.bmuf import FairseqBMUF FairseqBMUF.add_args(parser) if (modify_parser is not None): modify_parser(parser) if parse_known: (args, extra) = parser.parse_known_args(input_args) else: args = parser.parse_args(input_args) extra = None if (hasattr(args, 'max_sentences_valid') and (args.max_sentences_valid is None)): args.max_sentences_valid = args.max_sentences if (hasattr(args, 'max_tokens_valid') and (args.max_tokens_valid is None)): args.max_tokens_valid = args.max_tokens if getattr(args, 'memory_efficient_fp16', False): args.fp16 = True if hasattr(args, 'arch'): ARCH_CONFIG_REGISTRY[args.arch](args) if parse_known: return (args, extra) else: return args
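# Hedged usage sketch for parse_args_and_arch; assumes a working fairseq
# installation, and the data path and architecture below are illustrative.
parser = get_training_parser()
args = parse_args_and_arch(parser, input_args=[
    'data-bin/example',       # positional data argument added by the task
    '--arch', 'transformer',  # triggers ARCH_CONFIG_REGISTRY defaults for this arch
    '--optimizer', 'sgd',
    '--lr', '0.1',
])
print(args.arch, args.lr)     # 'transformer' [0.1]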
def get_parser(desc, default_task='translation'): usr_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False) usr_parser.add_argument('--user-dir', default=None) (usr_args, _) = usr_parser.parse_known_args() utils.import_user_module(usr_args) parser = argparse.ArgumentParser(allow_abbrev=False) parser.add_argument('--no-progress-bar', action='store_true', help='disable progress bar') parser.add_argument('--log-interval', type=int, default=1000, metavar='N', help='log progress every N batches (when progress bar is disabled)') parser.add_argument('--log-format', default=None, help='log format to use', choices=['json', 'none', 'simple', 'tqdm']) parser.add_argument('--tensorboard-logdir', metavar='DIR', default='', help='path to save logs for tensorboard, should match --logdir of running tensorboard (default: no tensorboard logging)') parser.add_argument('--seed', default=1, type=int, metavar='N', help='pseudo random number generator seed') parser.add_argument('--cpu', action='store_true', help='use CPU instead of CUDA') parser.add_argument('--fp16', action='store_true', help='use FP16') parser.add_argument('--memory-efficient-fp16', action='store_true', help='use a memory-efficient version of FP16 training; implies --fp16') parser.add_argument('--fp16-no-flatten-grads', action='store_true', help="don't flatten FP16 grads tensor") parser.add_argument('--fp16-init-scale', default=(2 ** 7), type=int, help='default FP16 loss scale') parser.add_argument('--fp16-scale-window', type=int, help='number of updates before increasing loss scale') parser.add_argument('--fp16-scale-tolerance', default=0.0, type=float, help='pct of updates that can overflow before decreasing the loss scale') parser.add_argument('--min-loss-scale', default=0.0001, type=float, metavar='D', help='minimum FP16 loss scale, after which training is stopped') parser.add_argument('--threshold-loss-scale', type=float, help='threshold FP16 loss scale from below') parser.add_argument('--user-dir', default=None, help='path to a python module containing custom extensions (tasks and/or architectures)') parser.add_argument('--empty-cache-freq', default=0, type=int, help='how often to clear the PyTorch CUDA cache (0 to disable)') parser.add_argument('--all-gather-list-size', default=16384, type=int, help='number of bytes reserved for gathering stats from workers') from fairseq.registry import REGISTRIES for (registry_name, REGISTRY) in REGISTRIES.items(): parser.add_argument(('--' + registry_name.replace('_', '-')), default=REGISTRY['default'], choices=REGISTRY['registry'].keys()) from fairseq.tasks import TASK_REGISTRY parser.add_argument('--task', metavar='TASK', default=default_task, choices=TASK_REGISTRY.keys(), help='task') return parser
def add_preprocess_args(parser): group = parser.add_argument_group('Preprocessing') group.add_argument('-s', '--source-lang', default=None, metavar='SRC', help='source language') group.add_argument('-t', '--target-lang', default=None, metavar='TARGET', help='target language') group.add_argument('--trainpref', metavar='FP', default=None, help='train file prefix') group.add_argument('--validpref', metavar='FP', default=None, help='comma separated, valid file prefixes') group.add_argument('--testpref', metavar='FP', default=None, help='comma separated, test file prefixes') group.add_argument('--align-suffix', metavar='FP', default=None, help='alignment file suffix') group.add_argument('--destdir', metavar='DIR', default='data-bin', help='destination dir') group.add_argument('--thresholdtgt', metavar='N', default=0, type=int, help='map words appearing less than threshold times to unknown') group.add_argument('--thresholdsrc', metavar='N', default=0, type=int, help='map words appearing less than threshold times to unknown') group.add_argument('--tgtdict', metavar='FP', help='reuse given target dictionary') group.add_argument('--srcdict', metavar='FP', help='reuse given source dictionary') group.add_argument('--nwordstgt', metavar='N', default=(- 1), type=int, help='number of target words to retain') group.add_argument('--nwordssrc', metavar='N', default=(- 1), type=int, help='number of source words to retain') group.add_argument('--alignfile', metavar='ALIGN', default=None, help='an alignment file (optional)') parser.add_argument('--dataset-impl', metavar='FORMAT', default='mmap', choices=get_available_dataset_impl(), help='output dataset implementation') group.add_argument('--joined-dictionary', action='store_true', help='Generate joined dictionary') group.add_argument('--only-source', action='store_true', help='Only process the source language') group.add_argument('--padding-factor', metavar='N', default=8, type=int, help='Pad dictionary size to be multiple of N') group.add_argument('--workers', metavar='N', default=1, type=int, help='number of parallel workers') return parser
def add_dataset_args(parser, train=False, gen=False): group = parser.add_argument_group('Dataset and data loading') group.add_argument('--num-workers', default=1, type=int, metavar='N', help='how many subprocesses to use for data loading') group.add_argument('--skip-invalid-size-inputs-valid-test', action='store_true', help='ignore too long or too short lines in valid and test set') group.add_argument('--max-tokens', type=int, metavar='N', help='maximum number of tokens in a batch') group.add_argument('--max-sentences', '--batch-size', type=int, metavar='N', help='maximum number of sentences in a batch') group.add_argument('--required-batch-size-multiple', default=8, type=int, metavar='N', help='batch size will be a multiplier of this value') parser.add_argument('--dataset-impl', metavar='FORMAT', choices=get_available_dataset_impl(), help='output dataset implementation') if train: group.add_argument('--train-subset', default='train', metavar='SPLIT', help='data subset to use for training (e.g. train, valid, test)') group.add_argument('--valid-subset', default='valid', metavar='SPLIT', help='comma separated list of data subsets to use for validation (e.g. train, valid, test)') group.add_argument('--validate-interval', type=int, default=1, metavar='N', help='validate every N epochs') group.add_argument('--fixed-validation-seed', default=None, type=int, metavar='N', help='specified random seed for validation') group.add_argument('--disable-validation', action='store_true', help='disable validation') group.add_argument('--max-tokens-valid', type=int, metavar='N', help='maximum number of tokens in a validation batch (defaults to --max-tokens)') group.add_argument('--max-sentences-valid', type=int, metavar='N', help='maximum number of sentences in a validation batch (defaults to --max-sentences)') group.add_argument('--curriculum', default=0, type=int, metavar='N', help="don't shuffle batches for first N epochs") if gen: group.add_argument('--gen-subset', default='test', metavar='SPLIT', help='data subset to generate (train, valid, test)') group.add_argument('--num-shards', default=1, type=int, metavar='N', help='shard generation over N shards') group.add_argument('--shard-id', default=0, type=int, metavar='ID', help='id of the shard to generate (id < num_shards)') return group
def add_distributed_training_args(parser): group = parser.add_argument_group('Distributed training') group.add_argument('--distributed-world-size', type=int, metavar='N', default=max(1, torch.cuda.device_count()), help='total number of GPUs across all nodes (default: all visible GPUs)') group.add_argument('--distributed-rank', default=0, type=int, help='rank of the current worker') group.add_argument('--distributed-backend', default='nccl', type=str, help='distributed backend') group.add_argument('--distributed-init-method', default=None, type=str, help='typically tcp://hostname:port that will be used to establish the initial connection') group.add_argument('--distributed-port', default=(- 1), type=int, help='port number (not required if using --distributed-init-method)') group.add_argument('--device-id', '--local_rank', default=0, type=int, help='which GPU to use (usually configured automatically)') group.add_argument('--distributed-no-spawn', action='store_true', help='do not spawn multiple processes even if multiple GPUs are visible') group.add_argument('--ddp-backend', default='c10d', type=str, choices=['c10d', 'no_c10d'], help='DistributedDataParallel backend') group.add_argument('--bucket-cap-mb', default=25, type=int, metavar='MB', help='bucket size for reduction') group.add_argument('--fix-batches-to-gpus', action='store_true', help="don't shuffle batches between GPUs; this reduces overall randomness and may affect precision but avoids the cost of re-reading the data") group.add_argument('--find-unused-parameters', default=False, action='store_true', help='enable unused parameter detection (not applicable to --ddp-backend=no_c10d)') group.add_argument('--fast-stat-sync', default=False, action='store_true', help='[deprecated] this is now defined per Criterion') group.add_argument('--broadcast-buffers', default=False, action='store_true', help='copy non-trainable parameters between GPUs, such as batchnorm population statistics') return group
def add_optimization_args(parser): group = parser.add_argument_group('Optimization') group.add_argument('--max-epoch', '--me', default=0, type=int, metavar='N', help='force stop training at specified epoch') group.add_argument('--max-update', '--mu', default=0, type=int, metavar='N', help='force stop training at specified update') group.add_argument('--clip-norm', default=25, type=float, metavar='NORM', help='clip threshold of gradients') group.add_argument('--sentence-avg', action='store_true', help='normalize gradients by the number of sentences in a batch (default is to normalize by number of tokens)') group.add_argument('--update-freq', default='1', metavar='N1,N2,...,N_K', type=(lambda uf: eval_str_list(uf, type=int)), help='update parameters every N_i batches, when in epoch i') group.add_argument('--lr', '--learning-rate', default='0.25', type=eval_str_list, metavar='LR_1,LR_2,...,LR_N', help='learning rate for the first N epochs; all epochs >N using LR_N (note: this may be interpreted differently depending on --lr-scheduler)') group.add_argument('--min-lr', default=(- 1), type=float, metavar='LR', help='stop training when the learning rate reaches this minimum') group.add_argument('--use-bmuf', default=False, action='store_true', help='specify global optimizer for syncing models on different GPUs/shards') return group
def add_checkpoint_args(parser): group = parser.add_argument_group('Checkpointing') group.add_argument('--save-dir', metavar='DIR', default='checkpoints', help='path to save checkpoints') group.add_argument('--restore-file', default='checkpoint_last.pt', help='filename from which to load checkpoint (default: <save-dir>/checkpoint_last.pt)') group.add_argument('--reset-dataloader', action='store_true', help='if set, does not reload dataloader state from the checkpoint') group.add_argument('--reset-lr-scheduler', action='store_true', help='if set, does not load lr scheduler state from the checkpoint') group.add_argument('--reset-meters', action='store_true', help='if set, does not load meters from the checkpoint') group.add_argument('--reset-optimizer', action='store_true', help='if set, does not load optimizer state from the checkpoint') group.add_argument('--optimizer-overrides', default='{}', type=str, metavar='DICT', help='a dictionary used to override optimizer args when loading a checkpoint') group.add_argument('--save-interval', type=int, default=1, metavar='N', help='save a checkpoint every N epochs') group.add_argument('--save-interval-updates', type=int, default=0, metavar='N', help='save a checkpoint (and validate) every N updates') group.add_argument('--keep-interval-updates', type=int, default=(- 1), metavar='N', help='keep the last N checkpoints saved with --save-interval-updates') group.add_argument('--keep-last-epochs', type=int, default=(- 1), metavar='N', help='keep last N epoch checkpoints') group.add_argument('--keep-best-checkpoints', type=int, default=(- 1), metavar='N', help='keep best N checkpoints based on scores') group.add_argument('--no-save', action='store_true', help="don't save models or checkpoints") group.add_argument('--no-epoch-checkpoints', action='store_true', help='only store last and best checkpoints') group.add_argument('--no-last-checkpoints', action='store_true', help="don't store last checkpoints") group.add_argument('--no-save-optimizer-state', action='store_true', help="don't save optimizer-state as part of checkpoint") group.add_argument('--best-checkpoint-metric', type=str, default='loss', help='metric to use for saving "best" checkpoints') group.add_argument('--maximize-best-checkpoint-metric', action='store_true', help='select the largest metric value for saving "best" checkpoints') group.add_argument('--patience', type=int, default=(- 1), metavar='N', help="early stop training if valid performance doesn't improve for N consecutive validation runs; note that this is influenced by --validate-interval") return group
def add_common_eval_args(group): group.add_argument('--path', metavar='FILE', help='path(s) to model file(s), colon separated') group.add_argument('--remove-bpe', nargs='?', const='@@ ', default=None, help='remove BPE tokens before scoring (can be set to sentencepiece)') group.add_argument('--quiet', action='store_true', help='only print final scores') group.add_argument('--model-overrides', default='{}', type=str, metavar='DICT', help='a dictionary used to override model args at generation that were used during model training') group.add_argument('--results-path', metavar='RESDIR', type=str, default=None, help='path to save eval results (optional)')
def add_eval_lm_args(parser): group = parser.add_argument_group('LM Evaluation') add_common_eval_args(group) group.add_argument('--output-word-probs', action='store_true', help='if set, outputs words and their predicted log probabilities to standard output') group.add_argument('--output-word-stats', action='store_true', help='if set, outputs word statistics such as word count, average probability, etc') group.add_argument('--context-window', default=0, type=int, metavar='N', help='ensures that every evaluated token has access to a context of at least this size, if possible') group.add_argument('--softmax-batch', default=sys.maxsize, type=int, metavar='N', help='if BxT is more than this, will batch the softmax over vocab to this amount of tokens in order to fit into GPU memory') group.add_argument('--res-file', type=str, default='', help='File name to save results')
def add_generation_args(parser): group = parser.add_argument_group('Generation') add_common_eval_args(group) group.add_argument('--beam', default=5, type=int, metavar='N', help='beam size') group.add_argument('--nbest', default=1, type=int, metavar='N', help='number of hypotheses to output') group.add_argument('--max-len-a', default=0, type=float, metavar='N', help='generate sequences of maximum length ax + b, where x is the source length') group.add_argument('--max-len-b', default=200, type=int, metavar='N', help='generate sequences of maximum length ax + b, where x is the source length') group.add_argument('--min-len', default=1, type=float, metavar='N', help='minimum generation length') group.add_argument('--match-source-len', default=False, action='store_true', help='generations should match the source length') group.add_argument('--no-early-stop', action='store_true', help='deprecated') group.add_argument('--unnormalized', action='store_true', help='compare unnormalized hypothesis scores') group.add_argument('--no-beamable-mm', action='store_true', help="don't use BeamableMM in attention layers") group.add_argument('--lenpen', default=1, type=float, help='length penalty: <1.0 favors shorter, >1.0 favors longer sentences') group.add_argument('--unkpen', default=0, type=float, help='unknown word penalty: <0 produces more unks, >0 produces fewer') group.add_argument('--replace-unk', nargs='?', const=True, default=None, help='perform unknown replacement (optionally with alignment dictionary)') group.add_argument('--sacrebleu', action='store_true', help='score with sacrebleu') group.add_argument('--score-reference', action='store_true', help='just score the reference translation') group.add_argument('--prefix-size', default=0, type=int, metavar='PS', help='initialize generation by target prefix of given length') group.add_argument('--no-repeat-ngram-size', default=0, type=int, metavar='N', help='ngram blocking such that this size ngram cannot be repeated in the generation') group.add_argument('--sampling', action='store_true', help='sample hypotheses instead of using beam search') group.add_argument('--sampling-topk', default=(- 1), type=int, metavar='PS', help='sample from top K likely next words instead of all words') group.add_argument('--sampling-topp', default=(- 1.0), type=float, metavar='PS', help='sample from the smallest set whose cumulative probability mass exceeds p for next words') group.add_argument('--temperature', default=1.0, type=float, metavar='N', help='temperature for generation') group.add_argument('--diverse-beam-groups', default=(- 1), type=int, metavar='N', help='number of groups for Diverse Beam Search') group.add_argument('--diverse-beam-strength', default=0.5, type=float, metavar='N', help='strength of diversity penalty for Diverse Beam Search') group.add_argument('--diversity-rate', default=(- 1.0), type=float, metavar='N', help='strength of diversity penalty for Diverse Siblings Search') group.add_argument('--print-alignment', action='store_true', help='if set, uses attention feedback to compute and print alignment to source tokens') group.add_argument('--print-step', action='store_true') group.add_argument('--iter-decode-eos-penalty', default=0.0, type=float, metavar='N', help='if > 0.0, penalizes early stopping in decoding.') group.add_argument('--iter-decode-max-iter', default=10, type=int, metavar='N', help='maximum iterations for iterative refinement.') group.add_argument('--iter-decode-force-max-iter', action='store_true', help='if set, run exactly the 
maximum number of iterations without early stopping') group.add_argument('--iter-decode-with-beam', default=1, type=int, metavar='N', help='if > 1, the model will generate translations varying by length.') group.add_argument('--iter-decode-with-external-reranker', action='store_true', help='if set, the last checkpoint is assumed to be a reranker for rescoring the translations') group.add_argument('--retain-iter-history', action='store_true', help='if set, decoding returns the whole history of iterative refinement') group.add_argument('--decoding-format', default=None, type=str, choices=['unigram', 'ensemble', 'vote', 'dp', 'bs']) return group
def add_interactive_args(parser): group = parser.add_argument_group('Interactive') group.add_argument('--buffer-size', default=0, type=int, metavar='N', help='read this many sentences into a buffer before processing them') group.add_argument('--input', default='-', type=str, metavar='FILE', help='file to read from; use - for stdin')
def add_model_args(parser): group = parser.add_argument_group('Model configuration') from fairseq.models import ARCH_MODEL_REGISTRY group.add_argument('--arch', '-a', default='fconv', metavar='ARCH', choices=ARCH_MODEL_REGISTRY.keys(), help='Model Architecture') return group
class MultiprocessingPdb(pdb.Pdb): 'A Pdb wrapper that works in a multiprocessing environment.\n\n Usage: `from fairseq import pdb; pdb.set_trace()`\n ' def __init__(self): pdb.Pdb.__init__(self, nosigint=True) def _cmdloop(self): stdin_bak = sys.stdin with _stdin_lock: try: if (_stdin_fd is not None): if (not _stdin[0]): _stdin[0] = os.fdopen(_stdin_fd) sys.stdin = _stdin[0] self.cmdloop() finally: sys.stdin = stdin_bak
def set_trace(): pdb = MultiprocessingPdb() pdb.set_trace(sys._getframe().f_back)
def build_progress_bar(args, iterator, epoch=None, prefix=None, default='tqdm', no_progress_bar='none'): if (args.log_format is None): args.log_format = (no_progress_bar if args.no_progress_bar else default) if ((args.log_format == 'tqdm') and (not sys.stderr.isatty())): args.log_format = 'simple' if (args.log_format == 'json'): bar = json_progress_bar(iterator, epoch, prefix, args.log_interval) elif (args.log_format == 'none'): bar = noop_progress_bar(iterator, epoch, prefix) elif (args.log_format == 'simple'): bar = simple_progress_bar(iterator, epoch, prefix, args.log_interval) elif (args.log_format == 'tqdm'): bar = tqdm_progress_bar(iterator, epoch, prefix) else: raise ValueError('Unknown log format: {}'.format(args.log_format)) if (args.tensorboard_logdir and distributed_utils.is_master(args)): try: import palaas from fairseq.fb_tbmf_wrapper import fb_tbmf_wrapper bar = fb_tbmf_wrapper(bar, args, args.log_interval) except ImportError: bar = tensorboard_log_wrapper(bar, args.tensorboard_logdir, args) return bar
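# Hedged usage sketch for build_progress_bar; `args` is a stand-in namespace
# carrying only the fields the function reads, not a full fairseq parse.
import argparse
import logging

logging.basicConfig(level=logging.INFO)  # simple_progress_bar logs via `logger`
args = argparse.Namespace(log_format='simple', no_progress_bar=False,
                          log_interval=2, tensorboard_logdir='')
with build_progress_bar(args, range(10), epoch=1) as bar:
    for i, _ in enumerate(bar):
        bar.log({'loss': 1.0 / (i + 1)})  # shown every `log_interval` batches
    bar.print({'loss': 0.1})              # end-of-epoch summary line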
def format_stat(stat): if isinstance(stat, Number): stat = '{:g}'.format(stat) elif isinstance(stat, AverageMeter): stat = '{:.3f}'.format(stat.avg) elif isinstance(stat, TimeMeter): stat = '{:g}'.format(round(stat.avg)) elif isinstance(stat, StopwatchMeter): stat = '{:g}'.format(round(stat.sum)) elif torch.is_tensor(stat): stat = stat.tolist() return stat
class progress_bar(object): 'Abstract class for progress bars.' def __init__(self, iterable, epoch=None, prefix=None): self.iterable = iterable self.offset = getattr(iterable, 'offset', 0) self.epoch = epoch self.prefix = '' if (epoch is not None): self.prefix += 'epoch {:03d}'.format(epoch) if (prefix is not None): self.prefix += ' | {}'.format(prefix) def __len__(self): return len(self.iterable) def __enter__(self): return self def __exit__(self, *exc): return False def __iter__(self): raise NotImplementedError def log(self, stats, tag=None, step=None): 'Log intermediate stats according to log_interval.' raise NotImplementedError def print(self, stats, tag=None, step=None): 'Print end-of-epoch stats.' raise NotImplementedError def _str_commas(self, stats): return ', '.join((((key + '=') + stats[key].strip()) for key in stats.keys())) def _str_pipes(self, stats): return ' | '.join((((key + ' ') + stats[key].strip()) for key in stats.keys())) def _format_stats(self, stats): postfix = OrderedDict(stats) for key in postfix.keys(): postfix[key] = str(format_stat(postfix[key])) return postfix
@contextmanager def rename_logger(logger, new_name): old_name = logger.name if (new_name is not None): logger.name = new_name try: (yield logger) finally: logger.name = old_name
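# Usage sketch: temporarily retag records emitted inside the block; the
# logger name below is illustrative.
import logging

example_logger = logging.getLogger('fairseq_example')
with rename_logger(example_logger, 'valid'):
    example_logger.info('records in here carry the "valid" logger name')
print(example_logger.name)  # 'fairseq_example' again, even if the body raised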
class json_progress_bar(progress_bar): 'Log output in JSON format.' def __init__(self, iterable, epoch=None, prefix=None, log_interval=1000): super().__init__(iterable, epoch, prefix) self.log_interval = log_interval self.stats = None self.tag = None def __iter__(self): size = float(len(self.iterable)) for (i, obj) in enumerate(self.iterable, start=self.offset): (yield obj) if ((self.stats is not None) and (i > 0) and (self.log_interval is not None) and (((i + 1) % self.log_interval) == 0)): update = (((self.epoch - 1) + float((i / size))) if (self.epoch is not None) else None) stats = self._format_stats(self.stats, epoch=self.epoch, update=update) with rename_logger(logger, self.tag): logger.info(json.dumps(stats)) def log(self, stats, tag=None, step=None): 'Log intermediate stats according to log_interval.' self.stats = stats self.tag = tag def print(self, stats, tag=None, step=None): 'Print end-of-epoch stats.' self.stats = stats if (tag is not None): self.stats = OrderedDict([(((tag + '_') + k), v) for (k, v) in self.stats.items()]) stats = self._format_stats(self.stats, epoch=self.epoch) with rename_logger(logger, tag): logger.info(json.dumps(stats)) def _format_stats(self, stats, epoch=None, update=None): postfix = OrderedDict() if (epoch is not None): postfix['epoch'] = epoch if (update is not None): postfix['update'] = round(update, 3) for key in stats.keys(): postfix[key] = format_stat(stats[key]) return postfix
class noop_progress_bar(progress_bar): 'No logging.' def __init__(self, iterable, epoch=None, prefix=None): super().__init__(iterable, epoch, prefix) def __iter__(self): for obj in self.iterable: (yield obj) def log(self, stats, tag=None, step=None): 'Log intermediate stats according to log_interval.' pass def print(self, stats, tag=None, step=None): 'Print end-of-epoch stats.' pass
class simple_progress_bar(progress_bar): 'A minimal logger for non-TTY environments.' def __init__(self, iterable, epoch=None, prefix=None, log_interval=1000): super().__init__(iterable, epoch, prefix) self.log_interval = log_interval self.stats = None self.tag = None def __iter__(self): size = len(self.iterable) for (i, obj) in enumerate(self.iterable, start=self.offset): (yield obj) if ((self.stats is not None) and (i > 0) and (self.log_interval is not None) and (((i + 1) % self.log_interval) == 0)): postfix = self._str_commas(self.stats) with rename_logger(logger, self.tag): logger.info('{}: {:5d} / {:d} {}'.format(self.prefix, i, size, postfix)) def log(self, stats, tag=None, step=None): 'Log intermediate stats according to log_interval.' self.stats = self._format_stats(stats) self.tag = tag def print(self, stats, tag=None, step=None): 'Print end-of-epoch stats.' postfix = self._str_pipes(self._format_stats(stats)) with rename_logger(logger, tag): logger.info('{} | {}'.format(self.prefix, postfix))
class tqdm_progress_bar(progress_bar): 'Log to tqdm.' def __init__(self, iterable, epoch=None, prefix=None): super().__init__(iterable, epoch, prefix) from tqdm import tqdm self.tqdm = tqdm(iterable, self.prefix, leave=False) def __iter__(self): return iter(self.tqdm) def log(self, stats, tag=None, step=None): 'Log intermediate stats according to log_interval.' self.tqdm.set_postfix(self._format_stats(stats), refresh=False) def print(self, stats, tag=None, step=None): 'Print end-of-epoch stats.' postfix = self._str_pipes(self._format_stats(stats)) self.tqdm.write('{} | {}'.format(self.tqdm.desc, postfix))
class tensorboard_log_wrapper(progress_bar): 'Log to tensorboard.' def __init__(self, wrapped_bar, tensorboard_logdir, args): self.wrapped_bar = wrapped_bar self.tensorboard_logdir = tensorboard_logdir self.args = args try: from tensorboardX import SummaryWriter self.SummaryWriter = SummaryWriter self._writers = {} except ImportError: logger.warning('tensorboard or required dependencies not found, please see README for using tensorboard. (e.g. pip install tensorboardX)') self.SummaryWriter = None def _writer(self, key): if (self.SummaryWriter is None): return None if (key not in self._writers): self._writers[key] = self.SummaryWriter(os.path.join(self.tensorboard_logdir, key)) self._writers[key].add_text('args', str(vars(self.args))) self._writers[key].add_text('sys.argv', ' '.join(sys.argv)) return self._writers[key] def __iter__(self): return iter(self.wrapped_bar) def log(self, stats, tag=None, step=None): 'Log intermediate stats to tensorboard.' self._log_to_tensorboard(stats, tag, step) self.wrapped_bar.log(stats, tag=tag, step=step) def print(self, stats, tag=None, step=None): 'Print end-of-epoch stats.' self._log_to_tensorboard(stats, tag, step) self.wrapped_bar.print(stats, tag=tag, step=step) def __exit__(self, *exc): for writer in getattr(self, '_writers', {}).values(): writer.close() return False def _log_to_tensorboard(self, stats, tag=None, step=None): writer = self._writer((tag or '')) if (writer is None): return if (step is None): step = stats['num_updates'] for key in (stats.keys() - {'num_updates'}): if isinstance(stats[key], AverageMeter): writer.add_scalar(key, stats[key].val, step) elif isinstance(stats[key], Number): writer.add_scalar(key, stats[key], step)
def setup_registry(registry_name: str, base_class=None, default=None): assert registry_name.startswith('--') registry_name = registry_name[2:].replace('-', '_') REGISTRY = {} REGISTRY_CLASS_NAMES = set() if (registry_name in REGISTRIES): return REGISTRIES[registry_name] = {'registry': REGISTRY, 'default': default} def build_x(args, *extra_args, **extra_kwargs): choice = getattr(args, registry_name, None) if (choice is None): return None cls = REGISTRY[choice] if hasattr(cls, ('build_' + registry_name)): builder = getattr(cls, ('build_' + registry_name)) else: builder = cls set_defaults(args, cls) return builder(args, *extra_args, **extra_kwargs) def register_x(name): def register_x_cls(cls): if (name in REGISTRY): raise ValueError('Cannot register duplicate {} ({})'.format(registry_name, name)) if (cls.__name__ in REGISTRY_CLASS_NAMES): raise ValueError('Cannot register {} with duplicate class name ({})'.format(registry_name, cls.__name__)) if ((base_class is not None) and (not issubclass(cls, base_class))): raise ValueError('{} must extend {}'.format(cls.__name__, base_class.__name__)) REGISTRY[name] = cls REGISTRY_CLASS_NAMES.add(cls.__name__) return cls return register_x_cls return (build_x, register_x, REGISTRY)
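# Hedged sketch of the registry machinery above, using a made-up
# '--tokenizer-x' component; every name here is hypothetical, and the sketch
# relies on the module-level REGISTRIES dict that setup_registry writes into.
import argparse

build_tokenizer_x, register_tokenizer_x, TOKENIZER_X_REGISTRY = setup_registry(
    '--tokenizer-x', default='whitespace')

@register_tokenizer_x('whitespace')
class WhitespaceTokenizerX(object):
    def __init__(self, args):
        pass
    def encode(self, line):
        return line.split()

args = argparse.Namespace(tokenizer_x='whitespace')
tok = build_tokenizer_x(args)     # looks up the class, applies defaults, instantiates
print(tok.encode('hello world'))  # ['hello', 'world']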
def set_defaults(args, cls): 'Helper to set default arguments based on *add_args*.' if (not hasattr(cls, 'add_args')): return parser = argparse.ArgumentParser(argument_default=argparse.SUPPRESS, allow_abbrev=False) cls.add_args(parser) defaults = argparse.Namespace() for action in parser._actions: if (action.dest is not argparse.SUPPRESS): if (not hasattr(defaults, action.dest)): if (action.default is not argparse.SUPPRESS): setattr(defaults, action.dest, action.default) for (key, default_value) in vars(defaults).items(): if (not hasattr(args, key)): setattr(args, key, default_value)
def setup_task(args, **kwargs): return TASK_REGISTRY[args.task].setup_task(args, **kwargs)
def register_task(name): "\n New tasks can be added to fairseq with the\n :func:`~fairseq.tasks.register_task` function decorator.\n\n For example::\n\n @register_task('classification')\n class ClassificationTask(FairseqTask):\n (...)\n\n .. note::\n\n All Tasks must implement the :class:`~fairseq.tasks.FairseqTask`\n interface.\n\n Args:\n name (str): the name of the task\n " def register_task_cls(cls): if (name in TASK_REGISTRY): raise ValueError('Cannot register duplicate task ({})'.format(name)) if (not issubclass(cls, FairseqTask)): raise ValueError('Task ({}: {}) must extend FairseqTask'.format(name, cls.__name__)) if (cls.__name__ in TASK_CLASS_NAMES): raise ValueError('Cannot register task with duplicate class name ({})'.format(cls.__name__)) TASK_REGISTRY[name] = cls TASK_CLASS_NAMES.add(cls.__name__) return cls return register_task_cls
def get_task(name): return TASK_REGISTRY[name]
@register_task('audio_pretraining') class AudioPretrainingTask(FairseqTask): 'Task for pre-training speech representations from raw audio (e.g., wav2vec).' @staticmethod def add_args(parser): 'Add task-specific arguments to the parser.' parser.add_argument('data', help='path to data directory') parser.add_argument('--sample-rate', default=16000, type=int, help='target sample rate. audio files will be up/down sampled to this rate') parser.add_argument('--max-sample-size', default=None, type=int, help='max sample size to crop to for batching. default = min sample length') parser.add_argument('--min-sample-size', default=None, type=int, help='min sample size to crop to for batching. default = same as --max-sample-size') def __init__(self, args): super().__init__(args) @classmethod def setup_task(cls, args, **kwargs): 'Setup the task (e.g., load dictionaries).\n\n Args:\n args (argparse.Namespace): parsed command-line arguments\n ' return cls(args) def load_dataset(self, split, **kwargs): 'Load a given dataset split.\n\n Args:\n split (str): name of the split (e.g., train, valid, test)\n ' manifest = os.path.join(self.args.data, '{}.tsv'.format(split)) self.datasets[split] = FileAudioDataset(manifest, sample_rate=self.args.sample_rate, max_sample_size=self.args.max_sample_size, min_sample_size=self.args.min_sample_size) @property def target_dictionary(self): 'Return the :class:`~fairseq.data.Dictionary` for the language\n model.' return None
@register_task('cross_lingual_lm') class CrossLingualLMTask(FairseqTask): '\n Task for training cross-lingual language models.\n\n For more details look at: https://arxiv.org/pdf/1901.07291.pdf\n\n Args:\n dictionary (Dictionary): the dictionary for the input of the task\n ' @staticmethod def add_args(parser): 'Add task-specific arguments to the parser.' parser.add_argument('data', help='colon separated path to data directories list, will be iterated upon during epochs in round-robin manner') parser.add_argument('--tokens-per-sample', default=512, type=int, help='max number of total tokens over all segments per sample') parser.add_argument('--monolingual-langs', default='en', type=str, help='comma separated list of languages on which we want to train XLM') parser.add_argument('--shuffle', action='store_true', help='shuffle each monolingual dataset while training') def __init__(self, args, dictionary): super().__init__(args) self.dictionary = dictionary self.seed = args.seed self.distributed_world_size = args.distributed_world_size self.langs2id = self._lang_to_id(args.monolingual_langs) def _lang_to_id(self, languages: str): '\n Build a map from languages to ids. These ids are used as segment labels\n for cross-lingual LM training.\n ' lang2id = {} langs = [l.strip() for l in languages.split(',')] for (id, lang) in enumerate(langs): lang2id[lang] = id return lang2id @classmethod def load_dictionary(cls, filename): return MaskedLMDictionary.load(filename) @classmethod def build_dictionary(cls, filenames, workers=1, threshold=(- 1), nwords=(- 1), padding_factor=8): d = MaskedLMDictionary() for filename in filenames: Dictionary.add_file_to_dictionary(filename, d, tokenizer.tokenize_line, workers) d.finalize(threshold=threshold, nwords=nwords, padding_factor=padding_factor) return d @property def target_dictionary(self): return self.dictionary @classmethod def setup_task(cls, args, **kwargs): 'Setup the task.' 
dictionary = MaskedLMDictionary.load(os.path.join(args.data, 'dict.txt')) logger.info('dictionary: {} types'.format(len(dictionary))) return cls(args, dictionary) def _load_single_lang_dataset(self, split, epoch): loaded_datasets = [] paths = self.args.data.split(os.pathsep) assert (len(paths) > 0) data_path = paths[(epoch % len(paths))] for k in itertools.count(): split_k = (split + (str(k) if (k > 0) else '')) path = os.path.join(data_path, split_k) ds = data_utils.load_indexed_dataset(path, self.dictionary, self.args.dataset_impl) if (ds is None): if (k > 0): break else: raise FileNotFoundError('Dataset not found: {} ({})'.format(split, data_path)) loaded_datasets.append(TokenBlockDataset(ds, ds.sizes, (self.args.tokens_per_sample - 1), pad=self.dictionary.pad(), eos=self.dictionary.eos())) logger.info('{} {} {} examples'.format(data_path, split_k, len(loaded_datasets[(- 1)]))) if (len(loaded_datasets) == 1): dataset = loaded_datasets[0] sizes = dataset.sizes else: dataset = ConcatDataset(loaded_datasets) sizes = np.concatenate([ds.sizes for ds in loaded_datasets]) return (dataset, sizes) def load_dataset(self, split, epoch=0, combine=False, **kwargs): 'Load a given dataset split.\n Args:\n split (str): name of the split (e.g., train, valid, test)\n ' dataset_map = OrderedDict() for lang in self.langs2id.keys(): language_split = '{}.{}'.format(split, lang) (block_dataset, sizes) = self._load_single_lang_dataset(split=language_split, epoch=epoch) dataset_map[lang] = MaskedLMDataset(dataset=block_dataset, sizes=sizes, vocab=self.dictionary, pad_idx=self.dictionary.pad(), mask_idx=self.dictionary.mask(), classif_token_idx=self.dictionary.eos(), sep_token_idx=self.dictionary.eos(), shuffle=getattr(self.args, 'shuffle', False), has_pairs=False, segment_id=self.langs2id[lang], seed=self.seed) self.datasets[split] = MultiCorpusSampledDataset(dataset_map) logger.info('{} {} {} examples'.format(self.args.data.split(os.pathsep)[epoch], split, len(self.datasets[split])))
@register_task('denoising') class DenoisingTask(FairseqTask): '\n Denoising task for applying sequence-to-sequence denoising (i.e., BART).\n ' @staticmethod def add_args(parser): 'Add task-specific arguments to the parser.' parser.add_argument('data', help='path to data directory') parser.add_argument('--tokens-per-sample', default=512, type=int, help='max number of total tokens over all segments per sample for dataset') parser.add_argument('--sample-break-mode', default='complete_doc', type=str, help='mode for breaking sentences') parser.add_argument('--mask', default=0.0, type=float, help='fraction of words/subwords that will be masked') parser.add_argument('--mask-random', default=0.0, type=float, help='instead of using [MASK], use random token this often') parser.add_argument('--insert', default=0.0, type=float, help='insert this percentage of additional random tokens') parser.add_argument('--permute', default=0.0, type=float, help='take this proportion of subwords and permute them') parser.add_argument('--rotate', default=0.5, type=float, help='rotate this proportion of inputs') parser.add_argument('--poisson-lambda', default=3.0, type=float, help='lambda for the Poisson distribution used when --mask-length is span-poisson') parser.add_argument('--permute-sentences', default=0.0, type=float, help='shuffle this proportion of sentences in all inputs') parser.add_argument('--mask-length', default='subword', type=str, choices=['subword', 'word', 'span-poisson'], help='mask length to choose') parser.add_argument('--replace-length', default=(- 1), type=int, help='when masking N tokens, replace with 0, 1, or N tokens (use -1 for N)') parser.add_argument('--max-source-positions', default=1024, type=int, metavar='N', help='max number of tokens in the source sequence') parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N', help='max number of tokens in the target sequence') def __init__(self, args, dictionary): super().__init__(args) self.dictionary = dictionary self.seed = args.seed self.mask_idx = self.dictionary.add_symbol('<mask>') @classmethod def setup_task(cls, args, **kwargs): 'Setup the task.\n ' dictionary = Dictionary.load(os.path.join(args.data, 'dict.txt')) logger.info('dictionary: {} types'.format(len(dictionary))) if (not hasattr(args, 'shuffle_instance')): args.shuffle_instance = False return cls(args, dictionary) def load_dataset(self, split, epoch=0, combine=False, **kwargs): 'Load a given dataset split.\n\n Args:\n split (str): name of the split (e.g., train, valid, test)\n ' paths = self.args.data.split(os.pathsep) assert (len(paths) > 0) data_path = paths[(epoch % len(paths))] split_path = os.path.join(data_path, split) dataset = data_utils.load_indexed_dataset(split_path, self.dictionary, self.args.dataset_impl, combine=combine) if (dataset is None): raise FileNotFoundError('Dataset not found: {} ({})'.format(split, split_path)) dataset = StripTokenDataset(dataset, self.dictionary.eos()) dataset = TokenBlockDataset(dataset, dataset.sizes, (self.args.tokens_per_sample - 2), pad=self.dictionary.pad(), eos=self.dictionary.eos(), break_mode=self.args.sample_break_mode, document_sep_len=0) dataset = PrependTokenDataset(dataset, self.source_dictionary.bos()) dataset = AppendTokenDataset(dataset, self.source_dictionary.eos()) mask_whole_words = (get_whole_word_mask(self.args, self.source_dictionary) if (self.args.mask_length != 'subword') else None) self.datasets[split] = DenoisingDataset(dataset, dataset.sizes, self.dictionary, self.mask_idx, mask_whole_words, 
shuffle=self.args.shuffle_instance, seed=self.seed, args=self.args) logger.info('Split: {0}, Loaded {1} samples of denoising_dataset'.format(split, len(self.datasets[split]))) def max_positions(self): 'Return the max sentence length allowed by the task.' return (self.args.max_source_positions, self.args.max_target_positions) @property def source_dictionary(self): 'Return the source :class:`~fairseq.data.Dictionary`.' return self.dictionary @property def target_dictionary(self): 'Return the target :class:`~fairseq.data.Dictionary`.' return self.dictionary
class FairseqTask(object): '\n Tasks store dictionaries and provide helpers for loading/iterating over\n Datasets, initializing the Model/Criterion and calculating the loss.\n ' @staticmethod def add_args(parser): 'Add task-specific arguments to the parser.' pass def __init__(self, args): self.args = args self.datasets = {} self.dataset_to_epoch_iter = {} @classmethod def load_dictionary(cls, filename): 'Load the dictionary from the filename\n\n Args:\n filename (str): the filename\n ' return Dictionary.load(filename) @classmethod def build_dictionary(cls, filenames, workers=1, threshold=(- 1), nwords=(- 1), padding_factor=8): 'Build the dictionary\n\n Args:\n filenames (list): list of filenames\n workers (int): number of concurrent workers\n threshold (int): defines the minimum word count\n nwords (int): defines the total number of words in the final dictionary,\n including special symbols\n padding_factor (int): can be used to pad the dictionary size to be a\n multiple of 8, which is important on some hardware (e.g., Nvidia\n Tensor Cores).\n ' d = Dictionary() for filename in filenames: Dictionary.add_file_to_dictionary(filename, d, tokenizer.tokenize_line, workers) d.finalize(threshold=threshold, nwords=nwords, padding_factor=padding_factor) return d @classmethod def setup_task(cls, args, **kwargs): 'Setup the task (e.g., load dictionaries).\n\n Args:\n args (argparse.Namespace): parsed command-line arguments\n ' return cls(args, **kwargs) def load_dataset(self, split, combine=False, **kwargs): 'Load a given dataset split.\n\n Args:\n split (str): name of the split (e.g., train, valid, test)\n ' raise NotImplementedError def dataset(self, split): '\n Return a loaded dataset split.\n\n Args:\n split (str): name of the split (e.g., train, valid, test)\n\n Returns:\n a :class:`~fairseq.data.FairseqDataset` corresponding to *split*\n ' from fairseq.data import FairseqDataset if (split not in self.datasets): raise KeyError(('Dataset not loaded: ' + split)) if (not isinstance(self.datasets[split], FairseqDataset)): raise TypeError('Datasets are expected to be of type FairseqDataset') return self.datasets[split] def get_batch_iterator(self, dataset, max_tokens=None, max_sentences=None, max_positions=None, ignore_invalid_inputs=False, required_batch_size_multiple=1, seed=1, num_shards=1, shard_id=0, num_workers=0, epoch=0): "\n Get an iterator that yields batches of data from the given dataset.\n\n Args:\n dataset (~fairseq.data.FairseqDataset): dataset to batch\n max_tokens (int, optional): max number of tokens in each batch\n (default: None).\n max_sentences (int, optional): max number of sentences in each\n batch (default: None).\n max_positions (optional): max sentence length supported by the\n model (default: None).\n ignore_invalid_inputs (bool, optional): don't raise Exception for\n sentences that are too long (default: False).\n required_batch_size_multiple (int, optional): require batch size to\n be a multiple of N (default: 1).\n seed (int, optional): seed for random number generator for\n reproducibility (default: 1).\n num_shards (int, optional): shard the data iterator into N\n shards (default: 1).\n shard_id (int, optional): which shard of the data iterator to\n return (default: 0).\n num_workers (int, optional): how many subprocesses to use for data\n loading. 
0 means the data will be loaded in the main process\n (default: 0).\n epoch (int, optional): the epoch to start the iterator from\n (default: 0).\n Returns:\n ~fairseq.iterators.EpochBatchIterator: a batched iterator over the\n given dataset split\n " if (dataset in self.dataset_to_epoch_iter): return self.dataset_to_epoch_iter[dataset] assert isinstance(dataset, FairseqDataset) dataset.set_epoch(epoch) with data_utils.numpy_seed(seed): indices = dataset.ordered_indices() if (max_positions is not None): indices = data_utils.filter_by_size(indices, dataset, max_positions, raise_exception=(not ignore_invalid_inputs)) batch_sampler = data_utils.batch_by_size(indices, dataset.num_tokens, max_tokens=max_tokens, max_sentences=max_sentences, required_batch_size_multiple=required_batch_size_multiple) epoch_iter = iterators.EpochBatchIterator(dataset=dataset, collate_fn=dataset.collater, batch_sampler=batch_sampler, seed=seed, num_shards=num_shards, shard_id=shard_id, num_workers=num_workers, epoch=epoch) self.dataset_to_epoch_iter[dataset] = epoch_iter return epoch_iter def build_model(self, args): '\n Build the :class:`~fairseq.models.BaseFairseqModel` instance for this\n task.\n\n Args:\n args (argparse.Namespace): parsed command-line arguments\n\n Returns:\n a :class:`~fairseq.models.BaseFairseqModel` instance\n ' from fairseq import models return models.build_model(args, self) def build_criterion(self, args): '\n Build the :class:`~fairseq.criterions.FairseqCriterion` instance for\n this task.\n\n Args:\n args (argparse.Namespace): parsed command-line arguments\n\n Returns:\n a :class:`~fairseq.criterions.FairseqCriterion` instance\n ' from fairseq import criterions return criterions.build_criterion(args, self) def build_generator(self, args): if getattr(args, 'score_reference', False): from fairseq.sequence_scorer import SequenceScorer return SequenceScorer(self.target_dictionary, compute_alignment=getattr(args, 'print_alignment', False)) from fairseq.sequence_generator import SequenceGenerator, SequenceGeneratorWithAlignment sampling = getattr(args, 'sampling', False) sampling_topk = getattr(args, 'sampling_topk', (- 1)) sampling_topp = getattr(args, 'sampling_topp', (- 1.0)) diverse_beam_groups = getattr(args, 'diverse_beam_groups', (- 1)) diverse_beam_strength = getattr(args, 'diverse_beam_strength', 0.5) match_source_len = getattr(args, 'match_source_len', False) diversity_rate = getattr(args, 'diversity_rate', (- 1)) if (sum((int(cond) for cond in [sampling, (diverse_beam_groups > 0), match_source_len, (diversity_rate > 0)])) > 1): raise ValueError('Provided Search parameters are mutually exclusive.') assert ((sampling_topk < 0) or sampling), '--sampling-topk requires --sampling' assert ((sampling_topp < 0) or sampling), '--sampling-topp requires --sampling' if sampling: search_strategy = search.Sampling(self.target_dictionary, sampling_topk, sampling_topp) elif (diverse_beam_groups > 0): search_strategy = search.DiverseBeamSearch(self.target_dictionary, diverse_beam_groups, diverse_beam_strength) elif match_source_len: search_strategy = search.LengthConstrainedBeamSearch(self.target_dictionary, min_len_a=1, min_len_b=0, max_len_a=1, max_len_b=0) elif (diversity_rate > (- 1)): search_strategy = search.DiverseSiblingsSearch(self.target_dictionary, diversity_rate) else: search_strategy = search.BeamSearch(self.target_dictionary) if getattr(args, 'print_alignment', False): seq_gen_cls = SequenceGeneratorWithAlignment else: seq_gen_cls = SequenceGenerator return 
seq_gen_cls(self.target_dictionary, beam_size=getattr(args, 'beam', 5), max_len_a=getattr(args, 'max_len_a', 0), max_len_b=getattr(args, 'max_len_b', 200), min_len=getattr(args, 'min_len', 1), normalize_scores=(not getattr(args, 'unnormalized', False)), len_penalty=getattr(args, 'lenpen', 1), unk_penalty=getattr(args, 'unkpen', 0), temperature=getattr(args, 'temperature', 1.0), match_source_len=getattr(args, 'match_source_len', False), no_repeat_ngram_size=getattr(args, 'no_repeat_ngram_size', 0), search_strategy=search_strategy) def train_step(self, sample, model, criterion, optimizer, ignore_grad=False): '\n Do forward and backward, and return the loss as computed by *criterion*\n for the given *model* and *sample*.\n\n Args:\n sample (dict): the mini-batch. The format is defined by the\n :class:`~fairseq.data.FairseqDataset`.\n model (~fairseq.models.BaseFairseqModel): the model\n criterion (~fairseq.criterions.FairseqCriterion): the criterion\n optimizer (~fairseq.optim.FairseqOptimizer): the optimizer\n ignore_grad (bool): multiply loss by 0 if this is set to True\n\n Returns:\n tuple:\n - the loss\n - the sample size, which is used as the denominator for the\n gradient\n - logging outputs to display while training\n ' model.train() (loss, sample_size, logging_output) = criterion(model, sample) if ignore_grad: loss *= 0 optimizer.backward(loss) return (loss, sample_size, logging_output) def valid_step(self, sample, model, criterion): model.eval() with torch.no_grad(): (loss, sample_size, logging_output) = criterion(model, sample) return (loss, sample_size, logging_output) def inference_step(self, generator, models, sample, prefix_tokens=None): with torch.no_grad(): return generator.generate(models, sample, prefix_tokens=prefix_tokens) def begin_epoch(self, epoch, model): 'Hook function called before the start of each epoch.' pass def update_step(self, num_updates): 'Task level update when number of updates increases.\n\n This is called after the optimization step and learning rate\n update at each iteration.\n ' pass def aggregate_logging_outputs(self, logging_outputs, criterion): '[deprecated] Aggregate logging outputs from data parallel training.' utils.deprecation_warning('The aggregate_logging_outputs API is deprecated. Please use the reduce_metrics API instead.') with metrics.aggregate() as agg: self.reduce_metrics(logging_outputs, criterion) return agg.get_smoothed_values() def reduce_metrics(self, logging_outputs, criterion): 'Aggregate logging outputs from data parallel training.' base_func = FairseqTask.aggregate_logging_outputs self_func = getattr(self, 'aggregate_logging_outputs').__func__ if (self_func is not base_func): utils.deprecation_warning('Tasks should implement the reduce_metrics API. 
Falling back to deprecated aggregate_logging_outputs API.') agg_logging_outputs = self.aggregate_logging_outputs(logging_outputs, criterion) for (k, v) in agg_logging_outputs.items(): metrics.log_scalar(k, v) return if (not any((('ntokens' in log) for log in logging_outputs))): warnings.warn('ntokens not found in Criterion logging outputs, cannot log wpb or wps') else: ntokens = sum((log.get('ntokens', 0) for log in logging_outputs)) metrics.log_scalar('wpb', ntokens, priority=180, round=1) metrics.log_speed('wps', ntokens, priority=90, round=1) if (not any((('nsentences' in log) for log in logging_outputs))): warnings.warn('nsentences not found in Criterion logging outputs, cannot log bsz') else: nsentences = sum((log.get('nsentences', 0) for log in logging_outputs)) metrics.log_scalar('bsz', nsentences, priority=190, round=1) criterion.__class__.reduce_metrics(logging_outputs) def max_positions(self): 'Return the max input length allowed by the task.' return None @property def source_dictionary(self): 'Return the source :class:`~fairseq.data.Dictionary` (if applicable\n for this task).' raise NotImplementedError @property def target_dictionary(self): 'Return the target :class:`~fairseq.data.Dictionary` (if applicable\n for this task).' raise NotImplementedError
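# --- Illustrative usage sketch (not part of fairseq): how the FairseqTask API above
# is typically driven. `MyTask`, `my_args` and `optimizer` are hypothetical
# placeholders; the method names are the ones defined on FairseqTask.
#
# task = MyTask.setup_task(my_args)                      # load dictionaries, etc.
# task.load_dataset('train')                             # fills task.datasets['train']
# model = task.build_model(my_args)
# criterion = task.build_criterion(my_args)
# epoch_itr = task.get_batch_iterator(task.dataset('train'), max_tokens=4096)
# for sample in epoch_itr.next_epoch_itr(shuffle=True):  # EpochBatchIterator API
#     loss, sample_size, logging_output = task.train_step(sample, model, criterion, optimizer)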
@register_task('legacy_masked_lm') class LegacyMaskedLMTask(FairseqTask): '\n Task for training Masked LM (BERT) model.\n Args:\n dictionary (Dictionary): the dictionary for the input of the task\n ' @staticmethod def add_args(parser): 'Add task-specific arguments to the parser.' parser.add_argument('data', help='colon separated path to data directories list, will be iterated upon during epochs in round-robin manner') parser.add_argument('--tokens-per-sample', default=512, type=int, help='max number of total tokens over all segments per sample for BERT dataset') parser.add_argument('--break-mode', default='doc', type=str, help='mode for breaking sentence') parser.add_argument('--shuffle-dataset', action='store_true', default=False) def __init__(self, args, dictionary): super().__init__(args) self.dictionary = dictionary self.seed = args.seed @classmethod def load_dictionary(cls, filename): return BertDictionary.load(filename) @classmethod def build_dictionary(cls, filenames, workers=1, threshold=(- 1), nwords=(- 1), padding_factor=8): d = BertDictionary() for filename in filenames: Dictionary.add_file_to_dictionary(filename, d, tokenizer.tokenize_line, workers) d.finalize(threshold=threshold, nwords=nwords, padding_factor=padding_factor) return d @property def target_dictionary(self): return self.dictionary @classmethod def setup_task(cls, args, **kwargs): 'Setup the task.\n ' paths = args.data.split(os.pathsep) assert (len(paths) > 0) dictionary = BertDictionary.load(os.path.join(paths[0], 'dict.txt')) logger.info('dictionary: {} types'.format(len(dictionary))) return cls(args, dictionary) def load_dataset(self, split, epoch=0, combine=False): 'Load a given dataset split.\n Args:\n split (str): name of the split (e.g., train, valid, test)\n ' loaded_datasets = [] paths = self.args.data.split(os.pathsep) assert (len(paths) > 0) data_path = paths[(epoch % len(paths))] logger.info('data_path: {}'.format(data_path)) for k in itertools.count(): split_k = (split + (str(k) if (k > 0) else '')) path = os.path.join(data_path, split_k) ds = indexed_dataset.make_dataset(path, impl=self.args.dataset_impl, fix_lua_indexing=True, dictionary=self.dictionary) if (ds is None): if (k > 0): break else: raise FileNotFoundError('Dataset not found: {} ({})'.format(split, data_path)) with data_utils.numpy_seed((self.seed + k)): loaded_datasets.append(BlockPairDataset(ds, self.dictionary, ds.sizes, self.args.tokens_per_sample, break_mode=self.args.break_mode, doc_break_size=1)) logger.info('{} {} {} examples'.format(data_path, split_k, len(loaded_datasets[(- 1)]))) if (not combine): break if (len(loaded_datasets) == 1): dataset = loaded_datasets[0] sizes = dataset.sizes else: dataset = ConcatDataset(loaded_datasets) sizes = np.concatenate([ds.sizes for ds in loaded_datasets]) self.datasets[split] = MaskedLMDataset(dataset=dataset, sizes=sizes, vocab=self.dictionary, pad_idx=self.dictionary.pad(), mask_idx=self.dictionary.mask(), classif_token_idx=self.dictionary.cls(), sep_token_idx=self.dictionary.sep(), shuffle=self.args.shuffle_dataset, seed=self.seed)
@register_task('masked_lm') class MaskedLMTask(FairseqTask): 'Task for training masked language models (e.g., BERT, RoBERTa).' @staticmethod def add_args(parser): 'Add task-specific arguments to the parser.' parser.add_argument('data', help='colon separated path to data directories list, will be iterated upon during epochs in round-robin manner') parser.add_argument('--sample-break-mode', default='complete', choices=['none', 'complete', 'complete_doc', 'eos'], help='If omitted or "none", fills each sample with tokens-per-sample tokens. If set to "complete", splits samples only at the end of sentence, but may include multiple sentences per sample. "complete_doc" is similar but respects doc boundaries. If set to "eos", includes only one sentence per sample.') parser.add_argument('--tokens-per-sample', default=512, type=int, help='max number of total tokens over all segments per sample for BERT dataset') parser.add_argument('--mask-prob', default=0.15, type=float, help='probability of replacing a token with mask') parser.add_argument('--leave-unmasked-prob', default=0.1, type=float, help='probability that a masked token is unmasked') parser.add_argument('--random-token-prob', default=0.1, type=float, help='probability of replacing a token with a random token') parser.add_argument('--freq-weighted-replacement', default=False, action='store_true', help='sample random replacement words based on word frequencies') parser.add_argument('--mask-whole-words', default=False, action='store_true', help='mask whole words; you may also want to set --bpe') def __init__(self, args, dictionary): super().__init__(args) self.dictionary = dictionary self.seed = args.seed self.mask_idx = dictionary.add_symbol('<mask>') @classmethod def setup_task(cls, args, **kwargs): paths = args.data.split(os.pathsep) assert (len(paths) > 0) dictionary = Dictionary.load(os.path.join(paths[0], 'dict.txt')) logger.info('dictionary: {} types'.format(len(dictionary))) return cls(args, dictionary) def load_dataset(self, split, epoch=0, combine=False, **kwargs): 'Load a given dataset split.\n\n Args:\n split (str): name of the split (e.g., train, valid, test)\n ' paths = self.args.data.split(os.pathsep) assert (len(paths) > 0) data_path = paths[(epoch % len(paths))] split_path = os.path.join(data_path, split) dataset = data_utils.load_indexed_dataset(split_path, self.source_dictionary, self.args.dataset_impl, combine=combine) if (dataset is None): raise FileNotFoundError('Dataset not found: {} ({})'.format(split, split_path)) dataset = TokenBlockDataset(dataset, dataset.sizes, (self.args.tokens_per_sample - 1), pad=self.source_dictionary.pad(), eos=self.source_dictionary.eos(), break_mode=self.args.sample_break_mode) logger.info('loaded {} blocks from: {}'.format(len(dataset), split_path)) dataset = PrependTokenDataset(dataset, self.source_dictionary.bos()) mask_whole_words = (get_whole_word_mask(self.args, self.source_dictionary) if self.args.mask_whole_words else None) (src_dataset, tgt_dataset) = MaskTokensDataset.apply_mask(dataset, self.source_dictionary, pad_idx=self.source_dictionary.pad(), mask_idx=self.mask_idx, seed=self.args.seed, mask_prob=self.args.mask_prob, leave_unmasked_prob=self.args.leave_unmasked_prob, random_token_prob=self.args.random_token_prob, freq_weighted_replacement=self.args.freq_weighted_replacement, mask_whole_words=mask_whole_words) with data_utils.numpy_seed((self.args.seed + epoch)): shuffle = np.random.permutation(len(src_dataset)) self.datasets[split] = SortDataset(NestedDictionaryDataset({'id': 
IdDataset(), 'net_input': {'src_tokens': PadDataset(src_dataset, pad_idx=self.source_dictionary.pad(), left_pad=False), 'src_lengths': NumelDataset(src_dataset, reduce=False)}, 'target': PadDataset(tgt_dataset, pad_idx=self.source_dictionary.pad(), left_pad=False), 'nsentences': NumSamplesDataset(), 'ntokens': NumelDataset(src_dataset, reduce=True)}, sizes=[src_dataset.sizes]), sort_order=[shuffle, src_dataset.sizes]) def build_dataset_for_inference(self, src_tokens, src_lengths, sort=True): src_dataset = PadDataset(TokenBlockDataset(src_tokens, src_lengths, (self.args.tokens_per_sample - 1), pad=self.source_dictionary.pad(), eos=self.source_dictionary.eos(), break_mode='eos'), pad_idx=self.source_dictionary.pad(), left_pad=False) src_dataset = PrependTokenDataset(src_dataset, self.source_dictionary.bos()) src_dataset = NestedDictionaryDataset({'id': IdDataset(), 'net_input': {'src_tokens': src_dataset, 'src_lengths': NumelDataset(src_dataset, reduce=False)}}, sizes=src_lengths) if sort: src_dataset = SortDataset(src_dataset, sort_order=[src_lengths]) return src_dataset @property def source_dictionary(self): return self.dictionary @property def target_dictionary(self): return self.dictionary
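# Illustrative arithmetic (a sketch, not fairseq code): how the --mask-prob,
# --leave-unmasked-prob and --random-token-prob defaults above combine under
# MaskTokensDataset.apply_mask into the BERT-style 80/10/10 corruption split.
mask_prob, leave_unmasked_prob, random_token_prob = 0.15, 0.1, 0.1
p_mask = mask_prob * (1 - leave_unmasked_prob - random_token_prob)  # -> 0.12 replaced by <mask>
p_rand = mask_prob * random_token_prob                              # -> 0.015 replaced by a random token
p_keep = mask_prob * leave_unmasked_prob                            # -> 0.015 selected but left unchanged
assert abs((p_mask + p_rand + p_keep) - mask_prob) < 1e-12          # the other 85% of tokens are never selected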
@register_task('multilingual_masked_lm') class MultiLingualMaskedLMTask(FairseqTask): 'Task for training masked language models (e.g., BERT, RoBERTa).' @staticmethod def add_args(parser): 'Add task-specific arguments to the parser.' parser.add_argument('data', help='colon separated path to data directories list, will be iterated upon during epochs in round-robin manner') parser.add_argument('--sample-break-mode', default='complete', choices=['none', 'complete', 'complete_doc', 'eos'], help='If omitted or "none", fills each sample with tokens-per-sample tokens. If set to "complete", splits samples only at the end of sentence, but may include multiple sentences per sample. "complete_doc" is similar but respects doc boundaries. If set to "eos", includes only one sentence per sample.') parser.add_argument('--tokens-per-sample', default=512, type=int, help='max number of total tokens over all segments per sample for BERT dataset') parser.add_argument('--mask-prob', default=0.15, type=float, help='probability of replacing a token with mask') parser.add_argument('--leave-unmasked-prob', default=0.1, type=float, help='probability that a masked token is unmasked') parser.add_argument('--random-token-prob', default=0.1, type=float, help='probability of replacing a token with a random token') parser.add_argument('--freq-weighted-replacement', action='store_true', help='sample random replacement words based on word frequencies') parser.add_argument('--mask-whole-words', default=False, action='store_true', help='mask whole words; you may also want to set --bpe') parser.add_argument('--multilang-sampling-alpha', type=float, default=1.0, help='smoothing alpha for sample ratios across multiple datasets') def __init__(self, args, dictionary): super().__init__(args) self.dictionary = dictionary self.seed = args.seed self.mask_idx = dictionary.add_symbol('<mask>') @classmethod def setup_task(cls, args, **kwargs): paths = args.data.split(os.pathsep) assert (len(paths) > 0) dictionary = Dictionary.load(os.path.join(paths[0], 'dict.txt')) logger.info('dictionary: {} types'.format(len(dictionary))) return cls(args, dictionary) def _get_whole_word_mask(self): if self.args.mask_whole_words: bpe = encoders.build_bpe(self.args) if (bpe is not None): def is_beginning_of_word(i): if (i < self.source_dictionary.nspecial): return True tok = self.source_dictionary[i] if tok.startswith('madeupword'): return True try: return bpe.is_beginning_of_word(tok) except ValueError: return True mask_whole_words = torch.ByteTensor(list(map(is_beginning_of_word, range(len(self.source_dictionary))))) else: mask_whole_words = None return mask_whole_words def _get_sample_prob(self, dataset_lens): '\n Get smoothed sampling probability by languages. 
This helps low resource\n languages by upsampling them.\n ' prob = (dataset_lens / dataset_lens.sum()) smoothed_prob = (prob ** self.args.multilang_sampling_alpha) smoothed_prob = (smoothed_prob / smoothed_prob.sum()) return smoothed_prob def load_dataset(self, split, epoch=0, combine=False, **kwargs): 'Load a given dataset split.\n\n Args:\n split (str): name of the split (e.g., train, valid, test)\n ' paths = self.args.data.split(os.pathsep) assert (len(paths) > 0) data_path = paths[(epoch % len(paths))] languages = sorted((name for name in os.listdir(data_path) if os.path.isdir(os.path.join(data_path, name)))) logger.info('Training on {0} languages: {1}'.format(len(languages), languages)) logger.info('Language to id mapping: {}'.format({lang: id for (id, lang) in enumerate(languages)})) mask_whole_words = self._get_whole_word_mask() lang_datasets = [] for (lang_id, language) in enumerate(languages): split_path = os.path.join(data_path, language, split) dataset = data_utils.load_indexed_dataset(split_path, self.source_dictionary, self.args.dataset_impl, combine=combine) if (dataset is None): raise FileNotFoundError('Dataset not found: {} ({})'.format(split, split_path)) dataset = TokenBlockDataset(dataset, dataset.sizes, (self.args.tokens_per_sample - 1), pad=self.source_dictionary.pad(), eos=self.source_dictionary.eos(), break_mode=self.args.sample_break_mode) logger.info('loaded {} blocks from: {}'.format(len(dataset), split_path)) dataset = PrependTokenDataset(dataset, self.source_dictionary.bos()) (src_dataset, tgt_dataset) = MaskTokensDataset.apply_mask(dataset, self.source_dictionary, pad_idx=self.source_dictionary.pad(), mask_idx=self.mask_idx, seed=self.args.seed, mask_prob=self.args.mask_prob, leave_unmasked_prob=self.args.leave_unmasked_prob, random_token_prob=self.args.random_token_prob, freq_weighted_replacement=self.args.freq_weighted_replacement, mask_whole_words=mask_whole_words) lang_dataset = NestedDictionaryDataset({'net_input': {'src_tokens': PadDataset(src_dataset, pad_idx=self.source_dictionary.pad(), left_pad=False), 'src_lengths': NumelDataset(src_dataset, reduce=False)}, 'target': PadDataset(tgt_dataset, pad_idx=self.source_dictionary.pad(), left_pad=False), 'nsentences': NumSamplesDataset(), 'ntokens': NumelDataset(src_dataset, reduce=True), 'lang_id': RawLabelDataset(([lang_id] * src_dataset.sizes.shape[0]))}, sizes=[src_dataset.sizes]) lang_datasets.append(lang_dataset) dataset_lengths = np.array([len(d) for d in lang_datasets], dtype=float) logger.info('loaded total {} blocks for all languages'.format(dataset_lengths.sum())) if (split == self.args.train_subset): sample_probs = self._get_sample_prob(dataset_lengths) logger.info('Sample probability by language: {}'.format({lang: '{0:.4f}'.format(sample_probs[id]) for (id, lang) in enumerate(languages)})) size_ratio = ((sample_probs * dataset_lengths.sum()) / dataset_lengths) logger.info('Up/Down Sampling ratio by language: {}'.format({lang: '{0:.2f}'.format(size_ratio[id]) for (id, lang) in enumerate(languages)})) resampled_lang_datasets = [ResamplingDataset(lang_datasets[i], size_ratio=size_ratio[i], seed=self.args.seed, epoch=epoch, replace=(size_ratio[i] >= 1.0)) for (i, d) in enumerate(lang_datasets)] dataset = ConcatDataset(resampled_lang_datasets) else: dataset = ConcatDataset(lang_datasets) lang_splits = [split] for (lang_id, lang_dataset) in enumerate(lang_datasets): split_name = ((split + '_') + languages[lang_id]) lang_splits.append(split_name) self.datasets[split_name] = lang_dataset if (split in self.args.valid_subset): 
self.args.valid_subset = self.args.valid_subset.replace(split, ','.join(lang_splits)) with data_utils.numpy_seed((self.args.seed + epoch)): shuffle = np.random.permutation(len(dataset)) self.datasets[split] = SortDataset(dataset, sort_order=[shuffle, dataset.sizes]) def build_dataset_for_inference(self, src_tokens, src_lengths, sort=True): src_dataset = PadDataset(TokenBlockDataset(src_tokens, src_lengths, (self.args.tokens_per_sample - 1), pad=self.source_dictionary.pad(), eos=self.source_dictionary.eos(), break_mode='eos'), pad_idx=self.source_dictionary.pad(), left_pad=False) src_dataset = PrependTokenDataset(src_dataset, self.source_dictionary.bos()) src_dataset = NestedDictionaryDataset({'id': IdDataset(), 'net_input': {'src_tokens': src_dataset, 'src_lengths': NumelDataset(src_dataset, reduce=False)}}, sizes=src_lengths) if sort: src_dataset = SortDataset(src_dataset, sort_order=[src_lengths]) return src_dataset def get_batch_iterator(self, dataset, max_tokens=None, max_sentences=None, max_positions=None, ignore_invalid_inputs=False, required_batch_size_multiple=1, seed=1, num_shards=1, shard_id=0, num_workers=0, epoch=0): self.dataset_to_epoch_iter = {} epoch_iter = super().get_batch_iterator(dataset, max_tokens, max_sentences, max_positions, ignore_invalid_inputs, required_batch_size_multiple, seed, num_shards, shard_id, num_workers, epoch) self.dataset_to_epoch_iter = {} return epoch_iter @property def source_dictionary(self): return self.dictionary @property def target_dictionary(self): return self.dictionary
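# Runnable sketch (made-up sizes) of the _get_sample_prob smoothing above: with
# --multilang-sampling-alpha < 1, low-resource languages are sampled more often
# than their raw share of the data, mirroring the ResamplingDataset size_ratio logic.
import numpy as np
dataset_lens = np.array([1000000.0, 10000.0])  # hypothetical high/low-resource corpora
for alpha in (1.0, 0.5):
    prob = dataset_lens / dataset_lens.sum()
    smoothed = prob ** alpha
    smoothed /= smoothed.sum()
    print(alpha, smoothed)  # alpha=1.0 keeps the raw ratio; alpha=0.5 upsamples the small corpus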
def _lang_token(lang: str): return '__{}__'.format(lang)
def _lang_token_index(dic: Dictionary, lang: str): 'Return language token index.' idx = dic.index(_lang_token(lang)) assert (idx != dic.unk_index), 'cannot find language token for lang {}'.format(lang) return idx
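# Illustrative note: the helpers above turn a language code into a dictionary
# symbol such as '__en__'. The symbol only resolves if it was added to the
# dictionary beforehand (MultilingualTranslationTask.prepare does this when
# --encoder-langtok or --decoder-langtok is set); otherwise the assert fires.
# d = Dictionary(); d.add_symbol(_lang_token('en'))
# idx = _lang_token_index(d, 'en')  # index of '__en__'
assert _lang_token('en') == '__en__'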
@register_task('multilingual_translation') class MultilingualTranslationTask(FairseqTask): 'A task for training multiple translation models simultaneously.\n\n We iterate round-robin over batches from multiple language pairs, ordered\n according to the `--lang-pairs` argument.\n\n The training loop is roughly:\n\n for i in range(len(epoch)):\n for lang_pair in args.lang_pairs:\n batch = next_batch_for_lang_pair(lang_pair)\n loss = criterion(model_for_lang_pair(lang_pair), batch)\n loss.backward()\n optimizer.step()\n\n In practice, `next_batch_for_lang_pair` is abstracted in a FairseqDataset\n (e.g., `RoundRobinZipDatasets`) and `model_for_lang_pair` is a model that\n implements the `FairseqMultiModel` interface.\n\n During inference it is required to specify a single `--source-lang` and\n `--target-lang`, which indicates the inference language direction.\n `--lang-pairs`, `--encoder-langtok`, `--decoder-langtok` have to be set to\n the same value as training.\n ' @staticmethod def add_args(parser): 'Add task-specific arguments to the parser.' parser.add_argument('data', metavar='DIR', help='path to data directory') parser.add_argument('--lang-pairs', default=None, metavar='PAIRS', help='comma-separated list of language pairs (in training order): en-de,en-fr,de-fr') parser.add_argument('-s', '--source-lang', default=None, metavar='SRC', help='source language (only needed for inference)') parser.add_argument('-t', '--target-lang', default=None, metavar='TARGET', help='target language (only needed for inference)') parser.add_argument('--left-pad-source', default='True', type=str, metavar='BOOL', help='pad the source on the left (default: True)') parser.add_argument('--left-pad-target', default='False', type=str, metavar='BOOL', help='pad the target on the left (default: False)') parser.add_argument('--max-source-positions', default=1024, type=int, metavar='N', help='max number of tokens in the source sequence') parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N', help='max number of tokens in the target sequence') parser.add_argument('--upsample-primary', default=1, type=int, help='amount to upsample primary dataset') parser.add_argument('--encoder-langtok', default=None, type=str, choices=['src', 'tgt'], metavar='SRCTGT', help='replace beginning-of-sentence in source sentence with source or target language token. (src/tgt)') parser.add_argument('--decoder-langtok', action='store_true', help='replace beginning-of-sentence in target sentence with target language token') def __init__(self, args, dicts, training): super().__init__(args) self.dicts = dicts self.training = training if training: self.lang_pairs = args.lang_pairs else: self.lang_pairs = ['{}-{}'.format(args.source_lang, args.target_lang)] self.eval_lang_pairs = self.lang_pairs self.model_lang_pairs = self.lang_pairs self.langs = list(dicts.keys()) @classmethod def setup_task(cls, args, **kwargs): (dicts, training) = cls.prepare(args, **kwargs) return cls(args, dicts, training) @classmethod def prepare(cls, args, **kwargs): args.left_pad_source = options.eval_bool(args.left_pad_source) args.left_pad_target = options.eval_bool(args.left_pad_target) if (args.lang_pairs is None): raise ValueError('--lang-pairs is required. 
List all the language pairs in the training objective.') if isinstance(args.lang_pairs, str): args.lang_pairs = args.lang_pairs.split(',') sorted_langs = sorted(list({x for lang_pair in args.lang_pairs for x in lang_pair.split('-')})) if ((args.source_lang is not None) or (args.target_lang is not None)): training = False else: training = True dicts = OrderedDict() for lang in sorted_langs: paths = args.data.split(os.pathsep) assert (len(paths) > 0) dicts[lang] = Dictionary.load(os.path.join(paths[0], 'dict.{}.txt'.format(lang))) if (len(dicts) > 0): assert (dicts[lang].pad() == dicts[sorted_langs[0]].pad()) assert (dicts[lang].eos() == dicts[sorted_langs[0]].eos()) assert (dicts[lang].unk() == dicts[sorted_langs[0]].unk()) if ((args.encoder_langtok is not None) or args.decoder_langtok): for lang_to_add in sorted_langs: dicts[lang].add_symbol(_lang_token(lang_to_add)) logger.info('[{}] dictionary: {} types'.format(lang, len(dicts[lang]))) return (dicts, training) def get_encoder_langtok(self, src_lang, tgt_lang): if (self.args.encoder_langtok is None): return self.dicts[src_lang].eos() if (self.args.encoder_langtok == 'src'): return _lang_token_index(self.dicts[src_lang], src_lang) else: return _lang_token_index(self.dicts[src_lang], tgt_lang) def get_decoder_langtok(self, tgt_lang): if (not self.args.decoder_langtok): return self.dicts[tgt_lang].eos() return _lang_token_index(self.dicts[tgt_lang], tgt_lang) def alter_dataset_langtok(self, lang_pair_dataset, src_eos=None, src_lang=None, tgt_eos=None, tgt_lang=None): if ((self.args.encoder_langtok is None) and (not self.args.decoder_langtok)): return lang_pair_dataset new_src_eos = None if ((self.args.encoder_langtok is not None) and (src_eos is not None) and (src_lang is not None) and (tgt_lang is not None)): new_src_eos = self.get_encoder_langtok(src_lang, tgt_lang) else: src_eos = None new_tgt_bos = None if (self.args.decoder_langtok and (tgt_eos is not None) and (tgt_lang is not None)): new_tgt_bos = self.get_decoder_langtok(tgt_lang) else: tgt_eos = None return TransformEosLangPairDataset(lang_pair_dataset, src_eos=src_eos, new_src_eos=new_src_eos, tgt_bos=tgt_eos, new_tgt_bos=new_tgt_bos) def load_dataset(self, split, epoch=0, **kwargs): 'Load a dataset split.' 
paths = self.args.data.split(os.pathsep) assert (len(paths) > 0) data_path = paths[(epoch % len(paths))] def language_pair_dataset(lang_pair): (src, tgt) = lang_pair.split('-') langpair_dataset = load_langpair_dataset(data_path, split, src, self.dicts[src], tgt, self.dicts[tgt], combine=True, dataset_impl=self.args.dataset_impl, upsample_primary=self.args.upsample_primary, left_pad_source=self.args.left_pad_source, left_pad_target=self.args.left_pad_target, max_source_positions=self.args.max_source_positions, max_target_positions=self.args.max_target_positions) return self.alter_dataset_langtok(langpair_dataset, src_eos=self.dicts[src].eos(), src_lang=src, tgt_eos=self.dicts[tgt].eos(), tgt_lang=tgt) self.datasets[split] = RoundRobinZipDatasets(OrderedDict([(lang_pair, language_pair_dataset(lang_pair)) for lang_pair in self.lang_pairs]), eval_key=(None if self.training else ('%s-%s' % (self.args.source_lang, self.args.target_lang)))) def build_dataset_for_inference(self, src_tokens, src_lengths): lang_pair = ('%s-%s' % (self.args.source_lang, self.args.target_lang)) return RoundRobinZipDatasets(OrderedDict([(lang_pair, self.alter_dataset_langtok(LanguagePairDataset(src_tokens, src_lengths, self.source_dictionary), src_eos=self.source_dictionary.eos(), src_lang=self.args.source_lang, tgt_eos=self.target_dictionary.eos(), tgt_lang=self.args.target_lang))]), eval_key=lang_pair) def build_model(self, args): def check_args(): messages = [] if (len(set(self.args.lang_pairs).symmetric_difference(args.lang_pairs)) != 0): messages.append('--lang-pairs should include all the language pairs {}.'.format(args.lang_pairs)) if (self.args.encoder_langtok != args.encoder_langtok): messages.append('--encoder-langtok should be {}.'.format(args.encoder_langtok)) if (self.args.decoder_langtok != args.decoder_langtok): messages.append('--decoder-langtok should {} be set.'.format(('' if args.decoder_langtok else 'not'))) if (len(messages) > 0): raise ValueError(' '.join(messages)) check_args() from fairseq import models model = models.build_model(args, self) if (not isinstance(model, FairseqMultiModel)): raise ValueError('MultilingualTranslationTask requires a FairseqMultiModel architecture') return model def train_step(self, sample, model, criterion, optimizer, ignore_grad=False): model.train() (agg_loss, agg_sample_size, agg_logging_output) = (0.0, 0.0, {}) for lang_pair in self.model_lang_pairs: if ((sample[lang_pair] is None) or (len(sample[lang_pair]) == 0)): continue (loss, sample_size, logging_output) = criterion(model.models[lang_pair], sample[lang_pair]) if ignore_grad: loss *= 0 optimizer.backward(loss) agg_loss += loss.detach().item() agg_sample_size += sample_size agg_logging_output[lang_pair] = logging_output return (agg_loss, agg_sample_size, agg_logging_output) def valid_step(self, sample, model, criterion): model.eval() with torch.no_grad(): (agg_loss, agg_sample_size, agg_logging_output) = (0.0, 0.0, {}) for lang_pair in self.eval_lang_pairs: if ((lang_pair not in sample) or (sample[lang_pair] is None) or (len(sample[lang_pair]) == 0)): continue (loss, sample_size, logging_output) = criterion(model.models[lang_pair], sample[lang_pair]) agg_loss += loss.data.item() agg_sample_size += sample_size agg_logging_output[lang_pair] = logging_output return (agg_loss, agg_sample_size, agg_logging_output) def inference_step(self, generator, models, sample, prefix_tokens=None): with torch.no_grad(): return generator.generate(models, sample, prefix_tokens=prefix_tokens, 
bos_token=(_lang_token_index(self.target_dictionary, self.args.target_lang) if self.args.decoder_langtok else self.target_dictionary.eos())) def reduce_metrics(self, logging_outputs, criterion, logging_output_keys=None): logging_output_keys = (logging_output_keys or self.eval_lang_pairs) agg_logging_outputs = {} for key in logging_output_keys: with metrics.aggregate() as agg: logging_outputs_key = [logging_output.get(key, {}) for logging_output in logging_outputs] for k in ['sample_size', 'nsentences', 'ntokens']: metrics.log_scalar(k, sum((l[k] for l in logging_outputs_key))) super().reduce_metrics(logging_outputs_key, criterion) agg_logging_outputs[key] = agg.get_smoothed_values() def sum_over_languages(key): return sum((logging_output[key] for logging_output in agg_logging_outputs.values())) flat_logging_output = {'{}:{}'.format(lang_pair, k): v for (lang_pair, agg_logging_output) in agg_logging_outputs.items() for (k, v) in agg_logging_output.items()} flat_logging_output['loss'] = sum_over_languages('loss') if any((('nll_loss' in logging_output) for logging_output in agg_logging_outputs.values())): flat_logging_output['nll_loss'] = sum_over_languages('nll_loss') flat_logging_output['sample_size'] = sum_over_languages('sample_size') flat_logging_output['nsentences'] = sum_over_languages('nsentences') flat_logging_output['ntokens'] = sum_over_languages('ntokens') return flat_logging_output @property def source_dictionary(self): if self.training: return next(iter(self.dicts.values())) else: return self.dicts[self.args.source_lang] @property def target_dictionary(self): if self.training: return next(iter(self.dicts.values())) else: return self.dicts[self.args.target_lang] def max_positions(self): 'Return the max sentence length allowed by the task.' if (len(self.datasets.values()) == 0): return {('%s-%s' % (self.args.source_lang, self.args.target_lang)): (self.args.max_source_positions, self.args.max_target_positions)} return OrderedDict([(key, (self.args.max_source_positions, self.args.max_target_positions)) for split in self.datasets.keys() for key in self.datasets[split].datasets.keys()])
def _get_bt_dataset_key(lang_pair): return ('bt:' + lang_pair)
def _get_denoising_dataset_key(lang_pair): return ('denoising:' + lang_pair)
def parse_lambda_config(x): '\n Parse the configuration of lambda coefficient (for scheduling).\n x = "3" # lambda will be a constant equal to x\n x = "0:1,1000:0" # lambda will start from 1 and linearly decrease\n # to 0 during the first 1000 iterations\n x = "0:0,1000:0,2000:1" # lambda will be equal to 0 for the first 1000\n # iterations, then will linearly increase to 1 until iteration 2000\n ' split = x.split(',') if (len(split) == 1): return (float(x), None) else: split = [s.split(':') for s in split] assert all(((len(s) == 2) for s in split)) assert all((k.isdigit() for (k, _) in split)) assert all(((int(split[i][0]) < int(split[(i + 1)][0])) for i in range((len(split) - 1)))) return (float(split[0][1]), [(int(k), float(v)) for (k, v) in split])
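# Runnable sketch of the schedule format parsed above: a constant config yields
# (value, None); a piecewise config yields the initial value plus (step, value)
# breakpoints that update_step later interpolates linearly between.
assert parse_lambda_config('3') == (3.0, None)
assert parse_lambda_config('0:1,1000:0') == (1.0, [(0, 1.0), (1000, 0.0)])
# e.g., at update 500 the scheduled value is 1 + (500 - 0) * (0 - 1) / (1000 - 0) = 0.5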
@register_task('semisupervised_translation') class SemisupervisedTranslationTask(MultilingualTranslationTask): 'A task for training multiple translation models simultaneously.\n\n We iterate round-robin over batches from multiple language pairs, ordered\n according to the `--lang-pairs` argument.\n\n The training loop is roughly:\n\n for i in range(len(epoch)):\n for lang_pair in args.lang_pairs:\n batch = next_batch_for_lang_pair(lang_pair)\n loss = criterion(model_for_lang_pair(lang_pair), batch)\n loss.backward()\n optimizer.step()\n\n In practice, `next_batch_for_lang_pair` is abstracted in a FairseqDataset\n (e.g., `RoundRobinZipDatasets`) and `model_for_lang_pair` is a model that\n implements the `FairseqMultiModel` interface.\n\n During inference it is required to specify a single `--source-lang` and\n `--target-lang`, instead of `--lang-pairs`.\n ' @staticmethod def add_args(parser): 'Add task-specific arguments to the parser.' MultilingualTranslationTask.add_args(parser) parser.add_argument('--lambda-parallel-config', default='1.0', type=str, metavar='CONFIG', help='cross-entropy reconstruction coefficient (parallel data). use fixed weight during training if set to floating point number. use piecewise linear function over number of updates to schedule the weight with the format: w0:step0,w1:step1,...') parser.add_argument('--lambda-denoising-config', default='0.0', type=str, metavar='CONFIG', help='cross-entropy reconstruction coefficient (denoising autoencoding). use fixed weight during training if set to floating point number. use piecewise linear function over number of updates to schedule the weight with the format: w0:step0,w1:step1,...') parser.add_argument('--lambda-otf-bt-config', default='0.0', type=str, metavar='CONFIG', help='cross-entropy reconstruction coefficient (on-the-fly back-translation parallel data). use fixed weight during training if set to floating point number. 
use piecewise linear function over number of updates to schedule the weight with the format: w0:step0,w1:step1,...') parser.add_argument('--bt-max-len-a', default=1.1, type=float, metavar='N', help='generate back-translated sequences of maximum length ax + b, where x is the source length') parser.add_argument('--bt-max-len-b', default=10.0, type=float, metavar='N', help='generate back-translated sequences of maximum length ax + b, where x is the source length') parser.add_argument('--bt-beam-size', default=1, type=int, metavar='N', help='beam size used in beam search of online back-translation') parser.add_argument('--max-word-shuffle-distance', default=3.0, type=float, metavar='N', help='maximum word shuffle distance for denoising autoencoding data generation') parser.add_argument('--word-dropout-prob', default=0.1, type=float, metavar='N', help='word dropout probability for denoising autoencoding data generation') parser.add_argument('--word-blanking-prob', default=0.2, type=float, metavar='N', help='word blanking probability for denoising autoencoding data generation') def __init__(self, args, dicts, training): super().__init__(args, dicts, training) (self.lambda_parallel, self.lambda_parallel_steps) = parse_lambda_config(args.lambda_parallel_config) (self.lambda_otf_bt, self.lambda_otf_bt_steps) = parse_lambda_config(args.lambda_otf_bt_config) (self.lambda_denoising, self.lambda_denoising_steps) = parse_lambda_config(args.lambda_denoising_config) if ((self.lambda_denoising > 0.0) or (self.lambda_denoising_steps is not None)): denoising_lang_pairs = [('%s-%s' % (tgt, tgt)) for tgt in {lang_pair.split('-')[1] for lang_pair in args.lang_pairs}] self.model_lang_pairs = (self.model_lang_pairs + denoising_lang_pairs) self.backtranslate_datasets = {} self.backtranslators = {} @classmethod def setup_task(cls, args, **kwargs): (dicts, training) = MultilingualTranslationTask.prepare(args, **kwargs) return cls(args, dicts, training) def load_dataset(self, split, epoch=0, **kwargs): 'Load a dataset split.' 
paths = self.args.data.split(os.pathsep) assert (len(paths) > 0) data_path = paths[(epoch % len(paths))] def split_exists(split, src, tgt, lang): if (src is not None): filename = os.path.join(data_path, '{}.{}-{}.{}'.format(split, src, tgt, lang)) else: filename = os.path.join(data_path, '{}.{}-None.{}'.format(split, tgt, lang)) return indexed_dataset.dataset_exists(filename, impl=self.args.dataset_impl) def load_indexed_dataset(path, dictionary): return data_utils.load_indexed_dataset(path, dictionary, self.args.dataset_impl) (src_datasets, tgt_datasets) = ({}, {}) if ((self.lambda_parallel > 0.0) or (self.lambda_parallel_steps is not None) or (not split.startswith('train'))): for lang_pair in self.lang_pairs: (src, tgt) = lang_pair.split('-') if split_exists(split, src, tgt, src): prefix = os.path.join(data_path, '{}.{}-{}.'.format(split, src, tgt)) elif split_exists(split, tgt, src, src): prefix = os.path.join(data_path, '{}.{}-{}.'.format(split, tgt, src)) else: continue src_datasets[lang_pair] = load_indexed_dataset((prefix + src), self.dicts[src]) tgt_datasets[lang_pair] = load_indexed_dataset((prefix + tgt), self.dicts[tgt]) logger.info('parallel-{} {} {} examples'.format(data_path, split, len(src_datasets[lang_pair]))) if (len(src_datasets) == 0): raise FileNotFoundError('Dataset not found: {} ({})'.format(split, data_path)) backtranslate_datasets = {} if (((self.lambda_otf_bt > 0.0) or (self.lambda_otf_bt_steps is not None)) and split.startswith('train')): for lang_pair in self.lang_pairs: (src, tgt) = lang_pair.split('-') if (not split_exists(split, tgt, None, tgt)): raise FileNotFoundError('Dataset not found: backtranslation {} ({})'.format(split, data_path)) filename = os.path.join(data_path, '{}.{}-None.{}'.format(split, tgt, tgt)) dataset = load_indexed_dataset(filename, self.dicts[tgt]) lang_pair_dataset_tgt = LanguagePairDataset(dataset, dataset.sizes, self.dicts[tgt], left_pad_source=self.args.left_pad_source, left_pad_target=self.args.left_pad_target) lang_pair_dataset = LanguagePairDataset(dataset, dataset.sizes, src_dict=self.dicts[src], tgt=dataset, tgt_sizes=dataset.sizes, tgt_dict=self.dicts[tgt], left_pad_source=self.args.left_pad_source, left_pad_target=self.args.left_pad_target) backtranslate_datasets[lang_pair] = BacktranslationDataset(tgt_dataset=self.alter_dataset_langtok(lang_pair_dataset_tgt, src_eos=self.dicts[tgt].eos(), src_lang=tgt, tgt_lang=src), backtranslation_fn=self.backtranslators[lang_pair], src_dict=self.dicts[src], tgt_dict=self.dicts[tgt], output_collater=self.alter_dataset_langtok(lang_pair_dataset=lang_pair_dataset, src_eos=self.dicts[src].eos(), src_lang=src, tgt_eos=self.dicts[tgt].eos(), tgt_lang=tgt).collater) logger.info('backtranslate-{}: {} {} {} examples'.format(tgt, data_path, split, len(backtranslate_datasets[lang_pair]))) self.backtranslate_datasets[lang_pair] = backtranslate_datasets[lang_pair] noising_datasets = {} if (((self.lambda_denoising > 0.0) or (self.lambda_denoising_steps is not None)) and split.startswith('train')): for lang_pair in self.lang_pairs: (_, tgt) = lang_pair.split('-') if (not split_exists(split, tgt, None, tgt)): continue filename = os.path.join(data_path, '{}.{}-None.{}'.format(split, tgt, tgt)) tgt_dataset1 = load_indexed_dataset(filename, self.dicts[tgt]) tgt_dataset2 = load_indexed_dataset(filename, self.dicts[tgt]) noising_dataset = NoisingDataset(tgt_dataset1, self.dicts[tgt], seed=1, max_word_shuffle_distance=self.args.max_word_shuffle_distance, word_dropout_prob=self.args.word_dropout_prob, 
word_blanking_prob=self.args.word_blanking_prob) noising_datasets[lang_pair] = self.alter_dataset_langtok(LanguagePairDataset(noising_dataset, tgt_dataset1.sizes, self.dicts[tgt], tgt_dataset2, tgt_dataset2.sizes, self.dicts[tgt], left_pad_source=self.args.left_pad_source, left_pad_target=self.args.left_pad_target), src_eos=self.dicts[tgt].eos(), src_lang=tgt, tgt_eos=self.dicts[tgt].eos(), tgt_lang=tgt) logger.info('denoising-{}: {} {} {} examples'.format(tgt, data_path, split, len(noising_datasets[lang_pair]))) def language_pair_dataset(lang_pair): (src, tgt) = lang_pair.split('-') (src_dataset, tgt_dataset) = (src_datasets[lang_pair], tgt_datasets[lang_pair]) return self.alter_dataset_langtok(LanguagePairDataset(src_dataset, src_dataset.sizes, self.dicts[src], tgt_dataset, tgt_dataset.sizes, self.dicts[tgt], left_pad_source=self.args.left_pad_source, left_pad_target=self.args.left_pad_target, max_source_positions=self.args.max_source_positions, max_target_positions=self.args.max_target_positions), self.dicts[src].eos(), src, self.dicts[tgt].eos(), tgt) self.datasets[split] = RoundRobinZipDatasets(OrderedDict((([(lang_pair, language_pair_dataset(lang_pair)) for lang_pair in src_datasets.keys()] + [(_get_bt_dataset_key(lang_pair), dataset) for (lang_pair, dataset) in backtranslate_datasets.items()]) + [(_get_denoising_dataset_key(lang_pair), dataset) for (lang_pair, dataset) in noising_datasets.items()])), eval_key=(None if self.training else ('%s-%s' % (self.args.source_lang, self.args.target_lang)))) def build_model(self, args): from fairseq import models model = models.build_model(args, self) if (not isinstance(model, FairseqMultiModel)): raise ValueError('SemisupervisedTranslationTask requires a FairseqMultiModel architecture') self.sequence_generators = {} if (((self.lambda_otf_bt > 0.0) or (self.lambda_otf_bt_steps is not None)) and self.training): for lang_pair in self.lang_pairs: (src, tgt) = lang_pair.split('-') key = '{}-{}'.format(tgt, src) self.sequence_generators[key] = SequenceGenerator(tgt_dict=self.dicts[src], beam_size=args.bt_beam_size, max_len_a=args.bt_max_len_a, max_len_b=args.bt_max_len_b) decoder_lang_tok_idx = self.get_decoder_langtok(src) def backtranslate_fn(sample, model=model.models[key], bos_token=decoder_lang_tok_idx, sequence_generator=self.sequence_generators[key]): return sequence_generator.generate([model], sample, bos_token=bos_token) self.backtranslators[lang_pair] = backtranslate_fn return model def train_step(self, sample, model, criterion, optimizer, ignore_grad=False): model.train() (agg_loss, agg_sample_size, agg_logging_output) = (0.0, 0.0, {}) def forward_backward(model, samples, logging_output_key, weight): nonlocal agg_loss, agg_sample_size, agg_logging_output if ((samples is None) or (len(samples) == 0)): return (loss, sample_size, logging_output) = criterion(model, samples) if ignore_grad: loss *= 0 else: loss *= weight optimizer.backward(loss) agg_loss += loss.detach().item() agg_sample_size += sample_size agg_logging_output[logging_output_key] = logging_output if (self.lambda_parallel > 0.0): for lang_pair in self.lang_pairs: forward_backward(model.models[lang_pair], sample[lang_pair], lang_pair, self.lambda_parallel) if (self.lambda_otf_bt > 0.0): for lang_pair in self.lang_pairs: sample_key = _get_bt_dataset_key(lang_pair) forward_backward(model.models[lang_pair], sample[sample_key], sample_key, self.lambda_otf_bt) if (self.lambda_denoising > 0.0): for lang_pair in self.lang_pairs: (_, tgt) = lang_pair.split('-') sample_key = 
_get_denoising_dataset_key(lang_pair) forward_backward(model.models['{0}-{0}'.format(tgt)], sample[sample_key], sample_key, self.lambda_denoising) return (agg_loss, agg_sample_size, agg_logging_output) def update_step(self, num_updates): def lambda_step_func(config, n_iter): '\n Update a lambda value according to its schedule configuration.\n ' ranges = [i for i in range((len(config) - 1)) if (config[i][0] <= n_iter < config[(i + 1)][0])] if (len(ranges) == 0): assert (n_iter >= config[(- 1)][0]) return config[(- 1)][1] assert (len(ranges) == 1) i = ranges[0] (x_a, y_a) = config[i] (x_b, y_b) = config[(i + 1)] return (y_a + (((n_iter - x_a) * float((y_b - y_a))) / float((x_b - x_a)))) if (self.lambda_parallel_steps is not None): self.lambda_parallel = lambda_step_func(self.lambda_parallel_steps, num_updates) if (self.lambda_denoising_steps is not None): self.lambda_denoising = lambda_step_func(self.lambda_denoising_steps, num_updates) if (self.lambda_otf_bt_steps is not None): self.lambda_otf_bt = lambda_step_func(self.lambda_otf_bt_steps, num_updates) def aggregate_logging_outputs(self, logging_outputs, criterion): logging_output_keys = {key for logging_output in logging_outputs for key in logging_output} lang_pair_keys = set(((self.lang_pairs + [_get_bt_dataset_key(lang_pair) for lang_pair in self.lang_pairs]) + [_get_denoising_dataset_key(lang_pair) for lang_pair in self.lang_pairs])) logging_output_keys = logging_output_keys.intersection(lang_pair_keys) return super().aggregate_logging_outputs(logging_outputs, criterion, logging_output_keys)
@register_task('sentence_prediction') class SentencePredictionTask(FairseqTask): '\n Sentence (or sentence pair) prediction (classification or regression) task.\n\n Args:\n dictionary (Dictionary): the dictionary for the input of the task\n ' @staticmethod def add_args(parser): 'Add task-specific arguments to the parser.' parser.add_argument('data', metavar='FILE', help='file prefix for data') parser.add_argument('--num-classes', type=int, default=(- 1), help='number of classes') parser.add_argument('--init-token', type=int, default=None, help='add token at the beginning of each batch item') parser.add_argument('--separator-token', type=int, default=None, help='add separator token between inputs') parser.add_argument('--regression-target', action='store_true', default=False) parser.add_argument('--no-shuffle', action='store_true', default=False) parser.add_argument('--truncate-sequence', action='store_true', default=False, help='truncate sequence to max-positions') parser.add_argument('--add-prev-output-tokens', action='store_true', default=False, help='add prev_output_tokens to sample, used for encoder-decoder arch') def __init__(self, args, data_dictionary, label_dictionary): super().__init__(args) self.dictionary = data_dictionary self._label_dictionary = label_dictionary if (not hasattr(args, 'max_positions')): self._max_positions = (args.max_source_positions, args.max_target_positions) else: self._max_positions = args.max_positions args.tokens_per_sample = self._max_positions @classmethod def load_dictionary(cls, args, filename, source=True): 'Load the dictionary from the filename\n\n Args:\n filename (str): the filename\n ' dictionary = Dictionary.load(filename) dictionary.add_symbol('<mask>') return dictionary @classmethod def setup_task(cls, args, **kwargs): assert (args.num_classes > 0), 'Must set --num-classes' data_dict = cls.load_dictionary(args, os.path.join(args.data, 'input0', 'dict.txt'), source=True) logger.info('[input] dictionary: {} types'.format(len(data_dict))) label_dict = None if (not args.regression_target): label_dict = cls.load_dictionary(args, os.path.join(args.data, 'label', 'dict.txt'), source=False) logger.info('[label] dictionary: {} types'.format(len(label_dict))) else: label_dict = data_dict return SentencePredictionTask(args, data_dict, label_dict) def load_dataset(self, split, combine=False, **kwargs): 'Load a given dataset split (e.g., train, valid, test).' 
def get_path(type, split): return os.path.join(self.args.data, type, split) def make_dataset(type, dictionary): split_path = get_path(type, split) dataset = data_utils.load_indexed_dataset(split_path, dictionary, self.args.dataset_impl, combine=combine) return dataset input0 = make_dataset('input0', self.source_dictionary) assert (input0 is not None), 'could not find dataset: {}'.format(get_path('input0', split)) input1 = make_dataset('input1', self.source_dictionary) if (self.args.init_token is not None): input0 = PrependTokenDataset(input0, self.args.init_token) if (input1 is None): src_tokens = input0 else: if (self.args.separator_token is not None): input1 = PrependTokenDataset(input1, self.args.separator_token) src_tokens = ConcatSentencesDataset(input0, input1) with data_utils.numpy_seed(self.args.seed): shuffle = np.random.permutation(len(src_tokens)) if self.args.truncate_sequence: src_tokens = TruncateDataset(src_tokens, self.args.max_positions) dataset = {'id': IdDataset(), 'net_input': {'src_tokens': RightPadDataset(src_tokens, pad_idx=self.source_dictionary.pad()), 'src_lengths': NumelDataset(src_tokens, reduce=False)}, 'nsentences': NumSamplesDataset(), 'ntokens': NumelDataset(src_tokens, reduce=True)} if self.args.add_prev_output_tokens: prev_tokens_dataset = RightPadDataset(RollDataset(src_tokens, 1), pad_idx=self.dictionary.pad()) dataset['net_input'].update(prev_output_tokens=prev_tokens_dataset) if (not self.args.regression_target): label_dataset = make_dataset('label', self.label_dictionary) if (label_dataset is not None): dataset.update(target=OffsetTokensDataset(StripTokenDataset(label_dataset, id_to_strip=self.label_dictionary.eos()), offset=(- self.label_dictionary.nspecial))) else: label_path = '{0}.label'.format(get_path('label', split)) if os.path.exists(label_path): with open(label_path) as h: dataset.update(target=RawLabelDataset([float(x.strip()) for x in h.readlines()])) nested_dataset = NestedDictionaryDataset(dataset, sizes=[src_tokens.sizes]) if self.args.no_shuffle: dataset = nested_dataset else: dataset = SortDataset(nested_dataset, sort_order=[shuffle]) logger.info('Loaded {0} with #samples: {1}'.format(split, len(dataset))) self.datasets[split] = dataset return self.datasets[split] def build_model(self, args): from fairseq import models model = models.build_model(args, self) model.register_classification_head(getattr(args, 'classification_head_name', 'sentence_classification_head'), num_classes=self.args.num_classes) return model def max_positions(self): return self._max_positions @property def source_dictionary(self): return self.dictionary @property def target_dictionary(self): return self.dictionary @property def label_dictionary(self): return self._label_dictionary
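# Illustrative sketch of the label handling above: label-dictionary entries sit
# after the special symbols, so OffsetTokensDataset shifts the stripped label
# indices by -nspecial to obtain 0-based class ids. With the default Dictionary
# (4 specials: <s>, <pad>, </s>, <unk>) and labels '0' and '1':
nspecial = 4                       # Dictionary.nspecial for the default specials
label_index = 5                    # hypothetical dictionary index of label '1'
class_id = label_index - nspecial  # -> 1, the integer target the criterion sees
assert class_id == 1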
@register_task('sentence_ranking') class SentenceRankingTask(FairseqTask): '\n Ranking task on multiple sentences.\n\n Args:\n dictionary (Dictionary): the dictionary for the input of the task\n ' @staticmethod def add_args(parser): 'Add task-specific arguments to the parser.' parser.add_argument('data', metavar='FILE', help='file prefix for data') parser.add_argument('--num-classes', type=int, help='number of sentences to be ranked') parser.add_argument('--init-token', type=int, help='add token at the beginning of each batch item') parser.add_argument('--separator-token', type=int, help='add separator token between inputs') parser.add_argument('--no-shuffle', action='store_true') parser.add_argument('--truncate-sequence', action='store_true', help='Truncate sequence to max_positions') parser.add_argument('--max-option-length', type=int, help='max length for each option') def __init__(self, args, dictionary): super().__init__(args) self.dictionary = dictionary @classmethod def load_dictionary(cls, args, filename, source=True): 'Load the dictionary from the filename\n\n Args:\n filename (str): the filename\n ' dictionary = Dictionary.load(filename) dictionary.add_symbol('<mask>') return dictionary @classmethod def setup_task(cls, args, **kwargs): assert (args.criterion == 'sentence_ranking'), 'Must set --criterion=sentence_ranking' data_dict = cls.load_dictionary(args, os.path.join(args.data, 'input0', 'dict.txt'), source=True) logger.info('[input] dictionary: {} types'.format(len(data_dict))) return SentenceRankingTask(args, data_dict) def load_dataset(self, split, combine=False, **kwargs): 'Load a given dataset split (e.g., train, valid, test).' def get_path(type, split): return os.path.join(self.args.data, type, split) def make_dataset(type, dictionary): split_path = get_path(type, split) dataset = data_utils.load_indexed_dataset(split_path, self.source_dictionary, self.args.dataset_impl, combine=combine) return dataset input0 = make_dataset('input0', self.source_dictionary) input_options = [make_dataset('input{idx}'.format(idx=(idx + 1)), self.source_dictionary) for idx in range(self.args.num_classes)] if (self.args.separator_token is not None): input0 = PrependTokenDataset(input0, self.args.separator_token) src_tokens = [] for input_option in input_options: if (self.args.init_token is not None): input_option = PrependTokenDataset(input_option, self.args.init_token) if (self.args.max_option_length is not None): input_option = TruncateDataset(input_option, self.args.max_option_length) src_token = ConcatSentencesDataset(input_option, input0) if self.args.truncate_sequence: src_token = TruncateDataset(src_token, self.args.max_positions) src_tokens.append(src_token) with data_utils.numpy_seed(self.args.seed): shuffle = np.random.permutation(len(src_tokens[0])) dataset = {'id': IdDataset(), 'nsentences': NumSamplesDataset(), 'ntokens': NumelDataset(src_tokens[0], reduce=True)} for src_token_idx in range(len(src_tokens)): dataset.update({'net_input{idx}'.format(idx=(src_token_idx + 1)): {'src_tokens': RightPadDataset(src_tokens[src_token_idx], pad_idx=self.source_dictionary.pad()), 'src_lengths': NumelDataset(src_tokens[src_token_idx], reduce=False)}}) label_path = '{}.label'.format(get_path('label', split)) if os.path.exists(label_path): with open(label_path) as h: dataset.update(target=RawLabelDataset([int(x.strip()) for x in h.readlines()])) nested_dataset = NestedDictionaryDataset(dataset, sizes=[np.maximum.reduce([src_token.sizes for src_token in src_tokens])]) if self.args.no_shuffle: 
dataset = nested_dataset else: dataset = SortDataset(nested_dataset, sort_order=[shuffle]) logger.info('Loaded {0} with #samples: {1}'.format(split, len(dataset))) self.datasets[split] = dataset return self.datasets[split] def build_model(self, args): from fairseq import models model = models.build_model(args, self) model.register_classification_head(getattr(args, 'ranking_head_name', 'sentence_classification_head'), num_classes=1) return model def max_positions(self): return self.args.max_positions @property def source_dictionary(self): return self.dictionary @property def target_dictionary(self): return self.dictionary
def load_langpair_dataset(data_path, split, src, src_dict, tgt, tgt_dict, combine, dataset_impl, upsample_primary, left_pad_source, left_pad_target, max_source_positions, max_target_positions, prepend_bos=False, load_alignments=False, truncate_source=False): def split_exists(split, src, tgt, lang, data_path): filename = os.path.join(data_path, '{}.{}-{}.{}'.format(split, src, tgt, lang)) return indexed_dataset.dataset_exists(filename, impl=dataset_impl) src_datasets = [] tgt_datasets = [] for k in itertools.count(): split_k = (split + (str(k) if (k > 0) else '')) if split_exists(split_k, src, tgt, src, data_path): prefix = os.path.join(data_path, '{}.{}-{}.'.format(split_k, src, tgt)) elif split_exists(split_k, tgt, src, src, data_path): prefix = os.path.join(data_path, '{}.{}-{}.'.format(split_k, tgt, src)) elif (k > 0): break else: raise FileNotFoundError('Dataset not found: {} ({})'.format(split, data_path)) src_dataset = data_utils.load_indexed_dataset((prefix + src), src_dict, dataset_impl) if truncate_source: src_dataset = AppendTokenDataset(TruncateDataset(StripTokenDataset(src_dataset, src_dict.eos()), (max_source_positions - 1)), src_dict.eos()) src_datasets.append(src_dataset) tgt_datasets.append(data_utils.load_indexed_dataset((prefix + tgt), tgt_dict, dataset_impl)) logger.info('{} {} {}-{} {} examples'.format(data_path, split_k, src, tgt, len(src_datasets[(- 1)]))) if (not combine): break assert (len(src_datasets) == len(tgt_datasets)) if (len(src_datasets) == 1): (src_dataset, tgt_dataset) = (src_datasets[0], tgt_datasets[0]) else: sample_ratios = ([1] * len(src_datasets)) sample_ratios[0] = upsample_primary src_dataset = ConcatDataset(src_datasets, sample_ratios) tgt_dataset = ConcatDataset(tgt_datasets, sample_ratios) if prepend_bos: assert (hasattr(src_dict, 'bos_index') and hasattr(tgt_dict, 'bos_index')) src_dataset = PrependTokenDataset(src_dataset, src_dict.bos()) tgt_dataset = PrependTokenDataset(tgt_dataset, tgt_dict.bos()) align_dataset = None if load_alignments: align_path = os.path.join(data_path, '{}.align.{}-{}'.format(split, src, tgt)) if indexed_dataset.dataset_exists(align_path, impl=dataset_impl): align_dataset = data_utils.load_indexed_dataset(align_path, None, dataset_impl) return LanguagePairDataset(src_dataset, src_dataset.sizes, src_dict, tgt_dataset, tgt_dataset.sizes, tgt_dict, left_pad_source=left_pad_source, left_pad_target=left_pad_target, max_source_positions=max_source_positions, max_target_positions=max_target_positions, align_dataset=align_dataset)
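# Illustrative sketch (hypothetical paths) of the on-disk layout that
# load_langpair_dataset above expects: binarized files named
# '{split}.{src}-{tgt}.{lang}', probed in either language-pair order, with
# numbered shards 'train1', 'train2', ... consumed when combine=True.
#
#   data-bin/train.de-en.de.{bin,idx}     # source side
#   data-bin/train.de-en.en.{bin,idx}     # target side
#   data-bin/train1.de-en.de.{bin,idx}    # optional extra shard (combine=True)
#   data-bin/train.align.de-en.{bin,idx}  # optional alignments (--load-alignments)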
@register_task('translation') class TranslationTask(FairseqTask): '\n Translate from one (source) language to another (target) language.\n\n Args:\n src_dict (~fairseq.data.Dictionary): dictionary for the source language\n tgt_dict (~fairseq.data.Dictionary): dictionary for the target language\n\n .. note::\n\n The translation task is compatible with :mod:`fairseq-train`,\n :mod:`fairseq-generate` and :mod:`fairseq-interactive`.\n\n The translation task provides the following additional command-line\n arguments:\n\n .. argparse::\n :ref: fairseq.tasks.translation_parser\n :prog:\n ' @staticmethod def add_args(parser): 'Add task-specific arguments to the parser.' parser.add_argument('data', help='colon separated path to data directories list, will be iterated upon during epochs in round-robin manner') parser.add_argument('-s', '--source-lang', default=None, metavar='SRC', help='source language') parser.add_argument('-t', '--target-lang', default=None, metavar='TARGET', help='target language') parser.add_argument('--load-alignments', action='store_true', help='load the binarized alignments') parser.add_argument('--left-pad-source', default='True', type=str, metavar='BOOL', help='pad the source on the left') parser.add_argument('--left-pad-target', default='False', type=str, metavar='BOOL', help='pad the target on the left') parser.add_argument('--max-source-positions', default=1024, type=int, metavar='N', help='max number of tokens in the source sequence') parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N', help='max number of tokens in the target sequence') parser.add_argument('--upsample-primary', default=1, type=int, help='amount to upsample primary dataset') parser.add_argument('--truncate-source', action='store_true', default=False, help='truncate source to max-source-positions') parser.add_argument('--eval-bleu', action='store_true', help='evaluation with BLEU scores') parser.add_argument('--eval-bleu-detok', type=str, default='space', help='detokenizer before computing BLEU (e.g., "moses"); required if using --eval-bleu; use "space" to disable detokenization; see fairseq.data.encoders for other options') parser.add_argument('--eval-bleu-detok-args', type=str, metavar='JSON', help='args for building the tokenizer, if needed') parser.add_argument('--eval-tokenized-bleu', action='store_true', default=False, help='if set, compute tokenized BLEU instead of sacrebleu') parser.add_argument('--eval-bleu-remove-bpe', nargs='?', const='@@ ', default=None, help='remove BPE before computing BLEU') parser.add_argument('--eval-bleu-args', type=str, metavar='JSON', help='generation args for BLEU scoring, e.g., \'{"beam": 4, "lenpen": 0.6}\'') parser.add_argument('--eval-bleu-print-samples', action='store_true', help='print sample generations during validation') def __init__(self, args, src_dict, tgt_dict): super().__init__(args) self.src_dict = src_dict self.tgt_dict = tgt_dict @classmethod def setup_task(cls, args, **kwargs): 'Setup the task (e.g., load dictionaries).\n\n Args:\n args (argparse.Namespace): parsed command-line arguments\n ' args.left_pad_source = options.eval_bool(args.left_pad_source) args.left_pad_target = options.eval_bool(args.left_pad_target) paths = args.data.split(os.pathsep) assert (len(paths) > 0) if ((args.source_lang is None) or (args.target_lang is None)): (args.source_lang, args.target_lang) = data_utils.infer_language_pair(paths[0]) if ((args.source_lang is None) or (args.target_lang is None)): raise Exception('Could not infer 
language pair, please provide it explicitly') src_dict = cls.load_dictionary(os.path.join(paths[0], 'dict.{}.txt'.format(args.source_lang))) tgt_dict = cls.load_dictionary(os.path.join(paths[0], 'dict.{}.txt'.format(args.target_lang))) assert (src_dict.pad() == tgt_dict.pad()) assert (src_dict.eos() == tgt_dict.eos()) assert (src_dict.unk() == tgt_dict.unk()) logger.info('[{}] dictionary: {} types'.format(args.source_lang, len(src_dict))) logger.info('[{}] dictionary: {} types'.format(args.target_lang, len(tgt_dict))) return cls(args, src_dict, tgt_dict) def load_dataset(self, split, epoch=0, combine=False, **kwargs): 'Load a given dataset split.\n\n Args:\n split (str): name of the split (e.g., train, valid, test)\n ' paths = self.args.data.split(os.pathsep) assert (len(paths) > 0) data_path = paths[(epoch % len(paths))] (src, tgt) = (self.args.source_lang, self.args.target_lang) self.datasets[split] = load_langpair_dataset(data_path, split, src, self.src_dict, tgt, self.tgt_dict, combine=combine, dataset_impl=self.args.dataset_impl, upsample_primary=self.args.upsample_primary, left_pad_source=self.args.left_pad_source, left_pad_target=self.args.left_pad_target, max_source_positions=self.args.max_source_positions, max_target_positions=self.args.max_target_positions, load_alignments=self.args.load_alignments, truncate_source=self.args.truncate_source) def build_dataset_for_inference(self, src_tokens, src_lengths): return LanguagePairDataset(src_tokens, src_lengths, self.source_dictionary) def build_model(self, args): if getattr(args, 'eval_bleu', False): assert (getattr(args, 'eval_bleu_detok', None) is not None), '--eval-bleu-detok is required if using --eval-bleu; try --eval-bleu-detok=moses (or --eval-bleu-detok=space to disable detokenization, e.g., when using sentencepiece)' detok_args = json.loads((getattr(args, 'eval_bleu_detok_args', '{}') or '{}')) self.tokenizer = encoders.build_tokenizer(Namespace(tokenizer=getattr(args, 'eval_bleu_detok', None), **detok_args)) gen_args = json.loads((getattr(args, 'eval_bleu_args', '{}') or '{}')) self.sequence_generator = self.build_generator(Namespace(**gen_args)) return super().build_model(args) def valid_step(self, sample, model, criterion): (loss, sample_size, logging_output) = super().valid_step(sample, model, criterion) if self.args.eval_bleu: bleu = self._inference_with_bleu(self.sequence_generator, sample, model) logging_output['_bleu_sys_len'] = bleu.sys_len logging_output['_bleu_ref_len'] = bleu.ref_len assert (len(bleu.counts) == EVAL_BLEU_ORDER) for i in range(EVAL_BLEU_ORDER): logging_output[('_bleu_counts_' + str(i))] = bleu.counts[i] logging_output[('_bleu_totals_' + str(i))] = bleu.totals[i] return (loss, sample_size, logging_output) def reduce_metrics(self, logging_outputs, criterion): super().reduce_metrics(logging_outputs, criterion) if self.args.eval_bleu: def sum_logs(key): return sum((log.get(key, 0) for log in logging_outputs)) (counts, totals) = ([], []) for i in range(EVAL_BLEU_ORDER): counts.append(sum_logs(('_bleu_counts_' + str(i)))) totals.append(sum_logs(('_bleu_totals_' + str(i)))) if (max(totals) > 0): metrics.log_scalar('_bleu_counts', np.array(counts)) metrics.log_scalar('_bleu_totals', np.array(totals)) metrics.log_scalar('_bleu_sys_len', sum_logs('_bleu_sys_len')) metrics.log_scalar('_bleu_ref_len', sum_logs('_bleu_ref_len')) def compute_bleu(meters): import inspect import sacrebleu fn_sig = inspect.getfullargspec(sacrebleu.compute_bleu)[0] if ('smooth_method' in fn_sig): smooth = {'smooth_method': 'exp'} 
else: smooth = {'smooth': 'exp'} bleu = sacrebleu.compute_bleu(correct=meters['_bleu_counts'].sum, total=meters['_bleu_totals'].sum, sys_len=meters['_bleu_sys_len'].sum, ref_len=meters['_bleu_ref_len'].sum, **smooth) return round(bleu.score, 2) metrics.log_derived('bleu', compute_bleu) def max_positions(self): 'Return the max sentence length allowed by the task.' return (self.args.max_source_positions, self.args.max_target_positions) @property def source_dictionary(self): 'Return the source :class:`~fairseq.data.Dictionary`.' return self.src_dict @property def target_dictionary(self): 'Return the target :class:`~fairseq.data.Dictionary`.' return self.tgt_dict def _inference_with_bleu(self, generator, sample, model): import sacrebleu def decode(toks, escape_unk=False): s = self.tgt_dict.string(toks.int().cpu(), self.args.eval_bleu_remove_bpe, escape_unk=escape_unk) if self.tokenizer: s = self.tokenizer.decode(s) return s gen_out = self.inference_step(generator, [model], sample, None) (hyps, refs) = ([], []) for i in range(len(gen_out)): hyps.append(decode(gen_out[i][0]['tokens'])) refs.append(decode(utils.strip_pad(sample['target'][i], self.tgt_dict.pad()), escape_unk=True)) if self.args.eval_bleu_print_samples: logger.info(('example hypothesis: ' + hyps[0])) logger.info(('example reference: ' + refs[0])) tokenize = (sacrebleu.DEFAULT_TOKENIZER if (not self.args.eval_tokenized_bleu) else 'none') return sacrebleu.corpus_bleu(hyps, [refs], tokenize=tokenize)
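# --- Illustration (not part of the library): compute_bleu above inspects
# sacrebleu.compute_bleu's signature so it works with both the old ('smooth')
# and new ('smooth_method') keyword names. A minimal self-contained
# restatement of that compatibility pattern, with toy stand-in functions:
import inspect

def _old_api(smooth): return smooth
def _new_api(smooth_method): return smooth_method

def _call_with_smoothing(fn, value):
    # Pick whichever keyword the callee actually accepts.
    params = inspect.getfullargspec(fn)[0]
    kw = 'smooth_method' if 'smooth_method' in params else 'smooth'
    return fn(**{kw: value})

assert _call_with_smoothing(_old_api, 'exp') == 'exp'
assert _call_with_smoothing(_new_api, 'exp') == 'exp'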
@register_task('translation_from_pretrained_xlm') class TranslationFromPretrainedXLMTask(TranslationTask): '\n Same as TranslationTask except use the MaskedLMDictionary class so that\n we can load data that was binarized with the MaskedLMDictionary class.\n\n This task should be used for the entire training pipeline when we want to\n train an NMT model from a pretrained XLM checkpoint: binarizing NMT data,\n training NMT with the pretrained XLM checkpoint, and subsequent evaluation\n of that trained model.\n ' @classmethod def load_dictionary(cls, filename): 'Load the masked LM dictionary from the filename\n\n Args:\n filename (str): the filename\n ' return MaskedLMDictionary.load(filename)
def tokenize_line(line): line = SPACE_NORMALIZER.sub(' ', line) line = line.strip() return line.split()
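# --- Note (assumption): tokenize_line relies on a module-level
# SPACE_NORMALIZER regex defined elsewhere; a whitespace-collapsing pattern
# like the one below is consistent with how it is used. Self-contained sketch:
import re
SPACE_NORMALIZER = re.compile(r'\s+')  # assumption: collapses runs of whitespace

def _tokenize_line(line):
    line = SPACE_NORMALIZER.sub(' ', line)
    line = line.strip()
    return line.split()

assert _tokenize_line('  hello\tworld \n') == ['hello', 'world']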
class Trainer(object): 'Main class for data parallel training.\n\n This class supports synchronous distributed data parallel training,\n where multiple workers each have a full model replica and gradients\n are accumulated across workers before each update. We use\n :class:`~torch.nn.parallel.DistributedDataParallel` to handle\n communication of the gradients across workers.\n ' def __init__(self, args, task, model, criterion, dummy_batch=None, oom_batch=None): self.args = args self.task = task self._criterion = criterion self._model = model self.cuda = (torch.cuda.is_available() and (not args.cpu)) if args.fp16: self._criterion = self._criterion.half() self._model = self._model.half() if self.cuda: self._criterion = self._criterion.cuda() self._model = self._model.cuda() self._dummy_batch = dummy_batch self._oom_batch = (oom_batch or dummy_batch) self._lr_scheduler = None self._num_updates = 0 self._optim_history = None self._optimizer = None self._warn_once = set() self._wrapped_criterion = None self._wrapped_model = None if (self.cuda and (args.distributed_world_size > 1)): self._grad_norm_buf = torch.cuda.DoubleTensor(args.distributed_world_size) else: self._grad_norm_buf = None metrics.log_start_time('wall', priority=790, round=0) @property def criterion(self): if (self._wrapped_criterion is None): if (utils.has_parameters(self._criterion) and (self.args.distributed_world_size > 1) and (not self.args.use_bmuf)): self._wrapped_criterion = models.DistributedFairseqModel(self.args, self._criterion) else: self._wrapped_criterion = self._criterion return self._wrapped_criterion @property def model(self): if (self._wrapped_model is None): if ((self.args.distributed_world_size > 1) and (not self.args.use_bmuf)): self._wrapped_model = models.DistributedFairseqModel(self.args, self._model) else: self._wrapped_model = self._model return self._wrapped_model @property def optimizer(self): if (self._optimizer is None): self._build_optimizer() return self._optimizer @property def lr_scheduler(self): if (self._lr_scheduler is None): self._build_optimizer() return self._lr_scheduler def _build_optimizer(self): params = list(filter((lambda p: p.requires_grad), chain(self.model.parameters(), self.criterion.parameters()))) if self.args.fp16: if (self.cuda and (torch.cuda.get_device_capability(0)[0] < 7)): logger.info('NOTE: your device does NOT support faster training with --fp16, please switch to FP32 which is likely to be faster') if self.args.memory_efficient_fp16: self._optimizer = optim.MemoryEfficientFP16Optimizer.build_optimizer(self.args, params) else: self._optimizer = optim.FP16Optimizer.build_optimizer(self.args, params) else: if (self.cuda and (torch.cuda.get_device_capability(0)[0] >= 7)): logger.info('NOTE: your device may support faster training with --fp16') self._optimizer = optim.build_optimizer(self.args, params) if self.args.use_bmuf: self._optimizer = optim.FairseqBMUF(self.args, self._optimizer) self._lr_scheduler = lr_scheduler.build_lr_scheduler(self.args, self.optimizer) self._lr_scheduler.step_update(0) def save_checkpoint(self, filename, extra_state): 'Save all training state in a checkpoint file.' 
if distributed_utils.is_master(self.args): extra_state['metrics'] = metrics.state_dict() checkpoint_utils.save_state(filename, self.args, self.get_model().state_dict(), self.get_criterion(), self.optimizer, self.lr_scheduler, self.get_num_updates(), self._optim_history, extra_state) def load_checkpoint(self, filename, reset_optimizer=False, reset_lr_scheduler=False, optimizer_overrides=None, reset_meters=False): 'Load all training state from a checkpoint file.' (extra_state, self._optim_history, last_optim_state) = (None, [], None) bexists = PathManager.isfile(filename) if bexists: state = checkpoint_utils.load_checkpoint_to_cpu(filename) try: self.get_model().load_state_dict(state['model'], strict=True, args=self.args) if utils.has_parameters(self.get_criterion()): self.get_criterion().load_state_dict(state['criterion'], strict=True) except Exception: raise Exception('Cannot load model parameters from checkpoint {}; please ensure that the architectures match.'.format(filename)) extra_state = state['extra_state'] self._optim_history = state['optimizer_history'] last_optim_state = state.get('last_optimizer_state', None) if ((last_optim_state is not None) and (not reset_optimizer)): self._build_optimizer() last_optim = self._optim_history[(- 1)] assert (last_optim['criterion_name'] == self.get_criterion().__class__.__name__), 'Criterion does not match; please reset the optimizer (--reset-optimizer).' assert (last_optim['optimizer_name'] == self.optimizer.__class__.__name__), 'Optimizer does not match; please reset the optimizer (--reset-optimizer).' if (not reset_lr_scheduler): self.lr_scheduler.load_state_dict(last_optim['lr_scheduler_state']) self.optimizer.load_state_dict(last_optim_state, optimizer_overrides) self.set_num_updates(last_optim['num_updates']) if (extra_state is not None): epoch = extra_state['train_iterator']['epoch'] logger.info('loaded checkpoint {} (epoch {} @ {} updates)'.format(filename, epoch, self.get_num_updates())) self.lr_step(epoch) if (('metrics' in extra_state) and (not reset_meters)): metrics.load_state_dict(extra_state['metrics']) for meter in metrics.get_meters('default'): if isinstance(meter, TimeMeter): meter.reset() else: logger.info('no existing checkpoint found {}'.format(filename)) return extra_state def get_train_iterator(self, epoch, combine=True, load_dataset=True, data_selector=None, shard_batch_itr=True): 'Return an EpochBatchIterator over the training set for a given epoch.' if load_dataset: logger.info('loading train data for epoch {}'.format(epoch)) self.task.load_dataset(self.args.train_subset, epoch=epoch, combine=combine, data_selector=data_selector) return self.task.get_batch_iterator(dataset=self.task.dataset(self.args.train_subset), max_tokens=self.args.max_tokens, max_sentences=self.args.max_sentences, max_positions=utils.resolve_max_positions(self.task.max_positions(), self.model.max_positions(), self.args.max_tokens), ignore_invalid_inputs=True, required_batch_size_multiple=self.args.required_batch_size_multiple, seed=self.args.seed, num_shards=(self.args.distributed_world_size if shard_batch_itr else 1), shard_id=(self.args.distributed_rank if shard_batch_itr else 0), num_workers=self.args.num_workers, epoch=epoch) @metrics.aggregate('train') def train_step(self, samples, dummy_batch=False, raise_oom=False): 'Do forward, backward and parameter update.' 
if (self._dummy_batch is None): self._dummy_batch = samples[0] self._set_seed() self.model.train() self.criterion.train() self.zero_grad() if (not dummy_batch): metrics.log_start_time('train_wall', priority=800, round=0) (logging_outputs, sample_size, ooms) = ([], 0, 0) for (i, sample) in enumerate(samples): sample = self._prepare_sample(sample) if (sample is None): sample = self._prepare_sample(self._dummy_batch) ignore_grad = True else: ignore_grad = False def maybe_no_sync(): '\n Whenever *samples* contains more than one mini-batch, we\n want to accumulate gradients locally and only call\n all-reduce in the last backwards pass.\n ' if ((self.args.distributed_world_size > 1) and hasattr(self.model, 'no_sync') and (i < (len(samples) - 1))): return self.model.no_sync() else: return contextlib.ExitStack() try: with maybe_no_sync(): (loss, sample_size_i, logging_output) = self.task.train_step(sample, self.model, self.criterion, self.optimizer, ignore_grad) del loss if (not ignore_grad): logging_outputs.append(logging_output) sample_size += sample_size_i if (self.cuda and (self.get_num_updates() == 0)): torch.cuda.empty_cache() except RuntimeError as e: if ('out of memory' in str(e)): self._log_oom(e) if raise_oom: raise e logger.warning('ran out of memory in forward/backward pass') ooms += 1 self.zero_grad() logger.error('OOM recovery is disabled in this trainer; re-raising the exception') raise e else: raise e if ((ooms > 0) and (self._oom_batch is not None)): self.handle_ooms(ooms) if dummy_batch: return None if self._sync_stats(): (logging_outputs, sample_size, ooms) = self._aggregate_logging_outputs(logging_outputs, sample_size, ooms) metrics.log_scalar('oom', ooms, len(samples), priority=600, round=3) if (ooms == (self.args.distributed_world_size * len(samples))): logger.warning('OOM in all workers, skipping update') self.zero_grad() return None try: if (sample_size > 0): if self._sync_stats(): self.optimizer.multiply_grads((self.args.distributed_world_size / sample_size)) else: self.optimizer.multiply_grads((1 / sample_size)) grad_norm = self.optimizer.clip_grad_norm(self.args.clip_norm) if (not self.args.use_bmuf): self._check_grad_norms(grad_norm) self.optimizer.step() self.set_num_updates((self.get_num_updates() + 1)) self.task.update_step(self.get_num_updates()) logging_output = self._reduce_and_log_stats(logging_outputs, sample_size) metrics.log_speed('ups', 1.0, priority=100, round=2) metrics.log_scalar('gnorm', utils.item(grad_norm), priority=400, round=3) metrics.log_scalar('clip', (100 if (grad_norm > self.args.clip_norm > 0) else 0), priority=500, round=1) if ((self.args.empty_cache_freq > 0) and ((((self.get_num_updates() + self.args.empty_cache_freq) - 1) % self.args.empty_cache_freq) == 0) and torch.cuda.is_available() and (not self.args.cpu)): torch.cuda.empty_cache() except OverflowError as e: logger.info(('NOTE: overflow detected, ' + str(e))) self.zero_grad() logging_output = None except RuntimeError as e: if ('out of memory' in str(e)): self._log_oom(e) logger.error('OOM during optimization, irrecoverable') raise e if self.args.fp16: metrics.log_scalar('loss_scale', self.optimizer.scaler.loss_scale, priority=700, round=0) metrics.log_stop_time('train_wall') return logging_output @metrics.aggregate('valid') def valid_step(self, sample, raise_oom=False): 'Do forward pass in evaluation mode.'
with torch.no_grad(): self.model.eval() self.criterion.eval() sample = self._prepare_sample(sample) if (sample is None): sample = self._prepare_sample(self._dummy_batch) ignore_results = True else: ignore_results = False try: (_loss, sample_size, logging_output) = self.task.valid_step(sample, self.model, self.criterion) except RuntimeError as e: if ('out of memory' in str(e)): self._log_oom(e) if (not raise_oom): logger.warning('ran out of memory in validation step, retrying batch') for p in self.model.parameters(): if (p.grad is not None): p.grad = None if self.cuda: torch.cuda.empty_cache() return self.valid_step(sample, raise_oom=True) raise e if ignore_results: (logging_outputs, sample_size) = ([], 0) else: logging_outputs = [logging_output] if (self.args.distributed_world_size > 1): (logging_outputs, sample_size) = self._aggregate_logging_outputs(logging_outputs, sample_size) logging_output = self._reduce_and_log_stats(logging_outputs, sample_size) return logging_output def dummy_train_step(self, dummy_batch): 'Dummy training step for warming caching allocator.' self.train_step(dummy_batch, dummy_batch=True) self.zero_grad() def handle_ooms(self, number_of_ooms): '\n c10d accumulates/syncs gradients between gpus during backward pass.\n In case of OOMs, gpus may fail to sync, so we manually iterate\n extra to make sure each gpu makes the same number of iterations.\n ' logger.error('OOM recovery is disabled in this trainer; exiting') if self.cuda: torch.cuda.empty_cache() exit() for _ in range(number_of_ooms): self.train_step([self._oom_batch], True) def zero_grad(self): self.optimizer.zero_grad() def lr_step(self, epoch, val_loss=None): 'Adjust the learning rate based on the validation loss.' self.lr_scheduler.step(epoch, val_loss) return self.lr_step_update() def lr_step_update(self): 'Update the learning rate after each update.' new_lr = self.lr_scheduler.step_update(self.get_num_updates()) metrics.log_scalar('lr', new_lr, weight=0, priority=300) return new_lr def get_lr(self): 'Get the current learning rate.' return self.optimizer.get_lr() def get_model(self): 'Get the (non-wrapped) model instance.' return self._model def get_criterion(self): 'Get the (non-wrapped) criterion instance.' return self._criterion def get_meter(self, name): '[deprecated] Get a specific meter by name.' from fairseq import meters if ('get_meter' not in self._warn_once): self._warn_once.add('get_meter') utils.deprecation_warning('Trainer.get_meter is deprecated. Please use fairseq.metrics instead.') train_meters = metrics.get_meters('train') if (train_meters is None): train_meters = {} if ((name == 'train_loss') and ('loss' in train_meters)): return train_meters['loss'] elif (name == 'train_nll_loss'): m = train_meters.get('nll_loss', None) return (m or meters.AverageMeter()) elif (name == 'wall'): m = metrics.get_meter('default', 'wall') return (m or meters.TimeMeter()) elif (name == 'wps'): m = metrics.get_meter('train', 'wps') return (m or meters.TimeMeter()) elif (name in {'valid_loss', 'valid_nll_loss'}): k = name[len('valid_'):] m = metrics.get_meter('valid', k) return (m or meters.AverageMeter()) elif (name in train_meters): return train_meters[name] return None def get_num_updates(self): 'Get the number of parameter updates.' return self._num_updates def set_num_updates(self, num_updates): 'Set the number of parameter updates.'
self._num_updates = num_updates self.lr_step_update() metrics.log_scalar('num_updates', self._num_updates, weight=0, priority=200) def _prepare_sample(self, sample): if ((sample is None) or (len(sample) == 0)): return None if self.cuda: sample = utils.move_to_cuda(sample) def apply_half(t): if (t.dtype is torch.float32): return t.half() return t if self.args.fp16: sample = utils.apply_to_sample(apply_half, sample) return sample def _set_seed(self): seed = (self.args.seed + self.get_num_updates()) torch.manual_seed(seed) if self.cuda: torch.cuda.manual_seed(seed) def _sync_stats(self): return ((self.args.distributed_world_size > 1) and ((not self.args.use_bmuf) or (self.args.use_bmuf and (((self.get_num_updates() + 1) % self.args.global_sync_iter) == 0) and ((self.get_num_updates() + 1) > self.args.warmup_iterations)))) def _log_oom(self, exc): msg = 'OOM: Ran out of memory with exception: {}'.format(exc) logger.warning(msg) if (torch.cuda.is_available() and hasattr(torch.cuda, 'memory_summary')): for device_idx in range(torch.cuda.device_count()): logger.warning(torch.cuda.memory_summary(device=device_idx)) sys.stderr.flush() def _aggregate_logging_outputs(self, logging_outputs: List[Dict[(str, Any)]], *extra_stats_to_sum): if self.get_criterion().__class__.logging_outputs_can_be_summed(): return self._fast_stat_sync_sum(logging_outputs, *extra_stats_to_sum) else: return self._all_gather_list_sync(logging_outputs, *extra_stats_to_sum) def _all_gather_list_sync(self, logging_outputs: List[Dict[(str, Any)]], *extra_stats_to_sum): '\n Sync logging outputs across workers. all_gather_list_sync is\n suitable when logging outputs are complex types.\n ' results = list(zip(*distributed_utils.all_gather_list(([logging_outputs] + list(extra_stats_to_sum)), max_size=getattr(self.args, 'all_gather_list_size', 16384)))) (logging_outputs, extra_stats_to_sum) = (results[0], results[1:]) logging_outputs = list(chain.from_iterable(logging_outputs)) extra_stats_to_sum = [sum(s) for s in extra_stats_to_sum] return ([logging_outputs] + extra_stats_to_sum) def _fast_stat_sync_sum(self, logging_outputs: List[Dict[(str, Any)]], *extra_stats_to_sum, min_buffer_size: int=50): '\n Sync logging outputs across workers. fast_stat_sync_sum is\n faster than all_gather_list_sync, but is only suitable when\n logging outputs are scalars and can be summed.\n ' num_extra = len(extra_stats_to_sum) if (len(logging_outputs) > 0): sorted_keys = sorted(logging_outputs[0].keys()) stats = (([0.0] + list(extra_stats_to_sum)) + [sum((log.get(k, 0) for log in logging_outputs)) for k in sorted_keys]) stats = (stats + ([0.0] * (min_buffer_size - len(stats)))) buf = torch.cuda.DoubleTensor(stats) else: buf = torch.zeros(min_buffer_size, dtype=torch.double, device='cuda') buf[0] = 1.0 distributed_utils.all_reduce(buf) buf = buf.tolist() fallback = buf[0] if (fallback > 0.0): return self._all_gather_list_sync(logging_outputs, *extra_stats_to_sum) else: (extra_stats_to_sum, stats) = (buf[1:(num_extra + 1)], buf[(num_extra + 1):]) stats = [{k: stats[i] for (i, k) in enumerate(sorted_keys)}] return ([stats] + extra_stats_to_sum) def _check_grad_norms(self, grad_norm): 'Check that grad norms are consistent across workers.' if (self._grad_norm_buf is not None): self._grad_norm_buf.zero_() self._grad_norm_buf[self.args.distributed_rank] = grad_norm distributed_utils.all_reduce(self._grad_norm_buf) if (not (self._grad_norm_buf == self._grad_norm_buf[0]).all()): raise RuntimeError('Fatal error: gradients are inconsistent between workers. Try --ddp-backend=no_c10d.') def _reduce_and_log_stats(self, logging_outputs, sample_size): with metrics.aggregate() as agg: logging_outputs = utils.apply_to_sample((lambda t: t.to(device='cpu', non_blocking=True)), logging_outputs) self.task.reduce_metrics(logging_outputs, self.get_criterion()) logging_output = agg.get_smoothed_values() logging_output['sample_size'] = sample_size for key_to_delete in ['ppl', 'wps', 'wpb', 'bsz']: if (key_to_delete in logging_output): del logging_output[key_to_delete] return logging_output
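# --- Illustration (not part of the library): the checkpoint payload that
# save_checkpoint writes and load_checkpoint reads back. Key names follow the
# accesses in the Trainer code above; all values here are placeholders, and
# the criterion/optimizer names are hypothetical.
_checkpoint_sketch = {
    'model': {},                                     # get_model().state_dict()
    'criterion': {},                                 # only when the criterion has parameters
    'optimizer_history': [{
        'criterion_name': 'CrossEntropyCriterion',   # hypothetical
        'optimizer_name': 'FP16Optimizer',           # hypothetical
        'lr_scheduler_state': {},
        'num_updates': 10000,
    }],
    'last_optimizer_state': {},                      # skipped when --reset-optimizer is set
    'extra_state': {'train_iterator': {'epoch': 3}, 'metrics': {}},
}

# --- Illustration (not part of the library): the maybe_no_sync pattern from
# train_step, with DDP replaced by a toy stand-in so the sketch runs
# standalone. Only the last backward in the group runs outside the no_sync
# scope, i.e. only it would trigger gradient all-reduce.
import contextlib

class _FakeDDPModel:
    def __init__(self):
        self.in_no_sync = False
        self.synced = 0
        self.unsynced = 0

    @contextlib.contextmanager
    def no_sync(self):
        # A real DDP module suppresses gradient all-reduce inside this scope.
        self.in_no_sync = True
        try:
            yield
        finally:
            self.in_no_sync = False

    def backward(self, sample):
        if self.in_no_sync:
            self.unsynced += 1
        else:
            self.synced += 1

def _accumulate(model, samples):
    for i, sample in enumerate(samples):
        ctx = model.no_sync() if i < (len(samples) - 1) else contextlib.ExitStack()
        with ctx:
            model.backward(sample)

_m = _FakeDDPModel()
_accumulate(_m, ['b1', 'b2', 'b3'])
assert (_m.unsynced, _m.synced) == (2, 1)

# --- Illustration (not part of the library): Trainer._sync_stats restated as
# a pure function for unit testing; parameters mirror the args attributes it
# reads.
def _should_sync(world_size, use_bmuf, num_updates, global_sync_iter, warmup_iterations):
    if world_size <= 1:
        return False
    if not use_bmuf:
        return True
    step = num_updates + 1
    return (step % global_sync_iter == 0) and (step > warmup_iterations)

assert _should_sync(1, False, 0, 50, 500) is False    # single worker: never sync
assert _should_sync(8, False, 0, 50, 500) is True     # plain DDP: sync every update
assert _should_sync(8, True, 99, 50, 500) is False    # BMUF: step 100 still in warmup
assert _should_sync(8, True, 999, 50, 500) is True    # BMUF: step 1000 syncs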
class WordStat(object): def __init__(self, word, is_bpe): self.word = word self.is_bpe = is_bpe self.log_prob = 0 self.next_word_prob = 0 self.count = 0 self.missing_next_words = 0 def add(self, log_prob, next_word_prob): ' increments counters for the sum of log probs of current word and next\n word (given context ending at current word). Since the next word might be at the end of the example,\n or it might be not counted because it is not an ending subword unit,\n also keeps track of how many of those we have seen ' if (next_word_prob is not None): self.next_word_prob += next_word_prob else: self.missing_next_words += 1 self.log_prob += log_prob self.count += 1 def __str__(self): return '{}\t{}\t{}\t{}\t{}\t{}'.format(self.word, self.count, self.log_prob, self.is_bpe, self.next_word_prob, (self.count - self.missing_next_words))
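# --- Illustration (not part of the library): hypothetical WordStat usage,
# accumulating two observations and printing the tab-separated summary line.
_ws = WordStat('hello', is_bpe=False)
_ws.add(log_prob=-1.5, next_word_prob=-0.7)
_ws.add(log_prob=-2.0, next_word_prob=None)  # next word unscored/missing
assert str(_ws) == 'hello\t2\t-3.5\tFalse\t-0.7\t1'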
def main(parsed_args): assert (parsed_args.path is not None), '--path required for evaluation!' utils.import_user_module(parsed_args) logger.info(parsed_args) use_cuda = (torch.cuda.is_available() and (not parsed_args.cpu)) task = tasks.setup_task(parsed_args) logger.info('loading model(s) from {}'.format(parsed_args.path)) (models, args) = checkpoint_utils.load_model_ensemble(parsed_args.path.split(os.pathsep), arg_overrides=eval(parsed_args.model_overrides), task=task) for arg in vars(parsed_args).keys(): if (arg not in {'self_target', 'future_target', 'past_target', 'tokens_per_sample', 'output_size_dictionary', 'add_bos_token'}): setattr(args, arg, getattr(parsed_args, arg)) args.tokens_per_sample -= args.context_window task = tasks.setup_task(args) task.load_dataset(args.gen_subset) dataset = task.dataset(args.gen_subset) if (args.context_window > 0): dataset = LMContextWindowDataset(dataset=dataset, tokens_per_sample=args.tokens_per_sample, context_window=args.context_window, pad_idx=task.source_dictionary.pad()) logger.info('{} {} {} examples'.format(args.data, args.gen_subset, len(dataset))) for model in models: model.make_generation_fast_() if args.fp16: model.half() if use_cuda: model.cuda() assert (len(models) > 0) model_params = (sum((p.numel() for p in models[0].parameters())) / 1000000.0) logger.info('num. model params: {:.2f}M'.format(model_params)) itr = task.get_batch_iterator(dataset=dataset, max_tokens=(args.max_tokens or 36000), max_sentences=args.max_sentences, max_positions=utils.resolve_max_positions(*[model.max_positions() for model in models]), ignore_invalid_inputs=True, num_shards=args.num_shards, shard_id=args.shard_id, num_workers=args.num_workers).next_epoch_itr(shuffle=False) gen_timer = StopwatchMeter() scorer = SequenceScorer(task.target_dictionary, args.softmax_batch) score_sum = 0.0 count = 0 if (args.remove_bpe is not None): if (args.remove_bpe == 'sentencepiece'): raise NotImplementedError else: bpe_cont = args.remove_bpe.rstrip() bpe_toks = {i for i in range(len(task.source_dictionary)) if task.source_dictionary[i].endswith(bpe_cont)} bpe_len = len(bpe_cont) else: bpe_toks = None bpe_len = 0 word_stats = dict() with progress_bar.build_progress_bar(args, itr) as t: wps_meter = TimeMeter() for sample in t: if ('net_input' not in sample): continue sample = (utils.move_to_cuda(sample) if use_cuda else sample) gen_timer.start() hypos = scorer.generate(models, sample) gen_timer.stop(sample['ntokens']) for (i, hypos_i) in enumerate(hypos): hypo = hypos_i[0] sample_id = sample['id'][i] tokens = hypo['tokens'] tgt_len = tokens.numel() pos_scores = hypo['positional_scores'].float() if args.add_bos_token: assert (hypo['tokens'][0].item() == task.target_dictionary.bos()) tokens = tokens[1:] pos_scores = pos_scores[1:] skipped_toks = 0 if (bpe_toks is not None): for i in range((tgt_len - 1)): if (tokens[i].item() in bpe_toks): skipped_toks += 1 pos_scores[(i + 1)] += pos_scores[i] pos_scores[i] = 0 inf_scores = (pos_scores.eq(float('inf')) | pos_scores.eq(float('-inf'))) if inf_scores.any(): logger.info('skipping tokens with inf scores: %s', task.target_dictionary.string(tokens[inf_scores.nonzero()])) pos_scores = pos_scores[(~ inf_scores).nonzero()] score_sum += pos_scores.sum().cpu() count += (pos_scores.numel() - skipped_toks) if (args.output_word_probs or args.output_word_stats): w = '' word_prob = [] is_bpe = False for i in range(len(tokens)): w_ind = tokens[i].item() w += task.source_dictionary[w_ind] if ((bpe_toks is not None) and (w_ind in bpe_toks)): w = w[:(- bpe_len)] is_bpe = True else: word_prob.append((w, pos_scores[i].item())) next_prob = None ind = (i + 1) while (ind < len(tokens)): if (pos_scores[ind].item() != 0): next_prob = pos_scores[ind] break ind += 1 word_stats.setdefault(w, WordStat(w, is_bpe)).add(pos_scores[i].item(), next_prob) is_bpe = False w = '' if args.output_word_probs: logger.info(((str(int(sample_id)) + ' ') + '\t'.join(('{} [{:.2f}]'.format(x[0], x[1]) for x in word_prob)))) wps_meter.update(sample['ntokens']) t.log({'wps': round(wps_meter.avg)}) avg_nll_loss = (((- score_sum) / count) / math.log(2)) logger.info('Evaluated {} tokens in {:.1f}s ({:.2f} tokens/s)'.format(gen_timer.n, gen_timer.sum, (1.0 / gen_timer.avg))) logger.info('Loss (base 2): {:.4f}, Perplexity: {:.2f}'.format(avg_nll_loss, (2 ** avg_nll_loss))) if args.res_file: with open(args.res_file, 'a') as res_file: res_file.write('\n \n \t Loss (base 2): {:.4f}, Perplexity: {:.2f}'.format(avg_nll_loss, (2 ** avg_nll_loss))) res_file.write('\n \t Num. model params: {:.2f}M\n'.format(model_params)) if args.output_word_stats: for ws in sorted(word_stats.values(), key=(lambda x: x.count), reverse=True): logger.info(ws)
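# --- Worked example (hypothetical numbers): the base-2 loss/perplexity
# conversion used above. score_sum holds natural-log probabilities, so
# dividing by math.log(2) converts the mean NLL into bits.
import math
_score_sum, _count = -3466.0, 1000
_avg_nll_loss = -_score_sum / _count / math.log(2)
print('Loss (base 2): {:.4f}, Perplexity: {:.2f}'.format(_avg_nll_loss, 2 ** _avg_nll_loss))
# Loss (base 2): 5.0004, Perplexity: 32.01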
def cli_main(): parser = options.get_eval_lm_parser() args = options.parse_args_and_arch(parser) main(args)
def buffered_read(input, buffer_size): buffer = [] with fileinput.input(files=[input], openhook=fileinput.hook_encoded('utf-8')) as h: for src_str in h: buffer.append(src_str.strip()) if (len(buffer) >= buffer_size): (yield buffer) buffer = [] if (len(buffer) > 0): (yield buffer)
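# --- Illustration (not part of the library): buffered_read streams a file (or
# stdin, via fileinput's '-' convention) in chunks of at most buffer_size
# stripped lines. A runnable demo against a temporary file:
import os
import tempfile

_fd, _path = tempfile.mkstemp(text=True)
with os.fdopen(_fd, 'w') as _f:
    _f.write('a\nb\nc\n')
try:
    assert list(buffered_read(_path, buffer_size=2)) == [['a', 'b'], ['c']]
finally:
    os.remove(_path)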
def make_batches(lines, args, task, max_positions, encode_fn): tokens = [task.source_dictionary.encode_line(encode_fn(src_str), add_if_not_exist=False).long() for src_str in lines] lengths = [t.numel() for t in tokens] itr = task.get_batch_iterator(dataset=task.build_dataset_for_inference(tokens, lengths), max_tokens=args.max_tokens, max_sentences=args.max_sentences, max_positions=max_positions, ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test).next_epoch_itr(shuffle=False) for batch in itr: (yield Batch(ids=batch['id'], src_tokens=batch['net_input']['src_tokens'], src_lengths=batch['net_input']['src_lengths']))
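# --- Note (assumption): make_batches yields Batch objects defined elsewhere
# in the module, above the functions that use them; a namedtuple with these
# fields is consistent with the construction above.
from collections import namedtuple
Batch = namedtuple('Batch', ['ids', 'src_tokens', 'src_lengths'])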
def main(args): utils.import_user_module(args) if (args.buffer_size < 1): args.buffer_size = 1 if ((args.max_tokens is None) and (args.max_sentences is None)): args.max_sentences = 1 assert ((not args.sampling) or (args.nbest == args.beam)), '--sampling requires --nbest to be equal to --beam' assert ((not args.max_sentences) or (args.max_sentences <= args.buffer_size)), '--max-sentences/--batch-size cannot be larger than --buffer-size' logger.info(args) use_cuda = (torch.cuda.is_available() and (not args.cpu)) task = tasks.setup_task(args) logger.info('loading model(s) from {}'.format(args.path)) (models, _model_args) = checkpoint_utils.load_model_ensemble(args.path.split(os.pathsep), arg_overrides=eval(args.model_overrides), task=task) src_dict = task.source_dictionary tgt_dict = task.target_dictionary for model in models: model.make_generation_fast_(beamable_mm_beam_size=(None if args.no_beamable_mm else args.beam), need_attn=args.print_alignment) if args.fp16: model.half() if use_cuda: model.cuda() generator = task.build_generator(args) tokenizer = encoders.build_tokenizer(args) bpe = encoders.build_bpe(args) def encode_fn(x): if (tokenizer is not None): x = tokenizer.encode(x) if (bpe is not None): x = bpe.encode(x) return x def decode_fn(x): if (bpe is not None): x = bpe.decode(x) if (tokenizer is not None): x = tokenizer.decode(x) return x align_dict = utils.load_align_dict(args.replace_unk) max_positions = utils.resolve_max_positions(task.max_positions(), *[model.max_positions() for model in models]) if (args.buffer_size > 1): logger.info('Sentence buffer size: %s', args.buffer_size) logger.info('NOTE: hypothesis and token scores are output in base 2') logger.info('Type the input sentence and press return:') start_id = 0 for inputs in buffered_read(args.input, args.buffer_size): results = [] for batch in make_batches(inputs, args, task, max_positions, encode_fn): src_tokens = batch.src_tokens src_lengths = batch.src_lengths if use_cuda: src_tokens = src_tokens.cuda() src_lengths = src_lengths.cuda() sample = {'net_input': {'src_tokens': src_tokens, 'src_lengths': src_lengths}} translations = task.inference_step(generator, models, sample) for (i, (id, hypos)) in enumerate(zip(batch.ids.tolist(), translations)): src_tokens_i = utils.strip_pad(src_tokens[i], tgt_dict.pad()) results.append(((start_id + id), src_tokens_i, hypos)) for (id, src_tokens, hypos) in sorted(results, key=(lambda x: x[0])): if (src_dict is not None): src_str = src_dict.string(src_tokens, args.remove_bpe) print('S-{}\t{}'.format(id, src_str)) for hypo in hypos[:min(len(hypos), args.nbest)]: (hypo_tokens, hypo_str, alignment) = utils.post_process_prediction(hypo_tokens=hypo['tokens'].int().cpu(), src_str=src_str, alignment=hypo['alignment'], align_dict=align_dict, tgt_dict=tgt_dict, remove_bpe=args.remove_bpe) hypo_str = decode_fn(hypo_str) score = (hypo['score'] / math.log(2)) print('H-{}\t{}\t{}'.format(id, score, hypo_str)) print('P-{}\t{}'.format(id, ' '.join(map((lambda x: '{:.4f}'.format(x)), hypo['positional_scores'].div_(math.log(2)).tolist())))) if args.print_alignment: alignment_str = ' '.join(['{}-{}'.format(src, tgt) for (src, tgt) in alignment]) print('A-{}\t{}'.format(id, alignment_str)) start_id += len(inputs)
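# --- Illustration (not part of the library): in main above, encode_fn applies
# the tokenizer first and BPE second, and decode_fn inverts them in the
# opposite order. Toy stand-ins (both hypothetical) showing the round trip:
class _ToyTokenizer:
    def encode(self, s): return s.replace(',', ' ,')
    def decode(self, s): return s.replace(' ,', ',')

class _ToyBPE:
    def encode(self, s): return s.replace('hello', 'he@@ llo')
    def decode(self, s): return s.replace('he@@ llo', 'hello')

_tok, _bpe = _ToyTokenizer(), _ToyBPE()
_enc = _bpe.encode(_tok.encode('hello, world'))   # tokenize, then apply BPE
assert _enc == 'he@@ llo , world'
assert _tok.decode(_bpe.decode(_enc)) == 'hello, world'  # inverse order restores the input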
def cli_main(): parser = options.get_generation_parser(interactive=True) args = options.parse_args_and_arch(parser) main(args)
def main(args): utils.import_user_module(args) os.makedirs(args.destdir, exist_ok=True) logger.addHandler(logging.FileHandler(filename=os.path.join(args.destdir, 'preprocess.log'))) logger.info(args) task = tasks.get_task(args.task) def train_path(lang): return '{}{}'.format(args.trainpref, (('.' + lang) if lang else '')) def file_name(prefix, lang): fname = prefix if (lang is not None): fname += '.{lang}'.format(lang=lang) return fname def dest_path(prefix, lang): return os.path.join(args.destdir, file_name(prefix, lang)) def dict_path(lang): return (dest_path('dict', lang) + '.txt') def build_dictionary(filenames, src=False, tgt=False): assert (src ^ tgt) return task.build_dictionary(filenames, workers=args.workers, threshold=(args.thresholdsrc if src else args.thresholdtgt), nwords=(args.nwordssrc if src else args.nwordstgt), padding_factor=args.padding_factor) target = (not args.only_source) if ((not args.srcdict) and os.path.exists(dict_path(args.source_lang))): raise FileExistsError(dict_path(args.source_lang)) if (target and (not args.tgtdict) and os.path.exists(dict_path(args.target_lang))): raise FileExistsError(dict_path(args.target_lang)) if args.joined_dictionary: assert ((not args.srcdict) or (not args.tgtdict)), 'cannot use both --srcdict and --tgtdict with --joined-dictionary' if args.srcdict: src_dict = task.load_dictionary(args.srcdict) elif args.tgtdict: src_dict = task.load_dictionary(args.tgtdict) else: assert args.trainpref, '--trainpref must be set if --srcdict is not specified' src_dict = build_dictionary({train_path(lang) for lang in [args.source_lang, args.target_lang]}, src=True) tgt_dict = src_dict else: if args.srcdict: src_dict = task.load_dictionary(args.srcdict) else: assert args.trainpref, '--trainpref must be set if --srcdict is not specified' src_dict = build_dictionary([train_path(args.source_lang)], src=True) if target: if args.tgtdict: tgt_dict = task.load_dictionary(args.tgtdict) else: assert args.trainpref, '--trainpref must be set if --tgtdict is not specified' tgt_dict = build_dictionary([train_path(args.target_lang)], tgt=True) else: tgt_dict = None src_dict.save(dict_path(args.source_lang)) if (target and (tgt_dict is not None)): tgt_dict.save(dict_path(args.target_lang)) def make_binary_dataset(vocab, input_prefix, output_prefix, lang, num_workers): logger.info('[{}] Dictionary: {} types'.format(lang, (len(vocab) - 1))) n_seq_tok = [0, 0] replaced = Counter() def merge_result(worker_result): replaced.update(worker_result['replaced']) n_seq_tok[0] += worker_result['nseq'] n_seq_tok[1] += worker_result['ntok'] input_file = '{}{}'.format(input_prefix, (('.' + lang) if (lang is not None) else '')) offsets = Binarizer.find_offsets(input_file, num_workers) pool = None if (num_workers > 1): pool = Pool(processes=(num_workers - 1)) for worker_id in range(1, num_workers): prefix = '{}{}'.format(output_prefix, worker_id) pool.apply_async(binarize, (args, input_file, vocab, prefix, lang, offsets[worker_id], offsets[(worker_id + 1)]), callback=merge_result) pool.close() ds = indexed_dataset.make_builder(dataset_dest_file(args, output_prefix, lang, 'bin'), impl=args.dataset_impl, vocab_size=len(vocab)) merge_result(Binarizer.binarize(input_file, vocab, (lambda t: ds.add_item(t)), offset=0, end=offsets[1])) if (num_workers > 1): pool.join() for worker_id in range(1, num_workers): prefix = '{}{}'.format(output_prefix, worker_id) temp_file_path = dataset_dest_prefix(args, prefix, lang) ds.merge_file_(temp_file_path) os.remove(indexed_dataset.data_file_path(temp_file_path)) os.remove(indexed_dataset.index_file_path(temp_file_path)) ds.finalize(dataset_dest_file(args, output_prefix, lang, 'idx')) logger.info('[{}] {}: {} sents, {} tokens, {:.3}% replaced by {}'.format(lang, input_file, n_seq_tok[0], n_seq_tok[1], ((100 * sum(replaced.values())) / n_seq_tok[1]), vocab.unk_word)) def make_binary_alignment_dataset(input_prefix, output_prefix, num_workers): nseq = [0] def merge_result(worker_result): nseq[0] += worker_result['nseq'] input_file = input_prefix offsets = Binarizer.find_offsets(input_file, num_workers) pool = None if (num_workers > 1): pool = Pool(processes=(num_workers - 1)) for worker_id in range(1, num_workers): prefix = '{}{}'.format(output_prefix, worker_id) pool.apply_async(binarize_alignments, (args, input_file, utils.parse_alignment, prefix, offsets[worker_id], offsets[(worker_id + 1)]), callback=merge_result) pool.close() ds = indexed_dataset.make_builder(dataset_dest_file(args, output_prefix, None, 'bin'), impl=args.dataset_impl) merge_result(Binarizer.binarize_alignments(input_file, utils.parse_alignment, (lambda t: ds.add_item(t)), offset=0, end=offsets[1])) if (num_workers > 1): pool.join() for worker_id in range(1, num_workers): prefix = '{}{}'.format(output_prefix, worker_id) temp_file_path = dataset_dest_prefix(args, prefix, None) ds.merge_file_(temp_file_path) os.remove(indexed_dataset.data_file_path(temp_file_path)) os.remove(indexed_dataset.index_file_path(temp_file_path)) ds.finalize(dataset_dest_file(args, output_prefix, None, 'idx')) logger.info('[alignments] {}: parsed {} alignments'.format(input_file, nseq[0])) def make_dataset(vocab, input_prefix, output_prefix, lang, num_workers=1): if (args.dataset_impl == 'raw'): output_text_file = dest_path((output_prefix + '.{}-{}'.format(args.source_lang, args.target_lang)), lang) shutil.copyfile(file_name(input_prefix, lang), output_text_file) else: make_binary_dataset(vocab, input_prefix, output_prefix, lang, num_workers) def make_all(lang, vocab): if args.trainpref: make_dataset(vocab, args.trainpref, 'train', lang, num_workers=args.workers) if args.validpref: for (k, validpref) in enumerate(args.validpref.split(',')): outprefix = ('valid{}'.format(k) if (k > 0) else 'valid') make_dataset(vocab, validpref, outprefix, lang, num_workers=args.workers) if args.testpref: for (k, testpref) in enumerate(args.testpref.split(',')): outprefix = ('test{}'.format(k) if (k > 0) else 'test') make_dataset(vocab, testpref, outprefix, lang, num_workers=args.workers) def make_all_alignments(): if (args.trainpref and os.path.exists(((args.trainpref + '.') + args.align_suffix))):
make_binary_alignment_dataset(((args.trainpref + '.') + args.align_suffix), 'train.align', num_workers=args.workers) if (args.validpref and os.path.exists(((args.validpref + '.') + args.align_suffix))): make_binary_alignment_dataset(((args.validpref + '.') + args.align_suffix), 'valid.align', num_workers=args.workers) if (args.testpref and os.path.exists(((args.testpref + '.') + args.align_suffix))): make_binary_alignment_dataset(((args.testpref + '.') + args.align_suffix), 'test.align', num_workers=args.workers) make_all(args.source_lang, src_dict) if target: make_all(args.target_lang, tgt_dict) if args.align_suffix: make_all_alignments() logger.info('Wrote preprocessed data to {}'.format(args.destdir)) if args.alignfile: assert args.trainpref, '--trainpref must be set if --alignfile is specified' src_file_name = train_path(args.source_lang) tgt_file_name = train_path(args.target_lang) freq_map = {} with open(args.alignfile, 'r', encoding='utf-8') as align_file: with open(src_file_name, 'r', encoding='utf-8') as src_file: with open(tgt_file_name, 'r', encoding='utf-8') as tgt_file: for (a, s, t) in zip_longest(align_file, src_file, tgt_file): si = src_dict.encode_line(s, add_if_not_exist=False) ti = tgt_dict.encode_line(t, add_if_not_exist=False) ai = list(map((lambda x: tuple(x.split('-'))), a.split())) for (sai, tai) in ai: srcidx = si[int(sai)] tgtidx = ti[int(tai)] if ((srcidx != src_dict.unk()) and (tgtidx != tgt_dict.unk())): assert (srcidx != src_dict.pad()) assert (srcidx != src_dict.eos()) assert (tgtidx != tgt_dict.pad()) assert (tgtidx != tgt_dict.eos()) if (srcidx not in freq_map): freq_map[srcidx] = {} if (tgtidx not in freq_map[srcidx]): freq_map[srcidx][tgtidx] = 1 else: freq_map[srcidx][tgtidx] += 1 align_dict = {} for srcidx in freq_map.keys(): align_dict[srcidx] = max(freq_map[srcidx], key=freq_map[srcidx].get) with open(os.path.join(args.destdir, 'alignment.{}-{}.txt'.format(args.source_lang, args.target_lang)), 'w', encoding='utf-8') as f: for (k, v) in align_dict.items(): print('{} {}'.format(src_dict[k], tgt_dict[v]), file=f)
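# --- Worked example (hypothetical indices): the --alignfile branch above
# keeps, for every source index, the most frequently aligned target index.
_freq_map = {7: {12: 3, 15: 1}, 9: {4: 2}}
_align_dict = {s: max(t, key=t.get) for s, t in _freq_map.items()}
assert _align_dict == {7: 12, 9: 4}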
def binarize(args, filename, vocab, output_prefix, lang, offset, end, append_eos=True): ds = indexed_dataset.make_builder(dataset_dest_file(args, output_prefix, lang, 'bin'), impl=args.dataset_impl, vocab_size=len(vocab)) def consumer(tensor): ds.add_item(tensor) res = Binarizer.binarize(filename, vocab, consumer, append_eos=append_eos, offset=offset, end=end) ds.finalize(dataset_dest_file(args, output_prefix, lang, 'idx')) return res
def binarize_alignments(args, filename, parse_alignment, output_prefix, offset, end): ds = indexed_dataset.make_builder(dataset_dest_file(args, output_prefix, None, 'bin'), impl=args.dataset_impl, vocab_size=None) def consumer(tensor): ds.add_item(tensor) res = Binarizer.binarize_alignments(filename, parse_alignment, consumer, offset=offset, end=end) ds.finalize(dataset_dest_file(args, output_prefix, None, 'idx')) return res
def dataset_dest_prefix(args, output_prefix, lang): base = '{}/{}'.format(args.destdir, output_prefix) if (lang is not None): lang_part = '.{}-{}.{}'.format(args.source_lang, args.target_lang, lang) elif args.only_source: lang_part = '' else: lang_part = '.{}-{}'.format(args.source_lang, args.target_lang) return '{}{}'.format(base, lang_part)
def dataset_dest_file(args, output_prefix, lang, extension): base = dataset_dest_prefix(args, output_prefix, lang) return '{}.{}'.format(base, extension)
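# --- Illustration (not part of the library): outputs of dataset_dest_prefix
# and dataset_dest_file for a hypothetical argument namespace.
from argparse import Namespace
_args = Namespace(destdir='data-bin', source_lang='de', target_lang='en', only_source=False)
assert dataset_dest_prefix(_args, 'train', 'de') == 'data-bin/train.de-en.de'
assert dataset_dest_prefix(_args, 'train', None) == 'data-bin/train.de-en'
assert dataset_dest_file(_args, 'train', 'de', 'bin') == 'data-bin/train.de-en.de.bin'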
def get_offsets(input_file, num_workers): return Binarizer.find_offsets(input_file, num_workers)
def cli_main(): parser = options.get_preprocessing_parser() args = parser.parse_args() main(args)
def get_parser(): parser = argparse.ArgumentParser(description='Command-line script for BLEU scoring.') parser.add_argument('-s', '--sys', default='-', help='system output') parser.add_argument('-r', '--ref', required=True, help='references') parser.add_argument('-o', '--order', default=4, metavar='N', type=int, help='consider ngrams up to this order') parser.add_argument('--ignore-case', action='store_true', help='case-insensitive scoring') parser.add_argument('--sacrebleu', action='store_true', help='score with sacrebleu') parser.add_argument('--sentence-bleu', action='store_true', help='report sentence-level BLEUs (i.e., with +1 smoothing)') return parser
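# --- Illustration (not part of the library): parsing hypothetical arguments
# with the BLEU-scoring parser defined above. File names are placeholders.
_parser = get_parser()
_args = _parser.parse_args(['--ref', 'ref.txt', '--sys', 'sys.txt', '--order', '4'])
assert (_args.ref, _args.sys, _args.order) == ('ref.txt', 'sys.txt', 4)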