Dataset schema (column: dtype, observed range):

repo:            stringlengths, 1 to 99
file:            stringlengths, 13 to 215
code:            stringlengths, 12 to 59.2M
file_length:     int64, 12 to 59.2M
avg_line_length: float64, 3.82 to 1.48M
max_line_length: int64, 12 to 2.51M
extension_type:  stringclasses, 1 value
sign-topic
sign-topic-main/fairseq/optim/adam.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import math from collections.abc import Collection from dataclasses import dataclass, field from typing import Any, List import torch import torch.distributed as dist import torch.optim from fairseq.dataclass import FairseqDataclass from fairseq.optim import FairseqOptimizer, register_optimizer from fairseq.optim.fused_adam import get_fused_adam_class from omegaconf import II, OmegaConf logger = logging.getLogger(__name__) @dataclass class FairseqAdamConfig(FairseqDataclass): adam_betas: Any = field( default=(0.9, 0.999), metadata={"help": "betas for Adam optimizer"} ) adam_eps: float = field( default=1e-8, metadata={"help": "epsilon for Adam optimizer"} ) weight_decay: float = field(default=0.0, metadata={"help": "weight decay"}) use_old_adam: bool = field( default=False, metadata={"help": "Use fairseq.optim.adam.Adam"} ) fp16_adam_stats: bool = field( default=False, metadata={"help": "use FP16 stats (with automatic scaling)"} ) # TODO common vars below in parent tpu: bool = II("common.tpu") lr: List[float] = II("optimization.lr") @register_optimizer("adam", dataclass=FairseqAdamConfig) class FairseqAdam(FairseqOptimizer): """Adam optimizer for fairseq. Important note: this optimizer corresponds to the "AdamW" variant of Adam in its weight decay behavior. As such, it is most closely analogous to torch.optim.AdamW from PyTorch. """ def __init__(self, cfg: FairseqAdamConfig, params): super().__init__(cfg) fused_adam_cls = get_fused_adam_class() use_fused_adam = ( not getattr(cfg, "use_old_adam", False) and fused_adam_cls is not None and torch.cuda.is_available() ) if getattr(cfg, "tpu", False): if self.cfg.fp16_adam_stats: raise NotImplementedError("--fp16-adam-stats is only supported on GPU") # on TPUs we use the Adam defined here, since it # automatically casts gradients to FP32 self._optimizer = Adam(params, **self.optimizer_config) elif use_fused_adam: logger.info("using FusedAdam") self._optimizer = fused_adam_cls( params, use_fp16_stats=self.cfg.fp16_adam_stats, **self.optimizer_config ) else: if self.cfg.fp16_adam_stats: raise NotImplementedError( "--fp16-adam-stats is only supported with FusedAdamV1" ) self._optimizer = Adam(params, **self.optimizer_config) @property def optimizer_config(self): """ Return a kwarg dictionary that will be used to override optimizer args stored in checkpoints. This allows us to load a checkpoint and resume training using a different set of optimizer args, e.g., with a different learning rate. """ return { "lr": self.cfg.lr[0] if isinstance(self.cfg.lr, Collection) else self.cfg.lr, "betas": eval(self.cfg.adam_betas) if isinstance(self.cfg.adam_betas, str) else OmegaConf.to_container(self.cfg.adam_betas), "eps": self.cfg.adam_eps, "weight_decay": self.cfg.weight_decay, } def average_params(self): """Reduce Params is only used during BMUF distributed training.""" state_dict = self.optimizer.state_dict() total_gpus = float(dist.get_world_size()) for _, value in state_dict["state"].items(): value["exp_avg"] /= total_gpus value["exp_avg_sq"] /= total_gpus dist.all_reduce(value["exp_avg"], op=dist.ReduceOp.SUM) dist.all_reduce(value["exp_avg_sq"], op=dist.ReduceOp.SUM) class Adam(torch.optim.Optimizer): r"""Implements Adam algorithm. 
This implementation is modified from torch.optim.Adam based on: `Fixed Weight Decay Regularization in Adam` (see https://arxiv.org/abs/1711.05101) It has been proposed in `Adam: A Method for Stochastic Optimization`_. Args: params (iterable): iterable of parameters to optimize or dicts defining parameter groups lr (float, optional): learning rate (default: 1e-3) betas (Tuple[float, float], optional): coefficients used for computing running averages of gradient and its square (default: (0.9, 0.999)) eps (float, optional): term added to the denominator to improve numerical stability (default: 1e-8) weight_decay (float, optional): weight decay (L2 penalty) (default: 0) amsgrad (boolean, optional): whether to use the AMSGrad variant of this algorithm from the paper `On the Convergence of Adam and Beyond`_ .. _Adam\: A Method for Stochastic Optimization: https://arxiv.org/abs/1412.6980 .. _On the Convergence of Adam and Beyond: https://openreview.net/forum?id=ryQu7f-RZ """ def __init__( self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, ): defaults = dict( lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad ) super(Adam, self).__init__(params, defaults) @property def supports_memory_efficient_fp16(self): return True @property def supports_flat_params(self): return True def step(self, closure=None): """Performs a single optimization step. Args: closure (callable, optional): A closure that reevaluates the model and returns the loss. """ loss = None if closure is not None: loss = closure() for group in self.param_groups: for p in group["params"]: if p.grad is None: continue grad = p.grad.data if grad.dtype in {torch.float16, torch.bfloat16}: grad = grad.float() if grad.is_sparse: raise RuntimeError( "Adam does not support sparse gradients, please consider SparseAdam instead" ) amsgrad = group.get("amsgrad", False) p_data_fp32 = p.data if p.data.dtype in {torch.float16, torch.bfloat16}: p_data_fp32 = p_data_fp32.float() state = self.state[p] # State initialization if len(state) == 0: state["step"] = 0 # Exponential moving average of gradient values state["exp_avg"] = torch.zeros_like(p_data_fp32) # Exponential moving average of squared gradient values state["exp_avg_sq"] = torch.zeros_like(p_data_fp32) if amsgrad: # Maintains max of all exp. moving avg. of sq. grad. values state["max_exp_avg_sq"] = torch.zeros_like(p_data_fp32) else: state["exp_avg"] = state["exp_avg"].to(p_data_fp32) state["exp_avg_sq"] = state["exp_avg_sq"].to(p_data_fp32) if amsgrad: state["max_exp_avg_sq"] = state["max_exp_avg_sq"].to( p_data_fp32 ) exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"] if amsgrad: max_exp_avg_sq = state["max_exp_avg_sq"] beta1, beta2 = group["betas"] state["step"] += 1 # Decay the first and second moment running average coefficient exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2) if amsgrad: # Maintains the maximum of all 2nd moment running avg. till now torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq) # Use the max. for normalizing running avg. 
of gradient denom = max_exp_avg_sq.sqrt().add_(group["eps"]) else: denom = exp_avg_sq.sqrt().add_(group["eps"]) bias_correction1 = 1 - beta1 ** state["step"] bias_correction2 = 1 - beta2 ** state["step"] step_size = group["lr"] * math.sqrt(bias_correction2) / bias_correction1 if group["weight_decay"] != 0: p_data_fp32.add_( p_data_fp32, alpha=-group["weight_decay"] * group["lr"] ) p_data_fp32.addcdiv_(exp_avg, denom, value=-step_size) if p.data.dtype in {torch.float16, torch.bfloat16}: p.data.copy_(p_data_fp32) return loss
9,184
37.270833
100
py
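The Adam.step logic in the row above folds the bias corrections into a single step size and applies weight decay directly to the parameters, which is the AdamW-style behavior mentioned in the class docstring. Below is a minimal single-tensor sketch of that update; the default hyperparameters are illustrative and FP32 tensors are assumed.

```python
import math
import torch

def adam_update(p, grad, exp_avg, exp_avg_sq, step,
                lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0):
    """Single-tensor sketch of the update in Adam.step above (all FP32)."""
    beta1, beta2 = betas
    # first and second moment running averages
    exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
    exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
    # bias corrections folded into the step size
    bias_correction1 = 1 - beta1 ** step
    bias_correction2 = 1 - beta2 ** step
    step_size = lr * math.sqrt(bias_correction2) / bias_correction1
    denom = exp_avg_sq.sqrt().add_(eps)
    # decoupled (AdamW-style) weight decay, applied to the parameters directly
    if weight_decay != 0:
        p.add_(p, alpha=-weight_decay * lr)
    p.addcdiv_(exp_avg, denom, value=-step_size)
    return p
```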
sign-topic
sign-topic-main/fairseq/optim/adafactor.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import torch import torch.optim from . import LegacyFairseqOptimizer, register_optimizer @register_optimizer("adafactor") class FairseqAdafactor(LegacyFairseqOptimizer): def __init__(self, args, params): super().__init__(args) self._optimizer = Adafactor(params, **self.optimizer_config) @staticmethod def add_args(parser): """Add optimizer-specific arguments to the parser.""" # fmt: off parser.add_argument('--adafactor-eps', default='(1e-30, 1e-3)', metavar="E", help='epsilons for Adafactor optimizer') parser.add_argument('--clip-threshold', type=float, default=1.0, metavar="C", help='threshold for clipping update root mean square') parser.add_argument('--decay-rate', type=float, default=-0.8, metavar="D", help='decay rate of the second moment estimator') parser.add_argument('--beta1', type=float, default=None, metavar="B", help='beta for first moment estimator. Optional') parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD', help='weight decay') parser.add_argument('--scale-parameter', action='store_true', help='scale learning rate by root mean square of parameter') parser.add_argument('--relative-step', action='store_true', help='set learning rate to inverse square root of timestep,' 'otherwise use external learning rate') parser.add_argument('--warmup-init', action='store_true', help='use relative step for warm-up learning rate schedule') # fmt: on @property def optimizer_config(self): """ Return a kwarg dictionary that will be used to override optimizer args stored in checkpoints. This allows us to load a checkpoint and resume training using a different set of optimizer args, e.g., with a different learning rate. Note : Convergence issues empirically observed with fp16 on. Might require search for appropriate configuration. """ return { "lr": self.args.lr[0], "eps": eval(self.args.adafactor_eps), "clip_threshold": self.args.clip_threshold, "decay_rate": self.args.decay_rate, "beta1": self.args.beta1, "weight_decay": self.args.weight_decay, "scale_parameter": self.args.scale_parameter, # defaults to False "relative_step": self.args.relative_step, # defaults to False "warmup_init": self.args.warmup_init, } class Adafactor(torch.optim.Optimizer): """Implements Adafactor algorithm. This implementation is based on: `Adafactor: Adaptive Learning Rates with Sublinear Memory Cost` (see https://arxiv.org/abs/1804.04235) Note that this optimizer internally adjusts the learning rate depending on the *scale_parameter*, *relative_step* and *warmup_init* options. To use a manual (external) learning rate schedule you should set `scale_parameter=False` and `relative_step=False`. 
Args: params (iterable): iterable of parameters to optimize or dicts defining parameter groups lr (float, optional): external learning rate (default: None) eps (tuple[float, float]): regularization constans for square gradient and parameter scale respectively (default: (1e-30, 1e-3)) clip_threshold (float): threshold of root mean square of final gradient update (default: 1.0) decay_rate (float): coefficient used to compute running averages of square gradient (default: -0.8) beta1 (float): coefficient used for computing running averages of gradient (default: None) weight_decay (float, optional): weight decay (L2 penalty) (default: 0) scale_parameter (bool): if True, learning rate is scaled by root mean square of parameter (default: True) relative_step (bool): if True, time-dependent learning rate is computed instead of external learning rate (default: True) warmup_init (bool): time-dependent learning rate computation depends on whether warm-up initialization is being used (default: False) """ def __init__( self, params, lr=None, eps=(1e-30, 1e-3), clip_threshold=1.0, decay_rate=-0.8, beta1=None, weight_decay=0.0, scale_parameter=True, relative_step=True, warmup_init=False, ): if lr is not None and relative_step: raise ValueError("Cannot combine manual lr and relative_step options") if warmup_init and not relative_step: raise ValueError("warmup_init requires relative_step=True") defaults = dict( lr=lr, eps=eps, clip_threshold=clip_threshold, decay_rate=decay_rate, beta1=beta1, weight_decay=weight_decay, scale_parameter=scale_parameter, relative_step=relative_step, warmup_init=warmup_init, ) super(Adafactor, self).__init__(params, defaults) @property def supports_memory_efficient_fp16(self): return True @property def supports_flat_params(self): return False def _get_lr(self, param_group, param_state): rel_step_sz = param_group["lr"] if param_group["relative_step"]: min_step = ( 1e-6 * param_state["step"] if param_group["warmup_init"] else 1e-2 ) rel_step_sz = min(min_step, 1.0 / math.sqrt(param_state["step"])) param_scale = 1.0 if param_group["scale_parameter"]: param_scale = max(param_group["eps"][1], param_state["RMS"]) return param_scale * rel_step_sz def _get_options(self, param_group, param_shape): factored = len(param_shape) >= 2 use_first_moment = param_group["beta1"] is not None return factored, use_first_moment def _rms(self, tensor): return tensor.norm(2) / (tensor.numel() ** 0.5) def _approx_sq_grad(self, exp_avg_sq_row, exp_avg_sq_col): r_factor = ( (exp_avg_sq_row / exp_avg_sq_row.mean(dim=-1, keepdim=True)) .rsqrt_() .unsqueeze(-1) ) c_factor = exp_avg_sq_col.unsqueeze(-2).rsqrt() return torch.mul(r_factor, c_factor) def step(self, closure=None): """Performs a single optimization step. Args: closure (callable, optional): A closure that reevaluates the model and returns the loss. 
""" loss = None if closure is not None: loss = closure() for group in self.param_groups: for p in group["params"]: if p.grad is None: continue grad = p.grad.data if grad.dtype in {torch.float16, torch.bfloat16}: grad = grad.float() if grad.is_sparse: raise RuntimeError("Adafactor does not support sparse gradients.") state = self.state[p] grad_shape = grad.shape factored, use_first_moment = self._get_options(group, grad_shape) # State Initialization if len(state) == 0: state["step"] = 0 if use_first_moment: # Exponential moving average of gradient values state["exp_avg"] = torch.zeros_like(grad) if factored: state["exp_avg_sq_row"] = torch.zeros(grad_shape[:-1]).to(grad) state["exp_avg_sq_col"] = torch.zeros( grad_shape[:-2] + grad_shape[-1:] ).to(grad) else: state["exp_avg_sq"] = torch.zeros_like(grad) state["RMS"] = 0 else: if use_first_moment: state["exp_avg"] = state["exp_avg"].to(grad) if factored: state["exp_avg_sq_row"] = state["exp_avg_sq_row"].to(grad) state["exp_avg_sq_col"] = state["exp_avg_sq_col"].to(grad) else: state["exp_avg_sq"] = state["exp_avg_sq"].to(grad) p_data_fp32 = p.data if p.data.dtype in {torch.float16, torch.bfloat16}: p_data_fp32 = p_data_fp32.float() state["step"] += 1 state["RMS"] = self._rms(p_data_fp32) group["lr"] = self._get_lr(group, state) beta2t = 1.0 - math.pow(state["step"], group["decay_rate"]) update = (grad ** 2) + group["eps"][0] if factored: exp_avg_sq_row = state["exp_avg_sq_row"] exp_avg_sq_col = state["exp_avg_sq_col"] exp_avg_sq_row.mul_(beta2t).add_( update.mean(dim=-1), alpha=1.0 - beta2t ) exp_avg_sq_col.mul_(beta2t).add_( update.mean(dim=-2), alpha=1.0 - beta2t ) # Approximation of exponential moving average of square of gradient update = self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col) update.mul_(grad) else: exp_avg_sq = state["exp_avg_sq"] exp_avg_sq.mul_(beta2t).add_(update, alpha=1.0 - beta2t) update = exp_avg_sq.rsqrt().mul_(grad) update.div_( (self._rms(update) / group["clip_threshold"]).clamp_(min=1.0) ) update.mul_(group["lr"]) if use_first_moment: exp_avg = state["exp_avg"] exp_avg.mul_(group["beta1"]).add_(update, alpha=1 - group["beta1"]) update = exp_avg if group["weight_decay"] != 0: p_data_fp32.add_( p_data_fp32, alpha=-group["weight_decay"] * group["lr"] ) p_data_fp32.add_(-update) if p.data.dtype in {torch.float16, torch.bfloat16}: p.data.copy_(p_data_fp32) return loss
10,902
39.531599
92
py
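When relative_step is left at its default of True, Adafactor ignores any external learning rate and derives one from the update count and the parameter RMS, as in _get_lr in the row above. A minimal sketch of that computation follows; the default values mirror the ones in the row and should be treated as assumptions.

```python
import math

def adafactor_relative_lr(step, param_rms, warmup_init=False, eps2=1e-3):
    """Sketch of Adafactor's internal learning rate for the default
    scale_parameter=True, relative_step=True configuration (see _get_lr above)."""
    min_step = 1e-6 * step if warmup_init else 1e-2
    rel_step = min(min_step, 1.0 / math.sqrt(step))
    return max(eps2, param_rms) * rel_step
```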
sign-topic
sign-topic-main/fairseq/optim/fused_adam.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import types import torch def get_fused_adam_class(): """ Look for the FusedAdam optimizer from apex. We first try to load the "contrib" interface, which is a bit faster than the main interface, but is technically deprecated. """ try: # The "deprecated" interface in recent versions of apex is a bit # faster than the main interface, since we don't use the apex # optimizer. This can be installed by passing the # `--deprecated_fused_adam` option when building apex. global fused_adam_cuda import importlib fused_adam_cuda = importlib.import_module("fused_adam_cuda") return FusedAdamV1 except ImportError: try: # fallback to the newer interface from apex.multi_tensor_apply import multi_tensor_applier from apex.optimizers import FusedAdam as _FusedAdam # noqa if multi_tensor_applier.available: return FusedAdamV2 except ImportError: pass return None class FusedAdamV1(torch.optim.Optimizer): """ Implements Adam algorithm. Currently GPU-only. Requires Apex to be installed via ``python setup.py install --cuda_ext --cpp_ext``. It has been proposed in `Adam: A Method for Stochastic Optimization`_. Compared to the original version in Apex, the fairseq version casts grads and params to FP32 internally to support ``--memory-efficient-fp16``. Args: params (iterable): iterable of parameters to optimize or dicts defining parameter groups. lr (float, optional): learning rate. (default: 1e-3) betas (Tuple[float, float], optional): coefficients used for computing running averages of gradient and its square. (default: (0.9, 0.999)) eps (float, optional): term added to the denominator to improve numerical stability. (default: 1e-8) weight_decay (float, optional): weight decay (L2 penalty) (default: 0) amsgrad (boolean, optional): whether to use the AMSGrad variant of this algorithm from the paper `On the Convergence of Adam and Beyond`_ (default: False) NOT SUPPORTED in FusedAdam! eps_inside_sqrt (boolean, optional): in the 'update parameters' step, adds eps to the bias-corrected second moment estimate before evaluating square root instead of adding it to the square root of second moment estimate as in the original paper. (default: False) .. _Adam: A Method for Stochastic Optimization: https://arxiv.org/abs/1412.6980 .. _On the Convergence of Adam and Beyond: https://openreview.net/forum?id=ryQu7f-RZ """ def __init__( self, params, lr=1e-3, bias_correction=True, betas=(0.9, 0.999), eps=1e-8, eps_inside_sqrt=False, weight_decay=0.0, max_grad_norm=0.0, amsgrad=False, use_fp16_stats=False, ): global fused_adam_cuda import importlib fused_adam_cuda = importlib.import_module("fused_adam_cuda") if amsgrad: raise RuntimeError("FusedAdam does not support the AMSGrad variant.") defaults = { "lr": lr, "bias_correction": bias_correction, "betas": betas, "eps": eps, "weight_decay": weight_decay, "max_grad_norm": max_grad_norm, } super().__init__(params, defaults) self.eps_mode = 0 if eps_inside_sqrt else 1 self.use_fp16_stats = use_fp16_stats self.FLOAT16_MAX = 65504.0 @property def supports_memory_efficient_fp16(self): return True @property def supports_flat_params(self): return True @property def supports_step_with_scale(self): return True def step(self, closure=None, grads=None, scale=1.0, grad_norms=None): """Performs a single optimization step. Args: closure (callable, optional): A closure that reevaluates the model and returns the loss. 
grads (list of tensors, optional): weight gradient to use for the optimizer update. If gradients have type torch.half, parameters are expected to be in type torch.float. (default: None) output params (list of tensors, optional): A reduced precision copy of the updated weights written out in addition to the regular updated weights. Have to be of same type as gradients. (default: None) scale (float, optional): factor to divide gradient tensor values by before applying to weights. (default: 1) """ loss = None if closure is not None: loss = closure() if grads is None: grads_group = [None] * len(self.param_groups) # backward compatibility # assuming a list/generator of parameter means single group elif isinstance(grads, types.GeneratorType): grads_group = [grads] elif type(grads[0]) != list: grads_group = [grads] else: grads_group = grads if grad_norms is None: grad_norms = [None] * len(self.param_groups) for group, grads_this_group, grad_norm in zip( self.param_groups, grads_group, grad_norms ): if grads_this_group is None: grads_this_group = [None] * len(group["params"]) # compute combined scale factor for this group combined_scale = scale if group.get("max_grad_norm", 0) > 0: # norm is in fact norm*scale clip = ((grad_norm / scale) + 1e-6) / group["max_grad_norm"] if clip > 1: combined_scale = clip * scale bias_correction = 1 if group.get("bias_correction", 1) else 0 for p, grad in zip(group["params"], grads_this_group): # note: p.grad should not ever be set for correct # operation of mixed precision optimizer that sometimes # sends None gradients if p.grad is None and grad is None: continue if grad is None: grad = p.grad.data if grad.is_sparse: raise RuntimeError( "FusedAdam does not support sparse gradients, " "please consider SparseAdam instead" ) if p.device.type == "cpu": p_data_fp32 = p.data.cuda(non_blocking=True).float() out_p = torch.tensor([], dtype=torch.float) else: p_data_fp32 = p.data.float() out_p = p.data state = self.state[p] # State initialization dtype = torch.float16 if self.use_fp16_stats else p_data_fp32.dtype if len(state) == 0: state["step"] = 0 # Exponential moving average of gradient values state["exp_avg"] = torch.zeros_like(p_data_fp32, dtype=dtype) # Exponential moving average of squared gradient values state["exp_avg_sq"] = torch.zeros_like(p_data_fp32, dtype=dtype) if self.use_fp16_stats: state["exp_avg_scale"] = 1.0 state["exp_avg_sq_scale"] = 1.0 else: device = p_data_fp32.device state["exp_avg"] = state["exp_avg"].to(device, dtype) state["exp_avg_sq"] = state["exp_avg_sq"].to(device, dtype) exp_avg = state["exp_avg"] exp_avg_sq = state["exp_avg_sq"] if self.use_fp16_stats: assert exp_avg.dtype == torch.float16 exp_avg = exp_avg.float() * state["exp_avg_scale"] exp_avg_sq = exp_avg_sq.float() * state["exp_avg_sq_scale"] beta1, beta2 = group["betas"] state["step"] += 1 with torch.cuda.device(p_data_fp32.device): fused_adam_cuda.adam( p_data_fp32, out_p, exp_avg, exp_avg_sq, grad, group["lr"], beta1, beta2, group["eps"], combined_scale, state["step"], self.eps_mode, bias_correction, group["weight_decay"], ) if p.device.type == "cpu": p.data.copy_(p_data_fp32, non_blocking=True) if self.use_fp16_stats: def inf_norm(t): return torch.norm(t, float("inf")) # from github.com/openai/jukebox/blob/master/jukebox/utils/fp16.py state["exp_avg_scale"], state["exp_avg_sq_scale"] = ( 1e-8 + inf_norm(exp_avg) / self.FLOAT16_MAX, 1e-8 + inf_norm(exp_avg_sq) / self.FLOAT16_MAX, ) state["exp_avg"], state["exp_avg_sq"] = ( (exp_avg / state["exp_avg_scale"]).half(), (exp_avg_sq / 
state["exp_avg_sq_scale"]).half(), ) return loss try: from apex.multi_tensor_apply import multi_tensor_applier from apex.optimizers import FusedAdam class FusedAdamV2(FusedAdam): """ Compared to the original version in Apex, the fairseq version casts grads and params to FP32 internally to support ``--memory-efficient-fp16``. """ def __init__(self, *args, use_fp16_stats=False, **kwargs): if use_fp16_stats: raise NotImplementedError( "--fp16-adam-stats is only supported with FusedAdamV1" ) super().__init__(*args, **kwargs) if not hasattr(self, "multi_tensor_adam"): raise Exception( "Apex installation is outdated. Please install an updated version of apex." ) @property def supports_memory_efficient_fp16(self): return True @property def supports_flat_params(self): return True def step( self, closure=None, grads=None, output_params=None, scale=None, grad_norms=None, ): """Performs a single optimization step.""" loss = None if closure is not None: loss = closure() for group in self.param_groups: bias_correction = 1 if group["bias_correction"] else 0 beta1, beta2 = group["betas"] # assume same step across group now to simplify things # per parameter step can be easily support by making it tensor, or pass list into kernel if "step" in group: group["step"] += 1 else: group["step"] = 1 # create lists for multi-tensor apply g_16, p_16, orig_p_16, m_16, v_16 = [], [], [], [], [] g_32, p_32, m_32, v_32 = [], [], [], [] for p in group["params"]: if p.grad is None: continue if p.grad.data.is_sparse: raise RuntimeError( "FusedAdam does not support sparse gradients, " "please consider SparseAdam instead" ) state = self.state[p] # State initialization if len(state) == 0: # Exponential moving average of gradient values state["exp_avg"] = torch.zeros_like(p.data, dtype=torch.float) # Exponential moving average of squared gradient values state["exp_avg_sq"] = torch.zeros_like( p.data, dtype=torch.float ) else: state["exp_avg"] = state["exp_avg"].to( device=p.data.device, dtype=torch.float ) state["exp_avg_sq"] = state["exp_avg_sq"].to( device=p.data.device, dtype=torch.float ) if p.dtype == torch.float16: g_16.append(p.grad.data.float()) p_16.append(p.data.float()) orig_p_16.append(p.data) m_16.append(state["exp_avg"]) v_16.append(state["exp_avg_sq"]) elif p.dtype == torch.float32: g_32.append(p.grad.data) p_32.append(p.data) m_32.append(state["exp_avg"]) v_32.append(state["exp_avg_sq"]) else: raise RuntimeError("FusedAdam only support fp16 and fp32.") with torch.cuda.device(p.device): if len(g_16) > 0: multi_tensor_applier( self.multi_tensor_adam, self._dummy_overflow_buf, [g_16, p_16, m_16, v_16], group["lr"], beta1, beta2, group["eps"], group["step"], self.adam_w_mode, bias_correction, group["weight_decay"], ) for orig_p, p in zip(orig_p_16, p_16): orig_p.copy_(p.data) if len(g_32) > 0: multi_tensor_applier( self.multi_tensor_adam, self._dummy_overflow_buf, [g_32, p_32, m_32, v_32], group["lr"], beta1, beta2, group["eps"], group["step"], self.adam_w_mode, bias_correction, group["weight_decay"], ) return loss except ImportError: pass
15,188
38.248062
104
py
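When use_fp16_stats is enabled, FusedAdamV1 keeps the Adam moments in FP16 and rescales them by their infinity norm so they fit the FP16 range, as at the end of step in the row above. A small sketch of that compress/expand round trip; the function names here are illustrative, not part of the module.

```python
import torch

FLOAT16_MAX = 65504.0

def compress_stats(t):
    """Scale a moment tensor by its inf-norm and store it as FP16."""
    scale = 1e-8 + torch.norm(t, float("inf")) / FLOAT16_MAX
    return (t / scale).half(), scale

def expand_stats(t_half, scale):
    """Recover an approximate FP32 moment tensor from the FP16 copy and its scale."""
    return t_half.float() * scale
```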
sign-topic
sign-topic-main/fairseq/optim/adagrad.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch.optim from . import LegacyFairseqOptimizer, register_optimizer @register_optimizer("adagrad") class Adagrad(LegacyFairseqOptimizer): def __init__(self, args, params): super().__init__(args) self._optimizer = torch.optim.Adagrad(params, **self.optimizer_config) @staticmethod def add_args(parser): """Add optimizer-specific arguments to the parser.""" # fmt: off parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD', help='weight decay') # fmt: on @property def optimizer_config(self): """ Return a kwarg dictionary that will be used to override optimizer args stored in checkpoints. This allows us to load a checkpoint and resume training using a different set of optimizer args, e.g., with a different learning rate. """ return { "lr": self.args.lr[0], "weight_decay": self.args.weight_decay, } @property def supports_flat_params(self): return False
1,279
30.219512
92
py
sign-topic
sign-topic-main/fairseq/optim/fairseq_optimizer.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch from fairseq import utils from fairseq.dataclass.utils import gen_parser_from_dataclass class FairseqOptimizer(object): def __init__(self, cfg): super().__init__() self.cfg = cfg @classmethod def add_args(cls, parser): """Add optimizer-specific arguments to the parser.""" dc = getattr(cls, "__dataclass", None) if dc is not None: gen_parser_from_dataclass(parser, dc()) @property def optimizer(self): """Return a torch.optim.optimizer.Optimizer instance.""" if not hasattr(self, "_optimizer"): raise NotImplementedError if not isinstance(self._optimizer, torch.optim.Optimizer): raise ValueError("_optimizer must be an instance of torch.optim.Optimizer") return self._optimizer @optimizer.setter def optimizer(self, optimizer): """Reset optimizer instance.""" if not hasattr(self, "_optimizer"): raise NotImplementedError if not isinstance(self._optimizer, torch.optim.Optimizer): raise ValueError("_optimizer must be an instance of torch.optim.Optimizer") self._optimizer = optimizer @property def optimizer_config(self): """ Return a kwarg dictionary that will be used to override optimizer args stored in checkpoints. This allows us to load a checkpoint and resume training using a different set of optimizer args, e.g., with a different learning rate. """ raise NotImplementedError @property def params(self): """Return an iterable of the parameters held by the optimizer.""" for param_group in self.param_groups: for p in param_group["params"]: yield p @property def param_groups(self): return self.optimizer.param_groups def __getstate__(self): return self._optimizer.__getstate__() def get_lr(self): """Return the current learning rate.""" return self.param_groups[0]["lr"] def set_lr(self, lr): """Set the learning rate.""" for param_group in self.param_groups: param_group["lr"] = lr def state_dict(self): """Return the optimizer's state dict.""" return self.optimizer.state_dict() def load_state_dict(self, state_dict, optimizer_overrides=None): """Load an optimizer state dict. In general we should prefer the configuration of the existing optimizer instance (e.g., learning rate) over that found in the state_dict. This allows us to resume training from a checkpoint using a new set of optimizer args. """ self.optimizer.load_state_dict(state_dict) if optimizer_overrides is not None and len(optimizer_overrides) > 0: # override learning rate, momentum, etc. with latest values for group in self.param_groups: group.update(optimizer_overrides) def backward(self, loss): """Computes the sum of gradients of the given tensor w.r.t. 
graph leaves.""" loss.backward() def all_reduce_grads(self, module): """Manually all-reduce gradients (if required).""" if hasattr(module, "all_reduce_grads"): module.all_reduce_grads() def multiply_grads(self, c): """Multiplies grads by a constant *c*.""" for p in self.params: if p.grad is not None: if torch.is_tensor(c): c = c.to(p.grad.device) p.grad.data.mul_(c) def clip_grad_norm(self, max_norm, aggregate_norm_fn=None): """Clips gradient norm.""" return utils.clip_grad_norm_(self.params, max_norm, aggregate_norm_fn) def step(self, closure=None, scale=1.0, groups=None): """Performs a single optimization step.""" if self.supports_step_with_scale: if self.supports_groups: self.optimizer.step(closure, scale=scale, groups=groups) else: self.optimizer.step(closure, scale=scale) else: if scale != 1.0: self.multiply_grads(1.0 / scale) if self.supports_groups: self.optimizer.step(closure, groups=groups) else: self.optimizer.step(closure) def zero_grad(self): """Clears the gradients of all optimized parameters.""" for p in self.params: p.grad = None self.optimizer.zero_grad() @property def supports_memory_efficient_fp16(self): if hasattr(self.optimizer, "supports_memory_efficient_fp16"): return self.optimizer.supports_memory_efficient_fp16 return False @property def supports_step_with_scale(self): if hasattr(self.optimizer, "supports_step_with_scale"): return self.optimizer.supports_step_with_scale return False @property def supports_groups(self): if hasattr(self.optimizer, "supports_groups"): return self.optimizer.supports_groups return False @property def supports_flat_params(self): """ Whether the optimizer supports collapsing of the model parameters/gradients into a single contiguous Tensor. """ if hasattr(self.optimizer, "supports_flat_params"): return self.optimizer.supports_flat_params return False def average_params(self): pass def broadcast_global_state_dict(self, state_dict): """ Broadcasts a global state dict to all ranks. Useful for optimizers that shard state between ranks. """ if hasattr(self.optimizer, "broadcast_global_state_dict"): return self.optimizer.broadcast_global_state_dict(state_dict) else: return state_dict class LegacyFairseqOptimizer(FairseqOptimizer): def __init__(self, args): self.args = args
6,176
33.316667
87
py
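FairseqOptimizer.step in the row above hides whether the wrapped optimizer can consume a loss scale itself; if it cannot, the gradients are un-scaled first via multiply_grads. A condensed sketch of that dispatch, assuming `opt` is a FairseqOptimizer instance and ignoring the groups argument:

```python
def scaled_step(opt, closure=None, scale=1.0):
    """Sketch of the scale handling in FairseqOptimizer.step above."""
    if opt.supports_step_with_scale:
        opt.optimizer.step(closure, scale=scale)
    else:
        if scale != 1.0:
            opt.multiply_grads(1.0 / scale)  # undo the loss scaling on the grads
        opt.optimizer.step(closure)
```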
sign-topic
sign-topic-main/fairseq/optim/adadelta.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch.optim from . import LegacyFairseqOptimizer, register_optimizer @register_optimizer("adadelta") class Adadelta(LegacyFairseqOptimizer): def __init__(self, args, params): super().__init__(args) self._optimizer = torch.optim.Adadelta(params, **self.optimizer_config) @staticmethod def add_args(parser): """Add optimizer-specific arguments to the parser.""" # fmt: off parser.add_argument('--adadelta-rho', type=float, default=0.9, metavar='RHO', help='coefficient used for computing a running average of squared gradients') parser.add_argument('--adadelta-eps', type=float, default=1e-6, metavar='EPS', help='term added to the denominator to improve numerical stability') parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD', help='weight decay') parser.add_argument('--anneal-eps', action='store_true', help='flag to anneal eps') # fmt: on @property def optimizer_config(self): """ Return a kwarg dictionary that will be used to override optimizer args stored in checkpoints. This allows us to load a checkpoint and resume training using a different set of optimizer args, e.g., with a different learning rate. """ return { "lr": self.args.lr[0], "rho": self.args.adadelta_rho, "eps": self.args.adadelta_eps, "weight_decay": self.args.weight_decay, } @property def supports_flat_params(self): return True
1,835
37.25
105
py
sign-topic
sign-topic-main/fairseq/optim/lr_scheduler/inverse_square_root_schedule.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from collections.abc import Collection from dataclasses import dataclass, field from typing import List from omegaconf import II from fairseq.dataclass import FairseqDataclass from fairseq.optim.lr_scheduler import FairseqLRScheduler, register_lr_scheduler @dataclass class InverseSquareRootLRScheduleConfig(FairseqDataclass): warmup_updates: int = field( default=4000, metadata={"help": "warmup the learning rate linearly for the first N updates"}, ) warmup_init_lr: float = field( default=-1, metadata={ "help": "initial learning rate during warmup phase; default is cfg.lr" }, ) lr: List[float] = II("optimization.lr") @register_lr_scheduler("inverse_sqrt", dataclass=InverseSquareRootLRScheduleConfig) class InverseSquareRootSchedule(FairseqLRScheduler): """Decay the LR based on the inverse square root of the update number. We also support a warmup phase where we linearly increase the learning rate from some initial learning rate (``--warmup-init-lr``) until the configured learning rate (``--lr``). Thereafter we decay proportional to the number of updates, with a decay factor set to align with the configured learning rate. During warmup:: lrs = torch.linspace(cfg.warmup_init_lr, cfg.lr, cfg.warmup_updates) lr = lrs[update_num] After warmup:: decay_factor = cfg.lr * sqrt(cfg.warmup_updates) lr = decay_factor / sqrt(update_num) """ def __init__(self, cfg: InverseSquareRootLRScheduleConfig, optimizer): super().__init__(cfg, optimizer) if isinstance(cfg.lr, Collection) and len(cfg.lr) > 1: raise ValueError( "Cannot use a fixed learning rate schedule with inverse_sqrt." " Consider --lr-scheduler=fixed instead." ) warmup_end_lr = cfg.lr[0] if isinstance(cfg.lr, Collection) else cfg.lr if cfg.warmup_init_lr < 0: cfg.warmup_init_lr = 0 if cfg.warmup_updates > 0 else warmup_end_lr # linearly warmup for the first cfg.warmup_updates self.lr_step = (warmup_end_lr - cfg.warmup_init_lr) / cfg.warmup_updates # then, decay prop. to the inverse square root of the update number self.decay_factor = warmup_end_lr * cfg.warmup_updates ** 0.5 # initial learning rate self.lr = cfg.warmup_init_lr self.optimizer.set_lr(self.lr) def step(self, epoch, val_loss=None): """Update the learning rate at the end of the given epoch.""" super().step(epoch, val_loss) # we don't change the learning rate at epoch boundaries return self.optimizer.get_lr() def step_update(self, num_updates): """Update the learning rate after each update.""" if num_updates < self.cfg.warmup_updates: self.lr = self.cfg.warmup_init_lr + num_updates * self.lr_step else: self.lr = self.decay_factor * num_updates ** -0.5 self.optimizer.set_lr(self.lr) return self.lr
3,232
36.593023
87
py
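The inverse-square-root schedule in the row above reduces to a closed-form function of the update count. A small sketch, using hypothetical values for the peak learning rate and warmup settings:

```python
import math

def inverse_sqrt_lr(num_updates, peak_lr=5e-4, warmup_updates=4000, warmup_init_lr=1e-7):
    """Sketch of InverseSquareRootSchedule.step_update above."""
    if num_updates < warmup_updates:
        # linear warmup from warmup_init_lr to peak_lr
        lr_step = (peak_lr - warmup_init_lr) / warmup_updates
        return warmup_init_lr + num_updates * lr_step
    # decay proportional to the inverse square root of the update number
    decay_factor = peak_lr * math.sqrt(warmup_updates)
    return decay_factor / math.sqrt(num_updates)
```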
sign-topic
sign-topic-main/fairseq/optim/lr_scheduler/tri_stage_lr_scheduler.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math from dataclasses import dataclass, field from typing import Optional, List, Tuple from omegaconf import II from fairseq.dataclass import FairseqDataclass from fairseq.optim.lr_scheduler import FairseqLRScheduler, register_lr_scheduler @dataclass class TriStageLRScheduleConfig(FairseqDataclass): warmup_steps: int = field( default=0, metadata={"help": "warmup the learning rate linearly for the first N updates"}, ) hold_steps: int = field( default=0, metadata={"help": "steps in hold stage"}, ) decay_steps: int = field( default=0, metadata={"help": "steps in decay stages"}, ) phase_ratio: Optional[Tuple[float, float, float]] = field( default=None, metadata={ "help": ( "if set, automatically sets warmup/hold/decay steps to the ratio " "specified here from max_updates. the ratios must add up to 1.0" ) }, ) init_lr_scale: float = field( default=0.01, metadata={"help": "initial learning rate scale during warmup phase"}, ) final_lr_scale: float = field( default=0.01, metadata={"help": "final learning rate scale"}, ) max_update: float = II("optimization.max_update") lr: List[float] = II("optimization.lr") @register_lr_scheduler("tri_stage", dataclass=TriStageLRScheduleConfig) class TriStageLRSchedule(FairseqLRScheduler): """Tristage learning rate schedulr Implement the learning rate scheduler in https://arxiv.org/pdf/1904.08779.pdf Similar to inverse_squre_root scheduler, but tri_stage learning rate employs three stages LR scheduling: - warmup stage, starting from `lr` * `init_lr_scale`, linearly increased to `lr` in `warmup_steps` iterations - hold stage, after `warmup_steps`, keep the LR as `lr` for `hold_steps` iterations - decay stage, after hold stage, decay LR exponetially to `lr` * `final_lr_scale` in `decay_steps`; after that LR is keep as `final_lr_scale` * `lr` During warmup:: init_lr = cfg.init_lr_scale * cfg.lr lrs = torch.linspace(init_lr, cfg.lr, cfg.warmup_steps) lr = lrs[update_num] During hold:: lr = cfg.lr During decay:: decay_factor = - math.log(cfg.final_lr_scale) / cfg.decay_steps lr = cfg.lr * exp(- (update_num - warmup_steps - decay_steps) * decay_factor) After that:: lr = cfg.lr * cfg.final_lr_scale """ def __init__(self, cfg: TriStageLRScheduleConfig, optimizer): super().__init__(cfg, optimizer) if len(cfg.lr) > 1: raise ValueError( "Cannot use a fixed learning rate schedule with tri-stage lr." " Consider --lr-scheduler=fixed instead." 
) # calculate LR at each point self.peak_lr = cfg.lr[0] self.init_lr = cfg.init_lr_scale * cfg.lr[0] self.final_lr = cfg.final_lr_scale * cfg.lr[0] if cfg.phase_ratio is not None: assert cfg.max_update > 0 assert sum(cfg.phase_ratio) == 1, "phase ratios must add up to 1" self.warmup_steps = int(cfg.max_update * cfg.phase_ratio[0]) self.hold_steps = int(cfg.max_update * cfg.phase_ratio[1]) self.decay_steps = int(cfg.max_update * cfg.phase_ratio[2]) else: self.warmup_steps = cfg.warmup_steps self.hold_steps = cfg.hold_steps self.decay_steps = cfg.decay_steps assert ( self.warmup_steps + self.hold_steps + self.decay_steps > 0 ), "please specify steps or phase_ratio" self.warmup_rate = ( (self.peak_lr - self.init_lr) / self.warmup_steps if self.warmup_steps != 0 else 0 ) self.decay_factor = -math.log(cfg.final_lr_scale) / self.decay_steps # initial learning rate self.lr = self.init_lr self.optimizer.set_lr(self.lr) def _decide_stage(self, update_step): """ return stage, and the corresponding steps within the current stage """ if update_step < self.warmup_steps: # warmup state return 0, update_step offset = self.warmup_steps if update_step < offset + self.hold_steps: # hold stage return 1, update_step - offset offset += self.hold_steps if update_step <= offset + self.decay_steps: # decay stage return 2, update_step - offset offset += self.decay_steps # still here ? constant lr stage return 3, update_step - offset def step(self, epoch, val_loss=None): """Update the learning rate at the end of the given epoch.""" super().step(epoch, val_loss) # we don't change the learning rate at epoch boundaries return self.optimizer.get_lr() def step_update(self, num_updates): """Update the learning rate after each update.""" stage, steps_in_stage = self._decide_stage(num_updates) if stage == 0: self.lr = self.init_lr + self.warmup_rate * steps_in_stage elif stage == 1: self.lr = self.peak_lr elif stage == 2: self.lr = self.peak_lr * math.exp(-self.decay_factor * steps_in_stage) elif stage == 3: self.lr = self.final_lr else: raise ValueError("Undefined stage") self.optimizer.set_lr(self.lr) return self.lr
5,766
31.767045
87
py
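The three stages described in the TriStageLRSchedule docstring (linear warmup, hold, exponential decay to a final value) can be written as a single closed-form function of the update count. A sketch follows, with the stage lengths passed explicitly rather than derived from phase_ratio:

```python
import math

def tri_stage_lr(num_updates, peak_lr, warmup_steps, hold_steps, decay_steps,
                 init_lr_scale=0.01, final_lr_scale=0.01):
    """Sketch of TriStageLRSchedule.step_update above."""
    init_lr = init_lr_scale * peak_lr
    final_lr = final_lr_scale * peak_lr
    if num_updates < warmup_steps:                              # stage 0: linear warmup
        return init_lr + (peak_lr - init_lr) / warmup_steps * num_updates
    if num_updates < warmup_steps + hold_steps:                 # stage 1: hold
        return peak_lr
    if num_updates <= warmup_steps + hold_steps + decay_steps:  # stage 2: exponential decay
        decay_factor = -math.log(final_lr_scale) / decay_steps
        steps_in_decay = num_updates - warmup_steps - hold_steps
        return peak_lr * math.exp(-decay_factor * steps_in_decay)
    return final_lr                                             # stage 3: constant
```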
sign-topic
sign-topic-main/fairseq/optim/lr_scheduler/reduce_lr_on_plateau.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from dataclasses import dataclass, field from typing import List import torch.optim.lr_scheduler from omegaconf import II from fairseq.dataclass import FairseqDataclass from fairseq.optim.lr_scheduler import FairseqLRScheduler, register_lr_scheduler @dataclass class ReduceLROnPlateauLRScheduleConfig(FairseqDataclass): lr_shrink: float = field( default=0.1, metadata={"help": "shrink factor for annealing"} ) lr_threshold: float = field( default=1e-4, metadata={ "help": ( "threshold for measuring the new optimum, to only focus on " "significant changes" ) }, ) lr_patience: int = field( default=0, metadata={ "help": ( "number of epochs with no improvement after which learning rate will " "be reduced" ) }, ) warmup_updates: int = field( default=0, metadata={"help": "warmup the learning rate linearly for the first N updates"}, ) warmup_init_lr: float = field( default=-1, metadata={ "help": "initial learning rate during warmup phase; default is cfg.lr" }, ) lr: List[float] = II("optimization.lr") maximize_best_checkpoint_metric: bool = II( "checkpoint.maximize_best_checkpoint_metric" ) @register_lr_scheduler( "reduce_lr_on_plateau", dataclass=ReduceLROnPlateauLRScheduleConfig ) class ReduceLROnPlateauLRSchedule(FairseqLRScheduler): """ Decay the LR by a factor every time the validation loss plateaus. Also comes with optional warmup phase, where we linearly increase the learning rate from some initial learning rate (``--warmup-init-lr``) until the configured learning rate (``--lr``). Thereafter the lr is adjusted according to original reduce_on_plateau scheme. During warmup:: lrs = torch.linspace( cfg.warmup_init_lr, cfg.lr, cfg.warmup_updates ) lr = lrs[update_num] """ def __init__(self, cfg: ReduceLROnPlateauLRScheduleConfig, optimizer): super().__init__(cfg, optimizer) if len(cfg.lr) > 1: raise ValueError( "Cannot use a fixed learning rate schedule with reduce_lr_on_plateau." " Consider --lr-scheduler=fixed instead." 
) self.lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau( self.optimizer.optimizer, patience=cfg.lr_patience, factor=cfg.lr_shrink, mode="max" if cfg.maximize_best_checkpoint_metric else "min", threshold=cfg.lr_threshold, ) warmup_end_lr = cfg.lr[0] # if no warm up, sets initial lr to be cfg.lr[0] if cfg.warmup_init_lr < 0: cfg.warmup_init_lr = 0 if cfg.warmup_updates > 0 else warmup_end_lr # linearly warmup for the first cfg.warmup_updates if cfg.warmup_updates > 0: self.lr_step = (warmup_end_lr - cfg.warmup_init_lr) / cfg.warmup_updates # this flag is either set from arg when no warm up, or set by # step_update() when warmup finishes self.warmup_end = True if cfg.warmup_updates <= 0 else False # initial learning rate # this self.lr is used only during init and/or warm up period self.lr = warmup_end_lr if self.warmup_end else cfg.warmup_init_lr self.optimizer.set_lr(self.lr) def state_dict(self): """Return the LR scheduler state dict.""" return { "best": self.lr_scheduler.best, "last_epoch": self.lr_scheduler.last_epoch, } def load_state_dict(self, state_dict): """Load an LR scheduler state dict.""" self.lr_scheduler.best = state_dict["best"] if "last_epoch" in state_dict: self.lr_scheduler.last_epoch = state_dict["last_epoch"] def step(self, epoch, val_loss=None): """ Update the learning rate at the end of the given epoch if warmup finishes otherwise no update of lr on epoch boundaries """ if val_loss is not None and self.warmup_end is True: self.lr_scheduler.step(val_loss) else: self.lr_scheduler.last_epoch = epoch return self.optimizer.get_lr() def step_update(self, num_updates): """ Update the learning rate after each update.""" # if there is warmup if self.cfg.warmup_updates > 0: if num_updates <= self.cfg.warmup_updates: self.lr = self.cfg.warmup_init_lr + num_updates * self.lr_step self.optimizer.set_lr(self.lr) else: if self.warmup_end is False: self.warmup_end = True # else do nothing return self.optimizer.get_lr()
5,047
34.055556
87
py
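During warmup, ReduceLROnPlateauLRSchedule sets the learning rate linearly per update and only hands control to torch's ReduceLROnPlateau, which is stepped with the validation loss at epoch boundaries, once warmup ends. A compressed sketch of that handoff, with hypothetical configuration values:

```python
def warmup_lr(num_updates, lr=1e-3, warmup_updates=4000, warmup_init_lr=1e-7):
    """Sketch of step_update above; returns None once plateau-based decay takes over."""
    if warmup_updates > 0 and num_updates <= warmup_updates:
        lr_step = (lr - warmup_init_lr) / warmup_updates
        return warmup_init_lr + num_updates * lr_step
    return None  # warmup finished; ReduceLROnPlateau.step(val_loss) decides from here on
```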
sign-topic
sign-topic-main/fairseq/optim/lr_scheduler/cosine_lr_scheduler.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math from collections.abc import Collection from dataclasses import dataclass, field from typing import List from omegaconf import II from fairseq.dataclass import FairseqDataclass from fairseq.optim.lr_scheduler import FairseqLRScheduler, register_lr_scheduler @dataclass class CosineLRScheduleConfig(FairseqDataclass): warmup_updates: int = field( default=0, metadata={"help": "warmup the learning rate linearly for the first N updates"}, ) warmup_init_lr: float = field( default=-1, metadata={ "help": "initial learning rate during warmup phase; default is cfg.lr" }, ) lr: List[float] = field( default=II("optimization.lr"), metadata={"help": "max learning rate, must be more than cfg.min_lr"}, ) min_lr: float = field(default=0.0, metadata={"help": "min learning rate"}) t_mult: float = field( default=1.0, metadata={"help": "factor to grow the length of each period"} ) lr_period_updates: float = field( default=-1, metadata={"help": "initial number of updates per period"} ) lr_shrink: float = field( default=0.1, metadata={"help": "shrink factor for annealing"} ) # This is not required, but is for convenience in inferring lr_period_updates max_update: int = II("optimization.max_update") @register_lr_scheduler("cosine", dataclass=CosineLRScheduleConfig) class CosineLRSchedule(FairseqLRScheduler): """Assign LR based on a cyclical schedule that follows the cosine function. See https://arxiv.org/pdf/1608.03983.pdf for details. We also support a warmup phase where we linearly increase the learning rate from some initial learning rate (``--warmup-init-lr``) until the configured max learning rate (``--lr``). During warmup:: lrs = torch.linspace(cfg.warmup_init_lr, cfg.lr, cfg.warmup_updates) lr = lrs[update_num] After warmup:: lr = cfg.min_lr + 0.5*(cfg.lr - cfg.min_lr)*(1 + cos(t_curr / t_i)) where ``t_curr`` is current percentage of updates within the current period range and ``t_i`` is the current period range, which is scaled by ``t_mul`` after every iteration. """ def __init__(self, cfg: CosineLRScheduleConfig, fairseq_optimizer): super().__init__(cfg, fairseq_optimizer) if isinstance(cfg.lr, Collection) and len(cfg.lr) > 1: raise ValueError( "Cannot use a fixed learning rate schedule with cosine." f" Consider --lr-scheduler=fixed instead. 
({cfg.lr})" ) self.max_lr = cfg.lr[0] if isinstance(cfg.lr, Collection) else cfg.lr assert ( self.max_lr > cfg.min_lr ), f"max_lr (={cfg.lr}) must be more than min_lr (={cfg.min_lr})" warmup_end_lr = self.max_lr if cfg.warmup_init_lr < 0: cfg.warmup_init_lr = cfg.min_lr self.t_mult = cfg.t_mult self.period = cfg.lr_period_updates if self.period <= 0: assert ( cfg.max_update > 0 ), "Either --max_update or --lr-period-updates must be set" self.period = cfg.max_update - cfg.warmup_updates if cfg.warmup_updates > 0: # linearly warmup for the first cfg.warmup_updates self.lr_step = (warmup_end_lr - cfg.warmup_init_lr) / cfg.warmup_updates else: self.lr_step = 1 self.warmup_updates = cfg.warmup_updates self.lr_shrink = cfg.lr_shrink # initial learning rate self.lr = cfg.warmup_init_lr self.optimizer.set_lr(self.lr) def step(self, epoch, val_loss=None): """Update the learning rate at the end of the given epoch.""" super().step(epoch, val_loss) # we don't change the learning rate at epoch boundaries return self.optimizer.get_lr() def step_update(self, num_updates): """Update the learning rate after each update.""" if num_updates < self.cfg.warmup_updates: self.lr = self.cfg.warmup_init_lr + num_updates * self.lr_step else: curr_updates = num_updates - self.cfg.warmup_updates if self.t_mult != 1: i = math.floor( math.log( 1 - curr_updates / self.period * (1 - self.t_mult), self.t_mult ) ) t_i = self.t_mult ** i * self.period t_curr = ( curr_updates - (1 - self.t_mult ** i) / (1 - self.t_mult) * self.period ) else: i = math.floor(curr_updates / self.period) t_i = self.period t_curr = curr_updates - (self.period * i) lr_shrink = self.lr_shrink ** i min_lr = self.cfg.min_lr * lr_shrink max_lr = self.max_lr * lr_shrink self.lr = min_lr + 0.5 * (max_lr - min_lr) * ( 1 + math.cos(math.pi * t_curr / t_i) ) self.optimizer.set_lr(self.lr) return self.lr
5,307
34.864865
87
py
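For the common t_mult == 1 case, the cosine schedule in the row above cycles through fixed-length periods, shrinking both the maximum and minimum learning rate by lr_shrink at each restart. A compact sketch of step_update restricted to that case:

```python
import math

def cosine_lr(num_updates, max_lr, min_lr, period,
              warmup_updates=0, warmup_init_lr=0.0, lr_shrink=0.1):
    """Sketch of CosineLRSchedule.step_update above, assuming t_mult == 1."""
    if warmup_updates > 0 and num_updates < warmup_updates:
        lr_step = (max_lr - warmup_init_lr) / warmup_updates
        return warmup_init_lr + num_updates * lr_step
    curr = num_updates - warmup_updates
    i = curr // period                # index of the current restart period
    t_curr = curr - period * i        # position within that period
    shrink = lr_shrink ** i
    lo, hi = min_lr * shrink, max_lr * shrink
    return lo + 0.5 * (hi - lo) * (1 + math.cos(math.pi * t_curr / period))
```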
sign-topic
sign-topic-main/fairseq/distributed/fully_sharded_data_parallel.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import contextlib from typing import Optional import torch from fairseq.dataclass.configs import DistributedTrainingConfig from fairseq.distributed import utils as dist_utils try: from fairscale.nn.data_parallel import FullyShardedDataParallel as FSDP has_FSDP = True except ImportError: FSDP = torch.nn.Module has_FSDP = False class FullyShardedDataParallel(FSDP): """ A small wrapper around fairscale's FullyShardedDataParallel (FSDP) with some fairseq-specific checkpoint saving/loading logic. Args: use_sharded_state (bool): if True, then ``state_dict`` will return ``FSDP.local_state_dict`` and ``load_state_dict`` will call ``FSDP.load_local_state_dict``. Otherwise, ``state_dict`` will return the full model weights on data parallel rank 0 (empty on other ranks) and ``load_state_dict`` will broadcast model weights from rank 0 to other ranks. """ def __init__(self, *args, use_sharded_state: bool = False, **kwargs): if not has_FSDP: raise ImportError( "Cannot find FullyShardedDataParallel. " "Please install fairscale with: pip install fairscale" ) super().__init__(*args, **kwargs) self.use_sharded_state = use_sharded_state @property def unwrapped_module(self) -> torch.nn.Module: if self.flatten_parameters: return self.module.module else: return self.module def state_dict(self, destination=None, prefix="", keep_vars=False): if self.use_sharded_state: return super().local_state_dict( destination=destination, prefix=prefix, keep_vars=keep_vars ) else: if self.rank == 0: return super().state_dict( destination=destination, prefix=prefix, keep_vars=keep_vars ) else: # We must call state_dict() due to use of communication # primitives. But we don't use the result. super().state_dict() return destination or {} def load_state_dict(self, state_dict, strict=True, model_cfg=None): if self.use_sharded_state: return super().load_local_state_dict(state_dict, strict=strict) else: state_dict = dist_utils.broadcast_object( state_dict, src_rank=0, group=self.process_group ) return super().load_state_dict(state_dict, strict=strict) @contextlib.contextmanager def fsdp_enable_wrap(cfg: DistributedTrainingConfig): try: from fairscale.nn import enable_wrap except ImportError: raise ImportError( "Cannot find FullyShardedDataParallel. " "Please install fairscale with: pip install fairscale" ) if cfg.memory_efficient_fp16: assert cfg.fp16 # memory_efficient_fp16 should imply fp16 group = dist_utils.get_data_parallel_group() if group is None and cfg.distributed_world_size == 1: from fairscale.utils.testing import DummyProcessGroup group = DummyProcessGroup(rank=0, size=1) fsdp_config = { "process_group": group, "reshard_after_forward": not cfg.no_reshard_after_forward, "mixed_precision": cfg.fp16 and not cfg.memory_efficient_fp16, "fp32_reduce_scatter": cfg.fp32_reduce_scatter, "flatten_parameters": not cfg.not_fsdp_flatten_parameters, "cpu_offload": cfg.cpu_offload, "compute_dtype": torch.float16 if cfg.fp16 else torch.float32, "bucket_cap_mb": cfg.bucket_cap_mb, "state_dict_device": torch.device("cpu"), # reduce GPU mem usage } with enable_wrap( wrapper_cls=FullyShardedDataParallel, use_sharded_state=cfg.use_sharded_state, **fsdp_config, ): yield def fsdp_wrap(module, min_num_params: Optional[int] = None, **kwargs): """ Helper to wrap layers/modules in FSDP. This falls back to a no-op if fairscale is not available. 
Args: module (nn.Module): module to (maybe) wrap min_num_params (int, Optional): minimum number of layer params to wrap """ try: from fairscale.nn import wrap if min_num_params is not None: num_params = sum(p.numel() for p in module.parameters()) if num_params >= min_num_params: return wrap(module, **kwargs) else: return module else: return wrap(module, **kwargs) except ImportError: return module
4,832
34.536765
80
py
sign-topic
sign-topic-main/fairseq/distributed/module_proxy_wrapper.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from torch import nn class ModuleProxyWrapper(nn.Module): """ Wrap a DistributedDataParallel module and forward requests for missing attributes to the module wrapped by DDP (the twice-wrapped module). Also forward calls to :func:`state_dict` and :func:`load_state_dict`. Usage:: module.xyz = "hello world" wrapped_module = DistributedDataParallel(module, **ddp_args) wrapped_module = ModuleProxyWrapper(wrapped_module) assert wrapped_module.xyz == "hello world" assert wrapped_module.state_dict().keys() == module.state_dict().keys() Args: module (nn.Module): module to wrap """ def __init__(self, module: nn.Module): super().__init__() assert hasattr( module, "module" ), "ModuleProxyWrapper expects input to wrap another module" self.module = module def __getattr__(self, name): """Forward missing attributes to twice-wrapped module.""" try: # defer to nn.Module's logic return super().__getattr__(name) except AttributeError: try: # forward to the once-wrapped module return getattr(self.module, name) except AttributeError: # forward to the twice-wrapped module return getattr(self.module.module, name) def state_dict(self, *args, **kwargs): """Forward to the twice-wrapped module.""" return self.module.module.state_dict(*args, **kwargs) def load_state_dict(self, *args, **kwargs): """Forward to the twice-wrapped module.""" return self.module.module.load_state_dict(*args, **kwargs) def forward(self, *args, **kwargs): return self.module(*args, **kwargs)
1,965
33.491228
79
py
sign-topic
sign-topic-main/fairseq/distributed/tpu_distributed_data_parallel.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch from torch import nn from fairseq.distributed import utils class TPUDistributedDataParallel(nn.Module): def __init__(self, module, process_group): super().__init__() self.module = module self.process_group = process_group self.world_size = utils.get_world_size(self.process_group) def forward(self, *inputs, **kwargs): return self.module(*inputs, **kwargs) def all_reduce_grads(self): gradients = [] for p in self.parameters(): if not p.requires_grad: continue if p.grad is None: p.grad = torch.zeros_like(p) if p.grad.requires_grad: raise RuntimeError( "TPUDistributedDataParallel only works with gradients that don't " "require grad" ) gradients.append(p.grad) import torch_xla.core.xla_model as xm xm.all_reduce( "sum", gradients, scale=1.0 / self.world_size, groups=self.process_group[1], )
1,285
28.227273
86
py
sign-topic
sign-topic-main/fairseq/distributed/utils.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import io import logging import os import pickle import random import socket import struct import subprocess import warnings from argparse import Namespace from collections import OrderedDict from dataclasses import dataclass from typing import Any, Dict, List, Mapping, Optional import torch import torch.distributed as dist from fairseq.dataclass.configs import DistributedTrainingConfig, FairseqConfig from omegaconf import open_dict try: import torch_xla.core.xla_model as xm except ImportError: xm = None # Flag to indicate if we're using Megatron # NOTE: this is a temporary hack until we move away from Megatron's model parallel init _USE_MEGATRON = False # Whether to use XLA ops (e.g., on TPUs) instead of CUDA ops. _USE_XLA = False logger = logging.getLogger(__name__) def is_master(cfg: DistributedTrainingConfig): return cfg.distributed_rank == 0 def infer_init_method(cfg: DistributedTrainingConfig, force_distributed=False): if cfg.distributed_init_method is not None or cfg.tpu: return num_pipelines_per_node = None if cfg.pipeline_model_parallel: num_pipeline_devices, num_pipelines_per_node = _pipeline_parallel_pre_init(cfg) if all( key in os.environ for key in ["MASTER_ADDR", "MASTER_PORT", "WORLD_SIZE", "RANK"] ): # support torch.distributed.launch _infer_torch_distributed_launch_init(cfg) elif cfg.distributed_port > 0: # we can determine the init method automatically for Slurm _infer_slurm_init(cfg, num_pipelines_per_node) elif cfg.distributed_world_size > 1 or force_distributed: # fallback for single node with multiple GPUs _infer_single_node_init(cfg) if cfg.pipeline_model_parallel: _pipeline_parallel_post_init(cfg, num_pipeline_devices, num_pipelines_per_node) elif not cfg.distributed_no_spawn: with open_dict(cfg): cfg.distributed_num_procs = min( torch.cuda.device_count(), cfg.distributed_world_size ) def _infer_torch_distributed_launch_init(cfg: DistributedTrainingConfig): cfg.distributed_init_method = "env://" cfg.distributed_world_size = int(os.environ["WORLD_SIZE"]) cfg.distributed_rank = int(os.environ["RANK"]) # processes are created by torch.distributed.launch cfg.distributed_no_spawn = True def _infer_slurm_init(cfg: DistributedTrainingConfig, num_pipelines_per_node): node_list = os.environ.get("SLURM_STEP_NODELIST") if node_list is None: node_list = os.environ.get("SLURM_JOB_NODELIST") if node_list is not None: try: hostnames = subprocess.check_output( ["scontrol", "show", "hostnames", node_list] ) cfg.distributed_init_method = "tcp://{host}:{port}".format( host=hostnames.split()[0].decode("utf-8"), port=cfg.distributed_port, ) nnodes = int(os.environ.get("SLURM_NNODES")) ntasks_per_node = os.environ.get("SLURM_NTASKS_PER_NODE") if ntasks_per_node is not None: ntasks_per_node = int(ntasks_per_node) else: ntasks = int(os.environ.get("SLURM_NTASKS")) nnodes = int(os.environ.get("SLURM_NNODES")) assert ntasks % nnodes == 0 ntasks_per_node = int(ntasks / nnodes) if ntasks_per_node == 1: gpus_per_node = torch.cuda.device_count() node_id = int(os.environ.get("SLURM_NODEID")) cfg.distributed_rank = node_id * gpus_per_node cfg.distributed_world_size = nnodes * gpus_per_node elif cfg.pipeline_model_parallel: assert ntasks_per_node == num_pipelines_per_node, ( "SLURM --ntasks-per-node must match number of pipelines per " "node (={})".format(num_pipelines_per_node) ) cfg.distributed_no_spawn = True # For 4-way 
MP on nodes with 8 GPUs, ranks will be [0, 1] on # the first node, [1, 2] on the second node, etc. This # matches torch.distributed.launch. node_id = int(os.environ.get("SLURM_NODEID")) local_id = int(os.environ.get("SLURM_LOCALID")) cfg.distributed_rank = node_id * num_pipelines_per_node + local_id # In the above example, device_id will always be in [0, 1], # which also matches torch.distributed.launch. cfg.device_id = local_id # We also want to set distributed_world_size to be the total # number of pipelines across all nodes. cfg.distributed_world_size = nnodes * num_pipelines_per_node else: assert ntasks_per_node == cfg.distributed_world_size // nnodes cfg.distributed_no_spawn = True cfg.distributed_rank = int(os.environ.get("SLURM_PROCID")) cfg.device_id = int(os.environ.get("SLURM_LOCALID")) except subprocess.CalledProcessError as e: # scontrol failed raise e except FileNotFoundError: # Slurm is not installed pass def _infer_single_node_init(cfg: DistributedTrainingConfig): assert ( cfg.distributed_world_size <= torch.cuda.device_count() ), f"world size is {cfg.distributed_world_size} but have {torch.cuda.device_count()} available devices" port = random.randint(10000, 20000) cfg.distributed_init_method = "tcp://localhost:{port}".format(port=port) def _pipeline_parallel_pre_init(cfg: DistributedTrainingConfig): from fairseq import utils balance_exists = ( cfg.pipeline_balance is not None or cfg.pipeline_encoder_balance is not None or cfg.pipeline_decoder_balance is not None ) devices_exist = ( cfg.pipeline_devices is not None or cfg.pipeline_encoder_devices is not None or cfg.pipeline_decoder_devices is not None ) if not balance_exists: raise ValueError( "--pipeline-balance is currently required for pipeline model parallelism" ) if not devices_exist: raise ValueError( "--pipeline-devices is currently required for pipeline model parallelism" ) cfg.pipeline_balance = utils.eval_str_list(cfg.pipeline_balance, type=int) if cfg.pipeline_devices is not None: cfg.pipeline_devices = utils.eval_str_list(cfg.pipeline_devices, type=int) num_pipeline_devices = len(set(cfg.pipeline_devices)) else: cfg.pipeline_encoder_devices = utils.eval_str_list( cfg.pipeline_encoder_devices, type=int ) cfg.pipeline_decoder_devices = utils.eval_str_list( cfg.pipeline_decoder_devices, type=int ) num_pipeline_devices = len( set(cfg.pipeline_encoder_devices + cfg.pipeline_decoder_devices) ) gpus_per_node = torch.cuda.device_count() assert ( gpus_per_node >= num_pipeline_devices and gpus_per_node % num_pipeline_devices == 0 ), ( "the number of unique device IDs in --pipeline-devices must evenly divide " "the number of GPUs per node (multi-node pipelining is not yet supported)" ) num_pipelines_per_node = gpus_per_node // num_pipeline_devices return num_pipeline_devices, num_pipelines_per_node def _pipeline_parallel_post_init( cfg: DistributedTrainingConfig, num_pipeline_devices, num_pipelines_per_node ): if not cfg.distributed_no_spawn: # When distributed_no_spawn is False, we expect distributed_rank and # distributed_world_size to be based on the total number of GPUs, so # we need to correct them to be based on the number of pipelines. assert cfg.distributed_world_size % num_pipeline_devices == 0 cfg.distributed_world_size = cfg.distributed_world_size // num_pipeline_devices # In the case of 4-way MP on nodes with 8 GPUs, we want # distributed_rank to be the starting GPU index for each pipeline # i.e., 0, 2, ... 
gpus_per_node = torch.cuda.device_count() assert cfg.distributed_rank % gpus_per_node == 0 assert cfg.distributed_rank % num_pipeline_devices == 0 with open_dict(cfg): cfg.distributed_rank = cfg.distributed_rank // num_pipeline_devices # launch one process per pipeline cfg.distributed_num_procs = num_pipelines_per_node # if we have 4-way MP on a node with 8 GPUs, we want device_ids to be 0 # and 4, indicating the starting device IDs for each pipeline cfg.device_id *= num_pipeline_devices if cfg.device_id > 0: # if there's multiple pipelines on a node (e.g., 4-way MP on an 8 # GPU node), we need to adjust pipeline_devices accordingly logger.debug( "setting CUDA device={} on rank {}".format( cfg.device_id, cfg.distributed_rank ) ) torch.cuda.set_device(cfg.device_id) with open_dict(cfg): cfg.pipeline_devices = [cfg.device_id + d for d in cfg.pipeline_devices] logger.info( "setting pipeline_devices={} on rank {}".format( cfg.pipeline_devices, cfg.distributed_rank ) ) def distributed_init(cfg: FairseqConfig): if isinstance(cfg, Namespace): from fairseq.dataclass.utils import convert_namespace_to_omegaconf cfg = convert_namespace_to_omegaconf(cfg) if not cfg.common.tpu: if torch.distributed.is_available() and torch.distributed.is_initialized(): warnings.warn( "Distributed is already initialized, cannot initialize twice!" ) else: logger.info( "distributed init (rank {}): {}".format( cfg.distributed_training.distributed_rank, cfg.distributed_training.distributed_init_method, ) ) dist.init_process_group( backend=cfg.distributed_training.distributed_backend, init_method=cfg.distributed_training.distributed_init_method, world_size=cfg.distributed_training.distributed_world_size, rank=cfg.distributed_training.distributed_rank, ) logger.info( "initialized host {} as rank {}".format( socket.gethostname(), cfg.distributed_training.distributed_rank, ) ) # perform a dummy all-reduce to initialize the NCCL communicator if torch.cuda.is_available(): dist.all_reduce(torch.zeros(1).cuda()) cfg.distributed_training.distributed_rank = torch.distributed.get_rank() else: assert xm.xrt_world_size() == cfg.distributed_training.distributed_world_size global _USE_XLA _USE_XLA = True cfg.distributed_training.device_id = xm.get_local_ordinal() cfg.distributed_training.distributed_rank = xm.get_ordinal() xm.rendezvous("distributed_init") # wait for all workers if is_master(cfg.distributed_training): logging.getLogger().setLevel(logging.INFO) else: logging.getLogger().setLevel(logging.WARNING) if cfg.common.model_parallel_size > 1: try: from fairseq.model_parallel.megatron.mpu import ( initialize_model_parallel, model_parallel_cuda_manual_seed, ) except ImportError: raise ImportError( "\n\nPlease install the megatron submodule:" "\n\n git submodule update --init " "fairseq/model_parallel/megatron" ) global _USE_MEGATRON _USE_MEGATRON = True initialize_model_parallel(cfg.common.model_parallel_size) model_parallel_cuda_manual_seed(cfg.common.seed) model_part_number = get_model_parallel_rank() cfg.checkpoint.checkpoint_suffix += "-model_part-{0}".format(model_part_number) if hasattr(cfg, "model") and getattr(cfg.model, "base_layers", 0) > 0: cfg.checkpoint.checkpoint_suffix = ( f"-rank-{cfg.distributed_training.distributed_rank}" ) return cfg.distributed_training.distributed_rank def distributed_main(i, main, cfg: FairseqConfig, kwargs): cfg.distributed_training.device_id = i if torch.cuda.is_available() and not cfg.common.cpu and not cfg.common.tpu: torch.cuda.set_device(cfg.distributed_training.device_id) if 
cfg.distributed_training.distributed_rank is None: # torch.multiprocessing.spawn cfg.distributed_training.distributed_rank = kwargs.pop("start_rank", 0) + i cfg.distributed_training.distributed_rank = distributed_init(cfg) after_distributed_init_fn = kwargs.pop("after_distributed_init_fn", None) if after_distributed_init_fn: cfg = after_distributed_init_fn(cfg) main(cfg, **kwargs) if torch.distributed.is_initialized(): torch.distributed.barrier(get_global_group()) def call_main(cfg: FairseqConfig, main, **kwargs): if cfg.distributed_training.distributed_init_method is None: infer_init_method(cfg.distributed_training) if cfg.distributed_training.distributed_init_method is not None: # distributed training if not cfg.distributed_training.distributed_no_spawn: start_rank = cfg.distributed_training.distributed_rank cfg.distributed_training.distributed_rank = None # assign automatically kwargs["start_rank"] = start_rank torch.multiprocessing.spawn( fn=distributed_main, args=(main, cfg, kwargs), nprocs=min( torch.cuda.device_count(), cfg.distributed_training.distributed_world_size, ), join=True, ) else: distributed_main(cfg.distributed_training.device_id, main, cfg, kwargs) elif cfg.common.tpu and cfg.distributed_training.distributed_world_size > 1: import torch_xla.distributed.xla_multiprocessing as xmp torch.multiprocessing.set_sharing_strategy("file_system") xmp.spawn( fn=distributed_main, args=(main, cfg, kwargs), # tpu-comment: # 8 devices in one TPU VM, is the max processes to be spawned. # The rest is driven by xm.distributed.xla_dist nprocs=min(cfg.distributed_training.distributed_world_size, 8), ) else: # single GPU main main(cfg, **kwargs) def use_xla(): global _USE_XLA return _USE_XLA def new_groups(grouped_ranks: List[List[int]]): if use_xla(): return ("tpu", grouped_ranks) else: groups = [dist.new_group(g) for g in grouped_ranks] my_group_idx = _find_my_group_index(grouped_ranks) return groups[my_group_idx] def _find_my_group_index(grouped_ranks): my_rank = get_global_rank() for i, group in enumerate(grouped_ranks): if my_rank in group: return i raise RuntimeError def _find_my_group(grouped_ranks): index = _find_my_group_index(grouped_ranks) return grouped_ranks[index] def get_rank(group): if use_xla(): assert group[0] == "tpu" my_group = _find_my_group(group[1]) return my_group.index(get_global_rank()) else: return dist.get_rank(group=group) def get_world_size(group): if use_xla(): assert group[0] == "tpu" my_group = _find_my_group(group[1]) return len(my_group) elif torch.distributed.is_initialized(): return dist.get_world_size(group=group) else: return 1 def get_global_group(): if use_xla(): return new_groups([list(range(get_global_world_size()))]) elif torch.distributed.is_initialized(): if not hasattr(get_global_group, "_global_group"): # ideally we could use torch.distributed.group.WORLD, but it seems # to cause random NCCL hangs in some cases get_global_group._global_group = dist.new_group() return get_global_group._global_group else: return None def get_global_rank(): if use_xla(): return xm.get_ordinal() elif torch.distributed.is_initialized(): return torch.distributed.get_rank() else: return 0 def get_global_world_size(): if use_xla(): return xm.xrt_world_size() elif torch.distributed.is_initialized(): return torch.distributed.get_world_size() else: return 1 def get_data_parallel_group(): """Get the data parallel group the caller rank belongs to.""" global _USE_MEGATRON if _USE_MEGATRON: from fairseq.model_parallel.megatron import mpu return mpu.get_data_parallel_group() 
else: return get_global_group() def get_data_parallel_rank(): """Return my rank for the data parallel group.""" return get_rank(get_data_parallel_group()) def get_data_parallel_world_size(): """Return world size for the data parallel group.""" return get_world_size(get_data_parallel_group()) def get_model_parallel_group(): global _USE_MEGATRON if _USE_MEGATRON: from fairseq.model_parallel.megatron import mpu return mpu.get_model_parallel_group() else: return None def get_model_parallel_rank(): """Return my rank for the model parallel group.""" return get_rank(get_model_parallel_group()) def get_model_parallel_world_size(): """Return world size for the model parallel group.""" return get_world_size(get_model_parallel_group()) def all_reduce(tensor, group, op="sum"): if use_xla(): assert isinstance(group, tuple) and group[0] == "tpu" tensor = [tensor] # wrap in a list to make xm.all_reduce in-place return xm.all_reduce(op, tensor, groups=group[1])[0] else: if op == "sum": op = dist.ReduceOp.SUM elif op == "max": op = dist.ReduceOp.MAX else: raise NotImplementedError dist.all_reduce(tensor, op=op, group=group) return tensor def broadcast(tensor, src, group): if use_xla(): # XLA doesn't support broadcast, hack it with all_reduce if get_rank(group) != src: tensor.zero_() all_reduce(tensor, group) else: dist.broadcast(tensor, src=src, group=group) def all_to_all(tensor, group): """Perform an all-to-all operation on a 1D Tensor.""" assert tensor.dim() == 1 split_count = get_world_size(group=group) assert tensor.numel() % split_count == 0 if use_xla(): assert isinstance(group, tuple) and group[0] == "tpu" return xm.all_to_all( tensor, split_dimension=0, concat_dimension=0, split_count=split_count, groups=group[1], ) else: output = torch.zeros_like(tensor) dist.all_to_all_single(output, tensor, group=group) return output def all_gather(tensor, group, return_tensor=False): """Perform an all-gather operation.""" if use_xla(): result = xm.all_gather(tensor, groups=group[1]) world_size = get_world_size(group=group) result = result.view(world_size, *tensor.size()) if return_tensor: return result else: return [result[i] for i in range(world_size)] else: world_size = get_world_size(group=group) rank = get_rank(group=group) tensor_list = [ tensor if i == rank else torch.empty_like(tensor) for i in range(world_size) ] dist.all_gather(tensor_list, tensor, group=group) if return_tensor: return torch.stack(tensor_list, dim=0) else: return tensor_list def all_gather_list(data, group=None, max_size=16384): """Gathers arbitrary data from all nodes into a list. Similar to :func:`~torch.distributed.all_gather` but for arbitrary Python data. Note that *data* must be picklable and any CUDA tensors will be moved to CPU and returned on CPU as well. 
Args: data (Any): data from the local worker to be gathered on other workers group: group of the collective max_size (int, optional): maximum size of the data to be gathered across workers """ from fairseq import utils if group is None: group = get_global_group() rank = get_rank(group=group) world_size = get_world_size(group=group) buffer_size = max_size * world_size if ( not hasattr(all_gather_list, "_buffer") or all_gather_list._buffer.numel() < buffer_size ): all_gather_list._buffer = torch.cuda.ByteTensor(buffer_size) all_gather_list._cpu_buffer = torch.ByteTensor(max_size).pin_memory() buffer = all_gather_list._buffer buffer.zero_() cpu_buffer = all_gather_list._cpu_buffer data = utils.move_to_cpu(data) enc = pickle.dumps(data) enc_size = len(enc) header_size = 4 # size of header that contains the length of the encoded data size = header_size + enc_size if size > max_size: raise ValueError( "encoded data size ({}) exceeds max_size ({})".format(size, max_size) ) header = struct.pack(">I", enc_size) cpu_buffer[:size] = torch.ByteTensor(list(header + enc)) start = rank * max_size buffer[start : start + size].copy_(cpu_buffer[:size]) all_reduce(buffer, group=group) buffer = buffer.cpu() try: result = [] for i in range(world_size): out_buffer = buffer[i * max_size : (i + 1) * max_size] (enc_size,) = struct.unpack(">I", bytes(out_buffer[:header_size].tolist())) if enc_size > 0: result.append( pickle.loads( bytes(out_buffer[header_size : header_size + enc_size].tolist()) ) ) return result except pickle.UnpicklingError: raise Exception( "Unable to unpickle data from other workers. all_gather_list requires all " "workers to enter the function together, so this error usually indicates " "that the workers have fallen out of sync somehow. Workers can fall out of " "sync if one of them runs out of memory, or if there are other conditions " "in your training script that can cause one worker to finish an epoch " "while other workers are still iterating over their portions of the data. " "Try rerunning with --ddp-backend=legacy_ddp and see if that helps." ) def all_reduce_dict(data: Mapping[str, Any], device, group) -> Dict[str, Any]: """ AllReduce a dictionary of values across workers. We separately reduce items that are already on the device and items on CPU for better performance. Args: data (Mapping[str, Any]): dictionary of data to all-reduce, but cannot be a nested dictionary device (torch.device): device for the reduction group: group of the collective """ data_keys = list(data.keys()) # We want to separately reduce items that are already on the # device and items on CPU for performance reasons. 
cpu_data = OrderedDict() device_data = OrderedDict() for k in data_keys: t = data[k] if not torch.is_tensor(t): cpu_data[k] = torch.tensor(t, dtype=torch.double) elif t.device.type != device.type: cpu_data[k] = t.to(dtype=torch.double) else: device_data[k] = t.to(dtype=torch.double) def _all_reduce_dict(data: OrderedDict): if len(data) == 0: return data buf = torch.cat([t.view(-1) for t in data.values()]).to(device=device) all_reduce(buf, group=group) split_buf = torch.split(buf.clone(), [t.numel() for t in data.values()]) reduced_data = [t.view_as(orig) for t, orig in zip(split_buf, data.values())] return OrderedDict(zip(data.keys(), reduced_data)) cpu_data = _all_reduce_dict(cpu_data) device_data = _all_reduce_dict(device_data) def get_from_stack(key): if key in cpu_data: return cpu_data[key] elif key in device_data: return device_data[key] raise KeyError return OrderedDict([(key, get_from_stack(key)) for key in data_keys]) def broadcast_tensors( tensors: Optional[List[torch.Tensor]], src_rank: int, group: object, dist_device: Optional[torch.device] = None, ) -> List[torch.Tensor]: """ Broadcasts a list of tensors without other (non-src) ranks needing to know the dtypes/shapes of the tensors. """ if dist_device is None: if torch.distributed.get_backend(group) == "nccl": dist_device = torch.device("cuda") else: dist_device = torch.device("cpu") # share metadata first to simplify transfer is_src_rank = get_rank(group) == src_rank if is_src_rank: metadata = [ {"size": t.size(), "dtype": t.dtype, "device": t.device} for t in tensors ] metadata = _broadcast_object_slow(metadata, src_rank, group, dist_device) else: metadata = _broadcast_object_slow(None, src_rank, group, dist_device) out_tensors = [] for i, meta in enumerate(metadata): if is_src_rank: tensor = tensors[i] broadcast(tensors[i].to(dist_device), src=src_rank, group=group) else: tensor = torch.zeros( [meta["size"].numel()], dtype=meta["dtype"], device=dist_device ) broadcast(tensor, src=src_rank, group=group) tensor = tensor.view(meta["size"]).to(meta["device"]) out_tensors.append(tensor) return out_tensors def broadcast_object( obj: Any, src_rank: int, group: object, dist_device: Optional[torch.device] = None, ) -> Any: """Broadcast an arbitrary Python object to other workers.""" if dist_device is None: if torch.distributed.get_backend(group) == "nccl": dist_device = torch.device("cuda") else: dist_device = torch.device("cpu") if get_rank(group) == src_rank: # split the tensors from the non-tensors so we can broadcast them # directly, avoiding unnecessary serialization/deserialization tensors = [] obj = _split_tensors_from_obj(obj, tensors) obj = _broadcast_object_slow(obj, src_rank, group, dist_device) tensors = broadcast_tensors(tensors, src_rank, group, dist_device) else: obj = _broadcast_object_slow(None, src_rank, group, dist_device) tensors = broadcast_tensors(None, src_rank, group, dist_device) return _put_tensors_in_obj(obj, tensors) def _broadcast_object_slow( obj: Any, src_rank: int, group: object, dist_device: torch.device, ) -> Any: if get_rank(group) == src_rank: # Emit data buffer = io.BytesIO() torch.save(obj, buffer) buffer = torch.ByteTensor(buffer.getbuffer()).to(dist_device) length = torch.LongTensor([len(buffer)]).to(dist_device) broadcast(length, src=src_rank, group=group) broadcast(buffer, src=src_rank, group=group) else: # Fetch from the source length = torch.LongTensor([0]).to(dist_device) broadcast(length, src=src_rank, group=group) buffer = torch.ByteTensor(int(length.item())).to(dist_device) 
broadcast(buffer, src=src_rank, group=group) buffer = io.BytesIO(buffer.cpu().numpy()) obj = torch.load(buffer, map_location="cpu") return obj @dataclass(frozen=True) class _TensorPlaceholder: index: int def _split_tensors_from_obj(obj: Any, tensors: List[torch.Tensor]) -> Any: if torch.is_tensor(obj): placeholder = _TensorPlaceholder(index=len(tensors)) tensors.append(obj) return placeholder elif isinstance(obj, dict): return {k: _split_tensors_from_obj(v, tensors) for k, v in obj.items()} elif isinstance(obj, list): return [_split_tensors_from_obj(v, tensors) for v in obj] elif isinstance(obj, tuple): return tuple(_split_tensors_from_obj(v, tensors) for v in obj) elif isinstance(obj, set): return {_split_tensors_from_obj(v, tensors) for v in obj} else: return obj def _put_tensors_in_obj(obj: Any, tensors: List[torch.Tensor]) -> Any: if isinstance(obj, _TensorPlaceholder): return tensors[obj.index] elif isinstance(obj, dict): return {k: _put_tensors_in_obj(v, tensors) for k, v in obj.items()} elif isinstance(obj, list): return [_put_tensors_in_obj(v, tensors) for v in obj] elif isinstance(obj, tuple): return tuple(_put_tensors_in_obj(v, tensors) for v in obj) elif isinstance(obj, set): return {_put_tensors_in_obj(v, tensors) for v in obj} else: return obj
29,533
35.506799
107
py
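A brief, hedged illustration of the distributed helper API shown above (it is not part of the dataset row). The import path follows the file's location in the repo; `gather_stats` is an illustrative name, and the gathering branch assumes a process group was already set up via call_main()/distributed_init() and that CUDA is available, since all_gather_list stages data in a cuda ByteTensor.

from fairseq.distributed import utils as dist_utils

# Outside a distributed launch these helpers fall back to single-process defaults.
print(dist_utils.get_global_rank())               # 0 when torch.distributed is uninitialized
print(dist_utils.get_data_parallel_world_size())  # 1 in the same case

def gather_stats(stats: dict):
    # Inside a real job: gather an arbitrary picklable object from every worker.
    return dist_utils.all_gather_list(stats, max_size=16384)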
sign-topic
sign-topic-main/fairseq/distributed/legacy_distributed_data_parallel.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ A modified version of the legacy DistributedDataParallel module that uses c10d communication primitives. This version is simpler than the latest PyTorch version and is useful for debugging. Notably it does not overlap gradient communication with the backward pass, which makes it slower but more robust than the PyTorch version. This version also supports the *no_sync* context manager, which allows faster training with `--update-freq`. """ from collections import OrderedDict from contextlib import contextmanager import torch from torch import nn from fairseq.distributed import utils class LegacyDistributedDataParallel(nn.Module): """Implements distributed data parallelism at the module level. A simplified version of :class:`torch.nn.parallel.DistributedDataParallel`. This version uses a c10d process group for communication and does not broadcast buffers. Args: module (~torch.nn.Module): module to be parallelized process_group: the c10d process group to be used for distributed data parallel all-reduction. buffer_size (int, optional): number of elements to buffer before performing all-reduce (default: 256M). """ def __init__(self, module, process_group, buffer_size=2 ** 28): super().__init__() self.module = module self.process_group = process_group self.world_size = utils.get_world_size(self.process_group) # Never use a bigger buffer than the number of model params self.buffer_size = min(buffer_size, sum(p.numel() for p in module.parameters())) self.buffer = None # We can also forcibly accumulate grads locally and only do the # all-reduce at some later time self.accumulate_grads = False # make per-device lists of parameters paramlists = OrderedDict() for param in self.module.parameters(): device = param.device if paramlists.get(device) is None: paramlists[device] = [] paramlists[device] += [param] self.per_device_params = list(paramlists.values()) @contextmanager def no_sync(self): """A context manager to disable gradient synchronization.""" old_accumulate_grads = self.accumulate_grads self.accumulate_grads = True yield self.accumulate_grads = old_accumulate_grads def forward(self, *inputs, **kwargs): return self.module(*inputs, **kwargs) def all_reduce_grads(self): """ This function must be called explicitly after backward to reduce gradients. There is no automatic hook like c10d. 
""" def all_reduce_params(params): buffer = self.buffer nonzero_buffer = False if len(params) > 1: offset = 0 for p in params: sz = p.numel() if p.grad is not None: buffer[offset : offset + sz].copy_(p.grad.data.view(-1)) nonzero_buffer = True else: buffer[offset : offset + sz].zero_() offset += sz else: # we only have a single grad to all-reduce p = params[0] if p.grad is not None: buffer = p.grad.data nonzero_buffer = True elif p.numel() <= self.buffer.numel(): buffer = buffer[: p.numel()] buffer.zero_() else: buffer = torch.zeros_like(p) if nonzero_buffer: buffer.div_(self.world_size) utils.all_reduce(buffer, self.process_group) # copy all-reduced grads back into their original place offset = 0 for p in params: sz = p.numel() if p.grad is not None: p.grad.data.copy_(buffer[offset : offset + sz].view_as(p)) else: p.grad = buffer[offset : offset + sz].view_as(p).clone() offset += sz def reduction_fn(): # This function only needs to be called once if self.accumulate_grads: return if self.buffer is None: self.buffer = next(self.module.parameters()).new(self.buffer_size) for params in self.per_device_params: # All-reduce the gradients in buckets offset = 0 buffered_params = [] for param in params: if not param.requires_grad: continue if param.grad is None: param.grad = torch.zeros_like(param) if hasattr(param, "expert"): # Skip gradient sync for unshared parameters continue if param.grad.requires_grad: raise RuntimeError( "DistributedDataParallel only works " "with gradients that don't require " "grad" ) sz = param.numel() if sz > self.buffer.numel(): # all-reduce big params directly all_reduce_params([param]) else: if offset + sz > self.buffer.numel(): all_reduce_params(buffered_params) offset = 0 buffered_params.clear() buffered_params.append(param) offset += sz if len(buffered_params) > 0: all_reduce_params(buffered_params) reduction_fn()
6,104
35.777108
88
py
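A minimal sketch of the accumulate-then-reduce pattern the LegacyDistributedDataParallel docstring describes, assuming torch.distributed has already been initialized and the module lives on the right device. The function name, the `.sum()` stand-in loss, and the `batches` iterable are illustrative only.

import torch
from torch import nn
from fairseq.distributed import utils as dist_utils
from fairseq.distributed.legacy_distributed_data_parallel import (
    LegacyDistributedDataParallel,
)

def accumulate_and_step(module: nn.Module, batches, optimizer):
    # Wrap once per training run; the process group must already exist.
    ddp = LegacyDistributedDataParallel(module, dist_utils.get_data_parallel_group())
    with ddp.no_sync():                # accumulate locally for all but the last micro-batch
        for batch in batches[:-1]:
            ddp(batch).sum().backward()
    ddp(batches[-1]).sum().backward()
    ddp.all_reduce_grads()             # no autograd hook: reduce explicitly after backward
    optimizer.step()
    optimizer.zero_grad()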
sign-topic
sign-topic-main/fairseq/distributed/distributed_timeout_wrapper.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import os import signal import threading from torch import nn logger = logging.getLogger(__name__) class DistributedTimeoutWrapper(nn.Module): """ A wrapper that kills the process if no progress is made within a given *timeout*. The timer is reset every time :func:`forward` is called. Usage:: module = DistributedTimeoutWrapper(module, timeout=30) x = module(input) time.sleep(20) # safe x = module(input) time.sleep(45) # job will be killed before this returns Args: module (nn.Module): module to wrap timeout (int): number of seconds before killing the process (set to a value <= 0 to disable the timeout) signal (Optional): signal to send once timeout is triggered """ def __init__(self, module: nn.Module, timeout: int, signal=signal.SIGINT): super().__init__() self.module = module self.timeout = timeout self.signal = signal if timeout > 0: self._heartbeat = threading.Event() self._heartbeat_thread = threading.Thread( target=self._check_heartbeat, args=(os.getpid(),), daemon=True, ) self._heartbeat_thread.start() self._terminated = False else: self._heartbeat = None self._heartbeat_thread = None def __del__(self): self.stop_timeout() def __getattr__(self, name): """Forward missing attributes to wrapped module.""" try: return super().__getattr__(name) # defer to nn.Module's logic except AttributeError: return getattr(self.module, name) def stop_timeout(self): if self._heartbeat_thread is not None: self._terminated = True self._heartbeat_thread.join() def state_dict(self, *args, **kwargs): return self.module.state_dict(*args, **kwargs) def load_state_dict(self, *args, **kwargs): return self.module.load_state_dict(*args, **kwargs) def forward(self, *args, **kwargs): if self._heartbeat is not None: self._heartbeat.set() return self.module(*args, **kwargs) def _check_heartbeat(self, parent_pid): self._heartbeat.wait() # wait for the first forward pass while True: self._heartbeat.clear() success = self._heartbeat.wait(timeout=self.timeout) if self._terminated: break elif not success: logger.error( ( "Killing job for not making progress in {} seconds. " "Set --heartbeat-timeout=-1 to disable this timeout." ).format(int(self.timeout)) ) os.kill(parent_pid, self.signal) return
3,092
30.561224
78
py
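A runnable variant of the Usage block in the docstring above; the Linear layer and the 60-second timeout are placeholders.

import torch
from torch import nn
from fairseq.distributed.distributed_timeout_wrapper import DistributedTimeoutWrapper

net = DistributedTimeoutWrapper(nn.Linear(8, 8), timeout=60)  # watchdog thread starts here
out = net(torch.randn(2, 8))   # every forward() call resets the heartbeat
net.stop_timeout()             # join the watchdog; it may take up to `timeout` seconds to notice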
sign-topic
sign-topic-main/fairseq/scoring/bleu.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import ctypes import math import sys from dataclasses import dataclass, field import torch from fairseq.dataclass import FairseqDataclass from fairseq.scoring import BaseScorer, register_scorer from fairseq.scoring.tokenizer import EvaluationTokenizer class BleuStat(ctypes.Structure): _fields_ = [ ("reflen", ctypes.c_size_t), ("predlen", ctypes.c_size_t), ("match1", ctypes.c_size_t), ("count1", ctypes.c_size_t), ("match2", ctypes.c_size_t), ("count2", ctypes.c_size_t), ("match3", ctypes.c_size_t), ("count3", ctypes.c_size_t), ("match4", ctypes.c_size_t), ("count4", ctypes.c_size_t), ] @dataclass class SacrebleuConfig(FairseqDataclass): sacrebleu_tokenizer: EvaluationTokenizer.ALL_TOKENIZER_TYPES = field( default="13a", metadata={"help": "tokenizer"} ) sacrebleu_lowercase: bool = field( default=False, metadata={"help": "apply lowercasing"} ) sacrebleu_char_level: bool = field( default=False, metadata={"help": "evaluate at character level"} ) @register_scorer("sacrebleu", dataclass=SacrebleuConfig) class SacrebleuScorer(BaseScorer): def __init__(self, cfg): super(SacrebleuScorer, self).__init__(cfg) import sacrebleu self.sacrebleu = sacrebleu self.tokenizer = EvaluationTokenizer( tokenizer_type=cfg.sacrebleu_tokenizer, lowercase=cfg.sacrebleu_lowercase, character_tokenization=cfg.sacrebleu_char_level, ) def add_string(self, ref, pred): self.ref.append(self.tokenizer.tokenize(ref)) self.pred.append(self.tokenizer.tokenize(pred)) def _score(self, order=4): if order != 4: raise NotImplementedError # tokenization and lowercasing are performed by self.tokenizer instead. return self.sacrebleu.corpus_bleu(self.pred, [self.ref], tokenize="none") def score(self, order=4): return self._score(order).score def result_string(self, order=4): return self._score(order).format() @dataclass class BleuConfig(FairseqDataclass): pad: int = field(default=1, metadata={"help": "padding index"}) eos: int = field(default=2, metadata={"help": "eos index"}) unk: int = field(default=3, metadata={"help": "unk index"}) @register_scorer("bleu", dataclass=BleuConfig) class Scorer(object): def __init__(self, cfg): self.stat = BleuStat() self.pad = cfg.pad self.eos = cfg.eos self.unk = cfg.unk try: from fairseq import libbleu except ImportError as e: sys.stderr.write( "ERROR: missing libbleu.so. 
run `pip install --editable .`\n" ) raise e self.C = ctypes.cdll.LoadLibrary(libbleu.__file__) self.reset() def reset(self, one_init=False): if one_init: self.C.bleu_one_init(ctypes.byref(self.stat)) else: self.C.bleu_zero_init(ctypes.byref(self.stat)) def add(self, ref, pred): if not isinstance(ref, torch.IntTensor): raise TypeError("ref must be a torch.IntTensor (got {})".format(type(ref))) if not isinstance(pred, torch.IntTensor): raise TypeError("pred must be a torch.IntTensor(got {})".format(type(pred))) # don't match unknown words rref = ref.clone() assert not rref.lt(0).any() rref[rref.eq(self.unk)] = -999 rref = rref.contiguous().view(-1) pred = pred.contiguous().view(-1) self.C.bleu_add( ctypes.byref(self.stat), ctypes.c_size_t(rref.size(0)), ctypes.c_void_p(rref.data_ptr()), ctypes.c_size_t(pred.size(0)), ctypes.c_void_p(pred.data_ptr()), ctypes.c_int(self.pad), ctypes.c_int(self.eos), ) def score(self, order=4): psum = sum( math.log(p) if p > 0 else float("-Inf") for p in self.precision()[:order] ) return self.brevity() * math.exp(psum / order) * 100 def precision(self): def ratio(a, b): return a / b if b > 0 else 0 return [ ratio(self.stat.match1, self.stat.count1), ratio(self.stat.match2, self.stat.count2), ratio(self.stat.match3, self.stat.count3), ratio(self.stat.match4, self.stat.count4), ] def brevity(self): r = self.stat.reflen / self.stat.predlen return min(1, math.exp(1 - r)) def result_string(self, order=4): assert order <= 4, "BLEU scores for order > 4 aren't supported" fmt = "BLEU{} = {:2.2f}, {:2.1f}" for _ in range(1, order): fmt += "/{:2.1f}" fmt += " (BP={:.3f}, ratio={:.3f}, syslen={}, reflen={})" bleup = [p * 100 for p in self.precision()[:order]] return fmt.format( order, self.score(order=order), *bleup, self.brevity(), self.stat.predlen / self.stat.reflen, self.stat.predlen, self.stat.reflen )
5,347
30.64497
88
py
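A small example of the sacrebleu-backed scorer defined above; it requires the `sacrebleu` package, and the reference/hypothesis strings are illustrative.

from fairseq.scoring.bleu import SacrebleuConfig, SacrebleuScorer

scorer = SacrebleuScorer(SacrebleuConfig())   # default "13a" tokenizer, no lowercasing
scorer.add_string(ref="the cat sat on the mat", pred="the cat sat on a mat")
print(scorer.score())            # corpus BLEU as a float
print(scorer.result_string())    # full sacrebleu result string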
sign-topic
sign-topic-main/fairseq/benchmark/dummy_model.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch.nn as nn import torch.nn.functional as F from fairseq.data import Dictionary from fairseq.models import ( FairseqDecoder, FairseqLanguageModel, register_model, register_model_architecture, ) @register_model("dummy_model") class DummyModel(FairseqLanguageModel): def __init__(self, args, encoder): super().__init__(encoder) self.args = args @staticmethod def add_args(parser): parser.add_argument("--num-layers", type=int, default=24) parser.add_argument("--embed-dim", type=int, default=1024) @classmethod def build_model(cls, args, task): encoder = DummyEncoder( num_embed=len(task.target_dictionary), embed_dim=args.embed_dim, num_layers=args.num_layers, ) return cls(args, encoder) def forward(self, src_tokens, masked_tokens=None, **kwargs): return self.decoder(src_tokens, masked_tokens=masked_tokens) class DummyEncoder(FairseqDecoder): def __init__(self, num_embed=50000, embed_dim=1024, num_layers=24): super().__init__(Dictionary()) self.embed = nn.Embedding( num_embeddings=num_embed, embedding_dim=embed_dim, padding_idx=0 ) self.layers_a = nn.ModuleList( [ nn.Sequential( nn.LayerNorm(embed_dim), nn.Linear(embed_dim, 3 * embed_dim), # q, k, v input projection nn.Linear(3 * embed_dim, embed_dim), # skip self-attention nn.Linear(embed_dim, embed_dim), # output projection nn.Dropout(), ) for i in range(num_layers) ] ) self.layers_b = nn.ModuleList( [ nn.Sequential( nn.LayerNorm(embed_dim), nn.Linear(embed_dim, 4 * embed_dim), # FFN nn.ReLU(), nn.Linear(4 * embed_dim, embed_dim), # FFN nn.Dropout(0.1), ) for i in range(num_layers) ] ) self.out_proj = nn.Linear(embed_dim, num_embed) def forward(self, tokens, masked_tokens=None): x = self.embed(tokens) for layer_a, layer_b in zip(self.layers_a, self.layers_b): x = x + layer_a(x) x = x + layer_b(x) x = self.out_proj(x) if masked_tokens is not None: x = x[masked_tokens] return (x,) def max_positions(self): return 1024 def get_normalized_probs(self, net_output, log_probs, sample=None): logits = net_output[0].float() if log_probs: return F.log_softmax(logits, dim=-1) else: return F.softmax(logits, dim=-1) @register_model_architecture("dummy_model", "dummy_model") def base_architecture(args): pass
3,090
30.865979
84
py
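A tiny sanity-check sketch for the dummy benchmark encoder above; the sizes are deliberately small (the model defaults are 24 layers and 1024 dims).

import torch
from fairseq.benchmark.dummy_model import DummyEncoder

enc = DummyEncoder(num_embed=100, embed_dim=16, num_layers=2)
tokens = torch.randint(1, 100, (4, 10))   # (batch, seq); index 0 is the padding idx
logits, = enc(tokens)                     # forward returns a 1-tuple
print(logits.shape)                       # torch.Size([4, 10, 100])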
sign-topic
sign-topic-main/fairseq/benchmark/dummy_mt.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import numpy as np import torch from fairseq.data import Dictionary, FairseqDataset from fairseq.tasks import LegacyFairseqTask, register_task logger = logging.getLogger(__name__) @register_task("dummy_mt") class DummyMTTask(LegacyFairseqTask): @staticmethod def add_args(parser): """Add task-specific arguments to the parser.""" parser.add_argument("--dict-size", default=49996, type=int) parser.add_argument("--dataset-size", default=100000, type=int) parser.add_argument("--src-len", default=30, type=int) parser.add_argument("--tgt-len", default=30, type=int) def __init__(self, args, dictionary): super().__init__(args) self.dictionary = dictionary self.seed = args.seed dictionary.pad_to_multiple_(8) # often faster if divisible by 8 self.dummy_src = torch.arange(args.src_len + 1) + dictionary.pad() + 1 self.dummy_tgt = torch.arange(args.tgt_len + 1) + dictionary.pad() + 1 @classmethod def setup_task(cls, args, **kwargs): """Setup the task.""" dictionary = Dictionary() for i in range(args.dict_size): dictionary.add_symbol("word{}".format(i)) logger.info("dictionary: {} types".format(len(dictionary))) args.max_source_positions = args.src_len + dictionary.pad() + 2 args.max_target_positions = args.tgt_len + dictionary.pad() + 2 return cls(args, dictionary) def load_dataset(self, split, epoch=1, combine=False, **kwargs): """Load a given dataset split. Args: split (str): name of the split (e.g., train, valid, test) """ item_size = max(self.args.src_len, self.args.tgt_len) if self.args.batch_size is not None: bsz = self.args.batch_size else: bsz = max(1, self.args.max_tokens // item_size) tgt = torch.stack([self.dummy_tgt for _ in range(bsz)]) self.datasets[split] = DummyDataset( { "id": 1, "net_input": { "src_tokens": torch.stack([self.dummy_src for _ in range(bsz)]), "src_lengths": torch.full( (bsz,), self.args.src_len, dtype=torch.long ), "prev_output_tokens": tgt.clone(), }, "target": tgt, "nsentences": bsz, "ntokens": bsz * self.args.tgt_len, }, num_items=self.args.dataset_size, item_size=item_size, ) @property def source_dictionary(self): return self.dictionary @property def target_dictionary(self): return self.dictionary class DummyDataset(FairseqDataset): def __init__(self, batch, num_items, item_size): super().__init__() self.batch = batch self.num_items = num_items self.item_size = item_size def __getitem__(self, index): return index def __len__(self): return self.num_items def collater(self, samples): return self.batch @property def sizes(self): return np.array([self.item_size] * self.num_items) def num_tokens(self, index): return self.item_size def size(self, index): return self.item_size def ordered_indices(self): return np.arange(self.num_items) @property def supports_prefetch(self): return False
3,677
29.65
84
py
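A hedged sketch that builds the dummy MT task directly, bypassing fairseq-train and its argument parser. The Namespace fields mirror add_args plus the batch-size/max-tokens settings that load_dataset() reads; all values are illustrative.

from argparse import Namespace
from fairseq.benchmark.dummy_mt import DummyMTTask

args = Namespace(dict_size=100, dataset_size=8, src_len=5, tgt_len=5,
                 seed=1, batch_size=2, max_tokens=None)
task = DummyMTTask.setup_task(args)
task.load_dataset("train")
batch = task.datasets["train"].collater(None)   # DummyDataset returns the same batch for any indices
print(batch["net_input"]["src_tokens"].shape)   # (2, src_len + 1)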
sign-topic
sign-topic-main/fairseq/benchmark/dummy_masked_lm.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging from dataclasses import dataclass, field from typing import Optional import torch from omegaconf import II from .dummy_dataset import DummyDataset from fairseq.data import Dictionary from fairseq.dataclass import FairseqDataclass from fairseq.tasks import FairseqTask, register_task logger = logging.getLogger(__name__) @dataclass class DummyMaskedLMConfig(FairseqDataclass): dict_size: int = 49996 dataset_size: int = 100000 tokens_per_sample: int = field( default=512, metadata={ "help": "max number of total tokens over all" " segments per sample for BERT dataset" }, ) batch_size: Optional[int] = II("dataset.batch_size") max_tokens: Optional[int] = II("dataset.max_tokens") max_target_positions: int = II("task.tokens_per_sample") @register_task("dummy_masked_lm", dataclass=DummyMaskedLMConfig) class DummyMaskedLMTask(FairseqTask): def __init__(self, cfg: DummyMaskedLMConfig): super().__init__(cfg) self.dictionary = Dictionary() for i in range(cfg.dict_size): self.dictionary.add_symbol("word{}".format(i)) logger.info("dictionary: {} types".format(len(self.dictionary))) # add mask token self.mask_idx = self.dictionary.add_symbol("<mask>") self.dictionary.pad_to_multiple_(8) # often faster if divisible by 8 mask_idx = 0 pad_idx = 1 seq = torch.arange(cfg.tokens_per_sample) + pad_idx + 1 mask = torch.arange(2, cfg.tokens_per_sample, 7) # ~15% src = seq.clone() src[mask] = mask_idx tgt = torch.full_like(seq, pad_idx) tgt[mask] = seq[mask] self.dummy_src = src self.dummy_tgt = tgt def load_dataset(self, split, epoch=1, combine=False, **kwargs): """Load a given dataset split. Args: split (str): name of the split (e.g., train, valid, test) """ if self.cfg.batch_size is not None: bsz = self.cfg.batch_size else: bsz = max(1, self.cfg.max_tokens // self.cfg.tokens_per_sample) self.datasets[split] = DummyDataset( { "id": 1, "net_input": { "src_tokens": torch.stack([self.dummy_src for _ in range(bsz)]), "src_lengths": torch.full( (bsz,), self.cfg.tokens_per_sample, dtype=torch.long ), }, "target": torch.stack([self.dummy_tgt for _ in range(bsz)]), "nsentences": bsz, "ntokens": bsz * self.cfg.tokens_per_sample, }, num_items=self.cfg.dataset_size, item_size=self.cfg.tokens_per_sample, ) @property def source_dictionary(self): return self.dictionary @property def target_dictionary(self): return self.dictionary
3,123
31.884211
84
py
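A pure-torch illustration of the masking pattern built in the constructor above: every 7th position starting at index 2 is masked (roughly 15% of tokens) and the loss targets are padding everywhere else. The sample length is shortened for readability.

import torch

tokens_per_sample, pad_idx, mask_idx = 16, 1, 0
seq = torch.arange(tokens_per_sample) + pad_idx + 1
mask = torch.arange(2, tokens_per_sample, 7)   # positions 2 and 9 here
src = seq.clone()
src[mask] = mask_idx                           # masked model input
tgt = torch.full_like(seq, pad_idx)
tgt[mask] = seq[mask]                          # loss is only computed at masked positions
print(src.tolist())
print(tgt.tolist())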
sign-topic
sign-topic-main/fairseq/benchmark/dummy_lm.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging from dataclasses import dataclass, field from typing import Optional import torch from .dummy_dataset import DummyDataset from fairseq.data import Dictionary from fairseq.dataclass import FairseqDataclass from fairseq.tasks import FairseqTask, register_task from omegaconf import II logger = logging.getLogger(__name__) @dataclass class DummyLMConfig(FairseqDataclass): dict_size: int = 49996 dataset_size: int = 100000 tokens_per_sample: int = field( default=512, metadata={"help": "max sequence length"} ) add_bos_token: bool = False batch_size: Optional[int] = II("dataset.batch_size") max_tokens: Optional[int] = II("dataset.max_tokens") max_target_positions: int = II("task.tokens_per_sample") @register_task("dummy_lm", dataclass=DummyLMConfig) class DummyLMTask(FairseqTask): def __init__(self, cfg: DummyLMConfig): super().__init__(cfg) # load dictionary self.dictionary = Dictionary() for i in range(cfg.dict_size): self.dictionary.add_symbol("word{}".format(i)) self.dictionary.pad_to_multiple_(8) # often faster if divisible by 8 logger.info("dictionary: {} types".format(len(self.dictionary))) seq = torch.arange(cfg.tokens_per_sample + 1) + self.dictionary.pad() + 1 self.dummy_src = seq[:-1] self.dummy_tgt = seq[1:] def load_dataset(self, split, epoch=1, combine=False, **kwargs): """Load a given dataset split. Args: split (str): name of the split (e.g., train, valid, test) """ if self.cfg.batch_size is not None: bsz = self.cfg.batch_size else: bsz = max(1, self.cfg.max_tokens // self.cfg.tokens_per_sample) self.datasets[split] = DummyDataset( { "id": 1, "net_input": { "src_tokens": torch.stack([self.dummy_src for _ in range(bsz)]), "src_lengths": torch.full( (bsz,), self.cfg.tokens_per_sample, dtype=torch.long ), }, "target": torch.stack([self.dummy_tgt for _ in range(bsz)]), "nsentences": bsz, "ntokens": bsz * self.cfg.tokens_per_sample, }, num_items=self.cfg.dataset_size, item_size=self.cfg.tokens_per_sample, ) @property def source_dictionary(self): return self.dictionary @property def target_dictionary(self): return self.dictionary
2,757
31.833333
84
py
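A hedged sketch showing that the dataclass-configured dummy tasks can be built directly; passing batch_size and max_tokens explicitly sidesteps the II(...) interpolations that Hydra would normally resolve. The sizes are illustrative only.

from fairseq.benchmark.dummy_lm import DummyLMConfig, DummyLMTask

cfg = DummyLMConfig(dict_size=100, dataset_size=4, tokens_per_sample=16,
                    batch_size=2, max_tokens=None)
task = DummyLMTask(cfg)
task.load_dataset("valid")
sample = task.datasets["valid"].collater(None)
print(sample["net_input"]["src_tokens"].shape)  # (2, 16); targets are the same ramp shifted by one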
sign-topic
sign-topic-main/fairseq/data/language_pair_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import numpy as np import torch from fairseq.data import FairseqDataset, data_utils logger = logging.getLogger(__name__) def collate( samples, pad_idx, eos_idx, left_pad_source=True, left_pad_target=False, input_feeding=True, pad_to_length=None, pad_to_multiple=1, ): if len(samples) == 0: return {} def merge(key, left_pad, move_eos_to_beginning=False, pad_to_length=None): return data_utils.collate_tokens( [s[key] for s in samples], pad_idx, eos_idx, left_pad, move_eos_to_beginning, pad_to_length=pad_to_length, pad_to_multiple=pad_to_multiple, ) def check_alignment(alignment, src_len, tgt_len): if alignment is None or len(alignment) == 0: return False if ( alignment[:, 0].max().item() >= src_len - 1 or alignment[:, 1].max().item() >= tgt_len - 1 ): logger.warning("alignment size mismatch found, skipping alignment!") return False return True def compute_alignment_weights(alignments): """ Given a tensor of shape [:, 2] containing the source-target indices corresponding to the alignments, a weight vector containing the inverse frequency of each target index is computed. For e.g. if alignments = [[5, 7], [2, 3], [1, 3], [4, 2]], then a tensor containing [1., 0.5, 0.5, 1] should be returned (since target index 3 is repeated twice) """ align_tgt = alignments[:, 1] _, align_tgt_i, align_tgt_c = torch.unique( align_tgt, return_inverse=True, return_counts=True ) align_weights = align_tgt_c[align_tgt_i[np.arange(len(align_tgt))]] return 1.0 / align_weights.float() id = torch.LongTensor([s["id"] for s in samples]) src_tokens = merge( "source", left_pad=left_pad_source, pad_to_length=pad_to_length["source"] if pad_to_length is not None else None, ) # sort by descending source length src_lengths = torch.LongTensor( [s["source"].ne(pad_idx).long().sum() for s in samples] ) src_lengths, sort_order = src_lengths.sort(descending=True) id = id.index_select(0, sort_order) src_tokens = src_tokens.index_select(0, sort_order) prev_output_tokens = None target = None if samples[0].get("target", None) is not None: target = merge( "target", left_pad=left_pad_target, pad_to_length=pad_to_length["target"] if pad_to_length is not None else None, ) target = target.index_select(0, sort_order) tgt_lengths = torch.LongTensor( [s["target"].ne(pad_idx).long().sum() for s in samples] ).index_select(0, sort_order) ntokens = tgt_lengths.sum().item() if samples[0].get("prev_output_tokens", None) is not None: prev_output_tokens = merge("prev_output_tokens", left_pad=left_pad_target) elif input_feeding: # we create a shifted version of targets for feeding the # previous output token(s) into the next decoder step prev_output_tokens = merge( "target", left_pad=left_pad_target, move_eos_to_beginning=True, pad_to_length=pad_to_length["target"] if pad_to_length is not None else None, ) else: ntokens = src_lengths.sum().item() batch = { "id": id, "nsentences": len(samples), "ntokens": ntokens, "net_input": { "src_tokens": src_tokens, "src_lengths": src_lengths, }, "target": target, } if prev_output_tokens is not None: batch["net_input"]["prev_output_tokens"] = prev_output_tokens.index_select( 0, sort_order ) if samples[0].get("alignment", None) is not None: bsz, tgt_sz = batch["target"].shape src_sz = batch["net_input"]["src_tokens"].shape[1] offsets = torch.zeros((len(sort_order), 2), dtype=torch.long) offsets[:, 1] += 
torch.arange(len(sort_order), dtype=torch.long) * tgt_sz if left_pad_source: offsets[:, 0] += src_sz - src_lengths if left_pad_target: offsets[:, 1] += tgt_sz - tgt_lengths alignments = [ alignment + offset for align_idx, offset, src_len, tgt_len in zip( sort_order, offsets, src_lengths, tgt_lengths ) for alignment in [samples[align_idx]["alignment"].view(-1, 2)] if check_alignment(alignment, src_len, tgt_len) ] if len(alignments) > 0: alignments = torch.cat(alignments, dim=0) align_weights = compute_alignment_weights(alignments) batch["alignments"] = alignments batch["align_weights"] = align_weights if samples[0].get("constraints", None) is not None: # Collate the packed constraints across the samples, padding to # the length of the longest sample. lens = [sample.get("constraints").size(0) for sample in samples] max_len = max(lens) constraints = torch.zeros((len(samples), max(lens))).long() for i, sample in enumerate(samples): constraints[i, 0 : lens[i]] = samples[i].get("constraints") batch["constraints"] = constraints.index_select(0, sort_order) return batch class LanguagePairDataset(FairseqDataset): """ A pair of torch.utils.data.Datasets. Args: src (torch.utils.data.Dataset): source dataset to wrap src_sizes (List[int]): source sentence lengths src_dict (~fairseq.data.Dictionary): source vocabulary tgt (torch.utils.data.Dataset, optional): target dataset to wrap tgt_sizes (List[int], optional): target sentence lengths tgt_dict (~fairseq.data.Dictionary, optional): target vocabulary left_pad_source (bool, optional): pad source tensors on the left side (default: True). left_pad_target (bool, optional): pad target tensors on the left side (default: False). shuffle (bool, optional): shuffle dataset elements before batching (default: True). input_feeding (bool, optional): create a shifted version of the targets to be passed into the model for teacher forcing (default: True). remove_eos_from_source (bool, optional): if set, removes eos from end of source if it's present (default: False). append_eos_to_target (bool, optional): if set, appends eos to end of target if it's absent (default: False). align_dataset (torch.utils.data.Dataset, optional): dataset containing alignments. constraints (Tensor, optional): 2d tensor with a concatenated, zero- delimited list of constraints for each sentence. append_bos (bool, optional): if set, appends bos to the beginning of source/target sentence. num_buckets (int, optional): if set to a value greater than 0, then batches will be bucketed into the given number of batch shapes. src_lang_id (int, optional): source language ID, if set, the collated batch will contain a field 'src_lang_id' in 'net_input' which indicates the source language of the samples. tgt_lang_id (int, optional): target language ID, if set, the collated batch will contain a field 'tgt_lang_id' which indicates the target language of the samples. 
""" def __init__( self, src, src_sizes, src_dict, tgt=None, tgt_sizes=None, tgt_dict=None, left_pad_source=True, left_pad_target=False, shuffle=True, input_feeding=True, remove_eos_from_source=False, append_eos_to_target=False, align_dataset=None, constraints=None, append_bos=False, eos=None, num_buckets=0, src_lang_id=None, tgt_lang_id=None, pad_to_multiple=1, ): if tgt_dict is not None: assert src_dict.pad() == tgt_dict.pad() assert src_dict.eos() == tgt_dict.eos() assert src_dict.unk() == tgt_dict.unk() if tgt is not None: assert len(src) == len( tgt ), "Source and target must contain the same number of examples" self.src = src self.tgt = tgt self.src_sizes = np.array(src_sizes) self.tgt_sizes = np.array(tgt_sizes) if tgt_sizes is not None else None self.sizes = ( np.vstack((self.src_sizes, self.tgt_sizes)).T if self.tgt_sizes is not None else self.src_sizes ) self.src_dict = src_dict self.tgt_dict = tgt_dict self.left_pad_source = left_pad_source self.left_pad_target = left_pad_target self.shuffle = shuffle self.input_feeding = input_feeding self.remove_eos_from_source = remove_eos_from_source self.append_eos_to_target = append_eos_to_target self.align_dataset = align_dataset if self.align_dataset is not None: assert ( self.tgt_sizes is not None ), "Both source and target needed when alignments are provided" self.constraints = constraints self.append_bos = append_bos self.eos = eos if eos is not None else src_dict.eos() self.src_lang_id = src_lang_id self.tgt_lang_id = tgt_lang_id if num_buckets > 0: from fairseq.data import BucketPadLengthDataset self.src = BucketPadLengthDataset( self.src, sizes=self.src_sizes, num_buckets=num_buckets, pad_idx=self.src_dict.pad(), left_pad=self.left_pad_source, ) self.src_sizes = self.src.sizes logger.info("bucketing source lengths: {}".format(list(self.src.buckets))) if self.tgt is not None: self.tgt = BucketPadLengthDataset( self.tgt, sizes=self.tgt_sizes, num_buckets=num_buckets, pad_idx=self.tgt_dict.pad(), left_pad=self.left_pad_target, ) self.tgt_sizes = self.tgt.sizes logger.info( "bucketing target lengths: {}".format(list(self.tgt.buckets)) ) # determine bucket sizes using self.num_tokens, which will return # the padded lengths (thanks to BucketPadLengthDataset) num_tokens = np.vectorize(self.num_tokens, otypes=[np.compat.long]) self.bucketed_num_tokens = num_tokens(np.arange(len(self.src))) self.buckets = [ (None, num_tokens) for num_tokens in np.unique(self.bucketed_num_tokens) ] else: self.buckets = None self.pad_to_multiple = pad_to_multiple def get_batch_shapes(self): return self.buckets def __getitem__(self, index): tgt_item = self.tgt[index] if self.tgt is not None else None src_item = self.src[index] # Append EOS to end of tgt sentence if it does not have an EOS and remove # EOS from end of src sentence if it exists. 
This is useful when we use # use existing datasets for opposite directions i.e., when we want to # use tgt_dataset as src_dataset and vice versa if self.append_eos_to_target: eos = self.tgt_dict.eos() if self.tgt_dict else self.src_dict.eos() if self.tgt and self.tgt[index][-1] != eos: tgt_item = torch.cat([self.tgt[index], torch.LongTensor([eos])]) if self.append_bos: bos = self.tgt_dict.bos() if self.tgt_dict else self.src_dict.bos() if self.tgt and self.tgt[index][0] != bos: tgt_item = torch.cat([torch.LongTensor([bos]), self.tgt[index]]) bos = self.src_dict.bos() if self.src[index][0] != bos: src_item = torch.cat([torch.LongTensor([bos]), self.src[index]]) if self.remove_eos_from_source: eos = self.src_dict.eos() if self.src[index][-1] == eos: src_item = self.src[index][:-1] example = { "id": index, "source": src_item, "target": tgt_item, } if self.align_dataset is not None: example["alignment"] = self.align_dataset[index] if self.constraints is not None: example["constraints"] = self.constraints[index] return example def __len__(self): return len(self.src) def collater(self, samples, pad_to_length=None): """Merge a list of samples to form a mini-batch. Args: samples (List[dict]): samples to collate pad_to_length (dict, optional): a dictionary of {'source': source_pad_to_length, 'target': target_pad_to_length} to indicate the max length to pad to in source and target respectively. Returns: dict: a mini-batch with the following keys: - `id` (LongTensor): example IDs in the original input order - `ntokens` (int): total number of tokens in the batch - `net_input` (dict): the input to the Model, containing keys: - `src_tokens` (LongTensor): a padded 2D Tensor of tokens in the source sentence of shape `(bsz, src_len)`. Padding will appear on the left if *left_pad_source* is ``True``. - `src_lengths` (LongTensor): 1D Tensor of the unpadded lengths of each source sentence of shape `(bsz)` - `prev_output_tokens` (LongTensor): a padded 2D Tensor of tokens in the target sentence, shifted right by one position for teacher forcing, of shape `(bsz, tgt_len)`. This key will not be present if *input_feeding* is ``False``. Padding will appear on the left if *left_pad_target* is ``True``. - `src_lang_id` (LongTensor): a long Tensor which contains source language IDs of each sample in the batch - `target` (LongTensor): a padded 2D Tensor of tokens in the target sentence of shape `(bsz, tgt_len)`. Padding will appear on the left if *left_pad_target* is ``True``. - `tgt_lang_id` (LongTensor): a long Tensor which contains target language IDs of each sample in the batch """ res = collate( samples, pad_idx=self.src_dict.pad(), eos_idx=self.eos, left_pad_source=self.left_pad_source, left_pad_target=self.left_pad_target, input_feeding=self.input_feeding, pad_to_length=pad_to_length, pad_to_multiple=self.pad_to_multiple, ) if self.src_lang_id is not None or self.tgt_lang_id is not None: src_tokens = res["net_input"]["src_tokens"] bsz = src_tokens.size(0) if self.src_lang_id is not None: res["net_input"]["src_lang_id"] = ( torch.LongTensor([[self.src_lang_id]]).expand(bsz, 1).to(src_tokens) ) if self.tgt_lang_id is not None: res["tgt_lang_id"] = ( torch.LongTensor([[self.tgt_lang_id]]).expand(bsz, 1).to(src_tokens) ) return res def num_tokens(self, index): """Return the number of tokens in a sample. 
This value is used to enforce ``--max-tokens`` during batching.""" return max( self.src_sizes[index], self.tgt_sizes[index] if self.tgt_sizes is not None else 0, ) def num_tokens_vec(self, indices): """Return the number of tokens for a set of positions defined by indices. This value is used to enforce ``--max-tokens`` during batching.""" sizes = self.src_sizes[indices] if self.tgt_sizes is not None: sizes = np.maximum(sizes, self.tgt_sizes[indices]) return sizes def size(self, index): """Return an example's size as a float or tuple. This value is used when filtering a dataset with ``--max-positions``.""" return ( self.src_sizes[index], self.tgt_sizes[index] if self.tgt_sizes is not None else 0, ) def ordered_indices(self): """Return an ordered list of indices. Batches will be constructed based on this order.""" if self.shuffle: indices = np.random.permutation(len(self)).astype(np.int64) else: indices = np.arange(len(self), dtype=np.int64) if self.buckets is None: # sort by target length, then source length if self.tgt_sizes is not None: indices = indices[np.argsort(self.tgt_sizes[indices], kind="mergesort")] return indices[np.argsort(self.src_sizes[indices], kind="mergesort")] else: # sort by bucketed_num_tokens, which is: # max(padded_src_len, padded_tgt_len) return indices[ np.argsort(self.bucketed_num_tokens[indices], kind="mergesort") ] @property def supports_prefetch(self): return getattr(self.src, "supports_prefetch", False) and ( getattr(self.tgt, "supports_prefetch", False) or self.tgt is None ) def prefetch(self, indices): self.src.prefetch(indices) if self.tgt is not None: self.tgt.prefetch(indices) if self.align_dataset is not None: self.align_dataset.prefetch(indices) def filter_indices_by_size(self, indices, max_sizes): """Filter a list of sample indices. Remove those that are longer than specified in max_sizes. Args: indices (np.array): original array of sample indices max_sizes (int or list[int] or tuple[int]): max sample size, can be defined separately for src and tgt (then list or tuple) Returns: np.array: filtered sample array list: list of removed indices """ return data_utils.filter_paired_dataset_indices_by_size( self.src_sizes, self.tgt_sizes, indices, max_sizes, )
19,102
38.964435
90
py
sign-topic
sign-topic-main/fairseq/data/transform_eos_concat_langpair_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import torch from torch.utils.data.dataloader import default_collate from fairseq.data import ConcatDataset logger = logging.getLogger(__name__) class TransformEosConcatLangPairDataset(ConcatDataset): """ It is a combination of TransformEosLangPairDataset and ConcatDataset for multiple LangPairDataset datasets. Assume all datasets share the same src_eos, tgt_bos, left_pad_source and left_pad_target """ def __init__( self, datasets, src_eos, tgt_bos, new_src_eos=None, new_tgt_bos=None, ): super().__init__(datasets) if new_src_eos is not None: assert len(new_src_eos) == len(datasets) else: new_src_eos = [] if new_tgt_bos is not None: assert len(new_tgt_bos) == len(datasets) else: new_tgt_bos = [] self.src_eos = src_eos self.tgt_bos = tgt_bos self.new_src_eos = ( torch.LongTensor(new_src_eos).cpu() if len(new_src_eos) > 0 else [] ) self.new_tgt_bos = ( torch.LongTensor(new_tgt_bos).cpu() if len(new_tgt_bos) > 0 else [] ) self.left_pad_source = self.is_left_pad_source(datasets) self.left_pad_target = self.is_left_pad_target(datasets) self.pad_idx = self.src_dict_pad() def src_dict_pad(self): if hasattr(self.datasets[0], "src_dict"): return self.datasets[0].src_dict.pad() if hasattr(self.datasets[0], "dataset"): return self.datasets[0].dataset.src_dict.pad() raise NotImplementedError("No src_dict is found") def __getitem__(self, idx): dataset_idx, sample_idx = self._get_dataset_and_sample_index(idx) return dataset_idx, self.datasets[dataset_idx][sample_idx] def is_left_pad_source(self, datasets): def _left_pad_source(ds): if hasattr(ds, "left_pad_source"): return ds.left_pad_source if hasattr(ds, "dataset"): return _left_pad_source(ds.dataset) logger.warn(f"{type(ds)} has no left_pad_source, using default True") return True left_pad_source = _left_pad_source(datasets[0]) for ds in datasets: if left_pad_source != _left_pad_source(ds): raise ValueError("Different left_pad_source setting detected!") return left_pad_source def is_left_pad_target(self, datasets): def _left_pad_target(ds): if hasattr(ds, "left_pad_target"): return ds.left_pad_target if hasattr(ds, "dataset"): return _left_pad_target(ds.dataset) logger.warn(f"{type(ds)} has no left_pad_target, using default False") return False left_pad_target = _left_pad_target(datasets[0]) for ds in datasets: if left_pad_target != _left_pad_target(ds): raise ValueError("Different left_pad_target setting detected!") return left_pad_target def collater(self, samples, **extra_args): if len(samples) == 0: return samples dataset_ids = [s[0] for s in samples] samples = [s[1] for s in samples] if hasattr(self.datasets[0], "collater"): samples = self.datasets[0].collater(samples, **extra_args) else: samples = default_collate(samples, **extra_args) if len(self.new_src_eos) > 0: if self.left_pad_source: assert ( samples["net_input"]["src_tokens"][:, -1] != self.src_eos ).sum() == 0 samples["net_input"]["src_tokens"][:, -1] = self.new_src_eos[ dataset_ids ] else: eos_idx = samples["net_input"]["src_lengths"] - 1 assert ( samples["net_input"]["src_tokens"][ torch.arange(eos_idx.size(0)), eos_idx ] != self.src_eos ).sum() == 0 samples["net_input"]["src_tokens"].scatter_( 1, eos_idx.view(-1, 1), self.new_src_eos[dataset_ids].view(-1, 1) ) if len(self.new_tgt_bos) > 0 and "prev_output_tokens" in samples["net_input"]: if self.left_pad_target: # TODO: support different padding 
direction on target side raise NotImplementedError( "TransformEosLangPairDataset does not implement --left-pad-target True option" ) else: assert ( samples["net_input"]["prev_output_tokens"][:, 0] != self.tgt_bos ).sum() == 0 samples["net_input"]["prev_output_tokens"][:, 0] = self.new_tgt_bos[ dataset_ids ] return samples
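A hedged sketch of the wrapper above; pairs_a and pairs_b (two LanguagePairDataset instances over a shared dictionary d) and the <lang:a>/<lang:b> tag symbols are assumed to exist already, e.g. built as in the earlier toy sketch:

from fairseq.data.transform_eos_concat_langpair_dataset import (
    TransformEosConcatLangPairDataset,
)

# pairs_a / pairs_b: two LanguagePairDataset instances over the same dictionary d,
# with the hypothetical tag symbols added beforehand via d.add_symbol("<lang:a>") etc.
combined = TransformEosConcatLangPairDataset(
    [pairs_a, pairs_b],
    src_eos=d.eos(),
    tgt_bos=d.eos(),  # fairseq's collate places eos at position 0 of prev_output_tokens
    new_src_eos=[d.index("<lang:a>"), d.index("<lang:b>")],  # per-dataset replacement for the source eos
    new_tgt_bos=None,
)

dataset_idx, sample = combined[0]          # items carry the index of their source dataset
batch = combined.collater([combined[i] for i in range(2)])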
5,126
35.621429
111
py
sign-topic
sign-topic-main/fairseq/data/token_block_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import numpy as np import torch from fairseq.data import FairseqDataset, plasma_utils from fairseq.data.indexed_dataset import best_fitting_int_dtype from typing import Tuple class TokenBlockDataset(FairseqDataset): """Break a Dataset of tokens into blocks. Args: dataset (~torch.utils.data.Dataset): dataset to break into blocks sizes (List[int]): sentence lengths (required for 'complete' and 'eos') block_size (int): maximum block size (ignored in 'eos' break mode) break_mode (str, optional): Mode used for breaking tokens. Values can be one of: - 'none': break tokens into equally sized blocks (up to block_size) - 'complete': break tokens into blocks (up to block_size) such that blocks contains complete sentences, although block_size may be exceeded if some sentences exceed block_size - 'complete_doc': similar to 'complete' mode, but do not cross document boundaries - 'eos': each block contains one sentence (block_size is ignored) include_targets (bool, optional): return next tokens as targets (default: False). document_sep_len (int, optional): document separator size (required for 'complete_doc' break mode). Typically 1 if the sentences have eos and 0 otherwise. """ def __init__( self, dataset, sizes, block_size, pad, eos, break_mode=None, include_targets=False, document_sep_len=1, use_plasma_view=False, split_path=None, plasma_path=None, ): super().__init__() self.dataset = dataset self.pad = pad self.eos = eos self.include_targets = include_targets assert len(dataset) > 0 assert len(dataset) == len(sizes) _sizes, block_to_dataset_index, slice_indices = self._build_slice_indices( sizes, break_mode, document_sep_len, block_size ) if use_plasma_view: plasma_id = (block_size, document_sep_len, str(break_mode), len(dataset)) self._slice_indices = plasma_utils.PlasmaView( slice_indices, split_path, (plasma_id, 0), plasma_path=plasma_path ) self._sizes = plasma_utils.PlasmaView( _sizes, split_path, (plasma_id, 1), plasma_path=plasma_path ) self._block_to_dataset_index = plasma_utils.PlasmaView( block_to_dataset_index, split_path, (plasma_id, 2), plasma_path=plasma_path, ) else: self._slice_indices = plasma_utils.PlasmaArray(slice_indices) self._sizes = plasma_utils.PlasmaArray(_sizes) self._block_to_dataset_index = plasma_utils.PlasmaArray( block_to_dataset_index ) @staticmethod def _build_slice_indices( sizes, break_mode, document_sep_len, block_size ) -> Tuple[np.ndarray]: """Use token_block_utils_fast to build arrays for indexing into self.dataset""" try: from fairseq.data.token_block_utils_fast import ( _get_slice_indices_fast, _get_block_to_dataset_index_fast, ) except ImportError: raise ImportError( "Please build Cython components with: `pip install --editable .` " "or `python setup.py build_ext --inplace`" ) if isinstance(sizes, list): sizes = np.array(sizes, dtype=np.int64) else: if torch.is_tensor(sizes): sizes = sizes.numpy() sizes = sizes.astype(np.int64) break_mode = break_mode if break_mode is not None else "none" # For "eos" break-mode, block_size is not required parameters. 
if break_mode == "eos" and block_size is None: block_size = 0 slice_indices = _get_slice_indices_fast( sizes, str(break_mode), block_size, document_sep_len ) _sizes = slice_indices[:, 1] - slice_indices[:, 0] # build index mapping block indices to the underlying dataset indices if break_mode == "eos": # much faster version for eos break mode block_to_dataset_index = np.stack( [ np.arange(len(sizes)), # starting index in dataset np.zeros( len(sizes), dtype=np.compat.long ), # starting offset within starting index np.arange(len(sizes)), # ending index in dataset ], 1, ) else: block_to_dataset_index = _get_block_to_dataset_index_fast( sizes, slice_indices, ) size_dtype = np.uint16 if block_size < 65535 else np.uint32 num_tokens = slice_indices[-1].max() slice_indices_dtype = best_fitting_int_dtype(num_tokens) slice_indices = slice_indices.astype(slice_indices_dtype) _sizes = _sizes.astype(size_dtype) block_to_dataset_index = block_to_dataset_index.astype(slice_indices_dtype) return _sizes, block_to_dataset_index, slice_indices @property def slice_indices(self): return self._slice_indices.array @property def sizes(self): return self._sizes.array @property def block_to_dataset_index(self): return self._block_to_dataset_index.array def attr(self, attr: str, index: int): start_ds_idx, _, _ = self.block_to_dataset_index[index] return self.dataset.attr(attr, start_ds_idx) def __getitem__(self, index): start_ds_idx, start_offset, end_ds_idx = self.block_to_dataset_index[index] buffer = torch.cat( [self.dataset[idx] for idx in range(start_ds_idx, end_ds_idx + 1)] ) slice_s, slice_e = self.slice_indices[index] length = slice_e - slice_s s, e = start_offset, start_offset + length item = buffer[s:e] if self.include_targets: # *target* is the original sentence (=item) # *source* is shifted right by 1 (maybe left-padded with eos) # *past_target* is shifted right by 2 (left-padded as needed) if s == 0: source = torch.cat([item.new([self.eos]), buffer[0 : e - 1]]) past_target = torch.cat( [item.new([self.pad, self.eos]), buffer[0 : e - 2]] ) else: source = buffer[s - 1 : e - 1] if s == 1: past_target = torch.cat([item.new([self.eos]), buffer[0 : e - 2]]) else: past_target = buffer[s - 2 : e - 2] return source, item, past_target return item def __len__(self): return len(self.slice_indices) @property def supports_prefetch(self): return getattr(self.dataset, "supports_prefetch", False) def prefetch(self, indices): self.dataset.prefetch( { ds_idx for index in indices for start_ds_idx, _, end_ds_idx in [self.block_to_dataset_index[index]] for ds_idx in range(start_ds_idx, end_ds_idx + 1) } )
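A small sketch of blocking a toy token stream with the class above (it needs fairseq's compiled Cython helpers, as the ImportError message in _build_slice_indices notes); the token ids are arbitrary, with 1 as pad and 2 as eos:

import torch
from fairseq.data.token_block_dataset import TokenBlockDataset

sents = [torch.LongTensor([4, 5, 6, 2]),
         torch.LongTensor([7, 8, 2]),
         torch.LongTensor([9, 2])]
blocks = TokenBlockDataset(
    sents, sizes=[len(s) for s in sents], block_size=6,
    pad=1, eos=2, break_mode="complete",
)
print(len(blocks), blocks[0])   # whole sentences packed into blocks of at most 6 tokens

# with include_targets=True each item becomes (source, target, past_target),
# i.e. the shifted inputs and targets used for language modeling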
7,652
35.971014
87
py
sign-topic
sign-topic-main/fairseq/data/subsample_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import numpy as np from . import BaseWrapperDataset logger = logging.getLogger(__name__) class SubsampleDataset(BaseWrapperDataset): """Subsamples a given dataset by a specified ratio. Subsampling is done on the number of examples Args: dataset (~torch.utils.data.Dataset): dataset to subsample size_ratio(float): the ratio to subsample to. must be between 0 and 1 (exclusive) """ def __init__(self, dataset, size_ratio, shuffle=False): super().__init__(dataset) assert size_ratio < 1 self.actual_size = np.ceil(len(dataset) * size_ratio).astype(int) self.indices = np.random.choice( list(range(len(self.dataset))), self.actual_size, replace=False ) self.shuffle = shuffle logger.info( "subsampled dataset from {} to {} (ratio={})".format( len(self.dataset), self.actual_size, size_ratio ) ) def __getitem__(self, index): return self.dataset[self.indices[index]] def __len__(self): return self.actual_size def collater(self, samples): return self.dataset.collater(samples) @property def sizes(self): return self.dataset.sizes[self.indices] @property def name(self): return self.dataset.name def num_tokens(self, index): return self.dataset.num_tokens(self.indices[index]) def size(self, index): return self.dataset.size(self.indices[index]) def ordered_indices(self): """Return an ordered list of indices. Batches will be constructed based on this order.""" if self.shuffle: order = [np.random.permutation(len(self))] else: order = [np.arange(len(self))] order.append(self.sizes) return np.lexsort(order) def prefetch(self, indices): self.dataset.prefetch(self.indices[indices])
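A quick sketch of the subsampler above; RawLabelDataset (defined later in this listing) just wraps a Python list, which keeps the example trivial:

from fairseq.data.raw_label_dataset import RawLabelDataset
from fairseq.data.subsample_dataset import SubsampleDataset

full = RawLabelDataset(list(range(100)))
quarter = SubsampleDataset(full, size_ratio=0.25)   # keep a random 25% of the examples
print(len(quarter), quarter[0])                     # 25 and one randomly chosen label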
2,117
28.013699
101
py
sign-topic
sign-topic-main/fairseq/data/prepend_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import numpy as np
import torch

from . import BaseWrapperDataset


class PrependDataset(BaseWrapperDataset):
    """Replace the first token of every item with a per-example value.

    Despite the name, nothing is inserted: position 0 of each item (optionally
    checked against *ensure_first_token_is*) is overwritten in place with the
    integer returned by ``prepend_getter(dataset, idx)``, e.g. a language-ID token.
    """

    def __init__(self, dataset, prepend_getter, ensure_first_token_is=None):
        super().__init__(dataset)
        self.prepend_getter = prepend_getter
        self.ensure_first_token = ensure_first_token_is

    def __getitem__(self, idx):
        item = self.dataset[idx]
        is_tuple = isinstance(item, tuple)
        src = item[0] if is_tuple else item

        assert self.ensure_first_token is None or src[0] == self.ensure_first_token
        prepend_idx = self.prepend_getter(self.dataset, idx)
        assert isinstance(prepend_idx, int)
        src[0] = prepend_idx
        item = tuple((src,) + item[1:]) if is_tuple else src
        return item
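A toy sketch of the wrapper above; note that it overwrites position 0 of each item (a placeholder 0 here) with the value returned by prepend_getter, e.g. a per-example language-ID:

import torch
from fairseq.data.prepend_dataset import PrependDataset

data = [torch.LongTensor([0, 5, 6, 2]),   # position 0 is a placeholder to be overwritten
        torch.LongTensor([0, 7, 2])]
tagged = PrependDataset(data, prepend_getter=lambda ds, idx: 99, ensure_first_token_is=0)
print(tagged[0])   # tensor([99, 5, 6, 2]); note the underlying tensor is modified in place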
953
31.896552
83
py
sign-topic
sign-topic-main/fairseq/data/base_wrapper_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from torch.utils.data.dataloader import default_collate from . import FairseqDataset class BaseWrapperDataset(FairseqDataset): def __init__(self, dataset): super().__init__() self.dataset = dataset def __getitem__(self, index): return self.dataset[index] def __len__(self): return len(self.dataset) def collater(self, samples): if hasattr(self.dataset, "collater"): return self.dataset.collater(samples) else: return default_collate(samples) @property def sizes(self): return self.dataset.sizes def num_tokens(self, index): return self.dataset.num_tokens(index) def size(self, index): return self.dataset.size(index) def ordered_indices(self): return self.dataset.ordered_indices() @property def supports_prefetch(self): return getattr(self.dataset, "supports_prefetch", False) def attr(self, attr: str, index: int): return self.dataset.attr(attr, index) def prefetch(self, indices): self.dataset.prefetch(indices) def get_batch_shapes(self): return self.dataset.get_batch_shapes() def batch_by_size( self, indices, max_tokens=None, max_sentences=None, required_batch_size_multiple=1, ): return self.dataset.batch_by_size( indices, max_tokens=max_tokens, max_sentences=max_sentences, required_batch_size_multiple=required_batch_size_multiple, ) def filter_indices_by_size(self, indices, max_sizes): return self.dataset.filter_indices_by_size(indices, max_sizes) @property def can_reuse_epoch_itr_across_epochs(self): return self.dataset.can_reuse_epoch_itr_across_epochs def set_epoch(self, epoch): super().set_epoch(epoch) if hasattr(self.dataset, "set_epoch"): self.dataset.set_epoch(epoch)
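The base wrapper simply forwards every call to the wrapped dataset, so typical usage is to subclass it and override only what changes; a minimal illustrative subclass (not part of the file above):

from fairseq.data.base_wrapper_dataset import BaseWrapperDataset


class ClipDataset(BaseWrapperDataset):
    """Illustrative wrapper: truncate every item to at most max_len tokens."""

    def __init__(self, dataset, max_len):
        super().__init__(dataset)
        self.max_len = max_len

    def __getitem__(self, index):
        # sizes/num_tokens are left untouched here; a real wrapper would adjust them too
        return self.dataset[index][: self.max_len]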
2,153
26.265823
70
py
sign-topic
sign-topic-main/fairseq/data/raw_label_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch from . import FairseqDataset class RawLabelDataset(FairseqDataset): def __init__(self, labels): super().__init__() self.labels = labels def __getitem__(self, index): return self.labels[index] def __len__(self): return len(self.labels) def collater(self, samples): return torch.tensor(samples)
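The class above wraps a plain list of labels and stacks them into a tensor at collation time; a two-line sketch:

from fairseq.data.raw_label_dataset import RawLabelDataset

labels = RawLabelDataset([0, 1, 1, 0])
print(labels.collater([labels[i] for i in range(3)]))   # tensor([0, 1, 1])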
546
21.791667
65
py
sign-topic
sign-topic-main/fairseq/data/resampling_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import numpy as np from fairseq.data import BaseWrapperDataset, plasma_utils logger = logging.getLogger(__name__) class ResamplingDataset(BaseWrapperDataset): """Randomly samples from a given dataset at each epoch. Sampling is done with or without replacement, depending on the "replace" parameter. Optionally, the epoch size can be rescaled. This is potentially desirable to increase per-epoch coverage of the base dataset (since sampling with replacement means that many items in the dataset will be left out). In the case of sampling without replacement, size_ratio should be strictly less than 1. Args: dataset (~torch.utils.data.Dataset): dataset on which to sample. weights (List[float]): list of probability weights (default: None, which corresponds to uniform sampling). replace (bool): sampling mode; True for "with replacement", or False for "without replacement" (default: True) size_ratio (float): the ratio to subsample to; must be positive (default: 1.0). batch_by_size (bool): whether or not to batch by sequence length (default: True). seed (int): RNG seed to use (default: 0). epoch (int): starting epoch number (default: 1). """ def __init__( self, dataset, weights=None, replace=True, size_ratio=1.0, batch_by_size=True, seed=0, epoch=1, ): super().__init__(dataset) if weights is None: self.weights = None else: assert len(weights) == len(dataset) weights_arr = np.array(weights, dtype=np.float64) weights_arr /= weights_arr.sum() self.weights = plasma_utils.PlasmaArray(weights_arr) self.replace = replace assert size_ratio > 0.0 if not self.replace: assert size_ratio < 1.0 self.size_ratio = float(size_ratio) self.actual_size = np.ceil(len(dataset) * self.size_ratio).astype(int) self.batch_by_size = batch_by_size self.seed = seed self._cur_epoch = None self._cur_indices = None self.set_epoch(epoch) def __getitem__(self, index): return self.dataset[self._cur_indices.array[index]] def __len__(self): return self.actual_size @property def sizes(self): if isinstance(self.dataset.sizes, list): return [s[self._cur_indices.array] for s in self.dataset.sizes] return self.dataset.sizes[self._cur_indices.array] def num_tokens(self, index): return self.dataset.num_tokens(self._cur_indices.array[index]) def size(self, index): return self.dataset.size(self._cur_indices.array[index]) def ordered_indices(self): if self.batch_by_size: order = [ np.arange(len(self)), self.sizes, ] # No need to handle `self.shuffle == True` return np.lexsort(order) else: return np.arange(len(self)) def prefetch(self, indices): self.dataset.prefetch(self._cur_indices.array[indices]) @property def can_reuse_epoch_itr_across_epochs(self): return False def set_epoch(self, epoch): logger.debug("ResamplingDataset.set_epoch: {}".format(epoch)) super().set_epoch(epoch) if epoch == self._cur_epoch: return self._cur_epoch = epoch # Generate a weighted sample of indices as a function of the # random seed and the current epoch. rng = np.random.RandomState( [ 42, # magic number self.seed % (2 ** 32), # global seed self._cur_epoch, # epoch index ] ) self._cur_indices = plasma_utils.PlasmaArray( rng.choice( len(self.dataset), self.actual_size, replace=self.replace, p=(None if self.weights is None else self.weights.array), ) )
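A sketch of epoch-wise resampling with the class above, again using the trivial RawLabelDataset from this listing as the base:

from fairseq.data.raw_label_dataset import RawLabelDataset
from fairseq.data.resampling_dataset import ResamplingDataset

base = RawLabelDataset(list(range(10)))
resampled = ResamplingDataset(base, replace=True, size_ratio=2.0, seed=7, epoch=1)
print(len(resampled))                       # 20 draws per epoch, with replacement
print([resampled[i] for i in range(5)])
resampled.set_epoch(2)                      # reseeds and draws a fresh set of indices
print([resampled[i] for i in range(5)])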
4,316
29.835714
78
py
sign-topic
sign-topic-main/fairseq/data/dictionary.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import os from collections import Counter from multiprocessing import Pool import torch from fairseq import utils from fairseq.data import data_utils from fairseq.file_chunker_utils import Chunker, find_offsets from fairseq.file_io import PathManager from fairseq.tokenizer import tokenize_line class Dictionary: """A mapping from symbols to consecutive integers""" def __init__( self, *, # begin keyword-only arguments bos="<s>", pad="<pad>", eos="</s>", unk="<unk>", extra_special_symbols=None, ): self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos self.symbols = [] self.count = [] self.indices = {} self.bos_index = self.add_symbol(bos) self.pad_index = self.add_symbol(pad) self.eos_index = self.add_symbol(eos) self.unk_index = self.add_symbol(unk) if extra_special_symbols: for s in extra_special_symbols: self.add_symbol(s) self.nspecial = len(self.symbols) def __eq__(self, other): return self.indices == other.indices def __getitem__(self, idx): if idx < len(self.symbols): return self.symbols[idx] return self.unk_word def get_count(self, idx): return self.count[idx] def __len__(self): """Returns the number of symbols in the dictionary""" return len(self.symbols) def __contains__(self, sym): return sym in self.indices def index(self, sym): """Returns the index of the specified symbol""" assert isinstance(sym, str) if sym in self.indices: return self.indices[sym] return self.unk_index def string( self, tensor, bpe_symbol=None, escape_unk=False, extra_symbols_to_ignore=None, unk_string=None, include_eos=False, separator=" ", ): """Helper for converting a tensor of token indices to a string. Can optionally remove BPE symbols or escape <unk> words. """ if torch.is_tensor(tensor) and tensor.dim() == 2: return "\n".join( self.string( t, bpe_symbol, escape_unk, extra_symbols_to_ignore, include_eos=include_eos, ) for t in tensor ) extra_symbols_to_ignore = set(extra_symbols_to_ignore or []) if not include_eos: extra_symbols_to_ignore.add(self.eos()) def token_string(i): if i == self.unk(): if unk_string is not None: return unk_string else: return self.unk_string(escape_unk) else: return self[i] if hasattr(self, "bos_index"): extra_symbols_to_ignore.add(self.bos()) sent = separator.join( token_string(i) for i in tensor if utils.item(i) not in extra_symbols_to_ignore ) return data_utils.post_process(sent, bpe_symbol) def unk_string(self, escape=False): """Return unknown string, optionally escaped as: <<unk>>""" if escape: return "<{}>".format(self.unk_word) else: return self.unk_word def add_symbol(self, word, n=1, overwrite=False): """Adds a word to the dictionary""" if word in self.indices and not overwrite: idx = self.indices[word] self.count[idx] = self.count[idx] + n return idx else: idx = len(self.symbols) self.indices[word] = idx self.symbols.append(word) self.count.append(n) return idx def update(self, new_dict): """Updates counts from new dictionary.""" for word in new_dict.symbols: idx2 = new_dict.indices[word] if word in self.indices: idx = self.indices[word] self.count[idx] = self.count[idx] + new_dict.count[idx2] else: idx = len(self.symbols) self.indices[word] = idx self.symbols.append(word) self.count.append(new_dict.count[idx2]) def finalize(self, threshold=-1, nwords=-1, padding_factor=8): """Sort symbols by frequency in descending order, ignoring special ones. 
Args: - threshold defines the minimum word count - nwords defines the total number of words in the final dictionary, including special symbols - padding_factor can be used to pad the dictionary size to be a multiple of 8, which is important on some hardware (e.g., Nvidia Tensor Cores). """ if nwords <= 0: nwords = len(self) new_indices = dict(zip(self.symbols[: self.nspecial], range(self.nspecial))) new_symbols = self.symbols[: self.nspecial] new_count = self.count[: self.nspecial] c = Counter( dict( sorted(zip(self.symbols[self.nspecial :], self.count[self.nspecial :])) ) ) for symbol, count in c.most_common(nwords - self.nspecial): if count >= threshold: new_indices[symbol] = len(new_symbols) new_symbols.append(symbol) new_count.append(count) else: break assert len(new_symbols) == len(new_indices) self.count = list(new_count) self.symbols = list(new_symbols) self.indices = new_indices self.pad_to_multiple_(padding_factor) def pad_to_multiple_(self, padding_factor): """Pad Dictionary size to be a multiple of *padding_factor*.""" if padding_factor > 1: i = 0 while len(self) % padding_factor != 0: symbol = "madeupword{:04d}".format(i) self.add_symbol(symbol, n=0) i += 1 def bos(self): """Helper to get index of beginning-of-sentence symbol""" return self.bos_index def pad(self): """Helper to get index of pad symbol""" return self.pad_index def eos(self): """Helper to get index of end-of-sentence symbol""" return self.eos_index def unk(self): """Helper to get index of unk symbol""" return self.unk_index @classmethod def load(cls, f): """Loads the dictionary from a text file with the format: ``` <symbol0> <count0> <symbol1> <count1> ... ``` """ d = cls() d.add_from_file(f) return d def add_from_file(self, f): """ Loads a pre-existing dictionary from a text file and adds its symbols to this instance. """ if isinstance(f, str): try: with open(PathManager.get_local_path(f), "r", encoding="utf-8") as fd: self.add_from_file(fd) except FileNotFoundError as fnfe: raise fnfe except UnicodeError: raise Exception( "Incorrect encoding detected in {}, please " "rebuild the dataset".format(f) ) return lines = f.readlines() indices_start_line = self._load_meta(lines) for line in lines[indices_start_line:]: try: line, field = line.rstrip().rsplit(" ", 1) if field == "#fairseq:overwrite": overwrite = True line, field = line.rsplit(" ", 1) else: overwrite = False count = int(field) word = line if word in self and not overwrite: raise RuntimeError( "Duplicate word found when loading Dictionary: '{}'. " "Duplicate words can overwrite earlier ones by adding the " "#fairseq:overwrite flag at the end of the corresponding row " "in the dictionary file. 
If using the Camembert model, please " "download an updated copy of the model file.".format(word) ) self.add_symbol(word, n=count, overwrite=overwrite) except ValueError: raise ValueError( f"Incorrect dictionary format, expected '<token> <cnt> [flags]': \"{line}\"" ) def _save(self, f, kv_iterator): if isinstance(f, str): PathManager.mkdirs(os.path.dirname(f)) with PathManager.open(f, "w", encoding="utf-8") as fd: return self.save(fd) for k, v in kv_iterator: print("{} {}".format(k, v), file=f) def _get_meta(self): return [], [] def _load_meta(self, lines): return 0 def save(self, f): """Stores dictionary into a text file""" ex_keys, ex_vals = self._get_meta() self._save( f, zip( ex_keys + self.symbols[self.nspecial :], ex_vals + self.count[self.nspecial :], ), ) def dummy_sentence(self, length): t = torch.Tensor(length).uniform_(self.nspecial + 1, len(self)).long() t[-1] = self.eos() return t def encode_line( self, line, line_tokenizer=tokenize_line, add_if_not_exist=True, consumer=None, append_eos=True, reverse_order=False, ) -> torch.IntTensor: words = line_tokenizer(line) if reverse_order: words = list(reversed(words)) nwords = len(words) ids = torch.IntTensor(nwords + 1 if append_eos else nwords) for i, word in enumerate(words): if add_if_not_exist: idx = self.add_symbol(word) else: idx = self.index(word) if consumer is not None: consumer(word, idx) ids[i] = idx if append_eos: ids[nwords] = self.eos_index return ids @staticmethod def _add_file_to_dictionary_single_worker( filename, tokenize, eos_word, start_offset, end_offset, ): counter = Counter() with Chunker(filename, start_offset, end_offset) as line_iterator: for line in line_iterator: for word in tokenize(line): counter.update([word]) counter.update([eos_word]) return counter @staticmethod def add_file_to_dictionary(filename, dict, tokenize, num_workers): def merge_result(counter): for w, c in sorted(counter.items()): dict.add_symbol(w, c) local_file = PathManager.get_local_path(filename) offsets = find_offsets(local_file, num_workers) if num_workers > 1: chunks = zip(offsets, offsets[1:]) pool = Pool(processes=num_workers) results = [] for (start_offset, end_offset) in chunks: results.append( pool.apply_async( Dictionary._add_file_to_dictionary_single_worker, ( local_file, tokenize, dict.eos_word, start_offset, end_offset, ), ) ) pool.close() pool.join() for r in results: merge_result(r.get()) else: merge_result( Dictionary._add_file_to_dictionary_single_worker( local_file, tokenize, dict.eos_word, offsets[0], offsets[1] ) ) class TruncatedDictionary(object): def __init__(self, wrapped_dict, length): self.__class__ = type( wrapped_dict.__class__.__name__, (self.__class__, wrapped_dict.__class__), {}, ) self.__dict__ = wrapped_dict.__dict__ self.wrapped_dict = wrapped_dict self.length = min(len(self.wrapped_dict), length) def __len__(self): return self.length def __getitem__(self, i): if i < self.length: return self.wrapped_dict[i] return self.wrapped_dict.unk()
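A short sketch of building and using the vocabulary class above (the /tmp/dict.txt path is just for illustration):

from fairseq.data.dictionary import Dictionary

d = Dictionary()
for word in "the cat sat on the mat".split():
    d.add_symbol(word)
d.finalize(threshold=1, padding_factor=8)     # sort by count, pad size to a multiple of 8

ids = d.encode_line("the cat sat", add_if_not_exist=False)   # IntTensor ending in </s>
print(ids)
print(d.string(ids))                                         # "the cat sat"
d.save("/tmp/dict.txt")                                      # one "<symbol> <count>" per line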
12,903
31.099502
96
py
sign-topic
sign-topic-main/fairseq/data/append_token_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import numpy as np import torch from . import BaseWrapperDataset class AppendTokenDataset(BaseWrapperDataset): def __init__(self, dataset, token=None): super().__init__(dataset) self.token = token if token is not None: self._sizes = np.array(dataset.sizes) + 1 else: self._sizes = dataset.sizes def __getitem__(self, idx): item = self.dataset[idx] if self.token is not None: item = torch.cat([item, item.new([self.token])]) return item @property def sizes(self): return self._sizes def num_tokens(self, index): n = self.dataset.num_tokens(index) if self.token is not None: n += 1 return n def size(self, index): n = self.dataset.size(index) if self.token is not None: n += 1 return n
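The wrapper above reads a .sizes array from the base dataset, so the sketch below defines a tiny hypothetical stand-in that exposes one (token id 2 plays the role of </s>):

import numpy as np
import torch
from fairseq.data.append_token_dataset import AppendTokenDataset


class ToyTokens(torch.utils.data.Dataset):
    """Hypothetical helper: a list of 1-D tensors plus the .sizes array the wrapper reads."""

    def __init__(self, tensors):
        self.tensors = tensors
        self.sizes = np.array([len(t) for t in tensors])

    def __getitem__(self, index):
        return self.tensors[index]

    def __len__(self):
        return len(self.tensors)


base = ToyTokens([torch.LongTensor([4, 5]), torch.LongTensor([6])])
with_eos = AppendTokenDataset(base, token=2)
print(with_eos[0], with_eos.sizes)   # tensor([4, 5, 2]) [3 2]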
1,065
24.380952
65
py
sign-topic
sign-topic-main/fairseq/data/fasta_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import os import subprocess import threading from pathlib import Path import numpy as np import torch def fasta_file_path(prefix_path): return prefix_path + ".fasta" class FastaDataset(torch.utils.data.Dataset): """ For loading protein sequence datasets in the common FASTA data format """ def __init__(self, path: str, cache_indices=False): self.fn = fasta_file_path(path) self.threadlocal = threading.local() self.cache = Path(f"{path}.fasta.idx.npy") if cache_indices: if self.cache.exists(): self.offsets, self.sizes = np.load(self.cache) else: self.offsets, self.sizes = self._build_index(path) np.save(self.cache, np.stack([self.offsets, self.sizes])) else: self.offsets, self.sizes = self._build_index(path) def _get_file(self): if not hasattr(self.threadlocal, "f"): self.threadlocal.f = open(self.fn, "r") return self.threadlocal.f def __getitem__(self, idx): f = self._get_file() f.seek(self.offsets[idx]) desc = f.readline().strip() line = f.readline() seq = "" while line != "" and line[0] != ">": seq += line.strip() line = f.readline() return desc, seq def __len__(self): return self.offsets.size def _build_index(self, path: str): # Use grep and awk to get 100M/s on local SSD. # Should process your enormous 100G fasta in ~10 min single core... path = fasta_file_path(path) bytes_offsets = subprocess.check_output( f"cat {path} | tqdm --bytes --total $(wc -c < {path})" "| grep --byte-offset '^>' -o | cut -d: -f1", shell=True, ) fasta_lengths = subprocess.check_output( f"cat {path} | tqdm --bytes --total $(wc -c < {path})" "| awk '/^>/ {print \"\";next;} { printf(\"%s\",$0);}' | tail -n+2 | awk '{print length($1)}'", shell=True, ) bytes_np = np.fromstring(bytes_offsets, dtype=np.int64, sep=" ") sizes_np = np.fromstring(fasta_lengths, dtype=np.int64, sep=" ") return bytes_np, sizes_np def __setstate__(self, state): self.__dict__ = state self.threadlocal = threading.local() def __getstate__(self): d = {} for i, v in self.__dict__.items(): if i != "threadlocal": d[i] = v return d def __del__(self): if hasattr(self.threadlocal, "f"): self.threadlocal.f.close() del self.threadlocal.f @staticmethod def exists(path): return os.path.exists(fasta_file_path(path)) class EncodedFastaDataset(FastaDataset): """ The FastaDataset returns raw sequences - this allows us to return indices with a dictionary instead. """ def __init__(self, path, dictionary): super().__init__(path, cache_indices=True) self.dictionary = dictionary def __getitem__(self, idx): desc, seq = super().__getitem__(idx) return self.dictionary.encode_line(seq, line_tokenizer=list).long()
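A sketch of the FASTA loader above on a throwaway file; note that index building shells out to grep, awk and the tqdm CLI, so those must be available, and the /tmp path is made up:

from pathlib import Path
from fairseq.data.fasta_dataset import FastaDataset

Path("/tmp/toy.fasta").write_text(">seq1 demo\nMKTAYIAK\n>seq2 demo\nGSHMARN\n")
ds = FastaDataset("/tmp/toy")        # the prefix gets ".fasta" appended internally
print(len(ds), ds[0])                # 2 ('>seq1 demo', 'MKTAYIAK')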
3,387
30.37037
107
py
sign-topic
sign-topic-main/fairseq/data/mask_tokens_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from functools import lru_cache import numpy as np import torch from fairseq.data import Dictionary, data_utils from . import BaseWrapperDataset, LRUCacheDataset class MaskTokensDataset(BaseWrapperDataset): """ A wrapper Dataset for masked language modeling. Input items are masked according to the specified masking probability. Args: dataset: Dataset to wrap. sizes: Sentence lengths vocab: Dictionary with the vocabulary and special tokens. pad_idx: Id of pad token in vocab mask_idx: Id of mask token in vocab return_masked_tokens: controls whether to return the non-masked tokens (the default) or to return a tensor with the original masked token IDs (and *pad_idx* elsewhere). The latter is useful as targets for masked LM training. seed: Seed for random number generator for reproducibility. mask_prob: probability of replacing a token with *mask_idx*. leave_unmasked_prob: probability that a masked token is unmasked. random_token_prob: probability of replacing a masked token with a random token from the vocabulary. freq_weighted_replacement: sample random replacement words based on word frequencies in the vocab. mask_whole_words: only mask whole words. This should be a byte mask over vocab indices, indicating whether it is the beginning of a word. We will extend any mask to encompass the whole word. bpe: BPE to use for whole-word masking. mask_multiple_length : repeat each mask index multiple times. Default value is 1. mask_stdev : standard deviation of masks distribution in case of multiple masking. Default value is 0. """ @classmethod def apply_mask(cls, dataset: torch.utils.data.Dataset, *args, **kwargs): """Return the source and target datasets for masked LM training.""" dataset = LRUCacheDataset(dataset) return ( LRUCacheDataset(cls(dataset, *args, **kwargs, return_masked_tokens=False)), LRUCacheDataset(cls(dataset, *args, **kwargs, return_masked_tokens=True)), ) def __init__( self, dataset: torch.utils.data.Dataset, vocab: Dictionary, pad_idx: int, mask_idx: int, return_masked_tokens: bool = False, seed: int = 1, mask_prob: float = 0.15, leave_unmasked_prob: float = 0.1, random_token_prob: float = 0.1, freq_weighted_replacement: bool = False, mask_whole_words: torch.Tensor = None, mask_multiple_length: int = 1, mask_stdev: float = 0.0, ): assert 0.0 < mask_prob < 1.0 assert 0.0 <= random_token_prob <= 1.0 assert 0.0 <= leave_unmasked_prob <= 1.0 assert random_token_prob + leave_unmasked_prob <= 1.0 assert mask_multiple_length >= 1 assert mask_stdev >= 0.0 self.dataset = dataset self.vocab = vocab self.pad_idx = pad_idx self.mask_idx = mask_idx self.return_masked_tokens = return_masked_tokens self.seed = seed self.mask_prob = mask_prob self.leave_unmasked_prob = leave_unmasked_prob self.random_token_prob = random_token_prob self.mask_whole_words = mask_whole_words self.mask_multiple_length = mask_multiple_length self.mask_stdev = mask_stdev if random_token_prob > 0.0: if freq_weighted_replacement: weights = np.array(self.vocab.count) else: weights = np.ones(len(self.vocab)) weights[: self.vocab.nspecial] = 0 self.weights = weights / weights.sum() self.epoch = 0 @property def can_reuse_epoch_itr_across_epochs(self): return True # only the noise changes, not item sizes def set_epoch(self, epoch, **unused): super().set_epoch(epoch) self.epoch = epoch def __getitem__(self, index: int): return 
self.__getitem_cached__(self.seed, self.epoch, index) @lru_cache(maxsize=8) def __getitem_cached__(self, seed: int, epoch: int, index: int): with data_utils.numpy_seed(self.seed, self.epoch, index): item = self.dataset[index] sz = len(item) assert ( self.mask_idx not in item ), "Dataset contains mask_idx (={}), this is not expected!".format( self.mask_idx, ) if self.mask_whole_words is not None: word_begins_mask = self.mask_whole_words.gather(0, item) word_begins_idx = word_begins_mask.nonzero().view(-1) sz = len(word_begins_idx) words = np.split(word_begins_mask, word_begins_idx)[1:] assert len(words) == sz word_lens = list(map(len, words)) # decide elements to mask mask = np.full(sz, False) num_mask = int( # add a random number for probabilistic rounding self.mask_prob * sz / float(self.mask_multiple_length) + np.random.rand() ) # multiple masking as described in the vq-wav2vec paper (https://arxiv.org/abs/1910.05453) mask_idc = np.random.choice(sz, num_mask, replace=False) if self.mask_stdev > 0.0: lengths = np.random.normal( self.mask_multiple_length, self.mask_stdev, size=num_mask ) lengths = [max(0, int(round(x))) for x in lengths] mask_idc = np.asarray( [ mask_idc[j] + offset for j in range(len(mask_idc)) for offset in range(lengths[j]) ], dtype=np.int64, ) else: mask_idc = np.concatenate( [mask_idc + i for i in range(self.mask_multiple_length)] ) mask_idc = mask_idc[mask_idc < len(mask)] try: mask[mask_idc] = True except: # something wrong print( "Assigning mask indexes {} to mask {} failed!".format( mask_idc, mask ) ) raise if self.return_masked_tokens: # exit early if we're just returning the masked tokens # (i.e., the targets for masked LM training) if self.mask_whole_words is not None: mask = np.repeat(mask, word_lens) new_item = np.full(len(mask), self.pad_idx) new_item[mask] = item[torch.from_numpy(mask.astype(np.uint8)) == 1] return torch.from_numpy(new_item) # decide unmasking and random replacement rand_or_unmask_prob = self.random_token_prob + self.leave_unmasked_prob if rand_or_unmask_prob > 0.0: rand_or_unmask = mask & (np.random.rand(sz) < rand_or_unmask_prob) if self.random_token_prob == 0.0: unmask = rand_or_unmask rand_mask = None elif self.leave_unmasked_prob == 0.0: unmask = None rand_mask = rand_or_unmask else: unmask_prob = self.leave_unmasked_prob / rand_or_unmask_prob decision = np.random.rand(sz) < unmask_prob unmask = rand_or_unmask & decision rand_mask = rand_or_unmask & (~decision) else: unmask = rand_mask = None if unmask is not None: mask = mask ^ unmask if self.mask_whole_words is not None: mask = np.repeat(mask, word_lens) new_item = np.copy(item) new_item[mask] = self.mask_idx if rand_mask is not None: num_rand = rand_mask.sum() if num_rand > 0: if self.mask_whole_words is not None: rand_mask = np.repeat(rand_mask, word_lens) num_rand = rand_mask.sum() new_item[rand_mask] = np.random.choice( len(self.vocab), num_rand, p=self.weights, ) return torch.from_numpy(new_item)
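A sketch of masked-LM corruption with the class above; apply_mask returns a pair of wrapped datasets, one yielding the corrupted inputs and one yielding the targets:

import torch
from fairseq.data.dictionary import Dictionary
from fairseq.data.mask_tokens_dataset import MaskTokensDataset

d = Dictionary()
for w in "a b c d e".split():
    d.add_symbol(w)
mask_idx = d.add_symbol("<mask>")

sents = [torch.LongTensor([d.index(w) for w in "a b c d e".split()] + [d.eos()])]
src_ds, tgt_ds = MaskTokensDataset.apply_mask(
    sents, d, pad_idx=d.pad(), mask_idx=mask_idx, seed=13, mask_prob=0.3,
)
print(src_ds[0])   # some positions replaced by <mask> (or kept / randomized per the probabilities)
print(tgt_ds[0])   # original ids at the masked positions, pad everywhere else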
8,777
38.719457
102
py
sign-topic
sign-topic-main/fairseq/data/concat_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import bisect import numpy as np from torch.utils.data.dataloader import default_collate from . import FairseqDataset class ConcatDataset(FairseqDataset): @staticmethod def cumsum(sequence, sample_ratios): r, s = [], 0 for e, ratio in zip(sequence, sample_ratios): curr_len = int(ratio * len(e)) r.append(curr_len + s) s += curr_len return r def __init__(self, datasets, sample_ratios=1): super(ConcatDataset, self).__init__() assert len(datasets) > 0, "datasets should not be an empty iterable" self.datasets = list(datasets) if isinstance(sample_ratios, int): sample_ratios = [sample_ratios] * len(self.datasets) self.sample_ratios = sample_ratios self.cumulative_sizes = self.cumsum(self.datasets, sample_ratios) self.real_sizes = [len(d) for d in self.datasets] def __len__(self): return self.cumulative_sizes[-1] def __getitem__(self, idx): dataset_idx, sample_idx = self._get_dataset_and_sample_index(idx) return self.datasets[dataset_idx][sample_idx] def _get_dataset_and_sample_index(self, idx: int): dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx) if dataset_idx == 0: sample_idx = idx else: sample_idx = idx - self.cumulative_sizes[dataset_idx - 1] sample_idx = sample_idx % self.real_sizes[dataset_idx] return dataset_idx, sample_idx def collater(self, samples, **extra_args): # For now only supports datasets with same underlying collater implementations if hasattr(self.datasets[0], "collater"): return self.datasets[0].collater(samples, **extra_args) else: return default_collate(samples, **extra_args) def size(self, idx: int): """ Return an example's size as a float or tuple. """ dataset_idx, sample_idx = self._get_dataset_and_sample_index(idx) return self.datasets[dataset_idx].size(sample_idx) def num_tokens(self, index: int): return np.max(self.size(index)) def attr(self, attr: str, index: int): dataset_idx = bisect.bisect_right(self.cumulative_sizes, index) return getattr(self.datasets[dataset_idx], attr, None) @property def sizes(self): _dataset_sizes = [] for ds, sr in zip(self.datasets, self.sample_ratios): if isinstance(ds.sizes, np.ndarray): _dataset_sizes.append(np.tile(ds.sizes, sr)) else: # Only support underlying dataset with single size array. assert isinstance(ds.sizes, list) _dataset_sizes.append(np.tile(ds.sizes[0], sr)) return np.concatenate(_dataset_sizes) @property def supports_prefetch(self): return all(d.supports_prefetch for d in self.datasets) def ordered_indices(self): """ Returns indices sorted by length. So less padding is needed. 
""" if isinstance(self.sizes, np.ndarray) and len(self.sizes.shape) > 1: # special handling for concatenating lang_pair_datasets indices = np.arange(len(self)) sizes = self.sizes tgt_sizes = ( sizes[:, 1] if len(sizes.shape) > 0 and sizes.shape[1] > 1 else None ) src_sizes = ( sizes[:, 0] if len(sizes.shape) > 0 and sizes.shape[1] > 1 else sizes ) # sort by target length, then source length if tgt_sizes is not None: indices = indices[np.argsort(tgt_sizes[indices], kind="mergesort")] return indices[np.argsort(src_sizes[indices], kind="mergesort")] else: return np.argsort(self.sizes) def prefetch(self, indices): frm = 0 for to, ds in zip(self.cumulative_sizes, self.datasets): real_size = len(ds) if getattr(ds, "supports_prefetch", False): ds.prefetch([(i - frm) % real_size for i in indices if frm <= i < to]) frm = to @property def can_reuse_epoch_itr_across_epochs(self): return all(d.can_reuse_epoch_itr_across_epochs for d in self.datasets) def set_epoch(self, epoch): super().set_epoch(epoch) for ds in self.datasets: if hasattr(ds, "set_epoch"): ds.set_epoch(epoch)
4,645
36.168
86
py
sign-topic
sign-topic-main/fairseq/data/data_utils.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. try: from collections.abc import Iterable except ImportError: from collections import Iterable import contextlib import itertools import logging import re import warnings from typing import Optional, Tuple import numpy as np import torch from fairseq.file_io import PathManager from fairseq import utils import os logger = logging.getLogger(__name__) def infer_language_pair(path): """Infer language pair from filename: <split>.<lang1>-<lang2>.(...).idx""" src, dst = None, None for filename in PathManager.ls(path): parts = filename.split(".") if len(parts) >= 3 and len(parts[1].split("-")) == 2: return parts[1].split("-") return src, dst def collate_tokens( values, pad_idx, eos_idx=None, left_pad=False, move_eos_to_beginning=False, pad_to_length=None, pad_to_multiple=1, pad_to_bsz=None, ): """Convert a list of 1d tensors into a padded 2d tensor.""" size = max(v.size(0) for v in values) size = size if pad_to_length is None else max(size, pad_to_length) if pad_to_multiple != 1 and size % pad_to_multiple != 0: size = int(((size - 0.1) // pad_to_multiple + 1) * pad_to_multiple) batch_size = len(values) if pad_to_bsz is None else max(len(values), pad_to_bsz) res = values[0].new(batch_size, size).fill_(pad_idx) def copy_tensor(src, dst): assert dst.numel() == src.numel() if move_eos_to_beginning: if eos_idx is None: # if no eos_idx is specified, then use the last token in src dst[0] = src[-1] else: dst[0] = eos_idx dst[1:] = src[:-1] else: dst.copy_(src) for i, v in enumerate(values): copy_tensor(v, res[i][size - len(v) :] if left_pad else res[i][: len(v)]) return res def load_indexed_dataset( path, dictionary=None, dataset_impl=None, combine=False, default="cached" ): """A helper function for loading indexed datasets. Args: path (str): path to indexed dataset (e.g., 'data-bin/train') dictionary (~fairseq.data.Dictionary): data dictionary dataset_impl (str, optional): which dataset implementation to use. If not provided, it will be inferred automatically. For legacy indexed data we use the 'cached' implementation by default. combine (bool, optional): automatically load and combine multiple datasets. For example, if *path* is 'data-bin/train', then we will combine 'data-bin/train', 'data-bin/train1', ... and return a single ConcatDataset instance. 
""" import fairseq.data.indexed_dataset as indexed_dataset from fairseq.data.concat_dataset import ConcatDataset datasets = [] for k in itertools.count(): path_k = path + (str(k) if k > 0 else "") try: path_k = indexed_dataset.get_indexed_dataset_to_local(path_k) except Exception as e: if "StorageException: [404] Path not found" in str(e): logger.warning(f"path_k: {e} not found") else: raise e dataset_impl_k = dataset_impl if dataset_impl_k is None: dataset_impl_k = indexed_dataset.infer_dataset_impl(path_k) dataset = indexed_dataset.make_dataset( path_k, impl=dataset_impl_k or default, fix_lua_indexing=True, dictionary=dictionary, ) if dataset is None: break logger.info("loaded {:,} examples from: {}".format(len(dataset), path_k)) datasets.append(dataset) if not combine: break if len(datasets) == 0: return None elif len(datasets) == 1: return datasets[0] else: return ConcatDataset(datasets) @contextlib.contextmanager def numpy_seed(seed, *addl_seeds): """Context manager which seeds the NumPy PRNG with the specified seed and restores the state afterward""" if seed is None: yield return if len(addl_seeds) > 0: seed = int(hash((seed, *addl_seeds)) % 1e6) state = np.random.get_state() np.random.seed(seed) try: yield finally: np.random.set_state(state) def collect_filtered(function, iterable, filtered): """ Similar to :func:`filter` but collects filtered elements in ``filtered``. Args: function (callable): function that returns ``False`` for elements that should be filtered iterable (iterable): iterable to filter filtered (list): list to store filtered elements """ for el in iterable: if function(el): yield el else: filtered.append(el) def _filter_by_size_dynamic(indices, size_fn, max_positions, raise_exception=False): def compare_leq(a, b): return a <= b if not isinstance(a, tuple) else max(a) <= b def check_size(idx): if isinstance(max_positions, float) or isinstance(max_positions, int): return compare_leq(size_fn(idx), max_positions) elif isinstance(max_positions, dict): idx_size = size_fn(idx) assert isinstance(idx_size, dict) intersect_keys = set(max_positions.keys()) & set(idx_size.keys()) return all( all( a is None or b is None or a <= b for a, b in zip(idx_size[key], max_positions[key]) ) for key in intersect_keys ) else: # For MultiCorpusSampledDataset, will generalize it later if not isinstance(size_fn(idx), Iterable): return all(size_fn(idx) <= b for b in max_positions) return all( a is None or b is None or a <= b for a, b in zip(size_fn(idx), max_positions) ) ignored = [] itr = collect_filtered(check_size, indices, ignored) indices = np.fromiter(itr, dtype=np.int64, count=-1) return indices, ignored def filter_by_size(indices, dataset, max_positions, raise_exception=False): """ [deprecated] Filter indices based on their size. Use `FairseqDataset::filter_indices_by_size` instead. Args: indices (List[int]): ordered list of dataset indices dataset (FairseqDataset): fairseq dataset instance max_positions (tuple): filter elements larger than this size. Comparisons are done component-wise. raise_exception (bool, optional): if ``True``, raise an exception if any elements are filtered (default: False). """ warnings.warn( "data_utils.filter_by_size is deprecated. 
" "Use `FairseqDataset::filter_indices_by_size` instead.", stacklevel=2, ) if isinstance(max_positions, float) or isinstance(max_positions, int): if hasattr(dataset, "sizes") and isinstance(dataset.sizes, np.ndarray): ignored = indices[dataset.sizes[indices] > max_positions].tolist() indices = indices[dataset.sizes[indices] <= max_positions] elif ( hasattr(dataset, "sizes") and isinstance(dataset.sizes, list) and len(dataset.sizes) == 1 ): ignored = indices[dataset.sizes[0][indices] > max_positions].tolist() indices = indices[dataset.sizes[0][indices] <= max_positions] else: indices, ignored = _filter_by_size_dynamic( indices, dataset.size, max_positions ) else: indices, ignored = _filter_by_size_dynamic(indices, dataset.size, max_positions) if len(ignored) > 0 and raise_exception: raise Exception( ( "Size of sample #{} is invalid (={}) since max_positions={}, " "skip this example with --skip-invalid-size-inputs-valid-test" ).format(ignored[0], dataset.size(ignored[0]), max_positions) ) if len(ignored) > 0: logger.warning( ( "{} samples have invalid sizes and will be skipped, " "max_positions={}, first few sample ids={}" ).format(len(ignored), max_positions, ignored[:10]) ) return indices def filter_paired_dataset_indices_by_size(src_sizes, tgt_sizes, indices, max_sizes): """Filter a list of sample indices. Remove those that are longer than specified in max_sizes. Args: indices (np.array): original array of sample indices max_sizes (int or list[int] or tuple[int]): max sample size, can be defined separately for src and tgt (then list or tuple) Returns: np.array: filtered sample array list: list of removed indices """ if max_sizes is None: return indices, [] if type(max_sizes) in (int, float): max_src_size, max_tgt_size = max_sizes, max_sizes else: max_src_size, max_tgt_size = max_sizes if tgt_sizes is None: ignored = indices[src_sizes[indices] > max_src_size] else: ignored = indices[ (src_sizes[indices] > max_src_size) | (tgt_sizes[indices] > max_tgt_size) ] if len(ignored) > 0: if tgt_sizes is None: indices = indices[src_sizes[indices] <= max_src_size] else: indices = indices[ (src_sizes[indices] <= max_src_size) & (tgt_sizes[indices] <= max_tgt_size) ] return indices, ignored.tolist() def batch_by_size( indices, num_tokens_fn, num_tokens_vec=None, max_tokens=None, max_sentences=None, required_batch_size_multiple=1, fixed_shapes=None, ): """ Yield mini-batches of indices bucketed by size. Batches may contain sequences of different lengths. Args: indices (List[int]): ordered list of dataset indices num_tokens_fn (callable): function that returns the number of tokens at a given index num_tokens_vec (List[int], optional): precomputed vector of the number of tokens for each index in indices (to enable faster batch generation) max_tokens (int, optional): max number of tokens in each batch (default: None). max_sentences (int, optional): max number of sentences in each batch (default: None). required_batch_size_multiple (int, optional): require batch size to be less than N or a multiple of N (default: 1). fixed_shapes (List[Tuple[int, int]], optional): if given, batches will only be created with the given shapes. *max_sentences* and *required_batch_size_multiple* will be ignored (default: None). 
""" try: from fairseq.data.data_utils_fast import ( batch_by_size_fn, batch_by_size_vec, batch_fixed_shapes_fast, ) except ImportError: raise ImportError( "Please build Cython components with: " "`python setup.py build_ext --inplace`" ) except ValueError: raise ValueError( "Please build (or rebuild) Cython components with `python setup.py build_ext --inplace`." ) # added int() to avoid TypeError: an integer is required max_tokens = int(max_tokens) if max_tokens is not None else -1 max_sentences = max_sentences if max_sentences is not None else -1 bsz_mult = required_batch_size_multiple if not isinstance(indices, np.ndarray): indices = np.fromiter(indices, dtype=np.int64, count=-1) if num_tokens_vec is not None and not isinstance(num_tokens_vec, np.ndarray): num_tokens_vec = np.fromiter(num_tokens_vec, dtype=np.int64, count=-1) if fixed_shapes is None: if num_tokens_vec is None: return batch_by_size_fn( indices, num_tokens_fn, max_tokens, max_sentences, bsz_mult, ) else: return batch_by_size_vec( indices, num_tokens_vec, max_tokens, max_sentences, bsz_mult, ) else: fixed_shapes = np.array(fixed_shapes, dtype=np.int64) sort_order = np.lexsort( [ fixed_shapes[:, 1].argsort(), # length fixed_shapes[:, 0].argsort(), # bsz ] ) fixed_shapes_sorted = fixed_shapes[sort_order] return batch_fixed_shapes_fast(indices, num_tokens_fn, fixed_shapes_sorted) def post_process(sentence: str, symbol: str): if symbol == "sentencepiece": sentence = sentence.replace(" ", "").replace("\u2581", " ").strip() elif symbol == "wordpiece": sentence = sentence.replace(" ", "").replace("_", " ").strip() elif symbol == "letter": sentence = sentence.replace(" ", "").replace("|", " ").strip() elif symbol == "silence": import re sentence = sentence.replace("<SIL>", "") sentence = re.sub(" +", " ", sentence).strip() elif symbol == "_EOW": sentence = sentence.replace(" ", "").replace("_EOW", " ").strip() elif symbol in {"subword_nmt", "@@ ", "@@"}: if symbol == "subword_nmt": symbol = "@@ " sentence = (sentence + " ").replace(symbol, "").rstrip() elif symbol == "none": pass elif symbol is not None: raise NotImplementedError(f"Unknown post_process option: {symbol}") return sentence def compute_mask_indices( shape: Tuple[int, int], padding_mask: Optional[torch.Tensor], mask_prob: float, mask_length: int, mask_type: str = "static", mask_other: float = 0.0, min_masks: int = 0, no_overlap: bool = False, min_space: int = 0, ) -> np.ndarray: """ Computes random mask spans for a given shape Args: shape: the the shape for which to compute masks. should be of size 2 where first element is batch size and 2nd is timesteps padding_mask: optional padding mask of the same size as shape, which will prevent masking padded elements mask_prob: probability for each token to be chosen as start of the span to be masked. this will be multiplied by number of timesteps divided by length of mask span to mask approximately this percentage of all elements. however due to overlaps, the actual number will be smaller (unless no_overlap is True) mask_type: how to compute mask lengths static = fixed size uniform = sample from uniform distribution [mask_other, mask_length*2] normal = sample from normal distribution with mean mask_length and stdev mask_other. 
mask is min 1 element poisson = sample from possion distribution with lambda = mask length min_masks: minimum number of masked spans no_overlap: if false, will switch to an alternative recursive algorithm that prevents spans from overlapping min_space: only used if no_overlap is True, this is how many elements to keep unmasked between spans """ bsz, all_sz = shape mask = np.full((bsz, all_sz), False) all_num_mask = int( # add a random number for probabilistic rounding mask_prob * all_sz / float(mask_length) + np.random.rand() ) all_num_mask = max(min_masks, all_num_mask) mask_idcs = [] for i in range(bsz): if padding_mask is not None: sz = all_sz - padding_mask[i].long().sum().item() num_mask = int( # add a random number for probabilistic rounding mask_prob * sz / float(mask_length) + np.random.rand() ) num_mask = max(min_masks, num_mask) else: sz = all_sz num_mask = all_num_mask if mask_type == "static": lengths = np.full(num_mask, mask_length) elif mask_type == "uniform": lengths = np.random.randint(mask_other, mask_length * 2 + 1, size=num_mask) elif mask_type == "normal": lengths = np.random.normal(mask_length, mask_other, size=num_mask) lengths = [max(1, int(round(x))) for x in lengths] elif mask_type == "poisson": lengths = np.random.poisson(mask_length, size=num_mask) lengths = [int(round(x)) for x in lengths] else: raise Exception("unknown mask selection " + mask_type) if sum(lengths) == 0: lengths[0] = min(mask_length, sz - 1) if no_overlap: mask_idc = [] def arrange(s, e, length, keep_length): span_start = np.random.randint(s, e - length) mask_idc.extend(span_start + i for i in range(length)) new_parts = [] if span_start - s - min_space >= keep_length: new_parts.append((s, span_start - min_space + 1)) if e - span_start - keep_length - min_space > keep_length: new_parts.append((span_start + length + min_space, e)) return new_parts parts = [(0, sz)] min_length = min(lengths) for length in sorted(lengths, reverse=True): lens = np.fromiter( (e - s if e - s >= length + min_space else 0 for s, e in parts), np.int, ) l_sum = np.sum(lens) if l_sum == 0: break probs = lens / np.sum(lens) c = np.random.choice(len(parts), p=probs) s, e = parts.pop(c) parts.extend(arrange(s, e, length, min_length)) mask_idc = np.asarray(mask_idc) else: min_len = min(lengths) if sz - min_len <= num_mask: min_len = sz - num_mask - 1 mask_idc = np.random.choice(sz - min_len, num_mask, replace=False) mask_idc = np.asarray( [ mask_idc[j] + offset for j in range(len(mask_idc)) for offset in range(lengths[j]) ] ) mask_idcs.append(np.unique(mask_idc[mask_idc < sz])) min_len = min([len(m) for m in mask_idcs]) for i, mask_idc in enumerate(mask_idcs): if len(mask_idc) > min_len: mask_idc = np.random.choice(mask_idc, min_len, replace=False) mask[i, mask_idc] = True return mask def get_mem_usage(): try: import psutil mb = 1024 * 1024 return f"used={psutil.virtual_memory().used / mb}Mb; avail={psutil.virtual_memory().available / mb}Mb" except ImportError: return "N/A" # lens: torch.LongTensor # returns: torch.BoolTensor def lengths_to_padding_mask(lens): bsz, max_lens = lens.size(0), torch.max(lens).item() mask = torch.arange(max_lens).to(lens.device).view(1, max_lens) mask = mask.expand(bsz, -1) >= lens.view(bsz, 1).expand(-1, max_lens) return mask # lens: torch.LongTensor # returns: torch.BoolTensor def lengths_to_mask(lens): return ~lengths_to_padding_mask(lens) def get_buckets(sizes, num_buckets): buckets = np.unique( np.percentile( sizes, np.linspace(0, 100, num_buckets + 1), interpolation="lower", )[1:] ) return 
buckets def get_bucketed_sizes(orig_sizes, buckets): sizes = np.copy(orig_sizes) assert np.min(sizes) >= 0 start_val = -1 for end_val in buckets: mask = (sizes > start_val) & (sizes <= end_val) sizes[mask] = end_val start_val = end_val return sizes def _find_extra_valid_paths(dataset_path: str) -> set: paths = utils.split_paths(dataset_path) all_valid_paths = set() for sub_dir in paths: contents = PathManager.ls(sub_dir) valid_paths = [c for c in contents if re.match("valid*[0-9].*", c) is not None] all_valid_paths |= {os.path.basename(p) for p in valid_paths} # Remove .bin, .idx etc roots = {os.path.splitext(p)[0] for p in all_valid_paths} return roots def raise_if_valid_subsets_unintentionally_ignored(train_cfg) -> None: """Raises if there are paths matching 'valid*[0-9].*' which are not combined or ignored.""" if ( train_cfg.dataset.ignore_unused_valid_subsets or train_cfg.dataset.combine_valid_subsets or train_cfg.dataset.disable_validation or not hasattr(train_cfg.task, "data") ): return other_paths = _find_extra_valid_paths(train_cfg.task.data) specified_subsets = train_cfg.dataset.valid_subset.split(",") ignored_paths = [p for p in other_paths if p not in specified_subsets] if ignored_paths: advice = "Set --combine-val to combine them or --ignore-unused-valid-subsets to ignore them." msg = f"Valid paths {ignored_paths} will be ignored. {advice}" raise ValueError(msg)
21,294
34.789916
120
py
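A minimal usage sketch for two helpers defined in the data_utils file above (compute_mask_indices and post_process). It assumes this repository is installed so that fairseq.data.data_utils is importable, as in upstream fairseq; the shapes, probabilities, and the sentencepiece string are illustrative values, not taken from the corpus.

from fairseq.data.data_utils import compute_mask_indices, post_process

# Mask roughly 65% of each 100-step sequence with fixed spans of length 10,
# forcing at least 2 spans per sequence. The result is a boolean numpy array.
mask = compute_mask_indices(
    shape=(2, 100),
    padding_mask=None,
    mask_prob=0.65,
    mask_length=10,
    mask_type="static",
    min_masks=2,
)
assert mask.shape == (2, 100) and mask.dtype == bool

# Undo sentencepiece segmentation on a decoded hypothesis string.
assert post_process("\u2581hello \u2581world", "sentencepiece") == "hello world"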
sign-topic
sign-topic-main/fairseq/data/nested_dictionary_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from collections import OrderedDict import torch from torch.utils.data.dataloader import default_collate from . import FairseqDataset def _flatten(dico, prefix=None): """Flatten a nested dictionary.""" new_dico = OrderedDict() if isinstance(dico, dict): prefix = prefix + "." if prefix is not None else "" for k, v in dico.items(): if v is None: continue new_dico.update(_flatten(v, prefix + k)) elif isinstance(dico, list): for i, v in enumerate(dico): new_dico.update(_flatten(v, prefix + ".[" + str(i) + "]")) else: new_dico = OrderedDict({prefix: dico}) return new_dico def _unflatten(dico): """Unflatten a flattened dictionary into a nested dictionary.""" new_dico = OrderedDict() for full_k, v in dico.items(): full_k = full_k.split(".") node = new_dico for k in full_k[:-1]: if k.startswith("[") and k.endswith("]"): k = int(k[1:-1]) if k not in node: node[k] = OrderedDict() node = node[k] node[full_k[-1]] = v return new_dico class NestedDictionaryDataset(FairseqDataset): def __init__(self, defn, sizes=None): super().__init__() self.defn = _flatten(defn) self.sizes = [sizes] if not isinstance(sizes, (list, tuple)) else sizes first = None for v in self.defn.values(): if not isinstance( v, ( FairseqDataset, torch.utils.data.Dataset, ), ): raise ValueError("Expected Dataset but found: {}".format(v.__class__)) first = first or v if len(v) > 0: assert len(v) == len(first), "dataset lengths must match" self._len = len(first) def __getitem__(self, index): return OrderedDict((k, ds[index]) for k, ds in self.defn.items()) def __len__(self): return self._len def collater(self, samples): """Merge a list of samples to form a mini-batch. Args: samples (List[dict]): samples to collate Returns: dict: a mini-batch suitable for forwarding with a Model """ if len(samples) == 0: return {} sample = OrderedDict() for k, ds in self.defn.items(): try: sample[k] = ds.collater([s[k] for s in samples]) except NotImplementedError: sample[k] = default_collate([s[k] for s in samples]) return _unflatten(sample) def num_tokens(self, index): """Return the number of tokens in a sample. This value is used to enforce ``--max-tokens`` during batching.""" return max(s[index] for s in self.sizes) def size(self, index): """Return an example's size as a float or tuple. This value is used when filtering a dataset with ``--max-positions``.""" if len(self.sizes) == 1: return self.sizes[0][index] else: return (s[index] for s in self.sizes) @property def supports_prefetch(self): """Whether this dataset supports prefetching.""" return any(ds.supports_prefetch for ds in self.defn.values()) def prefetch(self, indices): """Prefetch the data required for this epoch.""" for ds in self.defn.values(): if getattr(ds, "supports_prefetch", False): ds.prefetch(indices) @property def can_reuse_epoch_itr_across_epochs(self): return all(ds.can_reuse_epoch_itr_across_epochs for ds in self.defn.values()) def set_epoch(self, epoch): super().set_epoch(epoch) for ds in self.defn.values(): ds.set_epoch(epoch)
4,029
30.984127
86
py
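To make the flatten/unflatten behaviour of NestedDictionaryDataset above concrete, here is a small sketch. The TensorListDataset wrapper is an assumption invented for the example; only the NestedDictionaryDataset import refers to the file shown above.

import torch

from fairseq.data import FairseqDataset
from fairseq.data.nested_dictionary_dataset import NestedDictionaryDataset


class TensorListDataset(FairseqDataset):
    """Toy dataset over a list of 1-D tensors (illustrative only)."""

    def __init__(self, tensors):
        self.tensors = tensors

    def __getitem__(self, index):
        return self.tensors[index]

    def __len__(self):
        return len(self.tensors)

    def collater(self, samples):
        return torch.stack(samples)


src = TensorListDataset([torch.tensor([1, 2]), torch.tensor([3, 4])])
tgt = TensorListDataset([torch.tensor([5, 6]), torch.tensor([7, 8])])
ds = NestedDictionaryDataset({"net_input": {"src_tokens": src}, "target": tgt})

# Items come back with flattened keys such as "net_input.src_tokens"; the
# collater restores the nested structure, so the batch below exposes
# batch["net_input"]["src_tokens"] of shape (2, 2).
batch = ds.collater([ds[0], ds[1]])
assert batch["net_input"]["src_tokens"].shape == (2, 2)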
sign-topic
sign-topic-main/fairseq/data/add_target_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch from . import BaseWrapperDataset, data_utils from fairseq.data.text_compressor import TextCompressor, TextCompressionLevel class AddTargetDataset(BaseWrapperDataset): def __init__( self, dataset, labels, pad, eos, batch_targets, process_label=None, label_len_fn=None, add_to_input=False, text_compression_level=TextCompressionLevel.none, ): super().__init__(dataset) self.labels = labels self.batch_targets = batch_targets self.pad = pad self.eos = eos self.process_label = process_label self.label_len_fn = label_len_fn self.add_to_input = add_to_input self.text_compressor = TextCompressor(level=text_compression_level) def get_label(self, index, process_fn=None): lbl = self.labels[index] lbl = self.text_compressor.decompress(lbl) return lbl if process_fn is None else process_fn(lbl) def __getitem__(self, index): item = self.dataset[index] item["label"] = self.get_label(index, process_fn=self.process_label) return item def size(self, index): sz = self.dataset.size(index) own_sz = self.label_len_fn(self.get_label(index)) return sz, own_sz def collater(self, samples): collated = self.dataset.collater(samples) if len(collated) == 0: return collated indices = set(collated["id"].tolist()) target = [s["label"] for s in samples if s["id"] in indices] if self.batch_targets: collated["target_lengths"] = torch.LongTensor([len(t) for t in target]) target = data_utils.collate_tokens(target, pad_idx=self.pad, left_pad=False) collated["ntokens"] = collated["target_lengths"].sum().item() else: collated["ntokens"] = sum([len(t) for t in target]) collated["target"] = target if self.add_to_input: eos = target.new_full((target.size(0), 1), self.eos) collated["target"] = torch.cat([target, eos], dim=-1).long() collated["net_input"]["prev_output_tokens"] = torch.cat( [eos, target], dim=-1 ).long() collated["ntokens"] += target.size(0) return collated def filter_indices_by_size(self, indices, max_sizes): indices, ignored = data_utils._filter_by_size_dynamic( indices, self.size, max_sizes ) return indices, ignored
2,718
33.417722
88
py
sign-topic
sign-topic-main/fairseq/data/transform_eos_lang_pair_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Optional import torch from . import FairseqDataset class TransformEosLangPairDataset(FairseqDataset): """A :class:`~fairseq.data.FairseqDataset` wrapper that transform bos on collated samples of language pair dataset. Note that the transformation is applied in :func:`collater`. Args: dataset (~fairseq.data.FairseqDataset): dataset that collates sample into LanguagePairDataset schema src_eos (int): original source end-of-sentence symbol index to be replaced new_src_eos (int, optional): new end-of-sentence symbol index to replace source eos symbol tgt_bos (int, optional): original target beginning-of-sentence symbol index to be replaced new_tgt_bos (int, optional): new beginning-of-sentence symbol index to replace at the beginning of 'prev_output_tokens' """ def __init__( self, dataset: FairseqDataset, src_eos: int, new_src_eos: Optional[int] = None, tgt_bos: Optional[int] = None, new_tgt_bos: Optional[int] = None, ): self.dataset = dataset self.src_eos = src_eos self.new_src_eos = new_src_eos self.tgt_bos = tgt_bos self.new_tgt_bos = new_tgt_bos def __getitem__(self, index): return self.dataset[index] def __len__(self): return len(self.dataset) def collater(self, samples, **extra_args): samples = self.dataset.collater(samples, **extra_args) if len(samples) == 0: return samples if "net_input" not in samples: return samples if self.new_src_eos is not None: if self.dataset.left_pad_source: assert ( samples["net_input"]["src_tokens"][:, -1] != self.src_eos ).sum() == 0 samples["net_input"]["src_tokens"][:, -1] = self.new_src_eos else: eos_idx = samples["net_input"]["src_lengths"] - 1 assert ( samples["net_input"]["src_tokens"][ torch.arange(eos_idx.size(0)), eos_idx ] != self.src_eos ).sum() == 0 eos_idx = eos_idx.resize_(len(samples["net_input"]["src_lengths"]), 1) samples["net_input"]["src_tokens"].scatter_( 1, eos_idx, self.new_src_eos ) if ( self.new_tgt_bos is not None and "prev_output_tokens" in samples["net_input"] ): if self.dataset.left_pad_target: # TODO: support different padding direction on target side raise NotImplementedError( "TransformEosLangPairDataset does not implement --left-pad-target True option" ) else: assert ( samples["net_input"]["prev_output_tokens"][:, 0] != self.tgt_bos ).sum() == 0 samples["net_input"]["prev_output_tokens"][:, 0] = self.new_tgt_bos return samples def num_tokens(self, index): return self.dataset.num_tokens(index) def size(self, index): return self.dataset.size(index) @property def sizes(self): # dataset.sizes can be a dynamically computed sizes: return self.dataset.sizes def ordered_indices(self): return self.dataset.ordered_indices() @property def supports_prefetch(self): return getattr(self.dataset, "supports_prefetch", False) def prefetch(self, indices): return self.dataset.prefetch(indices)
3,856
32.833333
98
py
sign-topic
sign-topic-main/fairseq/data/lm_context_window_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import numpy as np import torch from typing import Dict from fairseq.data.monolingual_dataset import MonolingualDataset from . import FairseqDataset class LMContextWindowDataset(FairseqDataset): """ Wraps a MonolingualDataset and provides more context for evaluation. Each item in the new dataset will have a maximum size of ``tokens_per_sample + context_window``. Args: dataset: dataset to wrap tokens_per_sample (int): the max number of tokens in each dataset item context_window (int): the number of accumulated tokens to add to each dataset item pad_idx (int): padding symbol """ def __init__( self, dataset: MonolingualDataset, tokens_per_sample: int, context_window: int, pad_idx: int, ): assert context_window > 0 self.dataset = dataset self.tokens_per_sample = tokens_per_sample self.context_window = context_window self.pad_idx = pad_idx self.prev_tokens = np.empty([0]) def __getitem__(self, index): return self.dataset[index] def __len__(self): return len(self.dataset) def collater(self, samples) -> Dict: sample = self.dataset.collater(samples) pad = self.pad_idx max_sample_len = self.tokens_per_sample + self.context_window bsz, tsz = sample["net_input"]["src_tokens"].shape start_idxs = [0] * bsz toks = sample["net_input"]["src_tokens"] lengths = sample["net_input"]["src_lengths"] tgt = sample["target"] new_toks = np.empty([bsz, tsz + self.context_window], dtype=np.int64) new_tgt = np.full([bsz, tsz + self.context_window], pad, dtype=np.int64) sample_lens = toks.ne(pad).long().sum(dim=1).cpu() for i in range(bsz): sample_len = sample_lens[i] extra = len(self.prev_tokens) + sample_len - max_sample_len if extra > 0: self.prev_tokens = self.prev_tokens[extra:] pads = np.full(self.context_window - len(self.prev_tokens), pad) new_toks[i] = np.concatenate([self.prev_tokens, toks[i].numpy(), pads]) new_tgt[ i, len(self.prev_tokens) : len(self.prev_tokens) + len(tgt[i]) ] = tgt[i] start_idxs[i] = len(self.prev_tokens) lengths[i] += len(self.prev_tokens) self.prev_tokens = new_toks[i][new_toks[i] != pad][-self.context_window :] sample["net_input"]["src_tokens"] = torch.from_numpy(new_toks) sample["target"] = torch.from_numpy(new_tgt) sample["start_indices"] = start_idxs return sample def num_tokens(self, index): return self.dataset.num_tokens(index) def size(self, index): return self.dataset.size(index) def ordered_indices(self): # NOTE we don't shuffle the data to retain access to the previous dataset elements return np.arange(len(self.dataset)) @property def supports_prefetch(self): return getattr(self.dataset, "supports_prefetch", False) def prefetch(self, indices): return self.dataset.prefetch(indices)
3,381
33.510204
90
py
sign-topic
sign-topic-main/fairseq/data/colorize_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch

from . import BaseWrapperDataset


class ColorizeDataset(BaseWrapperDataset):
    """Adds 'colors' property to net input that is obtained from the provided color getter for use by models"""

    def __init__(self, dataset, color_getter):
        super().__init__(dataset)
        self.color_getter = color_getter

    def collater(self, samples):
        base_collate = super().collater(samples)
        if len(base_collate) > 0:
            base_collate["net_input"]["colors"] = torch.tensor(
                list(self.color_getter(self.dataset, s["id"]) for s in samples),
                dtype=torch.long,
            )
        return base_collate
843
31.461538
111
py
sign-topic
sign-topic-main/fairseq/data/iterators.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import itertools import logging import math import operator import os import queue import time from threading import Thread import numpy as np import torch from fairseq.data import data_utils logger = logging.getLogger(__name__) # Object used by _background_consumer to signal the source is exhausted # to the main thread. _sentinel = object() class CountingIterator(object): """Wrapper around an iterable that maintains the iteration count. Args: iterable (iterable): iterable to wrap start (int): starting iteration count. Note that this doesn't actually advance the iterator. total (int): override the iterator length returned by ``__len``. This can be used to truncate *iterator*. Attributes: n (int): number of elements consumed from this iterator """ def __init__(self, iterable, start=None, total=None): self._itr = iter(iterable) self.n = start or getattr(iterable, "n", 0) self.total = total if total is not None else self.n + len(iterable) def __len__(self): return self.total def __iter__(self): return self def __next__(self): if not self.has_next(): raise StopIteration try: x = next(self._itr) except StopIteration: raise IndexError( f"Iterator expected to have length {self.total}, " "but exhausted at position {self.n}." ) self.n += 1 return x def has_next(self): """Whether the iterator has been exhausted.""" return self.n < self.total def skip(self, n): """Fast-forward the iterator by skipping n elements.""" for _ in range(n): next(self) return self def take(self, n): """Truncate the iterator to n elements at most.""" self.total = min(self.total, n) # Propagate this change to the underlying iterator if hasattr(self._itr, "take"): self._itr.take(max(n - self.n, 0)) return self class EpochBatchIterating(object): def __len__(self) -> int: raise NotImplementedError @property def next_epoch_idx(self): raise NotImplementedError def next_epoch_itr( self, shuffle=True, fix_batches_to_gpus=False, set_dataset_epoch=True ): """Return a new iterator over the dataset. Args: shuffle (bool, optional): shuffle batches before returning the iterator (default: True). fix_batches_to_gpus (bool, optional): ensure that batches are always allocated to the same shards across epochs. Requires that :attr:`dataset` supports prefetching (default: False). set_dataset_epoch (bool, optional): update the wrapped Dataset with the new epoch number (default: True). """ raise NotImplementedError def end_of_epoch(self) -> bool: """Returns whether the most recent epoch iterator has been exhausted""" raise NotImplementedError @property def iterations_in_epoch(self) -> int: """The number of consumed batches in the current epoch.""" raise NotImplementedError def state_dict(self): """Returns a dictionary containing a whole state of the iterator.""" raise NotImplementedError def load_state_dict(self, state_dict): """Copies the state of the iterator from the given *state_dict*.""" raise NotImplementedError @property def first_batch(self): return "DUMMY" class StreamingEpochBatchIterator(EpochBatchIterating): """A steaming-style iterator over a :class:`torch.utils.data.IterableDataset`. Args: dataset (~torch.utils.data.Dataset): dataset from which to load the data max_sentences: batch size collate_fn (callable): merges a list of samples to form a mini-batch num_workers (int, optional): how many subprocesses to use for data loading. 
0 means the data will be loaded in the main process (default: 0). epoch (int, optional): the epoch to start the iterator from (default: 1). buffer_size (int, optional): the number of batches to keep ready in the queue. Helps speeding up dataloading. When buffer_size is zero, the default torch.utils.data.DataLoader preloading is used. timeout (int, optional): if positive, the timeout value for collecting a batch from workers. Should always be non-negative (default: ``0``). """ def __init__( self, dataset, max_sentences=1, collate_fn=None, epoch=1, num_workers=0, buffer_size=0, timeout=0, ): assert isinstance(dataset, torch.utils.data.IterableDataset) self.dataset = dataset self.max_sentences = max_sentences self.collate_fn = collate_fn self.epoch = max(epoch, 1) # we use 1-based indexing for epochs self.num_workers = num_workers # This upper limit here is to prevent people from abusing this feature # in a shared computing environment. self.buffer_size = min(buffer_size, 20) self.timeout = timeout self._current_epoch_iterator = None @property def next_epoch_idx(self): """Return the epoch index after *next_epoch_itr* is called.""" if self._current_epoch_iterator is not None and self.end_of_epoch(): return self.epoch + 1 else: return self.epoch def next_epoch_itr( self, shuffle=True, fix_batches_to_gpus=False, set_dataset_epoch=True ): self.epoch = self.next_epoch_idx if set_dataset_epoch and hasattr(self.dataset, "set_epoch"): self.dataset.set_epoch(self.epoch) self._current_epoch_iterator = self._get_iterator_for_epoch(self.epoch, shuffle) return self._current_epoch_iterator def end_of_epoch(self) -> bool: return not self._current_epoch_iterator.has_next() @property def iterations_in_epoch(self) -> int: if self._current_epoch_iterator is not None: return self._current_epoch_iterator.n return 0 def state_dict(self): return { "epoch": self.epoch, } def load_state_dict(self, state_dict): self.epoch = state_dict["epoch"] def _get_iterator_for_epoch(self, epoch, shuffle, offset=0): if self.num_workers > 0: os.environ["PYTHONWARNINGS"] = "ignore:semaphore_tracker:UserWarning" # Create data loader worker_init_fn = getattr(self.dataset, "worker_init_fn", None) itr = torch.utils.data.DataLoader( self.dataset, batch_size=self.max_sentences, collate_fn=self.collate_fn, num_workers=self.num_workers, timeout=self.timeout, worker_init_fn=worker_init_fn, pin_memory=True, ) # Wrap with a BufferedIterator if needed if self.buffer_size > 0: itr = BufferedIterator(self.buffer_size, itr) # Wrap with CountingIterator itr = CountingIterator(itr, start=offset) return itr class EpochBatchIterator(EpochBatchIterating): """A multi-epoch iterator over a :class:`torch.utils.data.Dataset`. Compared to :class:`torch.utils.data.DataLoader`, this iterator: - can be reused across multiple epochs with the :func:`next_epoch_itr` method (optionally shuffled between epochs) - can be serialized/deserialized with the :func:`state_dict` and :func:`load_state_dict` methods - supports sharding with the *num_shards* and *shard_id* arguments Args: dataset (~torch.utils.data.Dataset): dataset from which to load the data collate_fn (callable): merges a list of samples to form a mini-batch batch_sampler (~torch.utils.data.Sampler or a callable): an iterator over batches of indices, or a callable to create such an iterator (~torch.utils.data.Sampler). A callable batch_sampler will be called for each epoch to enable per epoch dynamic batch iterators defined by this callable batch_sampler. 
seed (int, optional): seed for random number generator for reproducibility (default: 1). num_shards (int, optional): shard the data iterator into N shards (default: 1). shard_id (int, optional): which shard of the data iterator to return (default: 0). num_workers (int, optional): how many subprocesses to use for data loading. 0 means the data will be loaded in the main process (default: 0). epoch (int, optional): the epoch to start the iterator from (default: 1). buffer_size (int, optional): the number of batches to keep ready in the queue. Helps speeding up dataloading. When buffer_size is zero, the default torch.utils.data.DataLoader preloading is used. timeout (int, optional): if positive, the timeout value for collecting a batch from workers. Should always be non-negative (default: ``0``). disable_shuffling (bool, optional): force disable shuffling (default: ``False``). skip_remainder_batch (bool, optional): if set, discard the last batch in an epoch for the sake of training stability, as the last batch is usually smaller than local_batch_size * distributed_word_size (default: ``False``). grouped_shuffling (bool, optional): enable shuffling batches in groups of num_shards. Ensures that each GPU receives similar length sequences when batches are sorted by length. """ def __init__( self, dataset, collate_fn, batch_sampler, seed=1, num_shards=1, shard_id=0, num_workers=0, epoch=1, buffer_size=0, timeout=0, disable_shuffling=False, skip_remainder_batch=False, grouped_shuffling=False, ): assert isinstance(dataset, torch.utils.data.Dataset) self.dataset = dataset self.collate_fn = collate_fn self.batch_sampler = batch_sampler self._frozen_batches = ( tuple(batch_sampler) if not callable(batch_sampler) else None ) self.seed = seed self.num_shards = num_shards self.shard_id = shard_id self.num_workers = num_workers # This upper limit here is to prevent people from abusing this feature # in a shared computing environment. self.buffer_size = min(buffer_size, 20) self.timeout = timeout self.disable_shuffling = disable_shuffling self.skip_remainder_batch = skip_remainder_batch self.grouped_shuffling = grouped_shuffling self.epoch = max(epoch, 1) # we use 1-based indexing for epochs self.shuffle = not disable_shuffling self._cur_epoch_itr = None self._next_epoch_itr = None self._supports_prefetch = getattr(dataset, "supports_prefetch", False) @property def frozen_batches(self): if self._frozen_batches is None: self._frozen_batches = tuple(self.batch_sampler(self.dataset, self.epoch)) return self._frozen_batches @property def first_batch(self): if len(self.frozen_batches) == 0: raise Exception( "The dataset is empty. This could indicate " "that all elements in the dataset have been skipped. " "Try increasing the max number of allowed tokens or using " "a larger dataset." ) if getattr(self.dataset, "supports_fetch_outside_dataloader", True): return self.collate_fn([self.dataset[i] for i in self.frozen_batches[0]]) else: return "DUMMY" def __len__(self): return int(math.ceil(len(self.frozen_batches) / float(self.num_shards))) @property def n(self): return self.iterations_in_epoch @property def next_epoch_idx(self): """Return the epoch index after *next_epoch_itr* is called.""" if self._next_epoch_itr is not None: return self.epoch elif self._cur_epoch_itr is not None and self.end_of_epoch(): return self.epoch + 1 else: return self.epoch def next_epoch_itr( self, shuffle=True, fix_batches_to_gpus=False, set_dataset_epoch=True ): """Return a new iterator over the dataset. 
Args: shuffle (bool, optional): shuffle batches before returning the iterator (default: True). fix_batches_to_gpus (bool, optional): ensure that batches are always allocated to the same shards across epochs. Requires that :attr:`dataset` supports prefetching (default: False). set_dataset_epoch (bool, optional): update the wrapped Dataset with the new epoch number (default: True). """ if self.disable_shuffling: shuffle = False prev_epoch = self.epoch self.epoch = self.next_epoch_idx if set_dataset_epoch and hasattr(self.dataset, "set_epoch"): self.dataset.set_epoch(self.epoch) if self._next_epoch_itr is not None: self._cur_epoch_itr = self._next_epoch_itr self._next_epoch_itr = None else: if callable(self.batch_sampler) and prev_epoch != self.epoch: # reset _frozen_batches to refresh the next epoch self._frozen_batches = None self._cur_epoch_itr = self._get_iterator_for_epoch( self.epoch, shuffle, fix_batches_to_gpus=fix_batches_to_gpus, ) self.shuffle = shuffle return self._cur_epoch_itr def end_of_epoch(self) -> bool: """Returns whether the most recent epoch iterator has been exhausted""" return not self._cur_epoch_itr.has_next() @property def iterations_in_epoch(self): """The number of consumed batches in the current epoch.""" if self._cur_epoch_itr is not None: return self._cur_epoch_itr.n elif self._next_epoch_itr is not None: return self._next_epoch_itr.n return 0 def state_dict(self): """Returns a dictionary containing a whole state of the iterator.""" if self.end_of_epoch(): epoch = self.epoch + 1 iter_in_epoch = 0 else: epoch = self.epoch iter_in_epoch = self.iterations_in_epoch return { "version": 2, "epoch": epoch, "iterations_in_epoch": iter_in_epoch, "shuffle": self.shuffle, } def load_state_dict(self, state_dict): """Copies the state of the iterator from the given *state_dict*.""" self.epoch = state_dict["epoch"] itr_pos = state_dict.get("iterations_in_epoch", 0) version = state_dict.get("version", 1) if itr_pos > 0: # fast-forward epoch iterator self._next_epoch_itr = self._get_iterator_for_epoch( self.epoch, shuffle=state_dict.get("shuffle", True), offset=itr_pos, ) if self._next_epoch_itr is None: if version == 1: # legacy behavior: we finished the epoch, increment epoch counter self.epoch += 1 else: raise RuntimeError( "Cannot resume training due to dataloader mismatch, please " "report this to the fairseq developers. You can relaunch " "training with `--reset-dataloader` and it should work." 
) else: self._next_epoch_itr = None def _get_iterator_for_epoch( self, epoch, shuffle, fix_batches_to_gpus=False, offset=0 ): def shuffle_batches(batches, seed): with data_utils.numpy_seed(seed): if self.grouped_shuffling: grouped_batches = [ batches[(i * self.num_shards) : ((i + 1) * self.num_shards)] for i in range((len(batches) // self.num_shards)) ] np.random.shuffle(grouped_batches) batches = list(itertools.chain(*grouped_batches)) else: np.random.shuffle(batches) return batches if self._supports_prefetch: batches = self.frozen_batches if shuffle and not fix_batches_to_gpus: batches = shuffle_batches(list(batches), self.seed + epoch) batches = list( ShardedIterator(batches, self.num_shards, self.shard_id, fill_value=[]) ) self.dataset.prefetch([i for s in batches for i in s]) if shuffle and fix_batches_to_gpus: batches = shuffle_batches(batches, self.seed + epoch + self.shard_id) else: if shuffle: batches = shuffle_batches(list(self.frozen_batches), self.seed + epoch) else: batches = self.frozen_batches batches = list( ShardedIterator(batches, self.num_shards, self.shard_id, fill_value=[]) ) if offset > 0 and offset >= len(batches): return None if self.num_workers > 0: os.environ["PYTHONWARNINGS"] = "ignore:semaphore_tracker:UserWarning" # Create data loader itr = torch.utils.data.DataLoader( self.dataset, collate_fn=self.collate_fn, batch_sampler=batches[offset:], num_workers=self.num_workers, timeout=self.timeout, pin_memory=True, ) # Wrap with a BufferedIterator if needed if self.buffer_size > 0: itr = BufferedIterator(self.buffer_size, itr) # Wrap with CountingIterator itr = CountingIterator(itr, start=offset) if self.skip_remainder_batch: # TODO: Below is a lazy implementation which discard the final batch regardless # of whether it is a full batch or not. total_num_itrs = len(batches) - 1 itr.take(total_num_itrs) logger.info(f"skip final residual batch, total_num_itrs = {total_num_itrs}") return itr class GroupedIterator(CountingIterator): """Wrapper around an iterable that returns groups (chunks) of items. Args: iterable (iterable): iterable to wrap chunk_size (int): size of each chunk skip_remainder_batch (bool, optional): if set, discard the last grouped batch in each training epoch, as the last grouped batch is usually smaller than local_batch_size * distributed_word_size * chunk_size (default: ``False``). Attributes: n (int): number of elements consumed from this iterator """ def __init__(self, iterable, chunk_size, skip_remainder_batch=False): if skip_remainder_batch: total_num_itrs = int(math.floor(len(iterable) / float(chunk_size))) logger.info( f"skip final residual batch, grouped total_num_itrs = {total_num_itrs}" ) else: total_num_itrs = int(math.ceil(len(iterable) / float(chunk_size))) logger.info(f"grouped total_num_itrs = {total_num_itrs}") itr = _chunk_iterator(iterable, chunk_size, skip_remainder_batch) super().__init__( itr, start=int(math.ceil(getattr(iterable, "n", 0) / float(chunk_size))), total=total_num_itrs, ) self.chunk_size = chunk_size if skip_remainder_batch: self.take(total_num_itrs) # TODO: [Hack] Here the grouped iterator modifies the base iterator size so that # training can move into the next epoch once the grouped iterator is exhausted. # Double-check this implementation in case unexpected behavior occurs. 
iterable.take(total_num_itrs * chunk_size) def _chunk_iterator(itr, chunk_size, skip_remainder_batch=False): chunk = [] for x in itr: chunk.append(x) if len(chunk) == chunk_size: yield chunk chunk = [] if not skip_remainder_batch and len(chunk) > 0: yield chunk class ShardedIterator(CountingIterator): """A sharded wrapper around an iterable, padded to length. Args: iterable (iterable): iterable to wrap num_shards (int): number of shards to split the iterable into shard_id (int): which shard to iterator over fill_value (Any, optional): padding value when the iterable doesn't evenly divide *num_shards* (default: None). Attributes: n (int): number of elements consumed from this iterator """ def __init__( self, iterable, num_shards, shard_id, fill_value=None, skip_remainder_batch=None ): """ Args: skip_remainder_batch: ignored""" if shard_id < 0 or shard_id >= num_shards: raise ValueError("shard_id must be between 0 and num_shards") sharded_len = int(math.ceil(len(iterable) / float(num_shards))) itr = map( operator.itemgetter(1), itertools.zip_longest( range(sharded_len), itertools.islice(iterable, shard_id, len(iterable), num_shards), fillvalue=fill_value, ), ) super().__init__( itr, start=int(math.ceil(getattr(iterable, "n", 0) / float(num_shards))), total=sharded_len, ) class BackgroundConsumer(Thread): def __init__(self, queue, source, max_len, cuda_device): Thread.__init__(self) self._queue = queue self._source = source self._max_len = max_len self.count = 0 self.cuda_device = cuda_device def run(self): # set_device to avoid creation of GPU0 context when using pin_memory if self.cuda_device is not None: torch.cuda.set_device(self.cuda_device) try: for item in self._source: self._queue.put(item) # Stop if we reached the maximum length self.count += 1 if self._max_len is not None and self.count >= self._max_len: break # Signal the consumer we are done. self._queue.put(_sentinel) except Exception as e: self._queue.put(e) class BufferedIterator(object): def __init__(self, size, iterable): self._queue = queue.Queue(size) self._iterable = iterable self._consumer = None self.start_time = time.time() self.warning_time = None self.total = len(iterable) def _create_consumer(self): self._consumer = BackgroundConsumer( self._queue, self._iterable, self.total, torch.cuda.current_device() if torch.cuda.is_available() else None, ) self._consumer.daemon = True self._consumer.start() def __iter__(self): return self def __len__(self): return self.total def take(self, n): self.total = min(self.total, n) # Propagate this change to the underlying iterator if hasattr(self._iterable, "take"): self._iterable.take(n) return self def __next__(self): # Create consumer if not created yet if self._consumer is None: self._create_consumer() # Notify the user if there is a data loading bottleneck if self._queue.qsize() < min(2, max(1, self._queue.maxsize // 2)): if time.time() - self.start_time > 5 * 60: if ( self.warning_time is None or time.time() - self.warning_time > 15 * 60 ): logger.debug( "Data loading buffer is empty or nearly empty. This may " "indicate a data loading bottleneck, and increasing the " "number of workers (--num-workers) may help." ) self.warning_time = time.time() # Get next example item = self._queue.get(True) if isinstance(item, Exception): raise item if item is _sentinel: raise StopIteration() return item class GroupedEpochBatchIterator(EpochBatchIterator): """Grouped version of EpochBatchIterator It takes several samplers from different datasets. 
Each epoch shuffle the dataset wise sampler individually with different random seed. The those sub samplers are combined with into one big samplers with deterministic permutation to mix batches from different datasets. It will act like EpochBatchIterator but make sure 1) data from one data set each time 2) for different workers, they use the same order to fetch the data so they will use data from the same dataset everytime mult_rate is used for update_freq > 1 case where we want to make sure update_freq mini-batches come from same source """ def __init__( self, dataset, collate_fn, batch_samplers, seed=1, num_shards=1, shard_id=0, num_workers=0, epoch=0, mult_rate=1, buffer_size=0, skip_remainder_batch=False, ): super().__init__( dataset, collate_fn, batch_samplers, seed, num_shards, shard_id, num_workers, epoch, buffer_size, skip_remainder_batch=skip_remainder_batch, ) # level 0: sub-samplers 1: batch_idx 2: batches self._frozen_batches = tuple([tuple(sub_batch) for sub_batch in batch_samplers]) self.step_size = mult_rate * num_shards self.lengths = [ (len(x) // self.step_size) * self.step_size for x in self.frozen_batches ] def __len__(self): return sum(self.lengths) @property def first_batch(self): if len(self.frozen_batches) == 0: raise Exception( "The dataset is empty. This could indicate " "that all elements in the dataset have been skipped. " "Try increasing the max number of allowed tokens or using " "a larger dataset." ) if self.dataset.supports_fetch_outside_dataloader: return self.collate_fn([self.dataset[i] for i in self.frozen_batches[0][0]]) else: return "DUMMY" def _get_iterator_for_epoch( self, epoch, shuffle, fix_batches_to_gpus=False, offset=0 ): def shuffle_batches(batches, seed): with data_utils.numpy_seed(seed): np.random.shuffle(batches) return batches def return_full_batches(batch_sets, seed, shuffle): if shuffle: batch_sets = [shuffle_batches(list(x), seed) for x in batch_sets] batch_sets = [ batch_sets[i][: self.lengths[i]] for i in range(len(batch_sets)) ] batches = list(itertools.chain.from_iterable(batch_sets)) if shuffle: with data_utils.numpy_seed(seed): idx = np.random.permutation(len(batches) // self.step_size) if len(idx) * self.step_size != len(batches): raise ValueError( "ERROR: %d %d %d %d" % (len(idx), self.step_size, len(batches), self.shard_id), ":".join(["%d" % x for x in self.lengths]), ) mini_shards = [ batches[i * self.step_size : (i + 1) * self.step_size] for i in idx ] batches = list(itertools.chain.from_iterable(mini_shards)) return batches if self._supports_prefetch: raise NotImplementedError("To be implemented") else: batches = return_full_batches( self.frozen_batches, self.seed + epoch, shuffle ) batches = list( ShardedIterator(batches, self.num_shards, self.shard_id, fill_value=[]) ) if offset > 0 and offset >= len(batches): return None if self.num_workers > 0: os.environ["PYTHONWARNINGS"] = "ignore:semaphore_tracker:UserWarning" itr = torch.utils.data.DataLoader( self.dataset, collate_fn=self.collate_fn, batch_sampler=batches[offset:], num_workers=self.num_workers, ) if self.buffer_size > 0: itr = BufferedIterator(self.buffer_size, itr) return CountingIterator(itr, start=offset)
29,702
35.135036
94
py
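Since most of the iterators module above is normally driven through trainer code, a compact sketch of the basic EpochBatchIterator loop may help; the RangeDataset, the hand-built batch_sampler, and the use of torch.stack as collate_fn are assumptions made only for illustration.

import torch

from fairseq.data.iterators import EpochBatchIterator


class RangeDataset(torch.utils.data.Dataset):
    """Toy map-style dataset returning its index as a 1-element tensor."""

    def __init__(self, n):
        self.n = n

    def __getitem__(self, index):
        return torch.tensor([index])

    def __len__(self):
        return self.n


dataset = RangeDataset(10)
# Pre-built batches of indices; in fairseq these normally come from
# data_utils.batch_by_size.
batch_sampler = [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]

epoch_iter = EpochBatchIterator(
    dataset=dataset,
    collate_fn=torch.stack,
    batch_sampler=batch_sampler,
    seed=1,
    num_workers=0,
)

itr = epoch_iter.next_epoch_itr(shuffle=True)
for batch in itr:
    pass  # each batch is a stacked LongTensor of up to 3 indices

assert epoch_iter.end_of_epoch()
state = epoch_iter.state_dict()  # resumable later via load_state_dict(state)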
sign-topic
sign-topic-main/fairseq/data/backtranslation_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch from fairseq import utils from . import FairseqDataset def backtranslate_samples(samples, collate_fn, generate_fn, cuda=True): """Backtranslate a list of samples. Given an input (*samples*) of the form: [{'id': 1, 'source': 'hallo welt'}] this will return: [{'id': 1, 'source': 'hello world', 'target': 'hallo welt'}] Args: samples (List[dict]): samples to backtranslate. Individual samples are expected to have a 'source' key, which will become the 'target' after backtranslation. collate_fn (callable): function to collate samples into a mini-batch generate_fn (callable): function to generate backtranslations cuda (bool): use GPU for generation (default: ``True``) Returns: List[dict]: an updated list of samples with a backtranslated source """ collated_samples = collate_fn(samples) s = utils.move_to_cuda(collated_samples) if cuda else collated_samples generated_sources = generate_fn(s) id_to_src = {sample["id"]: sample["source"] for sample in samples} # Go through each tgt sentence in batch and its corresponding best # generated hypothesis and create a backtranslation data pair # {id: id, source: generated backtranslation, target: original tgt} return [ { "id": id.item(), "target": id_to_src[id.item()], "source": hypos[0]["tokens"].cpu(), } for id, hypos in zip(collated_samples["id"], generated_sources) ] class BacktranslationDataset(FairseqDataset): """ Sets up a backtranslation dataset which takes a tgt batch, generates a src using a tgt-src backtranslation function (*backtranslation_fn*), and returns the corresponding `{generated src, input tgt}` batch. Args: tgt_dataset (~fairseq.data.FairseqDataset): the dataset to be backtranslated. Only the source side of this dataset will be used. After backtranslation, the source sentences in this dataset will be returned as the targets. src_dict (~fairseq.data.Dictionary): the dictionary of backtranslated sentences. tgt_dict (~fairseq.data.Dictionary, optional): the dictionary of sentences to be backtranslated. backtranslation_fn (callable, optional): function to call to generate backtranslations. This is typically the `generate` method of a :class:`~fairseq.sequence_generator.SequenceGenerator` object. Pass in None when it is not available at initialization time, and use set_backtranslation_fn function to set it when available. output_collater (callable, optional): function to call on the backtranslated samples to create the final batch (default: ``tgt_dataset.collater``). cuda: use GPU for generation """ def __init__( self, tgt_dataset, src_dict, tgt_dict=None, backtranslation_fn=None, output_collater=None, cuda=True, **kwargs ): self.tgt_dataset = tgt_dataset self.backtranslation_fn = backtranslation_fn self.output_collater = ( output_collater if output_collater is not None else tgt_dataset.collater ) self.cuda = cuda if torch.cuda.is_available() else False self.src_dict = src_dict self.tgt_dict = tgt_dict def __getitem__(self, index): """ Returns a single sample from *tgt_dataset*. Note that backtranslation is not applied in this step; use :func:`collater` instead to backtranslate a batch of samples. 
""" return self.tgt_dataset[index] def __len__(self): return len(self.tgt_dataset) def set_backtranslation_fn(self, backtranslation_fn): self.backtranslation_fn = backtranslation_fn def collater(self, samples): """Merge and backtranslate a list of samples to form a mini-batch. Using the samples from *tgt_dataset*, load a collated target sample to feed to the backtranslation model. Then take the backtranslation with the best score as the source and the original input as the target. Note: we expect *tgt_dataset* to provide a function `collater()` that will collate samples into the format expected by *backtranslation_fn*. After backtranslation, we will feed the new list of samples (i.e., the `(backtranslated source, original source)` pairs) to *output_collater* and return the result. Args: samples (List[dict]): samples to backtranslate and collate Returns: dict: a mini-batch with keys coming from *output_collater* """ if samples[0].get("is_dummy", False): return samples samples = backtranslate_samples( samples=samples, collate_fn=self.tgt_dataset.collater, generate_fn=(lambda net_input: self.backtranslation_fn(net_input)), cuda=self.cuda, ) return self.output_collater(samples) def num_tokens(self, index): """Just use the tgt dataset num_tokens""" return self.tgt_dataset.num_tokens(index) def ordered_indices(self): """Just use the tgt dataset ordered_indices""" return self.tgt_dataset.ordered_indices() def size(self, index): """Return an example's size as a float or tuple. This value is used when filtering a dataset with ``--max-positions``. Note: we use *tgt_dataset* to approximate the length of the source sentence, since we do not know the actual length until after backtranslation. """ tgt_size = self.tgt_dataset.size(index)[0] return (tgt_size, tgt_size) @property def supports_prefetch(self): return getattr(self.tgt_dataset, "supports_prefetch", False) def prefetch(self, indices): return self.tgt_dataset.prefetch(indices)
6,247
36.638554
84
py
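The backtranslate_samples helper above is easiest to see with stub functions in place of a real collater and SequenceGenerator; both stubs below, and the token ids, are assumptions for illustration only.

import torch

from fairseq.data.backtranslation_dataset import backtranslate_samples

samples = [{"id": 0, "source": torch.tensor([4, 5, 2])}]


def collate_fn(samples):
    # Stand-in for tgt_dataset.collater: only the "id" field is needed here.
    return {"id": torch.tensor([s["id"] for s in samples])}


def generate_fn(batch):
    # Stand-in for a backward model: one hypothesis list per collated sample.
    return [[{"tokens": torch.tensor([6, 7, 2])}]]


out = backtranslate_samples(samples, collate_fn, generate_fn, cuda=False)
# The generated hypothesis becomes the new source and the original source
# becomes the target of the synthetic pair.
assert out[0]["target"].tolist() == [4, 5, 2]
assert out[0]["source"].tolist() == [6, 7, 2]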
sign-topic
sign-topic-main/fairseq/data/monolingual_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import numpy as np import torch from . import FairseqDataset, data_utils def collate(samples, pad_idx, eos_idx, fixed_pad_length=None, pad_to_bsz=None): if len(samples) == 0: return {} def merge(key, is_list=False): if is_list: res = [] for i in range(len(samples[0][key])): res.append( data_utils.collate_tokens( [s[key][i] for s in samples], pad_idx, eos_idx, left_pad=False, pad_to_length=fixed_pad_length, pad_to_bsz=pad_to_bsz, ) ) return res else: return data_utils.collate_tokens( [s[key] for s in samples], pad_idx, eos_idx, left_pad=False, pad_to_length=fixed_pad_length, pad_to_bsz=pad_to_bsz, ) src_tokens = merge("source") if samples[0]["target"] is not None: is_target_list = isinstance(samples[0]["target"], list) target = merge("target", is_target_list) else: target = src_tokens return { "id": torch.LongTensor([s["id"] for s in samples]), "nsentences": len(samples), "ntokens": sum(len(s["source"]) for s in samples), "net_input": { "src_tokens": src_tokens, "src_lengths": torch.LongTensor([s["source"].numel() for s in samples]), }, "target": target, } class MonolingualDataset(FairseqDataset): """ A wrapper around torch.utils.data.Dataset for monolingual data. Args: dataset (torch.utils.data.Dataset): dataset to wrap sizes (List[int]): sentence lengths vocab (~fairseq.data.Dictionary): vocabulary shuffle (bool, optional): shuffle the elements before batching (default: True). """ def __init__( self, dataset, sizes, src_vocab, tgt_vocab=None, add_eos_for_other_targets=False, shuffle=False, targets=None, add_bos_token=False, fixed_pad_length=None, pad_to_bsz=None, src_lang_idx=None, tgt_lang_idx=None, ): self.dataset = dataset self.sizes = np.array(sizes) self.vocab = src_vocab self.tgt_vocab = tgt_vocab or src_vocab self.add_eos_for_other_targets = add_eos_for_other_targets self.shuffle = shuffle self.add_bos_token = add_bos_token self.fixed_pad_length = fixed_pad_length self.pad_to_bsz = pad_to_bsz self.src_lang_idx = src_lang_idx self.tgt_lang_idx = tgt_lang_idx assert targets is None or all( t in {"self", "future", "past"} for t in targets ), "targets must be none or one of 'self', 'future', 'past'" if targets is not None and len(targets) == 0: targets = None self.targets = targets def __getitem__(self, index): if self.targets is not None: # *future_target* is the original sentence # *source* is shifted right by 1 (maybe left-padded with eos) # *past_target* is shifted right by 2 (left-padded as needed) # # Left-to-right language models should condition on *source* and # predict *future_target*. # Right-to-left language models should condition on *source* and # predict *past_target*. 
source, future_target, past_target = self.dataset[index] source, target = self._make_source_target( source, future_target, past_target ) else: source = self.dataset[index] target = None source, target = self._maybe_add_bos(source, target) return {"id": index, "source": source, "target": target} def __len__(self): return len(self.dataset) def _make_source_target(self, source, future_target, past_target): if self.targets is not None: target = [] if ( self.add_eos_for_other_targets and (("self" in self.targets) or ("past" in self.targets)) and source[-1] != self.vocab.eos() ): # append eos at the end of source source = torch.cat([source, source.new([self.vocab.eos()])]) if "future" in self.targets: future_target = torch.cat( [future_target, future_target.new([self.vocab.pad()])] ) if "past" in self.targets: # first token is before the start of sentence which is only used in "none" break mode when # add_eos_for_other_targets is False past_target = torch.cat( [ past_target.new([self.vocab.pad()]), past_target[1:], source[-2, None], ] ) for t in self.targets: if t == "self": target.append(source) elif t == "future": target.append(future_target) elif t == "past": target.append(past_target) else: raise Exception("invalid target " + t) if len(target) == 1: target = target[0] else: target = future_target return source, self._filter_vocab(target) def _maybe_add_bos(self, source, target): if self.add_bos_token: source = torch.cat([source.new([self.vocab.bos()]), source]) if target is not None: target = torch.cat([target.new([self.tgt_vocab.bos()]), target]) return source, target def num_tokens_vec(self, indices): """Return the number of tokens for a set of positions defined by indices. This value is used to enforce ``--max-tokens`` during batching.""" return self.sizes[indices] def _filter_vocab(self, target): if len(self.tgt_vocab) != len(self.vocab): def _filter(target): mask = target.ge(len(self.tgt_vocab)) if mask.any(): target[mask] = self.tgt_vocab.unk() return target if isinstance(target, list): return [_filter(t) for t in target] return _filter(target) return target def collater(self, samples): """Merge a list of samples to form a mini-batch. Args: samples (List[dict]): samples to collate Returns: dict: a mini-batch with the following keys: - `id` (LongTensor): example IDs in the original input order - `ntokens` (int): total number of tokens in the batch - `net_input` (dict): the input to the Model, containing keys: - `src_tokens` (LongTensor): a padded 2D Tensor of tokens in the source sentence of shape `(bsz, src_len)`. Padding will appear on the right. - `target` (LongTensor): a padded 2D Tensor of tokens in the target sentence of shape `(bsz, tgt_len)`. Padding will appear on the right. """ return collate( samples, self.vocab.pad(), self.vocab.eos(), self.fixed_pad_length, self.pad_to_bsz, ) def num_tokens(self, index): """Return the number of tokens in a sample. This value is used to enforce ``--max-tokens`` during batching.""" return self.sizes[index] def size(self, index): """Return an example's size as a float or tuple. This value is used when filtering a dataset with ``--max-positions``.""" return self.sizes[index] def ordered_indices(self): """Return an ordered list of indices. 
Batches will be constructed based on this order.""" if self.shuffle: order = [np.random.permutation(len(self))] else: order = [np.arange(len(self))] order.append(self.sizes) return np.lexsort(order) @property def supports_prefetch(self): return getattr(self.dataset, "supports_prefetch", False) def prefetch(self, indices): self.dataset.prefetch(indices)
8,832
33.775591
110
py
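A small end-to-end sketch for MonolingualDataset above, using a fresh Dictionary and targets=None so that only the source side is collated; the two toy sentences and their sizes are assumptions made for the example.

import torch

from fairseq.data import Dictionary
from fairseq.data.monolingual_dataset import MonolingualDataset

vocab = Dictionary()  # provides pad()/eos()/bos()/unk() special symbols
for w in ("hello", "world"):
    vocab.add_symbol(w)

tokens = [
    torch.tensor([vocab.index("hello"), vocab.eos()]),
    torch.tensor([vocab.index("world"), vocab.eos()]),
]
ds = MonolingualDataset(tokens, sizes=[2, 2], src_vocab=vocab)

batch = ds.collater([ds[0], ds[1]])
# src_tokens is a right-padded (2, 2) LongTensor; with targets=None the
# "target" entry simply mirrors src_tokens.
assert batch["net_input"]["src_tokens"].shape == (2, 2)
assert batch["ntokens"] == 4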
sign-topic
sign-topic-main/fairseq/data/roll_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch

from . import BaseWrapperDataset


class RollDataset(BaseWrapperDataset):
    def __init__(self, dataset, shifts):
        super().__init__(dataset)
        self.shifts = shifts

    def __getitem__(self, index):
        item = self.dataset[index]
        return torch.roll(item, self.shifts)
485
24.578947
65
py
sign-topic
sign-topic-main/fairseq/data/replace_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from . import BaseWrapperDataset class ReplaceDataset(BaseWrapperDataset): """Replaces tokens found in the dataset by a specified replacement token Args: dataset (~torch.utils.data.Dataset): dataset to replace tokens in replace_map(Dictionary[int,int]): map of token to replace -> replacement token offsets (List[int]): do not replace tokens before (from left if pos, right if neg) this offset. should be as many as the number of objects returned by the underlying dataset __getitem__ method. """ def __init__(self, dataset, replace_map, offsets): super().__init__(dataset) assert len(replace_map) > 0 self.replace_map = replace_map self.offsets = offsets def __getitem__(self, index): item = self.dataset[index] is_tuple = isinstance(item, tuple) srcs = item if is_tuple else [item] for offset, src in zip(self.offsets, srcs): for k, v in self.replace_map.items(): src_off = src[offset:] if offset >= 0 else src[:offset] src_off.masked_fill_(src_off == k, v) item = srcs if is_tuple else srcs[0] return item
1,370
36.054054
113
py
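A toy sketch of ReplaceDataset above: token id 5 is rewritten to 7 everywhere except the first position of each item. The ListTensorDataset wrapper is an assumption for the example; note that the replacement happens in place on the returned tensor.

import torch

from fairseq.data import FairseqDataset
from fairseq.data.replace_dataset import ReplaceDataset


class ListTensorDataset(FairseqDataset):
    """Toy dataset over a list of 1-D tensors (illustrative only)."""

    def __init__(self, items):
        self.items = items

    def __getitem__(self, index):
        return self.items[index]

    def __len__(self):
        return len(self.items)


base = ListTensorDataset([torch.tensor([5, 5, 6])])
ds = ReplaceDataset(base, replace_map={5: 7}, offsets=[1])

# Position 0 is protected by the offset, so only the second occurrence of 5
# is replaced.
assert ds[0].tolist() == [5, 7, 6]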
sign-topic
sign-topic-main/fairseq/data/id_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch

from . import FairseqDataset


class IdDataset(FairseqDataset):
    def __getitem__(self, index):
        return index

    def __len__(self):
        return 0

    def collater(self, samples):
        return torch.tensor(samples)
423
20.2
65
py
sign-topic
sign-topic-main/fairseq/data/indexed_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import shutil import struct from functools import lru_cache import numpy as np import torch from fairseq.dataclass.constants import DATASET_IMPL_CHOICES from fairseq.data.fasta_dataset import FastaDataset from fairseq.file_io import PathManager from fairseq.data.huffman import HuffmanMMapIndexedDataset, HuffmanMMapIndex from . import FairseqDataset from typing import Union def best_fitting_int_dtype( max_int_to_represent, ) -> Union[np.uint16, np.uint32, np.int64]: if max_int_to_represent is None: return np.uint32 # Safe guess elif max_int_to_represent < 65500: return np.uint16 elif max_int_to_represent < 4294967295: return np.uint32 else: return np.int64 # we avoid np.uint64 because it doesn't save space and its type promotion behaves unexpectedly # https://github.com/numpy/numpy/issues/5745 def get_available_dataset_impl(): return list(map(str, DATASET_IMPL_CHOICES)) def infer_dataset_impl(path): if IndexedRawTextDataset.exists(path): return "raw" elif IndexedDataset.exists(path): with open(index_file_path(path), "rb") as f: magic = f.read(8) if magic == IndexedDataset._HDR_MAGIC: return "cached" elif magic == MMapIndexedDataset.Index._HDR_MAGIC[:8]: return "mmap" elif magic == HuffmanMMapIndex._HDR_MAGIC[:8]: return "huffman" else: return None elif FastaDataset.exists(path): return "fasta" else: return None def make_builder(out_file, impl, vocab_size=None): if impl == "mmap": return MMapIndexedDatasetBuilder( out_file, dtype=best_fitting_int_dtype(vocab_size) ) elif impl == "fasta": raise NotImplementedError elif impl == "huffman": raise ValueError( "Use HuffmanCodeBuilder directly as it has a different interface." 
) else: return IndexedDatasetBuilder(out_file) def make_dataset(path, impl, fix_lua_indexing=False, dictionary=None): if impl == "raw" and IndexedRawTextDataset.exists(path): assert dictionary is not None return IndexedRawTextDataset(path, dictionary) elif impl == "lazy" and IndexedDataset.exists(path): return IndexedDataset(path, fix_lua_indexing=fix_lua_indexing) elif impl == "cached" and IndexedDataset.exists(path): return IndexedCachedDataset(path, fix_lua_indexing=fix_lua_indexing) elif impl == "mmap" and MMapIndexedDataset.exists(path): return MMapIndexedDataset(path) elif impl == "fasta" and FastaDataset.exists(path): from fairseq.data.fasta_dataset import EncodedFastaDataset return EncodedFastaDataset(path, dictionary) elif impl == "huffman" and HuffmanMMapIndexedDataset.exists(path): return HuffmanMMapIndexedDataset(path) return None def dataset_exists(path, impl): if impl == "raw": return IndexedRawTextDataset.exists(path) elif impl == "mmap": return MMapIndexedDataset.exists(path) elif impl == "huffman": return HuffmanMMapIndexedDataset.exists(path) else: return IndexedDataset.exists(path) def read_longs(f, n): a = np.empty(n, dtype=np.int64) f.readinto(a) return a def write_longs(f, a): f.write(np.array(a, dtype=np.int64)) _code_to_dtype = { 1: np.uint8, 2: np.int8, 3: np.int16, 4: np.int32, 5: np.int64, 6: np.float, 7: np.double, 8: np.uint16, 9: np.uint32, 10: np.uint64, } def _dtype_header_code(dtype) -> int: for k in _code_to_dtype.keys(): if _code_to_dtype[k] == dtype: return k raise ValueError(dtype) def index_file_path(prefix_path): return prefix_path + ".idx" def data_file_path(prefix_path): return prefix_path + ".bin" class IndexedDataset(FairseqDataset): """Loader for TorchNet IndexedDataset""" _HDR_MAGIC = b"TNTIDX\x00\x00" def __init__(self, path, fix_lua_indexing=False): super().__init__() self.path = path self.fix_lua_indexing = fix_lua_indexing self.data_file = None self.read_index(path) def read_index(self, path): with open(index_file_path(path), "rb") as f: magic = f.read(8) assert magic == self._HDR_MAGIC, ( "Index file doesn't match expected format. " "Make sure that --dataset-impl is configured properly." 
) version = f.read(8) assert struct.unpack("<Q", version) == (1,) code, self.element_size = struct.unpack("<QQ", f.read(16)) self.dtype = _code_to_dtype[code] self._len, self.s = struct.unpack("<QQ", f.read(16)) self.dim_offsets = read_longs(f, self._len + 1) self.data_offsets = read_longs(f, self._len + 1) self.sizes = read_longs(f, self.s) def read_data(self, path): self.data_file = open(data_file_path(path), "rb", buffering=0) def check_index(self, i): if i < 0 or i >= self._len: raise IndexError("index out of range") def __del__(self): if self.data_file: self.data_file.close() @lru_cache(maxsize=8) def __getitem__(self, i) -> torch.Tensor: if not self.data_file: self.read_data(self.path) self.check_index(i) tensor_size = self.sizes[self.dim_offsets[i] : self.dim_offsets[i + 1]] a = np.empty(tensor_size, dtype=self.dtype) self.data_file.seek(self.data_offsets[i] * self.element_size) self.data_file.readinto(a) item = torch.from_numpy(a).long() if self.fix_lua_indexing: item -= 1 # subtract 1 for 0-based indexing return item def __len__(self): return self._len def num_tokens(self, index): return self.sizes[index] def size(self, index): return self.sizes[index] @staticmethod def exists(path): return PathManager.exists(index_file_path(path)) and PathManager.exists( data_file_path(path) ) @property def supports_prefetch(self): return False # avoid prefetching to save memory class IndexedCachedDataset(IndexedDataset): def __init__(self, path, fix_lua_indexing=False): super().__init__(path, fix_lua_indexing=fix_lua_indexing) self.cache = None self.cache_index = {} @property def supports_prefetch(self): return True def prefetch(self, indices): if all(i in self.cache_index for i in indices): return if not self.data_file: self.read_data(self.path) indices = sorted(set(indices)) total_size = 0 for i in indices: total_size += self.data_offsets[i + 1] - self.data_offsets[i] self.cache = np.empty(total_size, dtype=self.dtype) ptx = 0 self.cache_index.clear() for i in indices: self.cache_index[i] = ptx size = self.data_offsets[i + 1] - self.data_offsets[i] a = self.cache[ptx : ptx + size] self.data_file.seek(self.data_offsets[i] * self.element_size) self.data_file.readinto(a) ptx += size if self.data_file: # close and delete data file after prefetch so we can pickle self.data_file.close() self.data_file = None @lru_cache(maxsize=8) def __getitem__(self, i): self.check_index(i) tensor_size = self.sizes[self.dim_offsets[i] : self.dim_offsets[i + 1]] a = np.empty(tensor_size, dtype=self.dtype) ptx = self.cache_index[i] np.copyto(a, self.cache[ptx : ptx + a.size]) item = torch.from_numpy(a).long() if self.fix_lua_indexing: item -= 1 # subtract 1 for 0-based indexing return item class IndexedRawTextDataset(FairseqDataset): """Takes a text file as input and binarizes it in memory at instantiation. 
Original lines are also kept in memory""" def __init__(self, path, dictionary, append_eos=True, reverse_order=False): self.tokens_list = [] self.lines = [] self.sizes = [] self.append_eos = append_eos self.reverse_order = reverse_order self.read_data(path, dictionary) self.size = len(self.tokens_list) def read_data(self, path, dictionary): with open(path, "r", encoding="utf-8") as f: for line in f: self.lines.append(line.strip("\n")) tokens = dictionary.encode_line( line, add_if_not_exist=False, append_eos=self.append_eos, reverse_order=self.reverse_order, ).long() self.tokens_list.append(tokens) self.sizes.append(len(tokens)) self.sizes = np.array(self.sizes) def check_index(self, i): if i < 0 or i >= self.size: raise IndexError("index out of range") @lru_cache(maxsize=8) def __getitem__(self, i): self.check_index(i) return self.tokens_list[i] def get_original_text(self, i): self.check_index(i) return self.lines[i] def __del__(self): pass def __len__(self): return self.size def num_tokens(self, index): return self.sizes[index] def size(self, index): return self.sizes[index] @staticmethod def exists(path): return PathManager.exists(path) class IndexedDatasetBuilder: element_sizes = { np.uint8: 1, np.int8: 1, np.int16: 2, np.int32: 4, np.int64: 8, np.float: 4, np.double: 8, } def __init__(self, out_file, dtype=np.int32): self.out_file = open(out_file, "wb") self.dtype = dtype self.data_offsets = [0] self.dim_offsets = [0] self.sizes = [] self.element_size = self.element_sizes[self.dtype] def add_item(self, tensor): # +1 for Lua compatibility bytes = self.out_file.write(np.array(tensor.numpy() + 1, dtype=self.dtype)) self.data_offsets.append(self.data_offsets[-1] + bytes / self.element_size) for s in tensor.size(): self.sizes.append(s) self.dim_offsets.append(self.dim_offsets[-1] + len(tensor.size())) def merge_file_(self, another_file): index = IndexedDataset(another_file) assert index.dtype == self.dtype begin = self.data_offsets[-1] for offset in index.data_offsets[1:]: self.data_offsets.append(begin + offset) self.sizes.extend(index.sizes) begin = self.dim_offsets[-1] for dim_offset in index.dim_offsets[1:]: self.dim_offsets.append(begin + dim_offset) with open(data_file_path(another_file), "rb") as f: while True: data = f.read(1024) if data: self.out_file.write(data) else: break def finalize(self, index_file): self.out_file.close() index = open(index_file, "wb") index.write(b"TNTIDX\x00\x00") index.write(struct.pack("<Q", 1)) index.write( struct.pack("<QQ", _dtype_header_code(self.dtype), self.element_size) ) index.write(struct.pack("<QQ", len(self.data_offsets) - 1, len(self.sizes))) write_longs(index, self.dim_offsets) write_longs(index, self.data_offsets) write_longs(index, self.sizes) index.close() def _warmup_mmap_file(path): with open(path, "rb") as stream: while stream.read(100 * 1024 * 1024): pass class MMapIndexedDataset(torch.utils.data.Dataset): class Index: _HDR_MAGIC = b"MMIDIDX\x00\x00" @classmethod def writer(cls, path, dtype): class _Writer: def __enter__(self): self._file = open(path, "wb") self._file.write(cls._HDR_MAGIC) self._file.write(struct.pack("<Q", 1)) self._file.write(struct.pack("<B", _dtype_header_code(dtype))) return self @staticmethod def _get_pointers(sizes): dtype_size = dtype().itemsize address = 0 pointers = [] for size in sizes: pointers.append(address) address += size * dtype_size return pointers def write(self, sizes): pointers = self._get_pointers(sizes) self._file.write(struct.pack("<Q", len(sizes))) sizes = np.array(sizes, dtype=np.int32) 
self._file.write(sizes.tobytes(order="C")) del sizes pointers = np.array(pointers, dtype=np.int64) self._file.write(pointers.tobytes(order="C")) del pointers def __exit__(self, exc_type, exc_val, exc_tb): self._file.close() return _Writer() def __init__(self, path): with open(path, "rb") as stream: magic_test = stream.read(9) assert self._HDR_MAGIC == magic_test, ( "Index file doesn't match expected format. " "Make sure that --dataset-impl is configured properly." ) version = struct.unpack("<Q", stream.read(8)) assert (1,) == version (dtype_code,) = struct.unpack("<B", stream.read(1)) self._dtype = _code_to_dtype[dtype_code] self._dtype_size = self._dtype().itemsize self._len = struct.unpack("<Q", stream.read(8))[0] offset = stream.tell() _warmup_mmap_file(path) self._bin_buffer_mmap = np.memmap(path, mode="r", order="C") self._bin_buffer = memoryview(self._bin_buffer_mmap) self._sizes = np.frombuffer( self._bin_buffer, dtype=np.int32, count=self._len, offset=offset ) self._pointers = np.frombuffer( self._bin_buffer, dtype=np.int64, count=self._len, offset=offset + self._sizes.nbytes, ) def __del__(self): self._bin_buffer_mmap._mmap.close() del self._bin_buffer_mmap @property def dtype(self): return self._dtype @property def sizes(self): return self._sizes @lru_cache(maxsize=8) def __getitem__(self, i): return self._pointers[i], self._sizes[i] def __len__(self): return self._len def __init__(self, path): super().__init__() self._path = None self._index = None self._bin_buffer = None self._do_init(path) def __getstate__(self): return self._path def __setstate__(self, state): self._do_init(state) def _do_init(self, path): self._path = path self._index = self.Index(index_file_path(self._path)) _warmup_mmap_file(data_file_path(self._path)) self._bin_buffer_mmap = np.memmap( data_file_path(self._path), mode="r", order="C" ) self._bin_buffer = memoryview(self._bin_buffer_mmap) def __del__(self): self._bin_buffer_mmap._mmap.close() del self._bin_buffer_mmap del self._index def __len__(self): return len(self._index) @lru_cache(maxsize=8) def __getitem__(self, i): ptr, size = self._index[i] np_array = np.frombuffer( self._bin_buffer, dtype=self._index.dtype, count=size, offset=ptr ) if self._index.dtype != np.int64: np_array = np_array.astype(np.int64) return torch.from_numpy(np_array) @property def sizes(self): return self._index.sizes @property def supports_prefetch(self): return False @staticmethod def exists(path): return PathManager.exists(index_file_path(path)) and PathManager.exists( data_file_path(path) ) def get_indexed_dataset_to_local(path) -> str: local_index_path = PathManager.get_local_path(index_file_path(path)) local_data_path = PathManager.get_local_path(data_file_path(path)) assert local_index_path.endswith(".idx") and local_data_path.endswith(".bin"), ( "PathManager.get_local_path does not return files with expected patterns: " f"{local_index_path} and {local_data_path}" ) local_path = local_data_path[:-4] # stripping surfix ".bin" assert local_path == local_index_path[:-4] # stripping surfix ".idx" return local_path class MMapIndexedDatasetBuilder: def __init__(self, out_file, dtype=np.int64): self._data_file = open(out_file, "wb") self._dtype = dtype self._sizes = [] def add_item(self, tensor): np_array = np.array(tensor.numpy(), dtype=self._dtype) self._data_file.write(np_array.tobytes(order="C")) self._sizes.append(np_array.size) def merge_file_(self, another_file): # Concatenate index index = MMapIndexedDataset.Index(index_file_path(another_file)) assert index.dtype == 
self._dtype for size in index.sizes: self._sizes.append(size) # Concatenate data with open(data_file_path(another_file), "rb") as f: shutil.copyfileobj(f, self._data_file) def finalize(self, index_file): self._data_file.close() with MMapIndexedDataset.Index.writer(index_file, self._dtype) as index: index.write(self._sizes)
18,261
30.057823
102
py
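# Usage sketch for the indexed-dataset helpers above (illustrative only, not
# part of the repository file): builds a tiny mmap-backed dataset with
# MMapIndexedDatasetBuilder and reads it back via make_dataset(impl="mmap").
# The /tmp prefix is a made-up example path.
import numpy as np
import torch

def _mmap_round_trip_demo(prefix="/tmp/demo_dataset"):
    builder = MMapIndexedDatasetBuilder(data_file_path(prefix), dtype=np.int64)
    for toks in ([1, 2, 3, 4], [5, 6], [7, 8, 9]):
        builder.add_item(torch.tensor(toks, dtype=torch.int64))
    builder.finalize(index_file_path(prefix))  # writes the .idx companion file

    ds = make_dataset(prefix, impl="mmap")
    assert len(ds) == 3
    assert ds[1].tolist() == [5, 6]  # items come back as int64 tensors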
sign-topic
sign-topic-main/fairseq/data/denoising_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import numpy as np import torch from . import FairseqDataset, data_utils def collate( samples, pad_idx, eos_idx, vocab, left_pad_source=False, left_pad_target=False, input_feeding=True, pad_to_length=None, ): assert input_feeding if len(samples) == 0: return {} def merge(key, left_pad, move_eos_to_beginning=False, pad_to_length=None): return data_utils.collate_tokens( [s[key] for s in samples], pad_idx, eos_idx=None, # use eos_idx of each sample instead of vocab.eos() left_pad=left_pad, move_eos_to_beginning=move_eos_to_beginning, pad_to_length=pad_to_length, ) id = torch.LongTensor([s["id"] for s in samples]) src_tokens = merge( "source", left_pad=left_pad_source, pad_to_length=pad_to_length["source"] if pad_to_length is not None else None, ) # sort by descending source length src_lengths = torch.LongTensor([s["source"].numel() for s in samples]) src_lengths, sort_order = src_lengths.sort(descending=True) id = id.index_select(0, sort_order) src_tokens = src_tokens.index_select(0, sort_order) prev_output_tokens = None target = None if samples[0].get("target", None) is not None: target = merge( "target", left_pad=left_pad_target, pad_to_length=pad_to_length["target"] if pad_to_length is not None else None, ) target = target.index_select(0, sort_order) ntokens = sum(len(s["target"]) for s in samples) if input_feeding: # we create a shifted version of targets for feeding the # previous output token(s) into the next decoder step prev_output_tokens = merge( "target", left_pad=left_pad_target, move_eos_to_beginning=True, pad_to_length=pad_to_length["target"] if pad_to_length is not None else None, ) prev_output_tokens = prev_output_tokens.index_select(0, sort_order) else: ntokens = sum(len(s["source"]) for s in samples) batch = { "id": id, "ntokens": ntokens, "net_input": { "src_tokens": src_tokens, "src_lengths": src_lengths, }, "target": target, "nsentences": samples[0]["source"].size(0), "sort_order": sort_order, } if prev_output_tokens is not None: batch["net_input"]["prev_output_tokens"] = prev_output_tokens return batch class DenoisingDataset(FairseqDataset): """ A wrapper around TokenBlockDataset for BART dataset. Args: dataset (TokenBlockDataset): dataset to wrap sizes (List[int]): sentence lengths vocab (~fairseq.data.Dictionary): vocabulary mask_idx (int): dictionary index used for masked token mask_whole_words: only mask whole words. This should be a byte mask over vocab indices, indicating whether it is the beginning of a word. We will extend any mask to encompass the whole word. shuffle (bool, optional): shuffle the elements before batching. Default: ``True`` seed: Seed for random number generator for reproducibility. args: argparse arguments. 
""" def __init__( self, dataset, sizes, vocab, mask_idx, mask_whole_words, shuffle, seed, args, eos=None, item_transform_func=None, ): self.dataset = dataset self.sizes = sizes self.vocab = vocab self.shuffle = shuffle self.seed = seed self.mask_idx = mask_idx self.mask_whole_word = mask_whole_words self.mask_ratio = args.mask self.random_ratio = args.mask_random self.insert_ratio = args.insert self.rotate_ratio = args.rotate self.permute_sentence_ratio = args.permute_sentences self.eos = eos if eos is not None else vocab.eos() self.item_transform_func = item_transform_func if args.bpe != "gpt2": self.full_stop_index = self.vocab.eos() else: assert args.bpe == "gpt2" self.full_stop_index = self.vocab.index("13") self.replace_length = args.replace_length if self.replace_length not in [-1, 0, 1]: raise ValueError(f"invalid arg: replace_length={self.replace_length}") if args.mask_length not in ["subword", "word", "span-poisson"]: raise ValueError(f"invalid arg: mask-length={args.mask_length}") if args.mask_length == "subword" and args.replace_length not in [0, 1]: raise ValueError(f"if using subwords, use replace-length=1 or 0") self.mask_span_distribution = None if args.mask_length == "span-poisson": _lambda = args.poisson_lambda lambda_to_the_k = 1 e_to_the_minus_lambda = math.exp(-_lambda) k_factorial = 1 ps = [] for k in range(0, 128): ps.append(e_to_the_minus_lambda * lambda_to_the_k / k_factorial) lambda_to_the_k *= _lambda k_factorial *= k + 1 if ps[-1] < 0.0000001: break ps = torch.FloatTensor(ps) self.mask_span_distribution = torch.distributions.Categorical(ps) self.epoch = 0 @property def can_reuse_epoch_itr_across_epochs(self): return True # only the noise changes, not item sizes def set_epoch(self, epoch, **unused): self.epoch = epoch def __getitem__(self, index): with data_utils.numpy_seed(self.seed, self.epoch, index): tokens = self.dataset[index] assert tokens[-1] == self.eos source, target = tokens, tokens.clone() if self.permute_sentence_ratio > 0.0: source = self.permute_sentences(source, self.permute_sentence_ratio) if self.mask_ratio > 0: source = self.add_whole_word_mask(source, self.mask_ratio) if self.insert_ratio > 0: source = self.add_insertion_noise(source, self.insert_ratio) if self.rotate_ratio > 0.0 and np.random.random() < self.rotate_ratio: source = self.add_rolling_noise(source) # there can additional changes to make: if self.item_transform_func is not None: source, target = self.item_transform_func(source, target) assert (source >= 0).all() assert (source[1:-1] >= 1).all() assert (source <= len(self.vocab)).all() assert source[0] == self.vocab.bos() assert source[-1] == self.eos return { "id": index, "source": source, "target": target, } def __len__(self): return len(self.dataset) def permute_sentences(self, source, p=1.0): full_stops = source == self.full_stop_index # Pretend it ends with a full stop so last span is a sentence full_stops[-2] = 1 # Tokens that are full stops, where the previous token is not sentence_ends = (full_stops[1:] * ~full_stops[:-1]).nonzero(as_tuple=False) + 2 result = source.clone() num_sentences = sentence_ends.size(0) num_to_permute = math.ceil((num_sentences * 2 * p) / 2.0) substitutions = torch.randperm(num_sentences)[:num_to_permute] ordering = torch.arange(0, num_sentences) ordering[substitutions] = substitutions[torch.randperm(num_to_permute)] # Ignore <bos> at start index = 1 for i in ordering: sentence = source[(sentence_ends[i - 1] if i > 0 else 1) : sentence_ends[i]] result[index : index + sentence.size(0)] = sentence 
index += sentence.size(0) return result def word_starts(self, source): if self.mask_whole_word is not None: is_word_start = self.mask_whole_word.gather(0, source) else: is_word_start = torch.ones(source.size()) is_word_start[0] = 0 is_word_start[-1] = 0 return is_word_start def add_whole_word_mask(self, source, p): is_word_start = self.word_starts(source) num_to_mask = int(math.ceil(is_word_start.float().sum() * p)) num_inserts = 0 if num_to_mask == 0: return source if self.mask_span_distribution is not None: lengths = self.mask_span_distribution.sample(sample_shape=(num_to_mask,)) # Make sure we have enough to mask cum_length = torch.cumsum(lengths, 0) while cum_length[-1] < num_to_mask: lengths = torch.cat( [ lengths, self.mask_span_distribution.sample(sample_shape=(num_to_mask,)), ], dim=0, ) cum_length = torch.cumsum(lengths, 0) # Trim to masking budget i = 0 while cum_length[i] < num_to_mask: i += 1 lengths[i] = num_to_mask - (0 if i == 0 else cum_length[i - 1]) num_to_mask = i + 1 lengths = lengths[:num_to_mask] # Handle 0-length mask (inserts) separately lengths = lengths[lengths > 0] num_inserts = num_to_mask - lengths.size(0) num_to_mask -= num_inserts if num_to_mask == 0: return self.add_insertion_noise(source, num_inserts / source.size(0)) assert (lengths > 0).all() else: lengths = torch.ones((num_to_mask,)).long() assert is_word_start[-1] == 0 word_starts = is_word_start.nonzero(as_tuple=False) indices = word_starts[ torch.randperm(word_starts.size(0))[:num_to_mask] ].squeeze(1) mask_random = torch.FloatTensor(num_to_mask).uniform_() < self.random_ratio source_length = source.size(0) assert source_length - 1 not in indices to_keep = torch.ones(source_length, dtype=torch.bool) is_word_start[ -1 ] = 255 # acts as a long length, so spans don't go over the end of doc if self.replace_length == 0: to_keep[indices] = 0 else: # keep index, but replace it with [MASK] source[indices] = self.mask_idx source[indices[mask_random]] = torch.randint( 1, len(self.vocab), size=(mask_random.sum(),) ) if self.mask_span_distribution is not None: assert len(lengths.size()) == 1 assert lengths.size() == indices.size() lengths -= 1 while indices.size(0) > 0: assert lengths.size() == indices.size() lengths -= is_word_start[indices + 1].long() uncompleted = lengths >= 0 indices = indices[uncompleted] + 1 mask_random = mask_random[uncompleted] lengths = lengths[uncompleted] if self.replace_length != -1: # delete token to_keep[indices] = 0 else: # keep index, but replace it with [MASK] source[indices] = self.mask_idx source[indices[mask_random]] = torch.randint( 1, len(self.vocab), size=(mask_random.sum(),) ) else: # A bit faster when all lengths are 1 while indices.size(0) > 0: uncompleted = is_word_start[indices + 1] == 0 indices = indices[uncompleted] + 1 mask_random = mask_random[uncompleted] if self.replace_length != -1: # delete token to_keep[indices] = 0 else: # keep index, but replace it with [MASK] source[indices] = self.mask_idx source[indices[mask_random]] = torch.randint( 1, len(self.vocab), size=(mask_random.sum(),) ) assert source_length - 1 not in indices source = source[to_keep] if num_inserts > 0: source = self.add_insertion_noise(source, num_inserts / source.size(0)) return source def add_permuted_noise(self, tokens, p): num_words = len(tokens) num_to_permute = math.ceil(((num_words * 2) * p) / 2.0) substitutions = torch.randperm(num_words - 2)[:num_to_permute] + 1 tokens[substitutions] = tokens[substitutions[torch.randperm(num_to_permute)]] return tokens def add_rolling_noise(self, 
tokens): offset = np.random.randint(1, max(1, tokens.size(-1) - 1) + 1) tokens = torch.cat( (tokens[0:1], tokens[offset:-1], tokens[1:offset], tokens[-1:]), dim=0, ) return tokens def add_insertion_noise(self, tokens, p): if p == 0.0: return tokens num_tokens = len(tokens) n = int(math.ceil(num_tokens * p)) noise_indices = torch.randperm(num_tokens + n - 2)[:n] + 1 noise_mask = torch.zeros(size=(num_tokens + n,), dtype=torch.bool) noise_mask[noise_indices] = 1 result = torch.LongTensor(n + len(tokens)).fill_(-1) num_random = int(math.ceil(n * self.random_ratio)) result[noise_indices[num_random:]] = self.mask_idx result[noise_indices[:num_random]] = torch.randint( low=1, high=len(self.vocab), size=(num_random,) ) result[~noise_mask] = tokens assert (result >= 0).all() return result def collater(self, samples, pad_to_length=None): """Merge a list of samples to form a mini-batch. Args: samples (List[dict]): samples to collate Returns: dict: a mini-batch of data """ return collate( samples, self.vocab.pad(), self.eos, self.vocab, pad_to_length=pad_to_length ) def num_tokens(self, index): """Return the number of tokens in a sample. This value is used to enforce ``--max-tokens`` during batching.""" return self.sizes[index] def size(self, index): """Return an example's size as a float or tuple. This value is used when filtering a dataset with ``--max-positions``.""" return self.sizes[index] def ordered_indices(self): """Return an ordered list of indices. Batches will be constructed based on this order.""" if self.shuffle: indices = np.random.permutation(len(self)) else: indices = np.arange(len(self)) return indices[np.argsort(self.sizes[indices], kind="mergesort")] def prefetch(self, indices): self.src.prefetch(indices) self.tgt.prefetch(indices) @property def supports_prefetch(self): return ( hasattr(self.src, "supports_prefetch") and self.src.supports_prefetch and hasattr(self.tgt, "supports_prefetch") and self.tgt.supports_prefetch )
15,627
34.762014
88
py
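# Usage sketch for the collate() helper above (illustrative only): two toy
# samples are padded to a common length, sorted by descending source length,
# and a shifted prev_output_tokens is built for input feeding. The pad/eos
# indices are made up for the example.
import torch

def _collate_demo(pad_idx=1, eos_idx=2):
    samples = [
        {"id": 0, "source": torch.tensor([4, 5, eos_idx]),
         "target": torch.tensor([4, 5, eos_idx])},
        {"id": 1, "source": torch.tensor([6, 7, 8, 9, eos_idx]),
         "target": torch.tensor([6, 7, 8, 9, eos_idx])},
    ]
    batch = collate(samples, pad_idx, eos_idx, vocab=None)
    assert batch["id"].tolist() == [1, 0]                 # longest source first
    assert batch["net_input"]["src_tokens"].shape == (2, 5)
    # each sample's own EOS is moved to the front of prev_output_tokens
    assert batch["net_input"]["prev_output_tokens"][0, 0].item() == eos_idx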
sign-topic
sign-topic-main/fairseq/data/prepend_token_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import numpy as np
import torch

from . import BaseWrapperDataset


class PrependTokenDataset(BaseWrapperDataset):
    def __init__(self, dataset, token=None):
        super().__init__(dataset)
        self.token = token
        if token is not None:
            self._sizes = np.array(dataset.sizes) + 1
        else:
            self._sizes = dataset.sizes

    def __getitem__(self, idx):
        item = self.dataset[idx]
        if self.token is not None:
            item = torch.cat([item.new([self.token]), item])
        return item

    @property
    def sizes(self):
        return self._sizes

    def num_tokens(self, index):
        n = self.dataset.num_tokens(index)
        if self.token is not None:
            n += 1
        return n

    def size(self, index):
        n = self.dataset.size(index)
        if self.token is not None:
            n += 1
        return n
1,066
24.404762
65
py
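# Usage sketch for PrependTokenDataset above (illustrative only): a tiny
# in-memory dataset gets a BOS-like index (0, chosen arbitrarily) prepended to
# every item, and the reported sizes grow by one.
import numpy as np
import torch

def _prepend_demo():
    class _Toy(torch.utils.data.Dataset):
        def __init__(self, items):
            self.items = [torch.tensor(x) for x in items]
            self.sizes = np.array([len(x) for x in items])
        def __getitem__(self, idx):
            return self.items[idx]
        def __len__(self):
            return len(self.items)

    wrapped = PrependTokenDataset(_Toy([[5, 6, 7], [8, 9]]), token=0)
    assert wrapped[0].tolist() == [0, 5, 6, 7]
    assert wrapped.sizes.tolist() == [4, 3]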
sign-topic
sign-topic-main/fairseq/data/numel_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import numpy as np
import torch

from . import BaseWrapperDataset


class NumelDataset(BaseWrapperDataset):
    def __init__(self, dataset, reduce=False):
        super().__init__(dataset)
        self.reduce = reduce

    def __getitem__(self, index):
        item = self.dataset[index]
        if torch.is_tensor(item):
            return torch.numel(item)
        else:
            return np.size(item)

    def __len__(self):
        return len(self.dataset)

    def collater(self, samples):
        if self.reduce:
            return sum(samples)
        else:
            return torch.tensor(samples)
786
23.59375
65
py
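# Usage sketch for NumelDataset above (illustrative only): it reports the
# number of elements per item, and with reduce=True the collater sums them,
# which is handy for ntokens bookkeeping.
import torch

def _numel_demo():
    class _Toy(torch.utils.data.Dataset):
        def __init__(self, items):
            self.items = [torch.tensor(x) for x in items]
        def __getitem__(self, idx):
            return self.items[idx]
        def __len__(self):
            return len(self.items)

    ds = NumelDataset(_Toy([[1, 2, 3], [4, 5]]), reduce=True)
    assert [ds[i] for i in range(len(ds))] == [3, 2]
    assert ds.collater([ds[0], ds[1]]) == 5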
sign-topic
sign-topic-main/fairseq/data/noising.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import numpy as np import torch from fairseq.data import data_utils class WordNoising(object): """Generate a noisy version of a sentence, without changing words themselves.""" def __init__(self, dictionary, bpe_cont_marker="@@", bpe_end_marker=None): self.dictionary = dictionary self.bpe_end = None if bpe_cont_marker: self.bpe_end = np.array( [ not self.dictionary[i].endswith(bpe_cont_marker) for i in range(len(self.dictionary)) ] ) elif bpe_end_marker: self.bpe_end = np.array( [ self.dictionary[i].endswith(bpe_end_marker) for i in range(len(self.dictionary)) ] ) self.get_word_idx = ( self._get_bpe_word_idx if self.bpe_end is not None else self._get_token_idx ) def noising(self, x, lengths, noising_prob=0.0): raise NotImplementedError() def _get_bpe_word_idx(self, x): """ Given a list of BPE tokens, for every index in the tokens list, return the index of the word grouping that it belongs to. For example, for input x corresponding to ["how", "are", "y@@", "ou"], return [[0], [1], [2], [2]]. """ # x: (T x B) bpe_end = self.bpe_end[x] if x.size(0) == 1 and x.size(1) == 1: # Special case when we only have one word in x. If x = [[N]], # bpe_end is a scalar (bool) instead of a 2-dim array of bools, # which makes the sum operation below fail. return np.array([[0]]) # do a reduce front sum to generate word ids word_idx = bpe_end[::-1].cumsum(0)[::-1] word_idx = word_idx.max(0)[None, :] - word_idx return word_idx def _get_token_idx(self, x): """ This is to extend noising functions to be able to apply to non-bpe tokens, e.g. word or characters. """ x = torch.t(x) word_idx = np.array([range(len(x_i)) for x_i in x]) return np.transpose(word_idx) class WordDropout(WordNoising): """Randomly drop input words. If not passing blank_idx (default is None), then dropped words will be removed. Otherwise, it will be replaced by the blank_idx.""" def __init__( self, dictionary, default_dropout_prob=0.1, bpe_cont_marker="@@", bpe_end_marker=None, ): super().__init__(dictionary, bpe_cont_marker, bpe_end_marker) self.default_dropout_prob = default_dropout_prob def noising(self, x, lengths, dropout_prob=None, blank_idx=None): if dropout_prob is None: dropout_prob = self.default_dropout_prob # x: (T x B), lengths: B if dropout_prob == 0: return x, lengths assert 0 < dropout_prob < 1 # be sure to drop entire words word_idx = self.get_word_idx(x) sentences = [] modified_lengths = [] for i in range(lengths.size(0)): # Since dropout probabilities need to apply over non-pad tokens, # it is not trivial to generate the keep mask without consider # input lengths; otherwise, this could be done outside the loop # We want to drop whole words based on word_idx grouping num_words = max(word_idx[:, i]) + 1 # ith example: [x0, x1, ..., eos, pad, ..., pad] # We should only generate keep probs for non-EOS tokens. Thus if the # input sentence ends in EOS, the last word idx is not included in # the dropout mask generation and we append True to always keep EOS. # Otherwise, just generate the dropout mask for all word idx # positions. has_eos = x[lengths[i] - 1, i] == self.dictionary.eos() if has_eos: # has eos? 
keep = np.random.rand(num_words - 1) >= dropout_prob keep = np.append(keep, [True]) # keep EOS symbol else: keep = np.random.rand(num_words) >= dropout_prob words = x[: lengths[i], i].tolist() # TODO: speed up the following loop # drop words from the input according to keep new_s = [ w if keep[word_idx[j, i]] else blank_idx for j, w in enumerate(words) ] new_s = [w for w in new_s if w is not None] # we need to have at least one word in the sentence (more than the # start / end sentence symbols) if len(new_s) <= 1: # insert at beginning in case the only token left is EOS # EOS should be at end of list. new_s.insert(0, words[np.random.randint(0, len(words))]) assert len(new_s) >= 1 and ( not has_eos # Either don't have EOS at end or last token is EOS or (len(new_s) >= 2 and new_s[-1] == self.dictionary.eos()) ), "New sentence is invalid." sentences.append(new_s) modified_lengths.append(len(new_s)) # re-construct input modified_lengths = torch.LongTensor(modified_lengths) modified_x = torch.LongTensor( modified_lengths.max(), modified_lengths.size(0) ).fill_(self.dictionary.pad()) for i in range(modified_lengths.size(0)): modified_x[: modified_lengths[i], i].copy_(torch.LongTensor(sentences[i])) return modified_x, modified_lengths class WordShuffle(WordNoising): """Shuffle words by no more than k positions.""" def __init__( self, dictionary, default_max_shuffle_distance=3, bpe_cont_marker="@@", bpe_end_marker=None, ): super().__init__(dictionary, bpe_cont_marker, bpe_end_marker) self.default_max_shuffle_distance = 3 def noising(self, x, lengths, max_shuffle_distance=None): if max_shuffle_distance is None: max_shuffle_distance = self.default_max_shuffle_distance # x: (T x B), lengths: B if max_shuffle_distance == 0: return x, lengths # max_shuffle_distance < 1 will return the same sequence assert max_shuffle_distance > 1 # define noise word scores noise = np.random.uniform( 0, max_shuffle_distance, size=(x.size(0), x.size(1)), ) noise[0] = -1 # do not move start sentence symbol # be sure to shuffle entire words word_idx = self.get_word_idx(x) x2 = x.clone() for i in range(lengths.size(0)): length_no_eos = lengths[i] if x[lengths[i] - 1, i] == self.dictionary.eos(): length_no_eos = lengths[i] - 1 # generate a random permutation scores = word_idx[:length_no_eos, i] + noise[word_idx[:length_no_eos, i], i] # ensure no reordering inside a word scores += 1e-6 * np.arange(length_no_eos.item()) permutation = scores.argsort() # shuffle words x2[:length_no_eos, i].copy_( x2[:length_no_eos, i][torch.from_numpy(permutation)] ) return x2, lengths class UnsupervisedMTNoising(WordNoising): """ Implements the default configuration for noising in UnsupervisedMT (github.com/facebookresearch/UnsupervisedMT) """ def __init__( self, dictionary, max_word_shuffle_distance, word_dropout_prob, word_blanking_prob, bpe_cont_marker="@@", bpe_end_marker=None, ): super().__init__(dictionary) self.max_word_shuffle_distance = max_word_shuffle_distance self.word_dropout_prob = word_dropout_prob self.word_blanking_prob = word_blanking_prob self.word_dropout = WordDropout( dictionary=dictionary, bpe_cont_marker=bpe_cont_marker, bpe_end_marker=bpe_end_marker, ) self.word_shuffle = WordShuffle( dictionary=dictionary, bpe_cont_marker=bpe_cont_marker, bpe_end_marker=bpe_end_marker, ) def noising(self, x, lengths): # 1. Word Shuffle noisy_src_tokens, noisy_src_lengths = self.word_shuffle.noising( x=x, lengths=lengths, max_shuffle_distance=self.max_word_shuffle_distance, ) # 2. 
Word Dropout noisy_src_tokens, noisy_src_lengths = self.word_dropout.noising( x=noisy_src_tokens, lengths=noisy_src_lengths, dropout_prob=self.word_dropout_prob, ) # 3. Word Blanking noisy_src_tokens, noisy_src_lengths = self.word_dropout.noising( x=noisy_src_tokens, lengths=noisy_src_lengths, dropout_prob=self.word_blanking_prob, blank_idx=self.dictionary.unk(), ) return noisy_src_tokens class NoisingDataset(torch.utils.data.Dataset): def __init__( self, src_dataset, src_dict, seed, noiser=None, noising_class=UnsupervisedMTNoising, **kwargs ): """ Wrap a :class:`~torch.utils.data.Dataset` and apply noise to the samples based on the supplied noising configuration. Args: src_dataset (~torch.utils.data.Dataset): dataset to wrap. to build self.src_dataset -- a LanguagePairDataset with src dataset as the source dataset and None as the target dataset. Should NOT have padding so that src_lengths are accurately calculated by language_pair_dataset collate function. We use language_pair_dataset here to encapsulate the tgt_dataset so we can re-use the LanguagePairDataset collater to format the batches in the structure that SequenceGenerator expects. src_dict (~fairseq.data.Dictionary): source dictionary seed (int): seed to use when generating random noise noiser (WordNoising): a pre-initialized :class:`WordNoising` instance. If this is None, a new instance will be created using *noising_class* and *kwargs*. noising_class (class, optional): class to use to initialize a default :class:`WordNoising` instance. kwargs (dict, optional): arguments to initialize the default :class:`WordNoising` instance given by *noiser*. """ self.src_dataset = src_dataset self.src_dict = src_dict self.seed = seed self.noiser = ( noiser if noiser is not None else noising_class( dictionary=src_dict, **kwargs, ) ) self.sizes = src_dataset.sizes def __getitem__(self, index): """ Returns a single noisy sample. Multiple samples are fed to the collater create a noising dataset batch. """ src_tokens = self.src_dataset[index] src_lengths = torch.LongTensor([len(src_tokens)]) src_tokens = src_tokens.unsqueeze(0) # Transpose src tokens to fit expected shape of x in noising function # (batch size, sequence length) -> (sequence length, batch size) src_tokens_t = torch.t(src_tokens) with data_utils.numpy_seed(self.seed + index): noisy_src_tokens = self.noiser.noising(src_tokens_t, src_lengths) # Transpose back to expected src_tokens format # (sequence length, 1) -> (1, sequence length) noisy_src_tokens = torch.t(noisy_src_tokens) return noisy_src_tokens[0] def __len__(self): """ The length of the noising dataset is the length of src. """ return len(self.src_dataset) @property def supports_prefetch(self): return self.src_dataset.supports_prefetch def prefetch(self, indices): if self.src_dataset.supports_prefetch: self.src_dataset.prefetch(indices)
12,422
36.083582
88
py
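# Usage sketch for WordShuffle above (illustrative only): a tiny fairseq
# Dictionary is built, a single (T x B) sentence is shuffled within a window
# of 3 positions, and the EOS stays at the end. bpe_cont_marker=None treats
# every token as a whole word.
import torch
from fairseq.data import Dictionary

def _word_shuffle_demo():
    d = Dictionary()
    for w in ("hello", "world", "again"):
        d.add_symbol(w)
    sent = torch.LongTensor(
        [[d.index("hello")], [d.index("world")], [d.index("again")], [d.eos()]]
    )  # shape (T=4, B=1)
    lengths = torch.LongTensor([4])
    shuffler = WordShuffle(d, bpe_cont_marker=None)
    noisy, noisy_lengths = shuffler.noising(sent, lengths, max_shuffle_distance=3)
    assert noisy.shape == sent.shape and noisy_lengths.tolist() == [4]
    assert noisy[-1, 0] == d.eos()  # EOS is never moved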
sign-topic
sign-topic-main/fairseq/data/bucket_pad_length_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import numpy as np import torch.nn.functional as F from fairseq.data import BaseWrapperDataset from fairseq.data.data_utils import get_buckets, get_bucketed_sizes class BucketPadLengthDataset(BaseWrapperDataset): """ Bucket and pad item lengths to the nearest bucket size. This can be used to reduce the number of unique batch shapes, which is important on TPUs since each new batch shape requires a recompilation. Args: dataset (FairseqDatset): dataset to bucket sizes (List[int]): all item sizes num_buckets (int): number of buckets to create pad_idx (int): padding symbol left_pad (bool): if True, pad on the left; otherwise right pad """ def __init__( self, dataset, sizes, num_buckets, pad_idx, left_pad, tensor_key=None, ): super().__init__(dataset) self.pad_idx = pad_idx self.left_pad = left_pad assert num_buckets > 0 self.buckets = get_buckets(sizes, num_buckets) self._bucketed_sizes = get_bucketed_sizes(sizes, self.buckets) self._tensor_key = tensor_key def _set_tensor(self, item, val): if self._tensor_key is None: return val item[self._tensor_key] = val return item def _get_tensor(self, item): if self._tensor_key is None: return item return item[self._tensor_key] def _pad(self, tensor, bucket_size, dim=-1): num_pad = bucket_size - tensor.size(dim) return F.pad( tensor, (num_pad if self.left_pad else 0, 0 if self.left_pad else num_pad), value=self.pad_idx, ) def __getitem__(self, index): item = self.dataset[index] bucket_size = self._bucketed_sizes[index] tensor = self._get_tensor(item) padded = self._pad(tensor, bucket_size) return self._set_tensor(item, padded) @property def sizes(self): return self._bucketed_sizes def num_tokens(self, index): return self._bucketed_sizes[index] def size(self, index): return self._bucketed_sizes[index]
2,360
28.886076
79
py
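# Usage sketch for BucketPadLengthDataset above (illustrative only): items are
# right-padded up to their bucket boundary so only a handful of distinct
# lengths reach the collater. pad_idx=1 mirrors fairseq's usual padding index.
import numpy as np
import torch

def _bucket_pad_demo():
    class _Toy(torch.utils.data.Dataset):
        def __init__(self, items):
            self.items = [torch.tensor(x) for x in items]
            self.sizes = np.array([len(x) for x in items])
        def __getitem__(self, idx):
            return self.items[idx]
        def __len__(self):
            return len(self.items)

    base = _Toy([[5], [5, 6], [5, 6, 7], [5, 6, 7, 8]])
    bucketed = BucketPadLengthDataset(
        base, sizes=base.sizes, num_buckets=2, pad_idx=1, left_pad=False
    )
    assert len(set(bucketed.sizes.tolist())) <= 2    # at most two batch lengths
    assert bucketed[0].size(0) == bucketed.sizes[0]  # item padded to its bucket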
sign-topic
sign-topic-main/fairseq/data/concat_sentences_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch

from . import FairseqDataset


class ConcatSentencesDataset(FairseqDataset):
    def __init__(self, *datasets):
        super().__init__()
        self.datasets = datasets
        assert all(
            len(ds) == len(datasets[0]) for ds in datasets
        ), "datasets must have the same length"

    def __getitem__(self, index):
        return torch.cat([ds[index] for ds in self.datasets])

    def __len__(self):
        return len(self.datasets[0])

    def collater(self, samples):
        return self.datasets[0].collater(samples)

    @property
    def sizes(self):
        return sum(ds.sizes for ds in self.datasets)

    def num_tokens(self, index):
        return sum(ds.num_tokens(index) for ds in self.datasets)

    def size(self, index):
        return sum(ds.size(index) for ds in self.datasets)

    def ordered_indices(self):
        return self.datasets[0].ordered_indices()

    @property
    def supports_prefetch(self):
        return any(getattr(ds, "supports_prefetch", False) for ds in self.datasets)

    def prefetch(self, indices):
        for ds in self.datasets:
            if getattr(ds, "supports_prefetch", False):
                ds.prefetch(indices)

    def set_epoch(self, epoch):
        super().set_epoch(epoch)
        for ds in self.datasets:
            if hasattr(ds, "set_epoch"):
                ds.set_epoch(epoch)
1,558
27.345455
83
py
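# Usage sketch for ConcatSentencesDataset above (illustrative only): two
# aligned datasets are joined item-by-item, e.g. to feed "sentence1 +
# sentence2" style inputs to a classifier head.
import numpy as np
import torch

def _concat_sentences_demo():
    class _Toy(torch.utils.data.Dataset):
        def __init__(self, items):
            self.items = [torch.tensor(x) for x in items]
            self.sizes = np.array([len(x) for x in items])
        def __getitem__(self, idx):
            return self.items[idx]
        def __len__(self):
            return len(self.items)

    joined = ConcatSentencesDataset(_Toy([[4, 5], [6]]), _Toy([[7], [8, 9]]))
    assert joined[0].tolist() == [4, 5, 7]
    assert joined.sizes.tolist() == [3, 3]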
sign-topic
sign-topic-main/fairseq/data/fairseq_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import numpy as np import torch.utils.data from fairseq.data import data_utils logger = logging.getLogger(__name__) class EpochListening: """Mixin for receiving updates whenever the epoch increments.""" @property def can_reuse_epoch_itr_across_epochs(self): """ Whether we can reuse the :class:`fairseq.data.EpochBatchIterator` for this dataset across epochs. This needs to return ``False`` if the sample sizes can change across epochs, in which case we may need to regenerate batches at each epoch. If your dataset relies in ``set_epoch`` then you should consider setting this to ``False``. """ return True def set_epoch(self, epoch): """Will receive the updated epoch number at the beginning of the epoch.""" pass class FairseqDataset(torch.utils.data.Dataset, EpochListening): """A dataset that provides helpers for batching.""" def __getitem__(self, index): raise NotImplementedError def __len__(self): raise NotImplementedError def collater(self, samples): """Merge a list of samples to form a mini-batch. Args: samples (List[dict]): samples to collate Returns: dict: a mini-batch suitable for forwarding with a Model """ raise NotImplementedError def num_tokens(self, index): """Return the number of tokens in a sample. This value is used to enforce ``--max-tokens`` during batching.""" raise NotImplementedError def num_tokens_vec(self, indices): """Return the number of tokens for a set of positions defined by indices. This value is used to enforce ``--max-tokens`` during batching.""" raise NotImplementedError def size(self, index): """Return an example's size as a float or tuple. This value is used when filtering a dataset with ``--max-positions``.""" raise NotImplementedError def ordered_indices(self): """Return an ordered list of indices. Batches will be constructed based on this order.""" return np.arange(len(self), dtype=np.int64) @property def supports_prefetch(self): """Whether this dataset supports prefetching.""" return False def attr(self, attr: str, index: int): return getattr(self, attr, None) def prefetch(self, indices): """Prefetch the data required for this epoch.""" raise NotImplementedError def get_batch_shapes(self): """ Return a list of valid batch shapes, for example:: [(8, 512), (16, 256), (32, 128)] The first dimension of each tuple is the batch size and can be ``None`` to automatically infer the max batch size based on ``--max-tokens``. The second dimension of each tuple is the max supported length as given by :func:`fairseq.data.FairseqDataset.num_tokens`. This will be used by :func:`fairseq.data.FairseqDataset.batch_by_size` to restrict batch shapes. This is useful on TPUs to avoid too many dynamic shapes (and recompilations). """ return None def batch_by_size( self, indices, max_tokens=None, max_sentences=None, required_batch_size_multiple=1, ): """ Given an ordered set of indices, return batches according to *max_tokens*, *max_sentences* and *required_batch_size_multiple*. 
""" from fairseq.data import data_utils fixed_shapes = self.get_batch_shapes() if fixed_shapes is not None: def adjust_bsz(bsz, num_tokens): if bsz is None: assert max_tokens is not None, "Must specify --max-tokens" bsz = max_tokens // num_tokens if max_sentences is not None: bsz = min(bsz, max_sentences) elif ( bsz >= required_batch_size_multiple and bsz % required_batch_size_multiple != 0 ): bsz -= bsz % required_batch_size_multiple return bsz fixed_shapes = np.array( [ [adjust_bsz(bsz, num_tokens), num_tokens] for (bsz, num_tokens) in fixed_shapes ] ) try: num_tokens_vec = self.num_tokens_vec(indices).astype("int64") except NotImplementedError: num_tokens_vec = None return data_utils.batch_by_size( indices, num_tokens_fn=self.num_tokens, num_tokens_vec=num_tokens_vec, max_tokens=max_tokens, max_sentences=max_sentences, required_batch_size_multiple=required_batch_size_multiple, fixed_shapes=fixed_shapes, ) def filter_indices_by_size(self, indices, max_sizes): """ Filter a list of sample indices. Remove those that are longer than specified in *max_sizes*. WARNING: don't update, override method in child classes Args: indices (np.array): original array of sample indices max_sizes (int or list[int] or tuple[int]): max sample size, can be defined separately for src and tgt (then list or tuple) Returns: np.array: filtered sample array list: list of removed indices """ if isinstance(max_sizes, float) or isinstance(max_sizes, int): if hasattr(self, "sizes") and isinstance(self.sizes, np.ndarray): ignored = indices[self.sizes[indices] > max_sizes].tolist() indices = indices[self.sizes[indices] <= max_sizes] elif ( hasattr(self, "sizes") and isinstance(self.sizes, list) and len(self.sizes) == 1 ): ignored = indices[self.sizes[0][indices] > max_sizes].tolist() indices = indices[self.sizes[0][indices] <= max_sizes] else: indices, ignored = data_utils._filter_by_size_dynamic( indices, self.size, max_sizes ) else: indices, ignored = data_utils._filter_by_size_dynamic( indices, self.size, max_sizes ) return indices, ignored @property def supports_fetch_outside_dataloader(self): """Whether this dataset supports fetching outside the workers of the dataloader.""" return True class FairseqIterableDataset(torch.utils.data.IterableDataset, EpochListening): """ For datasets that need to be read sequentially, usually because the data is being streamed or otherwise can't be manipulated on a single machine. """ def __iter__(self): raise NotImplementedError
7,123
33.582524
91
py
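# Usage sketch for the FairseqDataset base class above (illustrative only):
# a minimal subclass shows how ordered_indices(), filter_indices_by_size() and
# batch_by_size() fit together. The sizes and max-tokens budget are arbitrary,
# and batch_by_size assumes fairseq's Cython data utilities are built.
import numpy as np
import torch

class _ToySizesDataset(FairseqDataset):
    def __init__(self, sizes):
        self.sizes = np.array(sizes)
    def __getitem__(self, index):
        return torch.zeros(self.sizes[index], dtype=torch.long)
    def __len__(self):
        return len(self.sizes)
    def num_tokens(self, index):
        return self.sizes[index]
    def size(self, index):
        return self.sizes[index]

def _batching_demo():
    ds = _ToySizesDataset([3, 7, 2, 11, 5])
    indices, ignored = ds.filter_indices_by_size(ds.ordered_indices(), max_sizes=10)
    assert ignored == [3]  # the length-11 example is dropped
    batches = ds.batch_by_size(indices, max_tokens=8)
    assert sum(len(b) for b in batches) == len(indices)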
sign-topic
sign-topic-main/fairseq/data/transform_eos_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch from . import FairseqDataset class TransformEosDataset(FairseqDataset): """A :class:`~fairseq.data.FairseqDataset` wrapper that appends/prepends/strips EOS. Note that the transformation is applied in :func:`collater`. Args: dataset (~fairseq.data.FairseqDataset): dataset to wrap eos (int): index of the end-of-sentence symbol append_eos_to_src (bool, optional): append EOS to the end of src remove_eos_from_src (bool, optional): remove EOS from the end of src append_eos_to_tgt (bool, optional): append EOS to the end of tgt remove_eos_from_tgt (bool, optional): remove EOS from the end of tgt """ def __init__( self, dataset, eos, append_eos_to_src=False, remove_eos_from_src=False, append_eos_to_tgt=False, remove_eos_from_tgt=False, has_target=True, ): if not isinstance(dataset, FairseqDataset): raise ValueError("dataset must be an instance of FairseqDataset") if append_eos_to_src and remove_eos_from_src: raise ValueError("cannot combine append_eos_to_src and remove_eos_from_src") if append_eos_to_tgt and remove_eos_from_tgt: raise ValueError("cannot combine append_eos_to_tgt and remove_eos_from_tgt") self.dataset = dataset self.eos = torch.LongTensor([eos]) self.append_eos_to_src = append_eos_to_src self.remove_eos_from_src = remove_eos_from_src self.append_eos_to_tgt = append_eos_to_tgt self.remove_eos_from_tgt = remove_eos_from_tgt self.has_target = has_target # precompute how we should adjust the reported sizes self._src_delta = 0 self._src_delta += 1 if append_eos_to_src else 0 self._src_delta -= 1 if remove_eos_from_src else 0 self._tgt_delta = 0 self._tgt_delta += 1 if append_eos_to_tgt else 0 self._tgt_delta -= 1 if remove_eos_from_tgt else 0 self._checked_src = False self._checked_tgt = False def _check_src(self, src, expect_eos): if not self._checked_src: assert (src[-1] == self.eos[0]) == expect_eos self._checked_src = True def _check_tgt(self, tgt, expect_eos): if self.has_target and not self._checked_tgt: assert (tgt[-1] == self.eos[0]) == expect_eos self._checked_tgt = True def __getitem__(self, index): return self.dataset[index] def __len__(self): return len(self.dataset) def collater(self, samples): def transform(item): if self.append_eos_to_src: self.eos = self.eos.to(device=item["source"].device) self._check_src(item["source"], expect_eos=False) item["source"] = torch.cat([item["source"], self.eos]) if self.remove_eos_from_src: self.eos = self.eos.to(device=item["source"].device) self._check_src(item["source"], expect_eos=True) item["source"] = item["source"][:-1] if self.append_eos_to_tgt: self.eos = self.eos.to(device=item["target"].device) self._check_tgt(item["target"], expect_eos=False) item["target"] = torch.cat([item["target"], self.eos]) if self.remove_eos_from_tgt: self.eos = self.eos.to(device=item["target"].device) self._check_tgt(item["target"], expect_eos=True) item["target"] = item["target"][:-1] return item samples = list(map(transform, samples)) return self.dataset.collater(samples) def num_tokens(self, index): return self.dataset.num_tokens(index) def size(self, index): if self.has_target: src_len, tgt_len = self.dataset.size(index) return (src_len + self._src_delta, tgt_len + self._tgt_delta) else: return self.dataset.size(index) def ordered_indices(self): # NOTE: we assume that the ordering does not change based on the # addition or removal of eos return 
self.dataset.ordered_indices() @property def supports_prefetch(self): return getattr(self.dataset, "supports_prefetch", False) def prefetch(self, indices): return self.dataset.prefetch(indices)
4,575
36.818182
88
py
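# Usage sketch for TransformEosDataset above (illustrative only): the EOS edit
# happens inside collater(), so the toy dataset's collater simply returns the
# (already transformed) samples for inspection. eos=2 is an arbitrary index.
import torch

def _transform_eos_demo(eos=2):
    class _Toy(FairseqDataset):
        def __init__(self, items):
            self.items = items
        def __getitem__(self, index):
            return {"source": torch.tensor(self.items[index])}
        def __len__(self):
            return len(self.items)
        def collater(self, samples):
            return samples  # pass-through for the demo

    ds = TransformEosDataset(
        _Toy([[4, 5], [6]]), eos=eos, append_eos_to_src=True, has_target=False
    )
    out = ds.collater([ds[0], ds[1]])
    assert out[0]["source"].tolist() == [4, 5, eos]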
sign-topic
sign-topic-main/fairseq/data/sign_language/sign_features_dataset.py
# This code is inspired by the raw_audio_dataset implementation (commit: 1575f30) # # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import sys import logging from enum import Enum from pathlib import Path from typing import List, Union, Optional import h5py import numpy as np import pandas as pd import torch import torch.nn.functional as F import pdb from fairseq.data import FairseqDataset, BaseWrapperDataset, RandomCropDataset from fairseq.data.data_utils import ( compute_mask_indices, numpy_seed ) from fairseq.data.text_compressor import TextCompressor, TextCompressionLevel logger = logging.getLogger(__name__) class SignFeatsType(Enum): keypoints = "keypoints" i3d = "i3d" CNN2d = "CNN2d" class SignFeatsDataset(FairseqDataset): def __init__( self, ids: List[str], feats_file: Union[Path, str], sizes: List[int] = None, feats_type: SignFeatsType = SignFeatsType.keypoints, bodyparts: Optional[List[str]] = None, feat_dims: List[int] = [0, 1, 2, 3], min_sample_size: int = 0, max_sample_size: Optional[int] = None, shuffle: bool = True, normalize: bool = False, text_compression_level: TextCompressionLevel = TextCompressionLevel.none, ): super().__init__() self.text_compressor = TextCompressor(level=text_compression_level) self.ids = [self.text_compressor.compress(_id) for _id in ids] self.feats_file = h5py.File(feats_file, "r") # XXX: This might be a problem, check later if sizes is None: sizes = [] for _id in self.ids: _id = self.text_compressor.decompress(_id) sizes.append(np.array(self.feats_file[_id]).shape[0]) self.sizes = sizes self.feats_type = feats_type self.bodyparts = bodyparts self.feat_dims = feat_dims self.shuffle = shuffle self.normalize = normalize self.min_sample_size = min_sample_size self.max_sample_size = ( max_sample_size if max_sample_size is not None else sys.maxsize ) self.skipped_ids = [] for _id, size in zip(self.ids[:], self.sizes[:]): if size < self.min_sample_size or size > self.max_sample_size: self.sizes.pop(self.ids.index(_id)) self.ids.remove(_id) self.skipped_ids.append(self.text_compressor.decompress(_id)) logger.info(f"Skipped {len(self.skipped_ids)} sentences, that were too short or too long.") try: import pyarrow as pa self.ids = pa.array(self.ids) except: logger.debug( "Could not create a pyarrow array. 
Please install pyarrow for better performance" ) pass @staticmethod def list_avail_ids(feats_file: Union[Path, str]): feats_file = h5py.File(feats_file, "r") return list(feats_file.keys()) @classmethod def from_manifest_file(cls, manifest_file: Union[str, Path], **kwargs): ids = [] sizes = [] manifest = pd.read_csv(manifest_file, sep="\t") for _, row in manifest.iterrows(): ids.append(row['SENTENCE_NAME']) size = int(row['END_FRAME']) - int(row['START_FRAME']) sizes.append(size) logger.info(f"loaded {len(ids)} samples") return cls(ids, sizes=sizes, **kwargs) def __getitem__(self, index): _id = self.ids[index] _id = _id if isinstance(self.ids, list) else _id.as_py() fn = self.text_compressor.decompress(_id) feats = torch.Tensor(np.array(self.feats_file[fn])).float() feats = self.postprocess(feats) return {"id": index, "h2s_id": _id, "source": feats} def __len__(self): return len(self.sizes) def postprocess(self, feats): if SignFeatsType[self.feats_type] is SignFeatsType.keypoints: #added SignFeatsType[] to be able to compare from fairseq.data.sign_language.utils import ( select_keypoints_by_bodypart, select_keypoints_by_dimension, ) # FIXME: check how to do this imports better feats, n_feats = select_keypoints_by_bodypart(feats, self.bodyparts) feats = select_keypoints_by_dimension(feats, self.feat_dims) feats_split = feats.reshape(-1, n_feats, 3).permute(2, 0, 1) with torch.no_grad(): feats_norm_split = F.layer_norm(feats_split, feats_split.shape[1:]) feats = feats_norm_split.permute(1, 2, 0).reshape(-1, n_feats * 3).contiguous() elif SignFeatsType[self.feats_type] is SignFeatsType.i3d or SignFeatsType[self.feats_type] is SignFeatsType.CNN2d: # should we actually normalize CNN2d features? with torch.no_grad(): feats = F.layer_norm(feats, feats.shape) #check this normalization else: raise NotImplementedError(f"Using {self.feats_type} which is not SignFeatsType.keypoints or SignFeatsType.i3d or SignFeatsType.2dCNN") return feats def collater(self, samples): samples = [s for s in samples if s["source"] is not None] if len(samples) == 0: return {} sources = [s["source"] for s in samples] sizes = [len(s) for s in sources] collated_sources = sources[0].new_zeros(len(sources), max(sizes), sources[0].shape[-1]) for i, (source, size) in enumerate(zip(sources, sizes)): diff = size - max(sizes) collated_sources[i] = torch.cat( [source, source.new_full((-diff, source.shape[-1]), 0.0)] ) return { "id": torch.LongTensor([s["id"] for s in samples]), "net_input": { "src_tokens": collated_sources, "src_lengths": torch.Tensor(sizes) # FIXME: If you use buckets } } def num_tokens(self, index): return self.size(index) def size(self, index): return self.sizes[index] def ordered_indices(self): if self.shuffle: order = np.lexsort( [np.random.permutation(len(self)), np.array(self.sizes)] ) return order[::-1] else: return np.arange(len(self)) # TODO: In task, if compute_mask_indices=True, create dataset of this type # TODO: In task, if using this, it may be useful to wrap it also with RandomCropSignFeatsDataset (remember paddings) class MaskSignFeatsDataset(BaseWrapperDataset): def __init__( self, dataset: SignFeatsDataset, **mask_compute_kwargs, ): super().__init__(dataset) self.mask_compute_kwargs = mask_compute_kwargs self._features_size_map = {} self._C = mask_compute_kwargs["encoder_embed_dim"] self._conv_feature_layers = eval(mask_compute_kwargs["conv_feature_layers"]) def _compute_mask_indices(self, dims, padding_mask): # Create masks for Sign2vec pretraining raise NotImplementedError("This feature is still 
not available") B, T, C = dims mask_indices, mask_channel_indices = None, None if self.mask_compute_kwargs["mask_prob"] > 0: mask_indices = compute_mask_indices( (B, T), padding_mask, self.mask_compute_kwargs["mask_prob"], self.mask_compute_kwargs["mask_length"], self.mask_compute_kwargs["mask_selection"], self.mask_compute_kwargs["mask_other"], min_masks=2, no_overlap=self.mask_compute_kwargs["no_mask_overlap"], min_space=self.mask_compute_kwargs["mask_min_space"], ) mask_indices = torch.from_numpy(mask_indices) if self.mask_compute_kwargs["mask_channel_prob"] > 0: mask_channel_indices = compute_mask_indices( (B, C), None, self.mask_compute_kwargs["mask_channel_prob"], self.mask_compute_kwargs["mask_channel_length"], self.mask_compute_kwargs["mask_channel_selection"], self.mask_compute_kwargs["mask_channel_other"], no_overlap=self.mask_compute_kwargs["no_mask_channel_overlap"], min_space=self.mask_compute_kwargs["mask_channel_min_space"], ) mask_channel_indices = ( torch.from_numpy(mask_channel_indices).unsqueeze(1).expand(-1, T, -1) ) return mask_indices, mask_channel_indices def _get_mask_indices_dims(self, size, padding=0, dilation=1): raise NotImplementedError("This feature is still not available") if size not in self._features_size_map: L_in = size for (_, kernel_size, stride) in self._conv_feature_layers: L_out = L_in + 2 * padding - dilation * (kernel_size - 1) - 1 L_out = 1 + L_out // stride L_in = L_out self._features_size_map[size] = L_out return self._features_size_map[size] def collater(self, samples): out = self.dataset.collater(samples) raise NotImplementedError("This feature is still not available") B = out["net_input"]["source"].size(0) T = self._get_mask_indices_dims(out["net_input"]["source"].size(-2)) padding_mask_reshaped = out["net_input"]["padding_mask"].clone() extra = padding_mask_reshaped.size(1) % T if extra > 0: padding_mask_reshaped = padding_mask_reshaped[:, :-extra] padding_mask_reshaped = padding_mask_reshaped.view( padding_mask_reshaped.size(0), T, -1 ) padding_mask_reshaped = padding_mask_reshaped.all(-1) out["net_input"]["padding_count"] = padding_mask_reshaped.sum(-1).max().item() mask_indices, mask_channel_indices = self._compute_mask_indices( (B, T, self._C), padding_mask_reshaped, ) out["net_input"]["mask_indices"] = mask_indices out["net_input"]["mask_channel_indices"] = mask_channel_indices out["sample_size"] = mask_indices.sum().item() return out class RandomCropSignFeatsDataset(RandomCropDataset): def __init__( self, dataset: SignFeatsDataset, truncation_length: int, **kwargs, ): super().__init__(dataset, truncation_length, **kwargs) def __getitem__(self, index): with numpy_seed(self.seed, self.epoch, index): item = self.dataset[index] item_len = item["source"].size(0) excess = item_len - self.truncation_length if excess > 0: start_idx = np.random.randint(0, excess) item["source"] = item["source"][start_idx : start_idx + self.truncation_length] return item
11,111
37.317241
146
py
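# Usage sketch for SignFeatsDataset above (illustrative only): two fake i3d
# feature sequences are written to a throwaway HDF5 file, loaded back, and
# collated into a zero-padded batch. The ids, shapes and /tmp path are made up
# for the example; only the i3d (layer-norm) branch of postprocess is used.
import h5py
import numpy as np

def _sign_feats_demo(h5_path="/tmp/demo_feats.h5"):
    with h5py.File(h5_path, "w") as f:
        f.create_dataset("clip_a", data=np.random.rand(12, 1024).astype(np.float32))
        f.create_dataset("clip_b", data=np.random.rand(7, 1024).astype(np.float32))

    ds = SignFeatsDataset(
        ids=SignFeatsDataset.list_avail_ids(h5_path),
        feats_file=h5_path,
        feats_type="i3d",
        shuffle=False,
    )
    batch = ds.collater([ds[0], ds[1]])
    # the shorter sequence is zero-padded up to the longest one in the batch
    assert batch["net_input"]["src_tokens"].shape == (2, 12, 1024)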
sign-topic
sign-topic-main/fairseq/data/sign_language/utils.py
import torch import torch.nn.functional as F from typing import List, Tuple, Optional from fairseq.data.sign_language import SignFeatsType def get_num_feats( feats_type: SignFeatsType, bodyparts: Optional[List[str]] = None, feat_dims: Optional[List[int]] = None ) -> int: num_feats = { SignFeatsType.i3d: 1024, SignFeatsType.CNN2d: 1024, SignFeatsType.video: (720, 1280), SignFeatsType.keypoints: { 'face': 70, 'upperbody': 8, 'lowerbody': 16, 'lefthand': 21, 'righthand': 21 }, SignFeatsType.mediapipe_keypoints: { 'face': 70, 'upperbody': 8, 'lowerbody': 16, 'lefthand': 21, 'righthand': 21 }, SignFeatsType.rotational: 288, SignFeatsType.mediapipe_rotational: 288, SignFeatsType.text: 256, # TODO: decide which dim to return, or if this function should be called at all when using text as input SignFeatsType.text_albert: 768, SignFeatsType.spot_align: 256, SignFeatsType.spot_align_albert: 768, } if (feats_type is SignFeatsType.i3d or feats_type is SignFeatsType.CNN2d or feats_type is SignFeatsType.video or feats_type is SignFeatsType.rotational or feats_type is SignFeatsType.mediapipe_rotational or feats_type is SignFeatsType.text or feats_type is SignFeatsType.text_albert or feats_type is SignFeatsType.spot_align or feats_type is SignFeatsType.spot_align_albert ): return num_feats[feats_type] elif feats_type in [SignFeatsType.keypoints, SignFeatsType.mediapipe_keypoints]: return sum([num_feats[feats_type][b] for b in bodyparts]) * len(feat_dims) else: raise AttributeError(f"Feat type selected not supported: {feats_type}") def select_keypoints_by_bodypart( keypoints: torch.Tensor, feats_type: SignFeatsType, bodyparts: Optional[List[str]] = None, datasetType: str = 'How2Sign', ) -> Tuple[torch.Tensor, int]: if datasetType == 'Phoenix' or SignFeatsType[feats_type] in [SignFeatsType.mediapipe_keypoints]: # TODO: make sure that in task the correct value for keypoints_type is passed return keypoints.reshape(-1, 50*3).contiguous(), 50 BODY_IDX = { 'face': torch.arange(70), # 0-69 'upperbody': torch.arange(70,78), # 70-78 'lowerbody': torch.arange(78,95), # 79-94 'lefthand': torch.arange(95,116), # 95-115 'righthand': torch.arange(116,137) # 116-136 } if bodyparts is None: bodyparts = list(BODY_IDX.keys()) assert len(bodyparts) > 0, "You haven't selected any bodypart!" assert all([b in BODY_IDX.keys() for b in bodyparts]), f"You have selected a bodypart that doesn't exist! The options are: {list(BODY_IDX.keys())}" selected_idx = torch.cat([BODY_IDX[b] for b in bodyparts]) keypoints = keypoints.reshape(-1, 137, 4) keypoints_selected = keypoints[:, selected_idx] keypoints = keypoints_selected.reshape(-1, len(selected_idx) * 4).contiguous() return keypoints, len(selected_idx) def select_keypoints_by_dimension( keypoints: torch.Tensor, dimensions: List[int], feats_type: SignFeatsType, datasetType: str = 'How2Sign', ) -> torch.Tensor: assert len(dimensions) > 0, "You haven't selected any dimensions!" assert all([idx<4 for idx in dimensions]), "You have selected a dimension that doesn't exist! 
The options are: 0 for x, 1 for y, 2 for z and 3 for confidence score " if datasetType == 'Phoenix' or SignFeatsType[feats_type] in [SignFeatsType.mediapipe_keypoints]: # TODO: make sure that in task the correct value for keypoints_type is passed return keypoints.reshape(-1, 50*3).contiguous() selected_idx = torch.LongTensor(dimensions) n_keypoints = int(keypoints.size(-1) / 4) keypoints = keypoints.reshape(-1, n_keypoints, 4) keypoints_selected = keypoints[:, :, selected_idx] keypoints = keypoints_selected.reshape(-1, n_keypoints * len(selected_idx)).contiguous() return keypoints
4,214
38.764151
179
py
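# Usage sketch for the keypoint helpers above (illustrative only): it checks
# that get_num_feats() agrees with what select_keypoints_by_bodypart() and
# select_keypoints_by_dimension() actually return for OpenPose-style keypoints
# (137 points x 4 values per frame). It assumes the package-level SignFeatsType
# exposes the members referenced in get_num_feats; bodyparts/dims are arbitrary.
import torch

def _keypoint_selection_demo():
    bodyparts = ["upperbody", "lefthand", "righthand"]
    feat_dims = [0, 1, 2]  # drop the confidence score
    frames = torch.rand(10, 137 * 4)  # 10 frames of flattened keypoints

    kept, n_points = select_keypoints_by_bodypart(
        frames, feats_type="keypoints", bodyparts=bodyparts
    )
    kept = select_keypoints_by_dimension(kept, feat_dims, feats_type="keypoints")

    expected = get_num_feats(
        SignFeatsType.keypoints, bodyparts=bodyparts, feat_dims=feat_dims
    )
    assert kept.shape == (10, expected)  # (8 + 21 + 21) * 3 = 150 dims per frame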
sign-topic
sign-topic-main/fairseq/data/sign_language/SL_topic_detection_dataset.py
import os import sys import logging from enum import Enum from pathlib import Path from typing import List, Union, Optional import h5py import numpy as np import pandas as pd import torchvision import torch import torch.nn.functional as F from fairseq.data import FairseqDataset from fairseq.data.text_compressor import TextCompressor, TextCompressionLevel logger = logging.getLogger(__name__) class SignFeatsType(Enum): text = "text" text_albert = "text_albert" spot_align = "spot_align" spot_align_albert = "spot_align_albert" keypoints = "keypoints" mediapipe_keypoints = "mediapipe_keypoints" rotational = "rotational" mediapipe_rotational = "mediapipe_rotational" i3d = "i3d" CNN2d = "CNN2d" video = 'video' class SLTopicDetectionDataset(FairseqDataset): def __init__( self, manifest: pd.DataFrame, ids: List[str], feats_path: Union[Path, str], feats_type: str, sizes: List[int] = None, bodyparts: Optional[List[str]] = None, feat_dims: List[int] = [0, 1, 2, 3], min_sample_size: int = 0, max_sample_size: Optional[int] = None, shuffle: bool = True, normalize: bool = False, text_compression_level: TextCompressionLevel = TextCompressionLevel.none, ): super().__init__() self.text_compressor = TextCompressor(level=text_compression_level) self.manifest = manifest # if feats_type == SignFeatsType.video, feats_path is the directory # where .mp4 files of the corresponding split are stored self.feats_path = feats_path self.ids = [_id for _id in ids] if feats_type not in ['video']: if feats_type in ['text', 'spot_align']: self.feats_file = self.manifest.set_index('VIDEO_ID').to_dict()['TEXT'] else: self.feats_file = h5py.File(self.feats_path, 'r') if sizes is None: sizes = [] for _id in self.ids: _id = _id sizes.append(np.array(self.feats_file[_id]).shape[0]) self.sizes = sizes self.feats_type = feats_type self.bodyparts = bodyparts self.feat_dims = feat_dims self.shuffle = shuffle self.normalize = normalize self.min_sample_size = min_sample_size self.max_sample_size = ( max_sample_size if max_sample_size is not None else sys.maxsize ) self.skipped_ids = [] for _id, size in zip(self.ids[:], self.sizes[:]): if size < self.min_sample_size or size > self.max_sample_size: self.sizes.pop(self.ids.index(_id)) self.ids.remove(_id) self.skipped_ids.append(_id) logger.info(f"Skipped {len(self.skipped_ids)} input sequences, that were either too short or too long.") try: import pyarrow as pa self.ids = pa.array(self.ids) except: logger.debug( "Could not create a pyarrow array. 
Please install pyarrow for better performance" ) pass @staticmethod def list_avail_ids(self): return self.ids @classmethod def from_manifest_file(cls, manifest_file: Union[str, Path], **kwargs): ids = [] sizes = [] manifest = pd.read_csv(manifest_file, sep="\t") for _, row in manifest.iterrows(): ids.append(row['VIDEO_ID']) size = int(row['END_FRAME']) - int(row['START_FRAME']) + 1 sizes.append(size) logger.info(f"loaded {len(ids)} samples") return cls(manifest, ids, sizes=sizes, **kwargs) def __getitem__(self, index): _id = self.ids[index] _id = _id if isinstance(self.ids, list) else _id.as_py() fn = _id if self.feats_type in ['video']: # load corresponding mp4 # there is no repeated value in column VIDEO_ID of self.manifest video_name = self.manifest[self.manifest.VIDEO_ID.str.match(fn)]['VIDEO_NAME'].values[0] feats = torchvision.io.read_video(filename=os.path.join(self.feats_path, video_name + '.mp4'), end_pts=5115)[0] feats = feats.permute(0, 3, 1, 2) elif self.feats_type in ['text', 'spot_align']: feats = torch.Tensor(np.array(self.feats_file[fn])) else: feats = torch.Tensor(np.array(self.feats_file[fn])).float() feats = self.postprocess(feats) return {"id": index, "h2s_id": fn, "source": feats} def __len__(self): return len(self.sizes) def postprocess(self, feats): from fairseq.data.sign_language.utils import ( select_keypoints_by_bodypart, select_keypoints_by_dimension, ) if SignFeatsType[self.feats_type] in [SignFeatsType.keypoints, SignFeatsType.mediapipe_keypoints]: feats, n_feats = select_keypoints_by_bodypart(feats, feats_type=self.feats_type, bodyparts=self.bodyparts) feats = select_keypoints_by_dimension(feats, self.feat_dims, feats_type=self.feats_type) feats_split = feats.reshape(-1, n_feats, 3).permute(2, 0, 1) with torch.no_grad(): feats_norm_split = F.layer_norm(feats_split, feats_split.shape[1:]) feats = feats_norm_split.permute(1, 2, 0).reshape(-1, n_feats * 3).contiguous() elif SignFeatsType[self.feats_type] in [SignFeatsType.rotational, SignFeatsType.mediapipe_rotational]: feats_split = feats.reshape(-1, 48, 6).permute(2, 0, 1) with torch.no_grad(): feats_norm_split = F.layer_norm(feats_split, feats_split.shape[1:]) feats = feats_norm_split.permute(1, 2, 0).reshape(-1, 48 * 6).contiguous() elif (SignFeatsType[self.feats_type] is SignFeatsType.i3d or SignFeatsType[self.feats_type] is SignFeatsType.CNN2d or SignFeatsType[self.feats_type] is SignFeatsType.video or SignFeatsType[self.feats_type] is SignFeatsType.spot_align_albert or SignFeatsType[self.feats_type] is SignFeatsType.text_albert): with torch.no_grad(): feats = F.layer_norm(feats.float(), feats.shape) elif SignFeatsType[self.feats_type] in [SignFeatsType.text, SignFeatsType.spot_align]: pass else: raise NotImplementedError(f'Using {self.feats_type} which is not SignFeatsType.i3d' ' nor SignFeatsType.spot_align_albert' ' nor SignFeatsType.keypoints nor SignFeatsType.mediapipe_keypoints' ' nor SignFeatsType.rotational nor SignFeatsType.mediapipe_rotational' ' nor SignFeatsType.2dCNN nor SignFeatsType.video' ' nor SignFeatsType.text nor SignFeatsType.spot_align') return feats def collater(self, samples): samples = [s for s in samples if s["source"] is not None] if len(samples) == 0: return {} sources = [s["source"] for s in samples] sizes = [len(s) for s in sources] if self.feats_type not in ['video']: collated_sources = sources[0].new_zeros(len(sources), max(sizes), sources[0].shape[-1]) else: collated_sources = sources[0].new_zeros(len(sources), max(sizes), *sources[0].shape[-3:]) for i, (source, size) in 
enumerate(zip(sources, sizes)): diff = size - max(sizes) if self.feats_type not in ['video']: collated_sources[i] = torch.cat( [source, source.new_full((-diff, source.shape[-1]), 0.0)] ) else: collated_sources[i] = torch.cat( [source, source.new_full((-diff, *source.shape[-3:]), 0.0)] ) return { 'id': torch.LongTensor([s['id'] for s in samples]), 'net_input': { 'src_tokens': collated_sources, 'src_lengths': torch.Tensor(sizes) # FIXME: If you use buckets } } def num_tokens(self, index): return self.size(index) def size(self, index): return self.sizes[index] def ordered_indices(self): if self.shuffle: order = np.lexsort( [np.random.permutation(len(self)), np.array(self.sizes)] ) return order[::-1] else: return np.arange(len(self))
8,658
38.903226
123
py
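SLTopicDetectionDataset.collater above pads every feature sequence in a mini-batch with zeros up to the longest sequence before stacking them into one tensor. The snippet below is an illustrative, standalone re-statement of that padding step using random tensors instead of real How2Sign features.

import torch

# three variable-length sequences of 5-dim features, standing in for dataset items
sources = [torch.randn(4, 5), torch.randn(7, 5), torch.randn(2, 5)]
sizes = [len(s) for s in sources]

collated = sources[0].new_zeros(len(sources), max(sizes), sources[0].shape[-1])
for i, (source, size) in enumerate(zip(sources, sizes)):
    diff = size - max(sizes)
    # append -diff rows of zeros, exactly as in the dataset's collater
    collated[i] = torch.cat([source, source.new_full((-diff, source.shape[-1]), 0.0)])

print(collated.shape, sizes)  # torch.Size([3, 7, 5]) [4, 7, 2]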
sign-topic
sign-topic-main/fairseq/data/multilingual/sampled_multi_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import datetime import hashlib import logging import time from bisect import bisect_right from collections import OrderedDict, defaultdict from enum import Enum from typing import List import numpy as np import torch from fairseq.data import FairseqDataset, data_utils from fairseq.distributed import utils as distributed_utils def get_time_gap(s, e): return ( datetime.datetime.fromtimestamp(e) - datetime.datetime.fromtimestamp(s) ).__str__() logger = logging.getLogger(__name__) def default_virtual_size_func(datasets, ratios, max_scale_up=1.5): sizes = [len(d) for d in datasets] if ratios is None: return sum(sizes) largest_idx = np.argmax(sizes) largest_r = ratios[largest_idx] largest_s = sizes[largest_idx] # set virtual sizes relative to the largest dataset virtual_sizes = [(r / largest_r) * largest_s for r in ratios] vsize = sum(virtual_sizes) max_size = sum(sizes) * max_scale_up return int(vsize if vsize < max_size else max_size) class CollateFormat(Enum): single = 1 ordered_dict = 2 class SampledMultiDataset(FairseqDataset): """Samples from multiple sub-datasets according to given sampling ratios. Args: datasets ( List[~torch.utils.data.Dataset] or OrderedDict[str, ~torch.utils.data.Dataset] ): datasets sampling_ratios (List[float]): list of probability of each dataset to be sampled (default: None, which corresponds to concatenating all dataset together). seed (int): RNG seed to use (default: 2). epoch (int): starting epoch number (default: 1). eval_key (str, optional): a key used at evaluation time that causes this instance to pass-through batches from *datasets[eval_key]*. collate_format (CollateFormat): collater output format, either CollateFormat.ordered_dict or CollateFormat.single (default: CollateFormat.single) where CollateFormat.single configures the collater to output batches of data mixed from all sub-datasets, and CollateFormat.ordered_dict configures the collater to output a dictionary of batches indexed by keys of sub-datasets. Note that not all sub-datasets will present in a single batch in both formats. virtual_size (int, or callable): the expected virtual size of the dataset (default: default_virtual_size_func). split (str): the split of the data, e.g. 'train', 'valid' or 'test'. shared_collater (bool): whether or not to all sub-datasets have the same collater. shuffle (bool): whether or not to shuffle data (default: True). 
""" def __init__( self, datasets, sampling_ratios=None, seed=2, epoch=1, eval_key=None, collate_format=CollateFormat.single, virtual_size=default_virtual_size_func, split="", shared_collater=False, shuffle=True, ): super().__init__() self.shared_collater = shared_collater self.shuffle = shuffle if isinstance(datasets, OrderedDict): self.keys = list(datasets.keys()) datasets = list(datasets.values()) elif isinstance(datasets, List): self.keys = list(range(len(datasets))) else: raise AssertionError() self.datasets = datasets self.split = split self.eval_key = eval_key if self.eval_key is not None: self.collate_format = CollateFormat.single else: self.collate_format = collate_format self.seed = seed self._cur_epoch = None self.cumulated_sizes = None # self.datasets[k][self._cur_indices[i]] is the data item i in this sampled dataset # namely, data item i is sampled from the kth sub-dataset self.datasets[k] # where self.cumulated_sizes[k-1] <= i < self.cumulated_sizes[k] self._cur_indices = None self._sizes = None self.virtual_size_per_dataset = None # caching properties self._reset_cached_properties() self.setup_sampling(sampling_ratios, virtual_size) self.set_epoch(epoch) def _clean_if_not_none(self, var_list): for v in var_list: if v is not None: del v def _reset_cached_properties(self): self._clean_if_not_none([self._sizes, self._cur_indices]) self._sizes = None self._cur_indices = None def setup_sampling(self, sample_ratios, virtual_size): sizes = [len(d) for d in self.datasets] if sample_ratios is None: # default back to concating datasets self.sample_ratios = None self.virtual_size = sum(sizes) else: if not isinstance(sample_ratios, np.ndarray): sample_ratios = np.array(sample_ratios) self.sample_ratios = sample_ratios virtual_size = ( default_virtual_size_func if virtual_size is None else virtual_size ) self.virtual_size = ( virtual_size(self.datasets, self.sample_ratios) if callable(virtual_size) else virtual_size ) def adjust_sampling(self, epoch, sampling_ratios, virtual_size): if sampling_ratios is not None: sampling_ratios = self._sync_sample_ratios(sampling_ratios) self.setup_sampling(sampling_ratios, virtual_size) def _sync_sample_ratios(self, ratios): # in case the ratios are not precisely the same across processes # also to ensure every procresses update the ratios in the same pace ratios = torch.DoubleTensor(ratios) if torch.distributed.is_initialized(): if torch.cuda.is_available(): distributed_utils.all_reduce( ratios.cuda(), group=distributed_utils.get_data_parallel_group() ) else: distributed_utils.all_reduce( ratios, group=distributed_utils.get_data_parallel_group() ) ret = ratios.cpu() ret = ret.numpy() return ret def random_choice_in_dataset(self, rng, dataset, choice_size): if hasattr(dataset, "random_choice_in_dataset"): return dataset.random_choice_in_dataset(rng, choice_size) dataset_size = len(dataset) return rng.choice( dataset_size, choice_size, replace=(choice_size > dataset_size) ) def get_virtual_indices(self, rng, datasets, sample_ratios, virtual_size): def get_counts(sample_ratios): counts = np.array([virtual_size * r for r in sample_ratios], dtype=np.int64) diff = virtual_size - counts.sum() assert diff >= 0 # due to round-offs, the size might not match the desired sizes if diff > 0: dataset_indices = rng.choice( len(sample_ratios), size=diff, p=sample_ratios ) for i in dataset_indices: counts[i] += 1 return counts def get_in_dataset_indices(datasets, sizes, sample_ratios): counts = get_counts(sample_ratios) # uniformally sample desired counts for each 
dataset # if the desired counts are large, sample with replacement: indices = [ self.random_choice_in_dataset(rng, d, c) for c, d in zip(counts, datasets) ] return indices sizes = [len(d) for d in datasets] if sample_ratios is None: # default back to concating datasets in_dataset_indices = [list(range(s)) for s in sizes] virtual_sizes_per_dataset = sizes else: ratios = sample_ratios / sample_ratios.sum() in_dataset_indices = get_in_dataset_indices(datasets, sizes, ratios) virtual_sizes_per_dataset = [len(d) for d in in_dataset_indices] virtual_sizes_per_dataset = np.array(virtual_sizes_per_dataset, np.int64) cumulative_sizes = np.cumsum(virtual_sizes_per_dataset) assert sum(virtual_sizes_per_dataset) == virtual_size assert cumulative_sizes[-1] == virtual_size if virtual_size < sum(sizes): logger.warning( f"virtual data size ({virtual_size}) is less than real data size ({sum(sizes)})." " If virtual size << real data size, there could be data coverage issue." ) in_dataset_indices = np.hstack(in_dataset_indices) return in_dataset_indices, cumulative_sizes, virtual_sizes_per_dataset def _get_dataset_and_index(self, index): i = bisect_right(self.cumulated_sizes, index) return i, self._cur_indices[index] def __getitem__(self, index): # self.__getitem__(index) returns self.datasets[k][self._cur_indices[index]] # where k satisfies self.cumulated_sizes[k - 1] <= k < self.cumulated_sizes[k] ds_idx, ds_sample_idx = self._get_dataset_and_index(index) ret = (ds_idx, self.datasets[ds_idx][ds_sample_idx]) return ret def num_tokens(self, index): return self.sizes[index].max() def num_tokens_vec(self, indices): sizes_vec = self.sizes[np.array(indices)] # max across all dimensions but first one return np.amax(sizes_vec, axis=tuple(range(1, len(sizes_vec.shape)))) def size(self, index): return self.sizes[index] def __len__(self): return self.virtual_size def collater(self, samples, **extra_args): """Merge a list of samples to form a mini-batch.""" if len(samples) == 0: return None if self.collate_format == "ordered_dict": collect_samples = [[] for _ in range(len(self.datasets))] for (i, sample) in samples: collect_samples[i].append(sample) batch = OrderedDict( [ (self.keys[i], dataset.collater(collect_samples[i])) for i, (key, dataset) in enumerate(zip(self.keys, self.datasets)) if len(collect_samples[i]) > 0 ] ) elif self.shared_collater: batch = self.datasets[0].collater([s for _, s in samples]) else: samples_dict = defaultdict(list) pad_to_length = ( defaultdict(int) if "pad_to_length" not in extra_args else extra_args["pad_to_length"] ) for ds_idx, s in samples: pad_to_length["source"] = max( pad_to_length["source"], s["source"].size(0) ) if s["target"] is not None: pad_to_length["target"] = max( pad_to_length["target"], s["target"].size(0) ) samples_dict[ds_idx].append(s) batches = [ self.datasets[i].collater(samples_dict[i], pad_to_length=pad_to_length) for i in range(len(self.datasets)) if len(samples_dict[i]) > 0 ] def straight_data(tensors): batch = torch.cat(tensors, dim=0) return batch src_lengths = straight_data( [b["net_input"]["src_lengths"] for b in batches] ) src_lengths, sort_order = src_lengths.sort(descending=True) def straight_order(tensors): batch = straight_data(tensors) return batch.index_select(0, sort_order) batch = { "id": straight_order([b["id"] for b in batches]), "nsentences": sum(b["nsentences"] for b in batches), "ntokens": sum(b["ntokens"] for b in batches), "net_input": { "src_tokens": straight_order( [b["net_input"]["src_tokens"] for b in batches] ), "src_lengths": 
src_lengths, }, "target": straight_order([b["target"] for b in batches]) if batches[0]["target"] is not None else None, } if "prev_output_tokens" in batches[0]["net_input"]: batch["net_input"]["prev_output_tokens"] = straight_order( [b["net_input"]["prev_output_tokens"] for b in batches] ) if "src_lang_id" in batches[0]["net_input"]: batch["net_input"]["src_lang_id"] = straight_order( [b["net_input"]["src_lang_id"] for b in batches] ) if "tgt_lang_id" in batches[0]: batch["tgt_lang_id"] = straight_order( [b["tgt_lang_id"] for b in batches] ) return batch @property def sizes(self): if self._sizes is not None: return self._sizes start_time = time.time() in_sub_dataset_indices = [ self._cur_indices[ 0 if i == 0 else self.cumulated_sizes[i - 1] : self.cumulated_sizes[i] ] for i in range(len(self.datasets)) ] sub_dataset_sizes = [ d.sizes[indices] for d, indices in zip(self.datasets, in_sub_dataset_indices) ] self._sizes = np.vstack(sub_dataset_sizes) logger.info(f"sizes() calling time: {get_time_gap(start_time, time.time())}") return self._sizes def ordered_indices(self): if self.shuffle: indices = np.random.permutation(len(self)) else: indices = np.arange(len(self)) sizes = self.sizes tgt_sizes = sizes[:, 1] if len(sizes.shape) > 0 and sizes.shape[1] > 1 else None src_sizes = ( sizes[:, 0] if len(sizes.shape) > 0 and sizes.shape[1] > 1 else sizes ) # sort by target length, then source length if tgt_sizes is not None: indices = indices[np.argsort(tgt_sizes[indices], kind="mergesort")] sort_indices = indices[np.argsort(src_sizes[indices], kind="mergesort")] return sort_indices def prefetch(self, indices): prefetch_indices = [[] for _ in range(len(self.datasets))] for i in indices: ds_idx, ds_sample_idx = self._get_dataset_and_index(i) prefetch_indices[ds_idx].append(ds_sample_idx) for i in range(len(prefetch_indices)): self.datasets[i].prefetch(prefetch_indices[i]) @property def can_reuse_epoch_itr_across_epochs(self): return False def set_epoch(self, epoch): super().set_epoch(epoch) if epoch == self._cur_epoch: # re-enter so return return for d in self.datasets: if hasattr(d, "set_epoch"): d.set_epoch(epoch) self._cur_epoch = epoch self._establish_virtual_datasets() def _establish_virtual_datasets(self): if self.sample_ratios is None and self._cur_indices is not None: # not a samping dataset, no need to resample if indices are already established return self._reset_cached_properties() start_time = time.time() # Generate a weighted sample of indices as a function of the # random seed and the current epoch. 
rng = np.random.RandomState( [ int( hashlib.sha1( str(self.__class__.__name__).encode("utf-8") ).hexdigest(), 16, ) % (2 ** 32), self.seed % (2 ** 32), # global seed self._cur_epoch, # epoch index, ] ) self._clean_if_not_none( [self.cumulated_sizes, self.virtual_size_per_dataset, self._sizes] ) self._sizes = None indices, cumulated_sizes, virtual_size_per_dataset = self.get_virtual_indices( rng, self.datasets, self.sample_ratios, self.virtual_size ) self._cur_indices = indices self.cumulated_sizes = cumulated_sizes self.virtual_size_per_dataset = virtual_size_per_dataset raw_sizes = [len(d) for d in self.datasets] sampled_sizes = self.virtual_size_per_dataset logger.info( f"[{self.split}] Raw sizes: {str(dict(zip(self.keys, raw_sizes)))}; " f"raw total size: {sum(raw_sizes)}" ) logger.info( f"[{self.split}] Resampled sizes: {str(dict(zip(self.keys, sampled_sizes)))}; " f"resampled total size: {sum(sampled_sizes)}" ) if self.sample_ratios is not None: logger.info( f"[{self.split}] Upsampling ratios: {str(dict(zip(self.keys, self.sample_ratios)))}" ) else: logger.info(f"[{self.split}] A concat dataset") logger.info( f"[{self.split}] virtual dataset established time: {get_time_gap(start_time, time.time())}" ) def filter_indices_by_size(self, indices, max_sizes): """Filter a list of sample indices. Remove those that are longer than specified in max_sizes. Args: indices (np.array): original array of sample indices max_sizes (int or list[int] or tuple[int]): max sample size, can be defined separately for src and tgt (then list or tuple) Returns: np.array: filtered sample array list: list of removed indices """ sizes = self.sizes tgt_sizes = sizes[:, 1] if len(sizes.shape) > 0 and sizes.shape[1] > 1 else None src_sizes = ( sizes[:, 0] if len(sizes.shape) > 0 and sizes.shape[1] > 1 else sizes ) return data_utils.filter_paired_dataset_indices_by_size( src_sizes, tgt_sizes, indices, max_sizes )
18,342
38.194444
119
py
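default_virtual_size_func in sampled_multi_dataset.py sets the virtual dataset size by scaling every sub-dataset relative to the largest one and capping the result at max_scale_up times the real total. A small numeric walk-through (standalone function that mirrors the logic above; the dataset sizes are invented):

import numpy as np

def virtual_size(sizes, ratios, max_scale_up=1.5):
    largest_idx = int(np.argmax(sizes))
    largest_r, largest_s = ratios[largest_idx], sizes[largest_idx]
    # scale every dataset so that the largest one keeps its real size
    vsize = sum((r / largest_r) * largest_s for r in ratios)
    return int(min(vsize, sum(sizes) * max_scale_up))

# 100k vs 1M examples sampled with equal probability: each scales to 1M (2M total),
# but the 1.5x cap on the 1.1M real examples wins
print(virtual_size([100_000, 1_000_000], [0.5, 0.5]))  # 1650000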
sign-topic
sign-topic-main/fairseq/data/multilingual/multilingual_utils.py
from enum import Enum
from typing import Dict, List, Optional, Sequence

import torch

from fairseq.data import Dictionary


class EncoderLangtok(Enum):
    """
    Prepend to the beginning of source sentence either the
    source or target language token. (src/tgt).
    """

    src = "src"
    tgt = "tgt"


class LangTokSpec(Enum):
    main = "main"
    mono_dae = "mono_dae"


class LangTokStyle(Enum):
    multilingual = "multilingual"
    mbart = "mbart"


@torch.jit.export
def get_lang_tok(
    lang: str, lang_tok_style: str, spec: str = LangTokSpec.main.value
) -> str:
    # TOKEN_STYLES can't be defined outside this fn since it needs to be
    # TorchScriptable.
    TOKEN_STYLES: Dict[str, str] = {
        LangTokStyle.mbart.value: "[{}]",
        LangTokStyle.multilingual.value: "__{}__",
    }

    if spec.endswith("dae"):
        lang = f"{lang}_dae"
    elif spec.endswith("mined"):
        lang = f"{lang}_mined"
    style = TOKEN_STYLES[lang_tok_style]
    return style.format(lang)


def augment_dictionary(
    dictionary: Dictionary,
    language_list: List[str],
    lang_tok_style: str,
    langtoks_specs: Sequence[str] = (LangTokSpec.main.value,),
    extra_data: Optional[Dict[str, str]] = None,
) -> None:
    for spec in langtoks_specs:
        for language in language_list:
            dictionary.add_symbol(
                get_lang_tok(lang=language, lang_tok_style=lang_tok_style, spec=spec)
            )

    if lang_tok_style == LangTokStyle.mbart.value or (
        extra_data is not None and LangTokSpec.mono_dae.value in extra_data
    ):
        dictionary.add_symbol("<mask>")
1,623
24.375
85
py
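get_lang_tok above only formats a language code according to the requested token style, optionally suffixing it for denoising or mined data. A short usage sketch, assuming the package above is installed or on PYTHONPATH; the language codes are arbitrary examples.

from fairseq.data.multilingual.multilingual_utils import get_lang_tok, LangTokStyle

print(get_lang_tok("de", LangTokStyle.multilingual.value))               # __de__
print(get_lang_tok("en_XX", LangTokStyle.mbart.value))                   # [en_XX]
print(get_lang_tok("de", LangTokStyle.multilingual.value, "mono_dae"))   # __de_dae__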
sign-topic
sign-topic-main/fairseq/data/multilingual/sampled_multi_epoch_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import hashlib import logging import math import numpy as np from fairseq.data import SampledMultiDataset from .sampled_multi_dataset import CollateFormat, default_virtual_size_func logger = logging.getLogger(__name__) class SampledMultiEpochDataset(SampledMultiDataset): """Samples from multiple sub-datasets according to sampling ratios using virtual epoch sizes to speed up dataloading. Args: datasets ( List[~torch.utils.data.Dataset] or OrderedDict[str, ~torch.utils.data.Dataset] ): datasets sampling_ratios (List[float]): list of probability of each dataset to be sampled (default: None, which corresponds to concating all dataset together). seed (int): RNG seed to use (default: 2). epoch (int): starting epoch number (default: 1). eval_key (str, optional): a key used at evaluation time that causes this instance to pass-through batches from *datasets[eval_key]*. collate_format (CollateFormat): collater output format, either CollateFormat.ordered_dict or CollateFormat.single (default: CollateFormat.single) where CollateFormat.single configures the collater to output batches of data mixed from all sub-datasets, and CollateFormat.ordered_dict configures the collater to output a dictionary of batches indexed by keys of sub-datasets. Note that not all sub-datasets will present in a single batch in both formats. virtual_size (int, or callable): the expected virtual size of the dataset (default: default_virtual_size_func). split (str): the split of the data, e.g. 'train', 'valid' or 'test'. virtual_epoch_size (int): virtual epoch size, the dataset will go through the data by this virtual epoch size one by one to speed up data loading, e.g. indicing and filtering can be performed whenever a virtual epoch is loaded without waiting for the whole dataset to be loaded. shared_collater (bool): whether or not to all sub-datasets have the same collater. shard_epoch (int): the real epoch number for shard selection. shuffle (bool): whether or not to shuffle data (default: True). 
""" def __init__( self, datasets, sampling_ratios=None, seed=2, epoch=1, eval_key=None, collate_format=CollateFormat.single, virtual_size=default_virtual_size_func, split="", virtual_epoch_size=None, shared_collater=False, shard_epoch=1, shuffle=True, ): self.virtual_epoch_size = virtual_epoch_size self._current_epoch_start_index = None self._random_global_indices = None self.shard_epoch = shard_epoch if shard_epoch is not None else 1 self.load_next_shard = None self._epoch_sizes = None super().__init__( datasets=datasets, sampling_ratios=sampling_ratios, seed=seed, epoch=epoch, eval_key=eval_key, collate_format=collate_format, virtual_size=virtual_size, split=split, shared_collater=shared_collater, shuffle=shuffle, ) def _setup(self, epoch): self.virtual_epoch_size = ( self.virtual_epoch_size if self.virtual_epoch_size is not None else self.virtual_size ) if self.virtual_epoch_size > self.virtual_size: logger.warning( f"virtual epoch size {self.virtual_epoch_size} " f"is greater than virtual dataset size {self.virtual_size}" ) self.virtual_epoch_size = self.virtual_size self.num_virtual_epochs = math.ceil(self.virtual_size / self.virtual_epoch_size) self._current_epoch_start_index = self._get_epoch_start_index(epoch) logger.info( f"virtual epoch size {self.virtual_epoch_size}; virtual dataset size {self.virtual_size}" ) def _map_epoch_index_to_global(self, index): index = self._current_epoch_start_index + index # add randomness return self._random_global_indices[index] @property def sizes(self): if self._epoch_sizes is not None: return self._epoch_sizes _sizes = super().sizes indices = self._random_global_indices[ self._current_epoch_start_index : self._current_epoch_start_index + len(self) ] self._epoch_sizes = _sizes[indices] # del super()._sizes to save memory del self._sizes self._sizes = None return self._epoch_sizes def _get_dataset_and_index(self, index): i = self._map_epoch_index_to_global(index) return super()._get_dataset_and_index(i) def __len__(self): return ( self.virtual_epoch_size if self._current_epoch_start_index + self.virtual_epoch_size < self.virtual_size else self.virtual_size - self._current_epoch_start_index ) def set_epoch(self, epoch): if self._current_epoch_start_index is None: # initializing epoch idnices of a virtual dataset self._setup(epoch) self._next_virtual_epoch(epoch) else: # working on already intialized epoch indices if epoch == self._cur_epoch: # re-enter so return return self._next_virtual_epoch(epoch) def _get_epoch_start_index(self, epoch): assert epoch >= 1 # fairseq is using 1-based epoch everywhere return ((epoch - 1) % self.num_virtual_epochs) * self.virtual_epoch_size def _next_global_indices(self, epoch): rng = np.random.RandomState( [ int( hashlib.sha1( str(self.__class__.__name__).encode("utf-8") ).hexdigest(), 16, ) % (2 ** 32), self.seed % (2 ** 32), # global seed epoch, # epoch index, ] ) del self._random_global_indices self._random_global_indices = rng.choice( self.virtual_size, self.virtual_size, replace=False ) if self.load_next_shard is None: self.load_next_shard = False else: # increase shard epoch for next loading self.shard_epoch += 1 self.load_next_shard = True logger.info( "to load next epoch/shard in next load_dataset: " f"epoch={epoch}/shard_epoch={self.shard_epoch}" ) def _next_virtual_epoch(self, epoch): index = self._get_epoch_start_index(epoch) if index == 0 or self._random_global_indices is None: # need to start from the beginning, # so call super().set_epoch(epoch) to establish the global virtual indices logger.info( 
"establishing a new set of global virtual indices for " f"epoch={epoch}/shard_epoch={self.shard_epoch}" ) super().set_epoch(epoch) self._next_global_indices(epoch) else: self._cur_epoch = epoch # reset cache sizes and ordered_indices for the epoch after moving to a new epoch self._clean_if_not_none( [ self._epoch_sizes, ] ) self._epoch_sizes = None self._current_epoch_start_index = index
7,827
38.14
119
py
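SampledMultiEpochDataset walks through the virtual dataset in fixed-size virtual epochs; _get_epoch_start_index and __len__ above are pure arithmetic over the epoch counter. The standalone sketch below shows how a 1,000-sample virtual dataset is split into epochs of 300 (it mirrors that arithmetic rather than instantiating the class; the sizes are invented).

import math

virtual_size, virtual_epoch_size = 1000, 300
num_virtual_epochs = math.ceil(virtual_size / virtual_epoch_size)  # 4

for epoch in range(1, 6):  # fairseq uses 1-based epochs
    start = ((epoch - 1) % num_virtual_epochs) * virtual_epoch_size
    length = (
        virtual_epoch_size
        if start + virtual_epoch_size < virtual_size
        else virtual_size - start
    )
    print(epoch, start, length)
# epochs 1-4 cover offsets 0/300/600/900 with lengths 300/300/300/100, then epoch 5 wraps back to 0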
sign-topic
sign-topic-main/fairseq/data/audio/hubert_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import itertools import logging import os import sys from typing import Any, List, Optional, Union import numpy as np import torch import torch.nn.functional as F from fairseq.data import data_utils from fairseq.data.fairseq_dataset import FairseqDataset logger = logging.getLogger(__name__) def load_audio(manifest_path, max_keep, min_keep): n_long, n_short = 0, 0 names, inds, sizes = [], [], [] with open(manifest_path) as f: root = f.readline().strip() for ind, line in enumerate(f): items = line.strip().split("\t") assert len(items) == 2, line sz = int(items[1]) if min_keep is not None and sz < min_keep: n_short += 1 elif max_keep is not None and sz > max_keep: n_long += 1 else: names.append(items[0]) inds.append(ind) sizes.append(sz) tot = ind + 1 logger.info( ( f"max_keep={max_keep}, min_keep={min_keep}, " f"loaded {len(names)}, skipped {n_short} short and {n_long} long, " f"longest-loaded={max(sizes)}, shortest-loaded={min(sizes)}" ) ) return root, names, inds, tot, sizes def load_label(label_path, inds, tot): with open(label_path) as f: labels = [line.rstrip() for line in f] assert ( len(labels) == tot ), f"number of labels does not match ({len(labels)} != {tot})" labels = [labels[i] for i in inds] return labels def load_label_offset(label_path, inds, tot): with open(label_path) as f: code_lengths = [len(line.encode("utf-8")) for line in f] assert ( len(code_lengths) == tot ), f"number of labels does not match ({len(code_lengths)} != {tot})" offsets = list(itertools.accumulate([0] + code_lengths)) offsets = [(offsets[i], offsets[i + 1]) for i in inds] return offsets def verify_label_lengths( audio_sizes, audio_rate, label_path, label_rate, inds, tot, tol=0.1, # tolerance in seconds ): if label_rate < 0: logger.info(f"{label_path} is sequence label. skipped") return with open(label_path) as f: lengths = [len(line.rstrip().split()) for line in f] assert len(lengths) == tot lengths = [lengths[i] for i in inds] num_invalid = 0 for i, ind in enumerate(inds): dur_from_audio = audio_sizes[i] / audio_rate dur_from_label = lengths[i] / label_rate if abs(dur_from_audio - dur_from_label) > tol: logger.warning( ( f"audio and label duration differ too much " f"(|{dur_from_audio} - {dur_from_label}| > {tol}) " f"in line {ind+1} of {label_path}. Check if `label_rate` " f"is correctly set (currently {label_rate}). " f"num. 
of samples = {audio_sizes[i]}; " f"label length = {lengths[i]}" ) ) num_invalid += 1 if num_invalid > 0: logger.warning( f"total {num_invalid} (audio, label) pairs with mismatched lengths" ) class HubertDataset(FairseqDataset): def __init__( self, manifest_path: str, sample_rate: float, label_paths: List[str], label_rates: Union[List[float], float], # -1 for sequence labels pad_list: List[str], eos_list: List[str], label_processors: Optional[List[Any]] = None, max_keep_sample_size: Optional[int] = None, min_keep_sample_size: Optional[int] = None, max_sample_size: Optional[int] = None, shuffle: bool = True, pad_audio: bool = False, normalize: bool = False, store_labels: bool = True, random_crop: bool = False, single_target: bool = False, ): self.audio_root, self.audio_names, inds, tot, self.sizes = load_audio( manifest_path, max_keep_sample_size, min_keep_sample_size ) self.sample_rate = sample_rate self.shuffle = shuffle self.random_crop = random_crop self.num_labels = len(label_paths) self.pad_list = pad_list self.eos_list = eos_list self.label_processors = label_processors self.single_target = single_target self.label_rates = ( [label_rates for _ in range(len(label_paths))] if isinstance(label_rates, int) else label_rates ) self.store_labels = store_labels if store_labels: self.label_list = [load_label(p, inds, tot) for p in label_paths] else: self.label_paths = label_paths self.label_offsets_list = [ load_label_offset(p, inds, tot) for p in label_paths ] assert label_processors is None or len(label_processors) == self.num_labels for label_path, label_rate in zip(label_paths, self.label_rates): verify_label_lengths( self.sizes, sample_rate, label_path, label_rate, inds, tot ) self.max_sample_size = ( max_sample_size if max_sample_size is not None else sys.maxsize ) self.pad_audio = pad_audio self.normalize = normalize logger.info( f"pad_audio={pad_audio}, random_crop={random_crop}, " f"normalize={normalize}, max_sample_size={self.max_sample_size}" ) def get_audio(self, index): import soundfile as sf wav_path = os.path.join(self.audio_root, self.audio_names[index]) wav, cur_sample_rate = sf.read(wav_path) wav = torch.from_numpy(wav).float() wav = self.postprocess(wav, cur_sample_rate) return wav def get_label(self, index, label_idx): if self.store_labels: label = self.label_list[label_idx][index] else: with open(self.label_paths[label_idx]) as f: offset_s, offset_e = self.label_offsets_list[label_idx][index] f.seek(offset_s) label = f.read(offset_e - offset_s) if self.label_processors is not None: label = self.label_processors[label_idx](label) return label def get_labels(self, index): return [self.get_label(index, i) for i in range(self.num_labels)] def __getitem__(self, index): wav = self.get_audio(index) labels = self.get_labels(index) return {"id": index, "source": wav, "label_list": labels} def __len__(self): return len(self.sizes) def crop_to_max_size(self, wav, target_size): size = len(wav) diff = size - target_size if diff <= 0: return wav, 0 start, end = 0, target_size if self.random_crop: start = np.random.randint(0, diff + 1) end = size - diff + start return wav[start:end], start def collater(self, samples): # target = max(sizes) -> random_crop not used # target = max_sample_size -> random_crop used for long samples = [s for s in samples if s["source"] is not None] if len(samples) == 0: return {} audios = [s["source"] for s in samples] audio_sizes = [len(s) for s in audios] if self.pad_audio: audio_size = min(max(audio_sizes), self.max_sample_size) else: audio_size = 
min(min(audio_sizes), self.max_sample_size) collated_audios, padding_mask, audio_starts = self.collater_audio( audios, audio_size ) targets_by_label = [ [s["label_list"][i] for s in samples] for i in range(self.num_labels) ] targets_list, lengths_list, ntokens_list = self.collater_label( targets_by_label, audio_size, audio_starts ) net_input = {"source": collated_audios, "padding_mask": padding_mask} batch = { "id": torch.LongTensor([s["id"] for s in samples]), "net_input": net_input, } if self.single_target: batch["target_lengths"] = lengths_list[0] batch["ntokens"] = ntokens_list[0] batch["target"] = targets_list[0] else: batch["target_lengths_list"] = lengths_list batch["ntokens_list"] = ntokens_list batch["target_list"] = targets_list return batch def collater_audio(self, audios, audio_size): collated_audios = audios[0].new_zeros(len(audios), audio_size) padding_mask = ( torch.BoolTensor(collated_audios.shape).fill_(False) # if self.pad_audio else None ) audio_starts = [0 for _ in audios] for i, audio in enumerate(audios): diff = len(audio) - audio_size if diff == 0: collated_audios[i] = audio elif diff < 0: assert self.pad_audio collated_audios[i] = torch.cat([audio, audio.new_full((-diff,), 0.0)]) padding_mask[i, diff:] = True else: collated_audios[i], audio_starts[i] = self.crop_to_max_size( audio, audio_size ) return collated_audios, padding_mask, audio_starts def collater_frm_label(self, targets, audio_size, audio_starts, label_rate, pad): assert label_rate > 0 s2f = label_rate / self.sample_rate frm_starts = [int(round(s * s2f)) for s in audio_starts] frm_size = int(round(audio_size * s2f)) if not self.pad_audio: rem_size = [len(t) - s for t, s in zip(targets, frm_starts)] frm_size = min(frm_size, *rem_size) targets = [t[s : s + frm_size] for t, s in zip(targets, frm_starts)] logger.debug(f"audio_starts={audio_starts}") logger.debug(f"frame_starts={frm_starts}") logger.debug(f"frame_size={frm_size}") lengths = torch.LongTensor([len(t) for t in targets]) ntokens = lengths.sum().item() targets = data_utils.collate_tokens(targets, pad_idx=pad, left_pad=False) return targets, lengths, ntokens def collater_seq_label(self, targets, pad): lengths = torch.LongTensor([len(t) for t in targets]) ntokens = lengths.sum().item() targets = data_utils.collate_tokens(targets, pad_idx=pad, left_pad=False) return targets, lengths, ntokens def collater_label(self, targets_by_label, audio_size, audio_starts): targets_list, lengths_list, ntokens_list = [], [], [] itr = zip(targets_by_label, self.label_rates, self.pad_list) for targets, label_rate, pad in itr: if label_rate == -1: targets, lengths, ntokens = self.collater_seq_label(targets, pad) else: targets, lengths, ntokens = self.collater_frm_label( targets, audio_size, audio_starts, label_rate, pad ) targets_list.append(targets) lengths_list.append(lengths) ntokens_list.append(ntokens) return targets_list, lengths_list, ntokens_list def num_tokens(self, index): return self.size(index) def size(self, index): if self.pad_audio: return self.sizes[index] return min(self.sizes[index], self.max_sample_size) def ordered_indices(self): if self.shuffle: order = [np.random.permutation(len(self))] else: order = [np.arange(len(self))] order.append(self.sizes) return np.lexsort(order)[::-1] def postprocess(self, wav, cur_sample_rate): if wav.dim() == 2: wav = wav.mean(-1) assert wav.dim() == 1, wav.dim() if cur_sample_rate != self.sample_rate: raise Exception(f"sr {cur_sample_rate} != {self.sample_rate}") if self.normalize: with torch.no_grad(): wav = 
F.layer_norm(wav, wav.shape) return wav
12,325
34.727536
86
py
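collater_frm_label in hubert_dataset.py converts audio-sample offsets into label-frame offsets through the ratio label_rate / sample_rate, so that a cropped waveform and its frame-level labels stay aligned. A short numeric sketch with made-up rates (16 kHz audio, 50 Hz labels), standalone:

# 16 kHz waveform cropped to 2 s starting 0.5 s in; labels were extracted at 50 Hz
sample_rate, label_rate = 16_000, 50.0
audio_start, audio_size = 8_000, 32_000        # in samples

s2f = label_rate / sample_rate                 # samples-to-frames factor
frm_start = int(round(audio_start * s2f))      # 25  -> first label frame to keep
frm_size = int(round(audio_size * s2f))        # 100 -> number of label frames to keep

print(frm_start, frm_size)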
sign-topic
sign-topic-main/fairseq/data/audio/multi_modality_dataset.py
# Copyright (c) 2021-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory. import logging import math from typing import List, Optional, NamedTuple import numpy as np import torch from fairseq.data import ( ConcatDataset, LanguagePairDataset, FileAudioDataset, data_utils, ) from fairseq.data import FairseqDataset logger = logging.getLogger(__name__) class ModalityDatasetItem(NamedTuple): datasetname: str dataset: any max_positions: List[int] max_tokens: Optional[int] = None max_sentences: Optional[int] = None # MultiModalityDataset: it concate multiple datasets with different modalities. # Compared with ConcatDataset it can 1) sample data given the ratios for different datasets # 2) it adds mode to indicate what type of the data samples come from. # It will be used with GroupedEpochBatchIterator together to generate mini-batch with samples # from the same type of dataset # If only one dataset is used, it will perform like the original dataset with mode added class MultiModalityDataset(ConcatDataset): def __init__(self, datasets: List[ModalityDatasetItem]): id_to_mode = [] dsets = [] max_tokens = [] max_sentences = [] max_positions = [] for dset in datasets: id_to_mode.append(dset.datasetname) dsets.append(dset.dataset) max_tokens.append(dset.max_tokens) max_positions.append(dset.max_positions) max_sentences.append(dset.max_sentences) weights = [1.0 for s in dsets] super().__init__(dsets, weights) self.max_tokens = max_tokens self.max_positions = max_positions self.max_sentences = max_sentences self.id_to_mode = id_to_mode self.raw_sub_batch_samplers = [] self._cur_epoch = 0 def set_epoch(self, epoch): super().set_epoch(epoch) self._cur_epoch = epoch def __getitem__(self, idx): dataset_idx, sample_idx = self._get_dataset_and_sample_index(idx) sample = self.datasets[dataset_idx][sample_idx] return (dataset_idx, sample) def collater(self, samples): if len(samples) == 0: return {} dataset_idx = samples[0][0] # make sure all samples in samples are from same dataset assert sum([0 if dataset_idx == s[0] else 1 for s in samples]) == 0 samples = self.datasets[dataset_idx].collater([x[1] for x in samples]) # add mode samples["net_input"]["mode"] = self.id_to_mode[dataset_idx] return samples def size(self, index: int): if len(self.datasets) == 1: return self.datasets[0].size(index) return super().size(index) @property def sizes(self): if len(self.datasets) == 1: return self.datasets[0].sizes super().sizes def ordered_indices(self): """ Returns indices sorted by length. So less padding is needed. """ if len(self.datasets) == 1: return self.datasets[0].ordered_indices() indices_group = [] for d_idx, ds in enumerate(self.datasets): sample_num = self.cumulative_sizes[d_idx] if d_idx > 0: sample_num = sample_num - self.cumulative_sizes[d_idx - 1] assert sample_num == len(ds) indices_group.append(ds.ordered_indices()) return indices_group def get_raw_batch_samplers(self, required_batch_size_multiple, seed): if len(self.raw_sub_batch_samplers) > 0: logger.info(" raw_sub_batch_samplers exists. 
No action is taken") return with data_utils.numpy_seed(seed): indices = self.ordered_indices() for i, ds in enumerate(self.datasets): indices[i] = ds.filter_indices_by_size( indices[i], self.max_positions[i], )[0] sub_batch_sampler = ds.batch_by_size( indices[i], max_tokens=self.max_tokens[i], max_sentences=self.max_sentences[i], required_batch_size_multiple=required_batch_size_multiple, ) self.raw_sub_batch_samplers.append(sub_batch_sampler) def get_batch_samplers(self, mult_ratios, required_batch_size_multiple, seed): self.get_raw_batch_samplers(required_batch_size_multiple, seed) batch_samplers = [] for i, _ in enumerate(self.datasets): if i > 0: sub_batch_sampler = [ [y + self.cumulative_sizes[i - 1] for y in x] for x in self.raw_sub_batch_samplers[i] ] else: sub_batch_sampler = list(self.raw_sub_batch_samplers[i]) smp_r = mult_ratios[i] if smp_r != 1: is_increase = "increased" if smp_r > 1 else "decreased" logger.info( "number of batch for the dataset {} is {} from {} to {}".format( self.id_to_mode[i], is_increase, len(sub_batch_sampler), int(len(sub_batch_sampler) * smp_r), ) ) mul_samplers = [] for _ in range(math.floor(smp_r)): mul_samplers = mul_samplers + sub_batch_sampler if math.floor(smp_r) != smp_r: with data_utils.numpy_seed(seed + self._cur_epoch): np.random.shuffle(sub_batch_sampler) smp_num = int( (smp_r - math.floor(smp_r)) * len(sub_batch_sampler) ) mul_samplers = mul_samplers + sub_batch_sampler[:smp_num] sub_batch_sampler = mul_samplers else: logger.info( "dataset {} batch number is {} ".format( self.id_to_mode[i], len(sub_batch_sampler) ) ) batch_samplers.append(sub_batch_sampler) return batch_samplers class LangPairMaskDataset(FairseqDataset): def __init__( self, dataset: LanguagePairDataset, src_eos: int, src_bos: Optional[int] = None, noise_id: Optional[int] = -1, mask_ratio: Optional[float] = 0, mask_type: Optional[str] = "random", ): self.dataset = dataset self.src_eos = src_eos self.src_bos = src_bos self.noise_id = noise_id self.mask_ratio = mask_ratio self.mask_type = mask_type assert mask_type in ("random", "tail") @property def src_sizes(self): return self.dataset.src_sizes @property def tgt_sizes(self): return self.dataset.tgt_sizes @property def sizes(self): # dataset.sizes can be a dynamically computed sizes: return self.dataset.sizes def get_batch_shapes(self): return self.dataset.buckets def num_tokens_vec(self, indices): return self.dataset.num_tokens_vec(indices) def __len__(self): return len(self.dataset) def num_tokens(self, index): return self.dataset.num_tokens(index) def size(self, index): return self.dataset.size(index) def ordered_indices(self): return self.dataset.ordered_indices() @property def supports_prefetch(self): return getattr(self.dataset, "supports_prefetch", False) def prefetch(self, indices): return self.dataset.prefetch(indices) def mask_src_tokens(self, sample): src_item = sample["source"] mask = None if self.mask_type == "random": mask = torch.rand(len(src_item)).le(self.mask_ratio) else: mask = torch.ones(len(src_item)) mask[: int(len(src_item) * (1 - self.mask_ratio))] = 0 mask = mask.eq(1) if src_item[0] == self.src_bos: mask[0] = False if src_item[-1] == self.src_eos: mask[-1] = False mask_src_item = src_item.masked_fill(mask, self.noise_id) smp = {"id": sample["id"], "source": mask_src_item, "target": sample["target"]} return smp def __getitem__(self, index): sample = self.dataset[index] if self.mask_ratio > 0: sample = self.mask_src_tokens(sample) return sample def collater(self, samples, pad_to_length=None): return 
self.dataset.collater(samples, pad_to_length) class FileAudioDatasetWrapper(FileAudioDataset): def collater(self, samples): samples = super().collater(samples) if len(samples) == 0: return {} samples["net_input"]["src_tokens"] = samples["net_input"]["source"] samples["net_input"]["prev_output_tokens"] = None del samples["net_input"]["source"] samples["net_input"]["src_lengths"] = None samples["net_input"]["alignment"] = None return samples
9,288
34.05283
93
py
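LangPairMaskDataset.mask_src_tokens above replaces part of the source sentence with a noise token, either at random positions or as a contiguous tail, while keeping BOS/EOS intact. Below is a minimal sketch of the "tail" variant on a dummy token sequence; the token ids and noise_id are made up.

import torch

src_item = torch.tensor([0, 11, 12, 13, 14, 15, 2])     # 0 = bos, 2 = eos (toy values)
mask_ratio, noise_id = 0.4, 999

mask = torch.ones(len(src_item))
mask[: int(len(src_item) * (1 - mask_ratio))] = 0        # keep the first 60%, mask the tail
mask = mask.eq(1)
mask[0], mask[-1] = False, False                          # never mask bos/eos

print(src_item.masked_fill(mask, noise_id))               # tensor([  0,  11,  12,  13, 999, 999,   2])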
sign-topic
sign-topic-main/fairseq/data/audio/speech_to_speech_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from dataclasses import dataclass import logging from pathlib import Path from typing import Dict, List, Optional, Tuple import torch from fairseq.data import ( ConcatDataset, data_utils as fairseq_data_utils, Dictionary, ) from fairseq.data.audio.data_cfg import S2SDataConfig from fairseq.data.audio.speech_to_text_dataset import ( _collate_frames, get_features_or_waveform, SpeechToTextDataset, SpeechToTextDatasetCreator, ) logger = logging.getLogger(__name__) @dataclass class SpeechToSpeechDatasetItem(object): index: int source: torch.Tensor target: Optional[torch.Tensor] = None target_speaker: Optional[torch.Tensor] = None class SpeechToSpeechDataset(SpeechToTextDataset): def __init__( self, split: str, is_train_split: bool, data_cfg: S2SDataConfig, src_audio_paths: List[str], src_n_frames: List[int], tgt_audio_paths: List[str], tgt_n_frames: List[int], ids: Optional[List[str]] = None, target_is_code: bool = False, tgt_dict: Dictionary = None, n_frames_per_step: int = 1, ): tgt_texts = tgt_audio_paths if target_is_code else None super().__init__( split, is_train_split, data_cfg, src_audio_paths, src_n_frames, ids=ids, tgt_dict=tgt_dict, tgt_texts=tgt_texts, n_frames_per_step=n_frames_per_step, ) self.tgt_audio_paths = tgt_audio_paths self.tgt_lens = [t // self.n_frames_per_step for t in tgt_n_frames] assert not target_is_code or tgt_dict is not None self.target_is_code = target_is_code assert len(tgt_audio_paths) == self.n_samples assert len(tgt_n_frames) == self.n_samples self.tgt_speakers = None if self.cfg.target_speaker_embed: samples = SpeechToTextDatasetCreator._load_samples_from_tsv( self.cfg.target_speaker_embed, split ) spk_emb_dict = {s["id"]: s["speaker_embed"] for s in samples} self.tgt_speakers = [spk_emb_dict[id] for id in self.ids] assert len(self.tgt_speakers) == self.n_samples logger.info(self.__repr__()) def pack_units(self, input: torch.Tensor) -> torch.Tensor: if self.n_frames_per_step <= 1: return input offset = 4 vocab_size = ( len(self.tgt_dict) - offset ) # remove offset from <bos>, <pad>, <eos>, <unk>, which is specific to fairseq dictionary assert input.dim() == 1 stacked_input = ( input[:-1].view(-1, self.n_frames_per_step) - offset ) # remove <eos> scale = [ pow(vocab_size, self.n_frames_per_step - 1 - i) for i in range(self.n_frames_per_step) ] scale = torch.LongTensor(scale).squeeze(0) res = input.new((len(input) - 1) // self.n_frames_per_step + 1).fill_(input[-1]) res[:-1] = (stacked_input * scale).sum(dim=1) + offset return res def __getitem__(self, index: int) -> SpeechToSpeechDatasetItem: source = self._get_source_audio(index) if not self.target_is_code: target = get_features_or_waveform(self.tgt_audio_paths[index]) target = torch.from_numpy(target).float() target = self.pack_frames(target) else: target = self.tgt_dict.encode_line( self.tgt_audio_paths[index], add_if_not_exist=False, append_eos=True, ).long() if self.n_frames_per_step > 1: n_tgt_frame = target.size(0) - 1 # exclude <eos> keep_n_tgt_frame = n_tgt_frame - n_tgt_frame % self.n_frames_per_step target = torch.cat( ( target[:keep_n_tgt_frame], target.new_full((1,), self.tgt_dict.eos()), ), dim=0, ) if self.tgt_speakers: tgt_spk = get_features_or_waveform(self.tgt_speakers[index]) tgt_spk = torch.from_numpy(tgt_spk).float() else: tgt_spk = torch.FloatTensor([]) return SpeechToSpeechDatasetItem( index=index, 
source=source, target=target, target_speaker=tgt_spk ) def _collate_target(self, samples: List[SpeechToSpeechDatasetItem]) -> torch.Tensor: if self.target_is_code: target = fairseq_data_utils.collate_tokens( [x.target for x in samples], self.tgt_dict.pad(), self.tgt_dict.eos(), left_pad=False, move_eos_to_beginning=False, ) # convert stacked units to a single id pack_targets = [self.pack_units(x.target) for x in samples] prev_output_tokens = fairseq_data_utils.collate_tokens( pack_targets, self.tgt_dict.pad(), self.tgt_dict.eos(), left_pad=False, move_eos_to_beginning=True, ) target_lengths = torch.tensor( [x.size(0) for x in pack_targets], dtype=torch.long ) else: target = _collate_frames([x.target for x in samples], is_audio_input=False) bsz, _, d = target.size() prev_output_tokens = torch.cat( (target.new_full((bsz, 1, d), 0.0), target[:, :-1, :]), dim=1 ) target_lengths = torch.tensor( [x.target.size(0) for x in samples], dtype=torch.long ) return target, prev_output_tokens, target_lengths def collater( self, samples: List[SpeechToSpeechDatasetItem], return_order: bool = False ) -> Dict: if len(samples) == 0: return {} indices = torch.tensor([x.index for x in samples], dtype=torch.long) frames = _collate_frames([x.source for x in samples], self.cfg.use_audio_input) # sort samples by descending number of frames n_frames = torch.tensor([x.source.size(0) for x in samples], dtype=torch.long) n_frames, order = n_frames.sort(descending=True) indices = indices.index_select(0, order) frames = frames.index_select(0, order) target, prev_output_tokens, target_lengths = self._collate_target(samples) target = target.index_select(0, order) target_lengths = target_lengths.index_select(0, order) prev_output_tokens = prev_output_tokens.index_select(0, order) ntokens = sum(x.target.size(0) for x in samples) tgt_speakers = None if self.cfg.target_speaker_embed: tgt_speakers = _collate_frames( [x.target_speaker for x in samples], is_audio_input=True ).index_select(0, order) net_input = { "src_tokens": frames, "src_lengths": n_frames, "prev_output_tokens": prev_output_tokens, "tgt_speaker": tgt_speakers, # TODO: unify "speaker" and "tgt_speaker" } out = { "id": indices, "net_input": net_input, "speaker": tgt_speakers, # to support Tacotron2 loss for speech-to-spectrogram model "target": target, "target_lengths": target_lengths, "ntokens": ntokens, "nsentences": len(samples), } if return_order: out["order"] = order return out class TextTargetMultitaskData(object): # mandatory columns KEY_ID, KEY_TEXT = "id", "tgt_text" def __init__(self, args, split, tgt_dict): samples = SpeechToTextDatasetCreator._load_samples_from_tsv(args.data, split) self.data = {s[self.KEY_ID]: s[self.KEY_TEXT] for s in samples} self.dict = tgt_dict self.append_eos = args.decoder_type != "ctc" def get(self, sample_id): if sample_id in self.data: return self.dict.encode_line( self.data[sample_id], add_if_not_exist=False, append_eos=self.append_eos, ) else: logger.warning(f"no target for {sample_id}") return torch.IntTensor([]) def collater(self, samples: List[torch.Tensor]) -> torch.Tensor: out = fairseq_data_utils.collate_tokens( samples, self.dict.pad(), self.dict.eos(), left_pad=False, move_eos_to_beginning=False, ).long() prev_out = fairseq_data_utils.collate_tokens( samples, self.dict.pad(), self.dict.eos(), left_pad=False, move_eos_to_beginning=True, ).long() target_lengths = torch.tensor([t.size(0) for t in samples], dtype=torch.long) ntokens = sum(t.size(0) for t in samples) output = { "prev_output_tokens": prev_out, "target": 
out, "target_lengths": target_lengths, "ntokens": ntokens, } return output class SpeechToSpeechMultitaskDataset(SpeechToSpeechDataset): def __init__(self, *argv): super().__init__(*argv) self.multitask_data = {} def add_multitask_dataset(self, task_name, task_data): self.multitask_data[task_name] = task_data def __getitem__( self, index: int ) -> Tuple[SpeechToSpeechDatasetItem, Dict[str, torch.Tensor]]: s2s_data = super().__getitem__(index) multitask_target = {} sample_id = self.ids[index] for task_name, task_dataset in self.multitask_data.items(): multitask_target[task_name] = task_dataset.get(sample_id) return s2s_data, multitask_target def collater( self, samples: List[Tuple[SpeechToSpeechDatasetItem, Dict[str, torch.Tensor]]] ) -> Dict: if len(samples) == 0: return {} out = super().collater([s for s, _ in samples], return_order=True) order = out["order"] del out["order"] for task_name, task_dataset in self.multitask_data.items(): if "multitask" not in out: out["multitask"] = {} d = [s[task_name] for _, s in samples] task_target = task_dataset.collater(d) out["multitask"][task_name] = { "target": task_target["target"].index_select(0, order), "target_lengths": task_target["target_lengths"].index_select(0, order), "ntokens": task_target["ntokens"], } out["multitask"][task_name]["net_input"] = { "prev_output_tokens": task_target["prev_output_tokens"].index_select( 0, order ), } return out class SpeechToSpeechDatasetCreator(object): # mandatory columns KEY_ID, KEY_SRC_AUDIO, KEY_SRC_N_FRAMES = "id", "src_audio", "src_n_frames" KEY_TGT_AUDIO, KEY_TGT_N_FRAMES = "tgt_audio", "tgt_n_frames" @classmethod def _from_list( cls, split_name: str, is_train_split, samples: List[Dict], data_cfg: S2SDataConfig, target_is_code: bool = False, target_dictionary: Dictionary = None, n_frames_per_step: int = 1, multitask: Optional[Dict] = None, ) -> SpeechToSpeechDataset: audio_root = Path(data_cfg.audio_root) ids = [s[cls.KEY_ID] for s in samples] src_audio_paths = [ (audio_root / s[cls.KEY_SRC_AUDIO]).as_posix() for s in samples ] tgt_audio_paths = [ s[cls.KEY_TGT_AUDIO] if target_is_code else (audio_root / s[cls.KEY_TGT_AUDIO]).as_posix() for s in samples ] src_n_frames = [int(s[cls.KEY_SRC_N_FRAMES]) for s in samples] tgt_n_frames = [int(s[cls.KEY_TGT_N_FRAMES]) for s in samples] has_multitask = len(multitask) > 0 dataset_cls = ( SpeechToSpeechMultitaskDataset if has_multitask else SpeechToSpeechDataset ) ds = dataset_cls( split_name, is_train_split, data_cfg, src_audio_paths, src_n_frames, tgt_audio_paths, tgt_n_frames, ids, target_is_code, target_dictionary, n_frames_per_step, ) if has_multitask: for task_name, task_obj in multitask.items(): task_data = TextTargetMultitaskData( task_obj.args, split_name, task_obj.target_dictionary ) ds.add_multitask_dataset(task_name, task_data) return ds @classmethod def from_tsv( cls, root: str, data_cfg: S2SDataConfig, splits: str, is_train_split: bool, epoch: int, seed: int, target_is_code: bool = False, target_dictionary: Dictionary = None, n_frames_per_step: int = 1, multitask: Optional[Dict] = None, ) -> SpeechToSpeechDataset: datasets = [] for split in splits.split(","): samples = SpeechToTextDatasetCreator._load_samples_from_tsv(root, split) ds = cls._from_list( split, is_train_split, samples, data_cfg, target_is_code, target_dictionary, n_frames_per_step, multitask, ) datasets.append(ds) return ConcatDataset(datasets) if len(datasets) > 1 else datasets[0]
13,961
33.304668
99
py
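SpeechToSpeechDataset.pack_units above collapses every n_frames_per_step target units into one id by treating them as digits in base vocab_size (after removing the 4 special-symbol offsets) and re-adding the offset; the trailing EOS is kept unchanged. A worked toy example, standalone, with an invented vocabulary size and unit values:

import torch

n_frames_per_step, offset, vocab_size = 2, 4, 100         # offset = <bos>,<pad>,<eos>,<unk>
units = torch.LongTensor([4, 5, 6, 7, 2])                 # two steps of 2 units, then eos (= 2)

stacked = units[:-1].view(-1, n_frames_per_step) - offset             # [[0, 1], [2, 3]]
scale = torch.LongTensor([vocab_size ** (n_frames_per_step - 1 - i)
                          for i in range(n_frames_per_step)])         # [100, 1]

packed = units.new((len(units) - 1) // n_frames_per_step + 1).fill_(units[-1])
packed[:-1] = (stacked * scale).sum(dim=1) + offset

print(packed)  # tensor([  5, 207,   2])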
sign-topic
sign-topic-main/fairseq/data/audio/text_to_speech_dataset.py
# Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory.abs from pathlib import Path from typing import List, Dict, Optional, Any from dataclasses import dataclass import numpy as np import torch from fairseq.data.audio.speech_to_text_dataset import ( SpeechToTextDataset, SpeechToTextDatasetCreator, S2TDataConfig, _collate_frames, get_features_or_waveform, ) from fairseq.data import Dictionary, data_utils as fairseq_data_utils @dataclass class TextToSpeechDatasetItem(object): index: int source: torch.Tensor target: Optional[torch.Tensor] = None speaker_id: Optional[int] = None duration: Optional[torch.Tensor] = None pitch: Optional[torch.Tensor] = None energy: Optional[torch.Tensor] = None class TextToSpeechDataset(SpeechToTextDataset): def __init__( self, split: str, is_train_split: bool, cfg: S2TDataConfig, audio_paths: List[str], n_frames: List[int], src_texts: Optional[List[str]] = None, tgt_texts: Optional[List[str]] = None, speakers: Optional[List[str]] = None, src_langs: Optional[List[str]] = None, tgt_langs: Optional[List[str]] = None, ids: Optional[List[str]] = None, tgt_dict: Optional[Dictionary] = None, pre_tokenizer=None, bpe_tokenizer=None, n_frames_per_step=1, speaker_to_id=None, durations: Optional[List[List[int]]] = None, pitches: Optional[List[str]] = None, energies: Optional[List[str]] = None, ): super(TextToSpeechDataset, self).__init__( split, is_train_split, cfg, audio_paths, n_frames, src_texts=src_texts, tgt_texts=tgt_texts, speakers=speakers, src_langs=src_langs, tgt_langs=tgt_langs, ids=ids, tgt_dict=tgt_dict, pre_tokenizer=pre_tokenizer, bpe_tokenizer=bpe_tokenizer, n_frames_per_step=n_frames_per_step, speaker_to_id=speaker_to_id, ) self.durations = durations self.pitches = pitches self.energies = energies def __getitem__(self, index: int) -> TextToSpeechDatasetItem: s2t_item = super().__getitem__(index) duration, pitch, energy = None, None, None if self.durations is not None: duration = torch.tensor( self.durations[index] + [0], dtype=torch.long # pad 0 for EOS ) if self.pitches is not None: pitch = get_features_or_waveform(self.pitches[index]) pitch = torch.from_numpy( np.concatenate((pitch, [0])) # pad 0 for EOS ).float() if self.energies is not None: energy = get_features_or_waveform(self.energies[index]) energy = torch.from_numpy( np.concatenate((energy, [0])) # pad 0 for EOS ).float() return TextToSpeechDatasetItem( index=index, source=s2t_item.source, target=s2t_item.target, speaker_id=s2t_item.speaker_id, duration=duration, pitch=pitch, energy=energy, ) def collater(self, samples: List[TextToSpeechDatasetItem]) -> Dict[str, Any]: if len(samples) == 0: return {} src_lengths, order = torch.tensor( [s.target.shape[0] for s in samples], dtype=torch.long ).sort(descending=True) id_ = torch.tensor([s.index for s in samples], dtype=torch.long).index_select( 0, order ) feat = _collate_frames( [s.source for s in samples], self.cfg.use_audio_input ).index_select(0, order) target_lengths = torch.tensor( [s.source.shape[0] for s in samples], dtype=torch.long ).index_select(0, order) src_tokens = fairseq_data_utils.collate_tokens( [s.target for s in samples], self.tgt_dict.pad(), self.tgt_dict.eos(), left_pad=False, move_eos_to_beginning=False, ).index_select(0, order) speaker = None if self.speaker_to_id is not None: speaker = ( 
torch.tensor([s.speaker_id for s in samples], dtype=torch.long) .index_select(0, order) .view(-1, 1) ) bsz, _, d = feat.size() prev_output_tokens = torch.cat( (feat.new_zeros((bsz, 1, d)), feat[:, :-1, :]), dim=1 ) durations, pitches, energies = None, None, None if self.durations is not None: durations = fairseq_data_utils.collate_tokens( [s.duration for s in samples], 0 ).index_select(0, order) assert src_tokens.shape[1] == durations.shape[1] if self.pitches is not None: pitches = _collate_frames([s.pitch for s in samples], True) pitches = pitches.index_select(0, order) assert src_tokens.shape[1] == pitches.shape[1] if self.energies is not None: energies = _collate_frames([s.energy for s in samples], True) energies = energies.index_select(0, order) assert src_tokens.shape[1] == energies.shape[1] src_texts = [self.tgt_dict.string(samples[i].target) for i in order] return { "id": id_, "net_input": { "src_tokens": src_tokens, "src_lengths": src_lengths, "prev_output_tokens": prev_output_tokens, }, "speaker": speaker, "target": feat, "durations": durations, "pitches": pitches, "energies": energies, "target_lengths": target_lengths, "ntokens": sum(target_lengths).item(), "nsentences": len(samples), "src_texts": src_texts, } class TextToSpeechDatasetCreator(SpeechToTextDatasetCreator): KEY_DURATION = "duration" KEY_PITCH = "pitch" KEY_ENERGY = "energy" @classmethod def _from_list( cls, split_name: str, is_train_split, samples: List[Dict], cfg: S2TDataConfig, tgt_dict, pre_tokenizer, bpe_tokenizer, n_frames_per_step, speaker_to_id, ) -> TextToSpeechDataset: audio_root = Path(cfg.audio_root) ids = [s[cls.KEY_ID] for s in samples] audio_paths = [(audio_root / s[cls.KEY_AUDIO]).as_posix() for s in samples] n_frames = [int(s[cls.KEY_N_FRAMES]) for s in samples] tgt_texts = [s[cls.KEY_TGT_TEXT] for s in samples] src_texts = [s.get(cls.KEY_SRC_TEXT, cls.DEFAULT_SRC_TEXT) for s in samples] speakers = [s.get(cls.KEY_SPEAKER, cls.DEFAULT_SPEAKER) for s in samples] src_langs = [s.get(cls.KEY_SRC_LANG, cls.DEFAULT_LANG) for s in samples] tgt_langs = [s.get(cls.KEY_TGT_LANG, cls.DEFAULT_LANG) for s in samples] durations = [s.get(cls.KEY_DURATION, None) for s in samples] durations = [ None if dd is None else [int(d) for d in dd.split(" ")] for dd in durations ] durations = None if any(dd is None for dd in durations) else durations pitches = [s.get(cls.KEY_PITCH, None) for s in samples] pitches = [ None if pp is None else (audio_root / pp).as_posix() for pp in pitches ] pitches = None if any(pp is None for pp in pitches) else pitches energies = [s.get(cls.KEY_ENERGY, None) for s in samples] energies = [ None if ee is None else (audio_root / ee).as_posix() for ee in energies ] energies = None if any(ee is None for ee in energies) else energies return TextToSpeechDataset( split_name, is_train_split, cfg, audio_paths, n_frames, src_texts, tgt_texts, speakers, src_langs, tgt_langs, ids, tgt_dict, pre_tokenizer, bpe_tokenizer, n_frames_per_step, speaker_to_id, durations, pitches, energies, )
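An illustrative sketch, not part of the file above: this is how the collater's per-token duration supervision is padded, a 0 having already been appended for EOS in __getitem__. The duration values are made up; only fairseq's collate_tokens helper (used verbatim in the collater) is assumed.

import torch
from fairseq.data import data_utils as fairseq_data_utils

durations = [
    torch.tensor([3, 5, 2, 0], dtype=torch.long),  # 3 tokens + trailing 0 for EOS
    torch.tensor([4, 1, 0], dtype=torch.long),     # 2 tokens + trailing 0 for EOS
]
# pad_idx=0, so padded positions contribute zero frames
batched = fairseq_data_utils.collate_tokens(durations, 0)
print(batched.shape)  # torch.Size([2, 4])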
8,583
33.473896
87
py
sign-topic
sign-topic-main/fairseq/data/audio/frm_text_to_speech_dataset.py
# Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory.abs import csv import logging import os.path as op from typing import List, Optional import numpy as np import torch from fairseq.data import Dictionary from fairseq.data.audio.speech_to_text_dataset import S2TDataConfig from fairseq.data.audio.text_to_speech_dataset import ( TextToSpeechDataset, TextToSpeechDatasetCreator, ) logger = logging.getLogger(__name__) class FrmTextToSpeechDataset(TextToSpeechDataset): def __init__( self, split: str, is_train_split: bool, data_cfg: S2TDataConfig, audio_paths: List[str], n_frames: List[int], src_texts: Optional[List[str]] = None, tgt_texts: Optional[List[str]] = None, speakers: Optional[List[str]] = None, src_langs: Optional[List[str]] = None, tgt_langs: Optional[List[str]] = None, ids: Optional[List[str]] = None, tgt_dict: Optional[Dictionary] = None, pre_tokenizer=None, bpe_tokenizer=None, n_frames_per_step=1, speaker_to_id=None, do_chunk=False, chunk_bound=-1, chunk_init=50, chunk_incr=5, add_eos=True, dedup=True, ref_fpu=-1, ): # It assumes texts are encoded at a fixed frame-rate super().__init__( split=split, is_train_split=is_train_split, data_cfg=data_cfg, audio_paths=audio_paths, n_frames=n_frames, src_texts=src_texts, tgt_texts=tgt_texts, speakers=speakers, src_langs=src_langs, tgt_langs=tgt_langs, ids=ids, tgt_dict=tgt_dict, pre_tokenizer=pre_tokenizer, bpe_tokenizer=bpe_tokenizer, n_frames_per_step=n_frames_per_step, speaker_to_id=speaker_to_id, ) self.do_chunk = do_chunk self.chunk_bound = chunk_bound self.chunk_init = chunk_init self.chunk_incr = chunk_incr self.add_eos = add_eos self.dedup = dedup self.ref_fpu = ref_fpu self.chunk_size = -1 if do_chunk: assert self.chunk_incr >= 0 assert self.pre_tokenizer is None def __getitem__(self, index): index, source, target, speaker_id, _, _, _ = super().__getitem__(index) if target[-1].item() == self.tgt_dict.eos_index: target = target[:-1] fpu = source.size(0) / target.size(0) # frame-per-unit fps = self.n_frames_per_step assert ( self.ref_fpu == -1 or abs((fpu * fps - self.ref_fpu) / self.ref_fpu) < 0.1 ), f"{fpu*fps} != {self.ref_fpu}" # only chunk training split if self.is_train_split and self.do_chunk and self.chunk_size > 0: lang = target[: int(self.data_cfg.prepend_tgt_lang_tag)] text = target[int(self.data_cfg.prepend_tgt_lang_tag) :] size = len(text) chunk_size = min(self.chunk_size, size) chunk_start = np.random.randint(size - chunk_size + 1) text = text[chunk_start : chunk_start + chunk_size] target = torch.cat((lang, text), 0) f_size = int(np.floor(chunk_size * fpu)) f_start = int(np.floor(chunk_start * fpu)) assert f_size > 0 source = source[f_start : f_start + f_size, :] if self.dedup: target = torch.unique_consecutive(target) if self.add_eos: eos_idx = self.tgt_dict.eos_index target = torch.cat((target, torch.LongTensor([eos_idx])), 0) return index, source, target, speaker_id def set_epoch(self, epoch): if self.is_train_split and self.do_chunk: old = self.chunk_size self.chunk_size = self.chunk_init + epoch * self.chunk_incr if self.chunk_bound > 0: self.chunk_size = min(self.chunk_size, self.chunk_bound) logger.info( ( f"{self.split}: setting chunk size " f"from {old} to {self.chunk_size}" ) ) class FrmTextToSpeechDatasetCreator(TextToSpeechDatasetCreator): # inherit for key names @classmethod def 
from_tsv( cls, root: str, data_cfg: S2TDataConfig, split: str, tgt_dict, pre_tokenizer, bpe_tokenizer, is_train_split: bool, n_frames_per_step: int, speaker_to_id, do_chunk: bool = False, chunk_bound: int = -1, chunk_init: int = 50, chunk_incr: int = 5, add_eos: bool = True, dedup: bool = True, ref_fpu: float = -1, ) -> FrmTextToSpeechDataset: tsv_path = op.join(root, f"{split}.tsv") if not op.isfile(tsv_path): raise FileNotFoundError(f"Dataset not found: {tsv_path}") with open(tsv_path) as f: reader = csv.DictReader( f, delimiter="\t", quotechar=None, doublequote=False, lineterminator="\n", quoting=csv.QUOTE_NONE, ) s = [dict(e) for e in reader] assert len(s) > 0 ids = [ss[cls.KEY_ID] for ss in s] audio_paths = [op.join(data_cfg.audio_root, ss[cls.KEY_AUDIO]) for ss in s] n_frames = [int(ss[cls.KEY_N_FRAMES]) for ss in s] tgt_texts = [ss[cls.KEY_TGT_TEXT] for ss in s] src_texts = [ss.get(cls.KEY_SRC_TEXT, cls.DEFAULT_SRC_TEXT) for ss in s] speakers = [ss.get(cls.KEY_SPEAKER, cls.DEFAULT_SPEAKER) for ss in s] src_langs = [ss.get(cls.KEY_SRC_LANG, cls.DEFAULT_LANG) for ss in s] tgt_langs = [ss.get(cls.KEY_TGT_LANG, cls.DEFAULT_LANG) for ss in s] return FrmTextToSpeechDataset( split=split, is_train_split=is_train_split, data_cfg=data_cfg, audio_paths=audio_paths, n_frames=n_frames, src_texts=src_texts, tgt_texts=tgt_texts, speakers=speakers, src_langs=src_langs, tgt_langs=tgt_langs, ids=ids, tgt_dict=tgt_dict, pre_tokenizer=pre_tokenizer, bpe_tokenizer=bpe_tokenizer, n_frames_per_step=n_frames_per_step, speaker_to_id=speaker_to_id, do_chunk=do_chunk, chunk_bound=chunk_bound, chunk_init=chunk_init, chunk_incr=chunk_incr, add_eos=add_eos, dedup=dedup, ref_fpu=ref_fpu, )
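An illustrative sketch, not part of the file above: the chunk-size schedule applied by set_epoch() during training, with hypothetical hyper-parameters (chunk_init=50, chunk_incr=5, chunk_bound=100).

chunk_init, chunk_incr, chunk_bound = 50, 5, 100
for epoch in range(1, 5):
    chunk_size = chunk_init + epoch * chunk_incr  # grow linearly with the epoch
    if chunk_bound > 0:
        chunk_size = min(chunk_size, chunk_bound)  # cap at the bound
    print(epoch, chunk_size)  # 1 55, 2 60, 3 65, 4 70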
6,923
32.61165
86
py
sign-topic
sign-topic-main/fairseq/data/audio/raw_audio_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import os import sys import io import numpy as np import torch import torch.nn.functional as F from .. import FairseqDataset from ..data_utils import compute_mask_indices, get_buckets, get_bucketed_sizes from fairseq.data.audio.audio_utils import ( parse_path, read_from_stored_zip, is_sf_audio_data, ) from fairseq.data.text_compressor import TextCompressor, TextCompressionLevel logger = logging.getLogger(__name__) class RawAudioDataset(FairseqDataset): def __init__( self, sample_rate, max_sample_size=None, min_sample_size=0, shuffle=True, pad=False, normalize=False, compute_mask_indices=False, **mask_compute_kwargs, ): super().__init__() self.sample_rate = sample_rate self.sizes = [] self.max_sample_size = ( max_sample_size if max_sample_size is not None else sys.maxsize ) self.min_sample_size = min_sample_size self.pad = pad self.shuffle = shuffle self.normalize = normalize self.compute_mask_indices = compute_mask_indices if self.compute_mask_indices: self.mask_compute_kwargs = mask_compute_kwargs self._features_size_map = {} self._C = mask_compute_kwargs["encoder_embed_dim"] self._conv_feature_layers = eval(mask_compute_kwargs["conv_feature_layers"]) def __getitem__(self, index): raise NotImplementedError() def __len__(self): return len(self.sizes) def postprocess(self, feats, curr_sample_rate): if feats.dim() == 2: feats = feats.mean(-1) if curr_sample_rate != self.sample_rate: raise Exception(f"sample rate: {curr_sample_rate}, need {self.sample_rate}") assert feats.dim() == 1, feats.dim() if self.normalize: with torch.no_grad(): feats = F.layer_norm(feats, feats.shape) return feats def crop_to_max_size(self, wav, target_size): size = len(wav) diff = size - target_size if diff <= 0: return wav start = np.random.randint(0, diff + 1) end = size - diff + start return wav[start:end] def _compute_mask_indices(self, dims, padding_mask): B, T, C = dims mask_indices, mask_channel_indices = None, None if self.mask_compute_kwargs["mask_prob"] > 0: mask_indices = compute_mask_indices( (B, T), padding_mask, self.mask_compute_kwargs["mask_prob"], self.mask_compute_kwargs["mask_length"], self.mask_compute_kwargs["mask_selection"], self.mask_compute_kwargs["mask_other"], min_masks=2, no_overlap=self.mask_compute_kwargs["no_mask_overlap"], min_space=self.mask_compute_kwargs["mask_min_space"], ) mask_indices = torch.from_numpy(mask_indices) if self.mask_compute_kwargs["mask_channel_prob"] > 0: mask_channel_indices = compute_mask_indices( (B, C), None, self.mask_compute_kwargs["mask_channel_prob"], self.mask_compute_kwargs["mask_channel_length"], self.mask_compute_kwargs["mask_channel_selection"], self.mask_compute_kwargs["mask_channel_other"], no_overlap=self.mask_compute_kwargs["no_mask_channel_overlap"], min_space=self.mask_compute_kwargs["mask_channel_min_space"], ) mask_channel_indices = ( torch.from_numpy(mask_channel_indices).unsqueeze(1).expand(-1, T, -1) ) return mask_indices, mask_channel_indices @staticmethod def _bucket_tensor(tensor, num_pad, value): return F.pad(tensor, (0, num_pad), value=value) def collater(self, samples): samples = [s for s in samples if s["source"] is not None] if len(samples) == 0: return {} sources = [s["source"] for s in samples] sizes = [len(s) for s in sources] if self.pad: target_size = min(max(sizes), self.max_sample_size) else: target_size = min(min(sizes), 
self.max_sample_size) collated_sources = sources[0].new_zeros(len(sources), target_size) padding_mask = ( torch.BoolTensor(collated_sources.shape).fill_(False) if self.pad else None ) for i, (source, size) in enumerate(zip(sources, sizes)): diff = size - target_size if diff == 0: collated_sources[i] = source elif diff < 0: assert self.pad collated_sources[i] = torch.cat( [source, source.new_full((-diff,), 0.0)] ) padding_mask[i, diff:] = True else: collated_sources[i] = self.crop_to_max_size(source, target_size) input = {"source": collated_sources} out = {"id": torch.LongTensor([s["id"] for s in samples])} if self.pad: input["padding_mask"] = padding_mask if hasattr(self, "num_buckets") and self.num_buckets > 0: assert self.pad, "Cannot bucket without padding first." bucket = max(self._bucketed_sizes[s["id"]] for s in samples) num_pad = bucket - collated_sources.size(-1) if num_pad: input["source"] = self._bucket_tensor(collated_sources, num_pad, 0) input["padding_mask"] = self._bucket_tensor(padding_mask, num_pad, True) if self.compute_mask_indices: B = input["source"].size(0) T = self._get_mask_indices_dims(input["source"].size(-1)) padding_mask_reshaped = input["padding_mask"].clone() extra = padding_mask_reshaped.size(1) % T if extra > 0: padding_mask_reshaped = padding_mask_reshaped[:, :-extra] padding_mask_reshaped = padding_mask_reshaped.view( padding_mask_reshaped.size(0), T, -1 ) padding_mask_reshaped = padding_mask_reshaped.all(-1) input["padding_count"] = padding_mask_reshaped.sum(-1).max().item() mask_indices, mask_channel_indices = self._compute_mask_indices( (B, T, self._C), padding_mask_reshaped, ) input["mask_indices"] = mask_indices input["mask_channel_indices"] = mask_channel_indices out["sample_size"] = mask_indices.sum().item() out["net_input"] = input return out def _get_mask_indices_dims(self, size, padding=0, dilation=1): if size not in self._features_size_map: L_in = size for (_, kernel_size, stride) in self._conv_feature_layers: L_out = L_in + 2 * padding - dilation * (kernel_size - 1) - 1 L_out = 1 + L_out // stride L_in = L_out self._features_size_map[size] = L_out return self._features_size_map[size] def num_tokens(self, index): return self.size(index) def size(self, index): """Return an example's size as a float or tuple. This value is used when filtering a dataset with ``--max-positions``.""" if self.pad: return self.sizes[index] return min(self.sizes[index], self.max_sample_size) def ordered_indices(self): """Return an ordered list of indices. 
Batches will be constructed based on this order.""" if self.shuffle: order = [np.random.permutation(len(self))] order.append( np.minimum( np.array(self.sizes), self.max_sample_size, ) ) return np.lexsort(order)[::-1] else: return np.arange(len(self)) def set_bucket_info(self, num_buckets): self.num_buckets = num_buckets if self.num_buckets > 0: self._collated_sizes = np.minimum( np.array(self.sizes), self.max_sample_size, ) self.buckets = get_buckets( self._collated_sizes, self.num_buckets, ) self._bucketed_sizes = get_bucketed_sizes( self._collated_sizes, self.buckets ) logger.info( f"{len(self.buckets)} bucket(s) for the audio dataset: " f"{self.buckets}" ) class FileAudioDataset(RawAudioDataset): def __init__( self, manifest_path, sample_rate, max_sample_size=None, min_sample_size=0, shuffle=True, pad=False, normalize=False, num_buckets=0, compute_mask_indices=False, text_compression_level=TextCompressionLevel.none, **mask_compute_kwargs, ): super().__init__( sample_rate=sample_rate, max_sample_size=max_sample_size, min_sample_size=min_sample_size, shuffle=shuffle, pad=pad, normalize=normalize, compute_mask_indices=compute_mask_indices, **mask_compute_kwargs, ) self.text_compressor = TextCompressor(level=text_compression_level) skipped = 0 self.fnames = [] sizes = [] self.skipped_indices = set() with open(manifest_path, "r") as f: self.root_dir = f.readline().strip() for i, line in enumerate(f): items = line.strip().split("\t") assert len(items) == 2, line sz = int(items[1]) if min_sample_size is not None and sz < min_sample_size: skipped += 1 self.skipped_indices.add(i) continue self.fnames.append(self.text_compressor.compress(items[0])) sizes.append(sz) logger.info(f"loaded {len(self.fnames)}, skipped {skipped} samples") self.sizes = np.array(sizes, dtype=np.int64) try: import pyarrow self.fnames = pyarrow.array(self.fnames) except: logger.debug( "Could not create a pyarrow array. 
Please install pyarrow for better performance" ) pass self.set_bucket_info(num_buckets) def __getitem__(self, index): import soundfile as sf fn = self.fnames[index] fn = fn if isinstance(self.fnames, list) else fn.as_py() fn = self.text_compressor.decompress(fn) path_or_fp = os.path.join(self.root_dir, fn) _path, slice_ptr = parse_path(path_or_fp) if len(slice_ptr) == 2: byte_data = read_from_stored_zip(_path, slice_ptr[0], slice_ptr[1]) assert is_sf_audio_data(byte_data) path_or_fp = io.BytesIO(byte_data) wav, curr_sample_rate = sf.read(path_or_fp, dtype="float32") feats = torch.from_numpy(wav).float() feats = self.postprocess(feats, curr_sample_rate) return {"id": index, "source": feats} class BinarizedAudioDataset(RawAudioDataset): def __init__( self, data_dir, split, sample_rate, max_sample_size=None, min_sample_size=0, shuffle=True, pad=False, normalize=False, num_buckets=0, compute_mask_indices=False, **mask_compute_kwargs, ): super().__init__( sample_rate=sample_rate, max_sample_size=max_sample_size, min_sample_size=min_sample_size, shuffle=shuffle, pad=pad, normalize=normalize, compute_mask_indices=compute_mask_indices, **mask_compute_kwargs, ) from fairseq.data import data_utils, Dictionary self.fnames_dict = Dictionary.load(os.path.join(data_dir, "dict.txt")) root_path = os.path.join(data_dir, f"{split}.root") if os.path.exists(root_path): with open(root_path, "r") as f: self.root_dir = next(f).strip() else: self.root_dir = None fnames_path = os.path.join(data_dir, split) self.fnames = data_utils.load_indexed_dataset(fnames_path, self.fnames_dict) lengths_path = os.path.join(data_dir, f"{split}.lengths") with open(lengths_path, "r") as f: for line in f: sz = int(line.rstrip()) assert ( sz >= min_sample_size ), f"Min sample size is not supported for binarized dataset, but found a sample with size {sz}" self.sizes.append(sz) self.sizes = np.array(self.sizes, dtype=np.int64) self.set_bucket_info(num_buckets) logger.info(f"loaded {len(self.fnames)} samples") def __getitem__(self, index): import soundfile as sf fname = self.fnames_dict.string(self.fnames[index], separator="") if self.root_dir: fname = os.path.join(self.root_dir, fname) wav, curr_sample_rate = sf.read(fname) feats = torch.from_numpy(wav).float() feats = self.postprocess(feats, curr_sample_rate) return {"id": index, "source": feats}
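An illustrative sketch, not part of the file above: the random crop performed by RawAudioDataset.crop_to_max_size() when a sample is longer than the target size; the waveform here is synthetic.

import numpy as np
import torch

wav = torch.arange(10, dtype=torch.float32)  # stand-in for a 10-sample waveform
target_size = 6
diff = len(wav) - target_size                # 4 surplus samples
start = np.random.randint(0, diff + 1)       # uniform crop start in [0, diff]
cropped = wav[start : start + target_size]
print(cropped.shape)  # torch.Size([6])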
13,679
33.720812
111
py
sign-topic
sign-topic-main/fairseq/data/audio/audio_utils.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from pathlib import Path from typing import BinaryIO, Optional, Tuple, Union, List import mmap import numpy as np import torch import torch.nn.functional as F SF_AUDIO_FILE_EXTENSIONS = {".wav", ".flac", ".ogg"} FEATURE_OR_SF_AUDIO_FILE_EXTENSIONS = {".npy", ".wav", ".flac", ".ogg"} def convert_waveform( waveform: Union[np.ndarray, torch.Tensor], sample_rate: int, normalize_volume: bool = False, to_mono: bool = False, to_sample_rate: Optional[int] = None, ) -> Tuple[Union[np.ndarray, torch.Tensor], int]: """convert a waveform: - to a target sample rate - from multi-channel to mono channel - volume normalization Args: waveform (numpy.ndarray or torch.Tensor): 2D original waveform (channels x length) sample_rate (int): original sample rate normalize_volume (bool): perform volume normalization to_mono (bool): convert to mono channel if having multiple channels to_sample_rate (Optional[int]): target sample rate Returns: waveform (numpy.ndarray): converted 2D waveform (channels x length) sample_rate (float): target sample rate """ try: import torchaudio.sox_effects as ta_sox except ImportError: raise ImportError("Please install torchaudio: pip install torchaudio") effects = [] if normalize_volume: effects.append(["gain", "-n"]) if to_sample_rate is not None and to_sample_rate != sample_rate: effects.append(["rate", f"{to_sample_rate}"]) if to_mono and waveform.shape[0] > 1: effects.append(["channels", "1"]) if len(effects) > 0: is_np_input = isinstance(waveform, np.ndarray) _waveform = torch.from_numpy(waveform) if is_np_input else waveform converted, converted_sample_rate = ta_sox.apply_effects_tensor( _waveform, sample_rate, effects ) if is_np_input: converted = converted.numpy() return converted, converted_sample_rate return waveform, sample_rate def get_waveform( path_or_fp: Union[str, BinaryIO], normalization: bool = True, mono: bool = True, frames: int = -1, start: int = 0, always_2d: bool = True, output_sample_rate: Optional[int] = None, normalize_volume: bool = False, ) -> Tuple[np.ndarray, int]: """Get the waveform and sample rate of a 16-bit WAV/FLAC/OGG Vorbis audio. Args: path_or_fp (str or BinaryIO): the path or file-like object normalization (bool): normalize values to [-1, 1] (Default: True) mono (bool): convert multi-channel audio to mono-channel one frames (int): the number of frames to read. (-1 for reading all) start (int): Where to start reading. A negative value counts from the end. 
always_2d (bool): always return 2D array even for mono-channel audios output_sample_rate (Optional[int]): output sample rate normalize_volume (bool): normalize volume Returns: waveform (numpy.ndarray): 1D or 2D waveform (channels x length) sample_rate (float): sample rate """ if isinstance(path_or_fp, str): ext = Path(path_or_fp).suffix if ext not in SF_AUDIO_FILE_EXTENSIONS: raise ValueError(f"Unsupported audio format: {ext}") try: import soundfile as sf except ImportError: raise ImportError("Please install soundfile: pip install soundfile") waveform, sample_rate = sf.read( path_or_fp, dtype="float32", always_2d=True, frames=frames, start=start ) waveform = waveform.T # T x C -> C x T waveform, sample_rate = convert_waveform( waveform, sample_rate, normalize_volume=normalize_volume, to_mono=mono, to_sample_rate=output_sample_rate, ) if not normalization: waveform *= 2 ** 15 # denormalized to 16-bit signed integers if not always_2d: waveform = waveform.squeeze(axis=0) return waveform, sample_rate def _get_kaldi_fbank( waveform: np.ndarray, sample_rate: int, n_bins=80 ) -> Optional[np.ndarray]: """Get mel-filter bank features via PyKaldi.""" try: from kaldi.feat.fbank import FbankOptions, Fbank from kaldi.feat.mel import MelBanksOptions from kaldi.feat.window import FrameExtractionOptions from kaldi.matrix import Vector mel_opts = MelBanksOptions() mel_opts.num_bins = n_bins frame_opts = FrameExtractionOptions() frame_opts.samp_freq = sample_rate opts = FbankOptions() opts.mel_opts = mel_opts opts.frame_opts = frame_opts fbank = Fbank(opts=opts) features = fbank.compute(Vector(waveform.squeeze()), 1.0).numpy() return features except ImportError: return None def _get_torchaudio_fbank( waveform: np.ndarray, sample_rate, n_bins=80 ) -> Optional[np.ndarray]: """Get mel-filter bank features via TorchAudio.""" try: import torchaudio.compliance.kaldi as ta_kaldi waveform = torch.from_numpy(waveform) features = ta_kaldi.fbank( waveform, num_mel_bins=n_bins, sample_frequency=sample_rate ) return features.numpy() except ImportError: return None def get_fbank(path_or_fp: Union[str, BinaryIO], n_bins=80) -> np.ndarray: """Get mel-filter bank features via PyKaldi or TorchAudio. Prefer PyKaldi (faster CPP implementation) to TorchAudio (Python implementation). Note that Kaldi/TorchAudio requires 16-bit signed integers as inputs and hence the waveform should not be normalized.""" waveform, sample_rate = get_waveform(path_or_fp, normalization=False) features = _get_kaldi_fbank(waveform, sample_rate, n_bins) if features is None: features = _get_torchaudio_fbank(waveform, sample_rate, n_bins) if features is None: raise ImportError( "Please install pyKaldi or torchaudio to enable " "online filterbank feature extraction" ) return features def is_npy_data(data: bytes) -> bool: return data[0] == 147 and data[1] == 78 def is_sf_audio_data(data: bytes) -> bool: is_wav = data[0] == 82 and data[1] == 73 and data[2] == 70 is_flac = data[0] == 102 and data[1] == 76 and data[2] == 97 is_ogg = data[0] == 79 and data[1] == 103 and data[2] == 103 return is_wav or is_flac or is_ogg def mmap_read(path: str, offset: int, length: int) -> bytes: with open(path, "rb") as f: with mmap.mmap(f.fileno(), length=0, access=mmap.ACCESS_READ) as mmap_o: data = mmap_o[offset : offset + length] return data def read_from_stored_zip(zip_path: str, offset: int, length: int) -> bytes: return mmap_read(zip_path, offset, length) def parse_path(path: str) -> Tuple[str, List[int]]: """Parse data path which is either a path to 1. 
a .npy/.wav/.flac/.ogg file 2. a stored ZIP file with slicing info: "[zip_path]:[offset]:[length]" Args: path (str): the data path to parse Returns: file_path (str): the file path slice_ptr (list of int): empty in case 1; byte offset and length for the slice in case 2 """ if Path(path).suffix in FEATURE_OR_SF_AUDIO_FILE_EXTENSIONS: _path, slice_ptr = path, [] else: _path, *slice_ptr = path.split(":") if not Path(_path).is_file(): raise FileNotFoundError(f"File not found: {_path}") assert len(slice_ptr) in {0, 2}, f"Invalid path: {path}" slice_ptr = [int(i) for i in slice_ptr] return _path, slice_ptr def get_window(window_fn: callable, n_fft: int, win_length: int) -> torch.Tensor: padding = n_fft - win_length assert padding >= 0 return F.pad(window_fn(win_length), (padding // 2, padding - padding // 2)) def get_fourier_basis(n_fft: int) -> torch.Tensor: basis = np.fft.fft(np.eye(n_fft)) basis = np.vstack( [np.real(basis[: n_fft // 2 + 1, :]), np.imag(basis[: n_fft // 2 + 1, :])] ) return torch.from_numpy(basis).float() def get_mel_filters( sample_rate: int, n_fft: int, n_mels: int, f_min: float, f_max: float ) -> torch.Tensor: try: import librosa except ImportError: raise ImportError("Please install librosa: pip install librosa") basis = librosa.filters.mel(sample_rate, n_fft, n_mels, f_min, f_max) return torch.from_numpy(basis).float() class TTSSpectrogram(torch.nn.Module): def __init__( self, n_fft: int, win_length: int, hop_length: int, window_fn: callable = torch.hann_window, return_phase: bool = False, ) -> None: super(TTSSpectrogram, self).__init__() self.n_fft = n_fft self.hop_length = hop_length self.return_phase = return_phase basis = get_fourier_basis(n_fft).unsqueeze(1) basis *= get_window(window_fn, n_fft, win_length) self.register_buffer("basis", basis) def forward( self, waveform: torch.Tensor ) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]: padding = (self.n_fft // 2, self.n_fft // 2) x = F.pad(waveform.unsqueeze(1), padding, mode="reflect") x = F.conv1d(x, self.basis, stride=self.hop_length) real_part = x[:, : self.n_fft // 2 + 1, :] imag_part = x[:, self.n_fft // 2 + 1 :, :] magnitude = torch.sqrt(real_part ** 2 + imag_part ** 2) if self.return_phase: phase = torch.atan2(imag_part, real_part) return magnitude, phase return magnitude class TTSMelScale(torch.nn.Module): def __init__( self, n_mels: int, sample_rate: int, f_min: float, f_max: float, n_stft: int ) -> None: super(TTSMelScale, self).__init__() basis = get_mel_filters(sample_rate, (n_stft - 1) * 2, n_mels, f_min, f_max) self.register_buffer("basis", basis) def forward(self, specgram: torch.Tensor) -> torch.Tensor: return torch.matmul(self.basis, specgram)
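An illustrative sketch, not part of the file above: computing a linear spectrogram with TTSSpectrogram on random audio. The STFT parameters are illustrative, not prescriptive.

import torch
from fairseq.data.audio.audio_utils import TTSSpectrogram

spec_fn = TTSSpectrogram(n_fft=1024, win_length=1024, hop_length=256)
waveform = torch.randn(1, 16000)  # batch of one 1-second signal at 16 kHz
magnitude = spec_fn(waveform)
print(magnitude.shape)  # (batch, n_fft // 2 + 1, num_frames)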
10,253
33.759322
84
py
sign-topic
sign-topic-main/fairseq/data/audio/speech_to_text_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import csv import io import logging import re from collections import defaultdict from pathlib import Path from typing import Dict, List, Optional from dataclasses import dataclass import numpy as np import torch from fairseq.data import ( ConcatDataset, Dictionary, FairseqDataset, ResamplingDataset, data_utils as fairseq_data_utils, ) from fairseq.data.audio.audio_utils import ( get_fbank, get_waveform, read_from_stored_zip, is_npy_data, is_sf_audio_data, parse_path, FEATURE_OR_SF_AUDIO_FILE_EXTENSIONS, ) from fairseq.data.audio.feature_transforms import CompositeAudioFeatureTransform from fairseq.data.audio.data_cfg import S2TDataConfig logger = logging.getLogger(__name__) def get_features_from_npy_or_audio(path): ext = Path(path).suffix if ext not in FEATURE_OR_SF_AUDIO_FILE_EXTENSIONS: raise ValueError(f'Unsupported file format for "{path}"') return np.load(path) if ext == ".npy" else get_fbank(path) def get_features_or_waveform_from_stored_zip( path, byte_offset, byte_size, need_waveform=False, use_sample_rate=None, ): assert path.endswith(".zip") data = read_from_stored_zip(path, byte_offset, byte_size) f = io.BytesIO(data) if is_npy_data(data): features_or_waveform = np.load(f) elif is_sf_audio_data(data): features_or_waveform = ( get_waveform(f, always_2d=False, output_sample_rate=use_sample_rate)[0] if need_waveform else get_fbank(f) ) else: raise ValueError(f'Unknown file format for "{path}"') return features_or_waveform def get_features_or_waveform(path: str, need_waveform=False, use_sample_rate=None): """Get speech features from .npy file or waveform from .wav/.flac file. The file may be inside an uncompressed ZIP file and is accessed via byte offset and length. Args: path (str): File path in the format of "<.npy/.wav/.flac path>" or "<zip path>:<byte offset>:<byte length>". need_waveform (bool): return waveform instead of features. use_sample_rate (int): change sample rate for the input wave file Returns: features_or_waveform (numpy.ndarray): speech features or waveform. """ _path, slice_ptr = parse_path(path) if len(slice_ptr) == 0: if need_waveform: return get_waveform( _path, always_2d=False, output_sample_rate=use_sample_rate )[0] return get_features_from_npy_or_audio(_path) elif len(slice_ptr) == 2: features_or_waveform = get_features_or_waveform_from_stored_zip( _path, slice_ptr[0], slice_ptr[1], need_waveform=need_waveform, use_sample_rate=use_sample_rate, ) else: raise ValueError(f"Invalid path: {path}") return features_or_waveform def _collate_frames( frames: List[torch.Tensor], is_audio_input: bool = False ) -> torch.Tensor: """ Convert a list of 2D frames into a padded 3D tensor Args: frames (list): list of 2D frames of size L[i]*f_dim. 
Where L[i] is length of i-th frame and f_dim is static dimension of features Returns: 3D tensor of size len(frames)*len_max*f_dim where len_max is max of L[i] """ max_len = max(frame.size(0) for frame in frames) if is_audio_input: out = frames[0].new_zeros((len(frames), max_len)) else: out = frames[0].new_zeros((len(frames), max_len, frames[0].size(1))) for i, v in enumerate(frames): out[i, : v.size(0)] = v return out @dataclass class SpeechToTextDatasetItem(object): index: int source: torch.Tensor target: Optional[torch.Tensor] = None speaker_id: Optional[int] = None class SpeechToTextDataset(FairseqDataset): LANG_TAG_TEMPLATE = "<lang:{}>" def __init__( self, split: str, is_train_split: bool, cfg: S2TDataConfig, audio_paths: List[str], n_frames: List[int], src_texts: Optional[List[str]] = None, tgt_texts: Optional[List[str]] = None, speakers: Optional[List[str]] = None, src_langs: Optional[List[str]] = None, tgt_langs: Optional[List[str]] = None, ids: Optional[List[str]] = None, tgt_dict: Optional[Dictionary] = None, pre_tokenizer=None, bpe_tokenizer=None, n_frames_per_step=1, speaker_to_id=None, append_eos=True, ): self.split, self.is_train_split = split, is_train_split self.cfg = cfg self.audio_paths, self.n_frames = audio_paths, n_frames self.n_samples = len(audio_paths) assert len(n_frames) == self.n_samples > 0 assert src_texts is None or len(src_texts) == self.n_samples assert tgt_texts is None or len(tgt_texts) == self.n_samples assert speakers is None or len(speakers) == self.n_samples assert src_langs is None or len(src_langs) == self.n_samples assert tgt_langs is None or len(tgt_langs) == self.n_samples assert ids is None or len(ids) == self.n_samples assert (tgt_dict is None and tgt_texts is None) or ( tgt_dict is not None and tgt_texts is not None ) self.src_texts, self.tgt_texts = src_texts, tgt_texts self.src_langs, self.tgt_langs = src_langs, tgt_langs self.speakers = speakers self.tgt_dict = tgt_dict self.check_tgt_lang_tag() self.ids = ids self.shuffle = cfg.shuffle if is_train_split else False self.feature_transforms = CompositeAudioFeatureTransform.from_config_dict( self.cfg.get_feature_transforms(split, is_train_split) ) self.pre_tokenizer = pre_tokenizer self.bpe_tokenizer = bpe_tokenizer self.n_frames_per_step = n_frames_per_step self.speaker_to_id = speaker_to_id self.tgt_lens = self.get_tgt_lens_and_check_oov() self.append_eos = append_eos logger.info(self.__repr__()) def get_tgt_lens_and_check_oov(self): if self.tgt_texts is None: return [0 for _ in range(self.n_samples)] tgt_lens = [] n_tokens, n_oov_tokens = 0, 0 for i in range(self.n_samples): tokenized = self.get_tokenized_tgt_text(i).split(" ") oov_tokens = [ t for t in tokenized if self.tgt_dict.index(t) == self.tgt_dict.unk_index ] n_tokens += len(tokenized) n_oov_tokens += len(oov_tokens) tgt_lens.append(len(tokenized)) logger.info(f"'{self.split}' has {n_oov_tokens / n_tokens * 100:.2f}% OOV") return tgt_lens def __repr__(self): return ( self.__class__.__name__ + f'(split="{self.split}", n_samples={self.n_samples:_}, ' f"prepend_tgt_lang_tag={self.cfg.prepend_tgt_lang_tag}, " f"shuffle={self.shuffle}, transforms={self.feature_transforms}, " f"n_frames_per_step={self.n_frames_per_step}" ) @classmethod def is_lang_tag(cls, token): pattern = cls.LANG_TAG_TEMPLATE.replace("{}", "(.*)") return re.match(pattern, token) def check_tgt_lang_tag(self): if self.cfg.prepend_tgt_lang_tag: assert self.tgt_langs is not None and self.tgt_dict is not None tgt_lang_tags = [ self.LANG_TAG_TEMPLATE.format(t) for t in 
set(self.tgt_langs) ] assert all(t in self.tgt_dict for t in tgt_lang_tags) @classmethod def tokenize(cls, tokenizer, text: str): return text if tokenizer is None else tokenizer.encode(text) def get_tokenized_tgt_text(self, index: int): text = self.tokenize(self.pre_tokenizer, self.tgt_texts[index]) text = self.tokenize(self.bpe_tokenizer, text) return text def pack_frames(self, feature: torch.Tensor): if self.n_frames_per_step == 1: return feature n_packed_frames = feature.shape[0] // self.n_frames_per_step feature = feature[: self.n_frames_per_step * n_packed_frames] return feature.reshape(n_packed_frames, -1) @classmethod def get_lang_tag_idx(cls, lang: str, dictionary: Dictionary): lang_tag_idx = dictionary.index(cls.LANG_TAG_TEMPLATE.format(lang)) assert lang_tag_idx != dictionary.unk() return lang_tag_idx def _get_source_audio(self, index: int) -> torch.Tensor: source = get_features_or_waveform( self.audio_paths[index], need_waveform=self.cfg.use_audio_input, use_sample_rate=self.cfg.use_sample_rate, ) if self.feature_transforms is not None: assert not self.cfg.use_audio_input source = self.feature_transforms(source) source = torch.from_numpy(source).float() return source def __getitem__(self, index: int) -> SpeechToTextDatasetItem: source = self._get_source_audio(index) source = self.pack_frames(source) target = None if self.tgt_texts is not None: tokenized = self.get_tokenized_tgt_text(index) target = self.tgt_dict.encode_line( tokenized, add_if_not_exist=False, append_eos=self.append_eos ).long() if self.cfg.prepend_tgt_lang_tag: lang_tag_idx = self.get_lang_tag_idx( self.tgt_langs[index], self.tgt_dict ) target = torch.cat((torch.LongTensor([lang_tag_idx]), target), 0) speaker_id = None if self.speaker_to_id is not None: speaker_id = self.speaker_to_id[self.speakers[index]] return SpeechToTextDatasetItem( index=index, source=source, target=target, speaker_id=speaker_id ) def __len__(self): return self.n_samples def collater( self, samples: List[SpeechToTextDatasetItem], return_order: bool = False ) -> Dict: if len(samples) == 0: return {} indices = torch.tensor([x.index for x in samples], dtype=torch.long) frames = _collate_frames([x.source for x in samples], self.cfg.use_audio_input) # sort samples by descending number of frames n_frames = torch.tensor([x.source.size(0) for x in samples], dtype=torch.long) n_frames, order = n_frames.sort(descending=True) indices = indices.index_select(0, order) frames = frames.index_select(0, order) target, target_lengths = None, None prev_output_tokens = None ntokens = None if self.tgt_texts is not None: target = fairseq_data_utils.collate_tokens( [x.target for x in samples], self.tgt_dict.pad(), self.tgt_dict.eos(), left_pad=False, move_eos_to_beginning=False, ) target = target.index_select(0, order) target_lengths = torch.tensor( [x.target.size(0) for x in samples], dtype=torch.long ).index_select(0, order) prev_output_tokens = fairseq_data_utils.collate_tokens( [x.target for x in samples], self.tgt_dict.pad(), self.tgt_dict.eos(), left_pad=False, move_eos_to_beginning=True, ) prev_output_tokens = prev_output_tokens.index_select(0, order) ntokens = sum(x.target.size(0) for x in samples) speaker = None if self.speaker_to_id is not None: speaker = ( torch.tensor([s.speaker_id for s in samples], dtype=torch.long) .index_select(0, order) .view(-1, 1) ) net_input = { "src_tokens": frames, "src_lengths": n_frames, "prev_output_tokens": prev_output_tokens, } out = { "id": indices, "net_input": net_input, "speaker": speaker, "target": target, 
"target_lengths": target_lengths, "ntokens": ntokens, "nsentences": len(samples), } if return_order: out["order"] = order return out def num_tokens(self, index): return self.n_frames[index] def size(self, index): return self.n_frames[index], self.tgt_lens[index] @property def sizes(self): return np.array(self.n_frames) @property def can_reuse_epoch_itr_across_epochs(self): return True def ordered_indices(self): if self.shuffle: order = [np.random.permutation(len(self))] else: order = [np.arange(len(self))] # first by descending order of # of frames then by original/random order order.append([-n for n in self.n_frames]) return np.lexsort(order) def prefetch(self, indices): raise False class SpeechToTextDatasetCreator(object): # mandatory columns KEY_ID, KEY_AUDIO, KEY_N_FRAMES = "id", "audio", "n_frames" KEY_TGT_TEXT = "tgt_text" # optional columns KEY_SPEAKER, KEY_SRC_TEXT = "speaker", "src_text" KEY_SRC_LANG, KEY_TGT_LANG = "src_lang", "tgt_lang" # default values DEFAULT_SPEAKER = DEFAULT_SRC_TEXT = DEFAULT_LANG = "" @classmethod def _from_list( cls, split_name: str, is_train_split, samples: List[Dict], cfg: S2TDataConfig, tgt_dict, pre_tokenizer, bpe_tokenizer, n_frames_per_step, speaker_to_id, ) -> SpeechToTextDataset: audio_root = Path(cfg.audio_root) ids = [s[cls.KEY_ID] for s in samples] audio_paths = [(audio_root / s[cls.KEY_AUDIO]).as_posix() for s in samples] n_frames = [int(s[cls.KEY_N_FRAMES]) for s in samples] tgt_texts = [s[cls.KEY_TGT_TEXT] for s in samples] src_texts = [s.get(cls.KEY_SRC_TEXT, cls.DEFAULT_SRC_TEXT) for s in samples] speakers = [s.get(cls.KEY_SPEAKER, cls.DEFAULT_SPEAKER) for s in samples] src_langs = [s.get(cls.KEY_SRC_LANG, cls.DEFAULT_LANG) for s in samples] tgt_langs = [s.get(cls.KEY_TGT_LANG, cls.DEFAULT_LANG) for s in samples] return SpeechToTextDataset( split_name, is_train_split, cfg, audio_paths, n_frames, src_texts=src_texts, tgt_texts=tgt_texts, speakers=speakers, src_langs=src_langs, tgt_langs=tgt_langs, ids=ids, tgt_dict=tgt_dict, pre_tokenizer=pre_tokenizer, bpe_tokenizer=bpe_tokenizer, n_frames_per_step=n_frames_per_step, speaker_to_id=speaker_to_id, ) @classmethod def get_size_ratios( cls, datasets: List[SpeechToTextDataset], alpha: float = 1.0 ) -> List[float]: """Size ratios for temperature-based sampling (https://arxiv.org/abs/1907.05019)""" id_to_lp, lp_to_sz = {}, defaultdict(int) for ds in datasets: lang_pairs = {f"{s}->{t}" for s, t in zip(ds.src_langs, ds.tgt_langs)} assert len(lang_pairs) == 1 lang_pair = list(lang_pairs)[0] id_to_lp[ds.split] = lang_pair lp_to_sz[lang_pair] += sum(ds.n_frames) sz_sum = sum(v for v in lp_to_sz.values()) lp_to_prob = {k: v / sz_sum for k, v in lp_to_sz.items()} lp_to_tgt_prob = {k: v ** alpha for k, v in lp_to_prob.items()} prob_sum = sum(v for v in lp_to_tgt_prob.values()) lp_to_tgt_prob = {k: v / prob_sum for k, v in lp_to_tgt_prob.items()} lp_to_sz_ratio = { k: (lp_to_tgt_prob[k] * sz_sum) / v for k, v in lp_to_sz.items() } size_ratio = [lp_to_sz_ratio[id_to_lp[ds.split]] for ds in datasets] p_formatted = { k: f"{lp_to_prob[k]:.3f}->{lp_to_tgt_prob[k]:.3f}" for k in lp_to_sz } logger.info(f"sampling probability balancing: {p_formatted}") sr_formatted = {ds.split: f"{r:.3f}" for ds, r in zip(datasets, size_ratio)} logger.info(f"balanced sampling size ratio: {sr_formatted}") return size_ratio @classmethod def _load_samples_from_tsv(cls, root: str, split: str): tsv_path = Path(root) / f"{split}.tsv" if not tsv_path.is_file(): raise FileNotFoundError(f"Dataset not found: {tsv_path}") with open(tsv_path) 
as f: reader = csv.DictReader( f, delimiter="\t", quotechar=None, doublequote=False, lineterminator="\n", quoting=csv.QUOTE_NONE, ) samples = [dict(e) for e in reader] if len(samples) == 0: raise ValueError(f"Empty manifest: {tsv_path}") return samples @classmethod def _from_tsv( cls, root: str, cfg: S2TDataConfig, split: str, tgt_dict, is_train_split: bool, pre_tokenizer, bpe_tokenizer, n_frames_per_step, speaker_to_id, ) -> SpeechToTextDataset: samples = cls._load_samples_from_tsv(root, split) return cls._from_list( split, is_train_split, samples, cfg, tgt_dict, pre_tokenizer, bpe_tokenizer, n_frames_per_step, speaker_to_id, ) @classmethod def from_tsv( cls, root: str, cfg: S2TDataConfig, splits: str, tgt_dict, pre_tokenizer, bpe_tokenizer, is_train_split: bool, epoch: int, seed: int, n_frames_per_step: int = 1, speaker_to_id=None, ) -> SpeechToTextDataset: datasets = [ cls._from_tsv( root, cfg, split, tgt_dict, is_train_split, pre_tokenizer, bpe_tokenizer, n_frames_per_step, speaker_to_id, ) for split in splits.split(",") ] if is_train_split and len(datasets) > 1 and cfg.sampling_alpha != 1.0: # temperature-based sampling size_ratios = cls.get_size_ratios(datasets, alpha=cfg.sampling_alpha) datasets = [ ResamplingDataset( d, size_ratio=r, seed=seed, epoch=epoch, replace=(r >= 1.0) ) for r, d in zip(size_ratios, datasets) ] return ConcatDataset(datasets) if len(datasets) > 1 else datasets[0]
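An illustrative worked example, not part of the file above: the temperature-based sampling computed by get_size_ratios(), with made-up per-language-pair frame counts and alpha=0.5.

sizes = {"en->de": 900.0, "en->mt": 100.0}
alpha = 0.5
total = sum(sizes.values())
probs = {k: v / total for k, v in sizes.items()}            # 0.9 and 0.1
tgt = {k: p ** alpha for k, p in probs.items()}
norm = sum(tgt.values())
tgt = {k: v / norm for k, v in tgt.items()}                 # ~0.75 and ~0.25
ratios = {k: tgt[k] * total / v for k, v in sizes.items()}  # ~0.83 and ~2.5
print(ratios)  # the smaller pair is upsampled, the larger one downsampled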
19,100
33.478339
87
py
sign-topic
sign-topic-main/fairseq/data/audio/speech_to_text_joint_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging from pathlib import Path from typing import Dict, List, Optional, NamedTuple import torch from fairseq.data import ( ConcatDataset, Dictionary, ResamplingDataset, data_utils as fairseq_data_utils, ) from fairseq.data.audio.speech_to_text_dataset import ( SpeechToTextDataset, S2TDataConfig, SpeechToTextDatasetCreator, ) logger = logging.getLogger(__name__) class S2TJointDataConfig(S2TDataConfig): """Wrapper class for data config YAML""" @property def src_vocab_filename(self): """fairseq vocabulary file under data root""" return self.config.get("src_vocab_filename", "src_dict.txt") @property def src_pre_tokenizer(self) -> Dict: """Pre-tokenizer to apply before subword tokenization. Returning a dictionary with `tokenizer` providing the tokenizer name and the other items providing the tokenizer-specific arguments. Tokenizers are defined in `fairseq.data.encoders.*`""" return self.config.get("src_pre_tokenizer", {"tokenizer": None}) @property def src_bpe_tokenizer(self) -> Dict: """Subword tokenizer to apply on source text after pre-tokenization. Returning a dictionary with `bpe` providing the tokenizer name and the other items providing the tokenizer-specific arguments. Tokenizers are defined in `fairseq.data.encoders.*`""" return self.config.get("src_bpe_tokenizer", {"bpe": None}) @property def prepend_tgt_lang_tag_no_change(self) -> bool: """Prepend target lang ID token as the prev_output_tokens BOS (e.g. for to-many multilingual setting). No change needed during inference. """ return self.config.get("prepend_tgt_lang_tag_no_change", False) @property def sampling_text_alpha(self): """Hyper-parameter alpha = 1/T for temperature-based resampling. 
(text input only) (alpha = 1 for no resampling)""" return self.config.get("sampling_text_alpha", 1.0) class SpeechToTextJointDatasetItem(NamedTuple): index: int source: torch.Tensor target: Optional[torch.Tensor] = None src_txt_tokens: Optional[torch.Tensor] = None tgt_lang_tag: Optional[int] = None src_lang_tag: Optional[int] = None tgt_alignment: Optional[torch.Tensor] = None # use_src_lang_id: # 0: don't use src_lang_id # 1: attach src_lang_id to the src_txt_tokens as eos class SpeechToTextJointDataset(SpeechToTextDataset): def __init__( self, split: str, is_train_split: bool, cfg: S2TJointDataConfig, audio_paths: List[str], n_frames: List[int], src_texts: Optional[List[str]] = None, tgt_texts: Optional[List[str]] = None, speakers: Optional[List[str]] = None, src_langs: Optional[List[str]] = None, tgt_langs: Optional[List[str]] = None, ids: Optional[List[str]] = None, tgt_dict: Optional[Dictionary] = None, src_dict: Optional[Dictionary] = None, pre_tokenizer=None, bpe_tokenizer=None, src_pre_tokenizer=None, src_bpe_tokenizer=None, append_eos: Optional[bool] = True, alignment: Optional[List[str]] = None, use_src_lang_id: Optional[int] = 0, ): super().__init__( split, is_train_split, cfg, audio_paths, n_frames, src_texts=src_texts, tgt_texts=tgt_texts, speakers=speakers, src_langs=src_langs, tgt_langs=tgt_langs, ids=ids, tgt_dict=tgt_dict, pre_tokenizer=pre_tokenizer, bpe_tokenizer=bpe_tokenizer, append_eos=append_eos, ) self.src_dict = src_dict self.src_pre_tokenizer = src_pre_tokenizer self.src_bpe_tokenizer = src_bpe_tokenizer self.alignment = None self.use_src_lang_id = use_src_lang_id if alignment is not None: self.alignment = [ [float(s) for s in sample.split()] for sample in alignment ] def get_tokenized_src_text(self, index: int): text = self.tokenize(self.src_pre_tokenizer, self.src_texts[index]) text = self.tokenize(self.src_bpe_tokenizer, text) return text def __getitem__(self, index: int) -> SpeechToTextJointDatasetItem: s2t_dataset_item = super().__getitem__(index) src_tokens = None src_lang_tag = None if self.src_texts is not None and self.src_dict is not None: src_tokens = self.get_tokenized_src_text(index) src_tokens = self.src_dict.encode_line( src_tokens, add_if_not_exist=False, append_eos=True ).long() if self.use_src_lang_id > 0: src_lang_tag = self.get_lang_tag_idx( self.src_langs[index], self.src_dict ) tgt_lang_tag = None if self.cfg.prepend_tgt_lang_tag_no_change: # prepend_tgt_lang_tag_no_change: modify prev_output_tokens instead tgt_lang_tag = self.get_lang_tag_idx(self.tgt_langs[index], self.tgt_dict) ali = None if self.alignment is not None: ali = torch.Tensor(self.alignment[index]).float() return SpeechToTextJointDatasetItem( index=index, source=s2t_dataset_item.source, target=s2t_dataset_item.target, src_txt_tokens=src_tokens, tgt_lang_tag=tgt_lang_tag, src_lang_tag=src_lang_tag, tgt_alignment=ali, ) def __len__(self): return self.n_samples def collater(self, samples: List[SpeechToTextJointDatasetItem]) -> Dict: s2t_out = super().collater(samples, return_order=True) if s2t_out == {}: return s2t_out net_input, order = s2t_out["net_input"], s2t_out["order"] if self.src_texts is not None and self.src_dict is not None: src_txt_tokens = fairseq_data_utils.collate_tokens( [x.src_txt_tokens for x in samples], self.src_dict.pad(), self.src_dict.eos(), left_pad=False, move_eos_to_beginning=False, ) src_txt_lengths = torch.tensor( [x.src_txt_tokens.size()[0] for x in samples], dtype=torch.long ) if self.use_src_lang_id > 0: src_lang_idxs = torch.tensor( [s.src_lang_tag 
for s in samples], dtype=src_txt_tokens.dtype ) if self.use_src_lang_id == 1: # replace eos with lang_id eos_idx = src_txt_lengths - 1 src_txt_tokens.scatter_( 1, eos_idx.view(-1, 1), src_lang_idxs.view(-1, 1) ) else: raise NotImplementedError("Implementation is required") src_txt_tokens = src_txt_tokens.index_select(0, order) src_txt_lengths = src_txt_lengths.index_select(0, order) net_input["src_txt_tokens"] = src_txt_tokens net_input["src_txt_lengths"] = src_txt_lengths net_input["alignment"] = None if self.alignment is not None: max_len = max([s.tgt_alignment.size(0) for s in samples]) alignment = torch.ones(len(samples), max_len).float() for i, s in enumerate(samples): cur_len = s.tgt_alignment.size(0) alignment[i][:cur_len].copy_(s.tgt_alignment) net_input["alignment"] = alignment.index_select(0, order) if self.tgt_texts is not None and samples[0].tgt_lang_tag is not None: for i in range(len(samples)): net_input["prev_output_tokens"][i][0] = samples[order[i]].tgt_lang_tag out = { "id": s2t_out["id"], "net_input": net_input, "target": s2t_out["target"], "target_lengths": s2t_out["target_lengths"], "ntokens": s2t_out["ntokens"], "nsentences": len(samples), } return out class SpeechToTextJointDatasetCreator(SpeechToTextDatasetCreator): KEY_ALIGN = "align" @classmethod def _from_list( cls, split_name: str, is_train_split, samples: List[Dict], cfg: S2TJointDataConfig, tgt_dict, src_dict, pre_tokenizer, bpe_tokenizer, src_pre_tokenizer, src_bpe_tokenizer, append_eos, use_src_lang_id, ) -> SpeechToTextJointDataset: audio_root = Path(cfg.audio_root) ids = [s[cls.KEY_ID] for s in samples] audio_paths = [(audio_root / s[cls.KEY_AUDIO]).as_posix() for s in samples] n_frames = [int(s[cls.KEY_N_FRAMES]) for s in samples] tgt_texts = [s[cls.KEY_TGT_TEXT] for s in samples] src_texts = [s.get(cls.KEY_SRC_TEXT, cls.DEFAULT_SRC_TEXT) for s in samples] speakers = [s.get(cls.KEY_SPEAKER, cls.DEFAULT_SPEAKER) for s in samples] src_langs = [s.get(cls.KEY_SRC_LANG, cls.DEFAULT_LANG) for s in samples] tgt_langs = [s.get(cls.KEY_TGT_LANG, cls.DEFAULT_LANG) for s in samples] tgt_alignment = None if cls.KEY_ALIGN in samples[0].keys(): tgt_alignment = [s[cls.KEY_ALIGN] for s in samples] return SpeechToTextJointDataset( split_name, is_train_split, cfg, audio_paths, n_frames, src_texts=src_texts, tgt_texts=tgt_texts, speakers=speakers, src_langs=src_langs, tgt_langs=tgt_langs, ids=ids, tgt_dict=tgt_dict, src_dict=src_dict, pre_tokenizer=pre_tokenizer, bpe_tokenizer=bpe_tokenizer, src_pre_tokenizer=src_pre_tokenizer, src_bpe_tokenizer=src_bpe_tokenizer, append_eos=append_eos, alignment=tgt_alignment, use_src_lang_id=use_src_lang_id, ) @classmethod def _from_tsv( cls, root: str, cfg: S2TJointDataConfig, split: str, tgt_dict, src_dict, is_train_split: bool, pre_tokenizer, bpe_tokenizer, src_pre_tokenizer, src_bpe_tokenizer, append_eos: bool, use_src_lang_id: int, ) -> SpeechToTextJointDataset: samples = cls._load_samples_from_tsv(root, split) return cls._from_list( split, is_train_split, samples, cfg, tgt_dict, src_dict, pre_tokenizer, bpe_tokenizer, src_pre_tokenizer, src_bpe_tokenizer, append_eos, use_src_lang_id, ) @classmethod def from_tsv( cls, root: str, cfg: S2TJointDataConfig, splits: str, tgt_dict, src_dict, pre_tokenizer, bpe_tokenizer, src_pre_tokenizer, src_bpe_tokenizer, is_train_split: bool, epoch: int, seed: int, append_eos: Optional[bool] = True, use_src_lang_id: Optional[int] = 0, ) -> SpeechToTextJointDataset: datasets = [ cls._from_tsv( root, cfg, split, tgt_dict, src_dict, is_train_split, 
pre_tokenizer, bpe_tokenizer, src_pre_tokenizer, src_bpe_tokenizer, append_eos=append_eos, use_src_lang_id=use_src_lang_id, ) for split in splits.split(",") ] if is_train_split and len(datasets) > 1 and cfg.sampling_alpha != 1.0: # temperature-based sampling size_ratios = cls.get_size_ratios(datasets, alpha=cfg.sampling_alpha) datasets = [ ResamplingDataset( d, size_ratio=r, seed=seed, epoch=epoch, replace=(r >= 1.0) ) for r, d in zip(size_ratios, datasets) ] return ConcatDataset(datasets) if len(datasets) > 1 else datasets[0]
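An illustrative sketch, not part of the file above: the eos-to-source-language-id replacement performed in the collater when use_src_lang_id == 1. Token and language indices here are made up.

import torch

src_txt_tokens = torch.tensor([[5, 6, 7, 2], [8, 9, 2, 1]])  # 2 = eos, 1 = pad
src_txt_lengths = torch.tensor([4, 3])
src_lang_idxs = torch.tensor([50, 51])                       # hypothetical lang tags
eos_idx = src_txt_lengths - 1
src_txt_tokens.scatter_(1, eos_idx.view(-1, 1), src_lang_idxs.view(-1, 1))
print(src_txt_tokens)  # the eos positions now hold 50 and 51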
12,544
33.847222
86
py
sign-topic
sign-topic-main/fairseq/data/audio/feature_transforms/delta_deltas.py
import numpy as np
import torch

from fairseq.data.audio.feature_transforms import (
    AudioFeatureTransform,
    register_audio_feature_transform,
)


@register_audio_feature_transform("delta_deltas")
class DeltaDeltas(AudioFeatureTransform):
    """Expand delta-deltas features from spectrum."""

    @classmethod
    def from_config_dict(cls, config=None):
        _config = {} if config is None else config
        return DeltaDeltas(_config.get("win_length", 5))

    def __init__(self, win_length=5):
        self.win_length = win_length

    def __repr__(self):
        return self.__class__.__name__

    def __call__(self, spectrogram):
        from torchaudio.functional import compute_deltas

        assert len(spectrogram.shape) == 2, "spectrogram must be a 2-D tensor."
        # spectrogram is T x F, while compute_deltas takes (…, F, T)
        spectrogram = torch.from_numpy(spectrogram).transpose(0, 1)
        delta = compute_deltas(spectrogram)
        delta_delta = compute_deltas(delta)
        out_feat = np.concatenate(
            [spectrogram, delta.numpy(), delta_delta.numpy()], axis=0
        )
        out_feat = np.transpose(out_feat)
        return out_feat
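An illustrative sketch, not part of the file above: applying DeltaDeltas to a random 80-bin spectrogram (requires torchaudio for compute_deltas).

import numpy as np
from fairseq.data.audio.feature_transforms.delta_deltas import DeltaDeltas

spectrogram = np.random.rand(100, 80).astype(np.float32)  # T x F
transform = DeltaDeltas()
out = transform(spectrogram)
print(out.shape)  # (100, 240): static, delta and delta-delta stacked along F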
1,192
30.394737
79
py
sign-topic
sign-topic-main/fairseq/data/encoders/utils.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch

from fairseq.data import encoders


def get_whole_word_mask(args, dictionary):
    bpe = encoders.build_bpe(args)
    if bpe is not None:

        def is_beginning_of_word(i):
            if i < dictionary.nspecial:
                # special elements are always considered beginnings
                return True
            tok = dictionary[i]
            if tok.startswith("madeupword"):
                return True
            try:
                return bpe.is_beginning_of_word(tok)
            except ValueError:
                return True

        mask_whole_words = torch.ByteTensor(
            list(map(is_beginning_of_word, range(len(dictionary))))
        )
        return mask_whole_words
    return None
909
28.354839
67
py
sign-topic
sign-topic-main/fairseq/data/huffman/huffman_mmap_indexed_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import mmap import os import shutil import struct import typing as tp from functools import lru_cache import numpy as np import torch from fairseq.data import indexed_dataset from fairseq.data.huffman import HuffmanCoder from fairseq.file_io import PathManager class HuffmanMMapIndex: """ keep an index of the offsets in the huffman binary file. First a header, then the list of sizes (num tokens) for each instance and finally the addresses of each instance. """ _HDR_MAGIC = b"HUFFIDX\x00\x00" _VERSION = 1 @classmethod def writer(cls, path: str, data_len: int): class _Writer: def __enter__(self): self._file = open(path, "wb") # write header (magic + version) self._file.write(cls._HDR_MAGIC) self._file.write(struct.pack("<Q", cls._VERSION)) self._file.write(struct.pack("<Q", data_len)) return self def write(self, sizes, pointers): # add number of items in the index to the header self._file.write(struct.pack("<Q", len(sizes))) # write sizes sizes = np.array(sizes, dtype=np.int32) self._file.write(sizes.tobytes(order="C")) del sizes # write address pointers pointers = np.array(pointers, dtype=np.int64) self._file.write(pointers.tobytes(order="C")) del pointers def __exit__(self, exc_type, exc_val, exc_tb): self._file.close() return _Writer() def __init__(self, path): with open(path, "rb") as stream: # read headers magic_test = stream.read(9) assert self._HDR_MAGIC == magic_test, ( "Index file doesn't match expected format. " "Make sure that --dataset-impl is configured properly." ) (version,) = struct.unpack("<Q", stream.read(8)) assert ( self._VERSION == version ), "Unexpected file version f{version} != code version f{self._VERSION}" # read length of data file (self._data_len,) = struct.unpack("<Q", stream.read(8)) # read number of items in data file/index (self._len,) = struct.unpack("<Q", stream.read(8)) offset = stream.tell() indexed_dataset._warmup_mmap_file(path) self._bin_buffer_mmap = np.memmap(path, mode="r", order="C") self._bin_buffer = memoryview(self._bin_buffer_mmap) self._sizes = np.frombuffer( self._bin_buffer, dtype=np.int32, count=self._len, offset=offset ) self._pointers = np.frombuffer( self._bin_buffer, dtype=np.int64, count=self._len, offset=offset + self._sizes.nbytes, ) def __del__(self): self._bin_buffer_mmap._mmap.close() del self._bin_buffer_mmap def __iter__(self): for i in range(self._len): yield self[i] @property def data_len(self): return self._data_len @property def sizes(self): return self._sizes @lru_cache(maxsize=8) def __getitem__(self, i): return self._pointers[i], self._sizes[i] def __len__(self): return self._len def vocab_file_path(prefix_path): return prefix_path + ".vocab" class HuffmanMMapIndexedDataset(torch.utils.data.Dataset): """ an indexed dataset that use mmap and memoryview to access data from disk that was compressed with a HuffmanCoder. 
""" def __init__(self, prefix_path): super().__init__() self._prefix_path = None self._index = None self._bin_buffer = None self._coder = None self._file = None self._bin_buffer_mmap = None self._do_init(prefix_path) def __getstate__(self): return self._prefix_path def __setstate__(self, state): self._do_init(state) def _do_init(self, prefix_path): self._prefix_path = prefix_path self._index = HuffmanMMapIndex( indexed_dataset.index_file_path(self._prefix_path) ) self._coder = HuffmanCoder.from_file(vocab_file_path(self._prefix_path)) indexed_dataset._warmup_mmap_file( indexed_dataset.data_file_path(self._prefix_path) ) self._file = os.open( indexed_dataset.data_file_path(self._prefix_path), os.O_RDONLY ) self._bin_buffer_mmap = mmap.mmap( self._file, self._index.data_len, access=mmap.ACCESS_READ, ) self._bin_buffer = memoryview(self._bin_buffer_mmap) def __del__(self): del self._bin_buffer if self._file: os.close(self._file) del self._index def __len__(self): return len(self._index) def _decode(self, i): ptr, _ = self._index[i] if i == 0: raw_bytes = self._bin_buffer[:ptr] else: (prev_ptr, _) = self._index[i - 1] raw_bytes = self._bin_buffer[prev_ptr:ptr] return self._coder.decode(raw_bytes.tobytes()) @lru_cache(maxsize=8) def __getitem__(self, i): nodes = self._decode(i) return torch.tensor([n.id for n in nodes], dtype=torch.int64) def __iter__(self): for idx in range(len(self)): yield self[idx] def get_symbols(self, i): nodes = self._decode(i) for n in nodes: yield n.symbol @property def sizes(self): return self._index.sizes @property def supports_prefetch(self): return False @property def coder(self): return self._coder @staticmethod def exists(prefix_path): return ( PathManager.exists(indexed_dataset.index_file_path(prefix_path)) and PathManager.exists(indexed_dataset.data_file_path(prefix_path)) and PathManager.exists(vocab_file_path(prefix_path)) ) class HuffmanMMapIndexedDatasetBuilder: """ Helper to build a memory mapped datasets with a huffman encoder. You can either open/close this manually or use it as a ContextManager. Provide your own coder, it will then be stored alongside the dataset. The builder will first write the vocab file, then open the binary file so you can stream into it, finally the index will be written when the builder is closed (your index should fit in memory). """ def __init__(self, path_prefix: str, coder: HuffmanCoder) -> None: self._path_prefix = path_prefix self._coder = coder self._sizes = [] self._ptrs = [] self._data_len = 0 def open(self): self._coder.to_file(vocab_file_path(self._path_prefix)) self._data_file = open(indexed_dataset.data_file_path(self._path_prefix), "wb") def __enter__(self) -> "HuffmanMMapIndexedDatasetBuilder": self.open() return self def add_item(self, tokens: tp.List[str]) -> None: """ add a list of tokens to the dataset, they will compressed with the provided coder before being written to file. """ encoded = self._coder.encode(tokens) code_len = len(encoded) last_ptr = 0 if len(self._ptrs) > 0: last_ptr = self._ptrs[-1] self._sizes.append(len(tokens)) self._ptrs.append(last_ptr + code_len) self._data_len += code_len self._data_file.write(encoded) def append(self, other_dataset_path_prefix: str) -> None: """ append an existing dataset. Beware, if it wasn't built with the same coder, you are in trouble. 
""" other_index = HuffmanMMapIndex( indexed_dataset.index_file_path(other_dataset_path_prefix) ) for (ptr, size) in other_index: self._ptrs.append(ptr + self._data_len) self._sizes.append(size) # Concatenate data with open(indexed_dataset.data_file_path(other_dataset_path_prefix), "rb") as f: shutil.copyfileobj(f, self._data_file) self._data_len += other_index.data_len def close(self): self._data_file.close() with HuffmanMMapIndex.writer( indexed_dataset.index_file_path(self._path_prefix), self._data_len ) as index: index.write(self._sizes, self._ptrs) def __exit__(self, exc_type, exc_val, exc_tb) -> None: self.close()
8,809
29.590278
108
py
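A short usage sketch for the builder/dataset pair above. The paths ("my_coder.vocab", "data/train") are hypothetical, and the coder is assumed to have been built and saved earlier so that HuffmanCoder.from_file can load it and already covers the tokens being added; only calls that appear in the file above are used.

from fairseq.data.huffman import HuffmanCoder
from fairseq.data.huffman.huffman_mmap_indexed_dataset import (
    HuffmanMMapIndexedDataset,
    HuffmanMMapIndexedDatasetBuilder,
)

# Load a previously built coder (hypothetical path); it is assumed to already
# cover the tokens written below.
coder = HuffmanCoder.from_file("my_coder.vocab")

# The builder writes the vocab on open(), streams encoded items into the .bin
# file, and writes the index when the context manager exits.
with HuffmanMMapIndexedDatasetBuilder("data/train", coder) as builder:
    builder.add_item(["hello", "world"])
    builder.add_item(["good", "morning"])

dataset = HuffmanMMapIndexedDataset("data/train")
print(len(dataset))                   # 2
print(list(dataset.get_symbols(0)))   # ["hello", "world"]
print(dataset[0])                     # int64 tensor of symbol ids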
sign-topic
sign-topic-main/fairseq/data/legacy/block_pair_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import numpy as np import torch from fairseq.data import FairseqDataset class BlockPairDataset(FairseqDataset): """Break a Dataset of tokens into sentence pair blocks for next sentence prediction as well as masked language model. High-level logics are: 1. break input tensor to tensor blocks 2. pair the blocks with 50% next sentence and 50% random sentence 3. return paired blocks as well as related segment labels Args: dataset (~torch.utils.data.Dataset): dataset to break into blocks sizes: array of sentence lengths dictionary: dictionary for the task block_size: maximum block size break_mode: mode for breaking copurs into block pairs. currently we support 2 modes doc: respect document boundaries and each part of the pair should belong to on document none: don't respect any boundary and cut tokens evenly short_seq_prob: probability for generating shorter block pairs doc_break_size: Size for empty line separating documents. Typically 1 if the sentences have eos, 0 otherwise. """ def __init__( self, dataset, dictionary, sizes, block_size, break_mode="doc", short_seq_prob=0.1, doc_break_size=1, ): super().__init__() self.dataset = dataset self.pad = dictionary.pad() self.eos = dictionary.eos() self.cls = dictionary.cls() self.mask = dictionary.mask() self.sep = dictionary.sep() self.break_mode = break_mode self.dictionary = dictionary self.short_seq_prob = short_seq_prob self.block_indices = [] assert len(dataset) == len(sizes) if break_mode == "doc": cur_doc = [] for sent_id, sz in enumerate(sizes): assert doc_break_size == 0 or sz != 0, ( "when doc_break_size is non-zero, we expect documents to be" "separated by a blank line with a single eos." ) # empty line as document separator if sz == doc_break_size: if len(cur_doc) == 0: continue self.block_indices.append(cur_doc) cur_doc = [] else: cur_doc.append(sent_id) max_num_tokens = block_size - 3 # Account for [CLS], [SEP], [SEP] self.sent_pairs = [] self.sizes = [] for doc_id, doc in enumerate(self.block_indices): self._generate_sentence_pair(doc, doc_id, max_num_tokens, sizes) elif break_mode is None or break_mode == "none": # each block should have half of the block size since we are constructing block pair sent_length = (block_size - 3) // 2 total_len = sum(dataset.sizes) length = math.ceil(total_len / sent_length) def block_at(i): start = i * sent_length end = min(start + sent_length, total_len) return (start, end) sent_indices = np.array([block_at(i) for i in range(length)]) sent_sizes = np.array([e - s for s, e in sent_indices]) dataset_index = self._sent_to_dataset_index(sent_sizes) # pair sentences self._pair_sentences(dataset_index) else: raise ValueError("Invalid break_mode: " + break_mode) def _pair_sentences(self, dataset_index): """ Give a list of evenly cut blocks/sentences, pair these sentences with 50% consecutive sentences and 50% random sentences. 
This is used for none break mode """ # pair sentences for sent_id, sent in enumerate(dataset_index): next_sent_label = ( 1 if np.random.rand() > 0.5 and sent_id != len(dataset_index) - 1 else 0 ) if next_sent_label: next_sent = dataset_index[sent_id + 1] else: next_sent = dataset_index[ self._skip_sampling(len(dataset_index), [sent_id, sent_id + 1]) ] self.sent_pairs.append((sent, next_sent, next_sent_label)) # The current blocks don't include the special tokens but the # sizes already account for this self.sizes.append(3 + sent[3] + next_sent[3]) def _sent_to_dataset_index(self, sent_sizes): """ Build index mapping block indices to the underlying dataset indices """ dataset_index = [] ds_idx, ds_remaining = -1, 0 for to_consume in sent_sizes: sent_size = to_consume if ds_remaining == 0: ds_idx += 1 ds_remaining = sent_sizes[ds_idx] start_ds_idx = ds_idx start_offset = sent_sizes[ds_idx] - ds_remaining while to_consume > ds_remaining: to_consume -= ds_remaining ds_idx += 1 ds_remaining = sent_sizes[ds_idx] ds_remaining -= to_consume dataset_index.append( ( start_ds_idx, # starting index in dataset start_offset, # starting offset within starting index ds_idx, # ending index in dataset sent_size, # sentence length ) ) assert ds_remaining == 0 assert ds_idx == len(self.dataset) - 1 return dataset_index def _generate_sentence_pair(self, doc, doc_id, max_num_tokens, sizes): """ Go through a single document and genrate sentence paris from it """ current_chunk = [] current_length = 0 curr = 0 # To provide more randomness, we decrease target seq length for parts of # samples (10% by default). Note that max_num_tokens is the hard threshold # for batching and will never be changed. target_seq_length = max_num_tokens if np.random.random() < self.short_seq_prob: target_seq_length = np.random.randint(2, max_num_tokens) # loop through all sentences in document while curr < len(doc): sent_id = doc[curr] current_chunk.append(sent_id) current_length = sum(sizes[current_chunk]) # split chunk and generate pair when exceed target_seq_length or # finish the loop if curr == len(doc) - 1 or current_length >= target_seq_length: # split the chunk into 2 parts a_end = 1 if len(current_chunk) > 2: a_end = np.random.randint(1, len(current_chunk) - 1) sent_a = current_chunk[:a_end] len_a = sum(sizes[sent_a]) # generate next sentence label, note that if there is only 1 sentence # in current chunk, label is always 0 next_sent_label = ( 1 if np.random.rand() > 0.5 and len(current_chunk) != 1 else 0 ) if not next_sent_label: # if next sentence label is 0, sample sent_b from a random doc target_b_length = target_seq_length - len_a rand_doc_id = self._skip_sampling(len(self.block_indices), [doc_id]) random_doc = self.block_indices[rand_doc_id] random_start = np.random.randint(0, len(random_doc)) sent_b = [] len_b = 0 for j in range(random_start, len(random_doc)): sent_b.append(random_doc[j]) len_b = sum(sizes[sent_b]) if len_b >= target_b_length: break # return the second part of the chunk since it's not used num_unused_segments = len(current_chunk) - a_end curr -= num_unused_segments else: # if next sentence label is 1, use the second part of chunk as sent_B sent_b = current_chunk[a_end:] len_b = sum(sizes[sent_b]) # currently sent_a and sent_B may be longer than max_num_tokens, # truncate them and return block idx and offsets for them sent_a, sent_b = self._truncate_sentences( sent_a, sent_b, max_num_tokens ) self.sent_pairs.append((sent_a, sent_b, next_sent_label)) self.sizes.append(3 + sent_a[3] + sent_b[3]) 
current_chunk = [] curr += 1 def _skip_sampling(self, total, skip_ids): """ Generate a random integer which is not in skip_ids. Sample range is [0, total) TODO: ids in skip_ids should be consecutive, we can extend it to more generic version later """ rand_id = np.random.randint(total - len(skip_ids)) return rand_id if rand_id < min(skip_ids) else rand_id + len(skip_ids) def _truncate_sentences(self, sent_a, sent_b, max_num_tokens): """ Trancate a pair of sentence to limit total length under max_num_tokens Logics: 1. Truncate longer sentence 2. Tokens to be truncated could be at the beginning or the end of the sentnce Returns: Truncated sentences represented by dataset idx """ len_a, len_b = sum(self.dataset.sizes[sent_a]), sum(self.dataset.sizes[sent_b]) front_cut_a = front_cut_b = end_cut_a = end_cut_b = 0 while True: total_length = ( len_a + len_b - front_cut_a - front_cut_b - end_cut_a - end_cut_b ) if total_length <= max_num_tokens: break if len_a - front_cut_a - end_cut_a > len_b - front_cut_b - end_cut_b: if np.random.rand() < 0.5: front_cut_a += 1 else: end_cut_a += 1 else: if np.random.rand() < 0.5: front_cut_b += 1 else: end_cut_b += 1 # calculate ds indices as well as offsets and return truncated_sent_a = self._cut_sentence(sent_a, front_cut_a, end_cut_a) truncated_sent_b = self._cut_sentence(sent_b, front_cut_b, end_cut_b) return truncated_sent_a, truncated_sent_b def _cut_sentence(self, sent, front_cut, end_cut): """ Cut a sentence based on the numbers of tokens to be cut from beginning and end Represent the sentence as dataset idx and return """ start_ds_idx, end_ds_idx, offset = sent[0], sent[-1], 0 target_len = sum(self.dataset.sizes[sent]) - front_cut - end_cut while front_cut > 0: if self.dataset.sizes[start_ds_idx] > front_cut: offset += front_cut break else: front_cut -= self.dataset.sizes[start_ds_idx] start_ds_idx += 1 while end_cut > 0: if self.dataset.sizes[end_ds_idx] > end_cut: break else: end_cut -= self.dataset.sizes[end_ds_idx] end_ds_idx -= 1 return start_ds_idx, offset, end_ds_idx, target_len def _fetch_block(self, start_ds_idx, offset, end_ds_idx, length): """ Fetch a block of tokens based on its dataset idx """ buffer = torch.cat( [self.dataset[idx] for idx in range(start_ds_idx, end_ds_idx + 1)] ) s, e = offset, offset + length return buffer[s:e] def __getitem__(self, index): block1, block2, next_sent_label = self.sent_pairs[index] block1 = self._fetch_block(*block1) block2 = self._fetch_block(*block2) return block1, block2, next_sent_label def __len__(self): return len(self.sizes) @property def supports_prefetch(self): return getattr(self.dataset, "supports_prefetch", False) def prefetch(self, indices): prefetch_idx = set() for index in indices: for block1, block2, _ in [self.sent_pairs[index]]: for ds_idx in range(block1[0], block1[2] + 1): prefetch_idx.add(ds_idx) for ds_idx in range(block2[0], block2[2] + 1): prefetch_idx.add(ds_idx) self.dataset.prefetch(prefetch_idx)
12,877
40.275641
99
py
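BlockPairDataset needs a fairseq Dictionary with CLS/SEP/MASK symbols plus an indexed dataset, so a fully self-contained end-to-end example is impractical here. Instead, this is a standalone sketch of the _skip_sampling trick used above, which draws a random index while excluding a consecutive block of ids; the concrete numbers are made up.

import numpy as np

def skip_sampling(total, skip_ids):
    # Draw from the reduced range, then shift past the skipped (consecutive) block.
    rand_id = np.random.randint(total - len(skip_ids))
    return rand_id if rand_id < min(skip_ids) else rand_id + len(skip_ids)

np.random.seed(0)
samples = [skip_sampling(10, [3, 4]) for _ in range(1000)]
assert not any(s in (3, 4) for s in samples)    # the excluded ids never appear
assert min(samples) == 0 and max(samples) == 9  # the rest of [0, 10) stays reachable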
sign-topic
sign-topic-main/fairseq/data/legacy/masked_lm_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math from typing import Dict, List, Tuple import numpy as np import torch from fairseq.data import Dictionary, FairseqDataset, data_utils from fairseq.data.concat_dataset import ConcatDataset from fairseq.data.legacy.block_pair_dataset import BlockPairDataset from fairseq.data.token_block_dataset import TokenBlockDataset class MaskedLMDataset(FairseqDataset): """ A wrapper Dataset for masked language modelling. The dataset wraps around TokenBlockDataset or BlockedPairDataset and creates a batch where the input blocks are masked according to the specified masking probability. Additionally the batch can also contain sentence level targets if this is specified. Args: dataset: Dataset which generates blocks of data. Only BlockPairDataset and TokenBlockDataset are supported. sizes: Sentence lengths vocab: Dictionary with the vocabulary and special tokens. pad_idx: Id of padding token in dictionary mask_idx: Id of mask token in dictionary classif_token_idx: Id of classification token in dictionary. This is the token associated with the sentence embedding (Eg: CLS for BERT) sep_token_idx: Id of separator token in dictionary (Eg: SEP in BERT) seed: Seed for random number generator for reproducibility. shuffle: Shuffle the elements before batching. has_pairs: Specifies whether the underlying dataset generates a pair of blocks along with a sentence_target or not. Setting it to True assumes that the underlying dataset generates a label for the pair of sentences which is surfaced as sentence_target. The default value assumes a single block with no sentence target. segment_id: An optional segment id for filling in the segment labels when we are in the single block setting (Eg: XLM). Default is 0. masking_ratio: specifies what percentage of the blocks should be masked. masking_prob: specifies the probability of a given token being replaced with the "MASK" token. random_token_prob: specifies the probability of a given token being replaced by a random token from the vocabulary. 
""" def __init__( self, dataset: FairseqDataset, sizes: np.ndarray, vocab: Dictionary, pad_idx: int, mask_idx: int, classif_token_idx: int, sep_token_idx: int, seed: int = 1, shuffle: bool = True, has_pairs: bool = True, segment_id: int = 0, masking_ratio: float = 0.15, masking_prob: float = 0.8, random_token_prob: float = 0.1, ): # Make sure the input datasets are the ones supported assert ( isinstance(dataset, TokenBlockDataset) or isinstance(dataset, BlockPairDataset) or isinstance(dataset, ConcatDataset) ), ( "MaskedLMDataset only wraps TokenBlockDataset or BlockPairDataset or " "ConcatDataset" ) self.dataset = dataset self.sizes = np.array(sizes) self.vocab = vocab self.pad_idx = pad_idx self.mask_idx = mask_idx self.classif_token_idx = classif_token_idx self.sep_token_idx = sep_token_idx self.shuffle = shuffle self.seed = seed self.has_pairs = has_pairs self.segment_id = segment_id self.masking_ratio = masking_ratio self.masking_prob = masking_prob self.random_token_prob = random_token_prob # If we have only one block then sizes needs to be updated to include # the classification token if not has_pairs: self.sizes = self.sizes + 1 def __getitem__(self, index: int): # if has_pairs, then expect 2 blocks and a sentence target if self.has_pairs: (block_one, block_two, sentence_target) = self.dataset[index] else: block_one = self.dataset[index] return { "id": index, "block_one": block_one, "block_two": block_two if self.has_pairs else None, "sentence_target": sentence_target if self.has_pairs else None, } def __len__(self): return len(self.dataset) def _mask_block( self, sentence: np.ndarray, mask_idx: int, pad_idx: int, dictionary_token_range: Tuple, ): """ Mask tokens for Masked Language Model training Samples mask_ratio tokens that will be predicted by LM. Note:This function may not be efficient enough since we had multiple conversions between np and torch, we can replace them with torch operators later. Args: sentence: 1d tensor to be masked mask_idx: index to use for masking the sentence pad_idx: index to use for masking the target for tokens we aren't predicting dictionary_token_range: range of indices in dictionary which can be used for random word replacement (e.g. without special characters) Return: masked_sent: masked sentence target: target with words which we are not predicting replaced by pad_idx """ masked_sent = np.copy(sentence) sent_length = len(sentence) mask_num = math.ceil(sent_length * self.masking_ratio) mask = np.random.choice(sent_length, mask_num, replace=False) target = np.copy(sentence) for i in range(sent_length): if i in mask: rand = np.random.random() # replace with mask if probability is less than masking_prob # (Eg: 0.8) if rand < self.masking_prob: masked_sent[i] = mask_idx # replace with random token if probability is less than # masking_prob + random_token_prob (Eg: 0.9) elif rand < (self.masking_prob + self.random_token_prob): # sample random token from dictionary masked_sent[i] = np.random.randint( dictionary_token_range[0], dictionary_token_range[1] ) else: target[i] = pad_idx return masked_sent, target def _collate(self, samples: List[Dict], pad_idx: int, eos_idx: int): """ Does the heavy lifting for creating a batch from the input list of examples. The logic is as follows: 1. Mask the input blocks. In case has_pair is True then we have 2 blocks to mask. 2. Prepend the first masked block tensor with the special token used as sentence embedding. Eg: CLS in BERT. This happens irrespective of the value of has_pair. 3. 
If has_pair is True, then append the first masked block with the special separator token (eg: SEP for BERT) and compute segment label accordingly. In this case, also append the second masked block with this special separator token and compute its segment label. 4. For the targets tensor, prepend and append with padding index accordingly. 5. Concatenate all tensors. """ if len(samples) == 0: return {} # To ensure determinism, we reset the state of the PRNG after every # batch based on the seed and the first id of the batch. This ensures # that across epochs we get the same mask for the same example. This # is needed for reproducibility and is how BERT does masking # TODO: Can we add deteminism without this constraint? with data_utils.numpy_seed(self.seed + samples[0]["id"]): for s in samples: # token range is needed for replacing with random token during # masking token_range = (self.vocab.nspecial, len(self.vocab)) # mask according to specified probabilities. masked_blk_one, masked_tgt_one = self._mask_block( s["block_one"], self.mask_idx, self.pad_idx, token_range, ) tokens = np.concatenate([[self.classif_token_idx], masked_blk_one]) targets = np.concatenate([[self.pad_idx], masked_tgt_one]) segments = np.ones(len(tokens)) * self.segment_id # if has_pairs is True then we need to add the SEP token to both # the blocks after masking and re-compute segments based on the new # lengths. if self.has_pairs: tokens_one = np.concatenate([tokens, [self.sep_token_idx]]) targets_one = np.concatenate([targets, [self.pad_idx]]) masked_blk_two, masked_tgt_two = self._mask_block( s["block_two"], self.mask_idx, self.pad_idx, token_range ) tokens_two = np.concatenate([masked_blk_two, [self.sep_token_idx]]) targets_two = np.concatenate([masked_tgt_two, [self.pad_idx]]) # block + 1 sep + 1 special (CLS) segments_one = np.zeros(len(tokens_one)) # block + 1 sep segments_two = np.ones(len(tokens_two)) tokens = np.concatenate([tokens_one, tokens_two]) targets = np.concatenate([targets_one, targets_two]) segments = np.concatenate([segments_one, segments_two]) s["source"] = torch.LongTensor(tokens) s["segment_labels"] = torch.LongTensor(segments) s["lm_target"] = torch.LongTensor(targets) def merge(key): return data_utils.collate_tokens( [s[key] for s in samples], pad_idx, eos_idx, left_pad=False ) return { "id": torch.LongTensor([s["id"] for s in samples]), "ntokens": sum(len(s["source"]) for s in samples), "net_input": { "src_tokens": merge("source"), "segment_labels": merge("segment_labels"), }, "lm_target": merge("lm_target"), "sentence_target": torch.LongTensor([s["sentence_target"] for s in samples]) if self.has_pairs else None, "nsentences": len(samples), } def collater(self, samples: List[Dict]): """Merge a list of samples to form a mini-batch. Args: samples (List[dict]): samples to collate Returns: dict: a mini-batch of data """ return self._collate(samples, self.vocab.pad(), self.vocab.eos()) def num_tokens(self, index: int): """ Return the number of tokens in a sample. This value is used to enforce max-tokens during batching. """ return self.sizes[index] def size(self, index: int): """ Return an example's size as a float or tuple. This value is used when filtering a dataset with max-positions. """ return self.sizes[index] def ordered_indices(self): """ Return an ordered list of indices. Batches will be constructed based on this order. 
""" if self.shuffle: return np.random.permutation(len(self)) else: order = [np.arange(len(self))] order.append(self.sizes) return np.lexsort(order) @property def supports_prefetch(self): return getattr(self.dataset, "supports_prefetch", False) def prefetch(self, indices): self.dataset.prefetch(indices)
12,168
39.029605
88
py
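A self-contained sketch of the masking rule that _mask_block above implements: roughly masking_ratio of the positions are selected for prediction; each selected token becomes the mask index with probability masking_prob, a random in-vocabulary token with probability random_token_prob, and is otherwise kept unchanged, while unselected positions get pad_idx in the target so the loss ignores them. The token ids, indices, and ranges below are made up for illustration.

import math
import numpy as np

def mask_block(sentence, mask_idx, pad_idx, token_range,
               masking_ratio=0.15, masking_prob=0.8, random_token_prob=0.1):
    masked = np.copy(sentence)
    target = np.copy(sentence)
    num_mask = math.ceil(len(sentence) * masking_ratio)
    positions = set(
        int(p) for p in np.random.choice(len(sentence), num_mask, replace=False)
    )
    for i in range(len(sentence)):
        if i in positions:
            r = np.random.random()
            if r < masking_prob:
                masked[i] = mask_idx                          # e.g. 80%: replace with [MASK]
            elif r < masking_prob + random_token_prob:
                masked[i] = np.random.randint(*token_range)   # e.g. 10%: random token
            # remaining ~10%: keep the original token, but still predict it
        else:
            target[i] = pad_idx                               # not selected: ignored by the loss
    return masked, target

np.random.seed(3)
sent = np.arange(10, 30)  # 20 hypothetical token ids
masked, target = mask_block(sent, mask_idx=4, pad_idx=1, token_range=(5, 100))
print(masked)
print(target)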
sign-topic
sign-topic-main/fairseq/tasks/text_to_speech.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import os import os.path as op import torch import torch.nn.functional as F import numpy as np from fairseq.data.audio.text_to_speech_dataset import TextToSpeechDatasetCreator from fairseq.tasks import register_task from fairseq.tasks.speech_to_text import SpeechToTextTask from fairseq.speech_generator import ( AutoRegressiveSpeechGenerator, NonAutoregressiveSpeechGenerator, TeacherForcingAutoRegressiveSpeechGenerator, ) logging.basicConfig( format="%(asctime)s | %(levelname)s | %(name)s | %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, ) logger = logging.getLogger(__name__) try: from tensorboardX import SummaryWriter except ImportError: logger.info("Please install tensorboardX: pip install tensorboardX") SummaryWriter = None @register_task("text_to_speech") class TextToSpeechTask(SpeechToTextTask): @staticmethod def add_args(parser): parser.add_argument("data", help="manifest root path") parser.add_argument( "--config-yaml", type=str, default="config.yaml", help="Configuration YAML filename (under manifest root)", ) parser.add_argument( "--max-source-positions", default=1024, type=int, metavar="N", help="max number of tokens in the source sequence", ) parser.add_argument( "--max-target-positions", default=1200, type=int, metavar="N", help="max number of tokens in the target sequence", ) parser.add_argument("--n-frames-per-step", type=int, default=1) parser.add_argument("--eos-prob-threshold", type=float, default=0.5) parser.add_argument("--eval-inference", action="store_true") parser.add_argument("--eval-tb-nsample", type=int, default=8) parser.add_argument("--vocoder", type=str, default="griffin_lim") parser.add_argument("--spec-bwd-max-iter", type=int, default=8) def __init__(self, args, src_dict): super().__init__(args, src_dict) self.src_dict = src_dict self.sr = self.data_cfg.config.get("features").get("sample_rate") self.tensorboard_writer = None self.tensorboard_dir = "" if args.tensorboard_logdir and SummaryWriter is not None: self.tensorboard_dir = os.path.join(args.tensorboard_logdir, "valid_extra") def load_dataset(self, split, epoch=1, combine=False, **kwargs): is_train_split = split.startswith("train") pre_tokenizer = self.build_tokenizer(self.args) bpe_tokenizer = self.build_bpe(self.args) self.datasets[split] = TextToSpeechDatasetCreator.from_tsv( self.args.data, self.data_cfg, split, self.src_dict, pre_tokenizer, bpe_tokenizer, is_train_split=is_train_split, epoch=epoch, seed=self.args.seed, n_frames_per_step=self.args.n_frames_per_step, speaker_to_id=self.speaker_to_id, ) @property def target_dictionary(self): return None @property def source_dictionary(self): return self.src_dict def get_speaker_embeddings_path(self): speaker_emb_path = None if self.data_cfg.config.get("speaker_emb_filename") is not None: speaker_emb_path = op.join( self.args.data, self.data_cfg.config.get("speaker_emb_filename") ) return speaker_emb_path @classmethod def get_speaker_embeddings(cls, args): embed_speaker = None if args.speaker_to_id is not None: if args.speaker_emb_path is None: embed_speaker = torch.nn.Embedding( len(args.speaker_to_id), args.speaker_embed_dim ) else: speaker_emb_mat = np.load(args.speaker_emb_path) assert speaker_emb_mat.shape[1] == args.speaker_embed_dim embed_speaker = torch.nn.Embedding.from_pretrained( torch.from_numpy(speaker_emb_mat), freeze=True, ) 
logger.info( f"load speaker embeddings from {args.speaker_emb_path}. " f"train embedding? {embed_speaker.weight.requires_grad}\n" f"embeddings:\n{speaker_emb_mat}" ) return embed_speaker def build_model(self, cfg): cfg.pitch_min = self.data_cfg.config["features"].get("pitch_min", None) cfg.pitch_max = self.data_cfg.config["features"].get("pitch_max", None) cfg.energy_min = self.data_cfg.config["features"].get("energy_min", None) cfg.energy_max = self.data_cfg.config["features"].get("energy_max", None) cfg.speaker_emb_path = self.get_speaker_embeddings_path() model = super().build_model(cfg) self.generator = None if getattr(cfg, "eval_inference", False): self.generator = self.build_generator([model], cfg) return model def build_generator(self, models, cfg, vocoder=None, **unused): if vocoder is None: vocoder = self.build_default_vocoder() model = models[0] if getattr(model, "NON_AUTOREGRESSIVE", False): return NonAutoregressiveSpeechGenerator(model, vocoder, self.data_cfg) else: generator = AutoRegressiveSpeechGenerator if getattr(cfg, "teacher_forcing", False): generator = TeacherForcingAutoRegressiveSpeechGenerator logger.info("Teacher forcing mode for generation") return generator( model, vocoder, self.data_cfg, max_iter=self.args.max_target_positions, eos_prob_threshold=self.args.eos_prob_threshold, ) def build_default_vocoder(self): from fairseq.models.text_to_speech.vocoder import get_vocoder vocoder = get_vocoder(self.args, self.data_cfg) if torch.cuda.is_available() and not self.args.cpu: vocoder = vocoder.cuda() else: vocoder = vocoder.cpu() return vocoder def valid_step(self, sample, model, criterion): loss, sample_size, logging_output = super().valid_step(sample, model, criterion) if getattr(self.args, "eval_inference", False): hypos, inference_losses = self.valid_step_with_inference( sample, model, self.generator ) for k, v in inference_losses.items(): assert k not in logging_output logging_output[k] = v picked_id = 0 if self.tensorboard_dir and (sample["id"] == picked_id).any(): self.log_tensorboard( sample, hypos[: self.args.eval_tb_nsample], model._num_updates, is_na_model=getattr(model, "NON_AUTOREGRESSIVE", False), ) return loss, sample_size, logging_output def valid_step_with_inference(self, sample, model, generator): hypos = generator.generate(model, sample, has_targ=True) losses = { "mcd_loss": 0.0, "targ_frames": 0.0, "pred_frames": 0.0, "nins": 0.0, "ndel": 0.0, } rets = batch_mel_cepstral_distortion( [hypo["targ_waveform"] for hypo in hypos], [hypo["waveform"] for hypo in hypos], self.sr, normalize_type=None, ) for d, extra in rets: pathmap = extra[-1] losses["mcd_loss"] += d.item() losses["targ_frames"] += pathmap.size(0) losses["pred_frames"] += pathmap.size(1) losses["nins"] += (pathmap.sum(dim=1) - 1).sum().item() losses["ndel"] += (pathmap.sum(dim=0) - 1).sum().item() return hypos, losses def log_tensorboard(self, sample, hypos, num_updates, is_na_model=False): if self.tensorboard_writer is None: self.tensorboard_writer = SummaryWriter(self.tensorboard_dir) tb_writer = self.tensorboard_writer for b in range(len(hypos)): idx = sample["id"][b] text = sample["src_texts"][b] targ = hypos[b]["targ_feature"] pred = hypos[b]["feature"] attn = hypos[b]["attn"] if is_na_model: data = plot_tts_output( [targ.transpose(0, 1), pred.transpose(0, 1)], [f"target (idx={idx})", "output"], attn, "alignment", ret_np=True, suptitle=text, ) else: eos_prob = hypos[b]["eos_prob"] data = plot_tts_output( [targ.transpose(0, 1), pred.transpose(0, 1), attn], [f"target (idx={idx})", 
"output", "alignment"], eos_prob, "eos prob", ret_np=True, suptitle=text, ) tb_writer.add_image( f"inference_sample_{b}", data, num_updates, dataformats="HWC" ) if hypos[b]["waveform"] is not None: targ_wave = hypos[b]["targ_waveform"].detach().cpu().float() pred_wave = hypos[b]["waveform"].detach().cpu().float() tb_writer.add_audio( f"inference_targ_{b}", targ_wave, num_updates, sample_rate=self.sr ) tb_writer.add_audio( f"inference_pred_{b}", pred_wave, num_updates, sample_rate=self.sr ) def save_figure_to_numpy(fig): data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep="") data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) return data DEFAULT_V_MIN = np.log(1e-5) def plot_tts_output( data_2d, title_2d, data_1d, title_1d, figsize=(24, 4), v_min=DEFAULT_V_MIN, v_max=3, ret_np=False, suptitle="", ): try: import matplotlib.pyplot as plt from mpl_toolkits.axes_grid1 import make_axes_locatable except ImportError: raise ImportError("Please install Matplotlib: pip install matplotlib") data_2d = [ x.detach().cpu().float().numpy() if isinstance(x, torch.Tensor) else x for x in data_2d ] fig, axes = plt.subplots(1, len(data_2d) + 1, figsize=figsize) if suptitle: fig.suptitle(suptitle[:400]) # capped at 400 chars axes = [axes] if len(data_2d) == 0 else axes for ax, x, name in zip(axes, data_2d, title_2d): ax.set_title(name) divider = make_axes_locatable(ax) cax = divider.append_axes("right", size="5%", pad=0.05) im = ax.imshow( x, origin="lower", aspect="auto", vmin=max(x.min(), v_min), vmax=min(x.max(), v_max), ) fig.colorbar(im, cax=cax, orientation="vertical") if isinstance(data_1d, torch.Tensor): data_1d = data_1d.detach().cpu().numpy() axes[-1].plot(data_1d) axes[-1].set_title(title_1d) plt.tight_layout() if ret_np: fig.canvas.draw() data = save_figure_to_numpy(fig) plt.close(fig) return data def antidiag_indices(offset, min_i=0, max_i=None, min_j=0, max_j=None): """ for a (3, 4) matrix with min_i=1, max_i=3, min_j=1, max_j=4, outputs offset=2 (1, 1), offset=3 (2, 1), (1, 2) offset=4 (2, 2), (1, 3) offset=5 (2, 3) constraints: i + j = offset min_j <= j < max_j min_i <= offset - j < max_i """ if max_i is None: max_i = offset + 1 if max_j is None: max_j = offset + 1 min_j = max(min_j, offset - max_i + 1, 0) max_j = min(max_j, offset - min_i + 1, offset + 1) j = torch.arange(min_j, max_j) i = offset - j return torch.stack([i, j]) def batch_dynamic_time_warping(distance, shapes=None): """full batched DTW without any constraints distance: (batchsize, max_M, max_N) matrix shapes: (batchsize,) vector specifying (M, N) for each entry """ # ptr: 0=left, 1=up-left, 2=up ptr2dij = {0: (0, -1), 1: (-1, -1), 2: (-1, 0)} bsz, m, n = distance.size() cumdist = torch.zeros_like(distance) backptr = torch.zeros_like(distance).type(torch.int32) - 1 # initialize cumdist[:, 0, :] = distance[:, 0, :].cumsum(dim=-1) cumdist[:, :, 0] = distance[:, :, 0].cumsum(dim=-1) backptr[:, 0, :] = 0 backptr[:, :, 0] = 2 # DP with optimized anti-diagonal parallelization, O(M+N) steps for offset in range(2, m + n - 1): ind = antidiag_indices(offset, 1, m, 1, n) c = torch.stack( [ cumdist[:, ind[0], ind[1] - 1], cumdist[:, ind[0] - 1, ind[1] - 1], cumdist[:, ind[0] - 1, ind[1]], ], dim=2, ) v, b = c.min(axis=-1) backptr[:, ind[0], ind[1]] = b.int() cumdist[:, ind[0], ind[1]] = v + distance[:, ind[0], ind[1]] # backtrace pathmap = torch.zeros_like(backptr) for b in range(bsz): i = m - 1 if shapes is None else (shapes[b][0] - 1).item() j = n - 1 if shapes is None else (shapes[b][1] - 1).item() dtwpath = 
[(i, j)] while (i != 0 or j != 0) and len(dtwpath) < 10000: assert i >= 0 and j >= 0 di, dj = ptr2dij[backptr[b, i, j].item()] i, j = i + di, j + dj dtwpath.append((i, j)) dtwpath = dtwpath[::-1] indices = torch.from_numpy(np.array(dtwpath)) pathmap[b, indices[:, 0], indices[:, 1]] = 1 return cumdist, backptr, pathmap def compute_l2_dist(x1, x2): """compute an (m, n) L2 distance matrix from (m, d) and (n, d) matrices""" return torch.cdist(x1.unsqueeze(0), x2.unsqueeze(0), p=2).squeeze(0).pow(2) def compute_rms_dist(x1, x2): l2_dist = compute_l2_dist(x1, x2) return (l2_dist / x1.size(1)).pow(0.5) def get_divisor(pathmap, normalize_type): if normalize_type is None: return 1 elif normalize_type == "len1": return pathmap.size(0) elif normalize_type == "len2": return pathmap.size(1) elif normalize_type == "path": return pathmap.sum().item() else: raise ValueError(f"normalize_type {normalize_type} not supported") def batch_compute_distortion(y1, y2, sr, feat_fn, dist_fn, normalize_type): d, s, x1, x2 = [], [], [], [] for cur_y1, cur_y2 in zip(y1, y2): assert cur_y1.ndim == 1 and cur_y2.ndim == 1 cur_x1 = feat_fn(cur_y1) cur_x2 = feat_fn(cur_y2) x1.append(cur_x1) x2.append(cur_x2) cur_d = dist_fn(cur_x1, cur_x2) d.append(cur_d) s.append(d[-1].size()) max_m = max(ss[0] for ss in s) max_n = max(ss[1] for ss in s) d = torch.stack( [F.pad(dd, (0, max_n - dd.size(1), 0, max_m - dd.size(0))) for dd in d] ) s = torch.LongTensor(s).to(d.device) cumdists, backptrs, pathmaps = batch_dynamic_time_warping(d, s) rets = [] itr = zip(s, x1, x2, d, cumdists, backptrs, pathmaps) for (m, n), cur_x1, cur_x2, dist, cumdist, backptr, pathmap in itr: cumdist = cumdist[:m, :n] backptr = backptr[:m, :n] pathmap = pathmap[:m, :n] divisor = get_divisor(pathmap, normalize_type) distortion = cumdist[-1, -1] / divisor ret = distortion, (cur_x1, cur_x2, dist, cumdist, backptr, pathmap) rets.append(ret) return rets def batch_mel_cepstral_distortion(y1, y2, sr, normalize_type="path", mfcc_fn=None): """ https://arxiv.org/pdf/2011.03568.pdf The root mean squared error computed on 13-dimensional MFCC using DTW for alignment. MFCC features are computed from an 80-channel log-mel spectrogram using a 50ms Hann window and hop of 12.5ms. y1: list of waveforms y2: list of waveforms sr: sampling rate """ try: import torchaudio except ImportError: raise ImportError("Please install torchaudio: pip install torchaudio") if mfcc_fn is None or mfcc_fn.sample_rate != sr: melkwargs = { "n_fft": int(0.05 * sr), "win_length": int(0.05 * sr), "hop_length": int(0.0125 * sr), "f_min": 20, "n_mels": 80, "window_fn": torch.hann_window, } mfcc_fn = torchaudio.transforms.MFCC( sr, n_mfcc=13, log_mels=True, melkwargs=melkwargs ).to(y1[0].device) return batch_compute_distortion( y1, y2, sr, lambda y: mfcc_fn(y).transpose(-1, -2), compute_rms_dist, normalize_type, )
17,216
33.296813
88
py
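The DTW and distortion helpers defined above are module-level functions, so they can be exercised directly on toy tensors. The sketch below assumes this repository's fairseq package is importable; the frame counts and feature dimension are arbitrary, and batch_mel_cepstral_distortion itself would additionally require torchaudio.

import torch
from fairseq.tasks.text_to_speech import batch_dynamic_time_warping, compute_rms_dist

torch.manual_seed(0)
x1 = torch.randn(6, 13)   # e.g. 6 frames of 13-dim MFCC features
x2 = torch.randn(9, 13)   # 9 frames for the second sequence
dist = compute_rms_dist(x1, x2).unsqueeze(0)        # (1, 6, 9) batched cost matrix
cumdist, backptr, pathmap = batch_dynamic_time_warping(dist)

print(cumdist[0, -1, -1])   # accumulated cost of the optimal alignment
print(pathmap[0].sum())     # number of cells on the DTW path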
sign-topic
sign-topic-main/fairseq/tasks/signs_to_text.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging from pathlib import Path from argparse import Namespace import json import pandas as pd import numpy as np from fairseq.data import AddTargetDataset, Dictionary, encoders from dataclasses import dataclass, field from fairseq.dataclass import FairseqDataclass from fairseq.dataclass.constants import ChoiceEnum from typing import Optional, Any from omegaconf import MISSING, II, DictConfig from fairseq.data.sign_language import ( SignFeatsType, SignFeatsDataset, ) from fairseq.tasks import FairseqTask, register_task from fairseq.data.text_compressor import TextCompressor, TextCompressionLevel from fairseq import metrics, search, tokenizer, utils import pdb logger = logging.getLogger(__name__) EVAL_BLEU_ORDER = 4 @dataclass class SignsToTextConfig(FairseqDataclass): data: str = field(default=MISSING, metadata={"help": "path to data directory"}) max_source_positions: Optional[int] = field( default=750, metadata={"help": "max number of tokens in the source sequence"} ) min_source_positions: Optional[int] = field( default=50, metadata={"help": "min number of tokens in the source sequence"} ) max_target_positions: Optional[int] = field( default=512, metadata={"help": "max number of tokens in the target sequence"} ) normalize: bool = field( default=False, metadata={"help": "if set, normalizes input to have 0 mean and unit variance"}, ) body_parts: str = field( default = "face,upperbody,lefthand,righthand", metadata={"help": "Select the keypoints that you want to use. Options: 'face','upperbody','lowerbody','lefthand', 'righthand'"}, ) feat_dims: str = field( default = "0,1,2", metadata={"help": "Select the keypoints dimensions that you want to use. Options: 0, 1, 2, 3"}, ) tokenizer_type: str = field( default='sentencepiece', metadata={"help": "subword tokenizer type"}, ) tokenizer_vocab: str = field( default=MISSING, metadata={"help": "subword tokenizer file"}, ) shuffle_dataset: bool = field( default=True, metadata={"help": "set True to shuffle the dataset between epochs"}, ) num_batch_buckets: int = field( default=0, metadata={"help": "number of buckets"}, ) text_compression_level: ChoiceEnum([x.name for x in TextCompressionLevel]) = field( default="none", metadata={ "help": "compression level for texts (e.g. audio filenames, " "target texts): none/low/high (default: none). " }, ) feats_type: ChoiceEnum([x.name for x in SignFeatsType]) = field( default="keypoints", metadata={ "help": "type of features for the sign input data: keypoints/i3d (default: keypoints). 
" }, ) tpu: bool = II("common.tpu") bpe_sentencepiece_model: str = II("bpe.sentencepiece_model") #add the following for reporting BLEU during validation eval_bleu: bool = field( default=False, metadata={"help": "evaluation with BLEU scores"} ) eval_bleu_args: Optional[str] = field( default="{}", metadata={ "help": 'generation args for BLUE scoring, e.g., \'{"beam": 4, "lenpen": 0.6}\', as JSON string' }, ) eval_tokenized_bleu: bool = field( default=False, metadata={"help": "compute tokenized BLEU instead of sacrebleu"} ) eval_bleu_print_samples: bool = field( default=True, metadata={"help": "print sample generations during validation"} ) @register_task("signs_to_text", dataclass=SignsToTextConfig) class SignsToTextTask(FairseqTask): def __init__(self, cfg, tgt_dict): super().__init__(cfg) self.tgt_dict = tgt_dict self.bpe_tokenizer = self.build_bpe( Namespace( bpe='sentencepiece', sentencepiece_model=cfg.bpe_sentencepiece_model ) ) @classmethod def setup_task(cls, cfg): dict_path = Path(cfg.bpe_sentencepiece_model).with_suffix('.txt') if not dict_path.is_file(): raise FileNotFoundError(f"Dict not found: {dict_path.as_posix()}") tgt_dict = Dictionary.load(dict_path.as_posix()) logger.info( f"dictionary size ({dict_path.name}): " f"{len(tgt_dict):,}" ) return cls(cfg, tgt_dict) def load_dataset(self, split, epoch=1, combine=False, **kwargs): root_dir = Path(self.cfg.data) assert root_dir.is_dir(), f"{root_dir} does not exist" # TODO: Change when we add i3d features manifest_file = root_dir / f"{split}_filt.tsv" if SignFeatsType(self.cfg.feats_type) == SignFeatsType.keypoints: feats_file = root_dir / f"{split}_sent.h5" elif SignFeatsType(self.cfg.feats_type) == SignFeatsType.i3d: if split =='train': manifest_file = root_dir / f"{split}_filt_i3d.tsv" #remove the ones not in the h5 file feats_file = root_dir / f"{split}_i3d.h5" #Check if this is at sentence level, because I don't think so... 
elif SignFeatsType(self.cfg.feats_type) == SignFeatsType.CNN2d: feats_file = root_dir / f'{split}_sent.h5' else: raise NotImplementedError("Features other than CNN2d, i3d or keypoints are not implemented") if self.cfg.num_batch_buckets > 0 or self.cfg.tpu: raise NotImplementedError("Pending to implement bucket_pad_length_dataset wrapper") self.datasets[split] = SignFeatsDataset.from_manifest_file( manifest_file=manifest_file, feats_file=feats_file, feats_type=self.cfg.feats_type, bodyparts=self.cfg.body_parts.split(','), feat_dims=[int(d) for d in self.cfg.feat_dims.split(',')], min_sample_size=self.cfg.min_source_positions, max_sample_size=self.cfg.max_source_positions, shuffle=self.cfg.shuffle_dataset, normalize=self.cfg.normalize, text_compression_level=self.cfg.text_compression_level, ) data = pd.read_csv(manifest_file, sep="\t") text_compressor = TextCompressor(level=self.cfg.text_compression_level) labels = [ text_compressor.compress(row['SENTENCE']) #added this for i, row in data.iterrows() if row['SENTENCE_NAME'] not in self.datasets[split].skipped_ids ] assert len(labels) == len(self.datasets[split]), ( f"labels length ({len(labels)}) and dataset length " f"supposed to skip ({len(self.datasets[split].skipped_ids)}) ids" f"({len(self.datasets[split])}) do not match" ) def process_label_fn(label): return self.target_dictionary.encode_line( self.bpe_tokenizer.encode(label), append_eos=False, add_if_not_exist=False ) def label_len_fn(label): return len(self.bpe_tokenizer.encode(label)) self.datasets[split] = AddTargetDataset( self.datasets[split], labels, pad=self.target_dictionary.pad(), eos=self.target_dictionary.eos(), batch_targets=True, process_label=process_label_fn, label_len_fn=label_len_fn, add_to_input=True, text_compression_level=self.cfg.text_compression_level, ) #Add this for validation def build_model(self, cfg, from_checkpoint=False): model = super().build_model(cfg) if self.cfg.eval_bleu: gen_args = json.loads(self.cfg.eval_bleu_args) self.sequence_generator = self.build_generator( [model], Namespace(**gen_args) ) return model #Add this for validation def valid_step(self, sample, model, criterion): loss, sample_size, logging_output = super().valid_step(sample, model, criterion) if self.cfg.eval_bleu: bleu = self._inference_with_bleu(self.sequence_generator, sample, model) logging_output["_bleu_sys_len"] = bleu.sys_len logging_output["_bleu_ref_len"] = bleu.ref_len # we split counts into separate entries so that they can be # summed efficiently across workers using fast-stat-sync assert len(bleu.counts) == EVAL_BLEU_ORDER for i in range(EVAL_BLEU_ORDER): logging_output["_bleu_counts_" + str(i)] = bleu.counts[i] logging_output["_bleu_totals_" + str(i)] = bleu.totals[i] return loss, sample_size, logging_output @property def target_dictionary(self): return self.tgt_dict @property def source_dictionary(self): return None def max_positions(self): return self.cfg.max_source_positions, self.cfg.max_target_positions def get_interactive_tokens_and_lengths(self, lines, encode_fn): n_frames = [] for l in lines: h5_file, _id = l.split(':') feats_file = h5py.File(h5_file, "r") n_frames.append(np.array(feats_file[_id]).shape[0]) return lines, n_frames # TODO: Implement this method def build_dataset_for_inference(self, src_tokens, src_lengths, **kwargs): raise NotImplementedError return SpeechToTextDataset( "interactive", False, self.data_cfg, src_tokens, src_lengths ) #Add this for validation def reduce_metrics(self, logging_outputs, criterion): 
super().reduce_metrics(logging_outputs, criterion) if self.cfg.eval_bleu: def sum_logs(key): import torch result = sum(log.get(key, 0) for log in logging_outputs) if torch.is_tensor(result): result = result.cpu() return result counts, totals = [], [] for i in range(EVAL_BLEU_ORDER): counts.append(sum_logs("_bleu_counts_" + str(i))) totals.append(sum_logs("_bleu_totals_" + str(i))) if max(totals) > 0: # log counts as numpy arrays -- log_scalar will sum them correctly metrics.log_scalar("_bleu_counts", np.array(counts)) metrics.log_scalar("_bleu_totals", np.array(totals)) metrics.log_scalar("_bleu_sys_len", sum_logs("_bleu_sys_len")) metrics.log_scalar("_bleu_ref_len", sum_logs("_bleu_ref_len")) def compute_bleu(meters): import inspect try: from sacrebleu.metrics import BLEU comp_bleu = BLEU.compute_bleu except ImportError: # compatibility API for sacrebleu 1.x import sacrebleu comp_bleu = sacrebleu.compute_bleu fn_sig = inspect.getfullargspec(comp_bleu)[0] if "smooth_method" in fn_sig: smooth = {"smooth_method": "exp"} else: smooth = {"smooth": "exp"} bleu = comp_bleu( correct=meters["_bleu_counts"].sum, total=meters["_bleu_totals"].sum, sys_len=meters["_bleu_sys_len"].sum, ref_len=meters["_bleu_ref_len"].sum, **smooth, ) return round(bleu.score, 2) metrics.log_derived("bleu", compute_bleu) def _inference_with_bleu(self, generator, sample, model): import sacrebleu #breakpoint() def decode(toks, escape_unk=False): s = self.tgt_dict.string( toks.int().cpu(), # The default unknown string in fairseq is `<unk>`, but # this is tokenized by sacrebleu as `< unk >`, inflating # BLEU scores. Instead, we use a somewhat more verbose # alternative that is unlikely to appear in the real # reference, but doesn't get split into multiple tokens. unk_string=("UNKNOWNTOKENINREF" if escape_unk else "UNKNOWNTOKENINHYP"), ) if self.bpe_tokenizer: s = self.bpe_tokenizer.decode(s) return s gen_out = self.inference_step(generator, [model], sample, prefix_tokens=None) hyps, refs = [], [] for i in range(len(gen_out)): hyps.append(decode(gen_out[i][0]["tokens"])) refs.append( decode( utils.strip_pad(sample["target"][i], self.tgt_dict.pad()), escape_unk=True, # don't count <unk> as matches to the hypo ) ) if self.cfg.eval_bleu_print_samples: logger.info("example hypothesis: " + hyps[0]) logger.info("example reference: " + refs[0]) if self.cfg.eval_tokenized_bleu: return sacrebleu.corpus_bleu(hyps, [refs], tokenize="none") else: return sacrebleu.corpus_bleu(hyps, [refs])
13,444
38.660767
136
py
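The validation path above accumulates sacrebleu's per-order n-gram counts and recombines them in reduce_metrics. Here is a minimal sketch of the underlying sacrebleu call used by _inference_with_bleu, on hand-written hypothesis/reference strings (sacrebleu must be installed; the sentences are made up).

import sacrebleu

hyps = ["the cat sat on the mat", "hello world"]
refs = ["the cat is on the mat", "hello world"]

bleu = sacrebleu.corpus_bleu(hyps, [refs])  # one reference stream per hypothesis
print(round(bleu.score, 2))
print(bleu.counts, bleu.totals, bleu.sys_len, bleu.ref_len)  # per-order n-gram statistics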
sign-topic
sign-topic-main/fairseq/tasks/translation_from_pretrained_bart.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch

from fairseq import utils
from fairseq.data import LanguagePairDataset

from . import register_task
from .translation import TranslationTask, load_langpair_dataset


@register_task("translation_from_pretrained_bart")
class TranslationFromPretrainedBARTTask(TranslationTask):
    """
    Translate from source language to target language with a model initialized
    with a multilingual pretrain.

    Args:
        src_dict (~fairseq.data.Dictionary): dictionary for the source language
        tgt_dict (~fairseq.data.Dictionary): dictionary for the target language

    .. note::

        The translation task is compatible with :mod:`fairseq-train`,
        :mod:`fairseq-generate` and :mod:`fairseq-interactive`.

    The translation task provides the following additional command-line
    arguments:

    .. argparse::
        :ref: fairseq.tasks.translation_parser
        :prog:
    """

    @staticmethod
    def add_args(parser):
        """Add task-specific arguments to the parser."""
        # fmt: off
        TranslationTask.add_args(parser)
        parser.add_argument('--langs', type=str, metavar='LANG',
                            help='comma-separated list of monolingual language, '
                                 'for example, "en,de,fr". These should match the '
                                 'langs from pretraining (and be in the same order). '
                                 'You should always add all pretraining language idx '
                                 'during finetuning.')
        parser.add_argument('--prepend-bos', action='store_true',
                            help='prepend bos token to each sentence, which matches '
                                 'mBART pretraining')
        # fmt: on

    def __init__(self, args, src_dict, tgt_dict):
        super().__init__(args, src_dict, tgt_dict)
        self.langs = args.langs.split(",")
        for d in [src_dict, tgt_dict]:
            for l in self.langs:
                d.add_symbol("[{}]".format(l))
            d.add_symbol("<mask>")

    def load_dataset(self, split, epoch=1, combine=False, **kwargs):
        """Load a given dataset split.

        Args:
            split (str): name of the split (e.g., train, valid, test)
        """
        paths = utils.split_paths(self.args.data)
        assert len(paths) > 0
        data_path = paths[(epoch - 1) % len(paths)]

        # infer langcode
        src, tgt = self.args.source_lang, self.args.target_lang

        self.datasets[split] = load_langpair_dataset(
            data_path,
            split,
            src,
            self.src_dict,
            tgt,
            self.tgt_dict,
            combine=combine,
            dataset_impl=self.args.dataset_impl,
            upsample_primary=self.args.upsample_primary,
            left_pad_source=self.args.left_pad_source,
            left_pad_target=self.args.left_pad_target,
            max_source_positions=getattr(self.args, "max_source_positions", 1024),
            max_target_positions=getattr(self.args, "max_target_positions", 1024),
            load_alignments=self.args.load_alignments,
            prepend_bos=getattr(self.args, "prepend_bos", False),
            append_source_id=True,
        )

    def build_generator(self, models, args, **unused):
        if getattr(args, "score_reference", False):
            from fairseq.sequence_scorer import SequenceScorer

            return SequenceScorer(
                self.target_dictionary,
                eos=self.tgt_dict.index("[{}]".format(self.args.target_lang)),
            )
        else:
            from fairseq.sequence_generator import SequenceGenerator

            return SequenceGenerator(
                models,
                self.target_dictionary,
                beam_size=getattr(args, "beam", 5),
                max_len_a=getattr(args, "max_len_a", 0),
                max_len_b=getattr(args, "max_len_b", 200),
                min_len=getattr(args, "min_len", 1),
                normalize_scores=(not getattr(args, "unnormalized", False)),
                len_penalty=getattr(args, "lenpen", 1),
                unk_penalty=getattr(args, "unkpen", 0),
                temperature=getattr(args, "temperature", 1.0),
                match_source_len=getattr(args, "match_source_len", False),
                no_repeat_ngram_size=getattr(args, "no_repeat_ngram_size", 0),
                eos=self.tgt_dict.index("[{}]".format(self.args.target_lang)),
            )

    def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None):
        src_lang_id = self.source_dictionary.index(
            "[{}]".format(self.args.source_lang)
        )
        source_tokens = []
        for s_t in src_tokens:
            s_t = torch.cat([s_t, s_t.new(1).fill_(src_lang_id)])
            source_tokens.append(s_t)
        dataset = LanguagePairDataset(
            source_tokens,
            src_lengths,
            self.source_dictionary,
            tgt_dict=self.target_dictionary,
            constraints=constraints,
        )
        return dataset
5,243
38.428571
108
py
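A small sketch of the language-token bookkeeping the task above performs: each language gets a "[lang]" symbol added to both dictionaries, and build_dataset_for_inference appends that symbol's id to every source sentence. The two-word vocabulary and language codes below are illustrative only.

import torch
from fairseq.data import Dictionary

d = Dictionary()
for w in ["hello", "world"]:
    d.add_symbol(w)
for lang in ["en_XX", "ro_RO"]:
    d.add_symbol("[{}]".format(lang))  # one "[lang]" symbol per pretraining language
d.add_symbol("<mask>")

src_lang_id = d.index("[en_XX]")
src = torch.tensor([d.index("hello"), d.index("world"), d.eos()])
src_with_lang = torch.cat([src, src.new(1).fill_(src_lang_id)])  # append the language id
print(src_with_lang)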
sign-topic
sign-topic-main/fairseq/tasks/language_modeling.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import os from dataclasses import dataclass, field from typing import Optional import numpy as np import torch from fairseq import utils from fairseq.data import ( AppendTokenDataset, Dictionary, IdDataset, LMContextWindowDataset, MonolingualDataset, NestedDictionaryDataset, NumelDataset, PadDataset, PrependTokenDataset, StripTokenDataset, TokenBlockDataset, TruncatedDictionary, data_utils, ) from fairseq.data.indexed_dataset import get_available_dataset_impl from fairseq.data.shorten_dataset import maybe_shorten_dataset from fairseq.dataclass import ChoiceEnum, FairseqDataclass from fairseq.tasks import LegacyFairseqTask, register_task from omegaconf import II SAMPLE_BREAK_MODE_CHOICES = ChoiceEnum(["none", "complete", "complete_doc", "eos"]) SHORTEN_METHOD_CHOICES = ChoiceEnum(["none", "truncate", "random_crop"]) logger = logging.getLogger(__name__) @dataclass class LanguageModelingConfig(FairseqDataclass): data: Optional[str] = field( default=None, metadata={"help": "path to data directory"} ) sample_break_mode: SAMPLE_BREAK_MODE_CHOICES = field( default="none", metadata={ "help": 'If omitted or "none", fills each sample with tokens-per-sample ' 'tokens. If set to "complete", splits samples only at the end ' "of sentence, but may include multiple sentences per sample. " '"complete_doc" is similar but respects doc boundaries. ' 'If set to "eos", includes only one sentence per sample.' }, ) tokens_per_sample: int = field( default=1024, metadata={"help": "max number of tokens per sample for LM dataset"}, ) output_dictionary_size: int = field( default=-1, metadata={"help": "limit the size of output dictionary"} ) self_target: bool = field(default=False, metadata={"help": "include self target"}) future_target: bool = field( default=False, metadata={"help": "include future target"} ) past_target: bool = field(default=False, metadata={"help": "include past target"}) add_bos_token: bool = field( default=False, metadata={"help": "prepend beginning of sentence token (<s>)"} ) max_target_positions: Optional[int] = field( default=None, metadata={"help": "max number of tokens in the target sequence"} ) shorten_method: SHORTEN_METHOD_CHOICES = field( default="none", metadata={ "help": "if not none, shorten sequences that exceed --tokens-per-sample" }, ) shorten_data_split_list: str = field( default="", metadata={ "help": "comma-separated list of dataset splits to apply shortening to, " 'e.g., "train,valid" (default: all dataset splits)' }, ) pad_to_fixed_length: Optional[bool] = field( default=False, metadata={"help": "pad to fixed length"}, ) pad_to_fixed_bsz: Optional[bool] = field( default=False, metadata={"help": "boolean to pad to fixed batch size"}, ) # TODO common vars below add to parent seed: int = II("common.seed") batch_size: Optional[int] = II("dataset.batch_size") batch_size_valid: Optional[int] = II("dataset.batch_size_valid") dataset_impl: Optional[ChoiceEnum(get_available_dataset_impl())] = II( "dataset.dataset_impl" ) data_buffer_size: int = II("dataset.data_buffer_size") tpu: bool = II("common.tpu") use_plasma_view: bool = II("common.use_plasma_view") plasma_path: str = II("common.plasma_path") @register_task("language_modeling", dataclass=LanguageModelingConfig) class LanguageModelingTask(LegacyFairseqTask): """ Train a language model. 
Args: dictionary (~fairseq.data.Dictionary): the dictionary for the input of the language model output_dictionary (~fairseq.data.Dictionary): the dictionary for the output of the language model. In most cases it will be the same as *dictionary*, but could possibly be a more limited version of the dictionary (if ``--output-dictionary-size`` is used). targets (List[str]): list of the target types that the language model should predict. Can be one of "self", "future", and "past". Defaults to "future". .. note:: The language modeling task is compatible with :mod:`fairseq-train`, :mod:`fairseq-generate`, :mod:`fairseq-interactive` and :mod:`fairseq-eval-lm`. The language modeling task provides the following additional command-line arguments: .. argparse:: :ref: fairseq.tasks.language_modeling_parser :prog: """ def __init__(self, args, dictionary, output_dictionary=None, targets=None): super().__init__(args) self.dictionary = dictionary self.output_dictionary = output_dictionary or dictionary if targets is None: targets = ["future"] self.targets = targets @classmethod def setup_dictionary(cls, args, **kwargs): dictionary = None output_dictionary = None if args.data: paths = utils.split_paths(args.data) assert len(paths) > 0 dictionary = Dictionary.load(os.path.join(paths[0], "dict.txt")) logger.info("dictionary: {} types".format(len(dictionary))) output_dictionary = dictionary if args.output_dictionary_size >= 0: output_dictionary = TruncatedDictionary( dictionary, args.output_dictionary_size ) return (dictionary, output_dictionary) @classmethod def setup_task(cls, args, **kwargs): """Setup the task (e.g., load dictionaries). Args: args (argparse.Namespace): parsed command-line arguments """ dictionary, output_dictionary = cls.setup_dictionary(args, **kwargs) # upgrade old checkpoints if getattr(args, "exclude_self_target", False): args.self_target = False targets = [] if getattr(args, "self_target", False): targets.append("self") if getattr(args, "future_target", False): targets.append("future") if getattr(args, "past_target", False): targets.append("past") if len(targets) == 0: # standard language modeling targets = ["future"] return cls(args, dictionary, output_dictionary, targets=targets) def build_model(self, args): model = super().build_model(args) for target in self.targets: if target not in model.supported_targets: raise ValueError( "Unsupported language modeling target: {}".format(target) ) return model def load_dataset( self, split: str, epoch=1, combine=False, **kwargs ) -> MonolingualDataset: """Load a given dataset split. 
Args: split (str): name of the split (e.g., train, valid, valid1, test) """ paths = utils.split_paths(self.args.data) assert len(paths) > 0 data_path = paths[(epoch - 1) % len(paths)] split_path = os.path.join(data_path, split) # each process has its own copy of the raw data (likely to be an np.memmap) dataset = data_utils.load_indexed_dataset( split_path, self.dictionary, self.args.dataset_impl, combine=combine ) if dataset is None: raise FileNotFoundError(f"Dataset not found: {split} ({split_path})") dataset = maybe_shorten_dataset( dataset, split, self.args.shorten_data_split_list, self.args.shorten_method, self.args.tokens_per_sample, self.args.seed, ) dataset = TokenBlockDataset( dataset, dataset.sizes, self.args.tokens_per_sample, pad=self.dictionary.pad(), eos=self.dictionary.eos(), break_mode=self.args.sample_break_mode, include_targets=True, use_plasma_view=self.args.use_plasma_view, split_path=split_path, plasma_path=self.args.plasma_path, ) add_eos_for_other_targets = ( self.args.sample_break_mode is not None and self.args.sample_break_mode != "none" ) fixed_pad_length = None if self.args.pad_to_fixed_length: fixed_pad_length = self.args.tokens_per_sample pad_to_bsz = None if self.args.pad_to_fixed_bsz: pad_to_bsz = ( self.args.batch_size_valid if "valid" in split else self.args.batch_size ) self.datasets[split] = MonolingualDataset( dataset=dataset, sizes=dataset.sizes, src_vocab=self.dictionary, tgt_vocab=self.output_dictionary, add_eos_for_other_targets=add_eos_for_other_targets, shuffle=True, targets=self.targets, add_bos_token=self.args.add_bos_token, fixed_pad_length=fixed_pad_length, pad_to_bsz=pad_to_bsz, ) def build_dataset_for_inference(self, src_tokens, src_lengths, **kwargs): """ Generate batches for inference. We prepend an eos token to src_tokens (or bos if `--add-bos-token` is set) and we append a <pad> to target. This is convenient both for generation with a prefix and LM scoring. 
""" dataset = StripTokenDataset( TokenBlockDataset( src_tokens, src_lengths, block_size=None, # ignored for "eos" break mode pad=self.source_dictionary.pad(), eos=self.source_dictionary.eos(), break_mode="eos", ), # remove eos from (end of) target sequence self.source_dictionary.eos(), ) src_dataset = PrependTokenDataset( dataset, token=( self.source_dictionary.bos() if getattr(self.args, "add_bos_token", False) else self.source_dictionary.eos() ), ) tgt_dataset = AppendTokenDataset(dataset, token=self.source_dictionary.pad()) return NestedDictionaryDataset( { "id": IdDataset(), "net_input": { "src_tokens": PadDataset( src_dataset, pad_idx=self.source_dictionary.pad(), left_pad=False, ), "src_lengths": NumelDataset(src_dataset, reduce=False), }, "target": PadDataset( tgt_dataset, pad_idx=self.source_dictionary.pad(), left_pad=False ), }, sizes=[np.array(src_lengths)], ) def inference_step( self, generator, models, sample, prefix_tokens=None, constraints=None ): with torch.no_grad(): # Generation will always be conditioned on bos_token if getattr(self.args, "add_bos_token", False): bos_token = self.source_dictionary.bos() else: bos_token = self.source_dictionary.eos() if constraints is not None: raise NotImplementedError( "Constrained decoding with the language_modeling task is not supported" ) # SequenceGenerator doesn't use src_tokens directly, we need to # pass the `prefix_tokens` argument instead if prefix_tokens is None and sample["net_input"]["src_tokens"].nelement(): prefix_tokens = sample["net_input"]["src_tokens"] if prefix_tokens[:, 0].eq(bos_token).all(): prefix_tokens = prefix_tokens[:, 1:] return generator.generate( models, sample, prefix_tokens=prefix_tokens, bos_token=bos_token ) def eval_lm_dataloader( self, dataset, max_tokens: Optional[int] = 36000, batch_size: Optional[int] = None, max_positions: Optional[int] = None, num_shards: int = 1, shard_id: int = 0, num_workers: int = 1, data_buffer_size: int = 10, # ensures that every evaluated token has access to a context of at least # this size, if possible context_window: int = 0, ): if context_window > 0: dataset = LMContextWindowDataset( dataset=dataset, tokens_per_sample=self.args.tokens_per_sample, context_window=context_window, pad_idx=self.source_dictionary.pad(), ) return self.get_batch_iterator( dataset=dataset, max_tokens=max_tokens, max_sentences=batch_size, max_positions=max_positions, ignore_invalid_inputs=True, num_shards=num_shards, shard_id=shard_id, num_workers=num_workers, data_buffer_size=data_buffer_size, ).next_epoch_itr(shuffle=False) @property def source_dictionary(self): """Return the :class:`~fairseq.data.Dictionary` for the language model.""" return self.dictionary @property def target_dictionary(self): """Return the :class:`~fairseq.data.Dictionary` for the language model.""" return self.output_dictionary
13,912
35.231771
91
py
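The language_modeling task in the record above packs a monolingual token stream into fixed-size blocks (tokens_per_sample) and, depending on --self-target/--future-target/--past-target, asks the model to predict different shifts of each block. The snippet below is a minimal, self-contained illustration of the "none" break mode plus the standard next-token ("future") target; it does not use fairseq's TokenBlockDataset or MonolingualDataset, and their exact offset and eos conventions may differ from this sketch.

# Illustration only: pack a token stream into fixed-size blocks ("none" break
# mode) and derive standard next-token ("future") LM targets.  fairseq's
# TokenBlockDataset / MonolingualDataset additionally handle padding, eos and
# the "complete"/"complete_doc"/"eos" break modes, which are omitted here.
from typing import List, Tuple

import torch


def pack_blocks(stream: List[int], tokens_per_sample: int) -> List[torch.Tensor]:
    """Chunk a flat token stream into contiguous, equally sized blocks."""
    return [
        torch.tensor(stream[i : i + tokens_per_sample], dtype=torch.long)
        for i in range(0, len(stream) - tokens_per_sample + 1, tokens_per_sample)
    ]


def lm_example(block: torch.Tensor, eos: int) -> Tuple[torch.Tensor, torch.Tensor]:
    """Assumed convention: the input is the block shifted right by one position
    (starting from eos), so position t of the "future" target is token t."""
    src = torch.cat([block.new_tensor([eos]), block[:-1]])
    return src, block


if __name__ == "__main__":
    stream = list(range(1, 17))  # toy token ids
    for block in pack_blocks(stream, tokens_per_sample=8):
        src, tgt = lm_example(block, eos=0)
        print("src:", src.tolist(), "-> future target:", tgt.tolist())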
sign-topic
sign-topic-main/fairseq/tasks/translation.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from dataclasses import dataclass, field import itertools import json import logging import os from typing import Optional from argparse import Namespace from omegaconf import II import numpy as np from fairseq import metrics, utils from fairseq.data import ( AppendTokenDataset, ConcatDataset, LanguagePairDataset, PrependTokenDataset, StripTokenDataset, TruncateDataset, data_utils, encoders, indexed_dataset, ) from fairseq.data.indexed_dataset import get_available_dataset_impl from fairseq.dataclass import ChoiceEnum, FairseqDataclass from fairseq.tasks import FairseqTask, register_task EVAL_BLEU_ORDER = 4 logger = logging.getLogger(__name__) def load_langpair_dataset( data_path, split, src, src_dict, tgt, tgt_dict, combine, dataset_impl, upsample_primary, left_pad_source, left_pad_target, max_source_positions, max_target_positions, prepend_bos=False, load_alignments=False, truncate_source=False, append_source_id=False, num_buckets=0, shuffle=True, pad_to_multiple=1, prepend_bos_src=None, ): def split_exists(split, src, tgt, lang, data_path): filename = os.path.join(data_path, "{}.{}-{}.{}".format(split, src, tgt, lang)) return indexed_dataset.dataset_exists(filename, impl=dataset_impl) src_datasets = [] tgt_datasets = [] for k in itertools.count(): split_k = split + (str(k) if k > 0 else "") # infer langcode if split_exists(split_k, src, tgt, src, data_path): prefix = os.path.join(data_path, "{}.{}-{}.".format(split_k, src, tgt)) elif split_exists(split_k, tgt, src, src, data_path): prefix = os.path.join(data_path, "{}.{}-{}.".format(split_k, tgt, src)) else: if k > 0: break else: raise FileNotFoundError( "Dataset not found: {} ({})".format(split, data_path) ) src_dataset = data_utils.load_indexed_dataset( prefix + src, src_dict, dataset_impl ) if truncate_source: src_dataset = AppendTokenDataset( TruncateDataset( StripTokenDataset(src_dataset, src_dict.eos()), max_source_positions - 1, ), src_dict.eos(), ) src_datasets.append(src_dataset) tgt_dataset = data_utils.load_indexed_dataset( prefix + tgt, tgt_dict, dataset_impl ) if tgt_dataset is not None: tgt_datasets.append(tgt_dataset) logger.info( "{} {} {}-{} {} examples".format( data_path, split_k, src, tgt, len(src_datasets[-1]) ) ) if not combine: break assert len(src_datasets) == len(tgt_datasets) or len(tgt_datasets) == 0 if len(src_datasets) == 1: src_dataset = src_datasets[0] tgt_dataset = tgt_datasets[0] if len(tgt_datasets) > 0 else None else: sample_ratios = [1] * len(src_datasets) sample_ratios[0] = upsample_primary src_dataset = ConcatDataset(src_datasets, sample_ratios) if len(tgt_datasets) > 0: tgt_dataset = ConcatDataset(tgt_datasets, sample_ratios) else: tgt_dataset = None if prepend_bos: assert hasattr(src_dict, "bos_index") and hasattr(tgt_dict, "bos_index") src_dataset = PrependTokenDataset(src_dataset, src_dict.bos()) if tgt_dataset is not None: tgt_dataset = PrependTokenDataset(tgt_dataset, tgt_dict.bos()) elif prepend_bos_src is not None: logger.info(f"prepending src bos: {prepend_bos_src}") src_dataset = PrependTokenDataset(src_dataset, prepend_bos_src) eos = None if append_source_id: src_dataset = AppendTokenDataset( src_dataset, src_dict.index("[{}]".format(src)) ) if tgt_dataset is not None: tgt_dataset = AppendTokenDataset( tgt_dataset, tgt_dict.index("[{}]".format(tgt)) ) eos = tgt_dict.index("[{}]".format(tgt)) align_dataset = None if 
load_alignments: align_path = os.path.join(data_path, "{}.align.{}-{}".format(split, src, tgt)) if indexed_dataset.dataset_exists(align_path, impl=dataset_impl): align_dataset = data_utils.load_indexed_dataset( align_path, None, dataset_impl ) tgt_dataset_sizes = tgt_dataset.sizes if tgt_dataset is not None else None return LanguagePairDataset( src_dataset, src_dataset.sizes, src_dict, tgt_dataset, tgt_dataset_sizes, tgt_dict, left_pad_source=left_pad_source, left_pad_target=left_pad_target, align_dataset=align_dataset, eos=eos, num_buckets=num_buckets, shuffle=shuffle, pad_to_multiple=pad_to_multiple, ) @dataclass class TranslationConfig(FairseqDataclass): data: Optional[str] = field( default=None, metadata={ "help": "colon separated path to data directories list, will be iterated upon during epochs " "in round-robin manner; however, valid and test data are always in the first directory " "to avoid the need for repeating them in all directories" }, ) source_lang: Optional[str] = field( default=None, metadata={ "help": "source language", "argparse_alias": "-s", }, ) target_lang: Optional[str] = field( default=None, metadata={ "help": "target language", "argparse_alias": "-t", }, ) load_alignments: bool = field( default=False, metadata={"help": "load the binarized alignments"} ) left_pad_source: bool = field( default=True, metadata={"help": "pad the source on the left"} ) left_pad_target: bool = field( default=False, metadata={"help": "pad the target on the left"} ) max_source_positions: int = field( default=1024, metadata={"help": "max number of tokens in the source sequence"} ) max_target_positions: int = field( default=1024, metadata={"help": "max number of tokens in the target sequence"} ) upsample_primary: int = field( default=-1, metadata={"help": "the amount of upsample primary dataset"} ) truncate_source: bool = field( default=False, metadata={"help": "truncate source to max-source-positions"} ) num_batch_buckets: int = field( default=0, metadata={ "help": "if >0, then bucket source and target lengths into " "N buckets and pad accordingly; this is useful on TPUs to minimize the number of compilations" }, ) train_subset: str = II("dataset.train_subset") dataset_impl: Optional[ChoiceEnum(get_available_dataset_impl())] = II( "dataset.dataset_impl" ) required_seq_len_multiple: int = II("dataset.required_seq_len_multiple") # options for reporting BLEU during validation eval_bleu: bool = field( default=False, metadata={"help": "evaluation with BLEU scores"} ) eval_bleu_args: Optional[str] = field( default="{}", metadata={ "help": 'generation args for BLUE scoring, e.g., \'{"beam": 4, "lenpen": 0.6}\', as JSON string' }, ) eval_bleu_detok: str = field( default="space", metadata={ "help": "detokenize before computing BLEU (e.g., 'moses'); required if using --eval-bleu; " "use 'space' to disable detokenization; see fairseq.data.encoders for other options" }, ) eval_bleu_detok_args: Optional[str] = field( default="{}", metadata={"help": "args for building the tokenizer, if needed, as JSON string"}, ) eval_tokenized_bleu: bool = field( default=False, metadata={"help": "compute tokenized BLEU instead of sacrebleu"} ) eval_bleu_remove_bpe: Optional[str] = field( default=None, metadata={ "help": "remove BPE before computing BLEU", "argparse_const": "@@ ", }, ) eval_bleu_print_samples: bool = field( default=False, metadata={"help": "print sample generations during validation"} ) @register_task("translation", dataclass=TranslationConfig) class TranslationTask(FairseqTask): """ Translate from one 
(source) language to another (target) language. Args: src_dict (~fairseq.data.Dictionary): dictionary for the source language tgt_dict (~fairseq.data.Dictionary): dictionary for the target language .. note:: The translation task is compatible with :mod:`fairseq-train`, :mod:`fairseq-generate` and :mod:`fairseq-interactive`. """ cfg: TranslationConfig def __init__(self, cfg: TranslationConfig, src_dict, tgt_dict): super().__init__(cfg) self.src_dict = src_dict self.tgt_dict = tgt_dict @classmethod def setup_task(cls, cfg: TranslationConfig, **kwargs): """Setup the task (e.g., load dictionaries). Args: args (argparse.Namespace): parsed command-line arguments """ paths = utils.split_paths(cfg.data) assert len(paths) > 0 # find language pair automatically if cfg.source_lang is None or cfg.target_lang is None: cfg.source_lang, cfg.target_lang = data_utils.infer_language_pair(paths[0]) if cfg.source_lang is None or cfg.target_lang is None: raise Exception( "Could not infer language pair, please provide it explicitly" ) # load dictionaries src_dict = cls.load_dictionary( os.path.join(paths[0], "dict.{}.txt".format(cfg.source_lang)) ) tgt_dict = cls.load_dictionary( os.path.join(paths[0], "dict.{}.txt".format(cfg.target_lang)) ) assert src_dict.pad() == tgt_dict.pad() assert src_dict.eos() == tgt_dict.eos() assert src_dict.unk() == tgt_dict.unk() logger.info("[{}] dictionary: {} types".format(cfg.source_lang, len(src_dict))) logger.info("[{}] dictionary: {} types".format(cfg.target_lang, len(tgt_dict))) return cls(cfg, src_dict, tgt_dict) def load_dataset(self, split, epoch=1, combine=False, **kwargs): """Load a given dataset split. Args: split (str): name of the split (e.g., train, valid, test) """ paths = utils.split_paths(self.cfg.data) assert len(paths) > 0 if split != self.cfg.train_subset: # if not training data set, use the first shard for valid and test paths = paths[:1] data_path = paths[(epoch - 1) % len(paths)] # infer langcode src, tgt = self.cfg.source_lang, self.cfg.target_lang self.datasets[split] = load_langpair_dataset( data_path, split, src, self.src_dict, tgt, self.tgt_dict, combine=combine, dataset_impl=self.cfg.dataset_impl, upsample_primary=self.cfg.upsample_primary, left_pad_source=self.cfg.left_pad_source, left_pad_target=self.cfg.left_pad_target, max_source_positions=self.cfg.max_source_positions, max_target_positions=self.cfg.max_target_positions, load_alignments=self.cfg.load_alignments, truncate_source=self.cfg.truncate_source, num_buckets=self.cfg.num_batch_buckets, shuffle=(split != "test"), pad_to_multiple=self.cfg.required_seq_len_multiple, ) def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None): return LanguagePairDataset( src_tokens, src_lengths, self.source_dictionary, tgt_dict=self.target_dictionary, constraints=constraints, ) def build_model(self, cfg): model = super().build_model(cfg) if self.cfg.eval_bleu: detok_args = json.loads(self.cfg.eval_bleu_detok_args) self.tokenizer = encoders.build_tokenizer( Namespace(tokenizer=self.cfg.eval_bleu_detok, **detok_args) ) gen_args = json.loads(self.cfg.eval_bleu_args) self.sequence_generator = self.build_generator( [model], Namespace(**gen_args) ) return model def valid_step(self, sample, model, criterion): loss, sample_size, logging_output = super().valid_step(sample, model, criterion) if self.cfg.eval_bleu: bleu = self._inference_with_bleu(self.sequence_generator, sample, model) logging_output["_bleu_sys_len"] = bleu.sys_len logging_output["_bleu_ref_len"] = bleu.ref_len # we split counts 
into separate entries so that they can be # summed efficiently across workers using fast-stat-sync assert len(bleu.counts) == EVAL_BLEU_ORDER for i in range(EVAL_BLEU_ORDER): logging_output["_bleu_counts_" + str(i)] = bleu.counts[i] logging_output["_bleu_totals_" + str(i)] = bleu.totals[i] return loss, sample_size, logging_output def reduce_metrics(self, logging_outputs, criterion): super().reduce_metrics(logging_outputs, criterion) if self.cfg.eval_bleu: def sum_logs(key): import torch result = sum(log.get(key, 0) for log in logging_outputs) if torch.is_tensor(result): result = result.cpu() return result counts, totals = [], [] for i in range(EVAL_BLEU_ORDER): counts.append(sum_logs("_bleu_counts_" + str(i))) totals.append(sum_logs("_bleu_totals_" + str(i))) if max(totals) > 0: # log counts as numpy arrays -- log_scalar will sum them correctly metrics.log_scalar("_bleu_counts", np.array(counts)) metrics.log_scalar("_bleu_totals", np.array(totals)) metrics.log_scalar("_bleu_sys_len", sum_logs("_bleu_sys_len")) metrics.log_scalar("_bleu_ref_len", sum_logs("_bleu_ref_len")) def compute_bleu(meters): import inspect try: from sacrebleu.metrics import BLEU comp_bleu = BLEU.compute_bleu except ImportError: # compatibility API for sacrebleu 1.x import sacrebleu comp_bleu = sacrebleu.compute_bleu fn_sig = inspect.getfullargspec(comp_bleu)[0] if "smooth_method" in fn_sig: smooth = {"smooth_method": "exp"} else: smooth = {"smooth": "exp"} bleu = comp_bleu( correct=meters["_bleu_counts"].sum, total=meters["_bleu_totals"].sum, sys_len=meters["_bleu_sys_len"].sum, ref_len=meters["_bleu_ref_len"].sum, **smooth, ) return round(bleu.score, 2) metrics.log_derived("bleu", compute_bleu) def max_positions(self): """Return the max sentence length allowed by the task.""" return (self.cfg.max_source_positions, self.cfg.max_target_positions) @property def source_dictionary(self): """Return the source :class:`~fairseq.data.Dictionary`.""" return self.src_dict @property def target_dictionary(self): """Return the target :class:`~fairseq.data.Dictionary`.""" return self.tgt_dict def _inference_with_bleu(self, generator, sample, model): import sacrebleu def decode(toks, escape_unk=False): s = self.tgt_dict.string( toks.int().cpu(), self.cfg.eval_bleu_remove_bpe, # The default unknown string in fairseq is `<unk>`, but # this is tokenized by sacrebleu as `< unk >`, inflating # BLEU scores. Instead, we use a somewhat more verbose # alternative that is unlikely to appear in the real # reference, but doesn't get split into multiple tokens. unk_string=("UNKNOWNTOKENINREF" if escape_unk else "UNKNOWNTOKENINHYP"), ) if self.tokenizer: s = self.tokenizer.decode(s) return s gen_out = self.inference_step(generator, [model], sample, prefix_tokens=None) hyps, refs = [], [] for i in range(len(gen_out)): hyps.append(decode(gen_out[i][0]["tokens"])) refs.append( decode( utils.strip_pad(sample["target"][i], self.tgt_dict.pad()), escape_unk=True, # don't count <unk> as matches to the hypo ) ) if self.cfg.eval_bleu_print_samples: logger.info("example hypothesis: " + hyps[0]) logger.info("example reference: " + refs[0]) if self.cfg.eval_tokenized_bleu: return sacrebleu.corpus_bleu(hyps, [refs], tokenize="none") else: return sacrebleu.corpus_bleu(hyps, [refs])
17,888
34.921687
108
py
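The translation task in the record above configures validation-time BLEU generation through JSON strings (--eval-bleu-args, --eval-bleu-detok-args) that build_model parses with json.loads and wraps in an argparse.Namespace before calling build_generator / build_tokenizer. The stand-alone sketch below mirrors that pattern using only the standard library; parse_generation_config is a hypothetical helper, not part of fairseq.

# Sketch of how the JSON-string options above become a Namespace.  Passing the
# resulting Namespace to task.build_generator(...) is what
# TranslationTask.build_model does with cfg.eval_bleu_args.
import json
from argparse import Namespace
from typing import Optional


def parse_generation_config(eval_bleu_args: Optional[str]) -> Namespace:
    """Hypothetical helper mirroring Namespace(**json.loads(cfg.eval_bleu_args))."""
    gen_args = json.loads(eval_bleu_args or "{}")
    if not isinstance(gen_args, dict):
        raise ValueError("--eval-bleu-args must be a JSON object")
    return Namespace(**gen_args)


if __name__ == "__main__":
    ns = parse_generation_config('{"beam": 4, "lenpen": 0.6}')
    print(ns.beam, ns.lenpen)  # -> 4 0.6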
sign-topic
sign-topic-main/fairseq/tasks/multilingual_masked_lm.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import os import numpy as np import torch from fairseq import utils from fairseq.data import ( ConcatDataset, Dictionary, IdDataset, MaskTokensDataset, NestedDictionaryDataset, NumelDataset, NumSamplesDataset, PadDataset, PrependTokenDataset, RawLabelDataset, ResamplingDataset, SortDataset, TokenBlockDataset, data_utils, encoders, ) from fairseq.tasks import LegacyFairseqTask, register_task logger = logging.getLogger(__name__) @register_task("multilingual_masked_lm") class MultiLingualMaskedLMTask(LegacyFairseqTask): """Task for training masked language models (e.g., BERT, RoBERTa).""" @staticmethod def add_args(parser): """Add task-specific arguments to the parser.""" parser.add_argument( "data", help="colon separated path to data directories list, \ will be iterated upon during epochs in round-robin manner", ) parser.add_argument( "--sample-break-mode", default="complete", choices=["none", "complete", "complete_doc", "eos"], help='If omitted or "none", fills each sample with tokens-per-sample ' 'tokens. If set to "complete", splits samples only at the end ' "of sentence, but may include multiple sentences per sample. " '"complete_doc" is similar but respects doc boundaries. ' 'If set to "eos", includes only one sentence per sample.', ) parser.add_argument( "--tokens-per-sample", default=512, type=int, help="max number of total tokens over all segments " "per sample for BERT dataset", ) parser.add_argument( "--mask-prob", default=0.15, type=float, help="probability of replacing a token with mask", ) parser.add_argument( "--leave-unmasked-prob", default=0.1, type=float, help="probability that a masked token is unmasked", ) parser.add_argument( "--random-token-prob", default=0.1, type=float, help="probability of replacing a token with a random token", ) parser.add_argument( "--freq-weighted-replacement", action="store_true", help="sample random replacement words based on word frequencies", ) parser.add_argument( "--mask-whole-words", default=False, action="store_true", help="mask whole words; you may also want to set --bpe", ) parser.add_argument( "--multilang-sampling-alpha", type=float, default=1.0, help="smoothing alpha for sample rations across multiple datasets", ) def __init__(self, args, dictionary): super().__init__(args) self.dictionary = dictionary self.seed = args.seed # add mask token self.mask_idx = dictionary.add_symbol("<mask>") @classmethod def setup_task(cls, args, **kwargs): paths = utils.split_paths(args.data) assert len(paths) > 0 dictionary = Dictionary.load(os.path.join(paths[0], "dict.txt")) logger.info("dictionary: {} types".format(len(dictionary))) return cls(args, dictionary) def _get_whole_word_mask(self): # create masked input and targets if self.args.mask_whole_words: bpe = encoders.build_bpe(self.args) if bpe is not None: def is_beginning_of_word(i): if i < self.source_dictionary.nspecial: # special elements are always considered beginnings return True tok = self.source_dictionary[i] if tok.startswith("madeupword"): return True try: return bpe.is_beginning_of_word(tok) except ValueError: return True mask_whole_words = torch.ByteTensor( list(map(is_beginning_of_word, range(len(self.source_dictionary)))) ) else: mask_whole_words = None return mask_whole_words def _get_sample_prob(self, dataset_lens): """ Get smoothed sampling porbability by languages. 
This helps low resource languages by upsampling them. """ prob = dataset_lens / dataset_lens.sum() smoothed_prob = prob ** self.args.multilang_sampling_alpha smoothed_prob = smoothed_prob / smoothed_prob.sum() return smoothed_prob def load_dataset(self, split, epoch=1, combine=False, **kwargs): """Load a given dataset split. Args: split (str): name of the split (e.g., train, valid, test) """ paths = utils.split_paths(self.args.data) assert len(paths) > 0 data_path = paths[(epoch - 1) % len(paths)] languages = sorted( name for name in os.listdir(data_path) if os.path.isdir(os.path.join(data_path, name)) ) logger.info("Training on {0} languages: {1}".format(len(languages), languages)) logger.info( "Language to id mapping: ", {lang: id for id, lang in enumerate(languages)} ) mask_whole_words = self._get_whole_word_mask() lang_datasets = [] for lang_id, language in enumerate(languages): split_path = os.path.join(data_path, language, split) dataset = data_utils.load_indexed_dataset( split_path, self.source_dictionary, self.args.dataset_impl, combine=combine, ) if dataset is None: raise FileNotFoundError( "Dataset not found: {} ({})".format(split, split_path) ) # create continuous blocks of tokens dataset = TokenBlockDataset( dataset, dataset.sizes, self.args.tokens_per_sample - 1, # one less for <s> pad=self.source_dictionary.pad(), eos=self.source_dictionary.eos(), break_mode=self.args.sample_break_mode, ) logger.info("loaded {} blocks from: {}".format(len(dataset), split_path)) # prepend beginning-of-sentence token (<s>, equiv. to [CLS] in BERT) dataset = PrependTokenDataset(dataset, self.source_dictionary.bos()) src_dataset, tgt_dataset = MaskTokensDataset.apply_mask( dataset, self.source_dictionary, pad_idx=self.source_dictionary.pad(), mask_idx=self.mask_idx, seed=self.args.seed, mask_prob=self.args.mask_prob, leave_unmasked_prob=self.args.leave_unmasked_prob, random_token_prob=self.args.random_token_prob, freq_weighted_replacement=self.args.freq_weighted_replacement, mask_whole_words=mask_whole_words, ) lang_dataset = NestedDictionaryDataset( { "net_input": { "src_tokens": PadDataset( src_dataset, pad_idx=self.source_dictionary.pad(), left_pad=False, ), "src_lengths": NumelDataset(src_dataset, reduce=False), }, "target": PadDataset( tgt_dataset, pad_idx=self.source_dictionary.pad(), left_pad=False, ), "nsentences": NumSamplesDataset(), "ntokens": NumelDataset(src_dataset, reduce=True), "lang_id": RawLabelDataset([lang_id] * src_dataset.sizes.shape[0]), }, sizes=[src_dataset.sizes], ) lang_datasets.append(lang_dataset) dataset_lengths = np.array( [len(d) for d in lang_datasets], dtype=float, ) logger.info( "loaded total {} blocks for all languages".format( dataset_lengths.sum(), ) ) if split == self.args.train_subset: # For train subset, additionally up or down sample languages. 
sample_probs = self._get_sample_prob(dataset_lengths) logger.info( "Sample probability by language: ", { lang: "{0:.4f}".format(sample_probs[id]) for id, lang in enumerate(languages) }, ) size_ratio = (sample_probs * dataset_lengths.sum()) / dataset_lengths logger.info( "Up/Down Sampling ratio by language: ", { lang: "{0:.2f}".format(size_ratio[id]) for id, lang in enumerate(languages) }, ) resampled_lang_datasets = [ ResamplingDataset( lang_datasets[i], size_ratio=size_ratio[i], seed=self.args.seed, epoch=epoch, replace=size_ratio[i] >= 1.0, ) for i, d in enumerate(lang_datasets) ] dataset = ConcatDataset(resampled_lang_datasets) else: dataset = ConcatDataset(lang_datasets) lang_splits = [split] for lang_id, lang_dataset in enumerate(lang_datasets): split_name = split + "_" + languages[lang_id] lang_splits.append(split_name) self.datasets[split_name] = lang_dataset # [TODO]: This is hacky for now to print validation ppl for each # language individually. Maybe need task API changes to allow it # in more generic ways. if split in self.args.valid_subset: self.args.valid_subset = self.args.valid_subset.replace( split, ",".join(lang_splits) ) with data_utils.numpy_seed(self.args.seed + epoch): shuffle = np.random.permutation(len(dataset)) self.datasets[split] = SortDataset( dataset, sort_order=[ shuffle, dataset.sizes, ], ) def build_dataset_for_inference(self, src_tokens, src_lengths, sort=True): src_dataset = PadDataset( TokenBlockDataset( src_tokens, src_lengths, self.args.tokens_per_sample - 1, # one less for <s> pad=self.source_dictionary.pad(), eos=self.source_dictionary.eos(), break_mode="eos", ), pad_idx=self.source_dictionary.pad(), left_pad=False, ) src_dataset = PrependTokenDataset(src_dataset, self.source_dictionary.bos()) src_dataset = NestedDictionaryDataset( { "id": IdDataset(), "net_input": { "src_tokens": src_dataset, "src_lengths": NumelDataset(src_dataset, reduce=False), }, }, sizes=src_lengths, ) if sort: src_dataset = SortDataset(src_dataset, sort_order=[src_lengths]) return src_dataset @property def source_dictionary(self): return self.dictionary @property def target_dictionary(self): return self.dictionary
12,146
34.831858
87
py
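The multilingual masked-LM task in the record above resamples languages with a temperature-style smoothing: probabilities proportional to dataset size are raised to --multilang-sampling-alpha and renormalized, and the resulting size ratio decides how much each language is up- or down-sampled. The small numeric sketch below copies the formula from _get_sample_prob and the size_ratio expression in load_dataset; the concrete dataset sizes are invented for illustration.

# Worked example of the alpha-smoothed language sampling used above.
# alpha = 1.0 keeps the natural distribution; alpha < 1.0 upsamples
# low-resource languages at the expense of high-resource ones.
import numpy as np


def sample_prob(dataset_lens: np.ndarray, alpha: float) -> np.ndarray:
    prob = dataset_lens / dataset_lens.sum()
    smoothed = prob ** alpha
    return smoothed / smoothed.sum()


if __name__ == "__main__":
    lens = np.array([1_000_000.0, 50_000.0, 5_000.0])  # e.g. en, ro, sw (invented sizes)
    for alpha in (1.0, 0.7, 0.3):
        probs = sample_prob(lens, alpha)
        size_ratio = probs * lens.sum() / lens          # same expression as load_dataset
        print(f"alpha={alpha}: probs={np.round(probs, 3)} ratio={np.round(size_ratio, 2)}")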
sign-topic
sign-topic-main/fairseq/tasks/multilingual_language_modeling.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import os from dataclasses import dataclass, field from typing import Optional import numpy as np import torch from omegaconf import II from fairseq import utils from fairseq.data import ( AppendTokenDataset, ConcatDataset, Dictionary, IdDataset, LMContextWindowDataset, MonolingualDataset, NestedDictionaryDataset, NumelDataset, PadDataset, PrependTokenDataset, ResamplingDataset, SortDataset, StripTokenDataset, TokenBlockDataset, TruncatedDictionary, data_utils, ) from fairseq.data.indexed_dataset import get_available_dataset_impl from fairseq.data.shorten_dataset import maybe_shorten_dataset from fairseq.dataclass import ChoiceEnum, FairseqDataclass from fairseq.tasks import LegacyFairseqTask, register_task SAMPLE_BREAK_MODE_CHOICES = ChoiceEnum(["none", "complete", "complete_doc", "eos"]) SHORTEN_METHOD_CHOICES = ChoiceEnum(["none", "truncate", "random_crop"]) logger = logging.getLogger(__name__) def lang_token(lang): return f"<{lang}>" @dataclass class MultilingualLanguageModelingConfig(FairseqDataclass): # TODO common var add to parent data: Optional[str] = field( default=None, metadata={"help": "path to data directory"} ) sample_break_mode: SAMPLE_BREAK_MODE_CHOICES = field( default="none", metadata={ "help": 'If omitted or "none", fills each sample with tokens-per-sample ' 'tokens. If set to "complete", splits samples only at the end ' "of sentence, but may include multiple sentences per sample. " '"complete_doc" is similar but respects doc boundaries. ' 'If set to "eos", includes only one sentence per sample.' }, ) tokens_per_sample: int = field( default=1024, metadata={"help": "max number of tokens per sample for LM dataset"}, ) output_dictionary_size: int = field( default=-1, metadata={"help": "limit the size of output dictionary"} ) self_target: bool = field(default=False, metadata={"help": "include self target"}) future_target: bool = field( default=False, metadata={"help": "include future target"} ) past_target: bool = field(default=False, metadata={"help": "include past target"}) add_bos_token: bool = field( default=False, metadata={"help": "prepend lang id token <dialect>"} ) max_source_positions: Optional[int] = field( default=None, metadata={"help": "max number of tokens in the source sequence"} ) max_target_positions: Optional[int] = field( default=None, metadata={"help": "max number of tokens in the target sequence"} ) pad_to_fixed_length: Optional[bool] = field( default=False, metadata={"help": "pad to fixed length"} ) pad_to_fixed_bsz: Optional[bool] = field( default=False, metadata={"help": "boolean to pad to fixed batch size"} ) multilang_sampling_alpha: Optional[float] = field( default=1.0, metadata={ "help": "smoothing alpha for sample rations across multiple datasets" }, ) shorten_method: SHORTEN_METHOD_CHOICES = field( default="none", metadata={ "help": "if not none, shorten sequences that exceed --tokens-per-sample" }, ) shorten_data_split_list: str = field( default="", metadata={ "help": "comma-separated list of dataset splits to apply shortening to, " 'e.g., "train,valid" (default: all dataset splits)' }, ) langs: str = field( default="", metadata={ "help": "comma-separated list of languages (default: all directories in data path)" }, ) baseline_model_langs: str = field( default="", metadata={ "help": "comma-separated list of languages in the baseline model (default: none)" 
}, ) # TODO: legacy parameter kept for compatibility baseline_model: str = field( default="", metadata={"help": "path to the baseline model (default: none)"}, ) lang_to_offline_shard_ratio: str = field( default="", metadata={ "help": "absolute path of tsv file location to indicate lang to offline shard ratio.", }, ) # TODO common vars below add to parent seed: int = II("common.seed") dataset_impl: Optional[ChoiceEnum(get_available_dataset_impl())] = II( "dataset.dataset_impl" ) data_buffer_size: int = II("dataset.data_buffer_size") tpu: bool = II("common.tpu") batch_size: Optional[int] = II("dataset.batch_size") batch_size_valid: Optional[int] = II("dataset.batch_size_valid") train_subset: str = II("common.train_subset") valid_subset: str = II("common.valid_subset") @register_task( "multilingual_language_modeling", dataclass=MultilingualLanguageModelingConfig ) class MultilingualLanguageModelingTask(LegacyFairseqTask): """ Train a language model. Args: dictionary (~fairseq.data.Dictionary): the dictionary for the input of the language model output_dictionary (~fairseq.data.Dictionary): the dictionary for the output of the language model. In most cases it will be the same as *dictionary*, but could possibly be a more limited version of the dictionary (if ``--output-dictionary-size`` is used). targets (List[str]): list of the target types that the language model should predict. Can be one of "self", "future", and "past". Defaults to "future". .. note:: The language modeling task is compatible with :mod:`fairseq-train`, :mod:`fairseq-generate`, :mod:`fairseq-interactive` and :mod:`fairseq-eval-lm`. The language modeling task provides the following additional command-line arguments: .. argparse:: :ref: fairseq.tasks.language_modeling_parser :prog: """ def __init__(self, args, dictionary, output_dictionary=None, targets=None): super().__init__(args) self.dictionary = dictionary self.output_dictionary = output_dictionary or dictionary if targets is None: targets = ["future"] self.targets = targets @staticmethod def _get_langs(args, epoch=1): paths = utils.split_paths(args.data) assert len(paths) > 0 data_path = paths[(epoch - 1) % len(paths)] languages = sorted( name for name in os.listdir(data_path) if os.path.isdir(os.path.join(data_path, name)) ) if args.langs: keep_langs = set(args.langs.split(",")) languages = [lang for lang in languages if lang in keep_langs] assert len(languages) == len(keep_langs) return languages, data_path @classmethod def setup_dictionary(cls, args, **kwargs): dictionary = None output_dictionary = None if args.data: paths = utils.split_paths(args.data) assert len(paths) > 0 dictionary = Dictionary.load(os.path.join(paths[0], "dict.txt")) if args.add_bos_token: languages, _ = cls._get_langs(args) logger.info("----------------") for lang in languages: dictionary.add_symbol(lang_token(lang)) logger.info(f"add language token: {lang_token(lang)}") logger.info("----------------") logger.info("dictionary: {} types".format(len(dictionary))) output_dictionary = dictionary if args.output_dictionary_size >= 0: output_dictionary = TruncatedDictionary( dictionary, args.output_dictionary_size ) return (dictionary, output_dictionary) @classmethod def setup_task(cls, args, **kwargs): """Setup the task (e.g., load dictionaries). 
Args: args (argparse.Namespace): parsed command-line arguments """ dictionary, output_dictionary = cls.setup_dictionary(args, **kwargs) # upgrade old checkpoints if hasattr(args, "exclude_self_target"): args.self_target = not args.exclude_self_target targets = [] if getattr(args, "self_target", False): targets.append("self") if getattr(args, "future_target", False): targets.append("future") if getattr(args, "past_target", False): targets.append("past") if len(targets) == 0: # standard language modeling targets = ["future"] return cls(args, dictionary, output_dictionary, targets=targets) def build_model(self, args): model = super().build_model(args) for target in self.targets: if target not in model.supported_targets: raise ValueError( f"Unsupported language modeling target: {target} not in {model.supported_targets}" ) return model def _get_sample_prob(self, dataset_lens): """ Get smoothed sampling porbability by languages. This helps low resource languages by upsampling them. """ prob = dataset_lens / dataset_lens.sum() smoothed_prob = prob ** self.args.multilang_sampling_alpha smoothed_prob = smoothed_prob / smoothed_prob.sum() return smoothed_prob def load_dataset(self, split: str, epoch=1, combine=False, **kwargs): """Load a given dataset split. Args: split (str): name of the split (e.g., train, valid, test) """ languages, data_path = MultilingualLanguageModelingTask._get_langs( self.args, epoch ) lang_to_offline_shard_ratio = None if self.args.lang_to_offline_shard_ratio != "": lang_to_offline_shard_ratio = {} assert os.path.exists( self.args.lang_to_offline_shard_ratio ), "provided offline shard ratio file doesn't exist: {0}".format( self.args.lang_to_offline_shard_ratio ) with open(self.args.lang_to_offline_shard_ratio) as fin: for line in fin: lang, ratio = line.strip().split("\t") ratio = float(ratio) lang_to_offline_shard_ratio[lang] = ratio logger.info( "Found offline sharded ratio: %s", lang_to_offline_shard_ratio, ) if split == self.args.train_subset: logger.info( "Training on {0} languages: {1}".format(len(languages), languages) ) else: logger.info( "Evaluating on {0} languages: {1}".format(len(languages), languages) ) tokens_per_sample = self.args.tokens_per_sample - int(self.args.add_bos_token) fixed_pad_length = None if self.args.pad_to_fixed_length: fixed_pad_length = self.args.tokens_per_sample pad_to_bsz = None if self.args.pad_to_fixed_bsz: pad_to_bsz = ( self.args.batch_size_valid if "valid" in split else self.args.batch_size ) lang_datasets = [] for lang_id, language in enumerate(languages): split_path = os.path.join(data_path, language, split) dataset = data_utils.load_indexed_dataset( split_path, self.dictionary, self.args.dataset_impl, combine=combine ) # print('len(dataset) =', len(dataset)) if dataset is None: raise FileNotFoundError( "Dataset not found: {} ({})".format(split, split_path) ) dataset = maybe_shorten_dataset( dataset, split, self.args.shorten_data_split_list, self.args.shorten_method, tokens_per_sample, self.args.seed, ) dataset = TokenBlockDataset( dataset, dataset.sizes, tokens_per_sample, pad=self.dictionary.pad(), eos=self.dictionary.eos(), break_mode=self.args.sample_break_mode, include_targets=True, ) add_eos_for_other_targets = ( self.args.sample_break_mode is not None and self.args.sample_break_mode != "none" ) src_lang_idx, tgt_lang_idx = None, None if self.args.add_bos_token: src_lang_idx = self.dictionary.index(lang_token(language)) tgt_lang_idx = self.output_dictionary.index(lang_token(language)) lang_datasets.append( MonolingualDataset( 
dataset=dataset, sizes=dataset.sizes, src_vocab=self.dictionary, tgt_vocab=self.output_dictionary, add_eos_for_other_targets=add_eos_for_other_targets, shuffle=True, targets=self.targets, fixed_pad_length=fixed_pad_length, pad_to_bsz=pad_to_bsz, add_bos_token=self.args.add_bos_token, src_lang_idx=src_lang_idx, tgt_lang_idx=tgt_lang_idx, ) ) dataset_lengths = np.array( [len(d) for d in lang_datasets], dtype=float, ) logger.info( "loaded total {} blocks for all languages".format( dataset_lengths.sum(), ) ) if split == self.args.train_subset: dataset_lengths_ratio_multiplier = np.ones(len(dataset_lengths)) if lang_to_offline_shard_ratio is not None: dataset_lengths_ratio_multiplier = [] for lang in languages: assert ( lang in lang_to_offline_shard_ratio ), "Lang: {0} missing in offline shard ratio file: {1}".format( lang, self.args.lang_to_offline_shard_ratio, ) dataset_lengths_ratio_multiplier.append( lang_to_offline_shard_ratio[lang] ) dataset_lengths_ratio_multiplier = np.array( dataset_lengths_ratio_multiplier ) true_dataset_lengths = ( dataset_lengths * dataset_lengths_ratio_multiplier ) else: true_dataset_lengths = dataset_lengths # For train subset, additionally up or down sample languages. sample_probs = self._get_sample_prob(true_dataset_lengths) logger.info( "Sample probability by language: %s", { lang: "{0:.4f}".format(sample_probs[id]) for id, lang in enumerate(languages) }, ) size_ratio = (sample_probs * true_dataset_lengths.sum()) / dataset_lengths # TODO: add an option for shrinking all size ratios to below 1 # if self.args.multilang_sampling_alpha != 1: # size_ratio /= size_ratio.max() # Fix numeric errors in size ratio computation # 0.999999999999999999 -> 1 # 1.000000000000000002 -> 1 for i in range(len(size_ratio)): size_ratio[i] = round(size_ratio[i], 8) logger.info( "Up/Down Sampling ratio by language: %s", { lang: "{0:.2f}".format(size_ratio[id]) for id, lang in enumerate(languages) }, ) logger.info( "Actual dataset size by language: %s", { lang: "{0:.2f}".format(len(lang_datasets[id])) for id, lang in enumerate(languages) }, ) resampled_lang_datasets = [ ResamplingDataset( lang_datasets[i], size_ratio=size_ratio[i], seed=self.args.seed, epoch=epoch, replace=size_ratio[i] > 1.0, ) for i, d in enumerate(lang_datasets) ] logger.info( "Resampled dataset size by language: %s", { lang: "{0:.2f}".format(len(resampled_lang_datasets[id])) for id, lang in enumerate(languages) }, ) dataset = ConcatDataset(resampled_lang_datasets) else: dataset = ConcatDataset(lang_datasets) lang_splits = [split] for lang_id, lang_dataset in enumerate(lang_datasets): split_name = split + "_" + languages[lang_id] lang_splits.append(split_name) self.datasets[split_name] = lang_dataset # [TODO]: This is hacky for now to print validation ppl for each # language individually. Maybe need task API changes to allow it # in more generic ways. if split in self.args.valid_subset: self.args.valid_subset = self.args.valid_subset.replace( split, ",".join(lang_splits) ) with data_utils.numpy_seed(self.args.seed + epoch): shuffle = np.random.permutation(len(dataset)) self.datasets[split] = SortDataset( dataset, sort_order=[ shuffle, dataset.sizes, ], ) def build_dataset_for_inference( self, src_tokens, src_lengths, language="en_XX", **kwargs ): """ Generate batches for inference. We prepend an eos token to src_tokens (or bos if `--add-bos-token` is set) and we append a <pad> to target. This is convenient both for generation with a prefix and LM scoring. 
""" dataset = StripTokenDataset( TokenBlockDataset( src_tokens, src_lengths, block_size=None, # ignored for "eos" break mode pad=self.source_dictionary.pad(), eos=self.source_dictionary.eos(), break_mode="eos", ), # remove eos from (end of) target sequence self.source_dictionary.eos(), ) src_lang_idx = self.dictionary.index(lang_token(language)) src_dataset = PrependTokenDataset( dataset, token=( (src_lang_idx or self.source_dictionary.bos()) if getattr(self.args, "add_bos_token", False) else self.source_dictionary.eos() ), ) max_seq_len = max(src_lengths) + 1 tgt_dataset = AppendTokenDataset(dataset, token=self.source_dictionary.pad()) return NestedDictionaryDataset( { "id": IdDataset(), "net_input": { "src_tokens": PadDataset( src_dataset, pad_idx=self.source_dictionary.pad(), left_pad=False, pad_length=max_seq_len, ), "src_lengths": NumelDataset(src_dataset, reduce=False), }, "target": PadDataset( tgt_dataset, pad_idx=self.source_dictionary.pad(), left_pad=False, pad_length=max_seq_len, ), }, sizes=[np.array(src_lengths)], ) @torch.no_grad() def inference_step( self, generator, models, sample, language="en_XX", prefix_tokens=None, constraints=None, ): # Generation will always be conditioned on bos_token if getattr(self.args, "add_bos_token", False): src_lang_idx = self.dictionary.index(lang_token(language)) bos_token = src_lang_idx or self.source_dictionary.bos() else: bos_token = self.source_dictionary.eos() if constraints is not None: raise NotImplementedError( "Constrained decoding with the language_modeling task is not supported" ) # SequenceGenerator doesn't use src_tokens directly, we need to # pass the `prefix_tokens` argument instead if prefix_tokens is None and sample["net_input"]["src_tokens"].nelement(): prefix_tokens = sample["net_input"]["src_tokens"] if prefix_tokens[:, 0].eq(bos_token).all(): prefix_tokens = prefix_tokens[:, 1:] return generator.generate( models, sample, prefix_tokens=prefix_tokens, bos_token=bos_token ) def eval_lm_dataloader( self, dataset, max_tokens: Optional[int] = 36000, batch_size: Optional[int] = None, max_positions: Optional[int] = None, num_shards: int = 1, shard_id: int = 0, num_workers: int = 1, data_buffer_size: int = 10, # ensures that every evaluated token has access to a context of at least # this size, if possible context_window: int = 0, ): if context_window > 0: dataset = LMContextWindowDataset( dataset=dataset, tokens_per_sample=self.args.tokens_per_sample, context_window=context_window, pad_idx=self.source_dictionary.pad(), ) return self.get_batch_iterator( dataset=dataset, max_tokens=max_tokens, max_sentences=batch_size, max_positions=max_positions, ignore_invalid_inputs=True, num_shards=num_shards, shard_id=shard_id, num_workers=num_workers, data_buffer_size=data_buffer_size, ) @property def source_dictionary(self): """Return the :class:`~fairseq.data.Dictionary` for the language model.""" return self.dictionary @property def target_dictionary(self): """Return the :class:`~fairseq.data.Dictionary` for the language model.""" return self.output_dictionary
22,922
35.501592
102
py
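The multilingual language-modeling task in the record above can correct its sampling for corpora that were already sharded offline: --lang-to-offline-shard-ratio points to a TSV file of <lang><TAB><ratio> lines, and the ratios multiply the observed dataset lengths before the smoothed sampling probabilities are computed. The sketch below shows that file format and a parsing loop modeled on load_dataset above; the helper name and file contents are invented for illustration.

# Sketch of the lang -> offline shard ratio TSV used above.  Each line is
# "<language>\t<ratio>"; the ratio rescales the in-memory dataset length so
# that sampling reflects the full (offline) corpus size.
import tempfile
from typing import Dict


def read_shard_ratios(path: str) -> Dict[str, float]:
    ratios: Dict[str, float] = {}
    with open(path) as fin:
        for line in fin:
            lang, ratio = line.strip().split("\t")
            ratios[lang] = float(ratio)
    return ratios


if __name__ == "__main__":
    # Hypothetical file contents written to a temporary path for the demo.
    with tempfile.NamedTemporaryFile("w", suffix=".tsv", delete=False) as f:
        f.write("en_XX\t0.25\nro_RO\t1.0\n")
        path = f.name
    print(read_shard_ratios(path))  # {'en_XX': 0.25, 'ro_RO': 1.0}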
sign-topic
sign-topic-main/fairseq/tasks/online_backtranslation.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import contextlib import json import logging import math import os from argparse import Namespace from collections import OrderedDict, defaultdict from pathlib import Path from typing import Dict, Sequence, Tuple from argparse import ArgumentError import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import fairseq from fairseq import metrics, options, utils from fairseq.data import ( FairseqDataset, LanguagePairDataset, NoisingDataset, PrependTokenDataset, RoundRobinZipDatasets, TransformEosLangPairDataset, data_utils, encoders, ) from fairseq.sequence_generator import SequenceGenerator from fairseq.tasks import register_task from fairseq.tasks.translation import TranslationTask, load_langpair_dataset logger = logging.getLogger(__name__) class PiecewiseLinearFn: """Piecewise linear function. Can be configured with a string.""" def __init__(self, pieces: Sequence[Tuple[int, float]]): assert pieces == sorted( pieces ), f"PiecewiseLinearFn configuration should be sorted, received: {pieces}" self.pieces = pieces def __call__(self, x: int) -> float: for i, (x_a, y_a) in enumerate(self.pieces[:-1]): x_b, y_b = self.pieces[i + 1] if x_a <= x <= x_b: return y_a + (x - x_a) * (y_b - y_a) / (x_b - x_a) return self.pieces[-1][1] @staticmethod def from_string(configuration: str) -> "PiecewiseLinearFn": """ Parse the configuration of lambda coefficient (for scheduling). x = "3" # lambda will be a constant equal to x x = "0:1,1000:0" # lambda will start from 1 and linearly decrease # to 0 during the first 1000 iterations x = "0:0,1000:0,2000:1" # lambda will be equal to 0 for the first 1000 # iterations, then will linearly increase to 1 until iteration 2000 """ if isinstance(configuration, float): return PiecewiseLinearFn([(0, configuration)]) try: parts = configuration.split(",") if len(parts) == 1: v = float(configuration) return PiecewiseLinearFn([(0, v)]) split = [s.split(":") for s in parts] pieces = [(int(t), float(v)) for t, v in split] return PiecewiseLinearFn(pieces) except Exception: raise ValueError( f"Invalid PiecewiseLinearFn configuration: {configuration!r}" ) @staticmethod def one() -> "PiecewiseLinearFn": return PiecewiseLinearFn([(0, 1.0)]) @register_task("online_backtranslation") class OnlineBackTranslationTask(TranslationTask): @staticmethod def add_args(parser): """Add task-specific arguments to the parser.""" # fmt: off # Generic translation args parser.add_argument('data', help='colon separated path to data directories list, \ will be iterated upon during epochs in round-robin manner; \ however, valid and test data are always in the first directory to \ avoid the need for repeating them in all directories') parser.add_argument('--mono-langs', metavar='MONO_LANGS', help='monolingual languages for training') parser.add_argument('--valid-lang-pairs', default=None, metavar='VALID_LANG_PAIRS', help='language pairs for validation') parser.add_argument('--load-alignments', action='store_true', help='load the binarized alignments') parser.add_argument('--left-pad-source', default='False', type=str, metavar='BOOL', help='pad the source on the left') parser.add_argument('--left-pad-target', default='False', type=str, metavar='BOOL', help='pad the target on the left') parser.add_argument('--upsample-primary', default=1, type=int, help='amount to upsample primary dataset') try: 
parser.add_argument('--max-source-positions', default=1024, type=int, metavar='N', help='max number of tokens in the source sequence') parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N', help='max number of tokens in the target sequence') except ArgumentError: # this might have already been defined. Once we transition this to hydra it should be fine to add it here. pass parser.add_argument('--truncate-source', action='store_true', default=False, help='truncate source to max-source-positions') parser.add_argument('--num-batch-buckets', default=0, type=int, metavar='N', help='if >0, then bucket source and target lengths into N ' 'buckets and pad accordingly; this is useful on TPUs ' 'to minimize the number of compilations') # Denoising args parser.add_argument('--max-word-shuffle-distance', default=3.0, type=float, metavar='N', help='maximum word shuffle distance for denoising autoencoding data generation') parser.add_argument('--word-dropout-prob', default=0.1, type=float, metavar='N', help='word dropout probability for denoising autoencoding data generation') parser.add_argument('--word-blanking-prob', default=0.2, type=float, metavar='N', help='word blanking probability for denoising autoencoding data generation') # Backtranslation args parser.add_argument('--lambda-bt', default="1.0", type=str, metavar='N', help='back-translation weight') parser.add_argument('--lambda-dae', default="1.0", type=str, metavar='N', help='denoising auto-encoder weight') # Evaluation args parser.add_argument('--generate-one-by-one', action='store_true', help='generate one sentence at a time for backtranslation') parser.add_argument('--eval-bleu', action='store_true', help='evaluation with BLEU scores') parser.add_argument('--eval-bleu-detok', type=str, default="space", help='detokenize before computing BLEU (e.g., "moses"); ' 'required if using --eval-bleu; use "space" to ' 'disable detokenization; see fairseq.data.encoders ' 'for other options') parser.add_argument('--eval-bleu-detok-args', type=str, metavar='JSON', help='args for building the tokenizer, if needed') parser.add_argument('--eval-tokenized-bleu', action='store_true', default=False, help='compute tokenized BLEU instead of sacrebleu') parser.add_argument('--eval-bleu-remove-bpe', nargs='?', const='@@ ', default=None, help='remove BPE before computing BLEU') parser.add_argument('--eval-bleu-args', type=str, metavar='JSON', help='generation args for BLUE scoring, ' 'e.g., \'{"beam": 4, "lenpen": 0.6}\'') parser.add_argument('--eval-bleu-print-samples', action='store_true', help='print sample generations during validation') # fmt: on def __init__(self, args, common_dict, mono_langs, valid_lang_pairs): super().__init__(args, common_dict, common_dict) self.common_dict = common_dict self.mono_langs = mono_langs self.valid_lang_pairs = valid_lang_pairs self.SHOW_SAMPLES_INTERVAL = 1000 # Start by showing samples self._show_samples_ctr = self.SHOW_SAMPLES_INTERVAL self.SHOW_SAMPLES_NUMBER = 5 self.lambda_bt = PiecewiseLinearFn.from_string(args.lambda_bt) self.lambda_dae = PiecewiseLinearFn.from_string(args.lambda_dae) self.args = args self.data = utils.split_paths(self.args.data) if len(self.data) == 1: shards = list(Path(self.data[0]).glob("shard*")) if len(shards) > 0: # keep this as strings, since it can also be a manifold path old_data = self.data self.data = [str(shard) for shard in shards] logging.warning(f"Expanded data directory {old_data} to {self.data}") @classmethod def setup_task(cls, args, **kwargs): """Setup 
the task (e.g., load dictionaries). Args: args (argparse.Namespace): parsed command-line arguments """ args.left_pad_source = options.eval_bool(args.left_pad_source) args.left_pad_target = options.eval_bool(args.left_pad_target) paths = utils.split_paths(args.data) assert len(paths) > 0 assert args.mono_langs is not None mono_langs = args.mono_langs.split(",") valid_lang_pairs = args.valid_lang_pairs.split(",") # load dictionary dict_path = os.path.join(paths[0], "dict.txt") common_dict = cls.load_dictionary(dict_path) return cls(args, common_dict, mono_langs, valid_lang_pairs) def load_dataset(self, split, epoch=1, combine=False, **kwargs) -> FairseqDataset: """Load a given dataset split. Args: split (str): name of the split (e.g., train, valid, test) """ if split == "train": data_path = self.data[(epoch - 1) % len(self.data)] dataset = self.load_train_dataset(data_path) else: # valid/test should always be the same. dataset = self.load_translation_dataset(split, self.data[0]) self.datasets[split] = dataset return dataset def load_train_dataset(self, data_path: str) -> FairseqDataset: """The training dataset is made of backtranslation dataset and denoising dataset.""" data = [] for lang in self.mono_langs: train_path = os.path.join(data_path, lang, "train") # TODO: could we do the BT using denoise sample ? # this would half the data loading work data.append((f"{lang}-BT", self.load_bt_dataset(train_path, lang))) data.append( (f"{lang}-DENOISE", self.load_denoise_dataset(train_path, lang)) ) return RoundRobinZipDatasets(OrderedDict(data)) def _langpair_dataset( self, src: FairseqDataset, tgt: FairseqDataset ) -> LanguagePairDataset: return LanguagePairDataset( src, src.sizes, self.dictionary, tgt=tgt, tgt_sizes=tgt.sizes, tgt_dict=self.dictionary, left_pad_source=self.args.left_pad_source, left_pad_target=self.args.left_pad_target, # TODO: should we shuffle ? we are already sorting batch by sizes so ? # shuffle=True, ) def _prepend_lang_bos_to_target( self, dataset: LanguagePairDataset, lang: str ) -> LanguagePairDataset: bos = _lang_token_index(self.dictionary, lang) return TransformEosLangPairDataset( dataset, src_eos=self.dictionary.eos(), new_src_eos=self.dictionary.eos(), tgt_bos=self.dictionary.eos(), new_tgt_bos=bos, ) def load_bt_dataset(self, data_path: str, lang: str) -> FairseqDataset: """The BT dataset is generated with (tgt, tgt) pairs. The actual translation to a (generated_src, tgt) pair is done on the fly during training. 
""" mono_dataset = data_utils.load_indexed_dataset( data_path, self.common_dict, self.args.dataset_impl ) assert mono_dataset is not None, f"No dataset found for {lang}" mono_dataset_src = PrependTokenDataset( mono_dataset, _lang_token_index(self.dictionary, lang) ) mono_dataset_bt = self._langpair_dataset(mono_dataset_src, mono_dataset) logger.info( f"mono_lang = {lang} " f"lang token index = {_lang_token_index(self.dictionary, lang)} " f"lang token = {_lang_token(lang)}" ) mono_dataset_bt = self._prepend_lang_bos_to_target(mono_dataset_bt, lang) return mono_dataset_bt def load_denoise_dataset(self, data_path: str, lang: str) -> FairseqDataset: """Classic denoising dataset""" dataset = data_utils.load_indexed_dataset( data_path, self.common_dict, self.args.dataset_impl ) noisy_dataset = NoisingDataset( dataset, self.dictionary, seed=1, max_word_shuffle_distance=self.args.max_word_shuffle_distance, word_dropout_prob=self.args.word_dropout_prob, word_blanking_prob=self.args.word_blanking_prob, ) noisy_dataset = PrependTokenDataset( noisy_dataset, _lang_token_index(self.dictionary, lang) ) clean_dataset = data_utils.load_indexed_dataset( data_path, self.common_dict, self.args.dataset_impl ) denoising_dataset = self._langpair_dataset(noisy_dataset, clean_dataset) denoising_dataset = self._prepend_lang_bos_to_target(denoising_dataset, lang) return denoising_dataset def load_translation_dataset( self, split: str, data_path: str, combine: bool = False ): # only judging with one language pair for the moment, # since ConcatDataset doesn't work as expected assert len(self.valid_lang_pairs) == 1, "For now..." valid_lang_pair = self.valid_lang_pairs[0] src, tgt = valid_lang_pair.split("-") # use the same function than TranslationTask src_tgt_dt = load_langpair_dataset( data_path, split, src, self.common_dict, tgt, self.common_dict, combine=combine, dataset_impl=self.args.dataset_impl, upsample_primary=self.args.upsample_primary, left_pad_source=self.args.left_pad_source, left_pad_target=self.args.left_pad_target, max_source_positions=self.args.max_source_positions, max_target_positions=self.args.max_target_positions, load_alignments=self.args.load_alignments, truncate_source=self.args.truncate_source, num_buckets=self.args.num_batch_buckets, shuffle=(split != "test"), prepend_bos_src=_lang_token_index(self.dictionary, src), ) src_tgt_eos_dt = self._prepend_lang_bos_to_target(src_tgt_dt, tgt) src_tgt_eos_dt.args = self.args return src_tgt_eos_dt def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None): raise NotImplementedError def build_model(self, args): # torch.autograd.set_detect_anomaly(True) model = super().build_model(args) add_secial_tokens_to_dict_and_model(self.common_dict, model, self.mono_langs) self.sequence_generators = {} for mono_lang in self.mono_langs: self.sequence_generators[mono_lang] = SequenceGenerator( [model], tgt_dict=self.dictionary, beam_size=1, max_len_a=1.3, max_len_b=5, min_len=5, # keep 1 to be able to prepend bos max_len=model.max_decoder_positions() - 1, ) if getattr(args, "eval_bleu", False): assert getattr(args, "eval_bleu_detok", None) is not None, ( "--eval-bleu-detok is required if using --eval-bleu; " "try --eval-bleu-detok=moses (or --eval-bleu-detok=space " "to disable detokenization, e.g., when using sentencepiece)" ) detok_args = json.loads(getattr(args, "eval_bleu_detok_args", "{}") or "{}") self.tokenizer = encoders.build_tokenizer( Namespace( tokenizer=getattr(args, "eval_bleu_detok", None), **detok_args ) ) gen_args = 
json.loads(getattr(args, "eval_bleu_args", "{}") or "{}") self.bleu_sequence_generator = self.build_generator( [model], Namespace(**gen_args) ) return model def max_positions(self): """Return the max sentence length allowed by the task.""" return (self.args.max_source_positions, self.args.max_target_positions) @property def dictionary(self): """Return the source :class:`~fairseq.data.Dictionary`.""" return self.common_dict def display_samples_once_in_a_while(self, smp, mono_lang, other_lang): self._show_samples_ctr += 1 if self._show_samples_ctr < self.SHOW_SAMPLES_INTERVAL: return self._show_samples_ctr = 0 ln = smp["net_input"]["src_tokens"].shape[0] logger.info( f"(r:{self.args.distributed_rank}) : " f"{other_lang} ---> {mono_lang} " f"({other_lang} was generated by back-translation.) {ln} samples" ) for i in range(min(ln, self.SHOW_SAMPLES_NUMBER)): src_tokens = smp["net_input"]["src_tokens"][i] tgt_tokens = smp["target"][i] src_str = self.dictionary.string(src_tokens, "sentencepiece") tgt_str = self.dictionary.string(tgt_tokens, "sentencepiece") logger.info( f"\n{i}\t\t[{other_lang} generated] {src_str}\n" f"\t\t[{mono_lang} original ] {tgt_str}\n" f"\t\t[ src tokens] {src_tokens}\n" ) def backtranslate_sample(self, smp, orig_lang, other_lang) -> None: """ * WARNING: smp is modified in place. * At the start of this function, `smp` has the same input and target: |--------------------------------------------------------| | smp['net_input']['src_tokens'] | smp['target'] | | (from data) __en__ hello world | __en__ hello world | |--------------------------------------------------------| * We call generator.generate(smp, bos_token = token("ro")), and copy the result as input * At the end, `smp` has the translation to other language. |--------------------------------------------------------| | smp['net_input']['src_tokens'] | smp['target'] | | (generated) __ro__ salut lume | __en__ hello world | |--------------------------------------------------------| """ bos_token = _lang_token_index(self.dictionary, other_lang) generated = self.sequence_generators[orig_lang].generate( models=[], sample=smp, bos_token=bos_token ) max_lngth = max([gn[0]["tokens"].size(0) for gn in generated]) net_input = smp["net_input"] n_src_tokens = torch.empty( size=(len(generated), max_lngth + 1), dtype=net_input["src_tokens"].dtype ) n_src_lengths = torch.empty( len(generated), dtype=net_input["src_lengths"].dtype ) for i, gn in enumerate(generated): tokens = gn[0]["tokens"] tokens_size = tokens.size(0) padding_needed = max_lngth - tokens_size tokens = torch.cat([tokens.new([bos_token]), tokens]) tokens = F.pad(tokens, (0, padding_needed), value=self.dictionary.pad()) n_src_tokens[i] = tokens n_src_lengths[i] = tokens_size + 1 device = net_input["src_tokens"].device # This seems to be important del net_input["src_tokens"] del net_input["src_lengths"] net_input["src_tokens"] = n_src_tokens.to(device) net_input["src_lengths"] = n_src_lengths.to(device) def generate(self, smp, model): model.eval() orig_lang = ( self.dictionary[smp["net_input"]["src_tokens"][0][0]] .replace(" ", "") .replace("_", "") ) bos_token = smp["net_input"]["prev_output_tokens"][0][0] with torch.no_grad(): generated = self.sequence_generators[orig_lang].generate( models=[model], sample=smp, bos_token=bos_token ) return generated def get_other_lang(self, lang): # TODO: allow more complex mapping if lang != self.mono_langs[0]: return self.mono_langs[0] if len(self.mono_langs) == 2: return self.mono_langs[1] return 
self.mono_langs[np.random.randint(1, len(self.mono_langs))] def train_step( self, sample, model, criterion, optimizer, update_num, ignore_grad=False ): model.train() model.set_num_updates(update_num) agg_loss, agg_sample_size = 0.0, 0.0 agg_logging_output: Dict[str, float] = defaultdict(float) dataset_keys = self.datasets["train"].datasets.keys() weights = { "BT": self.lambda_bt(update_num), "DENOISE": self.lambda_dae(update_num), } log_keys = {"BT": "bt_", "DENOISE": "dae_"} for dataset_key in dataset_keys: smp = sample[dataset_key] mono_lang, task_subtype = dataset_key.split("-") if weights[task_subtype] == 0: continue if task_subtype == "BT": with torch.autograd.profiler.record_function("backtranslation"): model.eval() # TODO: Could we translate to several language at once ? # this would allow to share encoder_out and maximize GPU usage. other_lang = self.get_other_lang(mono_lang) self.backtranslate_sample(smp, mono_lang, other_lang) self.display_samples_once_in_a_while(smp, mono_lang, other_lang) model.train() # Like in FairseqTask.train_step with torch.autograd.profiler.record_function("forward"): loss, sample_size, logging_output = criterion(model, smp) loss *= weights[task_subtype] if ignore_grad: loss *= 0 with torch.autograd.profiler.record_function("backward"): optimizer.backward(loss) agg_loss += loss.item() agg_sample_size += sample_size for k in logging_output: agg_logging_output[log_keys[task_subtype] + k] += logging_output[k] agg_logging_output[k] += logging_output[k] return agg_loss, agg_sample_size, agg_logging_output def get_bos_token_from_sample(self, sample): net_input = sample["net_input"] source_lang_token_id = torch.unique(net_input["src_tokens"][:, 0]).item() source_lang_token = self.dictionary[source_lang_token_id].replace("_", "") target_lang_token_id = _lang_token_index( self.dictionary, self.get_other_lang(source_lang_token) ) return target_lang_token_id def reduce_metrics(self, logging_outputs, criterion): super().reduce_metrics(logging_outputs, criterion) bt_sample_size = sum(x.get("bt_sample_size", 0) for x in logging_outputs) if bt_sample_size: bt_loss_sum = sum(x.get("bt_loss", 0) for x in logging_outputs) bt_loss_sum *= 1 / bt_sample_size / math.log(2) metrics.log_scalar("bt_loss", bt_loss_sum, bt_sample_size, round=3) bt_nll_loss_sum = sum(x.get("bt_nll_loss", 0) for x in logging_outputs) bt_ntokens = sum(x.get("bt_ntokens", 0) for x in logging_outputs) bt_nll_loss_sum *= 1 / bt_ntokens / math.log(2) metrics.log_scalar("bt_nll_loss", bt_nll_loss_sum, bt_ntokens, round=3) metrics.log_derived( "bt_ppl", lambda meters: utils.get_perplexity(meters["bt_nll_loss"].avg) ) dae_sample_size = sum(x.get("dae_sample_size", 0) for x in logging_outputs) if dae_sample_size: dae_loss_sum = sum(x.get("dae_loss", 0) for x in logging_outputs) dae_loss_sum *= 1 / dae_sample_size / math.log(2) metrics.log_scalar("dae_loss", dae_loss_sum, dae_sample_size, round=3) dae_nll_loss_sum = sum(x.get("dae_nll_loss", 0) for x in logging_outputs) dae_ntokens = sum(x.get("dae_ntokens", 0) for x in logging_outputs) dae_nll_loss_sum *= 1 / dae_ntokens / math.log(2) metrics.log_scalar("dae_nll_loss", dae_nll_loss_sum, dae_ntokens, round=3) metrics.log_derived( "dae_ppl", lambda meters: utils.get_perplexity(meters["dae_nll_loss"].avg), ) @torch.no_grad() def extend_embedding( emb: nn.Module, new_vocab_size: int, copy_from_token_id: int ) -> None: old_emb_data = emb.weight.data (old_vocab_size, dim) = old_emb_data.shape assert new_vocab_size >= old_vocab_size if new_vocab_size > 
old_vocab_size: emb.weight.data = torch.zeros((new_vocab_size, dim)) emb.weight.data[:old_vocab_size, :] = old_emb_data # initialize new embeddings emb.weight.data[old_vocab_size:, :] = old_emb_data[copy_from_token_id] if hasattr(emb, "num_embeddings"): emb.num_embeddings = new_vocab_size if hasattr(emb, "out_features"): emb.out_features = new_vocab_size if getattr(emb, "bias", None) is None: return # Fix the bias. # Bias shape can be different from the previous vocab size # if the weight matrix was shared and alread extended but not the bias. (old_vocab_size,) = emb.bias.shape assert new_vocab_size >= old_vocab_size if new_vocab_size > old_vocab_size: old_bias = emb.bias.data new_bias = torch.zeros( (new_vocab_size,), dtype=old_bias.dtype, device=old_bias.device ) new_bias[:old_vocab_size] = old_bias emb.bias.data = new_bias def add_secial_tokens_to_dict_and_model( dictionary: "fairseq.data.Dictionary", model: nn.Module, mono_langs: Sequence[str], ) -> None: embs = model.encoder.embed_tokens vocab_size, embedding_dim = embs.weight.shape # The model may or may not have a '<mask>' embedding yet assert ( len(dictionary) <= vocab_size <= len(dictionary) + 1 ), f"Dictionary len ({len(dictionary)}) doesn't match embs shape ({embs.weight.shape})" # TODO: we should reuse the pretrained model dict which already has <mask> dictionary.add_symbol("<mask>") for lang in mono_langs: lang_token = _lang_token(lang) dictionary.add_symbol(lang_token) logger.info( f"dictionary: {len(dictionary)} -> {vocab_size} tokens " f"after adding {len(mono_langs)} lang tokens." ) if len(dictionary) <= vocab_size: return extend_embedding(embs, len(dictionary), dictionary.bos()) dec_embs = model.decoder.embed_tokens extend_embedding(dec_embs, len(dictionary), dictionary.bos()) lm_head = model.decoder.output_projection extend_embedding(lm_head, len(dictionary), dictionary.bos()) assert lm_head.weight.shape == (len(dictionary), embedding_dim) def _lang_token(lang: str) -> str: return f"__{lang}__" def _lang_token_index(dictionary, lang: str) -> int: return dictionary.index(_lang_token(lang)) @contextlib.contextmanager def assert_weights_have_changed(model: nn.Module): def checksum(model: nn.Module) -> float: return sum(p.sum().item() for p in model.parameters()) initial_checksum = checksum(model) yield model final_checksum = checksum(model) logger.info( f"initial_checksum={initial_checksum} -> final_checksum={final_checksum}" ) assert initial_checksum != final_checksum, "Model hasn't changed !"
28,578
40.843338
118
py
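The back-translation task above relies on per-language tokens such as __en__ being appended to a shared dictionary and looked up with _lang_token_index before they are prepended to source or target sequences. A minimal sketch of that lookup, assuming only the public fairseq Dictionary API and an importable fairseq install; the language codes are illustrative:

from fairseq.data import Dictionary

def _lang_token(lang: str) -> str:
    return f"__{lang}__"

d = Dictionary()                      # starts with <s>, <pad>, </s>, <unk>
for lang in ("en", "ro"):
    d.add_symbol(_lang_token(lang))   # append one language token per mono lang

en_idx = d.index(_lang_token("en"))
assert en_idx != d.unk_index          # the token must be known, not mapped to <unk>
print(en_idx, d[en_idx])              # e.g. 4 __en__ (after the 4 special symbols)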
sign-topic
sign-topic-main/fairseq/tasks/multilingual_translation.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import contextlib import logging import os from collections import OrderedDict from argparse import ArgumentError import torch from fairseq import metrics, options, utils from fairseq.data import ( Dictionary, LanguagePairDataset, RoundRobinZipDatasets, TransformEosLangPairDataset, ) from fairseq.models import FairseqMultiModel from fairseq.tasks.translation import load_langpair_dataset from . import LegacyFairseqTask, register_task logger = logging.getLogger(__name__) def _lang_token(lang: str): return "__{}__".format(lang) def _lang_token_index(dic: Dictionary, lang: str): """Return language token index.""" idx = dic.index(_lang_token(lang)) assert idx != dic.unk_index, "cannot find language token for lang {}".format(lang) return idx @register_task("multilingual_translation") class MultilingualTranslationTask(LegacyFairseqTask): """A task for training multiple translation models simultaneously. We iterate round-robin over batches from multiple language pairs, ordered according to the `--lang-pairs` argument. The training loop is roughly: for i in range(len(epoch)): for lang_pair in args.lang_pairs: batch = next_batch_for_lang_pair(lang_pair) loss = criterion(model_for_lang_pair(lang_pair), batch) loss.backward() optimizer.step() In practice, `next_batch_for_lang_pair` is abstracted in a FairseqDataset (e.g., `RoundRobinZipDatasets`) and `model_for_lang_pair` is a model that implements the `FairseqMultiModel` interface. During inference it is required to specify a single `--source-lang` and `--target-lang`, which indicates the inference langauge direction. `--lang-pairs`, `--encoder-langtok`, `--decoder-langtok` have to be set to the same value as training. """ @staticmethod def add_args(parser): """Add task-specific arguments to the parser.""" # fmt: off parser.add_argument('data', metavar='DIR', help='path to data directory') parser.add_argument('--lang-pairs', default=None, metavar='PAIRS', help='comma-separated list of language pairs (in training order): en-de,en-fr,de-fr') parser.add_argument('-s', '--source-lang', default=None, metavar='SRC', help='source language (only needed for inference)') parser.add_argument('-t', '--target-lang', default=None, metavar='TARGET', help='target language (only needed for inference)') parser.add_argument('--left-pad-source', default='True', type=str, metavar='BOOL', help='pad the source on the left (default: True)') parser.add_argument('--left-pad-target', default='False', type=str, metavar='BOOL', help='pad the target on the left (default: False)') try: parser.add_argument('--max-source-positions', default=1024, type=int, metavar='N', help='max number of tokens in the source sequence') parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N', help='max number of tokens in the target sequence') except ArgumentError: # this might have already been defined. Once we transition this to hydra it should be fine to add it here. pass parser.add_argument('--upsample-primary', default=1, type=int, help='amount to upsample primary dataset') parser.add_argument('--encoder-langtok', default=None, type=str, choices=['src', 'tgt'], metavar='SRCTGT', help='replace beginning-of-sentence in source sentence with source or target ' 'language token. 
(src/tgt)') parser.add_argument('--decoder-langtok', action='store_true', help='replace beginning-of-sentence in target sentence with target language token') # fmt: on def __init__(self, args, dicts, training): super().__init__(args) self.dicts = dicts self.training = training if training: self.lang_pairs = args.lang_pairs else: self.lang_pairs = ["{}-{}".format(args.source_lang, args.target_lang)] # eval_lang_pairs for multilingual translation is usually all of the # lang_pairs. However for other multitask settings or when we want to # optimize for certain languages we want to use a different subset. Thus # the eval_lang_pairs class variable is provided for classes that extend # this class. self.eval_lang_pairs = self.lang_pairs # model_lang_pairs will be used to build encoder-decoder model pairs in # models.build_model(). This allows multitask type of sub-class can # build models other than the input lang_pairs self.model_lang_pairs = self.lang_pairs self.langs = list(dicts.keys()) @classmethod def setup_task(cls, args, **kwargs): dicts, training = cls.prepare(args, **kwargs) return cls(args, dicts, training) @classmethod def update_args(cls, args): args.left_pad_source = utils.eval_bool(args.left_pad_source) args.left_pad_target = utils.eval_bool(args.left_pad_target) if args.lang_pairs is None: raise ValueError( "--lang-pairs is required. List all the language pairs in the training objective." ) if isinstance(args.lang_pairs, str): args.lang_pairs = args.lang_pairs.split(",") @classmethod def prepare(cls, args, **kargs): cls.update_args(args) sorted_langs = sorted( list({x for lang_pair in args.lang_pairs for x in lang_pair.split("-")}) ) if args.source_lang is not None or args.target_lang is not None: training = False else: training = True # load dictionaries dicts = OrderedDict() for lang in sorted_langs: paths = utils.split_paths(args.data) assert len(paths) > 0 dicts[lang] = cls.load_dictionary( os.path.join(paths[0], "dict.{}.txt".format(lang)) ) if len(dicts) > 0: assert dicts[lang].pad() == dicts[sorted_langs[0]].pad() assert dicts[lang].eos() == dicts[sorted_langs[0]].eos() assert dicts[lang].unk() == dicts[sorted_langs[0]].unk() if args.encoder_langtok is not None or args.decoder_langtok: for lang_to_add in sorted_langs: dicts[lang].add_symbol(_lang_token(lang_to_add)) logger.info("[{}] dictionary: {} types".format(lang, len(dicts[lang]))) return dicts, training def get_encoder_langtok(self, src_lang, tgt_lang): if self.args.encoder_langtok is None: return self.dicts[src_lang].eos() if self.args.encoder_langtok == "src": return _lang_token_index(self.dicts[src_lang], src_lang) else: return _lang_token_index(self.dicts[src_lang], tgt_lang) def get_decoder_langtok(self, tgt_lang): if not self.args.decoder_langtok: return self.dicts[tgt_lang].eos() return _lang_token_index(self.dicts[tgt_lang], tgt_lang) def alter_dataset_langtok( self, lang_pair_dataset, src_eos=None, src_lang=None, tgt_eos=None, tgt_lang=None, ): if self.args.encoder_langtok is None and not self.args.decoder_langtok: return lang_pair_dataset new_src_eos = None if ( self.args.encoder_langtok is not None and src_eos is not None and src_lang is not None and tgt_lang is not None ): new_src_eos = self.get_encoder_langtok(src_lang, tgt_lang) else: src_eos = None new_tgt_bos = None if self.args.decoder_langtok and tgt_eos is not None and tgt_lang is not None: new_tgt_bos = self.get_decoder_langtok(tgt_lang) else: tgt_eos = None return TransformEosLangPairDataset( lang_pair_dataset, src_eos=src_eos, 
new_src_eos=new_src_eos, tgt_bos=tgt_eos, new_tgt_bos=new_tgt_bos, ) def load_dataset(self, split, epoch=1, **kwargs): """Load a dataset split.""" paths = utils.split_paths(self.args.data) assert len(paths) > 0 data_path = paths[(epoch - 1) % len(paths)] def language_pair_dataset(lang_pair): src, tgt = lang_pair.split("-") langpair_dataset = load_langpair_dataset( data_path, split, src, self.dicts[src], tgt, self.dicts[tgt], combine=True, dataset_impl=self.args.dataset_impl, upsample_primary=self.args.upsample_primary, left_pad_source=self.args.left_pad_source, left_pad_target=self.args.left_pad_target, max_source_positions=self.args.max_source_positions, max_target_positions=self.args.max_target_positions, ) return self.alter_dataset_langtok( langpair_dataset, src_eos=self.dicts[src].eos(), src_lang=src, tgt_eos=self.dicts[tgt].eos(), tgt_lang=tgt, ) self.datasets[split] = RoundRobinZipDatasets( OrderedDict( [ (lang_pair, language_pair_dataset(lang_pair)) for lang_pair in self.lang_pairs ] ), eval_key=None if self.training else "%s-%s" % (self.args.source_lang, self.args.target_lang), ) def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None): if constraints is not None: raise NotImplementedError( "Constrained decoding with the multilingual_translation task is not supported" ) lang_pair = "%s-%s" % (self.args.source_lang, self.args.target_lang) return RoundRobinZipDatasets( OrderedDict( [ ( lang_pair, self.alter_dataset_langtok( LanguagePairDataset( src_tokens, src_lengths, self.source_dictionary ), src_eos=self.source_dictionary.eos(), src_lang=self.args.source_lang, tgt_eos=self.target_dictionary.eos(), tgt_lang=self.args.target_lang, ), ) ] ), eval_key=lang_pair, ) def build_model(self, args): def check_args(): messages = [] if ( len(set(self.args.lang_pairs).symmetric_difference(args.lang_pairs)) != 0 ): messages.append( "--lang-pairs should include all the language pairs {}.".format( args.lang_pairs ) ) if self.args.encoder_langtok != args.encoder_langtok: messages.append( "--encoder-langtok should be {}.".format(args.encoder_langtok) ) if self.args.decoder_langtok != args.decoder_langtok: messages.append( "--decoder-langtok should {} be set.".format( "" if args.decoder_langtok else "not" ) ) if len(messages) > 0: raise ValueError(" ".join(messages)) # Update args -> the fact that the constructor here # changes the args object doesn't mean you get the same one here self.update_args(args) # Check if task args are consistant with model args check_args() from fairseq import models model = models.build_model(args, self) if not isinstance(model, FairseqMultiModel): raise ValueError( "MultilingualTranslationTask requires a FairseqMultiModel architecture" ) return model def _per_lang_pair_train_loss( self, lang_pair, model, update_num, criterion, sample, optimizer, ignore_grad ): loss, sample_size, logging_output = criterion( model.models[lang_pair], sample[lang_pair] ) if ignore_grad: loss *= 0 optimizer.backward(loss) return loss, sample_size, logging_output def train_step( self, sample, model, criterion, optimizer, update_num, ignore_grad=False ): model.train() from collections import defaultdict agg_loss, agg_sample_size, agg_logging_output = 0.0, 0.0, defaultdict(float) curr_lang_pairs = [ lang_pair for lang_pair in self.model_lang_pairs if sample[lang_pair] is not None and len(sample[lang_pair]) != 0 ] for idx, lang_pair in enumerate(curr_lang_pairs): def maybe_no_sync(): if ( self.args.distributed_world_size > 1 and hasattr(model, "no_sync") and idx < 
len(curr_lang_pairs) - 1 ): return model.no_sync() else: return contextlib.ExitStack() # dummy contextmanager with maybe_no_sync(): loss, sample_size, logging_output = self._per_lang_pair_train_loss( lang_pair, model, update_num, criterion, sample, optimizer, ignore_grad, ) agg_loss += loss.detach().item() # TODO make summing of the sample sizes configurable agg_sample_size += sample_size for k in logging_output: agg_logging_output[k] += logging_output[k] agg_logging_output[f"{lang_pair}:{k}"] += logging_output[k] return agg_loss, agg_sample_size, agg_logging_output def _per_lang_pair_valid_loss(self, lang_pair, model, criterion, sample): return criterion(model.models[lang_pair], sample[lang_pair]) def valid_step(self, sample, model, criterion): model.eval() with torch.no_grad(): from collections import defaultdict agg_loss, agg_sample_size, agg_logging_output = 0.0, 0.0, defaultdict(float) for lang_pair in self.eval_lang_pairs: if ( lang_pair not in sample or sample[lang_pair] is None or len(sample[lang_pair]) == 0 ): continue loss, sample_size, logging_output = self._per_lang_pair_valid_loss( lang_pair, model, criterion, sample ) agg_loss += loss.data.item() # TODO make summing of the sample sizes configurable agg_sample_size += sample_size for k in logging_output: agg_logging_output[k] += logging_output[k] agg_logging_output[f"{lang_pair}:{k}"] += logging_output[k] return agg_loss, agg_sample_size, agg_logging_output def inference_step( self, generator, models, sample, prefix_tokens=None, constraints=None ): with torch.no_grad(): if self.args.decoder_langtok: bos_token = _lang_token_index( self.target_dictionary, self.args.target_lang ) else: bos_token = self.target_dictionary.eos() return generator.generate( models, sample, prefix_tokens=prefix_tokens, constraints=constraints, bos_token=bos_token, ) def reduce_metrics(self, logging_outputs, criterion): with metrics.aggregate(): # pass 'sample_size', 'nsentences', 'ntokens' stats to fairseq_task super().reduce_metrics(logging_outputs, criterion) for k in ["sample_size", "nsentences", "ntokens"]: metrics.log_scalar(k, sum(l[k] for l in logging_outputs)) @property def source_dictionary(self): if self.training: return next(iter(self.dicts.values())) else: return self.dicts[self.args.source_lang] @property def target_dictionary(self): if self.training: return next(iter(self.dicts.values())) else: return self.dicts[self.args.target_lang] def max_positions(self): """Return the max sentence length allowed by the task.""" if len(self.datasets.values()) == 0: return { "%s-%s" % (self.args.source_lang, self.args.target_lang): ( self.args.max_source_positions, self.args.max_target_positions, ) } return OrderedDict( [ (key, (self.args.max_source_positions, self.args.max_target_positions)) for split in self.datasets.keys() for key in self.datasets[split].datasets.keys() ] )
18,125
38.149028
118
py
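MultilingualTranslationTask.prepare derives the sorted set of languages, and one dictionary file per language, from the comma-separated --lang-pairs argument. A minimal standalone sketch of that parsing step, using plain Python only; the variable names are chosen for illustration rather than taken from fairseq:

# --lang-pairs en-de,en-fr,de-fr  ->  languages and per-language dict files
lang_pairs = "en-de,en-fr,de-fr".split(",")
sorted_langs = sorted({x for pair in lang_pairs for x in pair.split("-")})
dict_files = [f"dict.{lang}.txt" for lang in sorted_langs]
print(sorted_langs)   # ['de', 'en', 'fr']
print(dict_files)     # ['dict.de.txt', 'dict.en.txt', 'dict.fr.txt']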
sign-topic
sign-topic-main/fairseq/tasks/translation_lev.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from dataclasses import dataclass, field import torch from fairseq import utils from fairseq.data import LanguagePairDataset from fairseq.dataclass import ChoiceEnum from fairseq.tasks import register_task from fairseq.tasks.translation import ( TranslationConfig, TranslationTask, load_langpair_dataset, ) from fairseq.utils import new_arange NOISE_CHOICES = ChoiceEnum(["random_delete", "random_mask", "no_noise", "full_mask"]) @dataclass class TranslationLevenshteinConfig(TranslationConfig): noise: NOISE_CHOICES = field( default="random_delete", metadata={"help": "type of noise"}, ) @register_task("translation_lev", dataclass=TranslationLevenshteinConfig) class TranslationLevenshteinTask(TranslationTask): """ Translation (Sequence Generation) task for Levenshtein Transformer See `"Levenshtein Transformer" <https://arxiv.org/abs/1905.11006>`_. """ cfg: TranslationLevenshteinConfig def load_dataset(self, split, epoch=1, combine=False, **kwargs): """Load a given dataset split. Args: split (str): name of the split (e.g., train, valid, test) """ paths = utils.split_paths(self.cfg.data) assert len(paths) > 0 data_path = paths[(epoch - 1) % len(paths)] # infer langcode src, tgt = self.cfg.source_lang, self.cfg.target_lang self.datasets[split] = load_langpair_dataset( data_path, split, src, self.src_dict, tgt, self.tgt_dict, combine=combine, dataset_impl=self.cfg.dataset_impl, upsample_primary=self.cfg.upsample_primary, left_pad_source=self.cfg.left_pad_source, left_pad_target=self.cfg.left_pad_target, max_source_positions=self.cfg.max_source_positions, max_target_positions=self.cfg.max_target_positions, prepend_bos=True, ) def inject_noise(self, target_tokens): def _random_delete(target_tokens): pad = self.tgt_dict.pad() bos = self.tgt_dict.bos() eos = self.tgt_dict.eos() max_len = target_tokens.size(1) target_mask = target_tokens.eq(pad) target_score = target_tokens.clone().float().uniform_() target_score.masked_fill_( target_tokens.eq(bos) | target_tokens.eq(eos), 0.0 ) target_score.masked_fill_(target_mask, 1) target_score, target_rank = target_score.sort(1) target_length = target_mask.size(1) - target_mask.float().sum( 1, keepdim=True ) # do not delete <bos> and <eos> (we assign 0 score for them) target_cutoff = ( 2 + ( (target_length - 2) * target_score.new_zeros(target_score.size(0), 1).uniform_() ).long() ) target_cutoff = target_score.sort(1)[1] >= target_cutoff prev_target_tokens = ( target_tokens.gather(1, target_rank) .masked_fill_(target_cutoff, pad) .gather(1, target_rank.masked_fill_(target_cutoff, max_len).sort(1)[1]) ) prev_target_tokens = prev_target_tokens[ :, : prev_target_tokens.ne(pad).sum(1).max() ] return prev_target_tokens def _random_mask(target_tokens): pad = self.tgt_dict.pad() bos = self.tgt_dict.bos() eos = self.tgt_dict.eos() unk = self.tgt_dict.unk() target_masks = ( target_tokens.ne(pad) & target_tokens.ne(bos) & target_tokens.ne(eos) ) target_score = target_tokens.clone().float().uniform_() target_score.masked_fill_(~target_masks, 2.0) target_length = target_masks.sum(1).float() target_length = target_length * target_length.clone().uniform_() target_length = target_length + 1 # make sure to mask at least one token. 
_, target_rank = target_score.sort(1) target_cutoff = new_arange(target_rank) < target_length[:, None].long() prev_target_tokens = target_tokens.masked_fill( target_cutoff.scatter(1, target_rank, target_cutoff), unk ) return prev_target_tokens def _full_mask(target_tokens): pad = self.tgt_dict.pad() bos = self.tgt_dict.bos() eos = self.tgt_dict.eos() unk = self.tgt_dict.unk() target_mask = ( target_tokens.eq(bos) | target_tokens.eq(eos) | target_tokens.eq(pad) ) return target_tokens.masked_fill(~target_mask, unk) if self.cfg.noise == "random_delete": return _random_delete(target_tokens) elif self.cfg.noise == "random_mask": return _random_mask(target_tokens) elif self.cfg.noise == "full_mask": return _full_mask(target_tokens) elif self.cfg.noise == "no_noise": return target_tokens else: raise NotImplementedError def build_generator(self, models, args, **unused): # add models input to match the API for SequenceGenerator from fairseq.iterative_refinement_generator import IterativeRefinementGenerator return IterativeRefinementGenerator( self.target_dictionary, eos_penalty=getattr(args, "iter_decode_eos_penalty", 0.0), max_iter=getattr(args, "iter_decode_max_iter", 10), beam_size=getattr(args, "iter_decode_with_beam", 1), reranking=getattr(args, "iter_decode_with_external_reranker", False), decoding_format=getattr(args, "decoding_format", None), adaptive=not getattr(args, "iter_decode_force_max_iter", False), retain_history=getattr(args, "retain_iter_history", False), ) def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None): if constraints is not None: # Though see Susanto et al. (ACL 2020): https://www.aclweb.org/anthology/2020.acl-main.325/ raise NotImplementedError( "Constrained decoding with the translation_lev task is not supported" ) return LanguagePairDataset( src_tokens, src_lengths, self.source_dictionary, append_bos=True ) def train_step( self, sample, model, criterion, optimizer, update_num, ignore_grad=False ): model.train() sample["prev_target"] = self.inject_noise(sample["target"]) loss, sample_size, logging_output = criterion(model, sample) if ignore_grad: loss *= 0 optimizer.backward(loss) return loss, sample_size, logging_output def valid_step(self, sample, model, criterion): model.eval() with torch.no_grad(): sample["prev_target"] = self.inject_noise(sample["target"]) loss, sample_size, logging_output = criterion(model, sample) return loss, sample_size, logging_output
7,416
36.841837
103
py
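TranslationLevenshteinTask.inject_noise supports a full_mask variant that replaces every target position except <bos>, <eos> and <pad> with <unk>. A minimal sketch of that operation on toy tensors; the token ids below (0=<bos>, 1=<pad>, 2=<eos>, 3=<unk>) are assumptions for the example, not values read from a fairseq dictionary:

import torch

bos, pad, eos, unk = 0, 1, 2, 3
target_tokens = torch.tensor([[0, 11, 12, 13, 2],
                              [0, 21, 22, 2, 1]])
# keep only the special positions, mask everything else with <unk>
keep = target_tokens.eq(bos) | target_tokens.eq(eos) | target_tokens.eq(pad)
full_masked = target_tokens.masked_fill(~keep, unk)
print(full_masked)
# tensor([[0, 3, 3, 3, 2],
#         [0, 3, 3, 2, 1]])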
sign-topic
sign-topic-main/fairseq/tasks/SL_topic_detection.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import os import logging from pathlib import Path from argparse import Namespace import pandas as pd import numpy as np import torch import torch.nn as nn from fairseq.data import Dictionary from fairseq.data import AddTargetDataset from fairseq.data import LanguagePairDataset from dataclasses import dataclass from dataclasses import field from fairseq.dataclass import FairseqDataclass from fairseq.dataclass.constants import ChoiceEnum from typing import Optional from omegaconf import MISSING, II from fairseq.data.sign_language import ( SignFeatsType, SLTopicDetectionDataset, ) from fairseq.data.text_compressor import TextCompressor from fairseq.data.text_compressor import TextCompressionLevel from fairseq.tasks import FairseqTask from fairseq.tasks import register_task from fairseq import metrics logger = logging.getLogger(__name__) @dataclass class SLTopicDetectionConfig(FairseqDataclass): data: str = field(default=MISSING, metadata={"help": "path to data directory"}) dict_path: str = field( default = MISSING, metadata={'help': 'Path to dictionary mapping category number to category name'}, ) modeling_task: str = field( default = 'classification', metadata={'help': 'Modeling task.'}, ) num_labels: str = field( default=10, metadata={'help': 'Number of labelswhen modeling_task is classification'} ) max_source_positions: Optional[int] = field( default=5500, metadata={"help": "max number of frames in the source sequence"} ) min_source_positions: Optional[int] = field( default=150, metadata={"help": "min number of frames in the source sequence"} ) normalize: bool = field( default=False, metadata={"help": "if set, normalizes input to have 0 mean and unit variance"}, ) body_parts: str = field( default = "face,upperbody,lefthand,righthand", metadata={"help": "Select the keypoints that you want to use. Options: 'face','upperbody','lowerbody','lefthand', 'righthand'"}, ) feat_dims: str = field( default = "0,1,2", metadata={"help": "Select the keypoints dimensions that you want to use. Options: 0, 1, 2, 3"}, ) shuffle_dataset: bool = field( default=True, metadata={"help": "set True to shuffle the dataset between epochs"}, ) num_batch_buckets: int = field( default=0, metadata={"help": "number of buckets"}, ) text_compression_level: ChoiceEnum([x.name for x in TextCompressionLevel]) = field( default="none", metadata={ "help": "compression level for texts (e.g. audio filenames, " "target texts): none/low/high (default: none). " }, ) feats_type: ChoiceEnum([x.name for x in SignFeatsType]) = field( default="keypoints", metadata={ "help": ( "type of features for the sign input data:" "keypoints/mediapipe_keypoints/rotational/mediapipe_rotational/i3d/spot_align/spot_align_albert/text/text_albert (default: keypoints)." 
) }, ) eval_accuracy: bool = field( default=True, metadata={'help': 'set to True to evaluate validation accuracy'}, ) tpu: bool = II("common.tpu") bpe_sentencepiece_model: str = II("bpe.sentencepiece_model") @register_task("SL_topic_detection", dataclass=SLTopicDetectionConfig) class SLTopicDetectionTask(FairseqTask): def __init__(self, cfg, label_dict=None, src_dict=None): # TODO: check that src_dict is passed when text data is used super().__init__(cfg) self.label_dict = label_dict self.src_dict = src_dict if SignFeatsType[cfg.feats_type] in [SignFeatsType.text, SignFeatsType.spot_align]: self.bpe_tokenizer = self.build_bpe( Namespace( bpe='sentencepiece', sentencepiece_model=cfg.bpe_sentencepiece_model ) ) self.softmax = nn.Softmax(dim=1) @classmethod def setup_task(cls, cfg): if 'SEED' in os.environ: seed = int(os.environ.get('SEED')) torch.manual_seed(seed) np.random.seed(seed) if SignFeatsType[cfg.feats_type] in [SignFeatsType.text, SignFeatsType.spot_align]: # cfg.bpe_sentencepiece_model = os.environ.get('SP_MODEL', cfg.bpe_sentencepiece_model) ## TODO: this is a temporary fix for ALTI on transformerCLS dict_path = Path(cfg.bpe_sentencepiece_model).with_suffix('.txt') # print(f'dict_path = {dict_path}') if not dict_path.is_file(): raise FileNotFoundError(f"Dict not found: {dict_path.as_posix()}") src_dict = Dictionary.load(dict_path.as_posix()) logger.info( f"dictionary size ({dict_path.name}): " f"{len(src_dict):,}" ) return cls(cfg, src_dict=src_dict) return cls(cfg) def load_dataset(self, split, epoch=1, combine=False, **kwargs): root_dir = Path(self.cfg.data) assert root_dir.is_dir(), f'{root_dir} does not exist' manifest_file = root_dir / f"{split}_filt.csv" if SignFeatsType(self.cfg.feats_type) in [ SignFeatsType.keypoints, SignFeatsType.mediapipe_keypoints, SignFeatsType.rotational, SignFeatsType.mediapipe_rotational, SignFeatsType.i3d, SignFeatsType.spot_align_albert, SignFeatsType.text_albert, ]: feats_path = root_dir / f"{split}_filt.h5" elif SignFeatsType(self.cfg.feats_type) == SignFeatsType.video: DATA_PATH = { 'train': '/home/alvaro/Documents/ML and DL/How2Sign/TFG/Sign-Language-Topic-Detection/data/How2Sign/video', 'val': '/home/alvaro/Documents/ML and DL/How2Sign/TFG/Sign-Language-Topic-Detection/data/How2Sign/video', 'test': '/home/alvaro/Documents/ML and DL/How2Sign/TFG/Sign-Language-Topic-Detection/data/How2Sign/video', } feats_path = DATA_PATH[split] # TODO: decide what path to load from. 
Probably: feats_path = root_dir / f"{split}_filt.h5" elif SignFeatsType(self.cfg.feats_type) in [SignFeatsType.text, SignFeatsType.spot_align]: feats_path = None else: raise NotImplementedError( ( 'Features other than i3d, keypoints, rotational, spot_align, spot_align_albert, text or text_albert' 'are not available for How2Sign yet' ) ) if self.cfg.num_batch_buckets > 0 or self.cfg.tpu: raise NotImplementedError("Pending to implement bucket_pad_length_dataset wrapper") print(f'manifest_file {manifest_file}', flush=True) self.datasets[split] = SLTopicDetectionDataset.from_manifest_file( manifest_file=manifest_file, feats_path=feats_path, feats_type=self.cfg.feats_type, bodyparts=self.cfg.body_parts.split(','), feat_dims=[int(d) for d in self.cfg.feat_dims.split(',')], min_sample_size=self.cfg.min_source_positions, max_sample_size=self.cfg.max_source_positions, shuffle=self.cfg.shuffle_dataset, normalize=self.cfg.normalize, ) data = pd.read_csv(manifest_file, sep="\t") text_compressor = TextCompressor(level=self.cfg.text_compression_level) labels = [ text_compressor.compress(str(row['CATEGORY_ID'])) for _, row in data.iterrows() if row['VIDEO_ID'] not in self.datasets[split].skipped_ids ] assert len(labels) == len(self.datasets[split]), ( f"The length of the labels list ({len(labels)}) and the dataset length" f" after skipping some ids ({len(self.datasets[split].skipped_ids)})" f" do not match. Original dataset length is ({len(self.datasets[split])})" ) def process_label_fn(label): return torch.tensor([int(label)]) - 1 def label_len_fn(label): return len(torch.tensor([int(label)])) if SignFeatsType(self.cfg.feats_type) in [SignFeatsType.text, SignFeatsType.spot_align]: # TODO: decide if input text data should be compressed also def process_sentence_fn(sentence): tokens = self.source_dictionary.encode_line( self.bpe_tokenizer.encode(sentence), append_eos=False, add_if_not_exist=False, ) return tokens def sentence_len_fn(tokens): return tokens.numel() sentences = [ process_sentence_fn(row['TEXT']) for i, row in data.iterrows() if row['VIDEO_ID'] not in self.datasets[split].skipped_ids ] lengths = [sentence_len_fn(tokens) for tokens in sentences] assert len(sentences) == len(self.datasets[split]), ( f"The length of the sentences list ({len(sentences)}) and the dataset's length" f" after skipping some ids ({len(self.datasets[split].skipped_ids)})" f" do not match. Original dataset length is ({len(self.datasets[split])})" ) labels = [ torch.tensor([int(row['CATEGORY_ID'])]) - 1 for _, row in data.iterrows() if row['VIDEO_ID'] not in self.datasets[split].skipped_ids ] self.datasets[split] = LanguagePairDataset( src=sentences, src_sizes=lengths, src_dict=self.source_dictionary, tgt=labels, tgt_sizes=torch.ones(len(labels)), # targets have length 1 left_pad_source=False, # Since our target is a single class label, there's no need for # teacher forcing. If we set this to ``True`` then our Model's # ``forward()`` method would receive an additional argument called # *prev_output_tokens* that would contain a shifted version of the # target sequence. 
input_feeding=False, append_eos_to_target=False, eos=self.source_dictionary.eos(), ) else: self.datasets[split] = AddTargetDataset( self.datasets[split], labels, pad=0, eos=None, batch_targets=True, process_label=process_label_fn, label_len_fn=label_len_fn, add_to_input=False, ) @property def target_dictionary(self): return self.label_dict @property def source_dictionary(self): return self.src_dict def max_positions(self): return self.cfg.max_source_positions def get_interactive_tokens_and_lengths(self, lines, encode_fn): n_frames = [] for l in lines: h5_file, _id = l.split(':') feats_path = h5py.File(h5_file, "r") n_frames.append(np.array(feats_path[_id]).shape[0]) return lines, n_frames # TODO: Implement this method def build_dataset_for_inference(self, src_tokens, src_lengths, **kwargs): raise NotImplementedError return SpeechToTextDataset( "interactive", False, self.data_cfg, src_tokens, src_lengths ) #Add this for validation def build_model(self, cfg, from_checkpoint=False): model = super().build_model(cfg) if from_checkpoint: pass # TODO: Implement this return model #Add this for validation def valid_step(self, sample, model, criterion): loss, sample_size, logging_output = super().valid_step(sample, model, criterion) if self.cfg.eval_accuracy: model.eval() with torch.no_grad(): out = model(sample['net_input']['src_tokens'], sample['net_input']['src_lengths']) preds = torch.argmax(self.softmax(out), dim=1) # we split counts into separate entries so that they can be # summed efficiently across workers using fast-stat-sync targets = sample['target'] logging_output['_acc_counts_'] = sum( torch.eq( preds.flatten(), targets.flatten() ) ).item() logging_output['_acc_totals_'] = targets.flatten().shape[0] return loss, sample_size, logging_output def inference_step( self, sample, model, output_attentions=None, targets_container=None, preds_container=None, ): model.eval() with torch.no_grad(): if output_attentions: out = model( sample['net_input']['src_tokens'], sample['net_input']['src_lengths'], output_attentions=output_attentions ) else: out = model( sample['net_input']['src_tokens'], sample['net_input']['src_lengths'] ) preds = torch.argmax(self.softmax(out), dim=1) # we split counts into separate entries so that they can be # summed efficiently across workers using fast-stat-sync targets = sample['target'] if targets_container is not None: targets_container.append(targets) if preds_container is not None: preds_container.append(preds) counts = sum( torch.eq( preds.flatten(), targets.flatten() ) ).item() total = targets.flatten().shape[0] return counts, total #Add this for validation def reduce_metrics(self, logging_outputs, criterion): super().reduce_metrics(logging_outputs, criterion) if self.cfg.eval_accuracy: def sum_logs(key): result = sum(log.get(key, 0) for log in logging_outputs) if torch.is_tensor(result): result = result.cpu() return result counts, totals = [], [] counts.append(sum_logs('_acc_counts_')) totals.append(sum_logs('_acc_totals_')) if max(totals) > 0: # log counts as numpy arrays -- log_scalar will sum them correctly metrics.log_scalar('_acc_counts_', np.array(counts)) metrics.log_scalar('_acc_totals_', np.array(totals)) def compute_accuracy(meters): acc = meters['_acc_counts_'].sum[0] / meters['_acc_totals_'].sum[0] return round(acc, 2) metrics.log_derived('acc', compute_accuracy)
15,194
39.092348
159
py
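SLTopicDetectionTask.valid_step takes predictions as the argmax of the softmaxed model output and logs correct counts and totals as separate entries so they can be summed across workers before the accuracy is derived. A minimal sketch of that bookkeeping on toy values; the logits and targets are made up for illustration:

import torch

logits = torch.tensor([[2.0, 0.1, 0.3],
                       [0.2, 1.5, 0.1],
                       [0.1, 0.2, 3.0]])
targets = torch.tensor([0, 2, 2])
preds = torch.argmax(torch.softmax(logits, dim=1), dim=1)
counts = torch.eq(preds.flatten(), targets.flatten()).sum().item()  # 2 correct
total = targets.numel()                                             # 3 samples
print(counts / total)   # 0.666..., i.e. the derived accuracy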
sign-topic
sign-topic-main/fairseq/tasks/fairseq_task.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import os import warnings from argparse import Namespace from typing import Any, Callable, Dict, List import torch from fairseq import metrics, search, tokenizer, utils from fairseq.data import Dictionary, FairseqDataset, data_utils, encoders, iterators from fairseq.dataclass import FairseqDataclass from fairseq.dataclass.utils import gen_parser_from_dataclass from fairseq.optim.amp_optimizer import AMPOptimizer from omegaconf import DictConfig logger = logging.getLogger(__name__) class StatefulContainer(object): def __init__(self): self._state = dict() self._factories = dict() def add_factory(self, name, factory: Callable[[], Any]): self._factories[name] = factory def merge_state_dict(self, state_dict: Dict[str, Any]): self._state.update(state_dict) @property def state_dict(self) -> Dict[str, Any]: return self._state def __getattr__(self, name): if name not in self._state and name in self._factories: self._state[name] = self._factories[name]() if name in self._state: return self._state[name] raise AttributeError(f"Task state has no factory for attribute {name}") class FairseqTask(object): """ Tasks store dictionaries and provide helpers for loading/iterating over Datasets, initializing the Model/Criterion and calculating the loss. Tasks have limited statefulness. In particular, state that needs to be saved to/loaded from checkpoints needs to be stored in the `self.state` :class:`StatefulContainer` object. For example:: self.state.add_factory("dictionary", self.load_dictionary) print(self.state.dictionary) # calls self.load_dictionary() This is necessary so that when loading checkpoints, we can properly recreate the task state after initializing the task instance. """ @classmethod def add_args(cls, parser): """Add task-specific arguments to the parser.""" dc = getattr(cls, "__dataclass", None) if dc is not None: gen_parser_from_dataclass(parser, dc()) @staticmethod def logging_outputs_can_be_summed(criterion) -> bool: """ Whether the logging outputs returned by `train_step` and `valid_step` can be summed across workers prior to calling `aggregate_logging_outputs`. Setting this to True will improves distributed training speed. """ return criterion.logging_outputs_can_be_summed() def __init__(self, cfg: FairseqDataclass, **kwargs): self.cfg = cfg self.datasets = dict() self.dataset_to_epoch_iter = dict() self.state = StatefulContainer() @classmethod def load_dictionary(cls, filename): """Load the dictionary from the filename Args: filename (str): the filename """ return Dictionary.load(filename) @classmethod def build_dictionary( cls, filenames, workers=1, threshold=-1, nwords=-1, padding_factor=8 ): """Build the dictionary Args: filenames (list): list of filenames workers (int): number of concurrent workers threshold (int): defines the minimum word count nwords (int): defines the total number of words in the final dictionary, including special symbols padding_factor (int): can be used to pad the dictionary size to be a multiple of 8, which is important on some hardware (e.g., Nvidia Tensor Cores). 
""" d = Dictionary() for filename in filenames: Dictionary.add_file_to_dictionary( filename, d, tokenizer.tokenize_line, workers ) d.finalize(threshold=threshold, nwords=nwords, padding_factor=padding_factor) return d @classmethod def setup_task(cls, cfg: DictConfig, **kwargs): """Setup the task (e.g., load dictionaries). Args: cfg (omegaconf.DictConfig): parsed command-line arguments """ return cls(cfg, **kwargs) def has_sharded_data(self, split): return os.pathsep in getattr(self.cfg, "data", "") def load_dataset( self, split: str, combine: bool = False, task_cfg: FairseqDataclass = None, **kwargs, ): """Load a given dataset split. Args: split (str): name of the split (e.g., train, valid, test) combine (bool): combines a split segmented into pieces into one dataset task_cfg (FairseqDataclass): optional task configuration stored in the checkpoint that can be used to load datasets """ raise NotImplementedError def dataset(self, split): """ Return a loaded dataset split. Args: split (str): name of the split (e.g., train, valid, test) Returns: a :class:`~fairseq.data.FairseqDataset` corresponding to *split* """ from fairseq.data import FairseqDataset if split not in self.datasets: raise KeyError("Dataset not loaded: " + split) if not isinstance(self.datasets[split], FairseqDataset): raise TypeError("Datasets are expected to be of type FairseqDataset") return self.datasets[split] def filter_indices_by_size( self, indices, dataset, max_positions=None, ignore_invalid_inputs=False ): """ Filter examples that are too large Args: indices (np.array): original array of sample indices dataset (~fairseq.data.FairseqDataset): dataset to batch max_positions (optional): max sentence length supported by the model (default: None). ignore_invalid_inputs (bool, optional): don't raise Exception for sentences that are too long (default: False). Returns: np.array: array of filtered sample indices """ indices, ignored = dataset.filter_indices_by_size(indices, max_positions) if len(ignored) > 0: if not ignore_invalid_inputs: raise Exception( ( "Size of sample #{} is invalid (={}) since max_positions={}, " "skip this example with --skip-invalid-size-inputs-valid-test" ).format(ignored[0], dataset.size(ignored[0]), max_positions) ) logger.warning( ( "{:,} samples have invalid sizes and will be skipped, " "max_positions={}, first few sample ids={}" ).format(len(ignored), max_positions, ignored[:10]) ) return indices def can_reuse_epoch_itr(self, dataset): # We can reuse the epoch iterator across epochs as long as the dataset # hasn't disabled it. We default to ``False`` here, although in practice # this will be ``True`` for most datasets that inherit from # ``FairseqDataset`` due to the base implementation there. return getattr(dataset, "can_reuse_epoch_itr_across_epochs", False) def get_batch_iterator( self, dataset, max_tokens=None, max_sentences=None, max_positions=None, ignore_invalid_inputs=False, required_batch_size_multiple=1, seed=1, num_shards=1, shard_id=0, num_workers=0, epoch=1, data_buffer_size=0, disable_iterator_cache=False, skip_remainder_batch=False, grouped_shuffling=False, update_epoch_batch_itr=False, ): """ Get an iterator that yields batches of data from the given dataset. Args: dataset (~fairseq.data.FairseqDataset): dataset to batch max_tokens (int, optional): max number of tokens in each batch (default: None). max_sentences (int, optional): max number of sentences in each batch (default: None). max_positions (optional): max sentence length supported by the model (default: None). 
ignore_invalid_inputs (bool, optional): don't raise Exception for sentences that are too long (default: False). required_batch_size_multiple (int, optional): require batch size to be a multiple of N (default: 1). seed (int, optional): seed for random number generator for reproducibility (default: 1). num_shards (int, optional): shard the data iterator into N shards (default: 1). shard_id (int, optional): which shard of the data iterator to return (default: 0). num_workers (int, optional): how many subprocesses to use for data loading. 0 means the data will be loaded in the main process (default: 0). epoch (int, optional): the epoch to start the iterator from (default: 1). data_buffer_size (int, optional): number of batches to preload (default: 0). disable_iterator_cache (bool, optional): don't cache the EpochBatchIterator (ignores `FairseqTask::can_reuse_epoch_itr`) (default: False). skip_remainder_batch (bool, optional): if set, discard the last batch in each training epoch, as the last batch is often smaller than local_batch_size * distributed_word_size (default: ``True``). grouped_shuffling (bool, optional): group batches with each groups containing num_shards batches and shuffle groups. Reduces difference between sequence lengths among workers for batches sorted by length. update_epoch_batch_itr (bool optional): if true then donot use the cached batch iterator for the epoch Returns: ~fairseq.iterators.EpochBatchIterator: a batched iterator over the given dataset split """ can_reuse_epoch_itr = ( not disable_iterator_cache and not update_epoch_batch_itr and self.can_reuse_epoch_itr(dataset) ) if can_reuse_epoch_itr and dataset in self.dataset_to_epoch_iter: logger.debug("reusing EpochBatchIterator for epoch {}".format(epoch)) return self.dataset_to_epoch_iter[dataset] assert isinstance(dataset, FairseqDataset) # initialize the dataset with the correct starting epoch dataset.set_epoch(epoch) # get indices ordered by example size with data_utils.numpy_seed(seed): indices = dataset.ordered_indices() # filter examples that are too large if max_positions is not None: indices = self.filter_indices_by_size( indices, dataset, max_positions, ignore_invalid_inputs ) # create mini-batches with given size constraints batch_sampler = dataset.batch_by_size( indices, max_tokens=max_tokens, max_sentences=max_sentences, required_batch_size_multiple=required_batch_size_multiple, ) # return a reusable, sharded iterator epoch_iter = iterators.EpochBatchIterator( dataset=dataset, collate_fn=dataset.collater, batch_sampler=batch_sampler, seed=seed, num_shards=num_shards, shard_id=shard_id, num_workers=num_workers, epoch=epoch, buffer_size=data_buffer_size, skip_remainder_batch=skip_remainder_batch, grouped_shuffling=grouped_shuffling, ) if can_reuse_epoch_itr: self.dataset_to_epoch_iter[dataset] = epoch_iter return epoch_iter def build_model(self, cfg: FairseqDataclass): """ Build the :class:`~fairseq.models.BaseFairseqModel` instance for this task. Args: cfg (FairseqDataclass): configuration object Returns: a :class:`~fairseq.models.BaseFairseqModel` instance """ from fairseq import models, quantization_utils model = models.build_model(cfg, self) model = quantization_utils.quantize_model_scalar(model, cfg) return model def build_criterion(self, cfg: DictConfig): """ Build the :class:`~fairseq.criterions.FairseqCriterion` instance for this task. 
Args: cfg (omegaconf.DictConfig): configration object Returns: a :class:`~fairseq.criterions.FairseqCriterion` instance """ from fairseq import criterions return criterions.build_criterion(cfg, self) def build_generator( self, models, args, seq_gen_cls=None, extra_gen_cls_kwargs=None, prefix_allowed_tokens_fn=None, ): """ Build a :class:`~fairseq.SequenceGenerator` instance for this task. Args: models (List[~fairseq.models.FairseqModel]): ensemble of models args (fairseq.dataclass.configs.GenerationConfig): configuration object (dataclass) for generation extra_gen_cls_kwargs (Dict[str, Any]): extra options to pass through to SequenceGenerator prefix_allowed_tokens_fn (Callable[[int, torch.Tensor], List[int]]): If provided, this function constrains the beam search to allowed tokens only at each step. The provided function should take 2 arguments: the batch ID (`batch_id: int`) and a unidimensional tensor of token ids (`inputs_ids: torch.Tensor`). It has to return a `List[int]` with the allowed tokens for the next generation step conditioned on the previously generated tokens (`inputs_ids`) and the batch ID (`batch_id`). This argument is useful for constrained generation conditioned on the prefix, as described in "Autoregressive Entity Retrieval" (https://arxiv.org/abs/2010.00904) and https://github.com/facebookresearch/GENRE. """ if getattr(args, "score_reference", False): from fairseq.sequence_scorer import SequenceScorer return SequenceScorer( self.target_dictionary, compute_alignment=getattr(args, "print_alignment", False), ) from fairseq.sequence_generator import ( SequenceGenerator, SequenceGeneratorWithAlignment, ) # Choose search strategy. Defaults to Beam Search. sampling = getattr(args, "sampling", False) sampling_topk = getattr(args, "sampling_topk", -1) sampling_topp = getattr(args, "sampling_topp", -1.0) diverse_beam_groups = getattr(args, "diverse_beam_groups", -1) diverse_beam_strength = getattr(args, "diverse_beam_strength", 0.5) match_source_len = getattr(args, "match_source_len", False) diversity_rate = getattr(args, "diversity_rate", -1) constrained = getattr(args, "constraints", False) if prefix_allowed_tokens_fn is None: prefix_allowed_tokens_fn = getattr(args, "prefix_allowed_tokens_fn", None) if ( sum( int(cond) for cond in [ sampling, diverse_beam_groups > 0, match_source_len, diversity_rate > 0, ] ) > 1 ): raise ValueError("Provided Search parameters are mutually exclusive.") assert sampling_topk < 0 or sampling, "--sampling-topk requires --sampling" assert sampling_topp < 0 or sampling, "--sampling-topp requires --sampling" if sampling: search_strategy = search.Sampling( self.target_dictionary, sampling_topk, sampling_topp ) elif diverse_beam_groups > 0: search_strategy = search.DiverseBeamSearch( self.target_dictionary, diverse_beam_groups, diverse_beam_strength ) elif match_source_len: # this is useful for tagging applications where the output # length should match the input length, so we hardcode the # length constraints for simplicity search_strategy = search.LengthConstrainedBeamSearch( self.target_dictionary, min_len_a=1, min_len_b=0, max_len_a=1, max_len_b=0, ) elif diversity_rate > -1: search_strategy = search.DiverseSiblingsSearch( self.target_dictionary, diversity_rate ) elif constrained: search_strategy = search.LexicallyConstrainedBeamSearch( self.target_dictionary, args.constraints ) elif prefix_allowed_tokens_fn: search_strategy = search.PrefixConstrainedBeamSearch( self.target_dictionary, prefix_allowed_tokens_fn ) else: search_strategy = 
search.BeamSearch(self.target_dictionary) extra_gen_cls_kwargs = extra_gen_cls_kwargs or {} if seq_gen_cls is None: if getattr(args, "print_alignment", False): seq_gen_cls = SequenceGeneratorWithAlignment extra_gen_cls_kwargs["print_alignment"] = args.print_alignment else: seq_gen_cls = SequenceGenerator return seq_gen_cls( models, self.target_dictionary, beam_size=getattr(args, "beam", 5), max_len_a=getattr(args, "max_len_a", 0), max_len_b=getattr(args, "max_len_b", 200), min_len=getattr(args, "min_len", 1), normalize_scores=(not getattr(args, "unnormalized", False)), len_penalty=getattr(args, "lenpen", 1), unk_penalty=getattr(args, "unkpen", 0), temperature=getattr(args, "temperature", 1.0), match_source_len=getattr(args, "match_source_len", False), no_repeat_ngram_size=getattr(args, "no_repeat_ngram_size", 0), search_strategy=search_strategy, **extra_gen_cls_kwargs, ) def train_step( self, sample, model, criterion, optimizer, update_num, ignore_grad=False ): """ Do forward and backward, and return the loss as computed by *criterion* for the given *model* and *sample*. Args: sample (dict): the mini-batch. The format is defined by the :class:`~fairseq.data.FairseqDataset`. model (~fairseq.models.BaseFairseqModel): the model criterion (~fairseq.criterions.FairseqCriterion): the criterion optimizer (~fairseq.optim.FairseqOptimizer): the optimizer update_num (int): the current update ignore_grad (bool): multiply loss by 0 if this is set to True Returns: tuple: - the loss - the sample size, which is used as the denominator for the gradient - logging outputs to display while training """ model.train() model.set_num_updates(update_num) with torch.autograd.profiler.record_function("forward"): with torch.cuda.amp.autocast(enabled=(isinstance(optimizer, AMPOptimizer))): loss, sample_size, logging_output = criterion(model, sample) if ignore_grad: loss *= 0 with torch.autograd.profiler.record_function("backward"): optimizer.backward(loss) return loss, sample_size, logging_output def valid_step(self, sample, model, criterion): model.eval() with torch.no_grad(): loss, sample_size, logging_output = criterion(model, sample) return loss, sample_size, logging_output def optimizer_step(self, optimizer, model, update_num): optimizer.step() def build_dataset_for_inference( self, src_tokens: List[torch.Tensor], src_lengths: List[int], **kwargs ) -> torch.utils.data.Dataset: raise NotImplementedError def inference_step( self, generator, models, sample, prefix_tokens=None, constraints=None ): with torch.no_grad(): return generator.generate( models, sample, prefix_tokens=prefix_tokens, constraints=constraints ) def begin_epoch(self, epoch, model): """Hook function called before the start of each epoch.""" pass def begin_valid_epoch(self, epoch, model): """Hook function called before the start of each validation epoch.""" pass def aggregate_logging_outputs(self, logging_outputs, criterion): """[deprecated] Aggregate logging outputs from data parallel training.""" utils.deprecation_warning( "The aggregate_logging_outputs API is deprecated. " "Please use the reduce_metrics API instead." 
) with metrics.aggregate() as agg: self.reduce_metrics(logging_outputs, criterion) return agg.get_smoothed_values() def reduce_metrics(self, logging_outputs, criterion): """Aggregate logging outputs from data parallel training.""" # backward compatibility for tasks that override aggregate_logging_outputs base_func = FairseqTask.aggregate_logging_outputs self_func = getattr(self, "aggregate_logging_outputs").__func__ if self_func is not base_func: utils.deprecation_warning( "Tasks should implement the reduce_metrics API. " "Falling back to deprecated aggregate_logging_outputs API." ) agg_logging_outputs = self.aggregate_logging_outputs( logging_outputs, criterion ) for k, v in agg_logging_outputs.items(): metrics.log_scalar(k, v) return if not any("ntokens" in log for log in logging_outputs): warnings.warn( "ntokens not found in Criterion logging outputs, cannot log wpb or wps" ) else: ntokens = sum(log.get("ntokens", 0) for log in logging_outputs) metrics.log_scalar("wpb", ntokens, priority=180, round=1) metrics.log_speed("wps", ntokens, priority=90, round=1) if not any("nsentences" in log for log in logging_outputs): warnings.warn( "nsentences not found in Criterion logging outputs, cannot log bsz" ) else: nsentences = sum(log.get("nsentences", 0) for log in logging_outputs) metrics.log_scalar("bsz", nsentences, priority=190, round=1) criterion.__class__.reduce_metrics(logging_outputs) def state_dict(self): if self.state is not None: return self.state.state_dict return {} def load_state_dict(self, state_dict: Dict[str, Any]): if self.state is not None: self.state.merge_state_dict(state_dict) def max_positions(self): """Return the max input length allowed by the task.""" return None @property def source_dictionary(self): """Return the source :class:`~fairseq.data.Dictionary` (if applicable for this task).""" raise NotImplementedError @property def target_dictionary(self): """Return the target :class:`~fairseq.data.Dictionary` (if applicable for this task).""" raise NotImplementedError def build_tokenizer(self, args): """Build the pre-tokenizer for this task.""" return encoders.build_tokenizer(args) def build_bpe(self, args): """Build the tokenizer for this task.""" return encoders.build_bpe(args) def get_interactive_tokens_and_lengths(self, lines, encode_fn): tokens = [ self.source_dictionary.encode_line( encode_fn(src_str), add_if_not_exist=False ).long() for src_str in lines ] lengths = [t.numel() for t in tokens] return tokens, lengths class LegacyFairseqTask(FairseqTask): def __init__(self, args: Namespace): super().__init__(None) self.args = args self.datasets = {} self.dataset_to_epoch_iter = {} @classmethod def setup_task(cls, args: Namespace, **kwargs): """Setup the task (e.g., load dictionaries). Args: args (argparse.Namespace): parsed command-line arguments """ return cls(args, **kwargs) def has_sharded_data(self, split): return os.pathsep in getattr(self.args, "data", "") def build_model(self, args: Namespace): """ Build the :class:`~fairseq.models.BaseFairseqModel` instance for this task. Args: args (argparse.Namespace): parsed command-line arguments Returns: a :class:`~fairseq.models.BaseFairseqModel` instance """ from fairseq import models, quantization_utils model = models.build_model(args, self) model = quantization_utils.quantize_model_scalar(model, args) return model def build_criterion(self, args: Namespace): """ Build the :class:`~fairseq.criterions.FairseqCriterion` instance for this task. 
Args: args (argparse.Namespace): parsed command-line arguments Returns: a :class:`~fairseq.criterions.FairseqCriterion` instance """ from fairseq import criterions return criterions.build_criterion(args, self)
26,329
37.214804
110
py
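The build_generator docstring in fairseq_task.py above describes the prefix_allowed_tokens_fn callback only in prose. Below is a minimal sketch of such a callback that forces every hypothesis to begin with a fixed token prefix; the token ids and vocabulary size are invented for illustration, and depending on how the generator seeds the hypothesis (e.g. with an initial EOS), the step offset may need adjusting.

from typing import List

import torch

# Hypothetical ids; a real callback would look these up in task.target_dictionary.
FORCED_PREFIX = [5, 11, 42]        # token ids every hypothesis must start with
FULL_VOCAB = list(range(1000))     # allow everything once the prefix is emitted


def prefix_allowed_tokens_fn(batch_id: int, input_ids: torch.Tensor) -> List[int]:
    """Constrain beam search so each hypothesis starts with FORCED_PREFIX."""
    step = input_ids.shape[0]          # tokens generated so far for this beam
    if step < len(FORCED_PREFIX):
        return [FORCED_PREFIX[step]]   # inside the prefix: only one token allowed
    return FULL_VOCAB                  # past the prefix: unconstrained

Passed as task.build_generator(models, args, prefix_allowed_tokens_fn=prefix_allowed_tokens_fn), such a callback is routed into search.PrefixConstrainedBeamSearch by the elif chain shown above.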
sign-topic
sign-topic-main/fairseq/tasks/speech_to_speech.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from argparse import Namespace import json import logging import math from pathlib import Path import torch import torch.nn as nn from fairseq import utils from fairseq.data import Dictionary from fairseq.data.audio.data_cfg import S2SDataConfig, MultitaskConfig from fairseq.data.audio.speech_to_speech_dataset import SpeechToSpeechDatasetCreator from fairseq.tasks import LegacyFairseqTask, register_task from fairseq.tasks.text_to_speech import batch_mel_cepstral_distortion logger = logging.getLogger(__name__) class StackUnitSequenceGenerator(nn.Module): def __init__(self, tgt_dict, vocab_size): super().__init__() self.pad = tgt_dict.pad() self.eos = tgt_dict.eos() self.unk = tgt_dict.unk() self.offset = len(tgt_dict) - vocab_size self.vocab_size = vocab_size def pack_units(self, input: torch.Tensor, n_frames_per_step) -> torch.Tensor: if n_frames_per_step <= 1: return input bsz, _, n = input.shape assert n == n_frames_per_step scale = [ pow(self.vocab_size, n_frames_per_step - 1 - i) for i in range(n_frames_per_step) ] scale = torch.LongTensor(scale).squeeze(0).to(input.device) mask = input >= self.offset res = ((input - self.offset) * scale * mask).sum(dim=2) + self.offset return res @torch.no_grad() def generate(self, models, sample, **kwargs): # currently only support viterbi search for stacked units model = models[0] model.eval() max_len = model.max_decoder_positions() # TODO: incorporate max_len_a and max_len_b src_tokens = sample["net_input"]["src_tokens"] src_lengths = sample["net_input"]["src_lengths"] bsz, src_len, _ = src_tokens.size() n_frames_per_step = model.decoder.n_frames_per_step # initialize encoder_out = model.forward_encoder( src_tokens, src_lengths, speaker=sample["speaker"] ) incremental_state = {} pred_out, attn, scores = [], [], [] finished = src_tokens.new_zeros((bsz,)).bool() prev_output_tokens = src_lengths.new_zeros((bsz, 1)).long().fill_(self.eos) for _ in range(max_len): cur_out, cur_extra = model.forward_decoder( prev_output_tokens, encoder_out=encoder_out, incremental_state=incremental_state, ) lprobs = model.get_normalized_probs([cur_out], log_probs=True) # never select pad, unk lprobs[:, :, self.pad] = -math.inf lprobs[:, :, self.unk] = -math.inf cur_pred_lprob, cur_pred_out = torch.max(lprobs, dim=2) scores.append(cur_pred_lprob) pred_out.append(cur_pred_out) prev_output_tokens = torch.cat( ( prev_output_tokens, self.pack_units( cur_pred_out.view(bsz, 1, n_frames_per_step), n_frames_per_step ), ), dim=1, ) attn.append(cur_extra["attn"][0]) cur_finished = torch.any(cur_pred_out.squeeze(1) == self.eos, dim=1) finished = finished | cur_finished if finished.sum().item() == bsz: break pred_out = torch.cat(pred_out, dim=1).view(bsz, -1) attn = torch.cat(attn, dim=2) alignment = attn.max(dim=1)[1] attn = attn.repeat_interleave(n_frames_per_step, dim=2) alignment = alignment.repeat_interleave(n_frames_per_step, dim=1) scores = torch.cat(scores, dim=1) eos_idx = (pred_out == self.eos).nonzero(as_tuple=True) out_lens = src_lengths.new_zeros((bsz,)).long().fill_(max_len) for b, l in zip(eos_idx[0], eos_idx[1]): out_lens[b] = min(l, out_lens[b]) hypos = [ [ { "tokens": pred_out[b, :out_len], "attn": attn[b, :, :out_len], "alignment": alignment[b, :out_len], "positional_scores": scores[b, :out_len], "score": utils.item(scores[b, :out_len].sum().data), } ] for b, out_len in zip(range(bsz), 
out_lens) ] return hypos @register_task("speech_to_speech") class SpeechToSpeechTask(LegacyFairseqTask): @classmethod def add_args(cls, parser): parser.add_argument("data", help="manifest root path") parser.add_argument( "--config-yaml", type=str, default="config.yaml", help="Configuration YAML filename (under manifest root)", ) parser.add_argument( "--max-source-positions", default=6000, type=int, metavar="N", help="max number of tokens in the source sequence", ) parser.add_argument( "--max-target-positions", default=1024, type=int, metavar="N", help="max number of tokens in the target sequence", ) parser.add_argument( "--target-is-code", action="store_true", help="set if target is discrete unit instead of spectrogram", ) parser.add_argument( "--target-code-size", type=int, default=None, help="# discrete units" ) parser.add_argument( "--n-frames-per-step", type=int, default=1, help="# stacked frames, use 0 for reduced discrete unit sequence", ) parser.add_argument( "--multitask-config-yaml", type=str, default=None, help="Configuration YAML filename for the multitasks (under manifest root)", ) parser.add_argument("--eval-inference", action="store_true") parser.add_argument( "--eval-args", type=str, default="{}", help='generation args for speech-to-unit model , e.g., \'{"beam": 5, "max_len_a": 1}\', as JSON string', ) parser.add_argument("--eos-prob-threshold", type=float, default=0.5) parser.add_argument( "--mcd-normalize-type", type=str, default="targ", choices=["targ", "pred", "path"], ) parser.add_argument( "--vocoder", type=str, default="griffin_lim", choices=["griffin_lim", "hifigan", "code_hifigan"], ) parser.add_argument("--spec-bwd-max-iter", type=int, default=8) def __init__(self, args, tgt_dict): super().__init__(args) self.tgt_dict = tgt_dict self.data_cfg = S2SDataConfig(Path(args.data) / args.config_yaml) self.multitask_tasks = {} if getattr(args, "multitask_config_yaml", None) is not None: multitask_cfg = MultitaskConfig( Path(args.data) / args.multitask_config_yaml ) for task_name, task_config in multitask_cfg.get_all_tasks().items(): self.multitask_tasks[task_name] = DummyMultiTask( task_config, task_config.tgt_dict ) @classmethod def setup_task(cls, args, **kwargs): tgt_dict = None if args.target_is_code: assert args.target_code_size is not None tgt_dict = Dictionary() for i in range(args.target_code_size): tgt_dict.add_symbol(str(i)) logger.info(f"dictionary size: " f"{len(tgt_dict):,}") if getattr(args, "train_subset", None) is not None: if not all(s.startswith("train") for s in args.train_subset.split(",")): raise ValueError('Train splits should be named like "train*".') assert args.n_frames_per_step >= 1 assert ( not args.eval_inference or (args.target_is_code and args.vocoder == "code_hifigan") or (not args.target_is_code and args.vocoder != "code_hifigan") ) return cls(args, tgt_dict) def build_criterion(self, args): from fairseq import criterions if len(self.multitask_tasks) > 0: if self.args.target_is_code and args._name != "speech_to_unit": raise ValueError( "set --criterion speech_to_unit for speech-to-unit loss with multitask" ) elif not self.args.target_is_code and args._name != "speech_to_spectrogram": raise ValueError( "set --criterion speech_to_spectrogram for speech-to-spectrogram loss with multitask" ) return criterions.build_criterion(args, self) def load_dataset(self, split, epoch=1, combine=False, **kwargs): self.datasets[split] = SpeechToSpeechDatasetCreator.from_tsv( self.args.data, self.data_cfg, split, is_train_split=split.startswith("train"), 
epoch=epoch, seed=self.args.seed, target_is_code=self.args.target_is_code, target_dictionary=self.target_dictionary, n_frames_per_step=self.args.n_frames_per_step, multitask=self.multitask_tasks, ) @property def target_dictionary(self): return self.tgt_dict @property def source_dictionary(self): return None def max_positions(self): return self.args.max_source_positions, self.args.max_target_positions def build_model(self, args): args.input_feat_per_channel = self.data_cfg.input_feat_per_channel args.input_channels = self.data_cfg.input_transformed_channels args.target_speaker_embed = self.data_cfg.target_speaker_embed is not None args.n_frames_per_step = self.args.n_frames_per_step model = super().build_model(args) if len(self.multitask_tasks) > 0: from fairseq.models.speech_to_speech.s2s_transformer import ( S2STransformerMultitaskModelBase, ) assert isinstance(model, S2STransformerMultitaskModelBase) if self.args.eval_inference: self.eval_gen_args = json.loads(self.args.eval_args) self.generator = self.build_generator( [model], Namespace(**self.eval_gen_args) ) return model def build_generator( self, models, args, seq_gen_cls=None, extra_gen_cls_kwargs=None, ): if not self.args.target_is_code or self.args.eval_inference: from fairseq.models.text_to_speech.vocoder import get_vocoder self.vocoder = get_vocoder(self.args, self.data_cfg) self.vocoder = ( self.vocoder.cuda() if torch.cuda.is_available() and not self.args.cpu else self.vocoder.cpu() ) if self.args.target_is_code: if self.args.n_frames_per_step == 1: seq_generator = super().build_generator( models, args, seq_gen_cls=None, extra_gen_cls_kwargs=extra_gen_cls_kwargs, ) else: assert ( getattr(args, "beam", 1) == 1 and getattr(args, "nbest", 1) == 1 ), "only support viterbi search for stacked units" seq_generator = StackUnitSequenceGenerator( self.tgt_dict, self.args.target_code_size, ) else: if getattr(args, "teacher_forcing", False): from fairseq.speech_generator import ( TeacherForcingAutoRegressiveSpeechGenerator, ) generator = TeacherForcingAutoRegressiveSpeechGenerator logger.info("Teacher forcing mode for generation") else: from fairseq.speech_generator import AutoRegressiveSpeechGenerator generator = AutoRegressiveSpeechGenerator seq_generator = generator( models[0], self.vocoder, self.data_cfg, max_iter=self.args.max_target_positions, eos_prob_threshold=self.args.eos_prob_threshold, ) return seq_generator def train_step( self, sample, model, criterion, optimizer, update_num, ignore_grad=False ): for task_name, task_obj in self.multitask_tasks.items(): criterion.set_multitask_loss_weight( task_name, task_obj.args.get_loss_weight(update_num) ) loss, sample_size, logging_output = super().train_step( sample, model, criterion, optimizer, update_num, ignore_grad ) return loss, sample_size, logging_output def valid_step(self, sample, model, criterion): loss, sample_size, logging_output = super().valid_step(sample, model, criterion) if self.args.eval_inference: hypos, inference_losses = self.valid_step_with_inference( sample, model, self.generator ) for k, v in inference_losses.items(): assert k not in logging_output logging_output[k] = v return loss, sample_size, logging_output def valid_step_with_inference(self, sample, model, generator): if self.args.target_is_code: hypos = generator.generate([model], sample) tgt_lens = ( sample["target_lengths"] - 1 ) * self.args.n_frames_per_step # strip <eos> for b, (f, l) in enumerate(zip(sample["target"], tgt_lens)): hypos[b][0]["targ_waveform"] = self.vocoder( {"code": f[:l] - 4}, # remove 
<bos>, <pad>, <eos>, <unk> dur_prediction=self.eval_gen_args.get("dur_prediction", False), ) if len(hypos[b][0]["tokens"]) > 0: hypos[b][0]["waveform"] = self.vocoder( {"code": hypos[b][0]["tokens"] - 4}, dur_prediction=self.eval_gen_args.get("dur_prediction", False), ) else: hypos[b][0]["waveform"] = torch.flip( hypos[b][0]["targ_waveform"], dims=[0] ) else: hypos = [ [hypo] for hypo in generator.generate(model, sample, has_targ=True) ] losses = { "mcd_loss": 0.0, "targ_frames": 0.0, "pred_frames": 0.0, "path_frames": 0.0, "nins": 0.0, "ndel": 0.0, } rets = batch_mel_cepstral_distortion( [hypo[0]["targ_waveform"] for hypo in hypos], [hypo[0]["waveform"] for hypo in hypos], self.data_cfg.output_sample_rate, normalize_type=None, ) for d, extra in rets: pathmap = extra[-1] losses["mcd_loss"] += d.item() losses["targ_frames"] += pathmap.size(0) losses["pred_frames"] += pathmap.size(1) losses["path_frames"] += pathmap.sum().item() losses["nins"] += (pathmap.sum(dim=1) - 1).sum().item() losses["ndel"] += (pathmap.sum(dim=0) - 1).sum().item() losses["norm_frames"] = losses[ f"{getattr(self.args, 'mcd_normalize_type', 'targ')}_frames" ] return hypos, losses class DummyMultiTask(LegacyFairseqTask): def __init__(self, args, tgt_dict): super().__init__(args) self.tgt_dict = tgt_dict @property def target_dictionary(self): return self.tgt_dict def inference_step( self, generator, models, sample, prefix_tokens=None, constraints=None ): if self.args.decoder_type == "ctc": model = models[0] # only support single model encoder_out = model(**sample) if hasattr(model, "get_logits"): emissions = model.get_logits( encoder_out ) # no need to normalize emissions else: emissions = model.get_normalized_probs(encoder_out, log_probs=True) return generator.decode( emissions.transpose(0, 1).float().cpu().contiguous() ) else: raise NotImplementedError("only ctc decoder is supported at the moment") def build_generator( self, models, args, seq_gen_cls=None, extra_gen_cls_kwargs=None ): if self.args.decoder_type == "ctc": from examples.speech_recognition.w2l_decoder import W2lViterbiDecoder return W2lViterbiDecoder(args, self.tgt_dict) else: raise NotImplementedError("only ctc decoder is supported at the moment")
17,278
35.530655
116
py
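pack_units in StackUnitSequenceGenerator above collapses each group of n_frames_per_step predicted units into a single index by treating the group (minus the dictionary offset) as a base-vocab_size number. A self-contained sketch of that arithmetic and its inverse, with an invented vocabulary size and offset and ignoring the special-symbol mask that the real code applies:

from typing import List

VOCAB_SIZE = 100   # number of discrete units (illustrative)
OFFSET = 4         # dictionary symbols before the units (bos/pad/eos/unk)


def pack(units: List[int], n_frames_per_step: int) -> int:
    """Pack one group of unit ids into a single id, mirroring pack_units."""
    assert len(units) == n_frames_per_step
    packed = 0
    for u in units:
        packed = packed * VOCAB_SIZE + (u - OFFSET)
    return packed + OFFSET


def unpack(packed: int, n_frames_per_step: int) -> List[int]:
    """Invert pack(): recover the stacked unit ids."""
    value = packed - OFFSET
    units = []
    for _ in range(n_frames_per_step):
        units.append(value % VOCAB_SIZE + OFFSET)
        value //= VOCAB_SIZE
    return list(reversed(units))


group = [7, 12, 99]                # three stacked units for one output step
packed = pack(group, 3)
assert unpack(packed, 3) == group  # the encoding round-trips
print(group, "->", packed)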
sign-topic
sign-topic-main/fairseq/tasks/audio_finetuning.py
# Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory. import logging import os import torch import json from argparse import Namespace from dataclasses import dataclass, field from typing import Optional, Any from fairseq.data import AddTargetDataset, Dictionary, encoders from fairseq.tasks.audio_pretraining import AudioPretrainingTask, AudioPretrainingConfig from fairseq.dataclass import FairseqDataclass from fairseq.dataclass.configs import GenerationConfig from fairseq.data.text_compressor import TextCompressor, TextCompressionLevel from . import register_task from .. import utils from ..logging import metrics logger = logging.getLogger(__name__) class LabelEncoder(object): def __init__(self, dictionary): self.dictionary = dictionary def __call__(self, label): return self.dictionary.encode_line( label, append_eos=False, add_if_not_exist=False ) def label_len_fn(label): return len(label.split(" ")) @dataclass class AudioFinetuningConfig(AudioPretrainingConfig): # Options for reporting WER metrics during validation. Only applicable to # Seq2Seq models during fine-tuning eval_wer: bool = field( default=False, metadata={"help": "compute WER for Seq2Seq models"} ) eval_wer_config: GenerationConfig = field( default_factory=lambda: GenerationConfig(), metadata={"help": "beam search config for evaluating wer during training"}, ) eval_wer_tokenizer: Any = field( default=None, metadata={"help": "tokenizer config for evaluating wer during training"}, ) eval_wer_post_process: str = field( default="letter", metadata={ "help": "remove BPE tokens before scoring (can be sentencepiece, letter, and more)" }, ) eval_bleu: bool = field( default=False, metadata={"help": "evaluation with BLEU scores"} ) eval_bleu_detok: Optional[str] = field( default=None, metadata={ "help": "detokenize before computing BLEU (e.g., 'moses'); " "required if using --eval-bleu; use 'space' to disable " "detokenization; see fairseq.data.encoders for other options" }, ) eval_bleu_detok_args: str = field( default="{}", metadata={"help": "args for building the tokenizer, if needed"} ) eval_tokenized_bleu: bool = field( default=False, metadata={"help": "compute tokenized BLEU instead of sacrebleu"} ) eval_bleu_remove_bpe: Optional[str] = field( default=None, metadata={"help": "remove BPE before computing BLEU"} ) eval_bleu_args: str = field( default="{}", metadata={ "help": "generation args for BLEU scoring, e.g., " '\'{"beam": 4, "lenpen": 0.6}\'' }, ) eval_bleu_print_samples: bool = field( default=False, metadata={"help": "print sample generations during validation"} ) autoregressive: bool = field( default=False, metadata={ "help": "required for autoregressive decoders (like seq2seq models); " "adds 'prev_output_tokens' to input and appends eos to target" }, ) @register_task("audio_finetuning", dataclass=AudioFinetuningConfig) class AudioFinetuningTask(AudioPretrainingTask): """ """ cfg: AudioFinetuningConfig def __init__( self, cfg: AudioFinetuningConfig, ): super().__init__(cfg) self.blank_symbol = "<s>" self.state.add_factory("target_dictionary", self.load_target_dictionary) def load_target_dictionary(self): if self.cfg.labels: dict_path = os.path.join(self.cfg.data, f"dict.{self.cfg.labels}.txt") return Dictionary.load(dict_path) return None def load_dataset( self, split: str, task_cfg:
AudioFinetuningConfig = None, **kwargs ): super().load_dataset(split, task_cfg, **kwargs) task_cfg = task_cfg or self.cfg assert task_cfg.labels is not None text_compression_level = getattr( TextCompressionLevel, str(self.cfg.text_compression_level) ) data_path = self.cfg.data label_path = os.path.join(data_path, f"{split}.{task_cfg.labels}") skipped_indices = getattr(self.datasets[split], "skipped_indices", set()) text_compressor = TextCompressor(level=text_compression_level) with open(label_path, "r") as f: labels = [ text_compressor.compress(l) for i, l in enumerate(f) if i not in skipped_indices ] assert len(labels) == len(self.datasets[split]), ( f"labels length ({len(labels)}) and dataset length " f"({len(self.datasets[split])}) do not match" ) process_label = LabelEncoder(self.target_dictionary) self.datasets[split] = AddTargetDataset( self.datasets[split], labels, pad=self.target_dictionary.pad(), eos=self.target_dictionary.eos(), batch_targets=True, process_label=process_label, label_len_fn=label_len_fn, add_to_input=task_cfg.get("autoregressive", False), text_compression_level=text_compression_level, ) @property def target_dictionary(self): """Return the :class:`~fairseq.data.Dictionary` for the language model.""" return self.state.target_dictionary def valid_step(self, sample, model, criterion): loss, sample_size, logging_output = super().valid_step(sample, model, criterion) if self.cfg.eval_wer and self.cfg.autoregressive: metrics = self._inference_with_wer(self.sequence_generator, sample, model) logging_output["_num_char_errors"] = metrics["num_char_errors"] logging_output["_num_chars"] = metrics["num_chars"] logging_output["_num_word_errors"] = metrics["num_word_errors"] logging_output["_num_words"] = metrics["num_words"] if self.cfg.eval_bleu and self.cfg.autoregressive: metrics = self._inference_with_bleu(self.sequence_generator, sample, model) logging_output["_bleu_sys_len"] = metrics.sys_len logging_output["_bleu_ref_len"] = metrics.ref_len # we split counts into separate entries so that they can be # summed efficiently across workers using fast-stat-sync assert len(metrics.counts) == 4 for i in range(4): logging_output[f"_bleu_counts_{i}"] = metrics.counts[i] logging_output[f"_bleu_totals_{i}"] = metrics.totals[i] return loss, sample_size, logging_output def build_model(self, model_cfg: FairseqDataclass): model = super().build_model(model_cfg) if self.cfg.eval_wer and self.cfg.autoregressive: self.sequence_generator = self.build_generator( [model], self.cfg.eval_wer_config, ) if self.cfg.eval_wer_tokenizer: self.tokenizer = encoders.build_tokenizer(self.cfg.eval_wer_tokenizer) else: self.tokenizer = None if self.cfg.eval_bleu and self.cfg.autoregressive: assert self.cfg.eval_bleu_detok is not None, ( "--eval-bleu-detok is required if using --eval-bleu; " "try --eval-bleu-detok=moses (or --eval-bleu-detok=space " "to disable detokenization, e.g., when using sentencepiece)" ) detok_args = json.loads(self.cfg.eval_bleu_detok_args) self.tokenizer = encoders.build_tokenizer( Namespace(tokenizer=self.cfg.eval_bleu_detok, **detok_args) ) gen_args = json.loads(self.cfg.eval_bleu_args) gen_args = Namespace(**gen_args) self.sequence_generator = self.build_generator([model], gen_args) return model def _inference_with_wer(self, generator, sample, model): import editdistance def decode(toks): s = self.target_dictionary.string( toks.int().cpu(), self.cfg.eval_wer_post_process, escape_unk=True, ) if self.tokenizer: s = self.tokenizer.decode(s) return s num_word_errors, num_char_errors 
= 0, 0 num_chars, num_words = 0, 0 gen_out = self.inference_step(generator, [model], sample, None) for i in range(len(gen_out)): hyp = decode(gen_out[i][0]["tokens"]) ref = decode( utils.strip_pad(sample["target"][i], self.target_dictionary.pad()), ) num_char_errors += editdistance.eval(hyp, ref) num_chars += len(ref) hyp_words = hyp.split() ref_words = ref.split() num_word_errors += editdistance.eval(hyp_words, ref_words) num_words += len(ref_words) return { "num_char_errors": num_char_errors, "num_chars": num_chars, "num_word_errors": num_word_errors, "num_words": num_words, } def _inference_with_bleu(self, generator, sample, model): import sacrebleu def decode(toks, is_ref): s = self.target_dictionary.string( toks.int().cpu(), self.cfg.eval_bleu_remove_bpe, # The default unknown string in fairseq is `<unk>`, but # this is tokenized by sacrebleu as `< unk >`, inflating # BLEU scores. Instead, we use a somewhat more verbose # alternative that is unlikely to appear in the real # reference, but doesn't get split into multiple tokens. unk_string=("UNKNOWNTOKENINREF" if is_ref else "UNKNOWNTOKENINHYP"), ) if self.tokenizer: s = self.tokenizer.decode(s) return s gen_out = self.inference_step(generator, [model], sample) hyps, refs = [], [] for i in range(len(gen_out)): hyps.append(decode(gen_out[i][0]["tokens"], is_ref=False)) refs.append( decode( utils.strip_pad(sample["target"][i], self.target_dictionary.pad()), is_ref=True, # don't count <unk> as matches to the hypo ) ) if self.cfg.eval_bleu_print_samples: logger.info("H-{} {}".format(sample["id"][0], hyps[0])) logger.info("T-{} {}".format(sample["id"][0], refs[0])) eval_tokenization = "none" if self.cfg.eval_tokenized_bleu else "13a" return sacrebleu.corpus_bleu(hyps, [refs], tokenize=eval_tokenization) def reduce_metrics(self, logging_outputs, criterion): super().reduce_metrics(logging_outputs, criterion) if self.cfg.eval_wer: zero = torch.scalar_tensor(0.0) num_char_errors = sum( log.get("_num_char_errors", zero) for log in logging_outputs ) num_chars = sum(log.get("_num_chars", zero) for log in logging_outputs) num_word_errors = sum( log.get("_num_word_errors", zero) for log in logging_outputs ) num_words = sum(log.get("_num_words", zero) for log in logging_outputs) metrics.log_scalar("_num_char_errors", num_char_errors) metrics.log_scalar("_num_chars", num_chars) metrics.log_scalar("_num_word_errors", num_word_errors) metrics.log_scalar("_num_words", num_words) if num_chars > 0: metrics.log_derived( "uer", lambda meters: meters["_num_char_errors"].sum * 100.0 / meters["_num_chars"].sum if meters["_num_chars"].sum > 0 else float("nan"), ) if num_words > 0: metrics.log_derived( "wer", lambda meters: meters["_num_word_errors"].sum * 100.0 / meters["_num_words"].sum if meters["_num_words"].sum > 0 else float("nan"), ) if self.cfg.eval_bleu: len_keys = ["_bleu_sys_len", "_bleu_ref_len"] count_keys = [f"_bleu_counts_{i}" for i in range(4)] total_keys = [f"_bleu_totals_{i}" for i in range(4)] for k in len_keys + count_keys + total_keys: metrics.log_scalar(k, sum(log.get(k, 0) for log in logging_outputs)) import sacrebleu metrics.log_derived( "bleu", lambda meters: sacrebleu.compute_bleu( correct=[meters[k].sum for k in count_keys], total=[meters[k].sum for k in total_keys], sys_len=meters["_bleu_sys_len"].sum, ref_len=meters["_bleu_ref_len"].sum, smooth_method="exp", ).score, )
13,463
38.139535
95
py
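In audio_finetuning.py above, _inference_with_wer only accumulates raw character- and word-level edit-distance counts; reduce_metrics later turns the summed counts into uer/wer percentages. The sketch below reproduces that bookkeeping with hard-coded hypothesis/reference pairs in place of real decoder output (it assumes the editdistance package is installed):

import editdistance

# Illustrative (hypothesis, reference) pairs; the task gets these from
# inference_step() and target_dictionary.string().
pairs = [
    ("a cat sat on the mat", "the cat sat on the mat"),
    ("hello world", "hello there world"),
]

num_char_errors = num_chars = 0
num_word_errors = num_words = 0
for hyp, ref in pairs:
    num_char_errors += editdistance.eval(hyp, ref)
    num_chars += len(ref)
    num_word_errors += editdistance.eval(hyp.split(), ref.split())
    num_words += len(ref.split())

# reduce_metrics() derives the same ratios from counts summed across workers.
uer = 100.0 * num_char_errors / num_chars
wer = 100.0 * num_word_errors / num_words
print(f"uer={uer:.2f} wer={wer:.2f}")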
sign-topic
sign-topic-main/fairseq/tasks/translation_multi_simple_epoch.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import datetime import logging import time import torch from fairseq.data import ( FairseqDataset, LanguagePairDataset, ListDataset, data_utils, iterators, ) from fairseq.data.multilingual.multilingual_data_manager import ( MultilingualDatasetManager, ) from fairseq.data.multilingual.sampling_method import SamplingMethod from fairseq.tasks import LegacyFairseqTask, register_task from fairseq.utils import FileContentsAction ### def get_time_gap(s, e): return ( datetime.datetime.fromtimestamp(e) - datetime.datetime.fromtimestamp(s) ).__str__() ### logger = logging.getLogger(__name__) @register_task("translation_multi_simple_epoch") class TranslationMultiSimpleEpochTask(LegacyFairseqTask): """ Translate from one (source) language to another (target) language. Args: langs (List[str]): a list of languages that are being supported dicts (Dict[str, fairseq.data.Dictionary]): mapping from supported languages to their dictionaries training (bool): whether the task should be configured for training or not .. note:: The translation task is compatible with :mod:`fairseq-train`, :mod:`fairseq-generate` and :mod:`fairseq-interactive`. The translation task provides the following additional command-line arguments: .. argparse:: :ref: fairseq.tasks.translation_parser :prog: """ @staticmethod def add_args(parser): """Add task-specific arguments to the parser.""" # fmt: off parser.add_argument('-s', '--source-lang', default=None, metavar='SRC', help='inference source language') parser.add_argument('-t', '--target-lang', default=None, metavar='TARGET', help='inference target language') parser.add_argument('--lang-pairs', default=None, metavar='PAIRS', help='comma-separated list of language pairs (in training order): en-de,en-fr,de-fr', action=FileContentsAction) parser.add_argument('--keep-inference-langtok', action='store_true', help='keep language tokens in inference output (e.g. for analysis or debugging)') SamplingMethod.add_arguments(parser) MultilingualDatasetManager.add_args(parser) # fmt: on def __init__(self, args, langs, dicts, training): super().__init__(args) self.langs = langs self.dicts = dicts self.training = training if training: self.lang_pairs = args.lang_pairs else: self.lang_pairs = ["{}-{}".format(args.source_lang, args.target_lang)] # eval_lang_pairs for multilingual translation is usually all of the # lang_pairs. However for other multitask settings or when we want to # optimize for certain languages we want to use a different subset. Thus # the eval_lang_pairs class variable is provided for classes that extend # this class. self.eval_lang_pairs = self.lang_pairs # model_lang_pairs will be used to build encoder-decoder model pairs in # models.build_model(). 
This allows multitask sub-classes to # build models other than from the input lang_pairs self.model_lang_pairs = self.lang_pairs self.source_langs = [d.split("-")[0] for d in self.lang_pairs] self.target_langs = [d.split("-")[1] for d in self.lang_pairs] self.check_dicts(self.dicts, self.source_langs, self.target_langs) self.sampling_method = SamplingMethod.build_sampler(args, self) self.data_manager = MultilingualDatasetManager.setup_data_manager( args, self.lang_pairs, langs, dicts, self.sampling_method ) def check_dicts(self, dicts, source_langs, target_langs): if self.args.source_dict is not None or self.args.target_dict is not None: # no need to check whether the source side and target side are sharing dictionaries return src_dict = dicts[source_langs[0]] tgt_dict = dicts[target_langs[0]] for src_lang in source_langs: assert ( src_dict == dicts[src_lang] ), "Different dictionaries are specified for different source languages; " "TranslationMultiSimpleEpochTask only supports one shared dictionary across all source languages" for tgt_lang in target_langs: assert ( tgt_dict == dicts[tgt_lang] ), "Different dictionaries are specified for different target languages; " "TranslationMultiSimpleEpochTask only supports one shared dictionary across all target languages" @classmethod def setup_task(cls, args, **kwargs): langs, dicts, training = MultilingualDatasetManager.prepare( cls.load_dictionary, args, **kwargs ) return cls(args, langs, dicts, training) def has_sharded_data(self, split): return self.data_manager.has_sharded_data(split) def load_dataset(self, split, epoch=1, combine=False, **kwargs): """Load a given dataset split. Args: split (str): name of the split (e.g., train, valid, test) """ if split in self.datasets: dataset = self.datasets[split] if self.has_sharded_data(split): if self.args.virtual_epoch_size is not None: if dataset.load_next_shard: shard_epoch = dataset.shard_epoch else: # no need to load next shard so skip loading # also this avoids always loading from the beginning of the data return else: shard_epoch = epoch else: # estimate the shard epoch from virtual data size and virtual epoch size shard_epoch = self.data_manager.estimate_global_pass_epoch(epoch) logger.info(f"loading data for {split} epoch={epoch}/{shard_epoch}") logger.info(f"mem usage: {data_utils.get_mem_usage()}") if split in self.datasets: del self.datasets[split] logger.info("old dataset deleted manually") logger.info(f"mem usage: {data_utils.get_mem_usage()}") self.datasets[split] = self.data_manager.load_dataset( split, self.training, epoch=epoch, combine=combine, shard_epoch=shard_epoch, **kwargs, ) def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None): if constraints is not None: raise NotImplementedError( "Constrained decoding with the multilingual_translation task is not supported" ) src_data = ListDataset(src_tokens, src_lengths) dataset = LanguagePairDataset(src_data, src_lengths, self.source_dictionary) src_langtok_spec, tgt_langtok_spec = self.args.langtoks["main"] if self.args.lang_tok_replacing_bos_eos: dataset = self.data_manager.alter_dataset_langtok( dataset, src_eos=self.source_dictionary.eos(), src_lang=self.args.source_lang, tgt_eos=self.target_dictionary.eos(), tgt_lang=self.args.target_lang, src_langtok_spec=src_langtok_spec, tgt_langtok_spec=tgt_langtok_spec, ) else: dataset.src = self.data_manager.src_dataset_tranform_func( self.args.source_lang, self.args.target_lang, dataset=dataset.src, spec=src_langtok_spec, ) return dataset def build_generator( self, models,
args, seq_gen_cls=None, extra_gen_cls_kwargs=None, ): if not getattr(args, "keep_inference_langtok", False): _, tgt_langtok_spec = self.args.langtoks["main"] if tgt_langtok_spec: tgt_lang_tok = self.data_manager.get_decoder_langtok( self.args.target_lang, tgt_langtok_spec ) extra_gen_cls_kwargs = extra_gen_cls_kwargs or {} extra_gen_cls_kwargs["symbols_to_strip_from_output"] = {tgt_lang_tok} return super().build_generator( models, args, seq_gen_cls=None, extra_gen_cls_kwargs=extra_gen_cls_kwargs ) def build_model(self, args): return super().build_model(args) def valid_step(self, sample, model, criterion): loss, sample_size, logging_output = super().valid_step(sample, model, criterion) return loss, sample_size, logging_output def inference_step( self, generator, models, sample, prefix_tokens=None, constraints=None ): with torch.no_grad(): _, tgt_langtok_spec = self.args.langtoks["main"] if not self.args.lang_tok_replacing_bos_eos: if prefix_tokens is None and tgt_langtok_spec: tgt_lang_tok = self.data_manager.get_decoder_langtok( self.args.target_lang, tgt_langtok_spec ) src_tokens = sample["net_input"]["src_tokens"] bsz = src_tokens.size(0) prefix_tokens = ( torch.LongTensor([[tgt_lang_tok]]).expand(bsz, 1).to(src_tokens) ) return generator.generate( models, sample, prefix_tokens=prefix_tokens, constraints=constraints, ) else: return generator.generate( models, sample, prefix_tokens=prefix_tokens, bos_token=self.data_manager.get_decoder_langtok( self.args.target_lang, tgt_langtok_spec ) if tgt_langtok_spec else self.target_dictionary.eos(), ) def reduce_metrics(self, logging_outputs, criterion): super().reduce_metrics(logging_outputs, criterion) def max_positions(self): """Return the max sentence length allowed by the task.""" return (self.args.max_source_positions, self.args.max_target_positions) @property def source_dictionary(self): return self.data_manager.get_source_dictionary(self.source_langs[0]) @property def target_dictionary(self): return self.data_manager.get_target_dictionary(self.target_langs[0]) def create_batch_sampler_func( self, max_positions, ignore_invalid_inputs, max_tokens, max_sentences, required_batch_size_multiple=1, seed=1, ): def construct_batch_sampler(dataset, epoch): splits = [ s for s, _ in self.datasets.items() if self.datasets[s] == dataset ] split = splits[0] if len(splits) > 0 else None # NEW implementation if epoch is not None: # initialize the dataset with the correct starting epoch dataset.set_epoch(epoch) # get indices ordered by example size start_time = time.time() logger.info(f"start batch sampler: mem usage: {data_utils.get_mem_usage()}") with data_utils.numpy_seed(seed): indices = dataset.ordered_indices() logger.info( f"[{split}] @batch_sampler order indices time: {get_time_gap(start_time, time.time())}" ) logger.info(f"mem usage: {data_utils.get_mem_usage()}") # filter examples that are too large if max_positions is not None: my_time = time.time() indices = self.filter_indices_by_size( indices, dataset, max_positions, ignore_invalid_inputs ) logger.info( f"[{split}] @batch_sampler filter_by_size time: {get_time_gap(my_time, time.time())}" ) logger.info(f"mem usage: {data_utils.get_mem_usage()}") # create mini-batches with given size constraints my_time = time.time() batch_sampler = dataset.batch_by_size( indices, max_tokens=max_tokens, max_sentences=max_sentences, required_batch_size_multiple=required_batch_size_multiple, ) logger.info( f"[{split}] @batch_sampler batch_by_size time: {get_time_gap(my_time, time.time())}" ) logger.info( 
f"[{split}] per epoch batch_sampler set-up time: {get_time_gap(start_time, time.time())}" ) logger.info(f"mem usage: {data_utils.get_mem_usage()}") return batch_sampler return construct_batch_sampler # we need to override get_batch_iterator because we want to reset the epoch iterator each time def get_batch_iterator( self, dataset, max_tokens=None, max_sentences=None, max_positions=None, ignore_invalid_inputs=False, required_batch_size_multiple=1, seed=1, num_shards=1, shard_id=0, num_workers=0, epoch=1, data_buffer_size=0, disable_iterator_cache=False, skip_remainder_batch=False, grouped_shuffling=False, update_epoch_batch_itr=False, ): """ Get an iterator that yields batches of data from the given dataset. Args: dataset (~fairseq.data.FairseqDataset): dataset to batch max_tokens (int, optional): max number of tokens in each batch (default: None). max_sentences (int, optional): max number of sentences in each batch (default: None). max_positions (optional): max sentence length supported by the model (default: None). ignore_invalid_inputs (bool, optional): don't raise Exception for sentences that are too long (default: False). required_batch_size_multiple (int, optional): require batch size to be a multiple of N (default: 1). seed (int, optional): seed for random number generator for reproducibility (default: 1). num_shards (int, optional): shard the data iterator into N shards (default: 1). shard_id (int, optional): which shard of the data iterator to return (default: 0). num_workers (int, optional): how many subprocesses to use for data loading. 0 means the data will be loaded in the main process (default: 0). epoch (int, optional): the epoch to start the iterator from (default: 0). data_buffer_size (int, optional): number of batches to preload (default: 0). disable_iterator_cache (bool, optional): don't cache the EpochBatchIterator (ignores `FairseqTask::can_reuse_epoch_itr`) (default: False). grouped_shuffling (bool, optional): group batches with each groups containing num_shards batches and shuffle groups. Reduces difference between sequence lengths among workers for batches sorted by length. 
update_epoch_batch_itr (bool, optional): if true then do not use the cached batch iterator for the epoch Returns: ~fairseq.iterators.EpochBatchIterator: a batched iterator over the given dataset split """ # initialize the dataset with the correct starting epoch assert isinstance(dataset, FairseqDataset) if dataset in self.dataset_to_epoch_iter: return self.dataset_to_epoch_iter[dataset] if self.args.sampling_method == "RoundRobin": batch_iter = super().get_batch_iterator( dataset, max_tokens=max_tokens, max_sentences=max_sentences, max_positions=max_positions, ignore_invalid_inputs=ignore_invalid_inputs, required_batch_size_multiple=required_batch_size_multiple, seed=seed, num_shards=num_shards, shard_id=shard_id, num_workers=num_workers, epoch=epoch, data_buffer_size=data_buffer_size, disable_iterator_cache=disable_iterator_cache, skip_remainder_batch=skip_remainder_batch, update_epoch_batch_itr=update_epoch_batch_itr, ) self.dataset_to_epoch_iter[dataset] = batch_iter return batch_iter construct_batch_sampler = self.create_batch_sampler_func( max_positions, ignore_invalid_inputs, max_tokens, max_sentences, required_batch_size_multiple=required_batch_size_multiple, seed=seed, ) epoch_iter = iterators.EpochBatchIterator( dataset=dataset, collate_fn=dataset.collater, batch_sampler=construct_batch_sampler, seed=seed, num_shards=num_shards, shard_id=shard_id, num_workers=num_workers, epoch=epoch, ) return epoch_iter
17,886
39.468326
113
py
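inference_step in translation_multi_simple_epoch.py above forces the decoder-side language token by building a (batch_size, 1) prefix tensor from a single token id. A minimal sketch of that tensor construction with an invented token id and a dummy source batch, independent of the data manager:

import torch

tgt_lang_tok = 250_004   # hypothetical id of the target-language symbol
src_tokens = torch.zeros(3, 7, dtype=torch.long)   # stand-in for a batch of 3 sources

bsz = src_tokens.size(0)
# One column per sentence, all holding the language token, moved to the same
# device/dtype as the source batch -- the shape generate() expects for prefix_tokens.
prefix_tokens = torch.LongTensor([[tgt_lang_tok]]).expand(bsz, 1).to(src_tokens)
print(prefix_tokens)   # tensor([[250004], [250004], [250004]])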
sign-topic
sign-topic-main/docs/conf.py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # fairseq documentation build configuration file, created by # sphinx-quickstart on Fri Aug 17 21:45:30 2018. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. import os import sys from fairseq import __version__ # source code directory, relative to this file, for sphinx-autobuild sys.path.insert(0, os.path.abspath("..")) source_suffix = [".rst"] # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ "sphinx.ext.autodoc", "sphinx.ext.intersphinx", "sphinx.ext.viewcode", "sphinx.ext.napoleon", "sphinxarg.ext", ] # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] # The master toctree document. master_doc = "index" # General information about the project. project = "fairseq" copyright = "Facebook AI Research (FAIR)" author = "Facebook AI Research (FAIR)" github_doc_root = "https://github.com/pytorch/fairseq/tree/main/docs/" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = __version__ # The full version, including alpha/beta/rc tags. release = __version__ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] # The name of the Pygments (syntax highlighting) style to use. pygments_style = "sphinx" highlight_language = "python" # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = "sphinx_rtd_theme" # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # # html_theme_options = {} # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". 
html_static_path = ["_static"] html_context = { "css_files": [ "_static/theme_overrides.css", # override wide tables in RTD theme ], } # Custom sidebar templates, must be a dictionary that maps document names # to template names. # # This is required for the alabaster theme # refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars # html_sidebars = { # '**': [ # 'about.html', # 'navigation.html', # 'relations.html', # needs 'show_related': True theme option to display # 'searchbox.html', # 'donate.html', # ] # } # Example configuration for intersphinx: refer to the Python standard library. intersphinx_mapping = { "numpy": ("http://docs.scipy.org/doc/numpy/", None), "python": ("https://docs.python.org/", None), "torch": ("https://pytorch.org/docs/master/", None), }
4,270
30.637037
80
py
sign-topic
sign-topic-main/fairseq_cli/generate.py
#!/usr/bin/env python3 -u # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Translate pre-processed data with a trained model. """ import ast import logging import math import os import sys from argparse import Namespace from itertools import chain import numpy as np import torch from omegaconf import DictConfig from fairseq import checkpoint_utils, options, scoring, tasks, utils from fairseq.dataclass.utils import convert_namespace_to_omegaconf from fairseq.logging import progress_bar from fairseq.logging.meters import StopwatchMeter, TimeMeter def main(cfg: DictConfig): if isinstance(cfg, Namespace): cfg = convert_namespace_to_omegaconf(cfg) assert cfg.common_eval.path is not None, "--path required for generation!" assert ( not cfg.generation.sampling or cfg.generation.nbest == cfg.generation.beam ), "--sampling requires --nbest to be equal to --beam" assert ( cfg.generation.replace_unk is None or cfg.dataset.dataset_impl == "raw" ), "--replace-unk requires a raw text dataset (--dataset-impl=raw)" if cfg.common_eval.results_path is not None: os.makedirs(cfg.common_eval.results_path, exist_ok=True) output_path = os.path.join( cfg.common_eval.results_path, "generate-{}.txt".format(cfg.dataset.gen_subset), ) with open(output_path, "w", buffering=1, encoding="utf-8") as h: return _main(cfg, h) else: return _main(cfg, sys.stdout) def get_symbols_to_strip_from_output(generator): if hasattr(generator, "symbols_to_strip_from_output"): return generator.symbols_to_strip_from_output else: return {generator.eos} def _main(cfg: DictConfig, output_file): logging.basicConfig( format="%(asctime)s | %(levelname)s | %(name)s | %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=os.environ.get("LOGLEVEL", "INFO").upper(), stream=output_file, ) logger = logging.getLogger("fairseq_cli.generate") utils.import_user_module(cfg.common) if cfg.dataset.max_tokens is None and cfg.dataset.batch_size is None: cfg.dataset.max_tokens = 12000 logger.info(cfg) # Fix seed for stochastic decoding if cfg.common.seed is not None and not cfg.generation.no_seed_provided: np.random.seed(cfg.common.seed) utils.set_torch_seed(cfg.common.seed) use_cuda = torch.cuda.is_available() and not cfg.common.cpu # Load dataset splits task = tasks.setup_task(cfg.task) # Set dictionaries try: src_dict = getattr(task, "source_dictionary", None) except NotImplementedError: src_dict = None tgt_dict = task.target_dictionary overrides = ast.literal_eval(cfg.common_eval.model_overrides) # Load ensemble logger.info("loading model(s) from {}".format(cfg.common_eval.path)) models, saved_cfg = checkpoint_utils.load_model_ensemble( utils.split_paths(cfg.common_eval.path), arg_overrides=overrides, task=task, suffix=cfg.checkpoint.checkpoint_suffix, strict=(cfg.checkpoint.checkpoint_shard_count == 1), num_shards=cfg.checkpoint.checkpoint_shard_count, ) # loading the dataset should happen after the checkpoint has been loaded so we can give it the saved task config task.load_dataset(cfg.dataset.gen_subset, task_cfg=saved_cfg.task) if cfg.generation.lm_path is not None: overrides["data"] = cfg.task.data try: lms, _ = checkpoint_utils.load_model_ensemble( [cfg.generation.lm_path], arg_overrides=overrides, task=None ) except: logger.warning( f"Failed to load language model! 
Please make sure that the language model dict is the same " f"as target dict and is located in the data dir ({cfg.task.data})" ) raise assert len(lms) == 1 else: lms = [None] # Optimize ensemble for generation for model in chain(models, lms): if model is None: continue if cfg.common.fp16: model.half() if use_cuda and not cfg.distributed_training.pipeline_model_parallel: model.cuda() model.prepare_for_inference_(cfg) # Load alignment dictionary for unknown word replacement # (None if no unknown word replacement, empty if no path to align dictionary) align_dict = utils.load_align_dict(cfg.generation.replace_unk) # Load dataset (possibly sharded) itr = task.get_batch_iterator( dataset=task.dataset(cfg.dataset.gen_subset), max_tokens=cfg.dataset.max_tokens, max_sentences=cfg.dataset.batch_size, max_positions=utils.resolve_max_positions( task.max_positions(), *[m.max_positions() for m in models] ), ignore_invalid_inputs=cfg.dataset.skip_invalid_size_inputs_valid_test, required_batch_size_multiple=cfg.dataset.required_batch_size_multiple, seed=cfg.common.seed, num_shards=cfg.distributed_training.distributed_world_size, shard_id=cfg.distributed_training.distributed_rank, num_workers=cfg.dataset.num_workers, data_buffer_size=cfg.dataset.data_buffer_size, ).next_epoch_itr(shuffle=False) progress = progress_bar.progress_bar( itr, log_format=cfg.common.log_format, log_interval=cfg.common.log_interval, default_log_format=("tqdm" if not cfg.common.no_progress_bar else "simple"), ) # Initialize generator gen_timer = StopwatchMeter() extra_gen_cls_kwargs = {"lm_model": lms[0], "lm_weight": cfg.generation.lm_weight} generator = task.build_generator( models, cfg.generation, extra_gen_cls_kwargs=extra_gen_cls_kwargs ) # Handle tokenization and BPE tokenizer = task.build_tokenizer(cfg.tokenizer) bpe = task.build_bpe(cfg.bpe) def decode_fn(x): if bpe is not None: x = bpe.decode(x) if tokenizer is not None: x = tokenizer.decode(x) return x scorer = scoring.build_scorer(cfg.scoring, tgt_dict) num_sentences = 0 has_target = True wps_meter = TimeMeter() for sample in progress: sample = utils.move_to_cuda(sample) if use_cuda else sample if "net_input" not in sample: continue prefix_tokens = None if cfg.generation.prefix_size > 0: prefix_tokens = sample["target"][:, : cfg.generation.prefix_size] constraints = None if "constraints" in sample: constraints = sample["constraints"] gen_timer.start() hypos = task.inference_step( generator, models, sample, prefix_tokens=prefix_tokens, constraints=constraints, ) num_generated_tokens = sum(len(h[0]["tokens"]) for h in hypos) gen_timer.stop(num_generated_tokens) for i, sample_id in enumerate(sample["id"].tolist()): has_target = sample["target"] is not None # Remove padding if "src_tokens" in sample["net_input"]: src_tokens = utils.strip_pad( sample["net_input"]["src_tokens"][i, :], tgt_dict.pad() ) else: src_tokens = None target_tokens = None if has_target: target_tokens = ( utils.strip_pad(sample["target"][i, :], tgt_dict.pad()).int().cpu() ) # Either retrieve the original sentences or regenerate them from tokens. 
if align_dict is not None: src_str = task.dataset(cfg.dataset.gen_subset).src.get_original_text( sample_id ) target_str = task.dataset(cfg.dataset.gen_subset).tgt.get_original_text( sample_id ) else: if src_dict is not None: src_str = src_dict.string(src_tokens, cfg.common_eval.post_process) else: src_str = "" if has_target: target_str = tgt_dict.string( target_tokens, cfg.common_eval.post_process, escape_unk=True, extra_symbols_to_ignore=get_symbols_to_strip_from_output( generator ), ) src_str = decode_fn(src_str) if has_target: target_str = decode_fn(target_str) if not cfg.common_eval.quiet: if src_dict is not None: print("S-{}\t{}".format(sample_id, src_str), file=output_file) if has_target: print("T-{}\t{}".format(sample_id, target_str), file=output_file) # Process top predictions for j, hypo in enumerate(hypos[i][: cfg.generation.nbest]): hypo_tokens, hypo_str, alignment = utils.post_process_prediction( hypo_tokens=hypo["tokens"].int().cpu(), src_str=src_str, alignment=hypo["alignment"], align_dict=align_dict, tgt_dict=tgt_dict, remove_bpe=cfg.common_eval.post_process, extra_symbols_to_ignore=get_symbols_to_strip_from_output(generator), ) detok_hypo_str = decode_fn(hypo_str) if not cfg.common_eval.quiet: score = hypo["score"] / math.log(2) # convert to base 2 # original hypothesis (after tokenization and BPE) print( "H-{}\t{}\t{}".format(sample_id, score, hypo_str), file=output_file, ) # detokenized hypothesis print( "D-{}\t{}\t{}".format(sample_id, score, detok_hypo_str), file=output_file, ) print( "P-{}\t{}".format( sample_id, " ".join( map( lambda x: "{:.4f}".format(x), # convert from base e to base 2 hypo["positional_scores"] .div_(math.log(2)) .tolist(), ) ), ), file=output_file, ) if cfg.generation.print_alignment == "hard": print( "A-{}\t{}".format( sample_id, " ".join( [ "{}-{}".format(src_idx, tgt_idx) for src_idx, tgt_idx in alignment ] ), ), file=output_file, ) if cfg.generation.print_alignment == "soft": print( "A-{}\t{}".format( sample_id, " ".join( [",".join(src_probs) for src_probs in alignment] ), ), file=output_file, ) if cfg.generation.print_step: print( "I-{}\t{}".format(sample_id, hypo["steps"]), file=output_file, ) if cfg.generation.retain_iter_history: for step, h in enumerate(hypo["history"]): _, h_str, _ = utils.post_process_prediction( hypo_tokens=h["tokens"].int().cpu(), src_str=src_str, alignment=None, align_dict=None, tgt_dict=tgt_dict, remove_bpe=None, ) print( "E-{}_{}\t{}".format(sample_id, step, h_str), file=output_file, ) # Score only the top hypothesis if has_target and j == 0: if ( align_dict is not None or cfg.common_eval.post_process is not None ): # Convert back to tokens for evaluation with unk replacement and/or without BPE target_tokens = tgt_dict.encode_line( target_str, add_if_not_exist=True ) hypo_tokens = tgt_dict.encode_line( detok_hypo_str, add_if_not_exist=True ) if hasattr(scorer, "add_string"): scorer.add_string(target_str, detok_hypo_str) else: scorer.add(target_tokens, hypo_tokens) wps_meter.update(num_generated_tokens) progress.log({"wps": round(wps_meter.avg)}) num_sentences += ( sample["nsentences"] if "nsentences" in sample else sample["id"].numel() ) logger.info("NOTE: hypothesis and token scores are output in base 2") logger.info( "Translated {:,} sentences ({:,} tokens) in {:.1f}s ({:.2f} sentences/s, {:.2f} tokens/s)".format( num_sentences, gen_timer.n, gen_timer.sum, num_sentences / gen_timer.sum, 1.0 / gen_timer.avg, ) ) if has_target: if cfg.bpe and not cfg.generation.sacrebleu: if cfg.common_eval.post_process: 
logger.warning( "BLEU score is being computed by splitting detokenized string on spaces, this is probably not what you want. Use --sacrebleu for standard 13a BLEU tokenization" ) else: logger.warning( "If you are using BPE on the target side, the BLEU score is computed on BPE tokens, not on proper words. Use --sacrebleu for standard 13a BLEU tokenization" ) # use print to be consistent with other main outputs: S-, H-, T-, D- and so on print( "Generate {} with beam={}: {}".format( cfg.dataset.gen_subset, cfg.generation.beam, scorer.result_string() ), file=output_file, ) return scorer def cli_main(): parser = options.get_generation_parser() # TODO: replace this workaround with refactoring of `AudioPretraining` parser.add_argument( "--arch", "-a", metavar="ARCH", default="wav2vec2", help="Model architecture. For constructing tasks that rely on " "model args (e.g. `AudioPretraining`)", ) args = options.parse_args_and_arch(parser) main(args) if __name__ == "__main__": cli_main()
15,805
36.813397
180
py
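generate.py above logs that "hypothesis and token scores are output in base 2" and gets there by dividing natural-log scores by math.log(2) before printing the H-/D-/P- lines. The snippet below reproduces just that conversion and formatting for one fabricated hypothesis, outside the full CLI; the scores and token string are made up:

import math

sample_id = 0
# Fabricated stand-in for one generator hypothesis: natural-log scores.
score = -1.3863                             # ln p of the whole hypothesis
positional_scores = [-0.69, -0.41, -0.28]   # per-token ln p

score_base2 = score / math.log(2)           # ln p -> log2 p, as in the H-/D- lines
pos_base2 = [s / math.log(2) for s in positional_scores]

print("H-{}\t{}\t{}".format(sample_id, score_base2, "11 23 2"))
print("P-{}\t{}".format(sample_id, " ".join("{:.4f}".format(x) for x in pos_base2)))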
sign-topic
sign-topic-main/fairseq_cli/validate.py
#!/usr/bin/env python3 -u # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import os import sys from argparse import Namespace from itertools import chain import torch from omegaconf import DictConfig from fairseq import checkpoint_utils, distributed_utils, options, utils from fairseq.dataclass.utils import convert_namespace_to_omegaconf from fairseq.logging import metrics, progress_bar from fairseq.utils import reset_logging logging.basicConfig( format="%(asctime)s | %(levelname)s | %(name)s | %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=os.environ.get("LOGLEVEL", "INFO").upper(), stream=sys.stdout, ) logger = logging.getLogger("fairseq_cli.validate") def main(cfg: DictConfig, override_args=None): if isinstance(cfg, Namespace): cfg = convert_namespace_to_omegaconf(cfg) utils.import_user_module(cfg.common) reset_logging() assert ( cfg.dataset.max_tokens is not None or cfg.dataset.batch_size is not None ), "Must specify batch size either with --max-tokens or --batch-size" use_fp16 = cfg.common.fp16 use_cuda = torch.cuda.is_available() and not cfg.common.cpu if use_cuda: torch.cuda.set_device(cfg.distributed_training.device_id) if cfg.distributed_training.distributed_world_size > 1: data_parallel_world_size = distributed_utils.get_data_parallel_world_size() data_parallel_rank = distributed_utils.get_data_parallel_rank() else: data_parallel_world_size = 1 data_parallel_rank = 0 if override_args is not None: overrides = vars(override_args) overrides.update(eval(getattr(override_args, "model_overrides", "{}"))) else: overrides = None # Load ensemble logger.info("loading model(s) from {}".format(cfg.common_eval.path)) models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task( [cfg.common_eval.path], arg_overrides=overrides, suffix=cfg.checkpoint.checkpoint_suffix, ) model = models[0] # Move models to GPU for model in models: model.eval() if use_fp16: model.half() if use_cuda: model.cuda() # Print args logger.info(saved_cfg) # Build criterion criterion = task.build_criterion(saved_cfg.criterion) criterion.eval() for subset in cfg.dataset.valid_subset.split(","): try: task.load_dataset(subset, combine=False, epoch=1, task_cfg=saved_cfg.task) dataset = task.dataset(subset) except KeyError: raise Exception("Cannot find dataset: " + subset) # Initialize data iterator itr = task.get_batch_iterator( dataset=dataset, max_tokens=cfg.dataset.max_tokens, max_sentences=cfg.dataset.batch_size, max_positions=utils.resolve_max_positions( task.max_positions(), *[m.max_positions() for m in models], ), ignore_invalid_inputs=cfg.dataset.skip_invalid_size_inputs_valid_test, required_batch_size_multiple=cfg.dataset.required_batch_size_multiple, seed=cfg.common.seed, num_shards=data_parallel_world_size, shard_id=data_parallel_rank, num_workers=cfg.dataset.num_workers, data_buffer_size=cfg.dataset.data_buffer_size, ).next_epoch_itr(shuffle=False) progress = progress_bar.progress_bar( itr, log_format=cfg.common.log_format, log_interval=cfg.common.log_interval, prefix=f"valid on '{subset}' subset", default_log_format=("tqdm" if not cfg.common.no_progress_bar else "simple"), ) log_outputs = [] for i, sample in enumerate(progress): sample = utils.move_to_cuda(sample) if use_cuda else sample _loss, _sample_size, log_output = task.valid_step(sample, model, criterion) progress.log(log_output, step=i) log_outputs.append(log_output) if data_parallel_world_size > 1: 
log_outputs = distributed_utils.all_gather_list( log_outputs, max_size=cfg.common.all_gather_list_size, group=distributed_utils.get_data_parallel_group(), ) log_outputs = list(chain.from_iterable(log_outputs)) with metrics.aggregate() as agg: task.reduce_metrics(log_outputs, criterion) log_output = agg.get_smoothed_values() progress.print(log_output, tag=subset, step=i) def cli_main(): parser = options.get_validation_parser() args = options.parse_args_and_arch(parser) # only override args that are explicitly given on the command line override_parser = options.get_validation_parser() override_args = options.parse_args_and_arch(override_parser, suppress_defaults=True) distributed_utils.call_main( convert_namespace_to_omegaconf(args), main, override_args=override_args ) if __name__ == "__main__": cli_main()
5,228
32.954545
88
py
sign-topic
sign-topic-main/fairseq_cli/hydra_train.py
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import logging
import os

import hydra
import torch
from hydra.core.hydra_config import HydraConfig
from omegaconf import OmegaConf, open_dict

from fairseq import distributed_utils, metrics
from fairseq.dataclass.configs import FairseqConfig
from fairseq.dataclass.initialize import add_defaults, hydra_init
from fairseq.dataclass.utils import omegaconf_no_object_check
from fairseq.utils import reset_logging
from fairseq_cli.train import main as pre_main

logger = logging.getLogger("fairseq_cli.hydra_train")


@hydra.main(config_path=os.path.join("..", "fairseq", "config"), config_name="config")
def hydra_main(cfg: FairseqConfig) -> float:
    return _hydra_main(cfg)


def _hydra_main(cfg: FairseqConfig, **kwargs) -> float:
    add_defaults(cfg)

    if cfg.common.reset_logging:
        reset_logging()  # Hydra hijacks logging, fix that
    else:
        # check if directly called or called through hydra_main
        if HydraConfig.initialized():
            with open_dict(cfg):
                # make hydra logging work with ddp
                # (see https://github.com/facebookresearch/hydra/issues/1126)
                cfg.job_logging_cfg = OmegaConf.to_container(
                    HydraConfig.get().job_logging, resolve=True
                )

    with omegaconf_no_object_check():
        cfg = OmegaConf.create(
            OmegaConf.to_container(cfg, resolve=True, enum_to_str=True)
        )
    OmegaConf.set_struct(cfg, True)

    try:
        if cfg.common.profile:
            with torch.cuda.profiler.profile():
                with torch.autograd.profiler.emit_nvtx():
                    distributed_utils.call_main(cfg, pre_main, **kwargs)
        else:
            distributed_utils.call_main(cfg, pre_main, **kwargs)
    except BaseException as e:
        if not cfg.common.suppress_crashes:
            raise
        else:
            logger.error("Crashed! " + str(e))

    # get best val and return - useful for sweepers
    try:
        best_val = metrics.get_smoothed_value(
            "valid", cfg.checkpoint.best_checkpoint_metric
        )
    except:
        best_val = None

    if best_val is None:
        best_val = float("inf")

    return best_val


def cli_main():
    try:
        from hydra._internal.utils import get_args

        cfg_name = get_args().config_name or "config"
    except:
        logger.warning("Failed to get config name from hydra args")
        cfg_name = "config"

    hydra_init(cfg_name)
    hydra_main()


if __name__ == "__main__":
    cli_main()
2,714
28.51087
116
py
sign-topic
sign-topic-main/fairseq_cli/eval_lm.py
#!/usr/bin/env python3 -u # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Evaluate the perplexity of a trained language model. """ import logging import math import os import sys from argparse import Namespace from typing import Iterable, List, Optional import torch import fairseq from fairseq import checkpoint_utils, distributed_utils, options, tasks, utils from fairseq.dataclass.utils import convert_namespace_to_omegaconf from fairseq.logging import progress_bar from fairseq.logging.meters import StopwatchMeter from fairseq.sequence_scorer import SequenceScorer from omegaconf import DictConfig logging.basicConfig( format="%(asctime)s | %(levelname)s | %(name)s | %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=os.environ.get("LOGLEVEL", "INFO").upper(), stream=sys.stdout, ) logger = logging.getLogger("fairseq_cli.eval_lm") def eval_lm( models: List[fairseq.models.FairseqModel], source_dictionary: fairseq.data.Dictionary, batch_iterator: Iterable, post_process: Optional[str] = None, output_word_probs: bool = False, output_word_stats: bool = False, target_dictionary: Optional[fairseq.data.Dictionary] = None, softmax_batch: int = 0, remove_bos_token: bool = False, device: Optional[torch.device] = None, ): """ Args: models (List[~fairseq.models.FairseqModel]): list of models to evaluate. Models are essentially `nn.Module` instances, but must be compatible with fairseq's `SequenceScorer`. source_dictionary (~fairseq.data.Dictionary): dictionary for applying any relevant post processing or outputing word probs/stats. batch_iterator (Iterable): yield batches of data post_process (Optional[str]): post-process text by removing BPE, letter segmentation, etc. Valid options can be found in fairseq.data.utils.post_process, although not all options are implemented here. 
output_word_probs (Optional[bool]): output words and their predicted log probabilities output_word_stats (Optional[bool]): output word statistics such as word count and average probability target_dictionary (Optional[~fairseq.data.Dictionary]): output dictionary (defaults to *source_dictionary*) softmax_batch (Optional[bool]): if BxT is more than this, will batch the softmax over vocab to this amount of tokens, in order to fit into GPU memory remove_bos_token (Optional[bool]): if True, confirm that the first token is the beginning-of-sentence symbol (according to the relevant dictionary) and remove it from the output device (Optional[torch.device]): device to use for evaluation (defaults to device of first model parameter) """ if target_dictionary is None: target_dictionary = source_dictionary if device is None: device = next(models[0].parameters()).device gen_timer = StopwatchMeter() scorer = SequenceScorer(target_dictionary, softmax_batch) score_sum = 0.0 count = 0 if post_process is not None: if post_process in {"subword_nmt", "@@ "}: bpe_cont = post_process.rstrip() bpe_toks = { i for i in range(len(source_dictionary)) if source_dictionary[i].endswith(bpe_cont) } else: raise NotImplementedError( "--post-process={post_process} is not implemented" ) bpe_len = len(bpe_cont) else: bpe_toks = None bpe_len = 0 word_stats = dict() for sample in batch_iterator: if "net_input" not in sample: continue sample = utils.move_to_cuda(sample, device=device) gen_timer.start() hypos = scorer.generate(models, sample) gen_timer.stop(sample["ntokens"]) for i, hypos_i in enumerate(hypos): hypo = hypos_i[0] sample_id = sample["id"][i] tokens = hypo["tokens"] tgt_len = tokens.numel() pos_scores = hypo["positional_scores"].float() if remove_bos_token: assert hypo["tokens"][0].item() == target_dictionary.bos() tokens = tokens[1:] pos_scores = pos_scores[1:] skipped_toks = 0 if bpe_toks is not None: for i in range(tgt_len - 1): if tokens[i].item() in bpe_toks: skipped_toks += 1 pos_scores[i + 1] += pos_scores[i] pos_scores[i] = 0 inf_scores = pos_scores.eq(float("inf")) | pos_scores.eq(float("-inf")) if inf_scores.any(): logger.info( "skipping tokens with inf scores:", target_dictionary.string(tokens[inf_scores.nonzero()]), ) pos_scores = pos_scores[(~inf_scores).nonzero()] score_sum += pos_scores.sum().cpu() count += pos_scores.numel() - skipped_toks if output_word_probs or output_word_stats: w = "" word_prob = [] is_bpe = False for i in range(len(tokens)): w_ind = tokens[i].item() w += source_dictionary[w_ind] if bpe_toks is not None and w_ind in bpe_toks: w = w[:-bpe_len] is_bpe = True else: word_prob.append((w, pos_scores[i].item())) next_prob = None ind = i + 1 while ind < len(tokens): if pos_scores[ind].item() != 0: next_prob = pos_scores[ind] break ind += 1 word_stats.setdefault(w, WordStat(w, is_bpe)).add( pos_scores[i].item(), next_prob ) is_bpe = False w = "" if output_word_probs: logger.info( str(int(sample_id)) + " " + ( "\t".join( "{} [{:2f}]".format(x[0], x[1]) for x in word_prob ) ) ) avg_nll_loss = ( -score_sum / count / math.log(2) if count > 0 else 0 ) # convert to base 2 logger.info( "Evaluated {:,} tokens in {:.1f}s ({:.2f} tokens/s)".format( gen_timer.n, gen_timer.sum, 1.0 / gen_timer.avg if gen_timer.avg > 0 else 0 ) ) if output_word_stats: for ws in sorted(word_stats.values(), key=lambda x: x.count, reverse=True): logger.info(ws) return { "loss": avg_nll_loss, "perplexity": 2 ** avg_nll_loss, } class WordStat(object): def __init__(self, word, is_bpe): self.word = word self.is_bpe = is_bpe 
self.log_prob = 0 self.next_word_prob = 0 self.count = 0 self.missing_next_words = 0 def add(self, log_prob, next_word_prob): """increments counters for the sum of log probs of current word and next word (given context ending at current word). Since the next word might be at the end of the example, or it might be not counted because it is not an ending subword unit, also keeps track of how many of those we have seen""" if next_word_prob is not None: self.next_word_prob += next_word_prob else: self.missing_next_words += 1 self.log_prob += log_prob self.count += 1 def __str__(self): return "{}\t{}\t{}\t{}\t{}\t{}".format( self.word, self.count, self.log_prob, self.is_bpe, self.next_word_prob, self.count - self.missing_next_words, ) def main(cfg: DictConfig, **unused_kwargs): if isinstance(cfg, Namespace): cfg = convert_namespace_to_omegaconf(cfg) utils.import_user_module(cfg.common) logger.info(cfg) if cfg.eval_lm.context_window > 0: # reduce tokens per sample by the required context window size cfg.task.tokens_per_sample -= cfg.eval_lm.context_window # Initialize the task using the current *cfg* task = tasks.setup_task(cfg.task) # Load ensemble logger.info("loading model(s) from {}".format(cfg.common_eval.path)) models, model_args, task = checkpoint_utils.load_model_ensemble_and_task( [cfg.common_eval.path], arg_overrides=eval(cfg.common_eval.model_overrides), suffix=cfg.checkpoint.checkpoint_suffix, strict=(cfg.checkpoint.checkpoint_shard_count == 1), num_shards=cfg.checkpoint.checkpoint_shard_count, task=task, ) use_fp16 = cfg.common.fp16 use_cuda = torch.cuda.is_available() and not cfg.common.cpu if use_cuda: torch.cuda.set_device(cfg.distributed_training.device_id) # Optimize ensemble for generation and set the source and dest dicts on the model # (required by scorer) for model in models: if use_fp16: model.half() if use_cuda and not cfg.distributed_training.pipeline_model_parallel: model.cuda() model.prepare_for_inference_(cfg) assert len(models) > 0 logger.info( "num. 
model params: {:,}".format(sum(p.numel() for p in models[0].parameters())) ) # Load dataset splits task.load_dataset(cfg.dataset.gen_subset) dataset = task.dataset(cfg.dataset.gen_subset) logger.info( "{} {} {:,} examples".format( cfg.task.data, cfg.dataset.gen_subset, len(dataset) ) ) itr = task.eval_lm_dataloader( dataset=dataset, max_tokens=cfg.dataset.max_tokens or 36000, batch_size=cfg.dataset.batch_size, max_positions=utils.resolve_max_positions( *[model.max_positions() for model in models] ), num_shards=max( cfg.dataset.num_shards, cfg.distributed_training.distributed_world_size, ), shard_id=max( cfg.dataset.shard_id, cfg.distributed_training.distributed_rank, ), num_workers=cfg.dataset.num_workers, data_buffer_size=cfg.dataset.data_buffer_size, context_window=cfg.eval_lm.context_window, ) itr = progress_bar.progress_bar( itr, log_format=cfg.common.log_format, log_interval=cfg.common.log_interval, default_log_format=("tqdm" if not cfg.common.no_progress_bar else "simple"), ) results = eval_lm( models=models, source_dictionary=task.source_dictionary, batch_iterator=itr, post_process=cfg.common_eval.post_process, output_word_probs=cfg.eval_lm.output_word_probs, output_word_stats=cfg.eval_lm.output_word_stats, target_dictionary=task.target_dictionary, softmax_batch=cfg.eval_lm.softmax_batch, remove_bos_token=getattr(cfg.task, "add_bos_token", False), ) logger.info( "Loss (base 2): {:.4f}, Perplexity: {:.2f}".format( results["loss"], results["perplexity"] ) ) return results def cli_main(): parser = options.get_eval_lm_parser() args = options.parse_args_and_arch(parser) distributed_utils.call_main(convert_namespace_to_omegaconf(args), main) if __name__ == "__main__": cli_main()
11,961
33.373563
108
py
sign-topic
sign-topic-main/fairseq_cli/interactive.py
#!/usr/bin/env python3 -u # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Translate raw text with a trained model. Batches data on-the-fly. """ import ast import fileinput import logging import math import os import sys import time from argparse import Namespace from collections import namedtuple import numpy as np import torch from fairseq import checkpoint_utils, distributed_utils, options, tasks, utils from fairseq.dataclass.configs import FairseqConfig from fairseq.dataclass.utils import convert_namespace_to_omegaconf from fairseq.token_generation_constraints import pack_constraints, unpack_constraints from fairseq_cli.generate import get_symbols_to_strip_from_output logging.basicConfig( format="%(asctime)s | %(levelname)s | %(name)s | %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=os.environ.get("LOGLEVEL", "INFO").upper(), stream=sys.stdout, ) logger = logging.getLogger("fairseq_cli.interactive") Batch = namedtuple("Batch", "ids src_tokens src_lengths constraints") Translation = namedtuple("Translation", "src_str hypos pos_scores alignments") def buffered_read(input, buffer_size): buffer = [] with fileinput.input(files=[input], openhook=fileinput.hook_encoded("utf-8")) as h: for src_str in h: buffer.append(src_str.strip()) if len(buffer) >= buffer_size: yield buffer buffer = [] if len(buffer) > 0: yield buffer def make_batches(lines, cfg, task, max_positions, encode_fn): def encode_fn_target(x): return encode_fn(x) if cfg.generation.constraints: # Strip (tab-delimited) contraints, if present, from input lines, # store them in batch_constraints batch_constraints = [list() for _ in lines] for i, line in enumerate(lines): if "\t" in line: lines[i], *batch_constraints[i] = line.split("\t") # Convert each List[str] to List[Tensor] for i, constraint_list in enumerate(batch_constraints): batch_constraints[i] = [ task.target_dictionary.encode_line( encode_fn_target(constraint), append_eos=False, add_if_not_exist=False, ) for constraint in constraint_list ] if cfg.generation.constraints: constraints_tensor = pack_constraints(batch_constraints) else: constraints_tensor = None tokens, lengths = task.get_interactive_tokens_and_lengths(lines, encode_fn) itr = task.get_batch_iterator( dataset=task.build_dataset_for_inference( tokens, lengths, constraints=constraints_tensor ), max_tokens=cfg.dataset.max_tokens, max_sentences=cfg.dataset.batch_size, max_positions=max_positions, ignore_invalid_inputs=cfg.dataset.skip_invalid_size_inputs_valid_test, ).next_epoch_itr(shuffle=False) for batch in itr: ids = batch["id"] src_tokens = batch["net_input"]["src_tokens"] src_lengths = batch["net_input"]["src_lengths"] constraints = batch.get("constraints", None) yield Batch( ids=ids, src_tokens=src_tokens, src_lengths=src_lengths, constraints=constraints, ) def main(cfg: FairseqConfig): if isinstance(cfg, Namespace): cfg = convert_namespace_to_omegaconf(cfg) start_time = time.time() total_translate_time = 0 utils.import_user_module(cfg.common) if cfg.interactive.buffer_size < 1: cfg.interactive.buffer_size = 1 if cfg.dataset.max_tokens is None and cfg.dataset.batch_size is None: cfg.dataset.batch_size = 1 assert ( not cfg.generation.sampling or cfg.generation.nbest == cfg.generation.beam ), "--sampling requires --nbest to be equal to --beam" assert ( not cfg.dataset.batch_size or cfg.dataset.batch_size <= cfg.interactive.buffer_size ), "--batch-size cannot be larger than 
--buffer-size" logger.info(cfg) # Fix seed for stochastic decoding if cfg.common.seed is not None and not cfg.generation.no_seed_provided: np.random.seed(cfg.common.seed) utils.set_torch_seed(cfg.common.seed) use_cuda = torch.cuda.is_available() and not cfg.common.cpu # Setup task, e.g., translation task = tasks.setup_task(cfg.task) # Load ensemble overrides = ast.literal_eval(cfg.common_eval.model_overrides) logger.info("loading model(s) from {}".format(cfg.common_eval.path)) models, _model_args = checkpoint_utils.load_model_ensemble( utils.split_paths(cfg.common_eval.path), arg_overrides=overrides, task=task, suffix=cfg.checkpoint.checkpoint_suffix, strict=(cfg.checkpoint.checkpoint_shard_count == 1), num_shards=cfg.checkpoint.checkpoint_shard_count, ) # Set dictionaries src_dict = task.source_dictionary tgt_dict = task.target_dictionary # Optimize ensemble for generation for model in models: if model is None: continue if cfg.common.fp16: model.half() if use_cuda and not cfg.distributed_training.pipeline_model_parallel: model.cuda() model.prepare_for_inference_(cfg) # Initialize generator generator = task.build_generator(models, cfg.generation) # Handle tokenization and BPE tokenizer = task.build_tokenizer(cfg.tokenizer) bpe = task.build_bpe(cfg.bpe) def encode_fn(x): if tokenizer is not None: x = tokenizer.encode(x) if bpe is not None: x = bpe.encode(x) return x def decode_fn(x): if bpe is not None: x = bpe.decode(x) if tokenizer is not None: x = tokenizer.decode(x) return x # Load alignment dictionary for unknown word replacement # (None if no unknown word replacement, empty if no path to align dictionary) align_dict = utils.load_align_dict(cfg.generation.replace_unk) max_positions = utils.resolve_max_positions( task.max_positions(), *[model.max_positions() for model in models] ) if cfg.generation.constraints: logger.warning( "NOTE: Constrained decoding currently assumes a shared subword vocabulary." 
) if cfg.interactive.buffer_size > 1: logger.info("Sentence buffer size: %s", cfg.interactive.buffer_size) logger.info("NOTE: hypothesis and token scores are output in base 2") logger.info("Type the input sentence and press return:") start_id = 0 for inputs in buffered_read(cfg.interactive.input, cfg.interactive.buffer_size): results = [] for batch in make_batches(inputs, cfg, task, max_positions, encode_fn): bsz = batch.src_tokens.size(0) src_tokens = batch.src_tokens src_lengths = batch.src_lengths constraints = batch.constraints if use_cuda: src_tokens = src_tokens.cuda() src_lengths = src_lengths.cuda() if constraints is not None: constraints = constraints.cuda() sample = { "net_input": { "src_tokens": src_tokens, "src_lengths": src_lengths, }, } translate_start_time = time.time() translations = task.inference_step( generator, models, sample, constraints=constraints ) translate_time = time.time() - translate_start_time total_translate_time += translate_time list_constraints = [[] for _ in range(bsz)] if cfg.generation.constraints: list_constraints = [unpack_constraints(c) for c in constraints] for i, (id, hypos) in enumerate(zip(batch.ids.tolist(), translations)): src_tokens_i = utils.strip_pad(src_tokens[i], tgt_dict.pad()) constraints = list_constraints[i] results.append( ( start_id + id, src_tokens_i, hypos, { "constraints": constraints, "time": translate_time / len(translations), }, ) ) # sort output to match input order for id_, src_tokens, hypos, info in sorted(results, key=lambda x: x[0]): src_str = "" if src_dict is not None: src_str = src_dict.string(src_tokens, cfg.common_eval.post_process) print("S-{}\t{}".format(id_, src_str)) print("W-{}\t{:.3f}\tseconds".format(id_, info["time"])) for constraint in info["constraints"]: print( "C-{}\t{}".format( id_, tgt_dict.string(constraint, cfg.common_eval.post_process), ) ) # Process top predictions for hypo in hypos[: min(len(hypos), cfg.generation.nbest)]: hypo_tokens, hypo_str, alignment = utils.post_process_prediction( hypo_tokens=hypo["tokens"].int().cpu(), src_str=src_str, alignment=hypo["alignment"], align_dict=align_dict, tgt_dict=tgt_dict, remove_bpe=cfg.common_eval.post_process, extra_symbols_to_ignore=get_symbols_to_strip_from_output(generator), ) detok_hypo_str = decode_fn(hypo_str) score = hypo["score"] / math.log(2) # convert to base 2 # original hypothesis (after tokenization and BPE) print("H-{}\t{}\t{}".format(id_, score, hypo_str)) # detokenized hypothesis print("D-{}\t{}\t{}".format(id_, score, detok_hypo_str)) print( "P-{}\t{}".format( id_, " ".join( map( lambda x: "{:.4f}".format(x), # convert from base e to base 2 hypo["positional_scores"].div_(math.log(2)).tolist(), ) ), ) ) if cfg.generation.print_alignment: alignment_str = " ".join( ["{}-{}".format(src, tgt) for src, tgt in alignment] ) print("A-{}\t{}".format(id_, alignment_str)) # update running id_ counter start_id += len(inputs) logger.info( "Total time: {:.3f} seconds; translation time: {:.3f}".format( time.time() - start_time, total_translate_time ) ) def cli_main(): parser = options.get_interactive_generation_parser() args = options.parse_args_and_arch(parser) distributed_utils.call_main(convert_namespace_to_omegaconf(args), main) if __name__ == "__main__": cli_main()
11,465
35.056604
88
py
sign-topic
sign-topic-main/fairseq_cli/train.py
#!/usr/bin/env python3 -u # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Train a new model on one or across multiple GPUs. """ import argparse import logging import math import os import sys from typing import Any, Callable, Dict, List, Optional, Tuple # We need to setup root logger before importing any fairseq libraries. logging.basicConfig( format="%(asctime)s | %(levelname)s | %(name)s | %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=os.environ.get("LOGLEVEL", "INFO").upper(), stream=sys.stdout, ) logger = logging.getLogger("fairseq_cli.train") import numpy as np import torch from omegaconf import DictConfig, OmegaConf from fairseq import checkpoint_utils, options, quantization_utils, tasks, utils from fairseq.data import data_utils, iterators from fairseq.data.plasma_utils import PlasmaStore from fairseq.dataclass.configs import FairseqConfig from fairseq.dataclass.utils import convert_namespace_to_omegaconf from fairseq.distributed import fsdp_enable_wrap, fsdp_wrap from fairseq.distributed import utils as distributed_utils from fairseq.file_io import PathManager from fairseq.logging import meters, metrics, progress_bar from fairseq.model_parallel.megatron_trainer import MegatronTrainer from fairseq.trainer import Trainer def main(cfg: FairseqConfig) -> None: if isinstance(cfg, argparse.Namespace): cfg = convert_namespace_to_omegaconf(cfg) utils.import_user_module(cfg.common) if ( distributed_utils.is_master(cfg.distributed_training) and "job_logging_cfg" in cfg ): # make hydra logging work with ddp (see # see https://github.com/facebookresearch/hydra/issues/1126) logging.config.dictConfig(OmegaConf.to_container(cfg.job_logging_cfg)) assert ( cfg.dataset.max_tokens is not None or cfg.dataset.batch_size is not None ), "Must specify batch size either with --max-tokens or --batch-size" metrics.reset() if cfg.common.log_file is not None: handler = logging.FileHandler(filename=cfg.common.log_file) logger.addHandler(handler) np.random.seed(cfg.common.seed) utils.set_torch_seed(cfg.common.seed) if distributed_utils.is_master(cfg.distributed_training): checkpoint_utils.verify_checkpoint_directory(cfg.checkpoint.save_dir) # Print args logger.info(cfg) if cfg.checkpoint.write_checkpoints_asynchronously: try: import iopath # noqa: F401 except ImportError: logging.exception( "Asynchronous checkpoint writing is specified but iopath is " "not installed: `pip install iopath`" ) return # Setup task, e.g., translation, language modeling, etc. task = tasks.setup_task(cfg.task) assert cfg.criterion, "Please specify criterion to train a model" # Build model and criterion if cfg.distributed_training.ddp_backend == "fully_sharded": with fsdp_enable_wrap(cfg.distributed_training): model = fsdp_wrap(task.build_model(cfg.model)) else: model = task.build_model(cfg.model) criterion = task.build_criterion(cfg.criterion) logger.info(model) logger.info("task: {}".format(task.__class__.__name__)) logger.info("model: {}".format(model.__class__.__name__)) logger.info("criterion: {}".format(criterion.__class__.__name__)) logger.info( "num. shared model params: {:,} (num. trained: {:,})".format( sum( p.numel() for p in model.parameters() if not getattr(p, "expert", False) ), sum( p.numel() for p in model.parameters() if not getattr(p, "expert", False) and p.requires_grad ), ) ) logger.info( "num. expert model params: {} (num. 
trained: {})".format( sum(p.numel() for p in model.parameters() if getattr(p, "expert", False)), sum( p.numel() for p in model.parameters() if getattr(p, "expert", False) and p.requires_grad ), ) ) # Load valid dataset (we load training data below, based on the latest checkpoint) # We load the valid dataset AFTER building the model data_utils.raise_if_valid_subsets_unintentionally_ignored(cfg) if cfg.dataset.combine_valid_subsets: task.load_dataset("valid", combine=True, epoch=1) else: for valid_sub_split in cfg.dataset.valid_subset.split(","): task.load_dataset(valid_sub_split, combine=False, epoch=1) # (optionally) Configure quantization if cfg.common.quantization_config_path is not None: quantizer = quantization_utils.Quantizer( config_path=cfg.common.quantization_config_path, max_epoch=cfg.optimization.max_epoch, max_update=cfg.optimization.max_update, ) else: quantizer = None # Build trainer if cfg.common.model_parallel_size == 1: trainer = Trainer(cfg, task, model, criterion, quantizer) else: trainer = MegatronTrainer(cfg, task, model, criterion) logger.info( "training on {} devices (GPUs/TPUs)".format( cfg.distributed_training.distributed_world_size ) ) logger.info( "max tokens per device = {} and max sentences per device = {}".format( cfg.dataset.max_tokens, cfg.dataset.batch_size, ) ) # Load the latest checkpoint if one is available and restore the # corresponding train iterator extra_state, epoch_itr = checkpoint_utils.load_checkpoint( cfg.checkpoint, trainer, # don't cache epoch iterators for sharded datasets disable_iterator_cache=task.has_sharded_data("train"), ) if cfg.common.tpu: import torch_xla.core.xla_model as xm xm.rendezvous("load_checkpoint") # wait for all workers max_epoch = cfg.optimization.max_epoch or math.inf lr = trainer.get_lr() # Estimate model's FLOPs itr = iterators.GroupedIterator( epoch_itr.next_epoch_itr( shuffle=False, set_dataset_epoch=False, ), 1, # update_freq, skip_remainder_batch=cfg.optimization.skip_remainder_batch, ) training = model.training if training: model.eval() for i, samples in enumerate(itr): from fvcore.nn import FlopCountAnalysis for i, sample in enumerate(samples): # delayed update loop model.num_updates = 1 # TODO: check if batch size has an impact on FLOPs flops = FlopCountAnalysis( model.cpu(), (sample["net_input"]["src_tokens"].cpu(), sample["net_input"]["src_lengths"].cpu()) ) # TODO check wether here we should pass a single sample (without batch dimension) or it's fine to pass a mini-batch logger.info( "estimated total FLOPs = {}".format( flops.total() ) ) break break if training: model.train() if torch.cuda.is_available(): model.cuda() train_meter = meters.StopwatchMeter() train_meter.start() while epoch_itr.next_epoch_idx <= max_epoch: if lr <= cfg.optimization.stop_min_lr: logger.info( f"stopping training because current learning rate ({lr}) is smaller " "than or equal to minimum learning rate " f"(--stop-min-lr={cfg.optimization.stop_min_lr})" ) break # train for one epoch valid_losses, should_stop = train(cfg, trainer, task, epoch_itr) if should_stop: break # only use first validation loss to update the learning rate lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0]) epoch_itr = trainer.get_train_iterator( epoch_itr.next_epoch_idx, # sharded data: get train iterator for next epoch load_dataset=task.has_sharded_data("train"), # don't cache epoch iterators for sharded datasets disable_iterator_cache=task.has_sharded_data("train"), ) train_meter.stop() logger.info("done training in {:.1f} seconds".format(train_meter.sum)) 
# ioPath implementation to wait for all asynchronous file writes to complete. if cfg.checkpoint.write_checkpoints_asynchronously: logger.info( "ioPath PathManager waiting for all asynchronous checkpoint " "writes to finish." ) PathManager.async_close() logger.info("ioPath PathManager finished waiting.") def should_stop_early(cfg: DictConfig, valid_loss: float) -> bool: # skip check if no validation was done in the current epoch if valid_loss is None: return False if cfg.checkpoint.patience <= 0: return False def is_better(a, b): return a > b if cfg.checkpoint.maximize_best_checkpoint_metric else a < b prev_best = getattr(should_stop_early, "best", None) if prev_best is None or is_better(valid_loss, prev_best): should_stop_early.best = valid_loss should_stop_early.num_runs = 0 return False else: should_stop_early.num_runs += 1 if should_stop_early.num_runs >= cfg.checkpoint.patience: logger.info( "early stop since valid performance hasn't improved for last {} runs".format( cfg.checkpoint.patience ) ) return True else: return False @metrics.aggregate("train") def train( cfg: DictConfig, trainer: Trainer, task: tasks.FairseqTask, epoch_itr ) -> Tuple[List[Optional[float]], bool]: """Train the model for one epoch and return validation losses.""" # Initialize data iterator itr = epoch_itr.next_epoch_itr( fix_batches_to_gpus=cfg.distributed_training.fix_batches_to_gpus, shuffle=(epoch_itr.next_epoch_idx > cfg.dataset.curriculum), ) update_freq = ( cfg.optimization.update_freq[epoch_itr.epoch - 1] if epoch_itr.epoch <= len(cfg.optimization.update_freq) else cfg.optimization.update_freq[-1] ) itr = iterators.GroupedIterator( itr, update_freq, skip_remainder_batch=cfg.optimization.skip_remainder_batch, ) if cfg.common.tpu: itr = utils.tpu_data_loader(itr) progress = progress_bar.progress_bar( itr, log_format=cfg.common.log_format, log_file=cfg.common.log_file, log_interval=cfg.common.log_interval, epoch=epoch_itr.epoch, tensorboard_logdir=( cfg.common.tensorboard_logdir if distributed_utils.is_master(cfg.distributed_training) else None ), default_log_format=("tqdm" if not cfg.common.no_progress_bar else "simple"), wandb_project=( cfg.common.wandb_project if distributed_utils.is_master(cfg.distributed_training) else None ), wandb_run_name=os.environ.get( "WANDB_NAME", os.path.basename(cfg.checkpoint.save_dir) ), azureml_logging=( cfg.common.azureml_logging if distributed_utils.is_master(cfg.distributed_training) else False ), ) progress.update_config(_flatten_config(cfg)) trainer.begin_epoch(epoch_itr.epoch) valid_subsets = cfg.dataset.valid_subset.split(",") should_stop = False num_updates = trainer.get_num_updates() logger.info("Start iterating over samples") for i, samples in enumerate(progress): with metrics.aggregate("train_inner"), torch.autograd.profiler.record_function( "train_step-%d" % i ): log_output = trainer.train_step(samples) if log_output is not None: # not OOM, overflow, ... 
# log mid-epoch stats num_updates = trainer.get_num_updates() if num_updates % cfg.common.log_interval == 0: stats = get_training_stats(metrics.get_smoothed_values("train_inner")) progress.log(stats, tag="train_inner", step=num_updates) # reset mid-epoch stats after each log interval # the end-of-epoch stats will still be preserved metrics.reset_meters("train_inner") end_of_epoch = not itr.has_next() valid_losses, should_stop = validate_and_save( cfg, trainer, task, epoch_itr, valid_subsets, end_of_epoch ) if should_stop: break # log end-of-epoch stats logger.info("end of epoch {} (average epoch stats below)".format(epoch_itr.epoch)) stats = get_training_stats(metrics.get_smoothed_values("train")) progress.print(stats, tag="train", step=num_updates) # reset epoch-level meters metrics.reset_meters("train") return valid_losses, should_stop def _flatten_config(cfg: DictConfig): config = OmegaConf.to_container(cfg) # remove any legacy Namespaces and replace with a single "args" namespace = None for k, v in list(config.items()): if isinstance(v, argparse.Namespace): namespace = v del config[k] if namespace is not None: config["args"] = vars(namespace) return config def validate_and_save( cfg: DictConfig, trainer: Trainer, task: tasks.FairseqTask, epoch_itr, valid_subsets: List[str], end_of_epoch: bool, ) -> Tuple[List[Optional[float]], bool]: num_updates = trainer.get_num_updates() max_update = cfg.optimization.max_update or math.inf # Stopping conditions (and an additional one based on validation loss later # on) should_stop = False if num_updates >= max_update: should_stop = True logger.info( f"Stopping training due to " f"num_updates: {num_updates} >= max_update: {max_update}" ) training_time_hours = trainer.cumulative_training_time() / (60 * 60) if ( cfg.optimization.stop_time_hours > 0 and training_time_hours > cfg.optimization.stop_time_hours ): should_stop = True logger.info( f"Stopping training due to " f"cumulative_training_time: {training_time_hours} > " f"stop_time_hours: {cfg.optimization.stop_time_hours} hour(s)" ) do_save = ( (end_of_epoch and epoch_itr.epoch % cfg.checkpoint.save_interval == 0) or should_stop or ( cfg.checkpoint.save_interval_updates > 0 and num_updates > 0 and num_updates % cfg.checkpoint.save_interval_updates == 0 and num_updates >= cfg.dataset.validate_after_updates ) ) do_validate = ( ( (not end_of_epoch and do_save) # validate during mid-epoch saves or (end_of_epoch and epoch_itr.epoch % cfg.dataset.validate_interval == 0) or should_stop or ( cfg.dataset.validate_interval_updates > 0 and num_updates > 0 and num_updates % cfg.dataset.validate_interval_updates == 0 ) ) and not cfg.dataset.disable_validation and num_updates >= cfg.dataset.validate_after_updates ) # Validate valid_losses = [None] if do_validate: valid_losses = validate(cfg, trainer, task, epoch_itr, valid_subsets) should_stop |= should_stop_early(cfg, valid_losses[0]) # Save checkpoint if do_save or should_stop: checkpoint_utils.save_checkpoint( cfg.checkpoint, trainer, epoch_itr, valid_losses[0] ) return valid_losses, should_stop def get_training_stats(stats: Dict[str, Any]) -> Dict[str, Any]: stats["wall"] = round(metrics.get_meter("default", "wall").elapsed_time, 0) return stats def validate( cfg: DictConfig, trainer: Trainer, task: tasks.FairseqTask, epoch_itr, subsets: List[str], ) -> List[Optional[float]]: """Evaluate the model on the validation set(s) and return the losses.""" if cfg.dataset.fixed_validation_seed is not None: # set fixed seed for every validation 
utils.set_torch_seed(cfg.dataset.fixed_validation_seed) trainer.begin_valid_epoch(epoch_itr.epoch) valid_losses = [] for subset in subsets: logger.info('begin validation on "{}" subset'.format(subset)) # Initialize data iterator itr = trainer.get_valid_iterator(subset).next_epoch_itr( shuffle=False, set_dataset_epoch=False # use a fixed valid set ) if cfg.common.tpu: itr = utils.tpu_data_loader(itr) progress = progress_bar.progress_bar( itr, log_format=cfg.common.log_format, log_interval=cfg.common.log_interval, epoch=epoch_itr.epoch, prefix=f"valid on '{subset}' subset", tensorboard_logdir=( cfg.common.tensorboard_logdir if distributed_utils.is_master(cfg.distributed_training) else None ), default_log_format=("tqdm" if not cfg.common.no_progress_bar else "simple"), wandb_project=( cfg.common.wandb_project if distributed_utils.is_master(cfg.distributed_training) else None ), wandb_run_name=os.environ.get( "WANDB_NAME", os.path.basename(cfg.checkpoint.save_dir) ), ) # create a new root metrics aggregator so validation metrics # don't pollute other aggregators (e.g., train meters) with metrics.aggregate(new_root=True) as agg: for i, sample in enumerate(progress): if ( cfg.dataset.max_valid_steps is not None and i > cfg.dataset.max_valid_steps ): break trainer.valid_step(sample) # log validation stats stats = get_valid_stats(cfg, trainer, agg.get_smoothed_values()) if hasattr(task, "post_validate"): task.post_validate(trainer.get_model(), stats, agg) progress.print(stats, tag=subset, step=trainer.get_num_updates()) valid_losses.append(stats[cfg.checkpoint.best_checkpoint_metric]) return valid_losses def get_valid_stats( cfg: DictConfig, trainer: Trainer, stats: Dict[str, Any] ) -> Dict[str, Any]: stats["num_updates"] = trainer.get_num_updates() if hasattr(checkpoint_utils.save_checkpoint, "best"): key = "best_{0}".format(cfg.checkpoint.best_checkpoint_metric) best_function = max if cfg.checkpoint.maximize_best_checkpoint_metric else min stats[key] = best_function( checkpoint_utils.save_checkpoint.best, stats[cfg.checkpoint.best_checkpoint_metric], ) return stats def cli_main( modify_parser: Optional[Callable[[argparse.ArgumentParser], None]] = None ) -> None: parser = options.get_training_parser() args = options.parse_args_and_arch(parser, modify_parser=modify_parser) cfg = convert_namespace_to_omegaconf(args) if cfg.common.use_plasma_view: server = PlasmaStore(path=cfg.common.plasma_path) logger.info( f"Started plasma server pid {server.server.pid} {cfg.common.plasma_path}" ) if args.profile: with torch.cuda.profiler.profile(): with torch.autograd.profiler.emit_nvtx(): distributed_utils.call_main(cfg, main) else: distributed_utils.call_main(cfg, main) # if cfg.common.use_plasma_view: # server.server.kill() if __name__ == "__main__": cli_main()
20,021
34.25
130
py
imagefusion-rfn-nest
imagefusion-rfn-nest-main/utils.py
import os import random import numpy as np import torch from args_fusion import args from scipy.misc import imread, imsave, imresize import matplotlib as mpl from os import listdir from os.path import join EPSILON = 1e-5 def list_images(directory): images = [] names = [] dir = listdir(directory) dir.sort() for file in dir: name = file if name.endswith('.png'): images.append(join(directory, file)) elif name.endswith('.jpg'): images.append(join(directory, file)) elif name.endswith('.jpeg'): images.append(join(directory, file)) elif name.endswith('.bmp'): images.append(join(directory, file)) elif name.endswith('.tif'): images.append(join(directory, file)) # name1 = name.split('.') names.append(name) return images, names # load training images def load_dataset(image_path, BATCH_SIZE, num_imgs=None): if num_imgs is None: num_imgs = len(image_path) original_imgs_path = image_path[:num_imgs] # random random.shuffle(original_imgs_path) mod = num_imgs % BATCH_SIZE print('BATCH SIZE %d.' % BATCH_SIZE) print('Train images number %d.' % num_imgs) print('Train images samples %s.' % str(num_imgs / BATCH_SIZE)) if mod > 0: print('Train set has been trimmed %d samples...\n' % mod) original_imgs_path = original_imgs_path[:-mod] batches = int(len(original_imgs_path) // BATCH_SIZE) return original_imgs_path, batches def get_image(path, height=256, width=256, flag=False): if flag is True: image = imread(path, mode='RGB') else: image = imread(path, mode='L') if height is not None and width is not None: image = imresize(image, [height, width], interp='nearest') return image # load images - test phase def get_test_image(paths, height=None, width=None, flag=False): if isinstance(paths, str): paths = [paths] images = [] for path in paths: if flag is True: image = imread(path, mode='RGB') else: image = imread(path, mode='L') # get saliency part if height is not None and width is not None: image = imresize(image, [height, width], interp='nearest') base_size = 512 h = image.shape[0] w = image.shape[1] c = 1 if h > base_size or w > base_size: c = 4 if flag is True: image = np.transpose(image, (2, 0, 1)) else: image = np.reshape(image, [1, h, w]) images = get_img_parts(image, h, w) else: if flag is True: image = np.transpose(image, (2, 0, 1)) else: image = np.reshape(image, [1, image.shape[0], image.shape[1]]) images.append(image) images = np.stack(images, axis=0) images = torch.from_numpy(images).float() return images, h, w, c def get_img_parts(image, h, w): images = [] h_cen = int(np.floor(h / 2)) w_cen = int(np.floor(w / 2)) img1 = image[:, 0:h_cen + 3, 0: w_cen + 3] img1 = np.reshape(img1, [1, img1.shape[0], img1.shape[1], img1.shape[2]]) img2 = image[:, 0:h_cen + 3, w_cen - 2: w] img2 = np.reshape(img2, [1, img2.shape[0], img2.shape[1], img2.shape[2]]) img3 = image[:, h_cen - 2:h, 0: w_cen + 3] img3 = np.reshape(img3, [1, img3.shape[0], img3.shape[1], img3.shape[2]]) img4 = image[:, h_cen - 2:h, w_cen - 2: w] img4 = np.reshape(img4, [1, img4.shape[0], img4.shape[1], img4.shape[2]]) images.append(torch.from_numpy(img1).float()) images.append(torch.from_numpy(img2).float()) images.append(torch.from_numpy(img3).float()) images.append(torch.from_numpy(img4).float()) return images def recons_fusion_images(img_lists, h, w): img_f_list = [] h_cen = int(np.floor(h / 2)) w_cen = int(np.floor(w / 2)) c = img_lists[0][0].shape[1] ones_temp = torch.ones(1, c, h, w).cuda() for i in range(len(img_lists[0])): # img1, img2, img3, img4 img1 = img_lists[0][i] img2 = img_lists[1][i] img3 = img_lists[2][i] img4 = img_lists[3][i] 
img_f = torch.zeros(1, c, h, w).cuda() count = torch.zeros(1, c, h, w).cuda() img_f[:, :, 0:h_cen + 3, 0: w_cen + 3] += img1 count[:, :, 0:h_cen + 3, 0: w_cen + 3] += ones_temp[:, :, 0:h_cen + 3, 0: w_cen + 3] img_f[:, :, 0:h_cen + 3, w_cen - 2: w] += img2 count[:, :, 0:h_cen + 3, w_cen - 2: w] += ones_temp[:, :, 0:h_cen + 3, w_cen - 2: w] img_f[:, :, h_cen - 2:h, 0: w_cen + 3] += img3 count[:, :, h_cen - 2:h, 0: w_cen + 3] += ones_temp[:, :, h_cen - 2:h, 0: w_cen + 3] img_f[:, :, h_cen - 2:h, w_cen - 2: w] += img4 count[:, :, h_cen - 2:h, w_cen - 2: w] += ones_temp[:, :, h_cen - 2:h, w_cen - 2: w] img_f = img_f / count img_f_list.append(img_f) return img_f_list def save_image_test(img_fusion, output_path): img_fusion = img_fusion.float() if args.cuda: img_fusion = img_fusion.cpu().data[0].numpy() # img_fusion = img_fusion.cpu().clamp(0, 255).data[0].numpy() else: img_fusion = img_fusion.clamp(0, 255).data[0].numpy() img_fusion = (img_fusion - np.min(img_fusion)) / (np.max(img_fusion) - np.min(img_fusion) + EPSILON) img_fusion = img_fusion * 255 img_fusion = img_fusion.transpose(1, 2, 0).astype('uint8') # cv2.imwrite(output_path, img_fusion) if img_fusion.shape[2] == 1: img_fusion = img_fusion.reshape([img_fusion.shape[0], img_fusion.shape[1]]) # img_fusion = imresize(img_fusion, [h, w]) imsave(output_path, img_fusion) def get_train_images(paths, height=256, width=256, flag=False): if isinstance(paths, str): paths = [paths] images = [] for path in paths: image = get_image(path, height, width, flag) if flag is True: image = np.transpose(image, (2, 0, 1)) else: image = np.reshape(image, [1, height, width]) images.append(image) images = np.stack(images, axis=0) images = torch.from_numpy(images).float() return images
6,275
33.108696
104
py
imagefusion-rfn-nest
imagefusion-rfn-nest-main/test_21pairs.py
# test phase
import os
import torch
from torch.autograd import Variable
from net import NestFuse_light2_nodense, Fusion_network, Fusion_strategy
import utils
from args_fusion import args
import numpy as np


def load_model(path_auto, path_fusion, fs_type, flag_img):
    if flag_img is True:
        nc = 3
    else:
        nc = 1
    input_nc = nc
    output_nc = nc
    nb_filter = [64, 112, 160, 208, 256]

    nest_model = NestFuse_light2_nodense(nb_filter, input_nc, output_nc, deepsupervision=False)
    nest_model.load_state_dict(torch.load(path_auto))

    fusion_model = Fusion_network(nb_filter, fs_type)
    fusion_model.load_state_dict(torch.load(path_fusion))

    fusion_strategy = Fusion_strategy(fs_type)

    para = sum([np.prod(list(p.size())) for p in nest_model.parameters()])
    type_size = 4
    print('Model {} : params: {:4f}M'.format(nest_model._get_name(), para * type_size / 1000 / 1000))

    para = sum([np.prod(list(p.size())) for p in fusion_model.parameters()])
    type_size = 4
    print('Model {} : params: {:4f}M'.format(fusion_model._get_name(), para * type_size / 1000 / 1000))

    nest_model.eval()
    fusion_model.eval()
    nest_model.cuda()
    fusion_model.cuda()

    return nest_model, fusion_model, fusion_strategy


def run_demo(nest_model, fusion_model, fusion_strategy, infrared_path, visible_path, output_path_root, name_ir, fs_type, use_strategy, flag_img, alpha):
    img_ir, h, w, c = utils.get_test_image(infrared_path, flag=flag_img)  # True for rgb
    img_vi, h, w, c = utils.get_test_image(visible_path, flag=flag_img)

    # dim = img_ir.shape
    if c == 1:
        if args.cuda:
            img_ir = img_ir.cuda()
            img_vi = img_vi.cuda()
        img_ir = Variable(img_ir, requires_grad=False)
        img_vi = Variable(img_vi, requires_grad=False)
        # encoder
        en_r = nest_model.encoder(img_ir)
        en_v = nest_model.encoder(img_vi)
        # fusion net
        if use_strategy:
            f = fusion_strategy(en_r, en_v)
        else:
            f = fusion_model(en_r, en_v)
        # decoder
        img_fusion_list = nest_model.decoder_eval(f)
    else:
        # fuse each block of the (large) image separately
        img_fusion_blocks = []
        for i in range(c):
            # encoder
            img_vi_temp = img_vi[i]
            img_ir_temp = img_ir[i]
            if args.cuda:
                img_vi_temp = img_vi_temp.cuda()
                img_ir_temp = img_ir_temp.cuda()
            img_vi_temp = Variable(img_vi_temp, requires_grad=False)
            img_ir_temp = Variable(img_ir_temp, requires_grad=False)

            en_r = nest_model.encoder(img_ir_temp)
            en_v = nest_model.encoder(img_vi_temp)
            # fusion net
            if use_strategy:
                f = fusion_strategy(en_r, en_v)
            else:
                f = fusion_model(en_r, en_v)
            # decoder
            img_fusion_temp = nest_model.decoder_eval(f)
            img_fusion_blocks.append(img_fusion_temp)
        img_fusion_list = utils.recons_fusion_images(img_fusion_blocks, h, w)

    # ########################### multi-outputs ##############################################
    output_count = 0
    for img_fusion in img_fusion_list:
        file_name = 'fused_' + alpha + '_' + name_ir
        output_path = output_path_root + file_name
        output_count += 1
        # save images
        utils.save_image_test(img_fusion, output_path)
        print(output_path)


def main():
    # False - gray
    flag_img = False
    # ################# gray scale ########################################
    test_path = "images/21_pairs_tno/ir/"
    path_auto = args.resume_nestfuse
    output_path_root = "./outputs/alpha_1e4_21/"
    if os.path.exists(output_path_root) is False:
        os.mkdir(output_path_root)

    fs_type = 'res'  # res (RFN), add, avg, max, spa, nuclear
    use_strategy = False  # True - static strategy; False - RFN

    path_fusion_root = args.fusion_model

    with torch.no_grad():
        # alpha_list = [2500, 5000, 15000, 20000, 25000]
        alpha_list = [700]
        w_all_list = [[6.0, 3.0]]

        for alpha in alpha_list:
            for w_all in w_all_list:
                w, w2 = w_all
                temp = 'rfnnest_' + str(alpha) + '_wir_' + str(w)
+ '_wvi_' + str(w2) output_path_list = 'fused_' + temp + '_21' + '_' + fs_type output_path1 = output_path_root + output_path_list + '/' if os.path.exists(output_path1) is False: os.mkdir(output_path1) output_path = output_path1 # load network path_fusion = path_fusion_root + str(w) + '/' + 'Final_epoch_2_alpha_' + str(alpha) + '_wir_' + str(w) + '_wvi_' + str(w2) + '_ssim_vi.model' model, fusion_model, fusion_strategy = load_model(path_auto, path_fusion, fs_type, flag_img) imgs_paths_ir, names = utils.list_images(test_path) num = len(imgs_paths_ir) for i in range(num): name_ir = names[i] infrared_path = imgs_paths_ir[i] visible_path = infrared_path.replace('ir/', 'vis/') if visible_path.__contains__('IR'): visible_path = visible_path.replace('IR', 'VIS') else: visible_path = visible_path.replace('i.', 'v.') run_demo(model, fusion_model, fusion_strategy, infrared_path, visible_path, output_path, name_ir, fs_type, use_strategy, flag_img, temp) print('Done......') if __name__ == '__main__': main()
4,860
31.406667
152
py
imagefusion-rfn-nest
imagefusion-rfn-nest-main/net.py
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F

EPSILON = 1e-10


def var(x, dim=0):
    x_zero_meaned = x - x.mean(dim).expand_as(x)
    return x_zero_meaned.pow(2).mean(dim)


class MultConst(nn.Module):
    def forward(self, input):
        return 255 * input


class UpsampleReshape_eval(torch.nn.Module):
    def __init__(self):
        super(UpsampleReshape_eval, self).__init__()
        self.up = nn.Upsample(scale_factor=2)

    def forward(self, x1, x2):
        x2 = self.up(x2)
        shape_x1 = x1.size()
        shape_x2 = x2.size()
        left = 0
        right = 0
        top = 0
        bot = 0
        if shape_x1[3] != shape_x2[3]:
            lef_right = shape_x1[3] - shape_x2[3]
            if lef_right % 2 == 0:
                left = int(lef_right / 2)
                right = int(lef_right / 2)
            else:
                left = int(lef_right / 2)
                right = int(lef_right - left)

        if shape_x1[2] != shape_x2[2]:
            top_bot = shape_x1[2] - shape_x2[2]
            if top_bot % 2 == 0:
                top = int(top_bot / 2)
                bot = int(top_bot / 2)
            else:
                top = int(top_bot / 2)
                bot = int(top_bot - top)

        reflection_padding = [left, right, top, bot]
        reflection_pad = nn.ReflectionPad2d(reflection_padding)
        x2 = reflection_pad(x2)
        return x2


# Convolution operation
class ConvLayer(torch.nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size, stride, is_last=False):
        super(ConvLayer, self).__init__()
        reflection_padding = int(np.floor(kernel_size / 2))
        self.reflection_pad = nn.ReflectionPad2d(reflection_padding)
        self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size, stride)
        self.dropout = nn.Dropout2d(p=0.5)
        self.is_last = is_last

    def forward(self, x):
        out = self.reflection_pad(x)
        out = self.conv2d(out)
        if self.is_last is False:
            # out = F.normalize(out)
            out = F.relu(out, inplace=True)
            # out = self.dropout(out)
        return out


# Dense convolution unit
class DenseConv2d(torch.nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size, stride):
        super(DenseConv2d, self).__init__()
        self.dense_conv = ConvLayer(in_channels, out_channels, kernel_size, stride)

    def forward(self, x):
        out = self.dense_conv(x)
        out = torch.cat([x, out], 1)
        return out


# Dense Block unit
# light version
class DenseBlock_light(torch.nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size, stride):
        super(DenseBlock_light, self).__init__()
        # out_channels_def = 16
        out_channels_def = int(in_channels / 2)
        # out_channels_def = out_channels
        denseblock = []
        denseblock += [ConvLayer(in_channels, out_channels_def, kernel_size, stride),
                       ConvLayer(out_channels_def, out_channels, 1, stride)]
        self.denseblock = nn.Sequential(*denseblock)

    def forward(self, x):
        out = self.denseblock(x)
        return out


class FusionBlock_res(torch.nn.Module):
    def __init__(self, channels, index):
        super(FusionBlock_res, self).__init__()
        ws = [3, 3, 3, 3]
        self.conv_fusion = ConvLayer(2 * channels, channels, ws[index], 1)
        self.conv_ir = ConvLayer(channels, channels, ws[index], 1)
        self.conv_vi = ConvLayer(channels, channels, ws[index], 1)

        block = []
        block += [ConvLayer(2 * channels, channels, 1, 1),
                  ConvLayer(channels, channels, ws[index], 1),
                  ConvLayer(channels, channels, ws[index], 1)]
        self.bottelblock = nn.Sequential(*block)

    def forward(self, x_ir, x_vi):
        # initial fusion - conv
        # print('conv')
        f_cat = torch.cat([x_ir, x_vi], 1)
        f_init = self.conv_fusion(f_cat)

        out_ir = self.conv_ir(x_ir)
        out_vi = self.conv_vi(x_vi)
        # the original code had a bug here (conv_ir was applied to both inputs), so the model was retrained
        out = torch.cat([out_ir, out_vi], 1)
        out = self.bottelblock(out)
        out = f_init + out
        return out


# Fusion network, 4 groups of features
class Fusion_network(nn.Module):
    def __init__(self, nC, fs_type):
        super(Fusion_network, self).__init__()
        self.fs_type = fs_type

        self.fusion_block1 = FusionBlock_res(nC[0], 0)
        self.fusion_block2 = FusionBlock_res(nC[1], 1)
        self.fusion_block3 = FusionBlock_res(nC[2], 2)
        self.fusion_block4 = FusionBlock_res(nC[3], 3)

    def forward(self, en_ir, en_vi):
        f1_0 = self.fusion_block1(en_ir[0], en_vi[0])
        f2_0 = self.fusion_block2(en_ir[1], en_vi[1])
        f3_0 = self.fusion_block3(en_ir[2], en_vi[2])
        f4_0 = self.fusion_block4(en_ir[3], en_vi[3])
        return [f1_0, f2_0, f3_0, f4_0]


class Fusion_ADD(torch.nn.Module):
    def forward(self, en_ir, en_vi):
        temp = en_ir + en_vi
        return temp


class Fusion_AVG(torch.nn.Module):
    def forward(self, en_ir, en_vi):
        temp = (en_ir + en_vi) / 2
        return temp


class Fusion_MAX(torch.nn.Module):
    def forward(self, en_ir, en_vi):
        temp = torch.max(en_ir, en_vi)
        return temp


class Fusion_SPA(torch.nn.Module):
    def forward(self, en_ir, en_vi):
        shape = en_ir.size()
        spatial_type = 'mean'
        # calculate spatial attention
        spatial1 = spatial_attention(en_ir, spatial_type)
        spatial2 = spatial_attention(en_vi, spatial_type)
        # get weight map, soft-max
        spatial_w1 = torch.exp(spatial1) / (torch.exp(spatial1) + torch.exp(spatial2) + EPSILON)
        spatial_w2 = torch.exp(spatial2) / (torch.exp(spatial1) + torch.exp(spatial2) + EPSILON)
        spatial_w1 = spatial_w1.repeat(1, shape[1], 1, 1)
        spatial_w2 = spatial_w2.repeat(1, shape[1], 1, 1)
        tensor_f = spatial_w1 * en_ir + spatial_w2 * en_vi
        return tensor_f


# spatial attention
def spatial_attention(tensor, spatial_type='sum'):
    spatial = []
    if spatial_type == 'mean':
        spatial = tensor.mean(dim=1, keepdim=True)
    elif spatial_type == 'sum':
        spatial = tensor.sum(dim=1, keepdim=True)
    return spatial


# fusion strategy based on nuclear-norm (channel attention from NestFuse)
class Fusion_Nuclear(torch.nn.Module):
    def forward(self, en_ir, en_vi):
        shape = en_ir.size()
        # calculate channel attention
        global_p1 = nuclear_pooling(en_ir)
        global_p2 = nuclear_pooling(en_vi)
        # get weight map
        global_p_w1 = global_p1 / (global_p1 + global_p2 + EPSILON)
        global_p_w2 = global_p2 / (global_p1 + global_p2 + EPSILON)
        global_p_w1 = global_p_w1.repeat(1, 1, shape[2], shape[3])
        global_p_w2 = global_p_w2.repeat(1, 1, shape[2], shape[3])
        tensor_f = global_p_w1 * en_ir + global_p_w2 * en_vi
        return tensor_f


# sum of singular values S (from SVD) for each channel
def nuclear_pooling(tensor):
    shape = tensor.size()
    vectors = torch.zeros(1, shape[1], 1, 1).cuda()
    for i in range(shape[1]):
        u, s, v = torch.svd(tensor[0, i, :, :] + EPSILON)
        s_sum = torch.sum(s)
        vectors[0, i, 0, 0] = s_sum
    return vectors


# Fusion strategy, two types
class Fusion_strategy(nn.Module):
    def __init__(self, fs_type):
        super(Fusion_strategy, self).__init__()
        self.fs_type = fs_type
        self.fusion_add = Fusion_ADD()
        self.fusion_avg = Fusion_AVG()
        self.fusion_max = Fusion_MAX()
        self.fusion_spa = Fusion_SPA()
        self.fusion_nuc = Fusion_Nuclear()

    def forward(self, en_ir, en_vi):
        if self.fs_type == 'add':
            fusion_operation = self.fusion_add
        elif self.fs_type == 'avg':
            fusion_operation = self.fusion_avg
        elif self.fs_type == 'max':
            fusion_operation = self.fusion_max
        elif self.fs_type == 'spa':
            fusion_operation = self.fusion_spa
        elif self.fs_type == 'nuclear':
            fusion_operation = self.fusion_nuc
        else:
            raise ValueError('unknown fusion strategy type: ' + str(self.fs_type))

        f1_0 = fusion_operation(en_ir[0], en_vi[0])
        f2_0 = fusion_operation(en_ir[1], en_vi[1])
        f3_0 = fusion_operation(en_ir[2], en_vi[2])
        f4_0 = fusion_operation(en_ir[3], en_vi[3])
        return [f1_0, f2_0, f3_0, f4_0]


# NestFuse network - light, no dense
class NestFuse_light2_nodense(nn.Module):
    def __init__(self, nb_filter, input_nc=1, output_nc=1, deepsupervision=True):
        super(NestFuse_light2_nodense, self).__init__()
        self.deepsupervision =
deepsupervision block = DenseBlock_light output_filter = 16 kernel_size = 3 stride = 1 self.pool = nn.MaxPool2d(2, 2) self.up = nn.Upsample(scale_factor=2) self.up_eval = UpsampleReshape_eval() # encoder self.conv0 = ConvLayer(input_nc, output_filter, 1, stride) self.DB1_0 = block(output_filter, nb_filter[0], kernel_size, 1) self.DB2_0 = block(nb_filter[0], nb_filter[1], kernel_size, 1) self.DB3_0 = block(nb_filter[1], nb_filter[2], kernel_size, 1) self.DB4_0 = block(nb_filter[2], nb_filter[3], kernel_size, 1) # decoder self.DB1_1 = block(nb_filter[0] + nb_filter[1], nb_filter[0], kernel_size, 1) self.DB2_1 = block(nb_filter[1] + nb_filter[2], nb_filter[1], kernel_size, 1) self.DB3_1 = block(nb_filter[2] + nb_filter[3], nb_filter[2], kernel_size, 1) # # no short connection # self.DB1_2 = block(nb_filter[0] + nb_filter[1], nb_filter[0], kernel_size, 1) # self.DB2_2 = block(nb_filter[1] + nb_filter[2], nb_filter[1], kernel_size, 1) # self.DB1_3 = block(nb_filter[0] + nb_filter[1], nb_filter[0], kernel_size, 1) # short connection self.DB1_2 = block(nb_filter[0] * 2 + nb_filter[1], nb_filter[0], kernel_size, 1) self.DB2_2 = block(nb_filter[1] * 2+ nb_filter[2], nb_filter[1], kernel_size, 1) self.DB1_3 = block(nb_filter[0] * 3 + nb_filter[1], nb_filter[0], kernel_size, 1) if self.deepsupervision: self.conv1 = ConvLayer(nb_filter[0], output_nc, 1, stride) self.conv2 = ConvLayer(nb_filter[0], output_nc, 1, stride) self.conv3 = ConvLayer(nb_filter[0], output_nc, 1, stride) # self.conv4 = ConvLayer(nb_filter[0], output_nc, 1, stride) else: self.conv_out = ConvLayer(nb_filter[0], output_nc, 1, stride) def encoder(self, input): x = self.conv0(input) x1_0 = self.DB1_0(x) x2_0 = self.DB2_0(self.pool(x1_0)) x3_0 = self.DB3_0(self.pool(x2_0)) x4_0 = self.DB4_0(self.pool(x3_0)) # x5_0 = self.DB5_0(self.pool(x4_0)) return [x1_0, x2_0, x3_0, x4_0] def decoder_train(self, f_en): x1_1 = self.DB1_1(torch.cat([f_en[0], self.up(f_en[1])], 1)) x2_1 = self.DB2_1(torch.cat([f_en[1], self.up(f_en[2])], 1)) x1_2 = self.DB1_2(torch.cat([f_en[0], x1_1, self.up(x2_1)], 1)) x3_1 = self.DB3_1(torch.cat([f_en[2], self.up(f_en[3])], 1)) x2_2 = self.DB2_2(torch.cat([f_en[1], x2_1, self.up(x3_1)], 1)) x1_3 = self.DB1_3(torch.cat([f_en[0], x1_1, x1_2, self.up(x2_2)], 1)) if self.deepsupervision: output1 = self.conv1(x1_1) output2 = self.conv2(x1_2) output3 = self.conv3(x1_3) # output4 = self.conv4(x1_4) return [output1, output2, output3] else: output = self.conv_out(x1_3) return [output] def decoder_eval(self, f_en): x1_1 = self.DB1_1(torch.cat([f_en[0], self.up_eval(f_en[0], f_en[1])], 1)) x2_1 = self.DB2_1(torch.cat([f_en[1], self.up_eval(f_en[1], f_en[2])], 1)) x1_2 = self.DB1_2(torch.cat([f_en[0], x1_1, self.up_eval(f_en[0], x2_1)], 1)) x3_1 = self.DB3_1(torch.cat([f_en[2], self.up_eval(f_en[2], f_en[3])], 1)) x2_2 = self.DB2_2(torch.cat([f_en[1], x2_1, self.up_eval(f_en[1], x3_1)], 1)) x1_3 = self.DB1_3(torch.cat([f_en[0], x1_1, x1_2, self.up_eval(f_en[0], x2_2)], 1)) if self.deepsupervision: output1 = self.conv1(x1_1) output2 = self.conv2(x1_2) output3 = self.conv3(x1_3) # output4 = self.conv4(x1_4) return [output1, output2, output3] else: output = self.conv_out(x1_3) return [output]
12,480
34.157746
96
py