def add_preprocess_args(parser): group = parser.add_argument_group('Preprocessing') group.add_argument('-s', '--source-lang', default=None, metavar='SRC', help='source language') group.add_argument('-t', '--target-lang', default=None, metavar='TARGET', help='target language') group.add_argument('--trainpref', metavar='FP', default=None, help='train file prefix') group.add_argument('--validpref', metavar='FP', default=None, help='comma separated, valid file prefixes') group.add_argument('--testpref', metavar='FP', default=None, help='comma separated, test file prefixes') group.add_argument('--align-suffix', metavar='FP', default=None, help='alignment file suffix') group.add_argument('--destdir', metavar='DIR', default='data-bin', help='destination dir') group.add_argument('--thresholdtgt', metavar='N', default=0, type=int, help='map words appearing less than threshold times to unknown') group.add_argument('--thresholdsrc', metavar='N', default=0, type=int, help='map words appearing less than threshold times to unknown') group.add_argument('--tgtdict', metavar='FP', help='reuse given target dictionary') group.add_argument('--srcdict', metavar='FP', help='reuse given source dictionary') group.add_argument('--nwordstgt', metavar='N', default=(- 1), type=int, help='number of target words to retain') group.add_argument('--nwordssrc', metavar='N', default=(- 1), type=int, help='number of source words to retain') group.add_argument('--alignfile', metavar='ALIGN', default=None, help='an alignment file (optional)') parser.add_argument('--dataset-impl', metavar='FORMAT', default='mmap', choices=get_available_dataset_impl(), help='output dataset implementation') group.add_argument('--joined-dictionary', action='store_true', help='Generate joined dictionary') group.add_argument('--only-source', action='store_true', help='Only process the source language') group.add_argument('--padding-factor', metavar='N', default=8, type=int, help='Pad dictionary size to be multiple of N') group.add_argument('--workers', metavar='N', default=1, type=int, help='number of parallel workers') return parser
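# Usage sketch (not part of the original module): build a standalone
# preprocessing parser from the helper above. Assumes add_preprocess_args and
# its get_available_dataset_impl dependency are importable / in scope.
import argparse

parser = argparse.ArgumentParser(description='fairseq preprocessing (sketch)')
add_preprocess_args(parser)

# e.g. binarize a German->English corpus into ./data-bin with 4 workers
args = parser.parse_args([
    '--source-lang', 'de',
    '--target-lang', 'en',
    '--trainpref', 'train',
    '--validpref', 'valid',
    '--destdir', 'data-bin',
    '--workers', '4',
])
print(args.source_lang, args.target_lang, args.dataset_impl)  # de en mmap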
def add_dataset_args(parser, train=False, gen=False): group = parser.add_argument_group('Dataset and data loading') group.add_argument('--num-workers', default=1, type=int, metavar='N', help='how many subprocesses to use for data loading') group.add_argument('--skip-invalid-size-inputs-valid-test', action='store_true', help='ignore too long or too short lines in valid and test set') group.add_argument('--max-tokens', type=int, metavar='N', help='maximum number of tokens in a batch') group.add_argument('--max-sentences', '--batch-size', type=int, metavar='N', help='maximum number of sentences in a batch') group.add_argument('--required-batch-size-multiple', default=8, type=int, metavar='N', help='batch size will be a multiplier of this value') parser.add_argument('--dataset-impl', metavar='FORMAT', choices=get_available_dataset_impl(), help='output dataset implementation') if train: group.add_argument('--train-subset', default='train', metavar='SPLIT', choices=['train', 'valid', 'test'], help='data subset to use for training (train, valid, test)') group.add_argument('--valid-subset', default='valid', metavar='SPLIT', help='comma separated list of data subsets to use for validation (train, valid, valid1, test, test1)') group.add_argument('--validate-interval', type=int, default=1, metavar='N', help='validate every N epochs') group.add_argument('--fixed-validation-seed', default=None, type=int, metavar='N', help='specified random seed for validation') group.add_argument('--disable-validation', action='store_true', help='disable validation') group.add_argument('--max-tokens-valid', type=int, metavar='N', help='maximum number of tokens in a validation batch (defaults to --max-tokens)') group.add_argument('--max-sentences-valid', type=int, metavar='N', help='maximum number of sentences in a validation batch (defaults to --max-sentences)') group.add_argument('--curriculum', default=0, type=int, metavar='N', help="don't shuffle batches for first N epochs") if gen: group.add_argument('--gen-subset', default='test', metavar='SPLIT', help='data subset to generate (train, valid, test)') group.add_argument('--num-shards', default=1, type=int, metavar='N', help='shard generation over N shards') group.add_argument('--shard-id', default=0, type=int, metavar='ID', help='id of the shard to generate (id < num_shards)') return group
def add_distributed_training_args(parser):
    group = parser.add_argument_group('Distributed training')
    group.add_argument('--distributed-world-size', type=int, metavar='N',
                       default=max(1, torch.cuda.device_count()),
                       help='total number of GPUs across all nodes (default: all visible GPUs)')
    group.add_argument('--distributed-rank', default=0, type=int,
                       help='rank of the current worker')
    group.add_argument('--distributed-backend', default='nccl', type=str,
                       help='distributed backend')
    group.add_argument('--distributed-init-method', default=None, type=str,
                       help='typically tcp://hostname:port that will be used to establish initial connection')
    group.add_argument('--distributed-port', default=-1, type=int,
                       help='port number (not required if using --distributed-init-method)')
    group.add_argument('--device-id', '--local_rank', default=0, type=int,
                       help='which GPU to use (usually configured automatically)')
    group.add_argument('--distributed-no-spawn', action='store_true',
                       help='do not spawn multiple processes even if multiple GPUs are visible')
    group.add_argument('--ddp-backend', default='c10d', type=str, choices=['c10d', 'no_c10d'],
                       help='DistributedDataParallel backend')
    group.add_argument('--bucket-cap-mb', default=25, type=int, metavar='MB',
                       help='bucket size for reduction')
    group.add_argument('--fix-batches-to-gpus', action='store_true',
                       help="don't shuffle batches between GPUs; this reduces overall randomness and may affect precision but avoids the cost of re-reading the data")
    group.add_argument('--find-unused-parameters', default=False, action='store_true',
                       help='disable unused parameter detection (not applicable to no_c10d ddp-backend)')
    group.add_argument('--fast-stat-sync', default=False, action='store_true',
                       help='Enable fast sync of stats between nodes, this hardcodes to sync only some default stats from logging_output.')
    return group
def add_optimization_args(parser): group = parser.add_argument_group('Optimization') group.add_argument('--max-epoch', '--me', default=0, type=int, metavar='N', help='force stop training at specified epoch') group.add_argument('--max-update', '--mu', default=0, type=int, metavar='N', help='force stop training at specified update') group.add_argument('--clip-norm', default=25, type=float, metavar='NORM', help='clip threshold of gradients') group.add_argument('--sentence-avg', action='store_true', help='normalize gradients by the number of sentences in a batch (default is to normalize by number of tokens)') group.add_argument('--update-freq', default='1', metavar='N1,N2,...,N_K', type=(lambda uf: eval_str_list(uf, type=int)), help='update parameters every N_i batches, when in epoch i') group.add_argument('--lr', '--learning-rate', default='0.25', type=eval_str_list, metavar='LR_1,LR_2,...,LR_N', help='learning rate for the first N epochs; all epochs >N using LR_N (note: this may be interpreted differently depending on --lr-scheduler)') group.add_argument('--min-lr', default=(- 1), type=float, metavar='LR', help='stop training when the learning rate reaches this minimum') group.add_argument('--use-bmuf', default=False, action='store_true', help='specify global optimizer for syncing models on different GPUs/shards') return group
def add_checkpoint_args(parser): group = parser.add_argument_group('Checkpointing') group.add_argument('--save-dir', metavar='DIR', default='checkpoints', help='path to save checkpoints') group.add_argument('--restore-file', default='checkpoint_last.pt', help='filename from which to load checkpoint (default: <save-dir>/checkpoint_last.pt') group.add_argument('--reset-dataloader', action='store_true', help='if set, does not reload dataloader state from the checkpoint') group.add_argument('--reset-lr-scheduler', action='store_true', help='if set, does not load lr scheduler state from the checkpoint') group.add_argument('--reset-meters', action='store_true', help='if set, does not load meters from the checkpoint') group.add_argument('--reset-optimizer', action='store_true', help='if set, does not load optimizer state from the checkpoint') group.add_argument('--optimizer-overrides', default='{}', type=str, metavar='DICT', help='a dictionary used to override optimizer args when loading a checkpoint') group.add_argument('--save-interval', type=int, default=1, metavar='N', help='save a checkpoint every N epochs') group.add_argument('--save-interval-updates', type=int, default=0, metavar='N', help='save a checkpoint (and validate) every N updates') group.add_argument('--keep-interval-updates', type=int, default=(- 1), metavar='N', help='keep the last N checkpoints saved with --save-interval-updates') group.add_argument('--keep-last-epochs', type=int, default=(- 1), metavar='N', help='keep last N epoch checkpoints') group.add_argument('--no-save', action='store_true', help="don't save models or checkpoints") group.add_argument('--no-epoch-checkpoints', action='store_true', help='only store last and best checkpoints') group.add_argument('--no-last-checkpoints', action='store_true', help="don't store last checkpoints") group.add_argument('--no-save-optimizer-state', action='store_true', help="don't save optimizer-state as part of checkpoint") group.add_argument('--best-checkpoint-metric', type=str, default='loss', help='metric to use for saving "best" checkpoints') group.add_argument('--maximize-best-checkpoint-metric', action='store_true', help='select the largest metric value for saving "best" checkpoints') group.add_argument('--early-stop', default=10000, type=int) return group
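# Hedged sketch of composing several of the helpers above into a
# training-style parser. In the real code base an entry point (not shown
# here) does this wiring and also adds common and model options;
# add_dataset_args additionally expects get_available_dataset_impl in scope.
import argparse

train_parser = argparse.ArgumentParser(description='fairseq training (sketch)')
add_dataset_args(train_parser, train=True)
add_checkpoint_args(train_parser)

train_args = train_parser.parse_args([
    '--max-tokens', '4096',
    '--save-dir', 'checkpoints/demo',
    '--save-interval-updates', '1000',
])
print(train_args.max_tokens, train_args.save_dir, train_args.keep_last_epochs)  # 4096 checkpoints/demo -1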
def add_common_eval_args(group):
    group.add_argument('--path', metavar='FILE',
                       help='path(s) to model file(s), colon separated')
    group.add_argument('--remove-bpe', nargs='?', const='@@ ', default=None,
                       help='remove BPE tokens before scoring (can be set to sentencepiece)')
    group.add_argument('--quiet', action='store_true',
                       help='only print final scores')
    group.add_argument('--model-overrides', default='{}', type=str, metavar='DICT',
                       help='a dictionary used to override model args at generation that were used during model training')
    group.add_argument('--results-path', metavar='RESDIR', type=str, default=None,
                       help='path to save eval results (optional)')
def add_eval_lm_args(parser): group = parser.add_argument_group('LM Evaluation') add_common_eval_args(group) group.add_argument('--output-word-probs', action='store_true', help='if set, outputs words and their predicted log probabilities to standard output') group.add_argument('--output-word-stats', action='store_true', help='if set, outputs word statistics such as word count, average probability, etc') group.add_argument('--context-window', default=0, type=int, metavar='N', help='ensures that every evaluated token has access to a context of at least this size, if possible') group.add_argument('--softmax-batch', default=sys.maxsize, type=int, metavar='N', help='if BxT is more than this, will batch the softmax over vocab to this amount of tokens in order to fit into GPU memory')
def add_generation_args(parser): group = parser.add_argument_group('Generation') add_common_eval_args(group) group.add_argument('--beam', default=5, type=int, metavar='N', help='beam size') group.add_argument('--nbest', default=1, type=int, metavar='N', help='number of hypotheses to output') group.add_argument('--max-len-a', default=0, type=float, metavar='N', help='generate sequences of maximum length ax + b, where x is the source length') group.add_argument('--max-len-b', default=200, type=int, metavar='N', help='generate sequences of maximum length ax + b, where x is the source length') group.add_argument('--min-len', default=1, type=float, metavar='N', help='minimum generation length') group.add_argument('--match-source-len', default=False, action='store_true', help='generations should match the source length') group.add_argument('--no-early-stop', action='store_true', help='deprecated') group.add_argument('--unnormalized', action='store_true', help='compare unnormalized hypothesis scores') group.add_argument('--no-beamable-mm', action='store_true', help="don't use BeamableMM in attention layers") group.add_argument('--lenpen', default=1, type=float, help='length penalty: <1.0 favors shorter, >1.0 favors longer sentences') group.add_argument('--unkpen', default=0, type=float, help='unknown word penalty: <0 produces more unks, >0 produces fewer') group.add_argument('--replace-unk', nargs='?', const=True, default=None, help='perform unknown replacement (optionally with alignment dictionary)') group.add_argument('--sacrebleu', action='store_true', help='score with sacrebleu') group.add_argument('--score-reference', action='store_true', help='just score the reference translation') group.add_argument('--prefix-size', default=0, type=int, metavar='PS', help='initialize generation by target prefix of given length') group.add_argument('--no-repeat-ngram-size', default=0, type=int, metavar='N', help='ngram blocking such that this size ngram cannot be repeated in the generation') group.add_argument('--sampling', action='store_true', help='sample hypotheses instead of using beam search') group.add_argument('--sampling-topk', default=(- 1), type=int, metavar='PS', help='sample from top K likely next words instead of all words') group.add_argument('--sampling-topp', default=(- 1.0), type=float, metavar='PS', help='sample from the smallest set whose cumulative probability mass exceeds p for next words') group.add_argument('--temperature', default=1.0, type=float, metavar='N', help='temperature for generation') group.add_argument('--diverse-beam-groups', default=(- 1), type=int, metavar='N', help='number of groups for Diverse Beam Search') group.add_argument('--diverse-beam-strength', default=0.5, type=float, metavar='N', help='strength of diversity penalty for Diverse Beam Search') group.add_argument('--print-alignment', action='store_true', help='if set, uses attention feedback to compute and print alignment to source tokens') group.add_argument('--print-step', action='store_true') group.add_argument('--iter-decode-eos-penalty', default=0.0, type=float, metavar='N', help='if > 0.0, it penalized early-stopping in decoding.') group.add_argument('--iter-decode-max-iter', default=10, type=int, metavar='N', help='maximum iterations for iterative refinement.') group.add_argument('--iter-decode-force-max-iter', action='store_true', help='if set, run exact the maximum number of iterations without early stop') group.add_argument('--decoding-format', default=None, type=str, choices=['unigram', 'ensemble', 
'vote', 'dp', 'bs']) return group
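# Usage sketch for the generation options; add_generation_args pulls in
# add_common_eval_args (defined above) for the shared evaluation flags.
import argparse

gen_parser = argparse.ArgumentParser(description='fairseq generation (sketch)')
add_generation_args(gen_parser)

gen_args = gen_parser.parse_args([
    '--path', 'checkpoints/demo/checkpoint_best.pt',
    '--beam', '10',
    '--lenpen', '1.2',
    '--remove-bpe',
])
print(gen_args.beam, gen_args.lenpen, gen_args.remove_bpe)  # 10 1.2 '@@ '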
def add_interactive_args(parser):
    group = parser.add_argument_group('Interactive')
    group.add_argument('--buffer-size', default=0, type=int, metavar='N',
                       help='read this many sentences into a buffer before processing them')
    group.add_argument('--input', default='-', type=str, metavar='FILE',
                       help='file to read from; use - for stdin')
def add_model_args(parser):
    group = parser.add_argument_group('Model configuration')
    from fairseq.models import ARCH_MODEL_REGISTRY
    group.add_argument('--arch', '-a', default='fconv', metavar='ARCH', required=True,
                       choices=ARCH_MODEL_REGISTRY.keys(),
                       help='Model Architecture')
    return group
class MultiprocessingPdb(pdb.Pdb):
    """A Pdb wrapper that works in a multiprocessing environment.

    Usage: `from fairseq import pdb; pdb.set_trace()`
    """

    def __init__(self):
        pdb.Pdb.__init__(self, nosigint=True)

    def _cmdloop(self):
        stdin_bak = sys.stdin
        with _stdin_lock:
            try:
                if _stdin_fd is not None:
                    if not _stdin[0]:
                        _stdin[0] = os.fdopen(_stdin_fd)
                    sys.stdin = _stdin[0]
                self.cmdloop()
            finally:
                sys.stdin = stdin_bak
def set_trace():
    pdb = MultiprocessingPdb()
    pdb.set_trace(sys._getframe().f_back)
class Trainer(object): 'Main class for data parallel training.\n\n This class supports synchronous distributed data parallel training,\n where multiple workers each have a full model replica and gradients\n are accumulated across workers before each update. We use\n :class:`~torch.nn.parallel.DistributedDataParallel` to handle\n communication of the gradients across workers.\n ' def __init__(self, args, task, model, criterion, dummy_batch=None, oom_batch=None): self.args = args self.task = task self._criterion = criterion self._model = model self.cuda = (torch.cuda.is_available() and (not args.cpu)) if args.fp16: self._criterion = self._criterion.half() self._model = self._model.half() if self.cuda: self._criterion = self._criterion.cuda() self._model = self._model.cuda() self._dummy_batch = dummy_batch self._oom_batch = (oom_batch or dummy_batch) self._lr_scheduler = None self._num_updates = 0 self._optim_history = None self._optimizer = None self._prev_grad_norm = None self._wrapped_criterion = None self._wrapped_model = None self._all_reduce_list = ([0.0] * 6) self.fast_stat_sync = args.fast_stat_sync self.init_meters(args) def init_meters(self, args): self.meters = OrderedDict() self.meters['train_loss'] = AverageMeter() self.meters['train_nll_loss'] = AverageMeter() self.meters['valid_loss'] = AverageMeter() self.meters['valid_nll_loss'] = AverageMeter() self.meters['wps'] = TimeMeter() self.meters['ups'] = TimeMeter() self.meters['wpb'] = AverageMeter() self.meters['bsz'] = AverageMeter() self.meters['gnorm'] = AverageMeter() self.meters['clip'] = AverageMeter() self.meters['oom'] = AverageMeter() if args.fp16: self.meters['loss_scale'] = AverageMeter() self.meters['wall'] = TimeMeter() self.meters['train_wall'] = StopwatchMeter() @property def criterion(self): if (self._wrapped_criterion is None): if (utils.has_parameters(self._criterion) and (self.args.distributed_world_size > 1) and (not self.args.use_bmuf)): self._wrapped_criterion = models.DistributedFairseqModel(self.args, self._criterion) else: self._wrapped_criterion = self._criterion return self._wrapped_criterion @property def model(self): if (self._wrapped_model is None): if ((self.args.distributed_world_size > 1) and (not self.args.use_bmuf)): self._wrapped_model = models.DistributedFairseqModel(self.args, self._model) else: self._wrapped_model = self._model return self._wrapped_model @property def optimizer(self): if (self._optimizer is None): self._build_optimizer() return self._optimizer @property def lr_scheduler(self): if (self._lr_scheduler is None): self._build_optimizer() return self._lr_scheduler def _build_optimizer(self): params = list(filter((lambda p: p.requires_grad), chain(self.model.parameters(), self.criterion.parameters()))) if self.args.fp16: if (self.cuda and (torch.cuda.get_device_capability(0)[0] < 7)): print('| WARNING: your device does NOT support faster training with --fp16, please switch to FP32 which is likely to be faster') if self.args.memory_efficient_fp16: self._optimizer = optim.MemoryEfficientFP16Optimizer.build_optimizer(self.args, params) else: self._optimizer = optim.FP16Optimizer.build_optimizer(self.args, params) else: if (self.cuda and (torch.cuda.get_device_capability(0)[0] >= 7)): print('| NOTICE: your device may support faster training with --fp16') self._optimizer = optim.build_optimizer(self.args, params) if self.args.use_bmuf: self._optimizer = optim.FairseqBMUF(self.args, self._optimizer) self._lr_scheduler = lr_scheduler.build_lr_scheduler(self.args, self.optimizer) 
self._lr_scheduler.step_update(0) def save_checkpoint(self, filename, extra_state): 'Save all training state in a checkpoint file.' if distributed_utils.is_master(self.args): extra_state['train_meters'] = self.meters checkpoint_utils.save_state(filename, self.args, self.get_model().state_dict(), self.get_criterion(), self.optimizer, self.lr_scheduler, self.get_num_updates(), self._optim_history, extra_state) def load_checkpoint(self, filename, reset_optimizer=False, reset_lr_scheduler=False, optimizer_overrides=None, reset_meters=False): 'Load all training state from a checkpoint file.' (extra_state, self._optim_history, last_optim_state) = (None, [], None) try: from fairseq.fb_pathmgr import fb_pathmgr bexists = fb_pathmgr.isfile(filename) except Exception: bexists = os.path.exists(filename) if bexists: state = checkpoint_utils.load_checkpoint_to_cpu(filename) try: self.get_model().load_state_dict(state['model'], strict=True, args=self.args) if utils.has_parameters(self.get_criterion()): self.get_criterion().load_state_dict(state['criterion'], strict=True) except Exception: raise Exception('Cannot load model parameters from checkpoint {}; please ensure that the architectures match.'.format(filename)) extra_state = state['extra_state'] self._optim_history = state['optimizer_history'] last_optim_state = state.get('last_optimizer_state', None) if ((last_optim_state is not None) and (not reset_optimizer)): self._build_optimizer() last_optim = self._optim_history[(- 1)] assert (last_optim['criterion_name'] == self.get_criterion().__class__.__name__), 'Criterion does not match; please reset the optimizer (--reset-optimizer).' assert (last_optim['optimizer_name'] == self.optimizer.__class__.__name__), 'Optimizer does not match; please reset the optimizer (--reset-optimizer).' if (not reset_lr_scheduler): self.lr_scheduler.load_state_dict(last_optim['lr_scheduler_state']) self.optimizer.load_state_dict(last_optim_state, optimizer_overrides) self.set_num_updates(last_optim['num_updates']) if (extra_state is not None): epoch = extra_state['train_iterator']['epoch'] print('| loaded checkpoint {} (epoch {} @ {} updates)'.format(filename, epoch, self.get_num_updates())) self.lr_step(epoch) if (('train_meters' in extra_state) and (not reset_meters)): self.meters.update(extra_state['train_meters']) del extra_state['train_meters'] for meter in self.meters.values(): if isinstance(meter, TimeMeter): meter.reset() else: print('| no existing checkpoint found {}'.format(filename)) return extra_state def get_train_iterator(self, epoch, combine=True, load_dataset=True, data_selector=None): 'Return an EpochBatchIterator over the training set for a given epoch.' if load_dataset: print('| loading train data for epoch {}'.format(epoch)) self.task.load_dataset(self.args.train_subset, epoch=epoch, combine=combine, data_selector=data_selector) return self.task.get_batch_iterator(dataset=self.task.dataset(self.args.train_subset), max_tokens=self.args.max_tokens, max_sentences=self.args.max_sentences, max_positions=utils.resolve_max_positions(self.task.max_positions(), self.model.max_positions()), ignore_invalid_inputs=True, required_batch_size_multiple=self.args.required_batch_size_multiple, seed=self.args.seed, num_shards=self.args.distributed_world_size, shard_id=self.args.distributed_rank, num_workers=self.args.num_workers, epoch=epoch) def train_step(self, samples, dummy_batch=False, raise_oom=False): 'Do forward, backward and parameter update.' 
if (self._dummy_batch is None): self._dummy_batch = samples[0] self._set_seed() self.model.train() self.criterion.train() self.zero_grad() if (not dummy_batch): self.meters['train_wall'].start() (logging_outputs, sample_sizes, ooms) = ([], [], 0) for (i, sample) in enumerate(samples): sample = self._prepare_sample(sample) if (sample is None): sample = self._prepare_sample(self._dummy_batch) ignore_grad = True else: ignore_grad = False def maybe_no_sync(): '\n Whenever *samples* contains more than one mini-batch, we\n want to accumulate gradients locally and only call\n all-reduce in the last backwards pass.\n ' if ((self.args.distributed_world_size > 1) and hasattr(self.model, 'no_sync') and (i < (len(samples) - 1))): return self.model.no_sync() else: return contextlib.ExitStack() try: with maybe_no_sync(): (loss, sample_size, logging_output) = self.task.train_step(sample, self.model, self.criterion, self.optimizer, ignore_grad) if (not ignore_grad): logging_outputs.append(logging_output) sample_sizes.append(sample_size) if self.fast_stat_sync: self._all_reduce_list[0] += sample_size self._all_reduce_list[1] += logging_output.get('nsentences', 0.0) self._all_reduce_list[2] += logging_output.get('loss', 0.0) self._all_reduce_list[3] += logging_output.get('nll_loss', 0.0) self._all_reduce_list[4] += logging_output.get('ntokens', 0.0) except RuntimeError as e: if ('out of memory' in str(e)): msg = (('| WARNING: ran out of memory with exception: ' + '{};'.format(e)) + '\n Skipping batch') print(msg, file=sys.stderr) if (torch.cuda.is_available() and hasattr(torch.cuda, 'memory_summary')): for device_idx in range(torch.cuda.device_count()): print(torch.cuda.memory_summary(device=device_idx), file=sys.stderr) sys.stderr.flush() if raise_oom: raise ValueError(msg) ooms += 1 self.zero_grad() else: raise e if self.fast_stat_sync: self._all_reduce_list[5] += ooms if ((ooms > 0) and (self._oom_batch is not None)): self.handle_ooms(ooms) if dummy_batch: return None if self.fast_stat_sync: all_reduce_list_tensor = torch.cuda.DoubleTensor(self._all_reduce_list) if self._sync_stats(): torch.distributed.all_reduce(all_reduce_list_tensor) all_reduce_list_tensor[2:4].div_((all_reduce_list_tensor[0:1] * torch.log(torch.cuda.DoubleTensor([2])))) self._all_reduce_list = all_reduce_list_tensor.tolist() logging_output = {} [sample_size, logging_output['nsentences'], logging_output['loss'], logging_output['nll_loss'], logging_output['ntokens'], ooms] = self._all_reduce_list elif self._sync_stats(): (logging_outputs, sample_sizes, ooms, prev_norms) = zip(*distributed_utils.all_gather_list([logging_outputs, sample_sizes, ooms, self._prev_grad_norm])) logging_outputs = list(chain.from_iterable(logging_outputs)) sample_sizes = list(chain.from_iterable(sample_sizes)) ooms = sum(ooms) if (not self.args.use_bmuf): assert (all(((norm == prev_norms[0]) for norm in prev_norms)) or all(((math.isnan(norm) or math.isinf(norm)) for norm in prev_norms))), 'Fatal error: gradients are inconsistent between workers' self.meters['oom'].update(ooms, len(samples)) if (ooms == (self.args.distributed_world_size * len(samples))): print('| WARNING: OOM in all workers, skipping update') self.zero_grad() return None if (not self.fast_stat_sync): logging_output = self.task.aggregate_logging_outputs(logging_outputs, self.get_criterion()) sample_size = self.task.grad_denom(sample_sizes, self.get_criterion()) if (not all(((k in logging_output) for k in ['ntokens', 'nsentences']))): raise Exception('Please update the 
{}.aggregate_logging_outputs() method to return ntokens and nsentences'.format(self.task.__class__.__name__)) try: grad_norm = 0.0 self.set_num_updates((self.get_num_updates() + 1)) self.task.update_step(self._num_updates) ntokens = logging_output.get('ntokens', 0) nsentences = logging_output.get('nsentences', 0) self.meters['wps'].update(ntokens) self.meters['ups'].update(1.0) self.meters['wpb'].update(ntokens) self.meters['bsz'].update(nsentences) self.meters['gnorm'].update(grad_norm) self.meters['clip'].update((1.0 if ((grad_norm > self.args.clip_norm) and (self.args.clip_norm > 0)) else 0.0)) self.meters['train_loss'].update(logging_output.get('loss', 0), sample_size) if ('train_acc' in self.meters): self.meters['train_acc'].update(logging_output.get('acc', 0), sample_size) if ('nll_loss' in logging_output): self.meters['train_nll_loss'].update(logging_output.get('nll_loss', 0), ntokens) if ((self.args.empty_cache_freq > 0) and ((((self.get_num_updates() + self.args.empty_cache_freq) - 1) % self.args.empty_cache_freq) == 0) and torch.cuda.is_available() and (not self.args.cpu)): torch.cuda.empty_cache() except OverflowError as e: print(('| WARNING: overflow detected, ' + str(e))) self.zero_grad() logging_output = None if self.args.fp16: self.meters['loss_scale'].reset() self.meters['loss_scale'].update(self.optimizer.scaler.loss_scale) self.clear_buffered_stats() self.meters['train_wall'].stop() return logging_output def valid_step(self, sample, raise_oom=False): 'Do forward pass in evaluation mode.' with torch.no_grad(): self.model.eval() self.criterion.eval() sample = self._prepare_sample(sample) if (sample is None): sample = self._prepare_sample(self._dummy_batch) ignore_results = True else: ignore_results = False try: (_loss, sample_size, logging_output) = self.task.valid_step(sample, self.model, self.criterion) except RuntimeError as e: if (('out of memory' in str(e)) and (not raise_oom)): print('| WARNING: ran out of memory, retrying batch') for p in self.model.parameters(): if (p.grad is not None): p.grad = None if self.cuda: torch.cuda.empty_cache() return self.valid_step(sample, raise_oom=True) else: raise e if ignore_results: (logging_output, sample_size) = ({}, 0) if (self.args.distributed_world_size > 1): (logging_output, sample_size) = zip(*distributed_utils.all_gather_list([logging_output, sample_size])) logging_output = list(logging_output) sample_size = list(sample_size) else: logging_output = [logging_output] sample_size = [sample_size] logging_output = self.task.aggregate_logging_outputs(logging_output, self.get_criterion()) sample_size = self.task.grad_denom(sample_size, self.get_criterion()) ntokens = logging_output.get('ntokens', 0) self.meters['valid_loss'].update(logging_output.get('loss', 0), sample_size) if ('valid_acc' in self.meters): self.meters['valid_acc'].update(logging_output.get('acc', 0), sample_size) if ('nll_loss' in logging_output): self.meters['valid_nll_loss'].update(logging_output.get('nll_loss', 0), ntokens) return logging_output def dummy_train_step(self, dummy_batch): 'Dummy training step for warming caching allocator.' 
self.train_step(dummy_batch, dummy_batch=True) self.zero_grad() def handle_ooms(self, number_of_ooms): '\n c10d accumulates/syncs gradients between gpus during backward pass.\n In case of OOMs, gpus may fail to sync, so we manually iterate\n extra to make sure each gpu makes same number of iterations.\n ' for _ in range(number_of_ooms): self.train_step([self._oom_batch], True) def zero_grad(self): self.optimizer.zero_grad() def clear_buffered_stats(self): self._all_reduce_list = ([0.0] * 6) def lr_step(self, epoch, val_loss=None): 'Adjust the learning rate based on the validation loss.' self.lr_scheduler.step(epoch, val_loss) return self.lr_step_update() def lr_step_update(self): 'Update the learning rate after each update.' return self.lr_scheduler.step_update(self.get_num_updates()) def get_lr(self): 'Get the current learning rate.' return self.optimizer.get_lr() def get_model(self): 'Get the (non-wrapped) model instance.' return self._model def get_criterion(self): 'Get the (non-wrapped) criterion instance.' return self._criterion def get_meter(self, name): 'Get a specific meter by name.' if (name not in self.meters): return None return self.meters[name] def get_num_updates(self): 'Get the number of parameters updates.' return self._num_updates def set_num_updates(self, num_updates): 'Set the number of parameters updates.' self._num_updates = num_updates self.lr_step_update() def _prepare_sample(self, sample): if ((sample is None) or (len(sample) == 0)): return None if self.cuda: sample = utils.move_to_cuda(sample) def apply_half(t): if (t.dtype is torch.float32): return t.half() return t if self.args.fp16: sample = utils.apply_to_sample(apply_half, sample) return sample def _set_seed(self): seed = (self.args.seed + self.get_num_updates()) torch.manual_seed(seed) if self.cuda: torch.cuda.manual_seed(seed) def _sync_stats(self): return ((self.args.distributed_world_size > 1) and ((not self.args.use_bmuf) or (self.args.use_bmuf and (((self.get_num_updates() + 1) % self.args.global_sync_iter) == 0))))
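# Hedged sketch of the outer loop that drives Trainer above. A real fairseq
# run (train.py) also handles validation, checkpoint saving and progress
# bars; here `args`, `task`, `model` and `criterion` are assumed to come from
# a full training parser and from task.build_model()/task.build_criterion(),
# and next_epoch_itr is assumed from fairseq's EpochBatchIterator.
def train_epoch_sketch(trainer, args, epoch):
    epoch_itr = trainer.get_train_iterator(epoch)
    itr = epoch_itr.next_epoch_itr(shuffle=(epoch > args.curriculum))
    update_freq = args.update_freq[-1] if isinstance(args.update_freq, list) else args.update_freq

    samples = []
    for sample in itr:
        samples.append(sample)
        if len(samples) == update_freq:
            # forward/backward over the accumulated mini-batches
            trainer.train_step(samples)
            samples = []
    if samples:
        trainer.train_step(samples)
    trainer.lr_step(epoch)

# trainer = Trainer(args, task, model, criterion)
# for epoch in range(1, args.max_epoch + 1):
#     train_epoch_sketch(trainer, args, epoch)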
def build_progress_bar(args, iterator, epoch=None, prefix=None, default='tqdm', no_progress_bar='none'): if (args.log_format is None): args.log_format = (no_progress_bar if args.no_progress_bar else default) if ((args.log_format == 'tqdm') and (not sys.stderr.isatty())): args.log_format = 'simple' if (args.log_format == 'json'): bar = json_progress_bar(iterator, epoch, prefix, args.log_interval) elif (args.log_format == 'none'): bar = noop_progress_bar(iterator, epoch, prefix) elif (args.log_format == 'simple'): bar = simple_progress_bar(iterator, epoch, prefix, args.log_interval) elif (args.log_format == 'tqdm'): bar = tqdm_progress_bar(iterator, epoch, prefix) else: raise ValueError('Unknown log format: {}'.format(args.log_format)) if (args.tbmf_wrapper and distributed_utils.is_master(args)): global g_tbmf_wrapper if (g_tbmf_wrapper is None): try: from fairseq.fb_tbmf_wrapper import fb_tbmf_wrapper except Exception: raise ImportError('fb_tbmf_wrapper package not found.') g_tbmf_wrapper = fb_tbmf_wrapper bar = g_tbmf_wrapper(bar, args, args.log_interval) elif (args.tensorboard_logdir and distributed_utils.is_master(args)): bar = tensorboard_log_wrapper(bar, args.tensorboard_logdir, args) return bar
def format_stat(stat):
    if isinstance(stat, Number):
        stat = '{:g}'.format(stat)
    elif isinstance(stat, AverageMeter):
        stat = '{:.3f}'.format(stat.avg)
    elif isinstance(stat, TimeMeter):
        stat = '{:g}'.format(round(stat.avg))
    elif isinstance(stat, StopwatchMeter):
        stat = '{:g}'.format(round(stat.sum))
    return stat
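# Small worked example of format_stat on plain numbers; the other branches
# need fairseq AverageMeter/TimeMeter/StopwatchMeter instances.
print(format_stat(3))        # '3'
print(format_stat(0.12345))  # '0.12345'
print(format_stat(1.5e-05))  # '1.5e-05'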
class progress_bar(object): 'Abstract class for progress bars.' def __init__(self, iterable, epoch=None, prefix=None): self.iterable = iterable self.offset = getattr(iterable, 'offset', 0) self.epoch = epoch self.prefix = '' if (epoch is not None): self.prefix += '| epoch {:03d}'.format(epoch) if (prefix is not None): self.prefix += ' | {}'.format(prefix) def __len__(self): return len(self.iterable) def __enter__(self): return self def __exit__(self, *exc): return False def __iter__(self): raise NotImplementedError def log(self, stats, tag='', step=None): 'Log intermediate stats according to log_interval.' raise NotImplementedError def print(self, stats, tag='', step=None): 'Print end-of-epoch stats.' raise NotImplementedError def _str_commas(self, stats): return ', '.join((((key + '=') + stats[key].strip()) for key in stats.keys())) def _str_pipes(self, stats): return ' | '.join((((key + ' ') + stats[key].strip()) for key in stats.keys())) def _format_stats(self, stats): postfix = OrderedDict(stats) for key in postfix.keys(): postfix[key] = str(format_stat(postfix[key])) return postfix
class json_progress_bar(progress_bar): 'Log output in JSON format.' def __init__(self, iterable, epoch=None, prefix=None, log_interval=1000): super().__init__(iterable, epoch, prefix) self.log_interval = log_interval self.stats = None def __iter__(self): size = float(len(self.iterable)) for (i, obj) in enumerate(self.iterable, start=self.offset): (yield obj) if ((self.stats is not None) and (i > 0) and (self.log_interval is not None) and ((i % self.log_interval) == 0)): update = (((self.epoch - 1) + float((i / size))) if (self.epoch is not None) else None) stats = self._format_stats(self.stats, epoch=self.epoch, update=update) print(json.dumps(stats), flush=True) def log(self, stats, tag='', step=None): 'Log intermediate stats according to log_interval.' self.stats = stats def print(self, stats, tag='', step=None): 'Print end-of-epoch stats.' self.stats = stats if (tag != ''): self.stats = OrderedDict([(((tag + '_') + k), v) for (k, v) in self.stats.items()]) stats = self._format_stats(self.stats, epoch=self.epoch) print(json.dumps(stats), flush=True) def _format_stats(self, stats, epoch=None, update=None): postfix = OrderedDict() if (epoch is not None): postfix['epoch'] = epoch if (update is not None): postfix['update'] = round(update, 3) for key in stats.keys(): postfix[key] = format_stat(stats[key]) return postfix
class noop_progress_bar(progress_bar): 'No logging.' def __init__(self, iterable, epoch=None, prefix=None): super().__init__(iterable, epoch, prefix) def __iter__(self): for obj in self.iterable: (yield obj) def log(self, stats, tag='', step=None): 'Log intermediate stats according to log_interval.' pass def print(self, stats, tag='', step=None): 'Print end-of-epoch stats.' pass
class simple_progress_bar(progress_bar): 'A minimal logger for non-TTY environments.' def __init__(self, iterable, epoch=None, prefix=None, log_interval=1000): super().__init__(iterable, epoch, prefix) self.log_interval = log_interval self.stats = None def __iter__(self): size = len(self.iterable) for (i, obj) in enumerate(self.iterable, start=self.offset): (yield obj) if ((self.stats is not None) and (i > 0) and (self.log_interval is not None) and ((i % self.log_interval) == 0)): postfix = self._str_commas(self.stats) print('{}: {:5d} / {:d} {}'.format(self.prefix, i, size, postfix), flush=True) def log(self, stats, tag='', step=None): 'Log intermediate stats according to log_interval.' self.stats = self._format_stats(stats) def print(self, stats, tag='', step=None): 'Print end-of-epoch stats.' postfix = self._str_pipes(self._format_stats(stats)) print('{} | {}'.format(self.prefix, postfix), flush=True)
class tqdm_progress_bar(progress_bar): 'Log to tqdm.' def __init__(self, iterable, epoch=None, prefix=None): super().__init__(iterable, epoch, prefix) from tqdm import tqdm self.tqdm = tqdm(iterable, self.prefix, leave=False) def __iter__(self): return iter(self.tqdm) def log(self, stats, tag='', step=None): 'Log intermediate stats according to log_interval.' self.tqdm.set_postfix(self._format_stats(stats), refresh=False) def print(self, stats, tag='', step=None): 'Print end-of-epoch stats.' postfix = self._str_pipes(self._format_stats(stats)) self.tqdm.write('{} | {}'.format(self.tqdm.desc, postfix))
class tensorboard_log_wrapper(progress_bar): 'Log to tensorboard.' def __init__(self, wrapped_bar, tensorboard_logdir, args): self.wrapped_bar = wrapped_bar self.tensorboard_logdir = tensorboard_logdir self.args = args try: from tensorboardX import SummaryWriter self.SummaryWriter = SummaryWriter self._writers = {} except ImportError: print('tensorboard or required dependencies not found, please see README for using tensorboard. (e.g. pip install tensorboardX)') self.SummaryWriter = None def _writer(self, key): if (self.SummaryWriter is None): return None if (key not in self._writers): self._writers[key] = self.SummaryWriter(os.path.join(self.tensorboard_logdir, key)) self._writers[key].add_text('args', str(vars(self.args))) self._writers[key].add_text('sys.argv', ' '.join(sys.argv)) return self._writers[key] def __iter__(self): return iter(self.wrapped_bar) def log(self, stats, tag='', step=None): 'Log intermediate stats to tensorboard.' self._log_to_tensorboard(stats, tag, step) self.wrapped_bar.log(stats, tag=tag, step=step) def print(self, stats, tag='', step=None): 'Print end-of-epoch stats.' self._log_to_tensorboard(stats, tag, step) self.wrapped_bar.print(stats, tag=tag, step=step) def __exit__(self, *exc): for writer in getattr(self, '_writers', {}).values(): writer.close() return False def _log_to_tensorboard(self, stats, tag='', step=None): writer = self._writer(tag) if (writer is None): return if (step is None): step = stats['num_updates'] for key in (stats.keys() - {'num_updates'}): if isinstance(stats[key], AverageMeter): writer.add_scalar(key, stats[key].val, step) elif isinstance(stats[key], Number): writer.add_scalar(key, stats[key], step)
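# Hedged usage sketch for build_progress_bar: it only reads a handful of
# attributes from `args`, so a bare Namespace is enough for a demo. With
# log_format='simple' the bar prints every `log_interval` iterations.
import argparse

demo_args = argparse.Namespace(
    log_format='simple', no_progress_bar=False, log_interval=2,
    tbmf_wrapper=False, tensorboard_logdir=None,
)
with build_progress_bar(demo_args, range(10), epoch=1, prefix='demo') as bar:
    for i in bar:
        bar.log({'loss': 4.2 - 0.1 * i})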
def setup_registry(registry_name: str, base_class=None, default=None): assert registry_name.startswith('--') registry_name = registry_name[2:].replace('-', '_') REGISTRY = {} REGISTRY_CLASS_NAMES = set() if (registry_name in REGISTRIES): return REGISTRIES[registry_name] = {'registry': REGISTRY, 'default': default} def build_x(args, *extra_args, **extra_kwargs): choice = getattr(args, registry_name, None) if (choice is None): return None cls = REGISTRY[choice] if hasattr(cls, ('build_' + registry_name)): builder = getattr(cls, ('build_' + registry_name)) else: builder = cls set_defaults(args, cls) return builder(args, *extra_args, **extra_kwargs) def register_x(name): def register_x_cls(cls): if (name in REGISTRY): raise ValueError('Cannot register duplicate {} ({})'.format(registry_name, name)) if (cls.__name__ in REGISTRY_CLASS_NAMES): raise ValueError('Cannot register {} with duplicate class name ({})'.format(registry_name, cls.__name__)) if ((base_class is not None) and (not issubclass(cls, base_class))): raise ValueError('{} must extend {}'.format(cls.__name__, base_class.__name__)) REGISTRY[name] = cls REGISTRY_CLASS_NAMES.add(cls.__name__) return cls return register_x_cls return (build_x, register_x, REGISTRY)
def set_defaults(args, cls): 'Helper to set default arguments based on *add_args*.' if (not hasattr(cls, 'add_args')): return parser = argparse.ArgumentParser(argument_default=argparse.SUPPRESS, allow_abbrev=False) cls.add_args(parser) defaults = argparse.Namespace() for action in parser._actions: if (action.dest is not argparse.SUPPRESS): if (not hasattr(defaults, action.dest)): if (action.default is not argparse.SUPPRESS): setattr(defaults, action.dest, action.default) for (key, default_value) in vars(defaults).items(): if (not hasattr(args, key)): setattr(args, key, default_value)
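# Hedged sketch of the registry pattern above using a made-up '--tokenizer'
# registry; fairseq applies the same mechanism to optimizers, LR schedulers,
# criterions, etc. Assumes the module-level REGISTRIES dict that
# setup_registry writes into.
build_tokenizer, register_tokenizer, TOKENIZER_REGISTRY = setup_registry('--tokenizer')

@register_tokenizer('whitespace')
class WhitespaceTokenizer(object):
    def __init__(self, args):
        self.args = args

    def encode(self, line):
        return line.split()

import argparse
tok = build_tokenizer(argparse.Namespace(tokenizer='whitespace'))
print(tok.encode('hello world'))  # ['hello', 'world']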
def setup_task(args, **kwargs): return TASK_REGISTRY[args.task].setup_task(args, **kwargs)
def register_task(name): "\n New tasks can be added to fairseq with the\n :func:`~fairseq.tasks.register_task` function decorator.\n\n For example::\n\n @register_task('classification')\n class ClassificationTask(FairseqTask):\n (...)\n\n .. note::\n\n All Tasks must implement the :class:`~fairseq.tasks.FairseqTask`\n interface.\n\n Please see the\n\n Args:\n name (str): the name of the task\n " def register_task_cls(cls): if (name in TASK_REGISTRY): raise ValueError('Cannot register duplicate task ({})'.format(name)) if (not issubclass(cls, FairseqTask)): raise ValueError('Task ({}: {}) must extend FairseqTask'.format(name, cls.__name__)) if (cls.__name__ in TASK_CLASS_NAMES): raise ValueError('Cannot register task with duplicate class name ({})'.format(cls.__name__)) TASK_REGISTRY[name] = cls TASK_CLASS_NAMES.add(cls.__name__) return cls return register_task_cls
def get_task(name): return TASK_REGISTRY[name]
@register_task('audio_pretraining') class AudioPretrainingTask(FairseqTask): '\n\n ' @staticmethod def add_args(parser): 'Add task-specific arguments to the parser.' parser.add_argument('data', help='path to data directory') parser.add_argument('--sample-rate', default=16000, type=int, help='target sample rate. audio files will be up/down sampled to this rate') parser.add_argument('--max-sample-size', default=None, type=int, help='max sample size to crop to for batching. default = min sample length') parser.add_argument('--min-sample-size', default=None, type=int, help='min sample size to crop to for batching. default = same as --max-sample-size') def __init__(self, args): super().__init__(args) @classmethod def setup_task(cls, args, **kwargs): 'Setup the task (e.g., load dictionaries).\n\n Args:\n args (argparse.Namespace): parsed command-line arguments\n ' return cls(args) def load_dataset(self, split, **kwargs): 'Load a given dataset split.\n\n Args:\n split (str): name of the split (e.g., train, valid, test)\n ' manifest = os.path.join(self.args.data, '{}.tsv'.format(split)) self.datasets[split] = FileAudioDataset(manifest, sample_rate=self.args.sample_rate, max_sample_size=self.args.max_sample_size, min_sample_size=self.args.min_sample_size) @property def target_dictionary(self): 'Return the :class:`~fairseq.data.Dictionary` for the language\n model.' return None
@register_task('cross_lingual_lm') class CrossLingualLMTask(FairseqTask): '\n Task for training cross-lingual language models.\n For more details look at: https://arxiv.org/pdf/1901.07291.pdf\n Args:\n dictionary (Dictionary): the dictionary for the input of the task\n ' @staticmethod def add_args(parser): 'Add task-specific arguments to the parser.' parser.add_argument('data', help='colon separated path to data directories list, will be iterated upon during epochs in round-robin manner') parser.add_argument('--tokens-per-sample', default=512, type=int, help='max number of total tokens over all segments per sample') parser.add_argument('--monolingual-langs', default='en', type=str, help='comma separated list of languages for which we want to train XLM on') parser.add_argument('--raw-text', default=False, action='store_true', help='load raw text dataset') parser.add_argument('--lazy-load', action='store_true', help='load the dataset lazily') parser.add_argument('--shuffle', action='store_true', help='shuffle each monolingual dataset while training') def __init__(self, args, dictionary): super().__init__(args) self.dictionary = dictionary self.seed = args.seed self.distributed_world_size = args.distributed_world_size self.langs2id = self._lang_to_id(args.monolingual_langs) def _lang_to_id(self, languages: str): '\n Build a map from languages to ids. These ids are used as segment labels\n for cross-lingual LM training.\n ' lang2id = {} langs = [l.strip() for l in languages.split(',')] for (id, lang) in enumerate(langs): lang2id[lang] = id return lang2id @classmethod def load_dictionary(cls, filename): return MaskedLMDictionary.load(filename) @classmethod def build_dictionary(cls, filenames, workers=1, threshold=(- 1), nwords=(- 1), padding_factor=8): d = MaskedLMDictionary() for filename in filenames: Dictionary.add_file_to_dictionary(filename, d, tokenizer.tokenize_line, workers) d.finalize(threshold=threshold, nwords=nwords, padding_factor=padding_factor) return d @property def target_dictionary(self): return self.dictionary @classmethod def setup_task(cls, args, **kwargs): 'Setup the task.\n ' dictionary = MaskedLMDictionary.load(os.path.join(args.data, 'dict.txt')) print('| dictionary: {} types'.format(len(dictionary))) return cls(args, dictionary) def _load_single_lang_dataset(self, split, epoch): loaded_datasets = [] paths = self.args.data.split(':') assert (len(paths) > 0) data_path = paths[(epoch % len(paths))] for k in itertools.count(): split_k = (split + (str(k) if (k > 0) else '')) path = os.path.join(data_path, split_k) ds = data_utils.load_indexed_dataset(path, self.dictionary, self.args.dataset_impl) if (ds is None): if (k > 0): break else: raise FileNotFoundError('Dataset not found: {} ({})'.format(split, data_path)) loaded_datasets.append(TokenBlockDataset(ds, ds.sizes, (self.args.tokens_per_sample - 1), pad=self.dictionary.pad(), eos=self.dictionary.eos())) print('| {} {} {} examples'.format(data_path, split_k, len(loaded_datasets[(- 1)]))) if (len(loaded_datasets) == 1): dataset = loaded_datasets[0] sizes = dataset.sizes else: dataset = ConcatDataset(loaded_datasets) sizes = np.concatenate([ds.sizes for ds in loaded_datasets]) return (dataset, sizes) def load_dataset(self, split, epoch=0, combine=False, **kwargs): 'Load a given dataset split.\n Args:\n split (str): name of the split (e.g., train, valid, test)\n ' dataset_map = OrderedDict() for lang in self.langs2id.keys(): language_split = '{}.{}'.format(split, lang) (block_dataset, sizes) = 
self._load_single_lang_dataset(split=language_split, epoch=epoch) dataset_map[lang] = MaskedLMDataset(dataset=block_dataset, sizes=sizes, vocab=self.dictionary, pad_idx=self.dictionary.pad(), mask_idx=self.dictionary.mask(), classif_token_idx=self.dictionary.eos(), sep_token_idx=self.dictionary.eos(), shuffle=getattr(self.args, 'shuffle', False), has_pairs=False, segment_id=self.langs2id[lang], seed=self.seed) self.datasets[split] = MultiCorpusSampledDataset(dataset_map) print('| {} {} {} examples'.format(self.args.data.split(':')[epoch], split, len(self.datasets[split])))
class FairseqTask(object): '\n Tasks store dictionaries and provide helpers for loading/iterating over\n Datasets, initializing the Model/Criterion and calculating the loss.\n ' @staticmethod def add_args(parser): 'Add task-specific arguments to the parser.' pass def __init__(self, args): self.args = args self.datasets = {} self.dataset_to_epoch_iter = {} @classmethod def load_dictionary(cls, filename): 'Load the dictionary from the filename\n\n Args:\n filename (str): the filename\n ' return Dictionary.load(filename) @classmethod def build_dictionary(cls, filenames, workers=1, threshold=(- 1), nwords=(- 1), padding_factor=8): 'Build the dictionary\n\n Args:\n filenames (list): list of filenames\n workers (int): number of concurrent workers\n threshold (int): defines the minimum word count\n nwords (int): defines the total number of words in the final dictionary,\n including special symbols\n padding_factor (int): can be used to pad the dictionary size to be a\n multiple of 8, which is important on some hardware (e.g., Nvidia\n Tensor Cores).\n ' d = Dictionary() for filename in filenames: Dictionary.add_file_to_dictionary(filename, d, tokenizer.tokenize_line, workers) d.finalize(threshold=threshold, nwords=nwords, padding_factor=padding_factor) return d @classmethod def setup_task(cls, args, **kwargs): 'Setup the task (e.g., load dictionaries).\n\n Args:\n args (argparse.Namespace): parsed command-line arguments\n ' return cls(args, **kwargs) def load_dataset(self, split, combine=False, **kwargs): 'Load a given dataset split.\n\n Args:\n split (str): name of the split (e.g., train, valid, test)\n ' raise NotImplementedError def dataset(self, split): '\n Return a loaded dataset split.\n\n Args:\n split (str): name of the split (e.g., train, valid, test)\n\n Returns:\n a :class:`~fairseq.data.FairseqDataset` corresponding to *split*\n ' from fairseq.data import FairseqDataset if (split not in self.datasets): raise KeyError(('Dataset not loaded: ' + split)) if (not isinstance(self.datasets[split], FairseqDataset)): raise TypeError('Datasets are expected to be of type FairseqDataset') return self.datasets[split] def get_batch_iterator(self, dataset, max_tokens=None, max_sentences=None, max_positions=None, ignore_invalid_inputs=False, required_batch_size_multiple=1, seed=1, num_shards=1, shard_id=0, num_workers=0, epoch=0): "\n Get an iterator that yields batches of data from the given dataset.\n\n Args:\n dataset (~fairseq.data.FairseqDataset): dataset to batch\n max_tokens (int, optional): max number of tokens in each batch\n (default: None).\n max_sentences (int, optional): max number of sentences in each\n batch (default: None).\n max_positions (optional): max sentence length supported by the\n model (default: None).\n ignore_invalid_inputs (bool, optional): don't raise Exception for\n sentences that are too long (default: False).\n required_batch_size_multiple (int, optional): require batch size to\n be a multiple of N (default: 1).\n seed (int, optional): seed for random number generator for\n reproducibility (default: 1).\n num_shards (int, optional): shard the data iterator into N\n shards (default: 1).\n shard_id (int, optional): which shard of the data iterator to\n return (default: 0).\n num_workers (int, optional): how many subprocesses to use for data\n loading. 
0 means the data will be loaded in the main process\n (default: 0).\n epoch (int, optional): the epoch to start the iterator from\n (default: 0).\n Returns:\n ~fairseq.iterators.EpochBatchIterator: a batched iterator over the\n given dataset split\n " if (dataset in self.dataset_to_epoch_iter): return self.dataset_to_epoch_iter[dataset] assert isinstance(dataset, FairseqDataset) dataset.set_epoch(epoch) with data_utils.numpy_seed(seed): indices = dataset.ordered_indices() if (max_positions is not None): indices = data_utils.filter_by_size(indices, dataset, max_positions, raise_exception=(not ignore_invalid_inputs)) batch_sampler = data_utils.batch_by_size(indices, dataset.num_tokens, max_tokens=max_tokens, max_sentences=max_sentences, required_batch_size_multiple=required_batch_size_multiple) epoch_iter = iterators.EpochBatchIterator(dataset=dataset, collate_fn=dataset.collater, batch_sampler=batch_sampler, seed=seed, num_shards=num_shards, shard_id=shard_id, num_workers=num_workers, epoch=epoch) self.dataset_to_epoch_iter[dataset] = epoch_iter return epoch_iter def build_model(self, args): '\n Build the :class:`~fairseq.models.BaseFairseqModel` instance for this\n task.\n\n Args:\n args (argparse.Namespace): parsed command-line arguments\n\n Returns:\n a :class:`~fairseq.models.BaseFairseqModel` instance\n ' from fairseq import models return models.build_model(args, self) def build_criterion(self, args): '\n Build the :class:`~fairseq.criterions.FairseqCriterion` instance for\n this task.\n\n Args:\n args (argparse.Namespace): parsed command-line arguments\n\n Returns:\n a :class:`~fairseq.criterions.FairseqCriterion` instance\n ' from fairseq import criterions return criterions.build_criterion(args, self) def build_generator(self, args): if getattr(args, 'score_reference', False): from fairseq.sequence_scorer import SequenceScorer return SequenceScorer(self.target_dictionary) else: from fairseq.sequence_generator import SequenceGenerator, SequenceGeneratorWithAlignment if getattr(args, 'print_alignment', False): seq_gen_cls = SequenceGeneratorWithAlignment else: seq_gen_cls = SequenceGenerator return seq_gen_cls(self.target_dictionary, beam_size=getattr(args, 'beam', 5), max_len_a=getattr(args, 'max_len_a', 0), max_len_b=getattr(args, 'max_len_b', 200), min_len=getattr(args, 'min_len', 1), normalize_scores=(not getattr(args, 'unnormalized', False)), len_penalty=getattr(args, 'lenpen', 1), unk_penalty=getattr(args, 'unkpen', 0), sampling=getattr(args, 'sampling', False), sampling_topk=getattr(args, 'sampling_topk', (- 1)), sampling_topp=getattr(args, 'sampling_topp', (- 1.0)), temperature=getattr(args, 'temperature', 1.0), diverse_beam_groups=getattr(args, 'diverse_beam_groups', (- 1)), diverse_beam_strength=getattr(args, 'diverse_beam_strength', 0.5), match_source_len=getattr(args, 'match_source_len', False), no_repeat_ngram_size=getattr(args, 'no_repeat_ngram_size', 0)) def train_step(self, sample, model, criterion, optimizer, ignore_grad=False): '\n Do forward and backward, and return the loss as computed by *criterion*\n for the given *model* and *sample*.\n\n Args:\n sample (dict): the mini-batch. 
The format is defined by the\n :class:`~fairseq.data.FairseqDataset`.\n model (~fairseq.models.BaseFairseqModel): the model\n criterion (~fairseq.criterions.FairseqCriterion): the criterion\n optimizer (~fairseq.optim.FairseqOptimizer): the optimizer\n ignore_grad (bool): multiply loss by 0 if this is set to True\n\n Returns:\n tuple:\n - the loss\n - the sample size, which is used as the denominator for the\n gradient\n - logging outputs to display while training\n ' model.train() (loss, sample_size, logging_output) = criterion(model, sample) if ignore_grad: loss *= 0 optimizer.backward(loss) return (loss, sample_size, logging_output) def valid_step(self, sample, model, criterion): model.eval() with torch.no_grad(): (loss, sample_size, logging_output) = criterion(model, sample) return (loss, sample_size, logging_output) def inference_step(self, generator, models, sample, prefix_tokens=None): with torch.no_grad(): return generator.generate(models, sample, prefix_tokens=prefix_tokens) def update_step(self, num_updates): 'Task level update when number of update increases. This is called after optimization step and\n learning rate update of each step' pass def grad_denom(self, sample_sizes, criterion): return criterion.__class__.grad_denom(sample_sizes) def aggregate_logging_outputs(self, logging_outputs, criterion): return criterion.__class__.aggregate_logging_outputs(logging_outputs) def max_positions(self): 'Return the max input length allowed by the task.' return None @property def source_dictionary(self): 'Return the source :class:`~fairseq.data.Dictionary` (if applicable\n for this task).' raise NotImplementedError @property def target_dictionary(self): 'Return the target :class:`~fairseq.data.Dictionary` (if applicable\n for this task).' raise NotImplementedError
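# Minimal illustrative task following the register_task docstring above. The
# task name and --demo-flag are hypothetical; load_dataset is stubbed because
# a real task would build a FairseqDataset here.
@register_task('demo_copy')
class DemoCopyTask(FairseqTask):

    @staticmethod
    def add_args(parser):
        parser.add_argument('data', help='path to data directory')
        parser.add_argument('--demo-flag', action='store_true',
                            help='hypothetical task-specific option')

    @classmethod
    def setup_task(cls, args, **kwargs):
        return cls(args)

    def load_dataset(self, split, combine=False, **kwargs):
        raise NotImplementedError('dataset construction is omitted in this sketch')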
@register_task('language_modeling') class LanguageModelingTask(FairseqTask): '\n Train a language model.\n\n Args:\n dictionary (~fairseq.data.Dictionary): the dictionary for the input of\n the language model\n output_dictionary (~fairseq.data.Dictionary): the dictionary for the\n output of the language model. In most cases it will be the same as\n *dictionary*, but could possibly be a more limited version of the\n dictionary (if ``--output-dictionary-size`` is used).\n targets (List[str]): list of the target types that the language model\n should predict. Can be one of "self", "future", and "past".\n Defaults to "future".\n\n .. note::\n\n The language modeling task is compatible with :mod:`fairseq-train`,\n :mod:`fairseq-generate`, :mod:`fairseq-interactive` and\n :mod:`fairseq-eval-lm`.\n\n The language modeling task provides the following additional command-line\n arguments:\n\n .. argparse::\n :ref: fairseq.tasks.language_modeling_parser\n :prog:\n ' @staticmethod def add_args(parser): 'Add task-specific arguments to the parser.' parser.add_argument('data', help='path to data directory') parser.add_argument('--sample-break-mode', default='none', choices=['none', 'complete', 'complete_doc', 'eos'], help='If omitted or "none", fills each sample with tokens-per-sample tokens. If set to "complete", splits samples only at the end of sentence, but may include multiple sentences per sample. "complete_doc" is similar but respects doc boundaries. If set to "eos", includes only one sentence per sample.') parser.add_argument('--tokens-per-sample', default=1024, type=int, help='max number of tokens per sample for LM dataset') parser.add_argument('--lazy-load', action='store_true', help='load the dataset lazily') parser.add_argument('--raw-text', default=False, action='store_true', help='load raw text dataset') parser.add_argument('--output-dictionary-size', default=(- 1), type=int, help='limit the size of output dictionary') parser.add_argument('--self-target', action='store_true', help='include self target') parser.add_argument('--future-target', action='store_true', help='include future target') parser.add_argument('--past-target', action='store_true', help='include past target') parser.add_argument('--add-bos-token', action='store_true', help='prepend beginning of sentence token (<s>)') parser.add_argument('--max-target-positions', type=int, metavar='N', help='max number of tokens in the target sequence') def __init__(self, args, dictionary, output_dictionary=None, targets=None): super().__init__(args) self.dictionary = dictionary self.output_dictionary = (output_dictionary or dictionary) if (targets is None): targets = ['future'] self.targets = targets @classmethod def setup_task(cls, args, **kwargs): 'Setup the task (e.g., load dictionaries).\n\n Args:\n args (argparse.Namespace): parsed command-line arguments\n ' if getattr(args, 'raw_text', False): utils.deprecation_warning('--raw-text is deprecated, please use --dataset-impl=raw') args.dataset_impl = 'raw' elif getattr(args, 'lazy_load', False): utils.deprecation_warning('--lazy-load is deprecated, please use --dataset-impl=lazy') args.dataset_impl = 'lazy' dictionary = None output_dictionary = None if args.data: paths = args.data.split(':') assert (len(paths) > 0) dictionary = Dictionary.load(os.path.join(paths[0], 'dict.txt')) print('| dictionary: {} types'.format(len(dictionary))) output_dictionary = dictionary if (args.output_dictionary_size >= 0): output_dictionary = TruncatedDictionary(dictionary, args.output_dictionary_size) if 
hasattr(args, 'exclude_self_target'): args.self_target = (not args.exclude_self_target) targets = [] if getattr(args, 'self_target', False): targets.append('self') if getattr(args, 'future_target', False): targets.append('future') if getattr(args, 'past_target', False): targets.append('past') if (len(targets) == 0): targets = ['future'] return cls(args, dictionary, output_dictionary, targets=targets) def build_model(self, args): model = super().build_model(args) for target in self.targets: if (target not in model.supported_targets): raise ValueError('Unsupported language modeling target: {}'.format(target)) return model def load_dataset(self, split, epoch=0, combine=False, **kwargs): 'Load a given dataset split.\n\n Args:\n split (str): name of the split (e.g., train, valid, test)\n ' paths = self.args.data.split(':') assert (len(paths) > 0) data_path = paths[(epoch % len(paths))] split_path = os.path.join(data_path, split) dataset = data_utils.load_indexed_dataset(split_path, self.dictionary, self.args.dataset_impl, combine=combine) if (dataset is None): raise FileNotFoundError('Dataset not found: {} ({})'.format(split, split_path)) dataset = TokenBlockDataset(dataset, dataset.sizes, self.args.tokens_per_sample, pad=self.dictionary.pad(), eos=self.dictionary.eos(), break_mode=self.args.sample_break_mode, include_targets=True) add_eos_for_other_targets = ((self.args.sample_break_mode is not None) and (self.args.sample_break_mode != 'none')) self.datasets[split] = MonolingualDataset(dataset, dataset.sizes, self.dictionary, self.output_dictionary, add_eos_for_other_targets=add_eos_for_other_targets, shuffle=True, targets=self.targets, add_bos_token=self.args.add_bos_token) def build_dataset_for_inference(self, src_tokens, src_lengths): return TransformEosDataset(MonolingualDataset(TokenBlockDataset(src_tokens, src_lengths, block_size=None, pad=self.source_dictionary.pad(), eos=self.source_dictionary.eos(), break_mode='eos', include_targets=False), src_lengths, self.source_dictionary, self.target_dictionary, add_eos_for_other_targets=False, shuffle=False, add_bos_token=self.args.add_bos_token), eos=self.source_dictionary.eos(), remove_eos_from_src=True, has_target=False) def inference_step(self, generator, models, sample, prefix_tokens=None): with torch.no_grad(): if ((prefix_tokens is None) and sample['net_input']['src_tokens'].nelement()): prefix_tokens = sample['net_input']['src_tokens'] return generator.generate(models, sample, prefix_tokens=prefix_tokens) @property def source_dictionary(self): 'Return the :class:`~fairseq.data.Dictionary` for the language\n model.' return self.dictionary @property def target_dictionary(self): 'Return the :class:`~fairseq.data.Dictionary` for the language\n model.' return self.output_dictionary
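# Illustrative sketch (not part of the original source): the colon-separated
# `data` argument is rotated across epochs by load_dataset above (the same
# `paths[epoch % len(paths)]` pattern recurs in the tasks below). The shard
# directory names are made up.
def _pick_data_path(data, epoch):
    paths = data.split(':')
    assert len(paths) > 0
    return paths[epoch % len(paths)]


# example: even epochs read shard0, odd epochs read shard1
for _epoch in range(4):
    print(_epoch, _pick_data_path('/data/shard0:/data/shard1', _epoch))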
@register_task('legacy_masked_lm') class LegacyMaskedLMTask(FairseqTask): '\n Task for training Masked LM (BERT) model.\n Args:\n dictionary (Dictionary): the dictionary for the input of the task\n ' @staticmethod def add_args(parser): 'Add task-specific arguments to the parser.' parser.add_argument('data', help='colon separated path to data directories list, will be iterated upon during epochs in round-robin manner') parser.add_argument('--tokens-per-sample', default=512, type=int, help='max number of total tokens over all segments per sample for BERT dataset') parser.add_argument('--break-mode', default='doc', type=str, help='mode for breaking sentence') parser.add_argument('--shuffle-dataset', action='store_true', default=False) def __init__(self, args, dictionary): super().__init__(args) self.dictionary = dictionary self.seed = args.seed @classmethod def load_dictionary(cls, filename): return BertDictionary.load(filename) @classmethod def build_dictionary(cls, filenames, workers=1, threshold=(- 1), nwords=(- 1), padding_factor=8): d = BertDictionary() for filename in filenames: Dictionary.add_file_to_dictionary(filename, d, tokenizer.tokenize_line, workers) d.finalize(threshold=threshold, nwords=nwords, padding_factor=padding_factor) return d @property def target_dictionary(self): return self.dictionary @classmethod def setup_task(cls, args, **kwargs): 'Setup the task.\n ' paths = args.data.split(':') assert (len(paths) > 0) dictionary = BertDictionary.load(os.path.join(paths[0], 'dict.txt')) print('| dictionary: {} types'.format(len(dictionary))) return cls(args, dictionary) def load_dataset(self, split, epoch=0, combine=False): 'Load a given dataset split.\n Args:\n split (str): name of the split (e.g., train, valid, test)\n ' loaded_datasets = [] paths = self.args.data.split(':') assert (len(paths) > 0) data_path = paths[(epoch % len(paths))] print('| data_path', data_path) for k in itertools.count(): split_k = (split + (str(k) if (k > 0) else '')) path = os.path.join(data_path, split_k) ds = indexed_dataset.make_dataset(path, impl=self.args.dataset_impl, fix_lua_indexing=True, dictionary=self.dictionary) if (ds is None): if (k > 0): break else: raise FileNotFoundError('Dataset not found: {} ({})'.format(split, data_path)) with data_utils.numpy_seed((self.seed + k)): loaded_datasets.append(BlockPairDataset(ds, self.dictionary, ds.sizes, self.args.tokens_per_sample, break_mode=self.args.break_mode, doc_break_size=1)) print('| {} {} {} examples'.format(data_path, split_k, len(loaded_datasets[(- 1)]))) if (not combine): break if (len(loaded_datasets) == 1): dataset = loaded_datasets[0] sizes = dataset.sizes else: dataset = ConcatDataset(loaded_datasets) sizes = np.concatenate([ds.sizes for ds in loaded_datasets]) self.datasets[split] = MaskedLMDataset(dataset=dataset, sizes=sizes, vocab=self.dictionary, pad_idx=self.dictionary.pad(), mask_idx=self.dictionary.mask(), classif_token_idx=self.dictionary.cls(), sep_token_idx=self.dictionary.sep(), shuffle=self.args.shuffle_dataset, seed=self.seed)
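# Illustrative sketch (not part of the original source): the shard-combining
# loop in load_dataset above. With combine=True it keeps loading splits named
# `train`, `train1`, `train2`, ... until one is missing; with combine=False
# only the first shard is used. The `available` set is a hypothetical stand-in
# for indexed_dataset.make_dataset returning None when a file does not exist.
import itertools


def _collect_split_names(split, available, combine=True):
    names = []
    for k in itertools.count():
        split_k = split + (str(k) if k > 0 else '')
        if split_k not in available:
            if k > 0:
                break
            raise FileNotFoundError('Dataset not found: {}'.format(split))
        names.append(split_k)
        if not combine:
            break
    return names


# example
print(_collect_split_names('train', {'train', 'train1', 'train2'}))       # ['train', 'train1', 'train2']
print(_collect_split_names('train', {'train', 'train1'}, combine=False))  # ['train']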
@register_task('masked_lm') class MaskedLMTask(FairseqTask): 'Task for training masked language models (e.g., BERT, RoBERTa).' @staticmethod def add_args(parser): 'Add task-specific arguments to the parser.' parser.add_argument('data', help='colon separated path to data directories list, will be iterated upon during epochs in round-robin manner') parser.add_argument('--sample-break-mode', default='complete', choices=['none', 'complete', 'complete_doc', 'eos'], help='If omitted or "none", fills each sample with tokens-per-sample tokens. If set to "complete", splits samples only at the end of sentence, but may include multiple sentences per sample. "complete_doc" is similar but respects doc boundaries. If set to "eos", includes only one sentence per sample.') parser.add_argument('--tokens-per-sample', default=512, type=int, help='max number of total tokens over all segments per sample for BERT dataset') parser.add_argument('--mask-prob', default=0.15, type=float, help='probability of replacing a token with mask') parser.add_argument('--leave-unmasked-prob', default=0.1, type=float, help='probability that a masked token is unmasked') parser.add_argument('--random-token-prob', default=0.1, type=float, help='probability of replacing a token with a random token') parser.add_argument('--freq-weighted-replacement', action='store_true', help='sample random replacement words based on word frequencies') parser.add_argument('--mask-whole-words', default=False, action='store_true', help='mask whole words; you may also want to set --bpe') def __init__(self, args, dictionary): super().__init__(args) self.dictionary = dictionary self.seed = args.seed self.mask_idx = dictionary.add_symbol('<mask>') @classmethod def setup_task(cls, args, **kwargs): paths = args.data.split(':') assert (len(paths) > 0) dictionary = Dictionary.load(os.path.join(paths[0], 'dict.txt')) print('| dictionary: {} types'.format(len(dictionary))) return cls(args, dictionary) def load_dataset(self, split, epoch=0, combine=False, **kwargs): 'Load a given dataset split.\n\n Args:\n split (str): name of the split (e.g., train, valid, test)\n ' paths = self.args.data.split(':') assert (len(paths) > 0) data_path = paths[(epoch % len(paths))] split_path = os.path.join(data_path, split) dataset = data_utils.load_indexed_dataset(split_path, self.source_dictionary, self.args.dataset_impl, combine=combine) if (dataset is None): raise FileNotFoundError('Dataset not found: {} ({})'.format(split, split_path)) dataset = TokenBlockDataset(dataset, dataset.sizes, (self.args.tokens_per_sample - 1), pad=self.source_dictionary.pad(), eos=self.source_dictionary.eos(), break_mode=self.args.sample_break_mode) print('| loaded {} blocks from: {}'.format(len(dataset), split_path)) dataset = PrependTokenDataset(dataset, self.source_dictionary.bos()) if self.args.mask_whole_words: bpe = encoders.build_bpe(self.args) if (bpe is not None): def is_beginning_of_word(i): if (i < self.source_dictionary.nspecial): return True tok = self.source_dictionary[i] if tok.startswith('madeupword'): return True try: return bpe.is_beginning_of_word(tok) except ValueError: return True mask_whole_words = torch.ByteTensor(list(map(is_beginning_of_word, range(len(self.source_dictionary))))) else: mask_whole_words = None (src_dataset, tgt_dataset) = MaskTokensDataset.apply_mask(dataset, self.source_dictionary, pad_idx=self.source_dictionary.pad(), mask_idx=self.mask_idx, seed=self.args.seed, mask_prob=self.args.mask_prob, leave_unmasked_prob=self.args.leave_unmasked_prob, 
random_token_prob=self.args.random_token_prob, freq_weighted_replacement=self.args.freq_weighted_replacement, mask_whole_words=mask_whole_words) with data_utils.numpy_seed((self.args.seed + epoch)): shuffle = np.random.permutation(len(src_dataset)) self.datasets[split] = SortDataset(NestedDictionaryDataset({'id': IdDataset(), 'net_input': {'src_tokens': PadDataset(src_dataset, pad_idx=self.source_dictionary.pad(), left_pad=False), 'src_lengths': NumelDataset(src_dataset, reduce=False)}, 'target': PadDataset(tgt_dataset, pad_idx=self.source_dictionary.pad(), left_pad=False), 'nsentences': NumSamplesDataset(), 'ntokens': NumelDataset(src_dataset, reduce=True)}, sizes=[src_dataset.sizes]), sort_order=[shuffle, src_dataset.sizes]) def build_dataset_for_inference(self, src_tokens, src_lengths, sort=True): src_dataset = PadDataset(TokenBlockDataset(src_tokens, src_lengths, (self.args.tokens_per_sample - 1), pad=self.source_dictionary.pad(), eos=self.source_dictionary.eos(), break_mode='eos'), pad_idx=self.source_dictionary.pad(), left_pad=False) src_dataset = PrependTokenDataset(src_dataset, self.source_dictionary.bos()) src_dataset = NestedDictionaryDataset({'id': IdDataset(), 'net_input': {'src_tokens': src_dataset, 'src_lengths': NumelDataset(src_dataset, reduce=False)}}, sizes=src_lengths) if sort: src_dataset = SortDataset(src_dataset, sort_order=[src_lengths]) return src_dataset @property def source_dictionary(self): return self.dictionary @property def target_dictionary(self): return self.dictionary
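# Illustrative sketch (not part of the original source): the effect of the
# --mask-prob, --leave-unmasked-prob and --random-token-prob arguments above.
# Roughly, mask_prob of the tokens are selected as prediction targets; of
# those, leave_unmasked_prob stay unchanged, random_token_prob are replaced
# with a random vocabulary item, and the remainder become <mask>. This is a
# simplified stand-in for MaskTokensDataset.apply_mask, not its actual code.
import numpy as np


def toy_apply_mask(tokens, vocab_size, mask_idx, pad_idx,
                   mask_prob=0.15, leave_unmasked_prob=0.1,
                   random_token_prob=0.1, seed=0):
    rng = np.random.RandomState(seed)
    tokens = np.asarray(tokens)
    src, tgt = tokens.copy(), np.full_like(tokens, pad_idx)
    selected = rng.rand(len(tokens)) < mask_prob
    tgt[selected] = tokens[selected]  # only selected positions carry a target
    u = rng.rand(len(tokens))
    to_mask = selected & (u >= leave_unmasked_prob + random_token_prob)
    to_rand = selected & (u >= leave_unmasked_prob) & ~to_mask
    src[to_mask] = mask_idx
    src[to_rand] = rng.randint(0, vocab_size, size=int(to_rand.sum()))
    return src, tgt


# example
print(toy_apply_mask(list(range(4, 24)), vocab_size=100, mask_idx=3, pad_idx=1))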
@register_task('multilingual_masked_lm') class MultiLingualMaskedLMTask(FairseqTask): 'Task for training masked language models (e.g., BERT, RoBERTa).' @staticmethod def add_args(parser): 'Add task-specific arguments to the parser.' parser.add_argument('data', help='colon separated path to data directories list, will be iterated upon during epochs in round-robin manner') parser.add_argument('--sample-break-mode', default='complete', choices=['none', 'complete', 'complete_doc', 'eos'], help='If omitted or "none", fills each sample with tokens-per-sample tokens. If set to "complete", splits samples only at the end of sentence, but may include multiple sentences per sample. "complete_doc" is similar but respects doc boundaries. If set to "eos", includes only one sentence per sample.') parser.add_argument('--tokens-per-sample', default=512, type=int, help='max number of total tokens over all segments per sample for BERT dataset') parser.add_argument('--mask-prob', default=0.15, type=float, help='probability of replacing a token with mask') parser.add_argument('--leave-unmasked-prob', default=0.1, type=float, help='probability that a masked token is unmasked') parser.add_argument('--random-token-prob', default=0.1, type=float, help='probability of replacing a token with a random token') parser.add_argument('--freq-weighted-replacement', action='store_true', help='sample random replacement words based on word frequencies') parser.add_argument('--mask-whole-words', default=False, action='store_true', help='mask whole words; you may also want to set --bpe') parser.add_argument('--multilang-sampling-alpha', type=float, default=1.0, help='smoothing alpha for sample rations across multiple datasets') def __init__(self, args, dictionary): super().__init__(args) self.dictionary = dictionary self.seed = args.seed self.mask_idx = dictionary.add_symbol('<mask>') @classmethod def setup_task(cls, args, **kwargs): paths = args.data.split(':') assert (len(paths) > 0) dictionary = Dictionary.load(os.path.join(paths[0], 'dict.txt')) print('| dictionary: {} types'.format(len(dictionary))) return cls(args, dictionary) def _get_whole_word_mask(self): if self.args.mask_whole_words: bpe = encoders.build_bpe(self.args) if (bpe is not None): def is_beginning_of_word(i): if (i < self.source_dictionary.nspecial): return True tok = self.source_dictionary[i] if tok.startswith('madeupword'): return True try: return bpe.is_beginning_of_word(tok) except ValueError: return True mask_whole_words = torch.ByteTensor(list(map(is_beginning_of_word, range(len(self.source_dictionary))))) else: mask_whole_words = None return mask_whole_words def _get_sample_prob(self, dataset_lens): '\n Get smoothed sampling porbability by languages. 
This helps low resource\n languages by upsampling them.\n ' prob = (dataset_lens / dataset_lens.sum()) smoothed_prob = (prob ** self.args.multilang_sampling_alpha) smoothed_prob = (smoothed_prob / smoothed_prob.sum()) return smoothed_prob def load_dataset(self, split, epoch=0, combine=False, **kwargs): 'Load a given dataset split.\n\n Args:\n split (str): name of the split (e.g., train, valid, test)\n ' paths = self.args.data.split(':') assert (len(paths) > 0) data_path = paths[(epoch % len(paths))] languages = [name for name in os.listdir(data_path) if os.path.isdir(os.path.join(data_path, name))] print('| Training on {0} languages: {1}'.format(len(languages), languages)) print('| Language to id mapping: ', {lang: id for (id, lang) in enumerate(languages)}) mask_whole_words = self._get_whole_word_mask() lang_datasets = [] for (lang_id, language) in enumerate(languages): split_path = os.path.join(data_path, language, split) dataset = data_utils.load_indexed_dataset(split_path, self.source_dictionary, self.args.dataset_impl, combine=combine) if (dataset is None): raise FileNotFoundError('Dataset not found: {} ({})'.format(split, split_path)) dataset = TokenBlockDataset(dataset, dataset.sizes, (self.args.tokens_per_sample - 1), pad=self.source_dictionary.pad(), eos=self.source_dictionary.eos(), break_mode=self.args.sample_break_mode) print('| loaded {} blocks from: {}'.format(len(dataset), split_path)) dataset = PrependTokenDataset(dataset, self.source_dictionary.bos()) (src_dataset, tgt_dataset) = MaskTokensDataset.apply_mask(dataset, self.source_dictionary, pad_idx=self.source_dictionary.pad(), mask_idx=self.mask_idx, seed=self.args.seed, mask_prob=self.args.mask_prob, leave_unmasked_prob=self.args.leave_unmasked_prob, random_token_prob=self.args.random_token_prob, freq_weighted_replacement=self.args.freq_weighted_replacement, mask_whole_words=mask_whole_words) lang_dataset = NestedDictionaryDataset({'net_input': {'src_tokens': PadDataset(src_dataset, pad_idx=self.source_dictionary.pad(), left_pad=False), 'src_lengths': NumelDataset(src_dataset, reduce=False)}, 'target': PadDataset(tgt_dataset, pad_idx=self.source_dictionary.pad(), left_pad=False), 'nsentences': NumSamplesDataset(), 'ntokens': NumelDataset(src_dataset, reduce=True), 'lang_id': RawLabelDataset(([lang_id] * src_dataset.sizes.shape[0]))}, sizes=[src_dataset.sizes]) lang_datasets.append(lang_dataset) if (split == self.args.train_subset): dataset_lengths = np.array([len(d) for d in lang_datasets], dtype=float) sample_probs = self._get_sample_prob(dataset_lengths) print('| Sample probability by language: ', {lang: '{0:.4f}'.format(sample_probs[id]) for (id, lang) in enumerate(languages)}) size_ratio = ((sample_probs * dataset_lengths.sum()) / dataset_lengths) print('| Up/Down Sampling ratio by language: ', {lang: '{0:.2f}'.format(size_ratio[id]) for (id, lang) in enumerate(languages)}) resampled_lang_datasets = [ResamplingDataset(lang_datasets[i], size_ratio=size_ratio[i], seed=self.args.seed, epoch=epoch, replace=(size_ratio[i] >= 1.0)) for (i, d) in enumerate(lang_datasets)] dataset = ConcatDataset(resampled_lang_datasets) else: dataset = ConcatDataset(lang_datasets) lang_splits = [split] for (lang_id, lang_dataset) in enumerate(lang_datasets): split_name = ((split + '_') + languages[lang_id]) lang_splits.append(split_name) self.datasets[split_name] = lang_dataset if (split in self.args.valid_subset): self.args.valid_subset = self.args.valid_subset.replace(split, ','.join(lang_splits)) with 
data_utils.numpy_seed((self.args.seed + epoch)): shuffle = np.random.permutation(len(dataset)) self.datasets[split] = SortDataset(dataset, sort_order=[shuffle, dataset.sizes]) def build_dataset_for_inference(self, src_tokens, src_lengths, sort=True): src_dataset = PadDataset(TokenBlockDataset(src_tokens, src_lengths, (self.args.tokens_per_sample - 1), pad=self.source_dictionary.pad(), eos=self.source_dictionary.eos(), break_mode='eos'), pad_idx=self.source_dictionary.pad(), left_pad=False) src_dataset = PrependTokenDataset(src_dataset, self.source_dictionary.bos()) src_dataset = NestedDictionaryDataset({'id': IdDataset(), 'net_input': {'src_tokens': src_dataset, 'src_lengths': NumelDataset(src_dataset, reduce=False)}}, sizes=src_lengths) if sort: src_dataset = SortDataset(src_dataset, sort_order=[src_lengths]) return src_dataset def get_batch_iterator(self, dataset, max_tokens=None, max_sentences=None, max_positions=None, ignore_invalid_inputs=False, required_batch_size_multiple=1, seed=1, num_shards=1, shard_id=0, num_workers=0, epoch=0): self.dataset_to_epoch_iter = None return super().get_batch_iterator(dataset, max_tokens, max_sentences, max_positions, ignore_invalid_inputs, required_batch_size_multiple, seed, num_shards, shard_id, num_workers, epoch) @property def source_dictionary(self): return self.dictionary @property def target_dictionary(self): return self.dictionary
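# Illustrative sketch (not part of the original source): the
# --multilang-sampling-alpha smoothing computed by _get_sample_prob above.
# With alpha < 1 low-resource languages are upsampled relative to their share
# of the data; alpha = 1 keeps the natural proportions. The corpus sizes are
# made up.
import numpy as np


def smoothed_sampling(dataset_lens, alpha):
    prob = dataset_lens / dataset_lens.sum()
    smoothed = prob ** alpha
    smoothed = smoothed / smoothed.sum()
    size_ratio = (smoothed * dataset_lens.sum()) / dataset_lens
    return smoothed, size_ratio


# example: a high-resource and a low-resource language
_lens = np.array([1000000.0, 10000.0])
for _alpha in (1.0, 0.7, 0.3):
    _probs, _ratios = smoothed_sampling(_lens, _alpha)
    print(_alpha, _probs.round(4), _ratios.round(2))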
def _lang_token(lang: str):
    return '__{}__'.format(lang)
def _lang_token_index(dic: Dictionary, lang: str):
    """Return language token index."""
    idx = dic.index(_lang_token(lang))
    assert idx != dic.unk_index, \
        'cannot find language token for lang {}'.format(lang)
    return idx
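# Illustrative sketch (not part of the original source): when
# --encoder-langtok / --decoder-langtok are enabled, prepare() below appends a
# token of the form '__en__' for every language to the dictionaries, and
# _lang_token_index looks it up again at data-loading time. This assumes
# Dictionary() can be constructed with its default special symbols.
_toy_dict = Dictionary()
for _lang in ('en', 'de', 'fr'):
    _toy_dict.add_symbol(_lang_token(_lang))
print(_lang_token('de'), '->', _lang_token_index(_toy_dict, 'de'))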
@register_task('multilingual_translation') class MultilingualTranslationTask(FairseqTask): 'A task for training multiple translation models simultaneously.\n\n We iterate round-robin over batches from multiple language pairs, ordered\n according to the `--lang-pairs` argument.\n\n The training loop is roughly:\n\n for i in range(len(epoch)):\n for lang_pair in args.lang_pairs:\n batch = next_batch_for_lang_pair(lang_pair)\n loss = criterion(model_for_lang_pair(lang_pair), batch)\n loss.backward()\n optimizer.step()\n\n In practice, `next_batch_for_lang_pair` is abstracted in a FairseqDataset\n (e.g., `RoundRobinZipDatasets`) and `model_for_lang_pair` is a model that\n implements the `FairseqMultiModel` interface.\n\n During inference it is required to specify a single `--source-lang` and\n `--target-lang`, which indicates the inference langauge direction.\n `--lang-pairs`, `--encoder-langtok`, `--decoder-langtok` have to be set to\n the same value as training.\n ' @staticmethod def add_args(parser): 'Add task-specific arguments to the parser.' parser.add_argument('data', metavar='DIR', help='path to data directory') parser.add_argument('--lang-pairs', default=None, metavar='PAIRS', help='comma-separated list of language pairs (in training order): en-de,en-fr,de-fr') parser.add_argument('-s', '--source-lang', default=None, metavar='SRC', help='source language (only needed for inference)') parser.add_argument('-t', '--target-lang', default=None, metavar='TARGET', help='target language (only needed for inference)') parser.add_argument('--lazy-load', action='store_true', help='load the dataset lazily') parser.add_argument('--raw-text', default=False, action='store_true', help='load raw text dataset') parser.add_argument('--left-pad-source', default='True', type=str, metavar='BOOL', help='pad the source on the left (default: True)') parser.add_argument('--left-pad-target', default='False', type=str, metavar='BOOL', help='pad the target on the left (default: False)') parser.add_argument('--max-source-positions', default=1024, type=int, metavar='N', help='max number of tokens in the source sequence') parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N', help='max number of tokens in the target sequence') parser.add_argument('--upsample-primary', default=1, type=int, help='amount to upsample primary dataset') parser.add_argument('--encoder-langtok', default=None, type=str, choices=['src', 'tgt'], metavar='SRCTGT', help='replace beginning-of-sentence in source sentence with source or target language token. 
(src/tgt)') parser.add_argument('--decoder-langtok', action='store_true', help='replace beginning-of-sentence in target sentence with target language token') def __init__(self, args, dicts, training): super().__init__(args) self.dicts = dicts self.training = training if training: self.lang_pairs = args.lang_pairs (args.source_lang, args.target_lang) = args.lang_pairs[0].split('-') else: self.lang_pairs = ['{}-{}'.format(args.source_lang, args.target_lang)] self.eval_lang_pairs = self.lang_pairs self.model_lang_pairs = self.lang_pairs self.langs = list(dicts.keys()) @classmethod def setup_task(cls, args, **kwargs): (dicts, training) = cls.prepare(args, **kwargs) return cls(args, dicts, training) @classmethod def prepare(cls, args, **kargs): args.left_pad_source = options.eval_bool(args.left_pad_source) args.left_pad_target = options.eval_bool(args.left_pad_target) if getattr(args, 'raw_text', False): utils.deprecation_warning('--raw-text is deprecated, please use --dataset-impl=raw') args.dataset_impl = 'raw' elif getattr(args, 'lazy_load', False): utils.deprecation_warning('--lazy-load is deprecated, please use --dataset-impl=lazy') args.dataset_impl = 'lazy' if (args.lang_pairs is None): raise ValueError('--lang-pairs is required. List all the language pairs in the training objective.') args.lang_pairs = args.lang_pairs.split(',') sorted_langs = sorted(list({x for lang_pair in args.lang_pairs for x in lang_pair.split('-')})) if ((args.source_lang is not None) or (args.target_lang is not None)): training = False else: training = True dicts = OrderedDict() for lang in sorted_langs: paths = args.data.split(':') assert (len(paths) > 0) dicts[lang] = Dictionary.load(os.path.join(paths[0], 'dict.{}.txt'.format(lang))) if (len(dicts) > 0): assert (dicts[lang].pad() == dicts[sorted_langs[0]].pad()) assert (dicts[lang].eos() == dicts[sorted_langs[0]].eos()) assert (dicts[lang].unk() == dicts[sorted_langs[0]].unk()) if ((args.encoder_langtok is not None) or args.decoder_langtok): for lang_to_add in sorted_langs: dicts[lang].add_symbol(_lang_token(lang_to_add)) print('| [{}] dictionary: {} types'.format(lang, len(dicts[lang]))) return (dicts, training) def get_encoder_langtok(self, src_lang, tgt_lang): if (self.args.encoder_langtok is None): return self.dicts[src_lang].eos() if (self.args.encoder_langtok == 'src'): return _lang_token_index(self.dicts[src_lang], src_lang) else: return _lang_token_index(self.dicts[src_lang], tgt_lang) def get_decoder_langtok(self, tgt_lang): if (not self.args.decoder_langtok): return self.dicts[tgt_lang].eos() return _lang_token_index(self.dicts[tgt_lang], tgt_lang) def alter_dataset_langtok(self, lang_pair_dataset, src_eos=None, src_lang=None, tgt_eos=None, tgt_lang=None): if ((self.args.encoder_langtok is None) and (not self.args.decoder_langtok)): return lang_pair_dataset new_src_eos = None if ((self.args.encoder_langtok is not None) and (src_eos is not None) and (src_lang is not None) and (tgt_lang is not None)): new_src_eos = self.get_encoder_langtok(src_lang, tgt_lang) else: src_eos = None new_tgt_bos = None if (self.args.decoder_langtok and (tgt_eos is not None) and (tgt_lang is not None)): new_tgt_bos = self.get_decoder_langtok(tgt_lang) else: tgt_eos = None return TransformEosLangPairDataset(lang_pair_dataset, src_eos=src_eos, new_src_eos=new_src_eos, tgt_bos=tgt_eos, new_tgt_bos=new_tgt_bos) def load_dataset(self, split, epoch=0, **kwargs): 'Load a dataset split.' 
paths = self.args.data.split(':') assert (len(paths) > 0) data_path = paths[(epoch % len(paths))] def language_pair_dataset(lang_pair): (src, tgt) = lang_pair.split('-') langpair_dataset = load_langpair_dataset(data_path, split, src, self.dicts[src], tgt, self.dicts[tgt], combine=True, dataset_impl=self.args.dataset_impl, upsample_primary=self.args.upsample_primary, left_pad_source=self.args.left_pad_source, left_pad_target=self.args.left_pad_target, max_source_positions=self.args.max_source_positions, max_target_positions=self.args.max_target_positions) return self.alter_dataset_langtok(langpair_dataset, src_eos=self.dicts[src].eos(), src_lang=src, tgt_eos=self.dicts[tgt].eos(), tgt_lang=tgt) self.datasets[split] = RoundRobinZipDatasets(OrderedDict([(lang_pair, language_pair_dataset(lang_pair)) for lang_pair in self.lang_pairs]), eval_key=(None if self.training else ('%s-%s' % (self.args.source_lang, self.args.target_lang)))) def build_dataset_for_inference(self, src_tokens, src_lengths): lang_pair = ('%s-%s' % (self.args.source_lang, self.args.target_lang)) return RoundRobinZipDatasets(OrderedDict([(lang_pair, self.alter_dataset_langtok(LanguagePairDataset(src_tokens, src_lengths, self.source_dictionary), src_eos=self.source_dictionary.eos(), src_lang=self.args.source_lang, tgt_eos=self.target_dictionary.eos(), tgt_lang=self.args.target_lang))]), eval_key=lang_pair) def build_model(self, args): def check_args(): messages = [] if (len(set(self.args.lang_pairs).symmetric_difference(args.lang_pairs)) != 0): messages.append('--lang-pairs should include all the language pairs {}.'.format(args.lang_pairs)) if (self.args.encoder_langtok != args.encoder_langtok): messages.append('--encoder-langtok should be {}.'.format(args.encoder_langtok)) if (self.args.decoder_langtok != args.decoder_langtok): messages.append('--decoder-langtok should {} be set.'.format(('' if args.decoder_langtok else 'not'))) if (len(messages) > 0): raise ValueError(' '.join(messages)) check_args() from fairseq import models model = models.build_model(args, self) if (not isinstance(model, FairseqMultiModel)): raise ValueError('MultilingualTranslationTask requires a FairseqMultiModel architecture') return model def train_step(self, sample, model, criterion, optimizer, ignore_grad=False): model.train() (agg_loss, agg_sample_size, agg_logging_output) = (0.0, 0.0, {}) for lang_pair in self.model_lang_pairs: if ((sample[lang_pair] is None) or (len(sample[lang_pair]) == 0)): continue (loss, sample_size, logging_output) = criterion(model.models[lang_pair], sample[lang_pair]) if ignore_grad: loss *= 0 optimizer.backward(loss) agg_loss += loss.detach().item() agg_sample_size += sample_size agg_logging_output[lang_pair] = logging_output return (agg_loss, agg_sample_size, agg_logging_output) def valid_step(self, sample, model, criterion): model.eval() with torch.no_grad(): (agg_loss, agg_sample_size, agg_logging_output) = (0.0, 0.0, {}) for lang_pair in self.eval_lang_pairs: if ((lang_pair not in sample) or (sample[lang_pair] is None) or (len(sample[lang_pair]) == 0)): continue (loss, sample_size, logging_output) = criterion(model.models[lang_pair], sample[lang_pair]) agg_loss += loss.data.item() agg_sample_size += sample_size agg_logging_output[lang_pair] = logging_output return (agg_loss, agg_sample_size, agg_logging_output) def inference_step(self, generator, models, sample, prefix_tokens=None): with torch.no_grad(): return generator.generate(models, sample, prefix_tokens=prefix_tokens, 
bos_token=(_lang_token_index(self.target_dictionary, self.args.target_lang) if self.args.decoder_langtok else self.target_dictionary.eos())) def init_logging_output(self, sample): return {'ntokens': (sum((sample_lang.get('ntokens', 0) for sample_lang in sample.values())) if (sample is not None) else 0), 'nsentences': (sum(((sample_lang['target'].size(0) if ('target' in sample_lang) else 0) for sample_lang in sample.values())) if (sample is not None) else 0)} def grad_denom(self, sample_sizes, criterion): return criterion.__class__.grad_denom(sample_sizes) def aggregate_logging_outputs(self, logging_outputs, criterion, logging_output_keys=None): logging_output_keys = (logging_output_keys or self.eval_lang_pairs) agg_logging_outputs = {key: criterion.__class__.aggregate_logging_outputs([logging_output.get(key, {}) for logging_output in logging_outputs]) for key in logging_output_keys} def sum_over_languages(key): return sum((logging_output[key] for logging_output in agg_logging_outputs.values())) flat_logging_output = {'{}:{}'.format(lang_pair, k): v for (lang_pair, agg_logging_output) in agg_logging_outputs.items() for (k, v) in agg_logging_output.items()} flat_logging_output['loss'] = sum_over_languages('loss') if any((('nll_loss' in logging_output) for logging_output in agg_logging_outputs.values())): flat_logging_output['nll_loss'] = sum_over_languages('nll_loss') flat_logging_output['sample_size'] = sum_over_languages('sample_size') flat_logging_output['nsentences'] = sum_over_languages('nsentences') flat_logging_output['ntokens'] = sum_over_languages('ntokens') return flat_logging_output @property def source_dictionary(self): return self.dicts[self.args.source_lang] @property def target_dictionary(self): return self.dicts[self.args.target_lang] def max_positions(self): 'Return the max sentence length allowed by the task.' if (len(self.datasets.values()) == 0): return {('%s-%s' % (self.args.source_lang, self.args.target_lang)): (self.args.max_source_positions, self.args.max_target_positions)} return OrderedDict([(key, (self.args.max_source_positions, self.args.max_target_positions)) for split in self.datasets.keys() for key in self.datasets[split].datasets.keys()])
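# Illustrative sketch (not part of the original source): the shape of the
# flattened logging output built by aggregate_logging_outputs above.
# Per-language-pair statistics are prefixed with '<lang_pair>:' and the shared
# totals (loss, sample_size, nsentences, ntokens) are summed over pairs. The
# numbers are made up.
def flatten_logging(agg_by_pair):
    flat = {'{}:{}'.format(pair, k): v
            for pair, stats in agg_by_pair.items()
            for k, v in stats.items()}
    flat['loss'] = sum(stats['loss'] for stats in agg_by_pair.values())
    flat['sample_size'] = sum(stats['sample_size'] for stats in agg_by_pair.values())
    return flat


# example
print(flatten_logging({
    'en-de': {'loss': 2.1, 'sample_size': 1000},
    'de-en': {'loss': 1.8, 'sample_size': 900},
}))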
def _get_bt_dataset_key(lang_pair):
    return 'bt:' + lang_pair
def _get_denoising_dataset_key(lang_pair):
    return 'denoising:' + lang_pair
def parse_lambda_config(x):
    """
    Parse the configuration of lambda coefficient (for scheduling).
        x = "3"                  # lambda will be a constant equal to x
        x = "0:1,1000:0"         # lambda will start from 1 and linearly decrease
                                 # to 0 during the first 1000 iterations
        x = "0:0,1000:0,2000:1"  # lambda will be equal to 0 for the first 1000
                                 # iterations, then will linearly increase to 1 until iteration 2000
    """
    split = x.split(',')
    if len(split) == 1:
        return float(x), None
    else:
        split = [s.split(':') for s in split]
        assert all(len(s) == 2 for s in split)
        assert all(k.isdigit() for k, _ in split)
        assert all(int(split[i][0]) < int(split[i + 1][0]) for i in range(len(split) - 1))
        return float(split[0][1]), [(int(k), float(v)) for k, v in split]
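# Illustrative sketch (not part of the original source): what the three forms
# documented in parse_lambda_config above actually return. The first element
# is the initial lambda; the second is the optional list of (step, value)
# breakpoints that update_step later interpolates between.
print(parse_lambda_config('3'))                  # (3.0, None)
print(parse_lambda_config('0:1,1000:0'))         # (1.0, [(0, 1.0), (1000, 0.0)])
print(parse_lambda_config('0:0,1000:0,2000:1'))  # (0.0, [(0, 0.0), (1000, 0.0), (2000, 1.0)])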
@register_task('semisupervised_translation') class SemisupervisedTranslationTask(MultilingualTranslationTask): 'A task for training multiple translation models simultaneously.\n\n We iterate round-robin over batches from multiple language pairs, ordered\n according to the `--lang-pairs` argument.\n\n The training loop is roughly:\n\n for i in range(len(epoch)):\n for lang_pair in args.lang_pairs:\n batch = next_batch_for_lang_pair(lang_pair)\n loss = criterion(model_for_lang_pair(lang_pair), batch)\n loss.backward()\n optimizer.step()\n\n In practice, `next_batch_for_lang_pair` is abstracted in a FairseqDataset\n (e.g., `RoundRobinZipDatasets`) and `model_for_lang_pair` is a model that\n implements the `FairseqMultiModel` interface.\n\n During inference it is required to specify a single `--source-lang` and\n `--target-lang`, instead of `--lang-pairs`.\n ' @staticmethod def add_args(parser): 'Add task-specific arguments to the parser.' MultilingualTranslationTask.add_args(parser) parser.add_argument('--lambda-parallel-config', default='1.0', type=str, metavar='CONFIG', help='cross-entropy reconstruction coefficient (parallel data). use fixed weight during training if set to floating point number. use piecewise linear function over number of updates to schedule the weight with the format: w0:step0,w1:step1,...') parser.add_argument('--lambda-denoising-config', default='0.0', type=str, metavar='CONFIG', help='Cross-entropy reconstruction coefficient (denoising autoencoding)use fixed weight during training if set to floating point number. use piecewise linear function over number of updates to schedule the weight with the format: w0:step0,w1:step1,...') parser.add_argument('--lambda-otf-bt-config', default='0.0', type=str, metavar='CONFIG', help='cross-entropy reconstruction coefficient (on-the-fly back-translation parallel data)use fixed weight during training if set to floating point number. 
use piecewise linear function over number of updates to schedule the weight with the format: w0:step0,w1:step1,...') parser.add_argument('--bt-max-len-a', default=1.1, type=float, metavar='N', help='generate back-translated sequences of maximum length ax + b, where x is the source length') parser.add_argument('--bt-max-len-b', default=10.0, type=float, metavar='N', help='generate back-translated sequences of maximum length ax + b, where x is the source length') parser.add_argument('--bt-beam-size', default=1, type=int, metavar='N', help='beam size used in beam search of online back-translation') parser.add_argument('--max-word-shuffle-distance', default=3.0, type=float, metavar='N', help='maximum word shuffle distance for denoising autoencoding data generation') parser.add_argument('--word-dropout-prob', default=0.1, type=float, metavar='N', help='word dropout probability for denoising autoencoding data generation') parser.add_argument('--word-blanking-prob', default=0.2, type=float, metavar='N', help='word blanking probability for denoising autoencoding data generation') def __init__(self, args, dicts, training): super().__init__(args, dicts, training) (self.lambda_parallel, self.lambda_parallel_steps) = parse_lambda_config(args.lambda_parallel_config) (self.lambda_otf_bt, self.lambda_otf_bt_steps) = parse_lambda_config(args.lambda_otf_bt_config) (self.lambda_denoising, self.lambda_denoising_steps) = parse_lambda_config(args.lambda_denoising_config) if ((self.lambda_denoising > 0.0) or (self.lambda_denoising_steps is not None)): denoising_lang_pairs = [('%s-%s' % (tgt, tgt)) for tgt in {lang_pair.split('-')[1] for lang_pair in args.lang_pairs}] self.model_lang_pairs = (self.model_lang_pairs + denoising_lang_pairs) self.backtranslate_datasets = {} self.backtranslators = {} @classmethod def setup_task(cls, args, **kwargs): (dicts, training) = MultilingualTranslationTask.prepare(args, **kwargs) return cls(args, dicts, training) def load_dataset(self, split, epoch=0, **kwargs): 'Load a dataset split.' 
paths = self.args.data.split(':') assert (len(paths) > 0) data_path = paths[(epoch % len(paths))] def split_exists(split, src, tgt, lang): if (src is not None): filename = os.path.join(data_path, '{}.{}-{}.{}'.format(split, src, tgt, lang)) else: filename = os.path.join(data_path, '{}.{}-None.{}'.format(split, src, tgt)) if (self.args.raw_text and IndexedRawTextDataset.exists(filename)): return True elif ((not self.args.raw_text) and IndexedDataset.exists(filename)): return True return False def indexed_dataset(path, dictionary): if self.args.raw_text: return IndexedRawTextDataset(path, dictionary) elif IndexedDataset.exists(path): if self.args.lazy_load: return IndexedDataset(path, fix_lua_indexing=True) else: return IndexedCachedDataset(path, fix_lua_indexing=True) return None (src_datasets, tgt_datasets) = ({}, {}) if ((self.lambda_parallel > 0.0) or (self.lambda_parallel_steps is not None) or (not split.startswith('train'))): for lang_pair in self.lang_pairs: (src, tgt) = lang_pair.split('-') if split_exists(split, src, tgt, src): prefix = os.path.join(data_path, '{}.{}-{}.'.format(split, src, tgt)) elif split_exists(split, tgt, src, src): prefix = os.path.join(data_path, '{}.{}-{}.'.format(split, tgt, src)) else: continue src_datasets[lang_pair] = indexed_dataset((prefix + src), self.dicts[src]) tgt_datasets[lang_pair] = indexed_dataset((prefix + tgt), self.dicts[tgt]) print('| parallel-{} {} {} examples'.format(data_path, split, len(src_datasets[lang_pair]))) if (len(src_datasets) == 0): raise FileNotFoundError('Dataset not found: {} ({})'.format(split, data_path)) backtranslate_datasets = {} if (((self.lambda_otf_bt > 0.0) or (self.lambda_otf_bt_steps is not None)) and split.startswith('train')): for lang_pair in self.lang_pairs: (src, tgt) = lang_pair.split('-') if (not split_exists(split, tgt, None, tgt)): raise FileNotFoundError('Dataset not found: backtranslation {} ({})'.format(split, data_path)) filename = os.path.join(data_path, '{}.{}-None.{}'.format(split, tgt, tgt)) dataset = indexed_dataset(filename, self.dicts[tgt]) lang_pair_dataset_tgt = LanguagePairDataset(dataset, dataset.sizes, self.dicts[tgt], left_pad_source=self.args.left_pad_source, left_pad_target=self.args.left_pad_target) lang_pair_dataset = LanguagePairDataset(dataset, dataset.sizes, src_dict=self.dicts[src], tgt=dataset, tgt_sizes=dataset.sizes, tgt_dict=self.dicts[tgt], left_pad_source=self.args.left_pad_source, left_pad_target=self.args.left_pad_target) backtranslate_datasets[lang_pair] = BacktranslationDataset(tgt_dataset=self.alter_dataset_langtok(lang_pair_dataset_tgt, src_eos=self.dicts[tgt].eos(), src_lang=tgt, tgt_lang=src), backtranslation_fn=self.backtranslators[lang_pair], src_dict=self.dicts[src], tgt_dict=self.dicts[tgt], output_collater=self.alter_dataset_langtok(lang_pair_dataset=lang_pair_dataset, src_eos=self.dicts[src].eos(), src_lang=src, tgt_eos=self.dicts[tgt].eos(), tgt_lang=tgt).collater) print('| backtranslate-{}: {} {} {} examples'.format(tgt, data_path, split, len(backtranslate_datasets[lang_pair]))) self.backtranslate_datasets[lang_pair] = backtranslate_datasets[lang_pair] noising_datasets = {} if (((self.lambda_denoising > 0.0) or (self.lambda_denoising_steps is not None)) and split.startswith('train')): for lang_pair in self.lang_pairs: (_, tgt) = lang_pair.split('-') if (not split_exists(split, tgt, None, tgt)): continue filename = os.path.join(data_path, '{}.{}-None.{}'.format(split, tgt, tgt)) tgt_dataset1 = indexed_dataset(filename, self.dicts[tgt]) tgt_dataset2 = 
indexed_dataset(filename, self.dicts[tgt]) noising_dataset = NoisingDataset(tgt_dataset1, self.dicts[tgt], seed=1, max_word_shuffle_distance=self.args.max_word_shuffle_distance, word_dropout_prob=self.args.word_dropout_prob, word_blanking_prob=self.args.word_blanking_prob) noising_datasets[lang_pair] = self.alter_dataset_langtok(LanguagePairDataset(noising_dataset, tgt_dataset1.sizes, self.dicts[tgt], tgt_dataset2, tgt_dataset2.sizes, self.dicts[tgt], left_pad_source=self.args.left_pad_source, left_pad_target=self.args.left_pad_target), src_eos=self.dicts[tgt].eos(), src_lang=tgt, tgt_eos=self.dicts[tgt].eos(), tgt_lang=tgt) print('| denoising-{}: {} {} {} examples'.format(tgt, data_path, split, len(noising_datasets[lang_pair]))) def language_pair_dataset(lang_pair): (src, tgt) = lang_pair.split('-') (src_dataset, tgt_dataset) = (src_datasets[lang_pair], tgt_datasets[lang_pair]) return self.alter_dataset_langtok(LanguagePairDataset(src_dataset, src_dataset.sizes, self.dicts[src], tgt_dataset, tgt_dataset.sizes, self.dicts[tgt], left_pad_source=self.args.left_pad_source, left_pad_target=self.args.left_pad_target, max_source_positions=self.args.max_source_positions, max_target_positions=self.args.max_target_positions), self.dicts[src].eos(), src, self.dicts[tgt].eos(), tgt) self.datasets[split] = RoundRobinZipDatasets(OrderedDict((([(lang_pair, language_pair_dataset(lang_pair)) for lang_pair in src_datasets.keys()] + [(_get_bt_dataset_key(lang_pair), dataset) for (lang_pair, dataset) in backtranslate_datasets.items()]) + [(_get_denoising_dataset_key(lang_pair), dataset) for (lang_pair, dataset) in noising_datasets.items()])), eval_key=(None if self.training else ('%s-%s' % (self.args.source_lang, self.args.target_lang)))) def build_model(self, args): from fairseq import models model = models.build_model(args, self) if (not isinstance(model, FairseqMultiModel)): raise ValueError('SemisupervisedTranslationTask requires a FairseqMultiModel architecture') self.sequence_generators = {} if (((self.lambda_otf_bt > 0.0) or (self.lambda_otf_bt_steps is not None)) and self.training): for lang_pair in self.lang_pairs: (src, tgt) = lang_pair.split('-') key = '{}-{}'.format(tgt, src) self.sequence_generators[key] = SequenceGenerator(tgt_dict=self.dicts[src], beam_size=args.bt_beam_size, max_len_a=args.bt_max_len_a, max_len_b=args.bt_max_len_b) decoder_lang_tok_idx = self.get_decoder_langtok(src) def backtranslate_fn(sample, model=model.models[key], bos_token=decoder_lang_tok_idx, sequence_generator=self.sequence_generators[key]): return sequence_generator.generate([model], sample, bos_token=bos_token) self.backtranslators[lang_pair] = backtranslate_fn return model def train_step(self, sample, model, criterion, optimizer, ignore_grad=False): model.train() (agg_loss, agg_sample_size, agg_logging_output) = (0.0, 0.0, {}) def forward_backward(model, samples, logging_output_key, weight): nonlocal agg_loss, agg_sample_size, agg_logging_output if ((samples is None) or (len(samples) == 0)): return (loss, sample_size, logging_output) = criterion(model, samples) if ignore_grad: loss *= 0 else: loss *= weight optimizer.backward(loss) agg_loss += loss.detach().item() agg_sample_size += sample_size agg_logging_output[logging_output_key] = logging_output if (self.lambda_parallel > 0.0): for lang_pair in self.lang_pairs: forward_backward(model.models[lang_pair], sample[lang_pair], lang_pair, self.lambda_parallel) if (self.lambda_otf_bt > 0.0): for lang_pair in self.lang_pairs: sample_key = 
_get_bt_dataset_key(lang_pair) forward_backward(model.models[lang_pair], sample[sample_key], sample_key, self.lambda_otf_bt) if (self.lambda_denoising > 0.0): for lang_pair in self.lang_pairs: (_, tgt) = lang_pair.split('-') sample_key = _get_denoising_dataset_key(lang_pair) forward_backward(model.models['{0}-{0}'.format(tgt)], sample[sample_key], sample_key, self.lambda_denoising) return (agg_loss, agg_sample_size, agg_logging_output) def update_step(self, num_updates): def lambda_step_func(config, n_iter): '\n Update a lambda value according to its schedule configuration.\n ' ranges = [i for i in range((len(config) - 1)) if (config[i][0] <= n_iter < config[(i + 1)][0])] if (len(ranges) == 0): assert (n_iter >= config[(- 1)][0]) return config[(- 1)][1] assert (len(ranges) == 1) i = ranges[0] (x_a, y_a) = config[i] (x_b, y_b) = config[(i + 1)] return (y_a + (((n_iter - x_a) * float((y_b - y_a))) / float((x_b - x_a)))) if (self.lambda_parallel_steps is not None): self.lambda_parallel = lambda_step_func(self.lambda_parallel_steps, num_updates) if (self.lambda_denoising_steps is not None): self.lambda_denoising = lambda_step_func(self.lambda_denoising_steps, num_updates) if (self.lambda_otf_bt_steps is not None): self.lambda_otf_bt = lambda_step_func(self.lambda_otf_bt_steps, num_updates) def aggregate_logging_outputs(self, logging_outputs, criterion): logging_output_keys = {key for logging_output in logging_outputs for key in logging_output} lang_pair_keys = set(((self.lang_pairs + [_get_bt_dataset_key(lang_pair) for lang_pair in self.lang_pairs]) + [_get_denoising_dataset_key(lang_pair) for lang_pair in self.lang_pairs])) logging_output_keys = logging_output_keys.intersection(lang_pair_keys) return super().aggregate_logging_outputs(logging_outputs, criterion, logging_output_keys)
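# Illustrative sketch (not part of the original source): the piecewise-linear
# interpolation performed by lambda_step_func inside update_step above, shown
# as a standalone function and evaluated on the schedule "0:0,1000:0,2000:1".
def toy_lambda_step(config, n_iter):
    ranges = [i for i in range(len(config) - 1)
              if config[i][0] <= n_iter < config[i + 1][0]]
    if len(ranges) == 0:
        return config[-1][1]  # past the last breakpoint: hold the final value
    i = ranges[0]
    (x_a, y_a), (x_b, y_b) = config[i], config[i + 1]
    return y_a + (n_iter - x_a) * float(y_b - y_a) / float(x_b - x_a)


# example
_schedule = [(0, 0.0), (1000, 0.0), (2000, 1.0)]
for _step in (0, 500, 1000, 1500, 2000, 3000):
    print(_step, toy_lambda_step(_schedule, _step))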
@register_task('sentence_prediction') class SentencePredictionTask(FairseqTask): '\n Sentence (or sentence pair) prediction (classification or regression) task.\n\n Args:\n dictionary (Dictionary): the dictionary for the input of the task\n ' @staticmethod def add_args(parser): 'Add task-specific arguments to the parser.' parser.add_argument('data', metavar='FILE', help='file prefix for data') parser.add_argument('--num-classes', type=int, default=(- 1), help='number of classes') parser.add_argument('--init-token', type=int, default=None, help='add token at the beginning of each batch item') parser.add_argument('--separator-token', type=int, default=None, help='add separator token between inputs') parser.add_argument('--regression-target', action='store_true', default=False) parser.add_argument('--no-shuffle', action='store_true', default=False) parser.add_argument('--truncate-sequence', action='store_true', default=False, help='Truncate sequence to max_sequence_length') def __init__(self, args, data_dictionary, label_dictionary): super().__init__(args) self.dictionary = data_dictionary self.label_dictionary = label_dictionary @classmethod def load_dictionary(cls, args, filename, source=True): 'Load the dictionary from the filename\n\n Args:\n filename (str): the filename\n ' dictionary = Dictionary.load(filename) dictionary.add_symbol('<mask>') return dictionary @classmethod def setup_task(cls, args, **kwargs): assert (args.num_classes > 0), 'Must set --num-classes' args.tokens_per_sample = args.max_positions data_dict = cls.load_dictionary(args, os.path.join(args.data, 'input0', 'dict.txt'), source=True) print('| [input] dictionary: {} types'.format(len(data_dict))) label_dict = None if (not args.regression_target): label_dict = cls.load_dictionary(args, os.path.join(args.data, 'label', 'dict.txt'), source=False) print('| [label] dictionary: {} types'.format(len(label_dict))) else: label_dict = data_dict return SentencePredictionTask(args, data_dict, label_dict) def load_dataset(self, split, combine=False, **kwargs): 'Load a given dataset split (e.g., train, valid, test).' 
def get_path(type, split): return os.path.join(self.args.data, type, split) def make_dataset(type, dictionary): split_path = get_path(type, split) dataset = data_utils.load_indexed_dataset(split_path, self.source_dictionary, self.args.dataset_impl, combine=combine) return dataset input0 = make_dataset('input0', self.source_dictionary) assert (input0 is not None), 'could not find dataset: {}'.format(get_path(type, split)) input1 = make_dataset('input1', self.source_dictionary) if (self.args.init_token is not None): input0 = PrependTokenDataset(input0, self.args.init_token) if (input1 is None): src_tokens = input0 else: if (self.args.separator_token is not None): input1 = PrependTokenDataset(input1, self.args.separator_token) src_tokens = ConcatSentencesDataset(input0, input1) with data_utils.numpy_seed(self.args.seed): shuffle = np.random.permutation(len(src_tokens)) if self.args.truncate_sequence: src_tokens = TruncateDataset(src_tokens, self.args.max_positions) dataset = {'id': IdDataset(), 'net_input': {'src_tokens': RightPadDataset(src_tokens, pad_idx=self.source_dictionary.pad()), 'src_lengths': NumelDataset(src_tokens, reduce=False)}, 'nsentences': NumSamplesDataset(), 'ntokens': NumelDataset(src_tokens, reduce=True)} if (not self.args.regression_target): label_dataset = make_dataset('label', self.target_dictionary) if (label_dataset is not None): dataset.update(target=OffsetTokensDataset(StripTokenDataset(label_dataset, id_to_strip=self.target_dictionary.eos()), offset=(- self.target_dictionary.nspecial))) else: label_path = '{0}.label'.format(get_path('label', split)) if os.path.exists(label_path): dataset.update(target=RawLabelDataset([float(x.strip()) for x in open(label_path).readlines()])) nested_dataset = NestedDictionaryDataset(dataset, sizes=[src_tokens.sizes]) if self.args.no_shuffle: dataset = nested_dataset else: dataset = SortDataset(nested_dataset, sort_order=[shuffle]) print('| Loaded {0} with #samples: {1}'.format(split, len(dataset))) self.datasets[split] = dataset return self.datasets[split] def build_model(self, args): from fairseq import models model = models.build_model(args, self) model.register_classification_head('sentence_classification_head', num_classes=self.args.num_classes) return model def max_positions(self): return self.args.max_positions @property def source_dictionary(self): return self.dictionary @property def target_dictionary(self): return self.label_dictionary
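# Illustrative sketch (not part of the original source): how classification
# targets come out of the label dictionary in load_dataset above. Labels are
# binarized with a regular Dictionary, so the first nspecial entries are
# special symbols; stripping the trailing eos and offsetting by -nspecial maps
# each label line to an integer in [0, num_classes). The ids below are made up
# (nspecial = 4, labels '0' and '1' binarized as 4 and 5, eos = 2).
def toy_label_targets(label_ids, eos, nspecial):
    stripped = [i for i in label_ids if i != eos]  # StripTokenDataset(eos)
    return [i - nspecial for i in stripped]        # OffsetTokensDataset(-nspecial)


# example
print(toy_label_targets([4, 2], eos=2, nspecial=4))  # [0]
print(toy_label_targets([5, 2], eos=2, nspecial=4))  # [1]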
@register_task('sentence_ranking') class SentenceRankingTask(FairseqTask): '\n Ranking task on multiple sentences.\n\n Args:\n dictionary (Dictionary): the dictionary for the input of the task\n ' @staticmethod def add_args(parser): 'Add task-specific arguments to the parser.' parser.add_argument('data', metavar='FILE', help='file prefix for data') parser.add_argument('--num-classes', type=int, help='number of sentences to be ranked') parser.add_argument('--init-token', type=int, help='add token at the beginning of each batch item') parser.add_argument('--separator-token', type=int, help='add separator token between inputs') parser.add_argument('--no-shuffle', action='store_true') parser.add_argument('--truncate-sequence', action='store_true', help='Truncate sequence to max_positions') parser.add_argument('--max-option-length', type=int, help='max length for each option') def __init__(self, args, dictionary): super().__init__(args) self.dictionary = dictionary @classmethod def load_dictionary(cls, args, filename, source=True): 'Load the dictionary from the filename\n\n Args:\n filename (str): the filename\n ' dictionary = Dictionary.load(filename) dictionary.add_symbol('<mask>') return dictionary @classmethod def setup_task(cls, args, **kwargs): assert (args.criterion == 'sentence_ranking'), 'Must set --criterion=sentence_ranking' data_dict = cls.load_dictionary(args, os.path.join(args.data, 'input0', 'dict.txt'), source=True) print('| [input] dictionary: {} types'.format(len(data_dict))) return SentenceRankingTask(args, data_dict) def load_dataset(self, split, combine=False, **kwargs): 'Load a given dataset split (e.g., train, valid, test).' def get_path(type, split): return os.path.join(self.args.data, type, split) def make_dataset(type, dictionary): split_path = get_path(type, split) dataset = data_utils.load_indexed_dataset(split_path, self.source_dictionary, self.args.dataset_impl, combine=combine) return dataset input0 = make_dataset('input0', self.source_dictionary) input_options = [make_dataset('input{idx}'.format(idx=(idx + 1)), self.source_dictionary) for idx in range(self.args.num_classes)] if (self.args.separator_token is not None): input0 = PrependTokenDataset(input0, self.args.separator_token) src_tokens = [] for input_option in input_options: if (self.args.init_token is not None): input_option = PrependTokenDataset(input_option, self.args.init_token) if (self.args.max_option_length is not None): input_option = TruncateDataset(input_option, self.args.max_option_length) src_token = ConcatSentencesDataset(input_option, input0) if self.args.truncate_sequence: src_token = TruncateDataset(src_token, self.args.max_positions) src_tokens.append(src_token) with data_utils.numpy_seed(self.args.seed): shuffle = np.random.permutation(len(src_tokens[0])) dataset = {'id': IdDataset(), 'nsentences': NumSamplesDataset(), 'ntokens': NumelDataset(src_tokens[0], reduce=True)} for src_token_idx in range(len(src_tokens)): dataset.update({'net_input{idx}'.format(idx=(src_token_idx + 1)): {'src_tokens': RightPadDataset(src_tokens[src_token_idx], pad_idx=self.source_dictionary.pad()), 'src_lengths': NumelDataset(src_tokens[src_token_idx], reduce=False)}}) label_path = '{}.label'.format(get_path('label', split)) if os.path.exists(label_path): with open(label_path) as h: dataset.update(target=RawLabelDataset([int(x.strip()) for x in h.readlines()])) nested_dataset = NestedDictionaryDataset(dataset, sizes=[np.maximum.reduce([src_token.sizes for src_token in src_tokens])]) if self.args.no_shuffle: 
dataset = nested_dataset else: dataset = SortDataset(nested_dataset, sort_order=[shuffle]) print('| Loaded {0} with #samples: {1}'.format(split, len(dataset))) self.datasets[split] = dataset return self.datasets[split] def build_model(self, args): from fairseq import models model = models.build_model(args, self) model.register_classification_head('sentence_classification_head', num_classes=1) return model def max_positions(self): return self.args.max_positions @property def source_dictionary(self): return self.dictionary @property def target_dictionary(self): return self.dictionary
def load_langpair_dataset(data_path, split, src, src_dict, tgt, tgt_dict, combine, dataset_impl, upsample_primary, left_pad_source, left_pad_target, max_source_positions, max_target_positions, prepend_bos=False, load_alignments=False): def split_exists(split, src, tgt, lang, data_path): filename = os.path.join(data_path, '{}.{}-{}.{}'.format(split, src, tgt, lang)) return indexed_dataset.dataset_exists(filename, impl=dataset_impl) src_datasets = [] tgt_datasets = [] for k in itertools.count(): split_k = (split + (str(k) if (k > 0) else '')) if split_exists(split_k, src, tgt, src, data_path): prefix = os.path.join(data_path, '{}.{}-{}.'.format(split_k, src, tgt)) elif split_exists(split_k, tgt, src, src, data_path): prefix = os.path.join(data_path, '{}.{}-{}.'.format(split_k, tgt, src)) elif (k > 0): break else: raise FileNotFoundError('Dataset not found: {} ({})'.format(split, data_path)) src_datasets.append(data_utils.load_indexed_dataset((prefix + src), src_dict, dataset_impl)) tgt_datasets.append(data_utils.load_indexed_dataset((prefix + tgt), tgt_dict, dataset_impl)) print('| {} {} {}-{} {} examples'.format(data_path, split_k, src, tgt, len(src_datasets[(- 1)]))) if (not combine): break assert (len(src_datasets) == len(tgt_datasets)) if (len(src_datasets) == 1): (src_dataset, tgt_dataset) = (src_datasets[0], tgt_datasets[0]) else: sample_ratios = ([1] * len(src_datasets)) sample_ratios[0] = upsample_primary src_dataset = ConcatDataset(src_datasets, sample_ratios) tgt_dataset = ConcatDataset(tgt_datasets, sample_ratios) if prepend_bos: assert (hasattr(src_dict, 'bos_index') and hasattr(tgt_dict, 'bos_index')) src_dataset = PrependTokenDataset(src_dataset, src_dict.bos()) tgt_dataset = PrependTokenDataset(tgt_dataset, tgt_dict.bos()) align_dataset = None if load_alignments: align_path = os.path.join(data_path, '{}.align.{}-{}'.format(split, src, tgt)) if indexed_dataset.dataset_exists(align_path, impl=dataset_impl): align_dataset = data_utils.load_indexed_dataset(align_path, None, dataset_impl) return LanguagePairDataset(src_dataset, src_dataset.sizes, src_dict, tgt_dataset, tgt_dataset.sizes, tgt_dict, left_pad_source=left_pad_source, left_pad_target=left_pad_target, max_source_positions=max_source_positions, max_target_positions=max_target_positions, align_dataset=align_dataset)
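# Illustrative sketch (not part of the original source): the effect of
# upsample_primary in load_langpair_dataset above. When several shards
# (train, train1, ...) are concatenated, sample_ratios[0] repeats the primary
# shard that many times relative to the extra shards; this is a simplified
# list-based stand-in for ConcatDataset with sample_ratios.
def toy_concat_with_ratios(datasets, sample_ratios):
    out = []
    for ds, ratio in zip(datasets, sample_ratios):
        out.extend(list(ds) * ratio)
    return out


# example: --upsample-primary 2 with one extra shard
_primary, _extra = ['p0', 'p1'], ['e0', 'e1', 'e2']
_ratios = [1] * 2
_ratios[0] = 2
print(toy_concat_with_ratios([_primary, _extra], _ratios))  # ['p0', 'p1', 'p0', 'p1', 'e0', 'e1', 'e2']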
@register_task('translation') class TranslationTask(FairseqTask): '\n Translate from one (source) language to another (target) language.\n\n Args:\n src_dict (~fairseq.data.Dictionary): dictionary for the source language\n tgt_dict (~fairseq.data.Dictionary): dictionary for the target language\n\n .. note::\n\n The translation task is compatible with :mod:`fairseq-train`,\n :mod:`fairseq-generate` and :mod:`fairseq-interactive`.\n\n The translation task provides the following additional command-line\n arguments:\n\n .. argparse::\n :ref: fairseq.tasks.translation_parser\n :prog:\n ' @staticmethod def add_args(parser): 'Add task-specific arguments to the parser.' parser.add_argument('data', help='colon separated path to data directories list, will be iterated upon during epochs in round-robin manner') parser.add_argument('-s', '--source-lang', default=None, metavar='SRC', help='source language') parser.add_argument('-t', '--target-lang', default=None, metavar='TARGET', help='target language') parser.add_argument('--lazy-load', action='store_true', help='load the dataset lazily') parser.add_argument('--raw-text', action='store_true', help='load raw text dataset') parser.add_argument('--load-alignments', action='store_true', help='load the binarized alignments') parser.add_argument('--left-pad-source', default='True', type=str, metavar='BOOL', help='pad the source on the left') parser.add_argument('--left-pad-target', default='False', type=str, metavar='BOOL', help='pad the target on the left') parser.add_argument('--max-source-positions', default=1024, type=int, metavar='N', help='max number of tokens in the source sequence') parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N', help='max number of tokens in the target sequence') parser.add_argument('--upsample-primary', default=1, type=int, help='amount to upsample primary dataset') def __init__(self, args, src_dict, tgt_dict): super().__init__(args) self.src_dict = src_dict self.tgt_dict = tgt_dict @classmethod def setup_task(cls, args, **kwargs): 'Setup the task (e.g., load dictionaries).\n\n Args:\n args (argparse.Namespace): parsed command-line arguments\n ' args.left_pad_source = options.eval_bool(args.left_pad_source) args.left_pad_target = options.eval_bool(args.left_pad_target) if getattr(args, 'raw_text', False): utils.deprecation_warning('--raw-text is deprecated, please use --dataset-impl=raw') args.dataset_impl = 'raw' elif getattr(args, 'lazy_load', False): utils.deprecation_warning('--lazy-load is deprecated, please use --dataset-impl=lazy') args.dataset_impl = 'lazy' paths = args.data.split(':') assert (len(paths) > 0) if ((args.source_lang is None) or (args.target_lang is None)): (args.source_lang, args.target_lang) = data_utils.infer_language_pair(paths[0]) if ((args.source_lang is None) or (args.target_lang is None)): raise Exception('Could not infer language pair, please provide it explicitly') src_dict = cls.load_dictionary(os.path.join(paths[0], 'dict.{}.txt'.format(args.source_lang))) tgt_dict = cls.load_dictionary(os.path.join(paths[0], 'dict.{}.txt'.format(args.target_lang))) assert (src_dict.pad() == tgt_dict.pad()) assert (src_dict.eos() == tgt_dict.eos()) assert (src_dict.unk() == tgt_dict.unk()) print('| [{}] dictionary: {} types'.format(args.source_lang, len(src_dict))) print('| [{}] dictionary: {} types'.format(args.target_lang, len(tgt_dict))) return cls(args, src_dict, tgt_dict) def load_dataset(self, split, epoch=0, combine=False, **kwargs): 'Load a given dataset split.\n\n 
Args:\n split (str): name of the split (e.g., train, valid, test)\n ' paths = self.args.data.split(':') assert (len(paths) > 0) data_path = paths[(epoch % len(paths))] (src, tgt) = (self.args.source_lang, self.args.target_lang) self.datasets[split] = load_langpair_dataset(data_path, split, src, self.src_dict, tgt, self.tgt_dict, combine=combine, dataset_impl=self.args.dataset_impl, upsample_primary=self.args.upsample_primary, left_pad_source=self.args.left_pad_source, left_pad_target=self.args.left_pad_target, max_source_positions=self.args.max_source_positions, max_target_positions=self.args.max_target_positions, load_alignments=self.args.load_alignments) def build_dataset_for_inference(self, src_tokens, src_lengths): return LanguagePairDataset(src_tokens, src_lengths, self.source_dictionary) def max_positions(self): 'Return the max sentence length allowed by the task.' return (self.args.max_source_positions, self.args.max_target_positions) @property def source_dictionary(self): 'Return the source :class:`~fairseq.data.Dictionary`.' return self.src_dict @property def target_dictionary(self): 'Return the target :class:`~fairseq.data.Dictionary`.' return self.tgt_dict
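# Illustrative sketch (not fairseq code): load_dataset above picks one data directory
# per epoch, round-robin over the colon-separated --data argument. Directory names
# here are made up.
paths = '/data/shard0:/data/shard1:/data/shard2'.split(':')
for epoch in range(5):
    print(epoch, paths[epoch % len(paths)])
# 0 /data/shard0, 1 /data/shard1, 2 /data/shard2, 3 /data/shard0, 4 /data/shard1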
@register_task('translation_from_pretrained_xlm')
class TranslationFromPretrainedXLMTask(TranslationTask):
    """
    Same as TranslationTask except use the MaskedLMDictionary class so that
    we can load data that was binarized with the MaskedLMDictionary class.

    This task should be used for the entire training pipeline when we want to
    train an NMT model from a pretrained XLM checkpoint: binarizing NMT data,
    training NMT with the pretrained XLM checkpoint, and subsequent evaluation
    of that trained model.
    """

    @classmethod
    def load_dictionary(cls, filename):
        """Load the masked LM dictionary from the filename

        Args:
            filename (str): the filename
        """
        return MaskedLMDictionary.load(filename)
def tokenize_line(line):
    line = SPACE_NORMALIZER.sub(' ', line)
    line = line.strip()
    return line.split()
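# Usage sketch for tokenize_line above. SPACE_NORMALIZER is assumed to be a compiled
# whitespace regex (roughly re.compile(r"\s+")); the exact pattern used by fairseq is
# not shown in this excerpt.
import re

SPACE_NORMALIZER = re.compile(r"\s+")

print(tokenize_line("  Hello \t world\n"))  # ['Hello', 'world']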
class Trainer(object): 'Main class for data parallel training.\n\n This class supports synchronous distributed data parallel training,\n where multiple workers each have a full model replica and gradients\n are accumulated across workers before each update. We use\n :class:`~torch.nn.parallel.DistributedDataParallel` to handle\n communication of the gradients across workers.\n ' def __init__(self, args, task, model, criterion, dummy_batch=None, oom_batch=None): self.args = args self.task = task self._criterion = criterion self._model = model self.cuda = (torch.cuda.is_available() and (not args.cpu)) if args.fp16: self._criterion = self._criterion.half() self._model = self._model.half() if self.cuda: self._criterion = self._criterion.cuda() self._model = self._model.cuda() self._dummy_batch = dummy_batch self._oom_batch = (oom_batch or dummy_batch) self._lr_scheduler = None self._num_updates = 0 self._optim_history = None self._optimizer = None self._prev_grad_norm = None self._wrapped_criterion = None self._wrapped_model = None self._all_reduce_list = ([0.0] * 6) self.fast_stat_sync = args.fast_stat_sync self.init_meters(args) def init_meters(self, args): self.meters = OrderedDict() self.meters['train_loss'] = AverageMeter() self.meters['train_nll_loss'] = AverageMeter() self.meters['valid_loss'] = AverageMeter() self.meters['valid_nll_loss'] = AverageMeter() self.meters['wps'] = TimeMeter() self.meters['ups'] = TimeMeter() self.meters['wpb'] = AverageMeter() self.meters['bsz'] = AverageMeter() self.meters['gnorm'] = AverageMeter() self.meters['clip'] = AverageMeter() self.meters['oom'] = AverageMeter() if args.fp16: self.meters['loss_scale'] = AverageMeter() self.meters['wall'] = TimeMeter() self.meters['train_wall'] = StopwatchMeter() @property def criterion(self): if (self._wrapped_criterion is None): if (utils.has_parameters(self._criterion) and (self.args.distributed_world_size > 1) and (not self.args.use_bmuf)): self._wrapped_criterion = models.DistributedFairseqModel(self.args, self._criterion) else: self._wrapped_criterion = self._criterion return self._wrapped_criterion @property def model(self): if (self._wrapped_model is None): if ((self.args.distributed_world_size > 1) and (not self.args.use_bmuf)): self._wrapped_model = models.DistributedFairseqModel(self.args, self._model) else: self._wrapped_model = self._model return self._wrapped_model @property def optimizer(self): if (self._optimizer is None): self._build_optimizer() return self._optimizer @property def lr_scheduler(self): if (self._lr_scheduler is None): self._build_optimizer() return self._lr_scheduler def _build_optimizer(self): params = list(filter((lambda p: p.requires_grad), chain(self.model.parameters(), self.criterion.parameters()))) if self.args.fp16: if (self.cuda and (torch.cuda.get_device_capability(0)[0] < 7)): print('| WARNING: your device does NOT support faster training with --fp16, please switch to FP32 which is likely to be faster') if self.args.memory_efficient_fp16: self._optimizer = optim.MemoryEfficientFP16Optimizer.build_optimizer(self.args, params) else: self._optimizer = optim.FP16Optimizer.build_optimizer(self.args, params) else: if (self.cuda and (torch.cuda.get_device_capability(0)[0] >= 7)): print('| NOTICE: your device may support faster training with --fp16') self._optimizer = optim.build_optimizer(self.args, params) if self.args.use_bmuf: self._optimizer = optim.FairseqBMUF(self.args, self._optimizer) self._lr_scheduler = lr_scheduler.build_lr_scheduler(self.args, self.optimizer) 
self._lr_scheduler.step_update(0) def save_checkpoint(self, filename, extra_state): 'Save all training state in a checkpoint file.' if distributed_utils.is_master(self.args): extra_state['train_meters'] = self.meters checkpoint_utils.save_state(filename, self.args, self.get_model().state_dict(), self.get_criterion(), self.optimizer, self.lr_scheduler, self.get_num_updates(), self._optim_history, extra_state) def load_checkpoint(self, filename, reset_optimizer=False, reset_lr_scheduler=False, optimizer_overrides=None, reset_meters=False): 'Load all training state from a checkpoint file.' (extra_state, self._optim_history, last_optim_state) = (None, [], None) try: from fairseq.fb_pathmgr import fb_pathmgr bexists = fb_pathmgr.isfile(filename) except Exception: bexists = os.path.exists(filename) if bexists: state = checkpoint_utils.load_checkpoint_to_cpu(filename) try: self.get_model().load_state_dict(state['model'], strict=True, args=self.args) if utils.has_parameters(self.get_criterion()): self.get_criterion().load_state_dict(state['criterion'], strict=True) except Exception: raise Exception('Cannot load model parameters from checkpoint {}; please ensure that the architectures match.'.format(filename)) extra_state = state['extra_state'] self._optim_history = state['optimizer_history'] last_optim_state = state.get('last_optimizer_state', None) if ((last_optim_state is not None) and (not reset_optimizer)): self._build_optimizer() last_optim = self._optim_history[(- 1)] assert (last_optim['criterion_name'] == self.get_criterion().__class__.__name__), 'Criterion does not match; please reset the optimizer (--reset-optimizer).' assert (last_optim['optimizer_name'] == self.optimizer.__class__.__name__), 'Optimizer does not match; please reset the optimizer (--reset-optimizer).' if (not reset_lr_scheduler): self.lr_scheduler.load_state_dict(last_optim['lr_scheduler_state']) self.optimizer.load_state_dict(last_optim_state, optimizer_overrides) self.set_num_updates(last_optim['num_updates']) if (extra_state is not None): epoch = extra_state['train_iterator']['epoch'] print('| loaded checkpoint {} (epoch {} @ {} updates)'.format(filename, epoch, self.get_num_updates())) self.lr_step(epoch) if (('train_meters' in extra_state) and (not reset_meters)): self.meters.update(extra_state['train_meters']) del extra_state['train_meters'] for meter in self.meters.values(): if isinstance(meter, TimeMeter): meter.reset() else: print('| no existing checkpoint found {}'.format(filename)) return extra_state def get_train_iterator(self, epoch, combine=True, load_dataset=True, data_selector=None): 'Return an EpochBatchIterator over the training set for a given epoch.' if load_dataset: print('| loading train data for epoch {}'.format(epoch)) self.task.load_dataset(self.args.train_subset, epoch=epoch, combine=combine, data_selector=data_selector) return self.task.get_batch_iterator(dataset=self.task.dataset(self.args.train_subset), max_tokens=self.args.max_tokens, max_sentences=self.args.max_sentences, max_positions=utils.resolve_max_positions(self.task.max_positions(), self.model.max_positions()), ignore_invalid_inputs=True, required_batch_size_multiple=self.args.required_batch_size_multiple, seed=self.args.seed, num_shards=self.args.distributed_world_size, shard_id=self.args.distributed_rank, num_workers=self.args.num_workers, epoch=epoch) def train_step(self, samples, dummy_batch=False, raise_oom=False): 'Do forward, backward and parameter update.' 
if (self._dummy_batch is None): self._dummy_batch = samples[0] self._set_seed() self.model.train() self.criterion.train() self.zero_grad() if (not dummy_batch): self.meters['train_wall'].start() (logging_outputs, sample_sizes, ooms) = ([], [], 0) for (i, sample) in enumerate(samples): sample = self._prepare_sample(sample) if (sample is None): sample = self._prepare_sample(self._dummy_batch) ignore_grad = True else: ignore_grad = False def maybe_no_sync(): '\n Whenever *samples* contains more than one mini-batch, we\n want to accumulate gradients locally and only call\n all-reduce in the last backwards pass.\n ' if ((self.args.distributed_world_size > 1) and hasattr(self.model, 'no_sync') and (i < (len(samples) - 1))): return self.model.no_sync() else: return contextlib.ExitStack() try: with maybe_no_sync(): (loss, sample_size, logging_output) = self.task.train_step(sample, self.model, self.criterion, self.optimizer, ignore_grad) if (not ignore_grad): logging_outputs.append(logging_output) sample_sizes.append(sample_size) if self.fast_stat_sync: self._all_reduce_list[0] += sample_size self._all_reduce_list[1] += logging_output.get('nsentences', 0.0) self._all_reduce_list[2] += logging_output.get('loss', 0.0) self._all_reduce_list[3] += logging_output.get('nll_loss', 0.0) self._all_reduce_list[4] += logging_output.get('ntokens', 0.0) except RuntimeError as e: if ('out of memory' in str(e)): msg = (('| WARNING: ran out of memory with exception: ' + '{};'.format(e)) + '\n Skipping batch') print(msg, file=sys.stderr) if (torch.cuda.is_available() and hasattr(torch.cuda, 'memory_summary')): for device_idx in range(torch.cuda.device_count()): print(torch.cuda.memory_summary(device=device_idx), file=sys.stderr) sys.stderr.flush() if raise_oom: raise ValueError(msg) ooms += 1 self.zero_grad() else: raise e if self.fast_stat_sync: self._all_reduce_list[5] += ooms if ((ooms > 0) and (self._oom_batch is not None)): self.handle_ooms(ooms) if dummy_batch: return None if self.fast_stat_sync: all_reduce_list_tensor = torch.cuda.DoubleTensor(self._all_reduce_list) if self._sync_stats(): torch.distributed.all_reduce(all_reduce_list_tensor) all_reduce_list_tensor[2:4].div_((all_reduce_list_tensor[0:1] * torch.log(torch.cuda.DoubleTensor([2])))) self._all_reduce_list = all_reduce_list_tensor.tolist() logging_output = {} [sample_size, logging_output['nsentences'], logging_output['loss'], logging_output['nll_loss'], logging_output['ntokens'], ooms] = self._all_reduce_list elif self._sync_stats(): (logging_outputs, sample_sizes, ooms, prev_norms) = zip(*distributed_utils.all_gather_list([logging_outputs, sample_sizes, ooms, self._prev_grad_norm])) logging_outputs = list(chain.from_iterable(logging_outputs)) sample_sizes = list(chain.from_iterable(sample_sizes)) ooms = sum(ooms) if (not self.args.use_bmuf): assert (all(((norm == prev_norms[0]) for norm in prev_norms)) or all(((math.isnan(norm) or math.isinf(norm)) for norm in prev_norms))), 'Fatal error: gradients are inconsistent between workers' self.meters['oom'].update(ooms, len(samples)) if (ooms == (self.args.distributed_world_size * len(samples))): print('| WARNING: OOM in all workers, skipping update') self.zero_grad() return None if (not self.fast_stat_sync): logging_output = self.task.aggregate_logging_outputs(logging_outputs, self.get_criterion()) sample_size = self.task.grad_denom(sample_sizes, self.get_criterion()) if (not all(((k in logging_output) for k in ['ntokens', 'nsentences']))): raise Exception('Please update the 
{}.aggregate_logging_outputs() method to return ntokens and nsentences'.format(self.task.__class__.__name__)) try: if (sample_size > 0): self.optimizer.multiply_grads((self.args.distributed_world_size / float(sample_size))) grad_norm = self.optimizer.clip_grad_norm(self.args.clip_norm) self._prev_grad_norm = grad_norm self.optimizer.step() self.set_num_updates((self.get_num_updates() + 1)) self.task.update_step(self._num_updates) ntokens = logging_output.get('ntokens', 0) nsentences = logging_output.get('nsentences', 0) self.meters['wps'].update(ntokens) self.meters['ups'].update(1.0) self.meters['wpb'].update(ntokens) self.meters['bsz'].update(nsentences) self.meters['gnorm'].update(grad_norm) self.meters['clip'].update((1.0 if ((grad_norm > self.args.clip_norm) and (self.args.clip_norm > 0)) else 0.0)) self.meters['train_loss'].update(logging_output.get('loss', 0), sample_size) if ('train_acc' in self.meters): self.meters['train_acc'].update(logging_output.get('acc', 0), sample_size) if ('nll_loss' in logging_output): self.meters['train_nll_loss'].update(logging_output.get('nll_loss', 0), ntokens) if ((self.args.empty_cache_freq > 0) and ((((self.get_num_updates() + self.args.empty_cache_freq) - 1) % self.args.empty_cache_freq) == 0) and torch.cuda.is_available() and (not self.args.cpu)): torch.cuda.empty_cache() except OverflowError as e: print(('| WARNING: overflow detected, ' + str(e))) self.zero_grad() logging_output = None if self.args.fp16: self.meters['loss_scale'].reset() self.meters['loss_scale'].update(self.optimizer.scaler.loss_scale) self.clear_buffered_stats() self.meters['train_wall'].stop() return logging_output def valid_step(self, sample, raise_oom=False): 'Do forward pass in evaluation mode.' with torch.no_grad(): self.model.eval() self.criterion.eval() sample = self._prepare_sample(sample) if (sample is None): sample = self._prepare_sample(self._dummy_batch) ignore_results = True else: ignore_results = False try: (_loss, sample_size, logging_output) = self.task.valid_step(sample, self.model, self.criterion) except RuntimeError as e: if (('out of memory' in str(e)) and (not raise_oom)): print('| WARNING: ran out of memory, retrying batch') for p in self.model.parameters(): if (p.grad is not None): p.grad = None if self.cuda: torch.cuda.empty_cache() return self.valid_step(sample, raise_oom=True) else: raise e if ignore_results: (logging_output, sample_size) = ({}, 0) if (self.args.distributed_world_size > 1): (logging_output, sample_size) = zip(*distributed_utils.all_gather_list([logging_output, sample_size])) logging_output = list(logging_output) sample_size = list(sample_size) else: logging_output = [logging_output] sample_size = [sample_size] logging_output = self.task.aggregate_logging_outputs(logging_output, self.get_criterion()) sample_size = self.task.grad_denom(sample_size, self.get_criterion()) ntokens = logging_output.get('ntokens', 0) self.meters['valid_loss'].update(logging_output.get('loss', 0), sample_size) if ('valid_acc' in self.meters): self.meters['valid_acc'].update(logging_output.get('acc', 0), sample_size) if ('nll_loss' in logging_output): self.meters['valid_nll_loss'].update(logging_output.get('nll_loss', 0), ntokens) return logging_output def dummy_train_step(self, dummy_batch): 'Dummy training step for warming caching allocator.' 
self.train_step(dummy_batch, dummy_batch=True) self.zero_grad() def handle_ooms(self, number_of_ooms): '\n c10d accumulates/syncs gradients between gpus during backward pass.\n In case of OOMs, gpus may fail to sync, so we manually iterate\n extra to make sure each gpu makes same number of iterations.\n ' for _ in range(number_of_ooms): self.train_step([self._oom_batch], True) def zero_grad(self): self.optimizer.zero_grad() def clear_buffered_stats(self): self._all_reduce_list = ([0.0] * 6) def lr_step(self, epoch, val_loss=None): 'Adjust the learning rate based on the validation loss.' self.lr_scheduler.step(epoch, val_loss) return self.lr_step_update() def lr_step_update(self): 'Update the learning rate after each update.' return self.lr_scheduler.step_update(self.get_num_updates()) def get_lr(self): 'Get the current learning rate.' return self.optimizer.get_lr() def get_model(self): 'Get the (non-wrapped) model instance.' return self._model def get_criterion(self): 'Get the (non-wrapped) criterion instance.' return self._criterion def get_meter(self, name): 'Get a specific meter by name.' if (name not in self.meters): return None return self.meters[name] def get_num_updates(self): 'Get the number of parameters updates.' return self._num_updates def set_num_updates(self, num_updates): 'Set the number of parameters updates.' self._num_updates = num_updates self.lr_step_update() def _prepare_sample(self, sample): if ((sample is None) or (len(sample) == 0)): return None if self.cuda: sample = utils.move_to_cuda(sample) def apply_half(t): if (t.dtype is torch.float32): return t.half() return t if self.args.fp16: sample = utils.apply_to_sample(apply_half, sample) return sample def _set_seed(self): seed = (self.args.seed + self.get_num_updates()) torch.manual_seed(seed) if self.cuda: torch.cuda.manual_seed(seed) def _sync_stats(self): return ((self.args.distributed_world_size > 1) and ((not self.args.use_bmuf) or (self.args.use_bmuf and (((self.get_num_updates() + 1) % self.args.global_sync_iter) == 0))))
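# Minimal single-process sketch (not the Trainer itself) of the gradient-accumulation
# pattern used by train_step above: gradients from several micro-batches are summed,
# normalized by the total sample size, clipped, and applied in one optimizer step.
# Model, data, and sizes below are invented.
import torch

model = torch.nn.Linear(4, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
loss_fn = torch.nn.MSELoss(reduction='sum')

micro_batches = [(torch.randn(8, 4), torch.randn(8, 1)) for _ in range(3)]  # update_freq = 3

optimizer.zero_grad()
sample_size = 0
for x, y in micro_batches:
    loss = loss_fn(model(x), y)
    loss.backward()          # gradients accumulate in .grad across micro-batches
    sample_size += y.numel()

for p in model.parameters():
    if p.grad is not None:
        p.grad.mul_(1.0 / sample_size)  # analogous to optimizer.multiply_grads(...)
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=0.1)
optimizer.step()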
class WordStat(object):
    def __init__(self, word, is_bpe):
        self.word = word
        self.is_bpe = is_bpe
        self.log_prob = 0
        self.next_word_prob = 0
        self.count = 0
        self.missing_next_words = 0

    def add(self, log_prob, next_word_prob):
        """Increments counters for the sum of log probs of the current word and the next
        word (given context ending at the current word). Since the next word might be at
        the end of the example, or might not be counted because it is not an ending
        subword unit, also keeps track of how many of those we have seen."""
        if next_word_prob is not None:
            self.next_word_prob += next_word_prob
        else:
            self.missing_next_words += 1
        self.log_prob += log_prob
        self.count += 1

    def __str__(self):
        return '{}\t{}\t{}\t{}\t{}\t{}'.format(
            self.word, self.count, self.log_prob, self.is_bpe,
            self.next_word_prob, self.count - self.missing_next_words,
        )
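# Small usage sketch for WordStat (defined above); the words and scores are made up.
word_stats = {}
for word, log_prob, next_word_prob in [('the', -1.2, -3.0), ('cat', -2.5, None), ('the', -0.9, -2.2)]:
    word_stats.setdefault(word, WordStat(word, is_bpe=False)).add(log_prob, next_word_prob)

for ws in sorted(word_stats.values(), key=lambda x: x.count, reverse=True):
    print(ws)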
def main(parsed_args): assert (parsed_args.path is not None), '--path required for evaluation!' utils.import_user_module(parsed_args) print(parsed_args) use_cuda = (torch.cuda.is_available() and (not parsed_args.cpu)) task = tasks.setup_task(parsed_args) print('| loading model(s) from {}'.format(parsed_args.path)) (models, args) = checkpoint_utils.load_model_ensemble(parsed_args.path.split(':'), arg_overrides=eval(parsed_args.model_overrides), task=task) for arg in vars(parsed_args).keys(): if (arg not in {'self_target', 'future_target', 'past_target', 'tokens_per_sample', 'output_size_dictionary', 'add_bos_token'}): setattr(args, arg, getattr(parsed_args, arg)) args.tokens_per_sample -= args.context_window task = tasks.setup_task(args) task.load_dataset(args.gen_subset) dataset = task.dataset(args.gen_subset) if (args.context_window > 0): dataset = LMContextWindowDataset(dataset=dataset, tokens_per_sample=args.tokens_per_sample, context_window=args.context_window, pad_idx=task.source_dictionary.pad()) print('| {} {} {} examples'.format(args.data, args.gen_subset, len(dataset))) for model in models: model.make_generation_fast_() if args.fp16: model.half() if use_cuda: model.cuda() assert (len(models) > 0) print('num. model params: {}'.format(sum((p.numel() for p in models[0].parameters())))) itr = task.get_batch_iterator(dataset=dataset, max_tokens=(args.max_tokens or 36000), max_sentences=args.max_sentences, max_positions=utils.resolve_max_positions(*[model.max_positions() for model in models]), ignore_invalid_inputs=True, num_shards=args.num_shards, shard_id=args.shard_id, num_workers=args.num_workers).next_epoch_itr(shuffle=False) gen_timer = StopwatchMeter() scorer = SequenceScorer(task.target_dictionary, args.softmax_batch) score_sum = 0.0 count = 0 if (args.remove_bpe is not None): if (args.remove_bpe == 'sentencepiece'): raise NotImplementedError else: bpe_cont = args.remove_bpe.rstrip() bpe_toks = set((i for i in range(len(task.source_dictionary)) if task.source_dictionary[i].endswith(bpe_cont))) bpe_len = len(bpe_cont) else: bpe_toks = None bpe_len = 0 word_stats = dict() with progress_bar.build_progress_bar(args, itr) as t: wps_meter = TimeMeter() for sample in t: if ('net_input' not in sample): continue sample = (utils.move_to_cuda(sample) if use_cuda else sample) gen_timer.start() hypos = scorer.generate(models, sample) gen_timer.stop(sample['ntokens']) for (i, hypos_i) in enumerate(hypos): hypo = hypos_i[0] sample_id = sample['id'][i] tokens = hypo['tokens'] tgt_len = tokens.numel() pos_scores = hypo['positional_scores'].float() if args.add_bos_token: assert (hypo['tokens'][0].item() == task.target_dictionary.bos()) tokens = tokens[1:] pos_scores = pos_scores[1:] skipped_toks = 0 if (bpe_toks is not None): for i in range((tgt_len - 1)): if (tokens[i].item() in bpe_toks): skipped_toks += 1 pos_scores[(i + 1)] += pos_scores[i] pos_scores[i] = 0 inf_scores = (pos_scores.eq(float('inf')) | pos_scores.eq(float('-inf'))) if inf_scores.any(): print('| Skipping tokens with inf scores:', task.target_dictionary.string(tokens[inf_scores.nonzero()])) pos_scores = pos_scores[(~ inf_scores).nonzero()] score_sum += pos_scores.sum().cpu() count += (pos_scores.numel() - skipped_toks) if (args.output_word_probs or args.output_word_stats): w = '' word_prob = [] is_bpe = False for i in range(len(tokens)): w_ind = tokens[i].item() w += task.source_dictionary[w_ind] if ((bpe_toks is not None) and (w_ind in bpe_toks)): w = w[:(- bpe_len)] is_bpe = True else: word_prob.append((w, 
pos_scores[i].item())) next_prob = None ind = (i + 1) while (ind < len(tokens)): if (pos_scores[ind].item() != 0): next_prob = pos_scores[ind] break ind += 1 word_stats.setdefault(w, WordStat(w, is_bpe)).add(pos_scores[i].item(), next_prob) is_bpe = False w = '' if args.output_word_probs: print(((str(int(sample_id)) + ' ') + '\t'.join(('{} [{:2f}]'.format(x[0], x[1]) for x in word_prob)))) wps_meter.update(sample['ntokens']) t.log({'wps': round(wps_meter.avg)}) avg_nll_loss = ((- score_sum) / count) print('| Evaluated {} tokens in {:.1f}s ({:.2f} tokens/s)'.format(gen_timer.n, gen_timer.sum, (1.0 / gen_timer.avg))) print('| Loss: {:.4f}, Perplexity: {:.2f}'.format(avg_nll_loss, np.exp(avg_nll_loss))) if args.output_word_stats: for ws in sorted(word_stats.values(), key=(lambda x: x.count), reverse=True): print(ws)
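# Sketch of the final perplexity computation above: positional scores are natural-log
# probabilities, so the average negative log-likelihood is exponentiated with e.
# The scores below are invented.
import math

pos_scores = [-2.1, -0.7, -3.4, -1.0]
avg_nll_loss = -sum(pos_scores) / len(pos_scores)
print('Loss: {:.4f}, Perplexity: {:.2f}'.format(avg_nll_loss, math.exp(avg_nll_loss)))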
def cli_main():
    parser = options.get_eval_lm_parser()
    args = options.parse_args_and_arch(parser)
    main(args)
def buffered_read(input, buffer_size):
    buffer = []
    with fileinput.input(files=[input], openhook=fileinput.hook_encoded('utf-8')) as h:
        for src_str in h:
            buffer.append(src_str.strip())
            if len(buffer) >= buffer_size:
                yield buffer
                buffer = []

    if len(buffer) > 0:
        yield buffer
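# Usage sketch for buffered_read above: it yields lists of stripped lines with at most
# buffer_size elements each. The temporary file is only for illustration.
import tempfile

with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as f:
    f.write('first sentence\nsecond sentence\nthird sentence\n')
    path = f.name

for chunk in buffered_read(path, buffer_size=2):
    print(chunk)
# ['first sentence', 'second sentence']
# ['third sentence']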
def make_batches(lines, args, task, max_positions, encode_fn):
    tokens = [
        task.source_dictionary.encode_line(encode_fn(src_str), add_if_not_exist=False).long()
        for src_str in lines
    ]
    lengths = torch.LongTensor([t.numel() for t in tokens])
    itr = task.get_batch_iterator(
        dataset=task.build_dataset_for_inference(tokens, lengths),
        max_tokens=args.max_tokens,
        max_sentences=args.max_sentences,
        max_positions=max_positions,
    ).next_epoch_itr(shuffle=False)
    for batch in itr:
        yield Batch(
            ids=batch['id'],
            src_tokens=batch['net_input']['src_tokens'],
            src_lengths=batch['net_input']['src_lengths'],
        )
def main(args): utils.import_user_module(args) if (args.buffer_size < 1): args.buffer_size = 1 if ((args.max_tokens is None) and (args.max_sentences is None)): args.max_sentences = 1 assert ((not args.sampling) or (args.nbest == args.beam)), '--sampling requires --nbest to be equal to --beam' assert ((not args.max_sentences) or (args.max_sentences <= args.buffer_size)), '--max-sentences/--batch-size cannot be larger than --buffer-size' print(args) use_cuda = (torch.cuda.is_available() and (not args.cpu)) task = tasks.setup_task(args) print('| loading model(s) from {}'.format(args.path)) (models, _model_args) = checkpoint_utils.load_model_ensemble(args.path.split(':'), arg_overrides=eval(args.model_overrides), task=task) src_dict = task.source_dictionary tgt_dict = task.target_dictionary for model in models: model.make_generation_fast_(beamable_mm_beam_size=(None if args.no_beamable_mm else args.beam), need_attn=args.print_alignment) if args.fp16: model.half() if use_cuda: model.cuda() generator = task.build_generator(args) tokenizer = encoders.build_tokenizer(args) bpe = encoders.build_bpe(args) def encode_fn(x): if (tokenizer is not None): x = tokenizer.encode(x) if (bpe is not None): x = bpe.encode(x) return x def decode_fn(x): if (bpe is not None): x = bpe.decode(x) if (tokenizer is not None): x = tokenizer.decode(x) return x align_dict = utils.load_align_dict(args.replace_unk) max_positions = utils.resolve_max_positions(task.max_positions(), *[model.max_positions() for model in models]) if (args.buffer_size > 1): print('| Sentence buffer size:', args.buffer_size) print('| Type the input sentence and press return:') start_id = 0 for inputs in buffered_read(args.input, args.buffer_size): results = [] for batch in make_batches(inputs, args, task, max_positions, encode_fn): src_tokens = batch.src_tokens src_lengths = batch.src_lengths if use_cuda: src_tokens = src_tokens.cuda() src_lengths = src_lengths.cuda() sample = {'net_input': {'src_tokens': src_tokens, 'src_lengths': src_lengths}} translations = task.inference_step(generator, models, sample) for (i, (id, hypos)) in enumerate(zip(batch.ids.tolist(), translations)): src_tokens_i = utils.strip_pad(src_tokens[i], tgt_dict.pad()) results.append(((start_id + id), src_tokens_i, hypos)) for (id, src_tokens, hypos) in sorted(results, key=(lambda x: x[0])): if (src_dict is not None): src_str = src_dict.string(src_tokens, args.remove_bpe) print('S-{}\t{}'.format(id, src_str)) for hypo in hypos[:min(len(hypos), args.nbest)]: (hypo_tokens, hypo_str, alignment) = utils.post_process_prediction(hypo_tokens=hypo['tokens'].int().cpu(), src_str=src_str, alignment=hypo['alignment'], align_dict=align_dict, tgt_dict=tgt_dict, remove_bpe=args.remove_bpe) hypo_str = decode_fn(hypo_str) print('H-{}\t{}\t{}'.format(id, hypo['score'], hypo_str)) print('P-{}\t{}'.format(id, ' '.join(map((lambda x: '{:.4f}'.format(x)), hypo['positional_scores'].tolist())))) if args.print_alignment: alignment_str = ' '.join(['{}-{}'.format(src, tgt) for (src, tgt) in alignment]) print('A-{}\t{}'.format(id, alignment_str)) start_id += len(inputs)
def cli_main():
    parser = options.get_generation_parser(interactive=True)
    args = options.parse_args_and_arch(parser)
    main(args)
def main(args): utils.import_user_module(args) print(args) os.makedirs(args.destdir, exist_ok=True) target = (not args.only_source) task = tasks.get_task(args.task) def train_path(lang): return '{}{}'.format(args.trainpref, (('.' + lang) if lang else '')) def file_name(prefix, lang): fname = prefix if (lang is not None): fname += '.{lang}'.format(lang=lang) return fname def dest_path(prefix, lang): return os.path.join(args.destdir, file_name(prefix, lang)) def dict_path(lang): return (dest_path('dict', lang) + '.txt') def build_dictionary(filenames, src=False, tgt=False): assert (src ^ tgt) return task.build_dictionary(filenames, workers=args.workers, threshold=(args.thresholdsrc if src else args.thresholdtgt), nwords=(args.nwordssrc if src else args.nwordstgt), padding_factor=args.padding_factor) if ((not args.srcdict) and os.path.exists(dict_path(args.source_lang))): raise FileExistsError(dict_path(args.source_lang)) if (target and (not args.tgtdict) and os.path.exists(dict_path(args.target_lang))): raise FileExistsError(dict_path(args.target_lang)) if args.joined_dictionary: assert ((not args.srcdict) or (not args.tgtdict)), 'cannot use both --srcdict and --tgtdict with --joined-dictionary' if args.srcdict: src_dict = task.load_dictionary(args.srcdict) elif args.tgtdict: src_dict = task.load_dictionary(args.tgtdict) else: assert args.trainpref, '--trainpref must be set if --srcdict is not specified' src_dict = build_dictionary({train_path(lang) for lang in [args.source_lang, args.target_lang]}, src=True) tgt_dict = src_dict else: if args.srcdict: src_dict = task.load_dictionary(args.srcdict) else: assert args.trainpref, '--trainpref must be set if --srcdict is not specified' src_dict = build_dictionary([train_path(args.source_lang)], src=True) if target: if args.tgtdict: tgt_dict = task.load_dictionary(args.tgtdict) else: assert args.trainpref, '--trainpref must be set if --tgtdict is not specified' tgt_dict = build_dictionary([train_path(args.target_lang)], tgt=True) else: tgt_dict = None src_dict.save(dict_path(args.source_lang)) if (target and (tgt_dict is not None)): tgt_dict.save(dict_path(args.target_lang)) def make_binary_dataset(vocab, input_prefix, output_prefix, lang, num_workers): print('| [{}] Dictionary: {} types'.format(lang, (len(vocab) - 1))) n_seq_tok = [0, 0] replaced = Counter() def merge_result(worker_result): replaced.update(worker_result['replaced']) n_seq_tok[0] += worker_result['nseq'] n_seq_tok[1] += worker_result['ntok'] input_file = '{}{}'.format(input_prefix, (('.' 
+ lang) if (lang is not None) else '')) offsets = Binarizer.find_offsets(input_file, num_workers) pool = None if (num_workers > 1): pool = Pool(processes=(num_workers - 1)) for worker_id in range(1, num_workers): prefix = '{}{}'.format(output_prefix, worker_id) pool.apply_async(binarize, (args, input_file, vocab, prefix, lang, offsets[worker_id], offsets[(worker_id + 1)]), callback=merge_result) pool.close() ds = indexed_dataset.make_builder(dataset_dest_file(args, output_prefix, lang, 'bin'), impl=args.dataset_impl, vocab_size=len(vocab)) merge_result(Binarizer.binarize(input_file, vocab, (lambda t: ds.add_item(t)), offset=0, end=offsets[1])) if (num_workers > 1): pool.join() for worker_id in range(1, num_workers): prefix = '{}{}'.format(output_prefix, worker_id) temp_file_path = dataset_dest_prefix(args, prefix, lang) ds.merge_file_(temp_file_path) os.remove(indexed_dataset.data_file_path(temp_file_path)) os.remove(indexed_dataset.index_file_path(temp_file_path)) ds.finalize(dataset_dest_file(args, output_prefix, lang, 'idx')) print('| [{}] {}: {} sents, {} tokens, {:.3}% replaced by {}'.format(lang, input_file, n_seq_tok[0], n_seq_tok[1], ((100 * sum(replaced.values())) / n_seq_tok[1]), vocab.unk_word)) def make_binary_alignment_dataset(input_prefix, output_prefix, num_workers): nseq = [0] def merge_result(worker_result): nseq[0] += worker_result['nseq'] input_file = input_prefix offsets = Binarizer.find_offsets(input_file, num_workers) pool = None if (num_workers > 1): pool = Pool(processes=(num_workers - 1)) for worker_id in range(1, num_workers): prefix = '{}{}'.format(output_prefix, worker_id) pool.apply_async(binarize_alignments, (args, input_file, utils.parse_alignment, prefix, offsets[worker_id], offsets[(worker_id + 1)]), callback=merge_result) pool.close() ds = indexed_dataset.make_builder(dataset_dest_file(args, output_prefix, None, 'bin'), impl=args.dataset_impl) merge_result(Binarizer.binarize_alignments(input_file, utils.parse_alignment, (lambda t: ds.add_item(t)), offset=0, end=offsets[1])) if (num_workers > 1): pool.join() for worker_id in range(1, num_workers): prefix = '{}{}'.format(output_prefix, worker_id) temp_file_path = dataset_dest_prefix(args, prefix, None) ds.merge_file_(temp_file_path) os.remove(indexed_dataset.data_file_path(temp_file_path)) os.remove(indexed_dataset.index_file_path(temp_file_path)) ds.finalize(dataset_dest_file(args, output_prefix, None, 'idx')) print('| [alignments] {}: parsed {} alignments'.format(input_file, nseq[0])) def make_dataset(vocab, input_prefix, output_prefix, lang, num_workers=1): if (args.dataset_impl == 'raw'): output_text_file = dest_path((output_prefix + '.{}-{}'.format(args.source_lang, args.target_lang)), lang) shutil.copyfile(file_name(input_prefix, lang), output_text_file) else: make_binary_dataset(vocab, input_prefix, output_prefix, lang, num_workers) def make_all(lang, vocab): if args.trainpref: make_dataset(vocab, args.trainpref, 'train', lang, num_workers=args.workers) if args.validpref: for (k, validpref) in enumerate(args.validpref.split(',')): outprefix = ('valid{}'.format(k) if (k > 0) else 'valid') make_dataset(vocab, validpref, outprefix, lang, num_workers=args.workers) if args.testpref: for (k, testpref) in enumerate(args.testpref.split(',')): outprefix = ('test{}'.format(k) if (k > 0) else 'test') make_dataset(vocab, testpref, outprefix, lang, num_workers=args.workers) def make_all_alignments(): if (args.trainpref and os.path.exists(((args.trainpref + '.') + args.align_suffix))): 
make_binary_alignment_dataset(((args.trainpref + '.') + args.align_suffix), 'train.align', num_workers=args.workers) if (args.validpref and os.path.exists(((args.validpref + '.') + args.align_suffix))): make_binary_alignment_dataset(((args.validpref + '.') + args.align_suffix), 'valid.align', num_workers=args.workers) if (args.testpref and os.path.exists(((args.testpref + '.') + args.align_suffix))): make_binary_alignment_dataset(((args.testpref + '.') + args.align_suffix), 'test.align', num_workers=args.workers) make_all(args.source_lang, src_dict) if target: make_all(args.target_lang, tgt_dict) if args.align_suffix: make_all_alignments() print('| Wrote preprocessed data to {}'.format(args.destdir)) if args.alignfile: assert args.trainpref, '--trainpref must be set if --alignfile is specified' src_file_name = train_path(args.source_lang) tgt_file_name = train_path(args.target_lang) freq_map = {} with open(args.alignfile, 'r', encoding='utf-8') as align_file: with open(src_file_name, 'r', encoding='utf-8') as src_file: with open(tgt_file_name, 'r', encoding='utf-8') as tgt_file: for (a, s, t) in zip_longest(align_file, src_file, tgt_file): si = src_dict.encode_line(s, add_if_not_exist=False) ti = tgt_dict.encode_line(t, add_if_not_exist=False) ai = list(map((lambda x: tuple(x.split('-'))), a.split())) for (sai, tai) in ai: srcidx = si[int(sai)] tgtidx = ti[int(tai)] if ((srcidx != src_dict.unk()) and (tgtidx != tgt_dict.unk())): assert (srcidx != src_dict.pad()) assert (srcidx != src_dict.eos()) assert (tgtidx != tgt_dict.pad()) assert (tgtidx != tgt_dict.eos()) if (srcidx not in freq_map): freq_map[srcidx] = {} if (tgtidx not in freq_map[srcidx]): freq_map[srcidx][tgtidx] = 1 else: freq_map[srcidx][tgtidx] += 1 align_dict = {} for srcidx in freq_map.keys(): align_dict[srcidx] = max(freq_map[srcidx], key=freq_map[srcidx].get) with open(os.path.join(args.destdir, 'alignment.{}-{}.txt'.format(args.source_lang, args.target_lang)), 'w', encoding='utf-8') as f: for (k, v) in align_dict.items(): print('{} {}'.format(src_dict[k], tgt_dict[v]), file=f)
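# Pure-Python sketch (not fairseq code) of the --alignfile post-processing above:
# count how often each source index is aligned to each target index, then keep the
# most frequent target index per source index. The indices below are invented.
freq_map = {}
alignments = [(3, 7), (3, 7), (3, 9), (5, 2)]  # (source index, target index) pairs
for srcidx, tgtidx in alignments:
    freq_map.setdefault(srcidx, {})
    freq_map[srcidx][tgtidx] = freq_map[srcidx].get(tgtidx, 0) + 1

align_dict = {srcidx: max(counts, key=counts.get) for srcidx, counts in freq_map.items()}
print(align_dict)  # {3: 7, 5: 2}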
def binarize(args, filename, vocab, output_prefix, lang, offset, end, append_eos=True):
    ds = indexed_dataset.make_builder(
        dataset_dest_file(args, output_prefix, lang, 'bin'),
        impl=args.dataset_impl,
        vocab_size=len(vocab),
    )

    def consumer(tensor):
        ds.add_item(tensor)

    res = Binarizer.binarize(filename, vocab, consumer, append_eos=append_eos, offset=offset, end=end)
    ds.finalize(dataset_dest_file(args, output_prefix, lang, 'idx'))
    return res
def binarize_alignments(args, filename, parse_alignment, output_prefix, offset, end):
    ds = indexed_dataset.make_builder(
        dataset_dest_file(args, output_prefix, None, 'bin'),
        impl=args.dataset_impl,
        vocab_size=None,
    )

    def consumer(tensor):
        ds.add_item(tensor)

    res = Binarizer.binarize_alignments(filename, parse_alignment, consumer, offset=offset, end=end)
    ds.finalize(dataset_dest_file(args, output_prefix, None, 'idx'))
    return res
def dataset_dest_prefix(args, output_prefix, lang):
    base = '{}/{}'.format(args.destdir, output_prefix)
    if lang is not None:
        lang_part = '.{}-{}.{}'.format(args.source_lang, args.target_lang, lang)
    elif args.only_source:
        lang_part = ''
    else:
        lang_part = '.{}-{}'.format(args.source_lang, args.target_lang)
    return '{}{}'.format(base, lang_part)
def dataset_dest_file(args, output_prefix, lang, extension):
    base = dataset_dest_prefix(args, output_prefix, lang)
    return '{}.{}'.format(base, extension)
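# Example of the output paths produced by dataset_dest_prefix/dataset_dest_file above,
# using a hypothetical argument namespace and language pair.
from argparse import Namespace

_args = Namespace(destdir='data-bin/wmt17_en_de', source_lang='en', target_lang='de', only_source=False)
print(dataset_dest_file(_args, 'train', 'en', 'bin'))   # data-bin/wmt17_en_de/train.en-de.en.bin
print(dataset_dest_file(_args, 'train', None, 'idx'))   # data-bin/wmt17_en_de/train.en-de.idx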
def get_offsets(input_file, num_workers):
    return Binarizer.find_offsets(input_file, num_workers)
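# Illustrative sketch (not Binarizer.find_offsets itself) of splitting a text file into
# num_chunks byte ranges that start on line boundaries, so each worker can binarize its
# own chunk in parallel as in make_binary_dataset above. It returns num_chunks + 1
# offsets, matching how offsets[worker_id] / offsets[worker_id + 1] are consumed.
import os

def find_line_offsets(path, num_chunks):
    size = os.path.getsize(path)
    offsets = [0]
    with open(path, 'rb') as f:
        for i in range(1, num_chunks):
            f.seek(size * i // num_chunks)
            f.readline()  # advance to the next line boundary
            offsets.append(f.tell())
    offsets.append(size)
    return offsets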
def cli_main():
    parser = options.get_preprocessing_parser()
    args = parser.parse_args()
    main(args)
def get_parser():
    parser = argparse.ArgumentParser(description='Command-line script for BLEU scoring.')
    parser.add_argument('-s', '--sys', default='-', help='system output')
    parser.add_argument('-r', '--ref', required=True, help='references')
    parser.add_argument('-o', '--order', default=4, metavar='N', type=int,
                        help='consider ngrams up to this order')
    parser.add_argument('--ignore-case', action='store_true', help='case-insensitive scoring')
    parser.add_argument('--sacrebleu', action='store_true', help='score with sacrebleu')
    parser.add_argument('--sentence-bleu', action='store_true',
                        help='report sentence-level BLEUs (i.e., with +1 smoothing)')
    return parser
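# Usage sketch for the scoring parser above; the file names are hypothetical.
_parser = get_parser()
_args = _parser.parse_args(['--sys', 'gen.out.sys', '--ref', 'gen.out.ref', '--order', '4'])
print(_args.sys, _args.ref, _args.order)  # gen.out.sys gen.out.ref 4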
def main():
    parser = get_parser()
    args = parser.parse_args()
    print(args)

    assert args.sys == '-' or os.path.exists(args.sys), \
        'System output file {} does not exist'.format(args.sys)
    assert os.path.exists(args.ref), 'Reference file {} does not exist'.format(args.ref)

    dict = dictionary.Dictionary()

    def readlines(fd):
        for line in fd.readlines():
            if args.ignore_case:
                yield line.lower()
            else:
                yield line

    if args.sacrebleu:
        import sacrebleu

        def score(fdsys):
            with open(args.ref) as fdref:
                print(sacrebleu.corpus_bleu(fdsys, [fdref]))

    elif args.sentence_bleu:
        def score(fdsys):
            with open(args.ref) as fdref:
                scorer = bleu.Scorer(dict.pad(), dict.eos(), dict.unk())
                for i, (sys_tok, ref_tok) in enumerate(zip(readlines(fdsys), readlines(fdref))):
                    scorer.reset(one_init=True)
                    sys_tok = dict.encode_line(sys_tok)
                    ref_tok = dict.encode_line(ref_tok)
                    scorer.add(ref_tok, sys_tok)
                    print(i, scorer.result_string(args.order))

    else:
        def score(fdsys):
            with open(args.ref) as fdref:
                scorer = bleu.Scorer(dict.pad(), dict.eos(), dict.unk())
                for sys_tok, ref_tok in zip(readlines(fdsys), readlines(fdref)):
                    sys_tok = dict.encode_line(sys_tok)
                    ref_tok = dict.encode_line(ref_tok)
                    scorer.add(ref_tok, sys_tok)
                print(scorer.result_string(args.order))

    if args.sys == '-':
        score(sys.stdin)
    else:
        with open(args.sys, 'r') as f:
            score(f)
class NumpyExtension(Extension):
    """Source: https://stackoverflow.com/a/54128391"""

    def __init__(self, *args, **kwargs):
        self.__include_dirs = []
        super().__init__(*args, **kwargs)

    @property
    def include_dirs(self):
        import numpy
        return self.__include_dirs + [numpy.get_include()]

    @include_dirs.setter
    def include_dirs(self, dirs):
        self.__include_dirs = dirs
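# Usage sketch for NumpyExtension above: the numpy include directory is resolved lazily,
# only when include_dirs is read (typically by setuptools at build time), so setup.py can
# be imported before numpy is installed. The module name and source file are hypothetical.
ext = NumpyExtension('fairseq.data.example_ext', sources=['fairseq/data/example_ext.c'])
print(ext.include_dirs)  # [..., '<site-packages>/numpy/core/include'] (exact path depends on the install)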
def main(args, init_distributed=False): utils.import_user_module(args) try: from fairseq.fb_pathmgr import fb_pathmgr global fb_pathmgr_registerd if (not fb_pathmgr_registerd): fb_pathmgr.register() fb_pathmgr_registerd = True except (ModuleNotFoundError, ImportError): pass assert ((args.max_tokens is not None) or (args.max_sentences is not None)), 'Must specify batch size either with --max-tokens or --max-sentences' if (torch.cuda.is_available() and (not args.cpu)): torch.cuda.set_device(args.device_id) np.random.seed(args.seed) torch.manual_seed(args.seed) if init_distributed: args.distributed_rank = distributed_utils.distributed_init(args) if distributed_utils.is_master(args): checkpoint_utils.verify_checkpoint_directory(args.save_dir) print(args) task = tasks.setup_task(args) for valid_sub_split in args.valid_subset.split(','): task.load_dataset(valid_sub_split, combine=False, epoch=0) model = task.build_model(args) criterion = task.build_criterion(args) print(model) print('| model {}, criterion {}'.format(args.arch, criterion.__class__.__name__)) print('| num. model params: {} (num. trained: {})'.format(sum((p.numel() for p in model.parameters())), sum((p.numel() for p in model.parameters() if p.requires_grad)))) trainer = Trainer(args, task, model, criterion) print('| training on {} GPUs'.format(args.distributed_world_size)) print('| max tokens per GPU = {} and max sentences per GPU = {}'.format(args.max_tokens, args.max_sentences)) (extra_state, epoch_itr) = checkpoint_utils.load_checkpoint(args, trainer) max_epoch = (args.max_epoch or math.inf) max_update = (args.max_update or math.inf) lr = trainer.get_lr() train_meter = StopwatchMeter() train_meter.start() valid_subsets = args.valid_subset.split(',') if (not hasattr(checkpoint_utils.save_checkpoint, 'not_best')): checkpoint_utils.save_checkpoint.not_best = 0 while ((lr > args.min_lr) and (epoch_itr.epoch < max_epoch) and (trainer.get_num_updates() < max_update)): train(args, trainer, task, epoch_itr) if ((not args.disable_validation) and ((epoch_itr.epoch % args.validate_interval) == 0)): valid_losses = validate(args, trainer, task, epoch_itr, valid_subsets) if (args.early_stop > 0): if (hasattr(checkpoint_utils.save_checkpoint, 'best') and (valid_losses[0] > checkpoint_utils.save_checkpoint.best)): checkpoint_utils.save_checkpoint.not_best += 1 print('| Not the best ckpt... not best:', checkpoint_utils.save_checkpoint.not_best) if (checkpoint_utils.save_checkpoint.not_best > args.early_stop): print('| Early stop...') break else: checkpoint_utils.save_checkpoint.not_best = 0 else: valid_losses = [None] lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0]) if ((epoch_itr.epoch % args.save_interval) == 0): checkpoint_utils.save_checkpoint(args, trainer, epoch_itr, valid_losses[0]) reload_dataset = (':' in getattr(args, 'data', '')) epoch_itr = trainer.get_train_iterator(epoch_itr.epoch, load_dataset=reload_dataset) train_meter.stop() print('| done training in {:.1f} seconds'.format(train_meter.sum))
def train(args, trainer, task, epoch_itr): 'Train the model for one epoch.' update_freq = (args.update_freq[(epoch_itr.epoch - 1)] if (epoch_itr.epoch <= len(args.update_freq)) else args.update_freq[(- 1)]) itr = epoch_itr.next_epoch_itr(fix_batches_to_gpus=args.fix_batches_to_gpus, shuffle=(epoch_itr.epoch >= args.curriculum)) itr = iterators.GroupedIterator(itr, update_freq) progress = progress_bar.build_progress_bar(args, itr, epoch_itr.epoch, no_progress_bar='simple') extra_meters = collections.defaultdict((lambda : AverageMeter())) valid_subsets = args.valid_subset.split(',') max_update = (args.max_update or math.inf) for (i, samples) in enumerate(progress, start=epoch_itr.iterations_in_epoch): log_output = trainer.train_step(samples) if (log_output is None): continue stats = get_training_stats(trainer) for (k, v) in log_output.items(): if (k in ['loss', 'nll_loss', 'ntokens', 'nsentences', 'sample_size']): continue if (('loss' in k) or (k == 'accuracy')): extra_meters[k].update(v, log_output['sample_size']) else: extra_meters[k].update(v) stats[k] = extra_meters[k].avg progress.log(stats, tag='train', step=stats['num_updates']) if (i == 0): trainer.get_meter('wps').reset() trainer.get_meter('ups').reset() num_updates = trainer.get_num_updates() if ((not args.disable_validation) and (args.save_interval_updates > 0) and ((num_updates % args.save_interval_updates) == 0) and (num_updates > 0)): valid_losses = validate(args, trainer, task, epoch_itr, valid_subsets) checkpoint_utils.save_checkpoint(args, trainer, epoch_itr, valid_losses[0]) if (num_updates >= max_update): break stats = get_training_stats(trainer) for (k, meter) in extra_meters.items(): stats[k] = meter.avg progress.print(stats, tag='train', step=stats['num_updates']) for k in ['train_loss', 'train_nll_loss', 'wps', 'ups', 'wpb', 'bsz', 'gnorm', 'clip']: meter = trainer.get_meter(k) if (meter is not None): meter.reset()
def get_training_stats(trainer):
    stats = collections.OrderedDict()
    stats['loss'] = trainer.get_meter('train_loss')
    if trainer.get_meter('train_nll_loss').count > 0:
        nll_loss = trainer.get_meter('train_nll_loss')
        stats['nll_loss'] = nll_loss
    else:
        nll_loss = trainer.get_meter('train_loss')
    stats['ppl'] = utils.get_perplexity(nll_loss.avg)
    stats['wps'] = trainer.get_meter('wps')
    stats['ups'] = trainer.get_meter('ups')
    stats['wpb'] = trainer.get_meter('wpb')
    stats['bsz'] = trainer.get_meter('bsz')
    stats['num_updates'] = trainer.get_num_updates()
    stats['lr'] = trainer.get_lr()
    stats['gnorm'] = trainer.get_meter('gnorm')
    stats['clip'] = trainer.get_meter('clip')
    stats['oom'] = trainer.get_meter('oom')
    if trainer.get_meter('loss_scale') is not None:
        stats['loss_scale'] = trainer.get_meter('loss_scale')
    stats['wall'] = round(trainer.get_meter('wall').elapsed_time)
    stats['train_wall'] = trainer.get_meter('train_wall')
    return stats
def validate(args, trainer, task, epoch_itr, subsets): 'Evaluate the model on the validation set(s) and return the losses.' if (args.fixed_validation_seed is not None): utils.set_torch_seed(args.fixed_validation_seed) valid_losses = [] for subset in subsets: itr = task.get_batch_iterator(dataset=task.dataset(subset), max_tokens=args.max_tokens_valid, max_sentences=args.max_sentences_valid, max_positions=utils.resolve_max_positions(task.max_positions(), trainer.get_model().max_positions()), ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test, required_batch_size_multiple=args.required_batch_size_multiple, seed=args.seed, num_shards=args.distributed_world_size, shard_id=args.distributed_rank, num_workers=args.num_workers).next_epoch_itr(shuffle=False) progress = progress_bar.build_progress_bar(args, itr, epoch_itr.epoch, prefix="valid on '{}' subset".format(subset), no_progress_bar='simple') for k in ['valid_loss', 'valid_nll_loss']: meter = trainer.get_meter(k) if (meter is not None): meter.reset() extra_meters = collections.defaultdict((lambda : AverageMeter())) for sample in progress: log_output = trainer.valid_step(sample) for (k, v) in log_output.items(): if (k in ['loss', 'nll_loss', 'ntokens', 'nsentences', 'sample_size']): continue extra_meters[k].update(v) stats = get_valid_stats(trainer, args, extra_meters) for (k, meter) in extra_meters.items(): stats[k] = meter.avg progress.print(stats, tag=subset, step=trainer.get_num_updates()) valid_losses.append((stats[args.best_checkpoint_metric].avg if (args.best_checkpoint_metric == 'loss') else stats[args.best_checkpoint_metric])) return valid_losses
def get_valid_stats(trainer, args, extra_meters=None):
    stats = collections.OrderedDict()
    stats['loss'] = trainer.get_meter('valid_loss')
    if trainer.get_meter('valid_nll_loss').count > 0:
        nll_loss = trainer.get_meter('valid_nll_loss')
        stats['nll_loss'] = nll_loss
    else:
        nll_loss = stats['loss']
    stats['ppl'] = utils.get_perplexity(nll_loss.avg)
    stats['num_updates'] = trainer.get_num_updates()
    if hasattr(checkpoint_utils.save_checkpoint, 'best'):
        key = 'best_{0}'.format(args.best_checkpoint_metric)
        best_function = max if args.maximize_best_checkpoint_metric else min

        current_metric = None
        if args.best_checkpoint_metric == 'loss':
            current_metric = stats['loss'].avg
        elif args.best_checkpoint_metric in extra_meters:
            current_metric = extra_meters[args.best_checkpoint_metric].avg
        elif args.best_checkpoint_metric in stats:
            current_metric = stats[args.best_checkpoint_metric]
        else:
            raise ValueError('best_checkpoint_metric not found in logs')

        stats[key] = best_function(checkpoint_utils.save_checkpoint.best, current_metric)
    return stats
def distributed_main(i, args, start_rank=0):
    args.device_id = i
    if args.distributed_rank is None:
        args.distributed_rank = start_rank + i
    main(args, init_distributed=True)
def cli_main():
    parser = options.get_training_parser()
    args = options.parse_args_and_arch(parser)

    if args.distributed_init_method is None:
        distributed_utils.infer_init_method(args)

    if args.distributed_init_method is not None:
        if torch.cuda.device_count() > 1 and not args.distributed_no_spawn:
            start_rank = args.distributed_rank
            args.distributed_rank = None
            torch.multiprocessing.spawn(
                fn=distributed_main,
                args=(args, start_rank),
                nprocs=torch.cuda.device_count(),
            )
        else:
            distributed_main(args.device_id, args)
    elif args.distributed_world_size > 1:
        assert args.distributed_world_size <= torch.cuda.device_count()
        port = random.randint(10000, 20000)
        args.distributed_init_method = 'tcp://localhost:{port}'.format(port=port)
        args.distributed_rank = None
        if max(args.update_freq) > 1 and args.ddp_backend != 'no_c10d':
            print('| NOTE: you may get better performance with: --ddp-backend=no_c10d')
        torch.multiprocessing.spawn(
            fn=distributed_main,
            args=(args,),
            nprocs=args.distributed_world_size,
        )
    else:
        main(args)
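# Pure-Python sketch (no GPUs involved) of how cli_main/distributed_main above assign a
# local device id and a global rank to each spawned process. The node layout is made up.
def assign_ranks(num_local_procs, start_rank=0):
    return [(i, start_rank + i) for i in range(num_local_procs)]

# node 0 with 4 GPUs, then node 1 with 4 GPUs
print(assign_ranks(4, start_rank=0))  # [(0, 0), (1, 1), (2, 2), (3, 3)]
print(assign_ranks(4, start_rank=4))  # [(0, 4), (1, 5), (2, 6), (3, 7)]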
def buffered_read(input, buffer_size): buffer = [] with fileinput.input(files=[input], openhook=fileinput.hook_encoded('utf-8')) as h: for src_str in h: buffer.append(src_str.strip()) if (len(buffer) >= buffer_size): (yield buffer) buffer = [] if (len(buffer) > 0): (yield buffer)
def make_batches(lines, args, task, max_positions, encode_fn): tokens = [task.source_dictionary.encode_line(encode_fn(src_str), add_if_not_exist=False).long() for src_str in lines] lengths = torch.LongTensor([t.numel() for t in tokens]) itr = task.get_batch_iterator(dataset=task.build_dataset_for_inference(tokens, lengths), max_tokens=args.max_tokens, max_sentences=args.max_sentences, max_positions=max_positions).next_epoch_itr(shuffle=False) for batch in itr: (yield Batch(ids=batch['id'], src_tokens=batch['net_input']['src_tokens'], src_lengths=batch['net_input']['src_lengths']))
def main(args): utils.import_user_module(args) if (args.buffer_size < 1): args.buffer_size = 1 if ((args.max_tokens is None) and (args.max_sentences is None)): args.max_sentences = 1 assert ((not args.sampling) or (args.nbest == args.beam)), '--sampling requires --nbest to be equal to --beam' assert ((not args.max_sentences) or (args.max_sentences <= args.buffer_size)), '--max-sentences/--batch-size cannot be larger than --buffer-size' print(args) use_cuda = (torch.cuda.is_available() and (not args.cpu)) task = tasks.setup_task(args) print('| loading model(s) from {}'.format(args.path)) (models, _model_args) = checkpoint_utils.load_model_ensemble(args.path.split(':'), arg_overrides=eval(args.model_overrides), task=task) src_dict = task.source_dictionary tgt_dict = task.target_dictionary for model in models: model.make_generation_fast_(beamable_mm_beam_size=(None if args.no_beamable_mm else args.beam), need_attn=args.print_alignment) if args.fp16: model.half() if use_cuda: model.cuda() generator = task.build_generator(args) tokenizer = encoders.build_tokenizer(args) bpe = encoders.build_bpe(args) def encode_fn(x): if (tokenizer is not None): x = tokenizer.encode(x) if (bpe is not None): x = bpe.encode(x) return x def decode_fn(x): if (bpe is not None): x = bpe.decode(x) if (tokenizer is not None): x = tokenizer.decode(x) return x align_dict = utils.load_align_dict(args.replace_unk) max_positions = utils.resolve_max_positions(task.max_positions(), *[model.max_positions() for model in models]) if (args.buffer_size > 1): print('| Sentence buffer size:', args.buffer_size) print('| Type the input sentence and press return:') start_id = 0 for inputs in buffered_read(args.input, args.buffer_size): results = [] for batch in make_batches(inputs, args, task, max_positions, encode_fn): src_tokens = batch.src_tokens src_lengths = batch.src_lengths if use_cuda: src_tokens = src_tokens.cuda() src_lengths = src_lengths.cuda() sample = {'net_input': {'src_tokens': src_tokens, 'src_lengths': src_lengths}} translations = task.inference_step(generator, models, sample) for (i, (id, hypos)) in enumerate(zip(batch.ids.tolist(), translations)): src_tokens_i = utils.strip_pad(src_tokens[i], tgt_dict.pad()) results.append(((start_id + id), src_tokens_i, hypos)) for (id, src_tokens, hypos) in sorted(results, key=(lambda x: x[0])): if (src_dict is not None): src_str = src_dict.string(src_tokens, args.remove_bpe) print('S-{}\t{}'.format(id, src_str)) for hypo in hypos[:min(len(hypos), args.nbest)]: (hypo_tokens, hypo_str, alignment) = utils.post_process_prediction(hypo_tokens=hypo['tokens'].int().cpu(), src_str=src_str, alignment=hypo['alignment'], align_dict=align_dict, tgt_dict=tgt_dict, remove_bpe=args.remove_bpe) hypo_str = decode_fn(hypo_str) print('H-{}\t{}\t{}'.format(id, hypo['score'], hypo_str)) print('P-{}\t{}'.format(id, ' '.join(map((lambda x: '{:.4f}'.format(x)), hypo['positional_scores'].tolist())))) if args.print_alignment: alignment_str = ' '.join(['{}-{}'.format(src, tgt) for (src, tgt) in alignment]) print('A-{}\t{}'.format(id, alignment_str)) start_id += len(inputs)
def cli_main(): parser = options.get_generation_parser(interactive=True) args = options.parse_args_and_arch(parser) main(args)
def main(args): utils.import_user_module(args) print(args) os.makedirs(args.destdir, exist_ok=True) target = (not args.only_source) task = tasks.get_task(args.task) def train_path(lang): return '{}{}'.format(args.trainpref, (('.' + lang) if lang else '')) def file_name(prefix, lang): fname = prefix if (lang is not None): fname += '.{lang}'.format(lang=lang) return fname def dest_path(prefix, lang): return os.path.join(args.destdir, file_name(prefix, lang)) def dict_path(lang): return (dest_path('dict', lang) + '.txt') def build_dictionary(filenames, src=False, tgt=False): assert (src ^ tgt) return task.build_dictionary(filenames, workers=args.workers, threshold=(args.thresholdsrc if src else args.thresholdtgt), nwords=(args.nwordssrc if src else args.nwordstgt), padding_factor=args.padding_factor) if ((not args.srcdict) and os.path.exists(dict_path(args.source_lang))): raise FileExistsError(dict_path(args.source_lang)) if (target and (not args.tgtdict) and os.path.exists(dict_path(args.target_lang))): raise FileExistsError(dict_path(args.target_lang)) if args.joined_dictionary: assert ((not args.srcdict) or (not args.tgtdict)), 'cannot use both --srcdict and --tgtdict with --joined-dictionary' if args.srcdict: src_dict = task.load_dictionary(args.srcdict) elif args.tgtdict: src_dict = task.load_dictionary(args.tgtdict) else: assert args.trainpref, '--trainpref must be set if --srcdict is not specified' src_dict = build_dictionary({train_path(lang) for lang in [args.source_lang, args.target_lang]}, src=True) tgt_dict = src_dict else: if args.srcdict: src_dict = task.load_dictionary(args.srcdict) else: assert args.trainpref, '--trainpref must be set if --srcdict is not specified' src_dict = build_dictionary([train_path(args.source_lang)], src=True) if target: if args.tgtdict: tgt_dict = task.load_dictionary(args.tgtdict) else: assert args.trainpref, '--trainpref must be set if --tgtdict is not specified' tgt_dict = build_dictionary([train_path(args.target_lang)], tgt=True) else: tgt_dict = None src_dict.save(dict_path(args.source_lang)) if (target and (tgt_dict is not None)): tgt_dict.save(dict_path(args.target_lang)) def make_binary_dataset(vocab, input_prefix, output_prefix, lang, num_workers): print('| [{}] Dictionary: {} types'.format(lang, (len(vocab) - 1))) n_seq_tok = [0, 0] replaced = Counter() def merge_result(worker_result): replaced.update(worker_result['replaced']) n_seq_tok[0] += worker_result['nseq'] n_seq_tok[1] += worker_result['ntok'] input_file = '{}{}'.format(input_prefix, (('.' 
+ lang) if (lang is not None) else '')) offsets = Binarizer.find_offsets(input_file, num_workers) pool = None if (num_workers > 1): pool = Pool(processes=(num_workers - 1)) for worker_id in range(1, num_workers): prefix = '{}{}'.format(output_prefix, worker_id) pool.apply_async(binarize, (args, input_file, vocab, prefix, lang, offsets[worker_id], offsets[(worker_id + 1)]), callback=merge_result) pool.close() ds = indexed_dataset.make_builder(dataset_dest_file(args, output_prefix, lang, 'bin'), impl=args.dataset_impl, vocab_size=len(vocab)) merge_result(Binarizer.binarize(input_file, vocab, (lambda t: ds.add_item(t)), offset=0, end=offsets[1])) if (num_workers > 1): pool.join() for worker_id in range(1, num_workers): prefix = '{}{}'.format(output_prefix, worker_id) temp_file_path = dataset_dest_prefix(args, prefix, lang) ds.merge_file_(temp_file_path) os.remove(indexed_dataset.data_file_path(temp_file_path)) os.remove(indexed_dataset.index_file_path(temp_file_path)) ds.finalize(dataset_dest_file(args, output_prefix, lang, 'idx')) print('| [{}] {}: {} sents, {} tokens, {:.3}% replaced by {}'.format(lang, input_file, n_seq_tok[0], n_seq_tok[1], ((100 * sum(replaced.values())) / n_seq_tok[1]), vocab.unk_word)) def make_binary_alignment_dataset(input_prefix, output_prefix, num_workers): nseq = [0] def merge_result(worker_result): nseq[0] += worker_result['nseq'] input_file = input_prefix offsets = Binarizer.find_offsets(input_file, num_workers) pool = None if (num_workers > 1): pool = Pool(processes=(num_workers - 1)) for worker_id in range(1, num_workers): prefix = '{}{}'.format(output_prefix, worker_id) pool.apply_async(binarize_alignments, (args, input_file, utils.parse_alignment, prefix, offsets[worker_id], offsets[(worker_id + 1)]), callback=merge_result) pool.close() ds = indexed_dataset.make_builder(dataset_dest_file(args, output_prefix, None, 'bin'), impl=args.dataset_impl) merge_result(Binarizer.binarize_alignments(input_file, utils.parse_alignment, (lambda t: ds.add_item(t)), offset=0, end=offsets[1])) if (num_workers > 1): pool.join() for worker_id in range(1, num_workers): prefix = '{}{}'.format(output_prefix, worker_id) temp_file_path = dataset_dest_prefix(args, prefix, None) ds.merge_file_(temp_file_path) os.remove(indexed_dataset.data_file_path(temp_file_path)) os.remove(indexed_dataset.index_file_path(temp_file_path)) ds.finalize(dataset_dest_file(args, output_prefix, None, 'idx')) print('| [alignments] {}: parsed {} alignments'.format(input_file, nseq[0])) def make_dataset(vocab, input_prefix, output_prefix, lang, num_workers=1): if (args.dataset_impl == 'raw'): output_text_file = dest_path((output_prefix + '.{}-{}'.format(args.source_lang, args.target_lang)), lang) shutil.copyfile(file_name(input_prefix, lang), output_text_file) else: make_binary_dataset(vocab, input_prefix, output_prefix, lang, num_workers) def make_all(lang, vocab): if args.trainpref: make_dataset(vocab, args.trainpref, 'train', lang, num_workers=args.workers) if args.validpref: for (k, validpref) in enumerate(args.validpref.split(',')): outprefix = ('valid{}'.format(k) if (k > 0) else 'valid') make_dataset(vocab, validpref, outprefix, lang, num_workers=args.workers) if args.testpref: for (k, testpref) in enumerate(args.testpref.split(',')): outprefix = ('test{}'.format(k) if (k > 0) else 'test') make_dataset(vocab, testpref, outprefix, lang, num_workers=args.workers) def make_all_alignments(): if (args.trainpref and os.path.exists(((args.trainpref + '.') + args.align_suffix))): 
make_binary_alignment_dataset(((args.trainpref + '.') + args.align_suffix), 'train.align', num_workers=args.workers) if (args.validpref and os.path.exists(((args.validpref + '.') + args.align_suffix))): make_binary_alignment_dataset(((args.validpref + '.') + args.align_suffix), 'valid.align', num_workers=args.workers) if (args.testpref and os.path.exists(((args.testpref + '.') + args.align_suffix))): make_binary_alignment_dataset(((args.testpref + '.') + args.align_suffix), 'test.align', num_workers=args.workers) make_all(args.source_lang, src_dict) if target: make_all(args.target_lang, tgt_dict) if args.align_suffix: make_all_alignments() print('| Wrote preprocessed data to {}'.format(args.destdir)) if args.alignfile: assert args.trainpref, '--trainpref must be set if --alignfile is specified' src_file_name = train_path(args.source_lang) tgt_file_name = train_path(args.target_lang) freq_map = {} with open(args.alignfile, 'r', encoding='utf-8') as align_file: with open(src_file_name, 'r', encoding='utf-8') as src_file: with open(tgt_file_name, 'r', encoding='utf-8') as tgt_file: for (a, s, t) in zip_longest(align_file, src_file, tgt_file): si = src_dict.encode_line(s, add_if_not_exist=False) ti = tgt_dict.encode_line(t, add_if_not_exist=False) ai = list(map((lambda x: tuple(x.split('-'))), a.split())) for (sai, tai) in ai: srcidx = si[int(sai)] tgtidx = ti[int(tai)] if ((srcidx != src_dict.unk()) and (tgtidx != tgt_dict.unk())): assert (srcidx != src_dict.pad()) assert (srcidx != src_dict.eos()) assert (tgtidx != tgt_dict.pad()) assert (tgtidx != tgt_dict.eos()) if (srcidx not in freq_map): freq_map[srcidx] = {} if (tgtidx not in freq_map[srcidx]): freq_map[srcidx][tgtidx] = 1 else: freq_map[srcidx][tgtidx] += 1 align_dict = {} for srcidx in freq_map.keys(): align_dict[srcidx] = max(freq_map[srcidx], key=freq_map[srcidx].get) with open(os.path.join(args.destdir, 'alignment.{}-{}.txt'.format(args.source_lang, args.target_lang)), 'w', encoding='utf-8') as f: for (k, v) in align_dict.items(): print('{} {}'.format(src_dict[k], tgt_dict[v]), file=f)
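# Hedged usage sketch (not part of the original script): builds a preprocessing
# config the same way cli_main below does, but from an explicit argument list and
# then calls the preprocessing main above. The corpus prefixes, language codes and
# destination directory are hypothetical placeholders.
def _example_preprocess():
    parser = options.get_preprocessing_parser()
    args = parser.parse_args([
        '--source-lang', 'de',
        '--target-lang', 'en',
        '--trainpref', 'corpus/train',   # expects corpus/train.de and corpus/train.en
        '--validpref', 'corpus/valid',
        '--destdir', 'data-bin/demo',
        '--workers', '4',
        '--joined-dictionary',
    ])
    main(args)  # writes dict.{de,en}.txt plus binarized train/valid splits to --destdir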
def binarize(args, filename, vocab, output_prefix, lang, offset, end, append_eos=True): ds = indexed_dataset.make_builder(dataset_dest_file(args, output_prefix, lang, 'bin'), impl=args.dataset_impl, vocab_size=len(vocab)) def consumer(tensor): ds.add_item(tensor) res = Binarizer.binarize(filename, vocab, consumer, append_eos=append_eos, offset=offset, end=end) ds.finalize(dataset_dest_file(args, output_prefix, lang, 'idx')) return res
def binarize_alignments(args, filename, parse_alignment, output_prefix, offset, end): ds = indexed_dataset.make_builder(dataset_dest_file(args, output_prefix, None, 'bin'), impl=args.dataset_impl, vocab_size=None) def consumer(tensor): ds.add_item(tensor) res = Binarizer.binarize_alignments(filename, parse_alignment, consumer, offset=offset, end=end) ds.finalize(dataset_dest_file(args, output_prefix, None, 'idx')) return res
def dataset_dest_prefix(args, output_prefix, lang): base = '{}/{}'.format(args.destdir, output_prefix) if (lang is not None): lang_part = '.{}-{}.{}'.format(args.source_lang, args.target_lang, lang) elif args.only_source: lang_part = '' else: lang_part = '.{}-{}'.format(args.source_lang, args.target_lang) return '{}{}'.format(base, lang_part)
def dataset_dest_file(args, output_prefix, lang, extension): base = dataset_dest_prefix(args, output_prefix, lang) return '{}.{}'.format(base, extension)
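# Hedged sketch of the output paths produced by dataset_dest_prefix and
# dataset_dest_file above, using a hypothetical namespace in place of real
# parsed arguments.
def _example_dest_paths():
    from argparse import Namespace
    args = Namespace(destdir='data-bin/demo', source_lang='de', target_lang='en', only_source=False)
    assert dataset_dest_prefix(args, 'train', 'de') == 'data-bin/demo/train.de-en.de'
    assert dataset_dest_prefix(args, 'train', None) == 'data-bin/demo/train.de-en'
    assert dataset_dest_file(args, 'valid', 'en', 'bin') == 'data-bin/demo/valid.de-en.en.bin'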
def get_offsets(input_file, num_workers): return Binarizer.find_offsets(input_file, num_workers)
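# Hedged note: as used in make_binary_dataset above, Binarizer.find_offsets is
# expected to return num_workers + 1 byte offsets that partition the input file,
# with worker i binarizing the chunk [offsets[i], offsets[i + 1]). The path below
# is a hypothetical placeholder.
def _example_offsets(path='corpus/train.de', num_workers=4):
    offsets = get_offsets(path, num_workers)
    chunks = list(zip(offsets[:-1], offsets[1:]))  # one (start, end) pair per worker
    return chunks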
def cli_main(): parser = options.get_preprocessing_parser() args = parser.parse_args() main(args)
def get_parser(): parser = argparse.ArgumentParser(description='Command-line script for BLEU scoring.') parser.add_argument('-s', '--sys', default='-', help='system output') parser.add_argument('-r', '--ref', required=True, help='references') parser.add_argument('-o', '--order', default=4, metavar='N', type=int, help='consider ngrams up to this order') parser.add_argument('--ignore-case', action='store_true', help='case-insensitive scoring') parser.add_argument('--sacrebleu', action='store_true', help='score with sacrebleu') parser.add_argument('--sentence-bleu', action='store_true', help='report sentence-level BLEUs (i.e., with +1 smoothing)') return parser
def main(): parser = get_parser() args = parser.parse_args() print(args) assert ((args.sys == '-') or os.path.exists(args.sys)), 'System output file {} does not exist'.format(args.sys) assert os.path.exists(args.ref), 'Reference file {} does not exist'.format(args.ref) dict = dictionary.Dictionary() def readlines(fd): for line in fd.readlines(): if args.ignore_case: (yield line.lower()) else: (yield line) if args.sacrebleu: import sacrebleu def score(fdsys): with open(args.ref) as fdref: print(sacrebleu.corpus_bleu(fdsys, [fdref])) elif args.sentence_bleu: def score(fdsys): with open(args.ref) as fdref: scorer = bleu.Scorer(dict.pad(), dict.eos(), dict.unk()) for (i, (sys_tok, ref_tok)) in enumerate(zip(readlines(fdsys), readlines(fdref))): scorer.reset(one_init=True) sys_tok = dict.encode_line(sys_tok) ref_tok = dict.encode_line(ref_tok) scorer.add(ref_tok, sys_tok) print(i, scorer.result_string(args.order)) else: def score(fdsys): with open(args.ref) as fdref: scorer = bleu.Scorer(dict.pad(), dict.eos(), dict.unk()) for (sys_tok, ref_tok) in zip(readlines(fdsys), readlines(fdref)): sys_tok = dict.encode_line(sys_tok) ref_tok = dict.encode_line(ref_tok) scorer.add(ref_tok, sys_tok) print(scorer.result_string(args.order)) if (args.sys == '-'): score(sys.stdin) else: with open(args.sys, 'r') as f: score(f)
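# Hedged sketch mirroring the plain (non-sacrebleu) scoring branch above: build a
# throwaway Dictionary, encode whitespace-tokenized hypothesis/reference strings,
# and accumulate them in a bleu.Scorer. The example sentences are hypothetical.
def _example_corpus_bleu():
    d = dictionary.Dictionary()
    scorer = bleu.Scorer(d.pad(), d.eos(), d.unk())
    ref_tok = d.encode_line('the cat sat on the mat')
    sys_tok = d.encode_line('the cat sat on a mat')
    scorer.add(ref_tok, sys_tok)
    print(scorer.result_string(4))  # BLEU up to 4-grams, as with the default --order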
def average_checkpoints(inputs): "Loads checkpoints from inputs and returns a model with averaged weights.\n\n Args:\n inputs: An iterable of string paths of checkpoints to load from.\n\n Returns:\n A dict of string keys mapping to various values. The 'model' key\n from the returned dict should correspond to an OrderedDict mapping\n string parameter names to torch Tensors.\n " params_dict = collections.OrderedDict() params_keys = None new_state = None num_models = len(inputs) for f in inputs: state = torch.load(f, map_location=(lambda s, _: torch.serialization.default_restore_location(s, 'cpu'))) if (new_state is None): new_state = state model_params = state['model'] model_params_keys = list(model_params.keys()) if (params_keys is None): params_keys = model_params_keys elif (params_keys != model_params_keys): raise KeyError('For checkpoint {}, expected list of params: {}, but found: {}'.format(f, params_keys, model_params_keys)) for k in params_keys: p = model_params[k] if isinstance(p, torch.HalfTensor): p = p.float() if (k not in params_dict): params_dict[k] = p.clone() else: params_dict[k] += p averaged_params = collections.OrderedDict() for (k, v) in params_dict.items(): averaged_params[k] = v averaged_params[k].div_(num_models) new_state['model'] = averaged_params return new_state
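# Hedged usage sketch: average two hypothetical checkpoint files and write the
# result, mirroring what the averaging main() below does after resolving --inputs.
def _example_average():
    inputs = ['checkpoints/checkpoint28.pt', 'checkpoints/checkpoint29.pt']  # hypothetical paths
    new_state = average_checkpoints(inputs)
    torch.save(new_state, 'checkpoints/checkpoint.avg2.pt')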
def last_n_checkpoints(paths, n, update_based, upper_bound=None): assert (len(paths) == 1) path = paths[0] if update_based: pt_regexp = re.compile('checkpoint_\\d+_(\\d+)\\.pt') else: pt_regexp = re.compile('checkpoint(\\d+)\\.pt') files = os.listdir(path) entries = [] for f in files: m = pt_regexp.fullmatch(f) if (m is not None): sort_key = int(m.group(1)) if ((upper_bound is None) or (sort_key <= upper_bound)): entries.append((sort_key, m.group(0))) if (len(entries) < n): raise Exception('Found {} checkpoint files but need at least {}'.format(len(entries), n)) return [os.path.join(path, x[1]) for x in sorted(entries, reverse=True)[:n]]
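# Hedged sketch of the filename patterns last_n_checkpoints selects between:
# epoch checkpoints look like checkpoint28.pt, update checkpoints like
# checkpoint_3_18000.pt (epoch_updates). The checkpoint directory is hypothetical.
def _example_last_checkpoints():
    newest_epoch_ckpts = last_n_checkpoints(['checkpoints/'], n=5, update_based=False)
    newest_update_ckpts = last_n_checkpoints(['checkpoints/'], n=5, update_based=True, upper_bound=20000)
    return newest_epoch_ckpts, newest_update_ckpts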
def main(): parser = argparse.ArgumentParser(description='Tool to average the params of input checkpoints to produce a new checkpoint') parser.add_argument('--inputs', required=True, nargs='+', help='Input checkpoint file paths.') parser.add_argument('--output', required=True, metavar='FILE', help='Write the new checkpoint containing the averaged weights to this path.') num_group = parser.add_mutually_exclusive_group() num_group.add_argument('--num-epoch-checkpoints', type=int, help='if set, will try to find checkpoints with names checkpoint_xx.pt in the path specified by input, and average last this many of them.') num_group.add_argument('--num-update-checkpoints', type=int, help='if set, will try to find checkpoints with names checkpoint_ee_xx.pt in the path specified by input, and average last this many of them.') parser.add_argument('--checkpoint-upper-bound', type=int, help='when using --num-epoch-checkpoints, this will set an upper bound on which checkpoint to use, e.g., with --num-epoch-checkpoints=10 --checkpoint-upper-bound=50, checkpoints 41-50 would be averaged.') args = parser.parse_args() print(args) num = None is_update_based = False if (args.num_update_checkpoints is not None): num = args.num_update_checkpoints is_update_based = True elif (args.num_epoch_checkpoints is not None): num = args.num_epoch_checkpoints assert ((args.checkpoint_upper_bound is None) or (args.num_epoch_checkpoints is not None)), '--checkpoint-upper-bound requires --num-epoch-checkpoints' assert ((args.num_epoch_checkpoints is None) or (args.num_update_checkpoints is None)), 'Cannot combine --num-epoch-checkpoints and --num-update-checkpoints' if (num is not None): args.inputs = last_n_checkpoints(args.inputs, num, is_update_based, upper_bound=args.checkpoint_upper_bound) print('averaging checkpoints: ', args.inputs) new_state = average_checkpoints(args.inputs) torch.save(new_state, args.output) print('Finished writing averaged checkpoint to {}.'.format(args.output))
class NumpyExtension(Extension): 'Source: https://stackoverflow.com/a/54128391' def __init__(self, *args, **kwargs): self.__include_dirs = [] super().__init__(*args, **kwargs) @property def include_dirs(self): import numpy return (self.__include_dirs + [numpy.get_include()]) @include_dirs.setter def include_dirs(self, dirs): self.__include_dirs = dirs
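# Hedged sketch of how NumpyExtension might be used: numpy's include directory is
# only resolved when include_dirs is read at build time, so numpy does not need to
# be importable when setup.py is first parsed. Package, module and source names are
# hypothetical; in a real setup.py the setup() call would sit at module level.
def _example_setup():
    from setuptools import setup
    setup(
        name='example_pkg',
        ext_modules=[NumpyExtension('example_pkg.fast_ops', sources=['example_pkg/fast_ops.c'])],
        setup_requires=['numpy'],
    )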
def main(args, init_distributed=False): utils.import_user_module(args) try: from fairseq.fb_pathmgr import fb_pathmgr global fb_pathmgr_registerd if (not fb_pathmgr_registerd): fb_pathmgr.register() fb_pathmgr_registerd = True except (ModuleNotFoundError, ImportError): pass assert ((args.max_tokens is not None) or (args.max_sentences is not None)), 'Must specify batch size either with --max-tokens or --max-sentences' if (torch.cuda.is_available() and (not args.cpu)): torch.cuda.set_device(args.device_id) np.random.seed(args.seed) torch.manual_seed(args.seed) if init_distributed: args.distributed_rank = distributed_utils.distributed_init(args) if distributed_utils.is_master(args): checkpoint_utils.verify_checkpoint_directory(args.save_dir) print(args) task = tasks.setup_task(args) for valid_sub_split in args.valid_subset.split(','): task.load_dataset(valid_sub_split, combine=False, epoch=0) model = task.build_model(args) criterion = task.build_criterion(args) print(model) print('| model {}, criterion {}'.format(args.arch, criterion.__class__.__name__)) print('| num. model params: {} (num. trained: {})'.format(sum((p.numel() for p in model.parameters())), sum((p.numel() for p in model.parameters() if p.requires_grad)))) trainer = Trainer(args, task, model, criterion) print('| training on {} GPUs'.format(args.distributed_world_size)) print('| max tokens per GPU = {} and max sentences per GPU = {}'.format(args.max_tokens, args.max_sentences)) (extra_state, epoch_itr) = checkpoint_utils.load_checkpoint(args, trainer) max_epoch = (args.max_epoch or math.inf) max_update = (args.max_update or math.inf) lr = trainer.get_lr() train_meter = StopwatchMeter() train_meter.start() valid_subsets = args.valid_subset.split(',') if (not hasattr(checkpoint_utils.save_checkpoint, 'not_best')): checkpoint_utils.save_checkpoint.not_best = 0 while ((lr > args.min_lr) and (epoch_itr.epoch < max_epoch) and (trainer.get_num_updates() < max_update)): train(args, trainer, task, epoch_itr) if ((not args.disable_validation) and ((epoch_itr.epoch % args.validate_interval) == 0)): valid_losses = validate(args, trainer, task, epoch_itr, valid_subsets) if (args.early_stop > 0): if (hasattr(checkpoint_utils.save_checkpoint, 'best') and (valid_losses[0] > checkpoint_utils.save_checkpoint.best)): checkpoint_utils.save_checkpoint.not_best += 1 print('| Not the best ckpt... not best:', checkpoint_utils.save_checkpoint.not_best) if (checkpoint_utils.save_checkpoint.not_best > args.early_stop): print('| Early stop...') break else: checkpoint_utils.save_checkpoint.not_best = 0 else: valid_losses = [None] lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0]) if ((epoch_itr.epoch % args.save_interval) == 0): checkpoint_utils.save_checkpoint(args, trainer, epoch_itr, valid_losses[0]) reload_dataset = (':' in getattr(args, 'data', '')) epoch_itr = trainer.get_train_iterator(epoch_itr.epoch, load_dataset=reload_dataset) train_meter.stop() print('| done training in {:.1f} seconds'.format(train_meter.sum))
def train(args, trainer, task, epoch_itr): 'Train the model for one epoch.' update_freq = (args.update_freq[(epoch_itr.epoch - 1)] if (epoch_itr.epoch <= len(args.update_freq)) else args.update_freq[(- 1)]) itr = epoch_itr.next_epoch_itr(fix_batches_to_gpus=args.fix_batches_to_gpus, shuffle=(epoch_itr.epoch >= args.curriculum)) itr = iterators.GroupedIterator(itr, update_freq) progress = progress_bar.build_progress_bar(args, itr, epoch_itr.epoch, no_progress_bar='simple') extra_meters = collections.defaultdict((lambda : AverageMeter())) valid_subsets = args.valid_subset.split(',') max_update = (args.max_update or math.inf) for (i, samples) in enumerate(progress, start=epoch_itr.iterations_in_epoch): log_output = trainer.train_step(samples) if (log_output is None): continue stats = get_training_stats(trainer) for (k, v) in log_output.items(): if (k in ['loss', 'nll_loss', 'ntokens', 'nsentences', 'sample_size']): continue if (('loss' in k) or (k == 'accuracy')): extra_meters[k].update(v, log_output['sample_size']) else: extra_meters[k].update(v) stats[k] = extra_meters[k].avg progress.log(stats, tag='train', step=stats['num_updates']) if (i == 0): trainer.get_meter('wps').reset() trainer.get_meter('ups').reset() num_updates = trainer.get_num_updates() if ((not args.disable_validation) and (args.save_interval_updates > 0) and ((num_updates % args.save_interval_updates) == 0) and (num_updates > 0)): valid_losses = validate(args, trainer, task, epoch_itr, valid_subsets) checkpoint_utils.save_checkpoint(args, trainer, epoch_itr, valid_losses[0]) if (num_updates >= max_update): break stats = get_training_stats(trainer) for (k, meter) in extra_meters.items(): stats[k] = meter.avg progress.print(stats, tag='train', step=stats['num_updates']) for k in ['train_loss', 'train_nll_loss', 'wps', 'ups', 'wpb', 'bsz', 'gnorm', 'clip']: meter = trainer.get_meter(k) if (meter is not None): meter.reset()
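# Hedged worked example of the per-epoch --update-freq schedule used at the top of
# train(): with --update-freq 4 2 1, epochs 1-3 accumulate 4, 2 and 1 batches per
# optimizer step, and every later epoch keeps using the last value, 1.
def _example_update_freq(epoch, update_freq=(4, 2, 1)):
    return update_freq[epoch - 1] if epoch <= len(update_freq) else update_freq[-1]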
def get_training_stats(trainer): stats = collections.OrderedDict() stats['loss'] = trainer.get_meter('train_loss') if (trainer.get_meter('train_nll_loss').count > 0): nll_loss = trainer.get_meter('train_nll_loss') stats['nll_loss'] = nll_loss else: nll_loss = trainer.get_meter('train_loss') stats['ppl'] = utils.get_perplexity(nll_loss.avg) stats['wps'] = trainer.get_meter('wps') stats['ups'] = trainer.get_meter('ups') stats['wpb'] = trainer.get_meter('wpb') stats['bsz'] = trainer.get_meter('bsz') stats['num_updates'] = trainer.get_num_updates() stats['lr'] = trainer.get_lr() stats['gnorm'] = trainer.get_meter('gnorm') stats['clip'] = trainer.get_meter('clip') stats['oom'] = trainer.get_meter('oom') if (trainer.get_meter('loss_scale') is not None): stats['loss_scale'] = trainer.get_meter('loss_scale') stats['wall'] = round(trainer.get_meter('wall').elapsed_time) stats['train_wall'] = trainer.get_meter('train_wall') return stats
def validate(args, trainer, task, epoch_itr, subsets): 'Evaluate the model on the validation set(s) and return the losses.' if (args.fixed_validation_seed is not None): utils.set_torch_seed(args.fixed_validation_seed) valid_losses = [] for subset in subsets: itr = task.get_batch_iterator(dataset=task.dataset(subset), max_tokens=args.max_tokens_valid, max_sentences=args.max_sentences_valid, max_positions=utils.resolve_max_positions(task.max_positions(), trainer.get_model().max_positions()), ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test, required_batch_size_multiple=args.required_batch_size_multiple, seed=args.seed, num_shards=args.distributed_world_size, shard_id=args.distributed_rank, num_workers=args.num_workers).next_epoch_itr(shuffle=False) progress = progress_bar.build_progress_bar(args, itr, epoch_itr.epoch, prefix="valid on '{}' subset".format(subset), no_progress_bar='simple') for k in ['valid_loss', 'valid_nll_loss']: meter = trainer.get_meter(k) if (meter is not None): meter.reset() extra_meters = collections.defaultdict((lambda : AverageMeter())) for sample in progress: log_output = trainer.valid_step(sample) for (k, v) in log_output.items(): if (k in ['loss', 'nll_loss', 'ntokens', 'nsentences', 'sample_size']): continue extra_meters[k].update(v) stats = get_valid_stats(trainer, args, extra_meters) for (k, meter) in extra_meters.items(): stats[k] = meter.avg progress.print(stats, tag=subset, step=trainer.get_num_updates()) valid_losses.append((stats[args.best_checkpoint_metric].avg if (args.best_checkpoint_metric == 'loss') else stats[args.best_checkpoint_metric])) return valid_losses
def get_valid_stats(trainer, args, extra_meters=None): stats = collections.OrderedDict() stats['loss'] = trainer.get_meter('valid_loss') if (trainer.get_meter('valid_nll_loss').count > 0): nll_loss = trainer.get_meter('valid_nll_loss') stats['nll_loss'] = nll_loss else: nll_loss = stats['loss'] stats['ppl'] = utils.get_perplexity(nll_loss.avg) stats['num_updates'] = trainer.get_num_updates() if hasattr(checkpoint_utils.save_checkpoint, 'best'): key = 'best_{0}'.format(args.best_checkpoint_metric) best_function = (max if args.maximize_best_checkpoint_metric else min) current_metric = None if (args.best_checkpoint_metric == 'loss'): current_metric = stats['loss'].avg elif (args.best_checkpoint_metric in extra_meters): current_metric = extra_meters[args.best_checkpoint_metric].avg elif (args.best_checkpoint_metric in stats): current_metric = stats[args.best_checkpoint_metric] else: raise ValueError('best_checkpoint_metric not found in logs') stats[key] = best_function(checkpoint_utils.save_checkpoint.best, current_metric) return stats
def distributed_main(i, args, start_rank=0): args.device_id = i if (args.distributed_rank is None): args.distributed_rank = (start_rank + i) main(args, init_distributed=True)
def cli_main(): parser = options.get_training_parser() args = options.parse_args_and_arch(parser) if (args.distributed_init_method is None): distributed_utils.infer_init_method(args) if (args.distributed_init_method is not None): if ((torch.cuda.device_count() > 1) and (not args.distributed_no_spawn)): start_rank = args.distributed_rank args.distributed_rank = None torch.multiprocessing.spawn(fn=distributed_main, args=(args, start_rank), nprocs=torch.cuda.device_count()) else: distributed_main(args.device_id, args) elif (args.distributed_world_size > 1): assert (args.distributed_world_size <= torch.cuda.device_count()) port = random.randint(10000, 20000) args.distributed_init_method = 'tcp://localhost:{port}'.format(port=port) args.distributed_rank = None if ((max(args.update_freq) > 1) and (args.ddp_backend != 'no_c10d')): print('| NOTE: you may get better performance with: --ddp-backend=no_c10d') torch.multiprocessing.spawn(fn=distributed_main, args=(args,), nprocs=args.distributed_world_size) else: main(args)
def main(args, override_args=None): utils.import_user_module(args) use_fp16 = args.fp16 use_cuda = (torch.cuda.is_available() and (not args.cpu)) if (override_args is not None): overrides = vars(override_args) overrides.update(eval(getattr(override_args, 'model_overrides', '{}'))) else: overrides = None print('| loading model(s) from {}'.format(args.path)) (models, model_args, task) = checkpoint_utils.load_model_ensemble_and_task([args.path], arg_overrides=overrides) model = models[0] for model in models: if use_fp16: model.half() if use_cuda: model.cuda() print(model_args) criterion = task.build_criterion(model_args) criterion.eval() for subset in args.valid_subset.split(','): try: task.load_dataset(subset, combine=False, epoch=0) dataset = task.dataset(subset) except KeyError: raise Exception(('Cannot find dataset: ' + subset)) itr = task.get_batch_iterator(dataset=dataset, max_tokens=args.max_tokens, max_sentences=args.max_sentences, max_positions=utils.resolve_max_positions(task.max_positions(), *[m.max_positions() for m in models]), ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test, required_batch_size_multiple=args.required_batch_size_multiple, seed=args.seed, num_workers=args.num_workers).next_epoch_itr(shuffle=False) progress = progress_bar.build_progress_bar(args, itr, prefix="valid on '{}' subset".format(subset), no_progress_bar='simple') log_outputs = [] for (i, sample) in enumerate(progress): sample = (utils.move_to_cuda(sample) if use_cuda else sample) (_loss, _sample_size, log_output) = task.valid_step(sample, model, criterion) progress.log(log_output, step=i) log_outputs.append(log_output) log_output = task.aggregate_logging_outputs(log_outputs, criterion) progress.print(log_output, tag=subset, step=i)
def cli_main(): parser = options.get_validation_parser() args = options.parse_args_and_arch(parser) override_parser = options.get_validation_parser() override_args = options.parse_args_and_arch(override_parser, suppress_defaults=True) main(args, override_args)
@lru_cache() def default_bpe(): return os.path.join(os.path.dirname(os.path.abspath(__file__)), 'bpe_simple_vocab_16e6.txt.gz')
@lru_cache() def bytes_to_unicode(): "\n Returns a mapping of utf-8 bytes to corresponding unicode strings.\n The reversible bpe codes work on unicode strings.\n This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.\n When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.\n This is a significant percentage of your normal, say, 32K bpe vocab.\n To avoid that, we want lookup tables between utf-8 bytes and unicode strings.\n The mapping also avoids whitespace/control characters that the bpe code barfs on.\n " bs = ((list(range(ord('!'), (ord('~') + 1))) + list(range(ord('¡'), (ord('¬') + 1)))) + list(range(ord('®'), (ord('ÿ') + 1)))) cs = bs[:] n = 0 for b in range((2 ** 8)): if (b not in bs): bs.append(b) cs.append(((2 ** 8) + n)) n += 1 cs = [chr(n) for n in cs] return dict(zip(bs, cs))
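# Hedged sketch of the byte-level mapping above: every possible byte value gets a
# printable unicode character and the mapping is reversible, so arbitrary UTF-8
# text can pass through a BPE vocabulary without UNKs. The sample string is hypothetical.
def _example_byte_encoding(text='héllo'):
    byte_encoder = bytes_to_unicode()
    byte_decoder = {v: k for (k, v) in byte_encoder.items()}
    encoded = ''.join(byte_encoder[b] for b in text.encode('utf-8'))
    # round-trip: decoding the printable characters recovers the original bytes
    assert bytes(byte_decoder[c] for c in encoded).decode('utf-8') == text
    return encoded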
def get_pairs(word): 'Return set of symbol pairs in a word.\n Word is represented as tuple of symbols (symbols being variable-length strings).\n ' pairs = set() prev_char = word[0] for char in word[1:]: pairs.add((prev_char, char)) prev_char = char return pairs
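# Hedged sketch of get_pairs on a word already split into symbols; the returned
# pairs are the candidate merges a BPE step would consider.
def _example_get_pairs():
    pairs = get_pairs(('l', 'o', 'w', 'e', 'r'))
    assert pairs == {('l', 'o'), ('o', 'w'), ('w', 'e'), ('e', 'r')}
    return pairs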