Dataset schema (one row per source file; ranges are over the observed values):

- repo: string, length 7 to 90
- file_url: string, length 81 to 315
- file_path: string, length 4 to 228
- content: string, length 0 to 32.8k
- language: string, 1 distinct class
- license: string, 7 distinct classes
- commit_sha: string, fixed length 40
- retrieved_at: date, from 2026-01-04 14:38:15 to 2026-01-05 02:33:18
- truncated: bool, 2 classes
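For orientation, a minimal sketch of how rows with this schema could be consumed with the `datasets` library; the dataset identifier below is a placeholder, not the dump's real location.

```python
# Hypothetical reader for rows with the schema above; the dataset
# identifier is a placeholder (assumption), not the dump's real location.
from datasets import load_dataset

rows = load_dataset("your-org/your-code-dump", split="train")  # placeholder
for row in rows:
    if not row["truncated"]:
        print(row["repo"], row["file_path"], row["license"], len(row["content"]))
```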
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/scripts/constraints/extract.py
scripts/constraints/extract.py
#!/usr/bin/env python3
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

"""Extracts random constraints from reference files."""

import argparse
import random
import sys

from sacrebleu import extract_ngrams


def get_phrase(words, index, length):
    assert index < len(words) - length + 1
    phr = ' '.join(words[index:index + length])
    for i in range(index, index + length):
        words.pop(index)
    return phr


def main(args):
    if args.seed:
        random.seed(args.seed)

    for line in sys.stdin:
        constraints = []

        def add_constraint(constraint):
            constraints.append(constraint)

        source = line.rstrip()
        if '\t' in line:
            source, target = line.split('\t')
            if args.add_sos:
                target = f"<s> {target}"
            if args.add_eos:
                target = f"{target} </s>"

            if len(target.split()) >= args.len:
                words = [target]

                num = args.number
                choices = {}
                for i in range(num):
                    if len(words) == 0:
                        break
                    segmentno = random.choice(range(len(words)))
                    segment = words.pop(segmentno)
                    tokens = segment.split()
                    phrase_index = random.choice(range(len(tokens)))
                    choice = " ".join(tokens[phrase_index:min(len(tokens), phrase_index + args.len)])
                    for j in range(phrase_index, min(len(tokens), phrase_index + args.len)):
                        tokens.pop(phrase_index)
                    if phrase_index > 0:
                        words.append(" ".join(tokens[0:phrase_index]))
                    if phrase_index + 1 < len(tokens):
                        words.append(" ".join(tokens[phrase_index:]))
                    choices[target.find(choice)] = choice

                    # mask out with spaces
                    target = target.replace(choice, " " * len(choice), 1)

                for key in sorted(choices.keys()):
                    add_constraint(choices[key])

        print(source, *constraints, sep="\t")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--number', '-n', type=int, default=1, help="number of phrases")
    parser.add_argument('--len', '-l', type=int, default=1, help="phrase length")
    parser.add_argument('--add-sos', default=False, action='store_true', help='add <s> token')
    parser.add_argument('--add-eos', default=False, action='store_true', help='add </s> token')
    parser.add_argument('--seed', "-s", default=0, type=int)

    args = parser.parse_args()
    main(args)
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
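A quick way to exercise `scripts/constraints/extract.py` end to end; the sentence pair, flag values, and seed below are illustrative, not taken from the repo.

```python
# Pipe one "source<TAB>target" line through the script and inspect the
# constraints it appends; the sentences and flag values are made up.
import subprocess

line = "ein haus mit garten\ta house with a garden\n"
result = subprocess.run(
    ["python", "scripts/constraints/extract.py",
     "--number", "2", "--len", "1", "--seed", "42"],
    input=line, capture_output=True, text=True,
)
# Output: the source, then each sampled constraint, tab-separated.
print(result.stdout.rstrip("\n").split("\t"))
```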
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq_cli/train.py
fairseq_cli/train.py
#!/usr/bin/env python3 -u # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Train a new model on one or across multiple GPUs. """ import argparse import logging import math import os import random import sys import numpy as np import torch from fairseq import ( checkpoint_utils, distributed_utils, options, quantization_utils, tasks, utils, ) from fairseq.data import iterators from fairseq.logging import meters, metrics, progress_bar from fairseq.model_parallel.megatron_trainer import MegatronTrainer from fairseq.trainer import Trainer from fairseq.modules import TransformerDecoderLayer logging.basicConfig( format="%(asctime)s | %(levelname)s | %(name)s | %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=os.environ.get("LOGLEVEL", "INFO").upper(), stream=sys.stdout, ) logger = logging.getLogger("fairseq_cli.train") def main(args): utils.import_user_module(args) assert ( args.max_tokens is not None or args.max_sentences is not None ), "Must specify batch size either with --max-tokens or --max-sentences" metrics.reset() np.random.seed(args.seed) utils.set_torch_seed(args.seed) if distributed_utils.is_master(args): checkpoint_utils.verify_checkpoint_directory(args.save_dir) # Print args logger.info(args) # Setup task, e.g., translation, language modeling, etc. task = tasks.setup_task(args) # Load valid dataset (we load training data below, based on the latest checkpoint) for valid_sub_split in args.valid_subset.split(","): task.load_dataset(valid_sub_split, combine=False, epoch=1) # Build model and criterion model = task.build_model(args) criterion = task.build_criterion(args) logger.info(model) logger.info("task: {} ({})".format(args.task, task.__class__.__name__)) logger.info("model: {} ({})".format(args.arch, model.__class__.__name__)) logger.info( "criterion: {} ({})".format(args.criterion, criterion.__class__.__name__) ) logger.info( "num. model params: {} (num. 
trained: {})".format( sum(p.numel() for p in model.parameters()), sum(p.numel() for p in model.parameters() if p.requires_grad), ) ) # (optionally) Configure quantization if args.quantization_config_path is not None: quantizer = quantization_utils.Quantizer( config_path=args.quantization_config_path, max_epoch=args.max_epoch, max_update=args.max_update, ) else: quantizer = None # Build trainer if args.model_parallel_size == 1: trainer = Trainer(args, task, model, criterion, quantizer) else: trainer = MegatronTrainer(args, task, model, criterion) logger.info( "training on {} devices (GPUs/TPUs)".format(args.distributed_world_size) ) logger.info( "max tokens per GPU = {} and max sentences per GPU = {}".format( args.max_tokens, args.max_sentences ) ) # Load the latest checkpoint if one is available and restore the # corresponding train iterator extra_state, epoch_itr = checkpoint_utils.load_checkpoint( args, trainer, # don't cache epoch iterators for sharded datasets disable_iterator_cache=task.has_sharded_data("train"), ) # Train until the learning rate gets too small max_epoch = args.max_epoch or math.inf lr = trainer.get_lr() train_meter = meters.StopwatchMeter() train_meter.start() while lr > args.min_lr and epoch_itr.next_epoch_idx <= max_epoch: # train for one epoch valid_losses, should_stop = train(args, trainer, task, epoch_itr) if should_stop: break # only use first validation loss to update the learning rate lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0]) epoch_itr = trainer.get_train_iterator( epoch_itr.next_epoch_idx, # sharded data: get train iterator for next epoch load_dataset=task.has_sharded_data("train"), # don't cache epoch iterators for sharded datasets disable_iterator_cache=task.has_sharded_data("train"), ) train_meter.stop() logger.info("done training in {:.1f} seconds".format(train_meter.sum)) def should_stop_early(args, valid_loss): # skip check if no validation was done in the current epoch if valid_loss is None: return False if args.patience <= 0: return False def is_better(a, b): return a > b if args.maximize_best_checkpoint_metric else a < b prev_best = getattr(should_stop_early, "best", None) if prev_best is None or is_better(valid_loss, prev_best): should_stop_early.best = valid_loss should_stop_early.num_runs = 0 return False else: should_stop_early.num_runs += 1 if should_stop_early.num_runs >= args.patience: logger.info( "early stop since valid performance hasn't improved for last {} runs".format( args.patience ) ) return True else: return False @metrics.aggregate("train") def train(args, trainer, task, epoch_itr): """Train the model for one epoch and return validation losses.""" clear_prev_tokens(trainer) # Initialize data iterator itr = epoch_itr.next_epoch_itr( fix_batches_to_gpus=args.fix_batches_to_gpus, shuffle=(epoch_itr.next_epoch_idx > args.curriculum), ) update_freq = ( args.update_freq[epoch_itr.epoch - 1] if epoch_itr.epoch <= len(args.update_freq) else args.update_freq[-1] ) itr = iterators.GroupedIterator(itr, update_freq) if getattr(args, "tpu", False): itr = utils.tpu_data_loader(itr) progress = progress_bar.progress_bar( itr, log_format=args.log_format, log_interval=args.log_interval, epoch=epoch_itr.epoch, tensorboard_logdir=( args.tensorboard_logdir if distributed_utils.is_master(args) else None ), default_log_format=("tqdm" if not args.no_progress_bar else "simple"), ) trainer.begin_epoch(epoch_itr.epoch) valid_subsets = args.valid_subset.split(",") should_stop = False num_updates = trainer.get_num_updates() for i, samples in 
enumerate(progress): with metrics.aggregate("train_inner"), torch.autograd.profiler.record_function( "train_step-%d" % i ): log_output = trainer.train_step(samples) if log_output is not None: # not OOM, overflow, ... # log mid-epoch stats num_updates = trainer.get_num_updates() if num_updates % args.log_interval == 0: stats = get_training_stats(metrics.get_smoothed_values("train_inner")) progress.log(stats, tag="train_inner", step=num_updates) # reset mid-epoch stats after each log interval # the end-of-epoch stats will still be preserved metrics.reset_meters("train_inner") end_of_epoch = not itr.has_next() valid_losses, should_stop = validate_and_save( args, trainer, task, epoch_itr, valid_subsets, end_of_epoch ) if should_stop: break # log end-of-epoch stats logger.info("end of epoch {} (average epoch stats below)".format(epoch_itr.epoch)) stats = get_training_stats(metrics.get_smoothed_values("train")) progress.print(stats, tag="train", step=num_updates) # reset epoch-level meters metrics.reset_meters("train") return valid_losses, should_stop def validate_and_save(args, trainer, task, epoch_itr, valid_subsets, end_of_epoch): num_updates = trainer.get_num_updates() max_update = args.max_update or math.inf do_save = ( (end_of_epoch and epoch_itr.epoch % args.save_interval == 0) or num_updates >= max_update or ( args.save_interval_updates > 0 and num_updates > 0 and num_updates % args.save_interval_updates == 0 and num_updates >= args.validate_after_updates ) ) do_validate = ( (not end_of_epoch and do_save) # validate during mid-epoch saves or (end_of_epoch and epoch_itr.epoch % args.validate_interval == 0) or num_updates >= max_update or ( args.validate_interval_updates > 0 and num_updates > 0 and num_updates % args.validate_interval_updates == 0 ) ) and not args.disable_validation # Validate valid_losses = [None] if do_validate: valid_losses = validate(args, trainer, task, epoch_itr, valid_subsets) # Stopping conditions should_stop = ( should_stop_early(args, valid_losses[0]) or num_updates >= max_update or ( args.stop_time_hours > 0 and trainer.cumulative_training_time() / (60 * 60) > args.stop_time_hours ) ) # Save checkpoint if do_save or should_stop: logger.info("begin save checkpoint") checkpoint_utils.save_checkpoint(args, trainer, epoch_itr, valid_losses[0]) return valid_losses, should_stop def get_training_stats(stats): stats["wall"] = round(metrics.get_meter("default", "wall").elapsed_time, 0) return stats def clear_prev_tokens(trainer): # This function clears the cache before/after validation. 
for name, module in trainer.get_model().named_modules(): if isinstance(module, TransformerDecoderLayer): module.prev_out = None def validate(args, trainer, task, epoch_itr, subsets): """Evaluate the model on the validation set(s) and return the losses.""" clear_prev_tokens(trainer) if args.fixed_validation_seed is not None: # set fixed seed for every validation utils.set_torch_seed(args.fixed_validation_seed) valid_losses = [] for subset in subsets: logger.info('begin validation on "{}" subset'.format(subset)) # Initialize data iterator itr = trainer.get_valid_iterator(subset).next_epoch_itr(shuffle=False) if getattr(args, "tpu", False): itr = utils.tpu_data_loader(itr) progress = progress_bar.progress_bar( itr, log_format=args.log_format, log_interval=args.log_interval, epoch=epoch_itr.epoch, prefix=f"valid on '{subset}' subset", tensorboard_logdir=( args.tensorboard_logdir if distributed_utils.is_master(args) else None ), default_log_format=("tqdm" if not args.no_progress_bar else "simple"), ) # create a new root metrics aggregator so validation metrics # don't pollute other aggregators (e.g., train meters) with metrics.aggregate(new_root=True) as agg: for sample in progress: trainer.valid_step(sample) # log validation stats stats = get_valid_stats(args, trainer, agg.get_smoothed_values()) progress.print(stats, tag=subset, step=trainer.get_num_updates()) valid_losses.append(stats[args.best_checkpoint_metric]) return valid_losses def get_valid_stats(args, trainer, stats): stats["num_updates"] = trainer.get_num_updates() if hasattr(checkpoint_utils.save_checkpoint, "best"): key = "best_{0}".format(args.best_checkpoint_metric) best_function = max if args.maximize_best_checkpoint_metric else min stats[key] = best_function( checkpoint_utils.save_checkpoint.best, stats[args.best_checkpoint_metric] ) return stats def cli_main(modify_parser=None): parser = options.get_training_parser() args = options.parse_args_and_arch(parser, modify_parser=modify_parser) if args.profile: with torch.cuda.profiler.profile(): with torch.autograd.profiler.emit_nvtx(): distributed_utils.call_main(args, main) else: distributed_utils.call_main(args, main) if __name__ == "__main__": cli_main()
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
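`fairseq_cli/train.py` exposes `cli_main()`, so it can be driven programmatically as well as from the shell. A minimal sketch, assuming a binarized dataset at a placeholder path; the arch and batch-size flags are illustrative.

```python
# Drive the training entry point programmatically; the data path, arch,
# and batch size are placeholders (main() requires --max-tokens or
# --max-sentences, per its assert).
import sys
from fairseq_cli import train

sys.argv = [
    "fairseq-train", "data-bin/wikitext-103",
    "--task", "language_modeling",
    "--arch", "transformer_lm",
    "--max-tokens", "3072",
    "--save-dir", "checkpoints/demo",
]
train.cli_main()
```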
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq_cli/generate.py
fairseq_cli/generate.py
#!/usr/bin/env python3 -u # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Translate pre-processed data with a trained model. """ import logging import math import os import sys import numpy as np import torch from fairseq import checkpoint_utils, options, scoring, tasks, utils from fairseq.logging import progress_bar from fairseq.logging.meters import StopwatchMeter, TimeMeter from fairseq.data import encoders def main(args): assert args.path is not None, '--path required for generation!' assert not args.sampling or args.nbest == args.beam, \ '--sampling requires --nbest to be equal to --beam' assert args.replace_unk is None or args.dataset_impl == 'raw', \ '--replace-unk requires a raw text dataset (--dataset-impl=raw)' if args.results_path is not None: os.makedirs(args.results_path, exist_ok=True) output_path = os.path.join(args.results_path, 'generate-{}.txt'.format(args.gen_subset)) with open(output_path, 'w', buffering=1, encoding='utf-8') as h: return _main(args, h) else: return _main(args, sys.stdout) def get_symbols_to_strip_from_output(generator): if hasattr(generator, 'symbols_to_strip_from_output'): return generator.symbols_to_strip_from_output else: return {generator.eos} def _main(args, output_file): logging.basicConfig( format='%(asctime)s | %(levelname)s | %(name)s | %(message)s', datefmt='%Y-%m-%d %H:%M:%S', level=os.environ.get('LOGLEVEL', 'INFO').upper(), stream=output_file, ) logger = logging.getLogger('fairseq_cli.generate') utils.import_user_module(args) if args.max_tokens is None and args.max_sentences is None: args.max_tokens = 12000 logger.info(args) # Fix seed for stochastic decoding if args.seed is not None and not args.no_seed_provided: np.random.seed(args.seed) utils.set_torch_seed(args.seed) use_cuda = torch.cuda.is_available() and not args.cpu # Load dataset splits task = tasks.setup_task(args) task.load_dataset(args.gen_subset) # Set dictionaries try: src_dict = getattr(task, 'source_dictionary', None) except NotImplementedError: src_dict = None tgt_dict = task.target_dictionary # Load ensemble logger.info('loading model(s) from {}'.format(args.path)) models, _model_args = checkpoint_utils.load_model_ensemble( utils.split_paths(args.path), arg_overrides=eval(args.model_overrides), task=task, suffix=getattr(args, "checkpoint_suffix", ""), ) # Optimize ensemble for generation for model in models: model.prepare_for_inference_(args) if args.fp16: model.half() if use_cuda: model.cuda() # Load alignment dictionary for unknown word replacement # (None if no unknown word replacement, empty if no path to align dictionary) align_dict = utils.load_align_dict(args.replace_unk) # Load dataset (possibly sharded) itr = task.get_batch_iterator( dataset=task.dataset(args.gen_subset), max_tokens=args.max_tokens, max_sentences=args.max_sentences, max_positions=utils.resolve_max_positions( task.max_positions(), *[model.max_positions() for model in models] ), ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test, required_batch_size_multiple=args.required_batch_size_multiple, num_shards=args.num_shards, shard_id=args.shard_id, num_workers=args.num_workers, data_buffer_size=args.data_buffer_size, ).next_epoch_itr(shuffle=False) progress = progress_bar.progress_bar( itr, log_format=args.log_format, log_interval=args.log_interval, default_log_format=('tqdm' if not args.no_progress_bar else 'none'), ) # Initialize generator gen_timer = 
StopwatchMeter() generator = task.build_generator(models, args) # Handle tokenization and BPE tokenizer = encoders.build_tokenizer(args) bpe = encoders.build_bpe(args) def decode_fn(x): if bpe is not None: x = bpe.decode(x) if tokenizer is not None: x = tokenizer.decode(x) return x scorer = scoring.build_scorer(args, tgt_dict) num_sentences = 0 has_target = True wps_meter = TimeMeter() for sample in progress: sample = utils.move_to_cuda(sample) if use_cuda else sample if 'net_input' not in sample: continue prefix_tokens = None if args.prefix_size > 0: prefix_tokens = sample['target'][:, :args.prefix_size] constraints = None if "constraints" in sample: constraints = sample["constraints"] gen_timer.start() hypos = task.inference_step(generator, models, sample, prefix_tokens=prefix_tokens, constraints=constraints) num_generated_tokens = sum(len(h[0]['tokens']) for h in hypos) gen_timer.stop(num_generated_tokens) for i, sample_id in enumerate(sample['id'].tolist()): has_target = sample['target'] is not None # Remove padding if 'src_tokens' in sample['net_input']: src_tokens = utils.strip_pad(sample['net_input']['src_tokens'][i, :], tgt_dict.pad()) else: src_tokens = None target_tokens = None if has_target: target_tokens = utils.strip_pad(sample['target'][i, :], tgt_dict.pad()).int().cpu() # Either retrieve the original sentences or regenerate them from tokens. if align_dict is not None: src_str = task.dataset(args.gen_subset).src.get_original_text(sample_id) target_str = task.dataset(args.gen_subset).tgt.get_original_text(sample_id) else: if src_dict is not None: src_str = src_dict.string(src_tokens, args.remove_bpe) else: src_str = "" if has_target: target_str = tgt_dict.string( target_tokens, args.remove_bpe, escape_unk=True, extra_symbols_to_ignore=get_symbols_to_strip_from_output(generator), ) src_str = decode_fn(src_str) if has_target: target_str = decode_fn(target_str) if not args.quiet: if src_dict is not None: print('S-{}\t{}'.format(sample_id, src_str), file=output_file) if has_target: print('T-{}\t{}'.format(sample_id, target_str), file=output_file) # Process top predictions for j, hypo in enumerate(hypos[i][:args.nbest]): hypo_tokens, hypo_str, alignment = utils.post_process_prediction( hypo_tokens=hypo['tokens'].int().cpu(), src_str=src_str, alignment=hypo['alignment'], align_dict=align_dict, tgt_dict=tgt_dict, remove_bpe=args.remove_bpe, extra_symbols_to_ignore=get_symbols_to_strip_from_output(generator), ) detok_hypo_str = decode_fn(hypo_str) if not args.quiet: score = hypo['score'] / math.log(2) # convert to base 2 # original hypothesis (after tokenization and BPE) print('H-{}\t{}\t{}'.format(sample_id, score, hypo_str), file=output_file) # detokenized hypothesis print('D-{}\t{}\t{}'.format(sample_id, score, detok_hypo_str), file=output_file) print('P-{}\t{}'.format( sample_id, ' '.join(map( lambda x: '{:.4f}'.format(x), # convert from base e to base 2 hypo['positional_scores'].div_(math.log(2)).tolist(), )) ), file=output_file) if args.print_alignment: print('A-{}\t{}'.format( sample_id, ' '.join(['{}-{}'.format(src_idx, tgt_idx) for src_idx, tgt_idx in alignment]) ), file=output_file) if args.print_step: print('I-{}\t{}'.format(sample_id, hypo['steps']), file=output_file) if getattr(args, 'retain_iter_history', False): for step, h in enumerate(hypo['history']): _, h_str, _ = utils.post_process_prediction( hypo_tokens=h['tokens'].int().cpu(), src_str=src_str, alignment=None, align_dict=None, tgt_dict=tgt_dict, remove_bpe=None, ) print('E-{}_{}\t{}'.format(sample_id, step, 
h_str), file=output_file) # Score only the top hypothesis if has_target and j == 0: if align_dict is not None or args.remove_bpe is not None: # Convert back to tokens for evaluation with unk replacement and/or without BPE target_tokens = tgt_dict.encode_line(target_str, add_if_not_exist=True) hypo_tokens = tgt_dict.encode_line(detok_hypo_str, add_if_not_exist=True) if hasattr(scorer, 'add_string'): scorer.add_string(target_str, detok_hypo_str) else: scorer.add(target_tokens, hypo_tokens) wps_meter.update(num_generated_tokens) progress.log({'wps': round(wps_meter.avg)}) num_sentences += sample["nsentences"] if "nsentences" in sample else sample['id'].numel() logger.info('NOTE: hypothesis and token scores are output in base 2') logger.info('Translated {} sentences ({} tokens) in {:.1f}s ({:.2f} sentences/s, {:.2f} tokens/s)'.format( num_sentences, gen_timer.n, gen_timer.sum, num_sentences / gen_timer.sum, 1. / gen_timer.avg)) if has_target: if args.bpe and not args.sacrebleu: if args.remove_bpe: logger.warning("BLEU score is being computed by splitting detokenized string on spaces, this is probably not what you want. Use --sacrebleu for standard 13a BLEU tokenization") else: logger.warning("If you are using BPE on the target side, the BLEU score is computed on BPE tokens, not on proper words. Use --sacrebleu for standard 13a BLEU tokenization") # use print to be consistent with other main outputs: S-, H-, T-, D- and so on print( 'Generate {} with beam={}: {}'.format(args.gen_subset, args.beam, scorer.result_string()), file=output_file) return scorer def cli_main(): parser = options.get_generation_parser() args = options.parse_args_and_arch(parser) main(args) if __name__ == '__main__': cli_main()
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
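`generate.py` reports hypothesis and token scores in base 2 (note its `score / math.log(2)` conversion); a one-liner confirming that arithmetic on a made-up log-probability:

```python
import math

score_base_e = -2.3026             # ~ln(0.1), illustrative
print(score_base_e / math.log(2))  # ~-3.32, the value printed on H-/D- lines
```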
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq_cli/validate.py
fairseq_cli/validate.py
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from itertools import chain
import logging
import os
import sys

import torch

from fairseq import checkpoint_utils, distributed_utils, options, utils
from fairseq.logging import metrics, progress_bar


logging.basicConfig(
    format='%(asctime)s | %(levelname)s | %(name)s | %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S',
    level=os.environ.get('LOGLEVEL', 'INFO').upper(),
    stream=sys.stdout,
)
logger = logging.getLogger('fairseq_cli.validate')


def main(args, override_args=None):
    utils.import_user_module(args)

    assert args.max_tokens is not None or args.max_sentences is not None, \
        'Must specify batch size either with --max-tokens or --max-sentences'

    use_fp16 = args.fp16
    use_cuda = torch.cuda.is_available() and not args.cpu

    if use_cuda:
        torch.cuda.set_device(args.device_id)

    if override_args is not None:
        overrides = vars(override_args)
        overrides.update(eval(getattr(override_args, 'model_overrides', '{}')))
    else:
        overrides = None

    # Load ensemble
    logger.info('loading model(s) from {}'.format(args.path))
    models, model_args, task = checkpoint_utils.load_model_ensemble_and_task(
        [args.path],
        arg_overrides=overrides,
        suffix=getattr(args, "checkpoint_suffix", ""),
    )
    model = models[0]

    # Move models to GPU
    for model in models:
        if use_fp16:
            model.half()
        if use_cuda:
            model.cuda()

    # Print args
    logger.info(model_args)

    # Build criterion
    criterion = task.build_criterion(model_args)
    criterion.eval()

    for subset in args.valid_subset.split(','):
        try:
            task.load_dataset(subset, combine=False, epoch=1)
            dataset = task.dataset(subset)
        except KeyError:
            raise Exception('Cannot find dataset: ' + subset)

        # Initialize data iterator
        itr = task.get_batch_iterator(
            dataset=dataset,
            max_tokens=args.max_tokens,
            max_sentences=args.max_sentences,
            max_positions=utils.resolve_max_positions(
                task.max_positions(),
                *[m.max_positions() for m in models],
            ),
            ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
            required_batch_size_multiple=args.required_batch_size_multiple,
            seed=args.seed,
            num_shards=args.distributed_world_size,
            shard_id=args.distributed_rank,
            num_workers=args.num_workers,
            data_buffer_size=args.data_buffer_size,
        ).next_epoch_itr(shuffle=False)
        progress = progress_bar.progress_bar(
            itr,
            log_format=args.log_format,
            log_interval=args.log_interval,
            prefix=f"valid on '{subset}' subset",
            default_log_format=('tqdm' if not args.no_progress_bar else 'simple'),
        )

        log_outputs = []
        for i, sample in enumerate(progress):
            sample = utils.move_to_cuda(sample) if use_cuda else sample
            _loss, _sample_size, log_output = task.valid_step(sample, model, criterion)
            progress.log(log_output, step=i)
            log_outputs.append(log_output)

        if args.distributed_world_size > 1:
            log_outputs = distributed_utils.all_gather_list(
                log_outputs,
                max_size=getattr(args, 'all_gather_list_size', 16384),
            )
            log_outputs = list(chain.from_iterable(log_outputs))

        with metrics.aggregate() as agg:
            task.reduce_metrics(log_outputs, criterion)
            log_output = agg.get_smoothed_values()

        progress.print(log_output, tag=subset, step=i)


def cli_main():
    parser = options.get_validation_parser()
    args = options.parse_args_and_arch(parser)

    # only override args that are explicitly given on the command line
    override_parser = options.get_validation_parser()
    override_args = options.parse_args_and_arch(override_parser, suppress_defaults=True)

    distributed_utils.call_main(args, main, override_args=override_args)


if __name__ == '__main__':
    cli_main()
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
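A sketch of launching the validation CLI the same way its `cli_main()` does; the checkpoint and data paths are placeholders.

```python
# Placeholder paths; validate.cli_main() parses sys.argv just like this.
import sys
from fairseq_cli import validate

sys.argv = [
    "fairseq-validate", "data-bin/wikitext-103",
    "--task", "language_modeling",
    "--path", "checkpoints/demo/checkpoint_best.pt",
    "--max-sentences", "8",
    "--valid-subset", "valid",
]
validate.cli_main()
```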
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq_cli/interactive.py
fairseq_cli/interactive.py
#!/usr/bin/env python3 -u # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Translate raw text with a trained model. Batches data on-the-fly. """ from collections import namedtuple import fileinput import logging import math import sys import time import os import numpy as np import torch from fairseq import checkpoint_utils, distributed_utils, options, tasks, utils from fairseq.data import encoders from fairseq.token_generation_constraints import pack_constraints, unpack_constraints from .generate import get_symbols_to_strip_from_output logging.basicConfig( format='%(asctime)s | %(levelname)s | %(name)s | %(message)s', datefmt='%Y-%m-%d %H:%M:%S', level=os.environ.get('LOGLEVEL', 'INFO').upper(), stream=sys.stdout, ) logger = logging.getLogger('fairseq_cli.interactive') Batch = namedtuple('Batch', 'ids src_tokens src_lengths constraints') Translation = namedtuple('Translation', 'src_str hypos pos_scores alignments') def buffered_read(input, buffer_size): buffer = [] with fileinput.input(files=[input], openhook=fileinput.hook_encoded("utf-8")) as h: for src_str in h: buffer.append(src_str.strip()) if len(buffer) >= buffer_size: yield buffer buffer = [] if len(buffer) > 0: yield buffer def make_batches(lines, args, task, max_positions, encode_fn): def encode_fn_target(x): return encode_fn(x) if args.constraints: # Strip (tab-delimited) contraints, if present, from input lines, # store them in batch_constraints batch_constraints = [list() for _ in lines] for i, line in enumerate(lines): if "\t" in line: lines[i], *batch_constraints[i] = line.split("\t") # Convert each List[str] to List[Tensor] for i, constraint_list in enumerate(batch_constraints): batch_constraints[i] = [task.target_dictionary.encode_line( encode_fn_target(constraint), append_eos=False, add_if_not_exist=False, ) for constraint in constraint_list] tokens = [ task.source_dictionary.encode_line( encode_fn(src_str), add_if_not_exist=False ).long() for src_str in lines ] if args.constraints: constraints_tensor = pack_constraints(batch_constraints) else: constraints_tensor = None lengths = [t.numel() for t in tokens] itr = task.get_batch_iterator( dataset=task.build_dataset_for_inference(tokens, lengths, constraints=constraints_tensor), max_tokens=args.max_tokens, max_sentences=args.max_sentences, max_positions=max_positions, ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test ).next_epoch_itr(shuffle=False) for batch in itr: ids = batch['id'] src_tokens = batch['net_input']['src_tokens'] src_lengths = batch['net_input']['src_lengths'] constraints = batch.get("constraints", None) yield Batch( ids=ids, src_tokens=src_tokens, src_lengths=src_lengths, constraints=constraints, ) def main(args): start_time = time.time() total_translate_time = 0 utils.import_user_module(args) if args.buffer_size < 1: args.buffer_size = 1 if args.max_tokens is None and args.max_sentences is None: args.max_sentences = 1 assert not args.sampling or args.nbest == args.beam, \ '--sampling requires --nbest to be equal to --beam' assert not args.max_sentences or args.max_sentences <= args.buffer_size, \ '--max-sentences/--batch-size cannot be larger than --buffer-size' logger.info(args) # Fix seed for stochastic decoding if args.seed is not None and not args.no_seed_provided: np.random.seed(args.seed) utils.set_torch_seed(args.seed) use_cuda = torch.cuda.is_available() and not args.cpu # Setup task, e.g., translation 
task = tasks.setup_task(args) # Load ensemble logger.info('loading model(s) from {}'.format(args.path)) models, _model_args = checkpoint_utils.load_model_ensemble( args.path.split(os.pathsep), arg_overrides=eval(args.model_overrides), task=task, suffix=getattr(args, "checkpoint_suffix", ""), ) # Set dictionaries src_dict = task.source_dictionary tgt_dict = task.target_dictionary # Optimize ensemble for generation for model in models: model.prepare_for_inference_(args) if args.fp16: model.half() if use_cuda: model.cuda() # Initialize generator generator = task.build_generator(models, args) # Handle tokenization and BPE tokenizer = encoders.build_tokenizer(args) bpe = encoders.build_bpe(args) def encode_fn(x): if tokenizer is not None: x = tokenizer.encode(x) if bpe is not None: x = bpe.encode(x) return x def decode_fn(x): if bpe is not None: x = bpe.decode(x) if tokenizer is not None: x = tokenizer.decode(x) return x # Load alignment dictionary for unknown word replacement # (None if no unknown word replacement, empty if no path to align dictionary) align_dict = utils.load_align_dict(args.replace_unk) max_positions = utils.resolve_max_positions( task.max_positions(), *[model.max_positions() for model in models] ) if args.constraints: logger.warning("NOTE: Constrained decoding currently assumes a shared subword vocabulary.") if args.buffer_size > 1: logger.info('Sentence buffer size: %s', args.buffer_size) logger.info('NOTE: hypothesis and token scores are output in base 2') logger.info('Type the input sentence and press return:') start_id = 0 for inputs in buffered_read(args.input, args.buffer_size): results = [] for batch in make_batches(inputs, args, task, max_positions, encode_fn): bsz = batch.src_tokens.size(0) src_tokens = batch.src_tokens src_lengths = batch.src_lengths constraints = batch.constraints if use_cuda: src_tokens = src_tokens.cuda() src_lengths = src_lengths.cuda() if constraints is not None: constraints = constraints.cuda() sample = { 'net_input': { 'src_tokens': src_tokens, 'src_lengths': src_lengths, }, } translate_start_time = time.time() translations = task.inference_step(generator, models, sample, constraints=constraints) translate_time = time.time() - translate_start_time total_translate_time += translate_time list_constraints = [[] for _ in range(bsz)] if args.constraints: list_constraints = [unpack_constraints(c) for c in constraints] for i, (id, hypos) in enumerate(zip(batch.ids.tolist(), translations)): src_tokens_i = utils.strip_pad(src_tokens[i], tgt_dict.pad()) constraints = list_constraints[i] results.append((start_id + id, src_tokens_i, hypos, { "constraints": constraints, "time": translate_time / len(translations) } )) # sort output to match input order for id_, src_tokens, hypos, info in sorted(results, key=lambda x: x[0]): if src_dict is not None: src_str = src_dict.string(src_tokens, args.remove_bpe) print('S-{}\t{}'.format(id_, src_str)) print("W-{}\t{:.3f}\tseconds".format(id_, info["time"])) for constraint in info["constraints"]: print("C-{}\t{}".format(id_, tgt_dict.string(constraint, args.remove_bpe))) # Process top predictions for hypo in hypos[:min(len(hypos), args.nbest)]: hypo_tokens, hypo_str, alignment = utils.post_process_prediction( hypo_tokens=hypo['tokens'].int().cpu(), src_str=src_str, alignment=hypo['alignment'], align_dict=align_dict, tgt_dict=tgt_dict, remove_bpe=args.remove_bpe, extra_symbols_to_ignore=get_symbols_to_strip_from_output(generator), ) detok_hypo_str = decode_fn(hypo_str) score = hypo['score'] / math.log(2) # convert to 
base 2 # original hypothesis (after tokenization and BPE) print('H-{}\t{}\t{}'.format(id_, score, hypo_str)) # detokenized hypothesis print('D-{}\t{}\t{}'.format(id_, score, detok_hypo_str)) print('P-{}\t{}'.format( id_, ' '.join(map( lambda x: '{:.4f}'.format(x), # convert from base e to base 2 hypo['positional_scores'].div_(math.log(2)).tolist(), )) )) if args.print_alignment: alignment_str = " ".join(["{}-{}".format(src, tgt) for src, tgt in alignment]) print('A-{}\t{}'.format( id_, alignment_str )) # update running id_ counter start_id += len(inputs) logger.info("Total time: {:.3f} seconds; translation time: {:.3f}".format(time.time() - start_time, total_translate_time)) def cli_main(): parser = options.get_interactive_generation_parser() args = options.parse_args_and_arch(parser) distributed_utils.call_main(args, main) if __name__ == '__main__': cli_main()
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
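`interactive.py` batches its input on the fly through `buffered_read()`, which yields stripped input lines in chunks of at most `buffer_size`. A small demonstration, assuming a placeholder text file:

```python
# buffered_read() yields stripped input lines in chunks of at most
# buffer_size; passing "-" reads stdin instead of a file.
from fairseq_cli.interactive import buffered_read

for chunk in buffered_read("sentences.txt", buffer_size=4):  # placeholder file
    print(len(chunk), "sentences in this buffer")
```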
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq_cli/__init__.py
fairseq_cli/__init__.py
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq_cli/score.py
fairseq_cli/score.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

"""
BLEU scoring of generated translations against reference translations.
"""

import argparse
import os
import sys

from fairseq.scoring import bleu
from fairseq.data import dictionary


def get_parser():
    parser = argparse.ArgumentParser(description='Command-line script for BLEU scoring.')
    # fmt: off
    parser.add_argument('-s', '--sys', default='-', help='system output')
    parser.add_argument('-r', '--ref', required=True, help='references')
    parser.add_argument('-o', '--order', default=4, metavar='N', type=int,
                        help='consider ngrams up to this order')
    parser.add_argument('--ignore-case', action='store_true',
                        help='case-insensitive scoring')
    parser.add_argument('--sacrebleu', action='store_true',
                        help='score with sacrebleu')
    parser.add_argument('--sentence-bleu', action='store_true',
                        help='report sentence-level BLEUs (i.e., with +1 smoothing)')
    # fmt: on
    return parser


def cli_main():
    parser = get_parser()
    args = parser.parse_args()
    print(args)

    assert args.sys == '-' or os.path.exists(args.sys), \
        "System output file {} does not exist".format(args.sys)
    assert os.path.exists(args.ref), \
        "Reference file {} does not exist".format(args.ref)

    dict = dictionary.Dictionary()

    def readlines(fd):
        for line in fd.readlines():
            if args.ignore_case:
                yield line.lower()
            else:
                yield line

    if args.sacrebleu:
        import sacrebleu

        def score(fdsys):
            with open(args.ref) as fdref:
                print(sacrebleu.corpus_bleu(fdsys, [fdref]))
    elif args.sentence_bleu:
        def score(fdsys):
            with open(args.ref) as fdref:
                scorer = bleu.Scorer(dict.pad(), dict.eos(), dict.unk())
                for i, (sys_tok, ref_tok) in enumerate(zip(readlines(fdsys), readlines(fdref))):
                    scorer.reset(one_init=True)
                    sys_tok = dict.encode_line(sys_tok)
                    ref_tok = dict.encode_line(ref_tok)
                    scorer.add(ref_tok, sys_tok)
                    print(i, scorer.result_string(args.order))
    else:
        def score(fdsys):
            with open(args.ref) as fdref:
                scorer = bleu.Scorer(dict.pad(), dict.eos(), dict.unk())
                for sys_tok, ref_tok in zip(readlines(fdsys), readlines(fdref)):
                    sys_tok = dict.encode_line(sys_tok)
                    ref_tok = dict.encode_line(ref_tok)
                    scorer.add(ref_tok, sys_tok)
                print(scorer.result_string(args.order))

    if args.sys == '-':
        score(sys.stdin)
    else:
        with open(args.sys, 'r') as f:
            score(f)


if __name__ == '__main__':
    cli_main()
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
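A minimal invocation sketch for `score.py`, assuming generation output has already been split into hypothesis and reference files (the filenames are placeholders):

```python
# Filenames are placeholders; --sacrebleu switches to sacrebleu's
# corpus_bleu, as in the script's first branch.
import subprocess

subprocess.run(
    ["python", "fairseq_cli/score.py",
     "--sys", "gen.sys.txt", "--ref", "gen.ref.txt", "--sacrebleu"],
    check=True,
)
```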
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq_cli/eval_lm.py
fairseq_cli/eval_lm.py
#!/usr/bin/env python3 -u # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Evaluate the perplexity of a trained language model. """ import logging import math import os import torch from fairseq import checkpoint_utils, options, tasks, utils from fairseq.data import LMContextWindowDataset from fairseq.logging import progress_bar from fairseq.logging.meters import StopwatchMeter, TimeMeter from fairseq.sequence_scorer import SequenceScorer from fairseq import distributed_utils logging.basicConfig( format='%(asctime)s | %(levelname)s | %(name)s | %(message)s', datefmt='%Y-%m-%d %H:%M:%S', level=os.environ.get('LOGLEVEL', 'INFO').upper(), ) logger = logging.getLogger('fairseq_cli.eval_lm') class WordStat(object): def __init__(self, word, is_bpe): self.word = word self.is_bpe = is_bpe self.log_prob = 0 self.next_word_prob = 0 self.count = 0 self.missing_next_words = 0 def add(self, log_prob, next_word_prob): """ increments counters for the sum of log probs of current word and next word (given context ending at current word). Since the next word might be at the end of the example, or it might be not counted because it is not an ending subword unit, also keeps track of how many of those we have seen """ if next_word_prob is not None: self.next_word_prob += next_word_prob else: self.missing_next_words += 1 self.log_prob += log_prob self.count += 1 def __str__(self): return '{}\t{}\t{}\t{}\t{}\t{}'.format(self.word, self.count, self.log_prob, self.is_bpe, self.next_word_prob, self.count - self.missing_next_words) def main(parsed_args, **unused_kwargs): assert parsed_args.path is not None, '--path required for evaluation!' if torch.cuda.is_available() and not parsed_args.cpu: torch.cuda.set_device(parsed_args.device_id) utils.import_user_module(parsed_args) logger.info(parsed_args) use_cuda = torch.cuda.is_available() and not parsed_args.cpu task = tasks.setup_task(parsed_args) # Load ensemble logger.info('loading model(s) from {}'.format(parsed_args.path)) models, args = checkpoint_utils.load_model_ensemble( parsed_args.path.split(os.pathsep), arg_overrides=eval(parsed_args.model_overrides), task=task, suffix=getattr(parsed_args, "checkpoint_suffix", ""), ) for arg in vars(parsed_args).keys(): if arg not in { 'self_target', 'future_target', 'past_target', 'tokens_per_sample', 'output_size_dictionary', 'add_bos_token', }: setattr(args, arg, getattr(parsed_args, arg)) # reduce tokens per sample by the required context window size args.tokens_per_sample -= args.context_window task = tasks.setup_task(args) # Load dataset splits task.load_dataset(args.gen_subset) dataset = task.dataset(args.gen_subset) if args.context_window > 0: dataset = LMContextWindowDataset( dataset=dataset, tokens_per_sample=args.tokens_per_sample, context_window=args.context_window, pad_idx=task.source_dictionary.pad(), ) logger.info('{} {} {} examples'.format(args.data, args.gen_subset, len(dataset))) # Optimize ensemble for generation and set the source and dest dicts on the model (required by scorer) for model in models: model.prepare_for_inference_(args) if args.fp16: model.half() if use_cuda: model.cuda() assert len(models) > 0 logger.info('num. 
model params: {}'.format(sum(p.numel() for p in models[0].parameters()))) itr = task.get_batch_iterator( dataset=dataset, max_tokens=args.max_tokens or 36000, max_sentences=args.max_sentences, max_positions=utils.resolve_max_positions(*[ model.max_positions() for model in models ]), ignore_invalid_inputs=True, num_shards=args.num_shards, shard_id=args.shard_id, num_workers=args.num_workers, data_buffer_size=args.data_buffer_size, ).next_epoch_itr(shuffle=False) progress = progress_bar.progress_bar( itr, log_format=args.log_format, log_interval=args.log_interval, default_log_format=('tqdm' if not args.no_progress_bar else 'none'), ) gen_timer = StopwatchMeter() scorer = SequenceScorer(task.target_dictionary, args.softmax_batch) score_sum = 0. count = 0 if args.remove_bpe is not None: if args.remove_bpe == 'sentencepiece': raise NotImplementedError else: bpe_cont = args.remove_bpe.rstrip() bpe_toks = { i for i in range(len(task.source_dictionary)) if task.source_dictionary[i].endswith(bpe_cont) } bpe_len = len(bpe_cont) else: bpe_toks = None bpe_len = 0 word_stats = dict() wps_meter = TimeMeter() for sample in progress: if 'net_input' not in sample: continue sample = utils.move_to_cuda(sample) if use_cuda else sample gen_timer.start() hypos = scorer.generate(models, sample) gen_timer.stop(sample['ntokens']) for i, hypos_i in enumerate(hypos): hypo = hypos_i[0] sample_id = sample['id'][i] tokens = hypo['tokens'] tgt_len = tokens.numel() pos_scores = hypo['positional_scores'].float() if getattr(args, 'add_bos_token', False): assert hypo['tokens'][0].item() == task.target_dictionary.bos() tokens = tokens[1:] pos_scores = pos_scores[1:] skipped_toks = 0 if bpe_toks is not None: for i in range(tgt_len - 1): if tokens[i].item() in bpe_toks: skipped_toks += 1 pos_scores[i + 1] += pos_scores[i] pos_scores[i] = 0 inf_scores = pos_scores.eq(float('inf')) | pos_scores.eq(float('-inf')) if inf_scores.any(): logger.info( 'skipping tokens with inf scores:', task.target_dictionary.string(tokens[inf_scores.nonzero()]) ) pos_scores = pos_scores[(~inf_scores).nonzero()] score_sum += pos_scores.sum().cpu() count += pos_scores.numel() - skipped_toks if args.output_word_probs or args.output_word_stats: w = '' word_prob = [] is_bpe = False for i in range(len(tokens)): w_ind = tokens[i].item() w += task.source_dictionary[w_ind] if bpe_toks is not None and w_ind in bpe_toks: w = w[:-bpe_len] is_bpe = True else: word_prob.append((w, pos_scores[i].item())) next_prob = None ind = i + 1 while ind < len(tokens): if pos_scores[ind].item() != 0: next_prob = pos_scores[ind] break ind += 1 word_stats.setdefault(w, WordStat(w, is_bpe)).add(pos_scores[i].item(), next_prob) is_bpe = False w = '' if args.output_word_probs: logger.info( str(int(sample_id)) + " " + ('\t'.join('{} [{:2f}]'.format(x[0], x[1]) for x in word_prob)) ) wps_meter.update(sample['ntokens']) progress.log({'wps': round(wps_meter.avg)}) avg_nll_loss = -score_sum / count / math.log(2) # convert to base 2 logger.info('Evaluated {} tokens in {:.1f}s ({:.2f} tokens/s)'.format( gen_timer.n, gen_timer.sum, 1. / gen_timer.avg )) logger.info('Loss (base 2): {:.4f}, Perplexity: {:.2f}'.format( avg_nll_loss, 2**avg_nll_loss )) if args.output_word_stats: for ws in sorted(word_stats.values(), key=lambda x: x.count, reverse=True): logger.info(ws) def cli_main(): parser = options.get_eval_lm_parser() args = options.parse_args_and_arch(parser) distributed_utils.call_main(args, main) if __name__ == '__main__': cli_main()
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
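`eval_lm.py` converts the summed token log-probs to a base-2 loss and reports `2 ** loss` as perplexity; reproducing that arithmetic on made-up scores:

```python
import math

pos_scores = [-3.1, -2.4, -5.0]  # made-up per-token natural-log probs
avg_nll = -sum(pos_scores) / len(pos_scores) / math.log(2)
print("loss (base 2): {:.4f}, perplexity: {:.2f}".format(avg_nll, 2 ** avg_nll))
```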
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq_cli/preprocess.py
fairseq_cli/preprocess.py
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Data pre-processing: build vocabularies and binarize training data. """ from collections import Counter from itertools import zip_longest import logging from multiprocessing import Pool import os import shutil import sys from fairseq import options, tasks, utils from fairseq.data import indexed_dataset from fairseq.binarizer import Binarizer logging.basicConfig( format='%(asctime)s | %(levelname)s | %(name)s | %(message)s', datefmt='%Y-%m-%d %H:%M:%S', level=os.environ.get('LOGLEVEL', 'INFO').upper(), stream=sys.stdout, ) logger = logging.getLogger('fairseq_cli.preprocess') def main(args): utils.import_user_module(args) os.makedirs(args.destdir, exist_ok=True) logger.addHandler(logging.FileHandler( filename=os.path.join(args.destdir, 'preprocess.log'), )) logger.info(args) task = tasks.get_task(args.task) def train_path(lang): return "{}{}".format(args.trainpref, ("." + lang) if lang else "") def file_name(prefix, lang): fname = prefix if lang is not None: fname += ".{lang}".format(lang=lang) return fname def dest_path(prefix, lang): return os.path.join(args.destdir, file_name(prefix, lang)) def dict_path(lang): return dest_path("dict", lang) + ".txt" def build_dictionary(filenames, src=False, tgt=False): assert src ^ tgt return task.build_dictionary( filenames, workers=args.workers, threshold=args.thresholdsrc if src else args.thresholdtgt, nwords=args.nwordssrc if src else args.nwordstgt, padding_factor=args.padding_factor, ) target = not args.only_source if not args.srcdict and os.path.exists(dict_path(args.source_lang)): raise FileExistsError(dict_path(args.source_lang)) if target and not args.tgtdict and os.path.exists(dict_path(args.target_lang)): raise FileExistsError(dict_path(args.target_lang)) if args.joined_dictionary: assert not args.srcdict or not args.tgtdict, \ "cannot use both --srcdict and --tgtdict with --joined-dictionary" if args.srcdict: src_dict = task.load_dictionary(args.srcdict) elif args.tgtdict: src_dict = task.load_dictionary(args.tgtdict) else: assert args.trainpref, "--trainpref must be set if --srcdict is not specified" src_dict = build_dictionary( {train_path(lang) for lang in [args.source_lang, args.target_lang]}, src=True ) tgt_dict = src_dict else: if args.srcdict: src_dict = task.load_dictionary(args.srcdict) else: assert args.trainpref, "--trainpref must be set if --srcdict is not specified" src_dict = build_dictionary([train_path(args.source_lang)], src=True) if target: if args.tgtdict: tgt_dict = task.load_dictionary(args.tgtdict) else: assert args.trainpref, "--trainpref must be set if --tgtdict is not specified" tgt_dict = build_dictionary([train_path(args.target_lang)], tgt=True) else: tgt_dict = None src_dict.save(dict_path(args.source_lang)) if target and tgt_dict is not None: tgt_dict.save(dict_path(args.target_lang)) def make_binary_dataset(vocab, input_prefix, output_prefix, lang, num_workers): logger.info("[{}] Dictionary: {} types".format(lang, len(vocab))) n_seq_tok = [0, 0] replaced = Counter() def merge_result(worker_result): replaced.update(worker_result["replaced"]) n_seq_tok[0] += worker_result["nseq"] n_seq_tok[1] += worker_result["ntok"] input_file = "{}{}".format( input_prefix, ("." 
+ lang) if lang is not None else "" ) offsets = Binarizer.find_offsets(input_file, num_workers) pool = None if num_workers > 1: pool = Pool(processes=num_workers - 1) for worker_id in range(1, num_workers): prefix = "{}{}".format(output_prefix, worker_id) pool.apply_async( binarize, ( args, input_file, vocab, prefix, lang, offsets[worker_id], offsets[worker_id + 1] ), callback=merge_result ) pool.close() ds = indexed_dataset.make_builder(dataset_dest_file(args, output_prefix, lang, "bin"), impl=args.dataset_impl, vocab_size=len(vocab)) merge_result( Binarizer.binarize( input_file, vocab, lambda t: ds.add_item(t), offset=0, end=offsets[1] ) ) if num_workers > 1: pool.join() for worker_id in range(1, num_workers): prefix = "{}{}".format(output_prefix, worker_id) temp_file_path = dataset_dest_prefix(args, prefix, lang) ds.merge_file_(temp_file_path) os.remove(indexed_dataset.data_file_path(temp_file_path)) os.remove(indexed_dataset.index_file_path(temp_file_path)) ds.finalize(dataset_dest_file(args, output_prefix, lang, "idx")) logger.info( "[{}] {}: {} sents, {} tokens, {:.3}% replaced by {}".format( lang, input_file, n_seq_tok[0], n_seq_tok[1], 100 * sum(replaced.values()) / n_seq_tok[1], vocab.unk_word, ) ) def make_binary_alignment_dataset(input_prefix, output_prefix, num_workers): nseq = [0] def merge_result(worker_result): nseq[0] += worker_result['nseq'] input_file = input_prefix offsets = Binarizer.find_offsets(input_file, num_workers) pool = None if num_workers > 1: pool = Pool(processes=num_workers - 1) for worker_id in range(1, num_workers): prefix = "{}{}".format(output_prefix, worker_id) pool.apply_async( binarize_alignments, ( args, input_file, utils.parse_alignment, prefix, offsets[worker_id], offsets[worker_id + 1] ), callback=merge_result ) pool.close() ds = indexed_dataset.make_builder(dataset_dest_file(args, output_prefix, None, "bin"), impl=args.dataset_impl) merge_result( Binarizer.binarize_alignments( input_file, utils.parse_alignment, lambda t: ds.add_item(t), offset=0, end=offsets[1] ) ) if num_workers > 1: pool.join() for worker_id in range(1, num_workers): prefix = "{}{}".format(output_prefix, worker_id) temp_file_path = dataset_dest_prefix(args, prefix, None) ds.merge_file_(temp_file_path) os.remove(indexed_dataset.data_file_path(temp_file_path)) os.remove(indexed_dataset.index_file_path(temp_file_path)) ds.finalize(dataset_dest_file(args, output_prefix, None, "idx")) logger.info( "[alignments] {}: parsed {} alignments".format( input_file, nseq[0] ) ) def make_dataset(vocab, input_prefix, output_prefix, lang, num_workers=1): if args.dataset_impl == "raw": # Copy original text file to destination folder output_text_file = dest_path( output_prefix + ".{}-{}".format(args.source_lang, args.target_lang), lang, ) shutil.copyfile(file_name(input_prefix, lang), output_text_file) else: make_binary_dataset(vocab, input_prefix, output_prefix, lang, num_workers) def make_all(lang, vocab): if args.trainpref: make_dataset(vocab, args.trainpref, "train", lang, num_workers=args.workers) if args.validpref: for k, validpref in enumerate(args.validpref.split(",")): outprefix = "valid{}".format(k) if k > 0 else "valid" make_dataset(vocab, validpref, outprefix, lang, num_workers=args.workers) if args.testpref: for k, testpref in enumerate(args.testpref.split(",")): outprefix = "test{}".format(k) if k > 0 else "test" make_dataset(vocab, testpref, outprefix, lang, num_workers=args.workers) def make_all_alignments(): if args.trainpref and os.path.exists(args.trainpref + "." 
+ args.align_suffix): make_binary_alignment_dataset(args.trainpref + "." + args.align_suffix, "train.align", num_workers=args.workers) if args.validpref and os.path.exists(args.validpref + "." + args.align_suffix): make_binary_alignment_dataset(args.validpref + "." + args.align_suffix, "valid.align", num_workers=args.workers) if args.testpref and os.path.exists(args.testpref + "." + args.align_suffix): make_binary_alignment_dataset(args.testpref + "." + args.align_suffix, "test.align", num_workers=args.workers) make_all(args.source_lang, src_dict) if target: make_all(args.target_lang, tgt_dict) if args.align_suffix: make_all_alignments() logger.info("Wrote preprocessed data to {}".format(args.destdir)) if args.alignfile: assert args.trainpref, "--trainpref must be set if --alignfile is specified" src_file_name = train_path(args.source_lang) tgt_file_name = train_path(args.target_lang) freq_map = {} with open(args.alignfile, "r", encoding='utf-8') as align_file: with open(src_file_name, "r", encoding='utf-8') as src_file: with open(tgt_file_name, "r", encoding='utf-8') as tgt_file: for a, s, t in zip_longest(align_file, src_file, tgt_file): si = src_dict.encode_line(s, add_if_not_exist=False) ti = tgt_dict.encode_line(t, add_if_not_exist=False) ai = list(map(lambda x: tuple(x.split("-")), a.split())) for sai, tai in ai: srcidx = si[int(sai)] tgtidx = ti[int(tai)] if srcidx != src_dict.unk() and tgtidx != tgt_dict.unk(): assert srcidx != src_dict.pad() assert srcidx != src_dict.eos() assert tgtidx != tgt_dict.pad() assert tgtidx != tgt_dict.eos() if srcidx not in freq_map: freq_map[srcidx] = {} if tgtidx not in freq_map[srcidx]: freq_map[srcidx][tgtidx] = 1 else: freq_map[srcidx][tgtidx] += 1 align_dict = {} for srcidx in freq_map.keys(): align_dict[srcidx] = max(freq_map[srcidx], key=freq_map[srcidx].get) with open( os.path.join( args.destdir, "alignment.{}-{}.txt".format(args.source_lang, args.target_lang), ), "w", encoding='utf-8' ) as f: for k, v in align_dict.items(): print("{} {}".format(src_dict[k], tgt_dict[v]), file=f) def binarize(args, filename, vocab, output_prefix, lang, offset, end, append_eos=True): ds = indexed_dataset.make_builder(dataset_dest_file(args, output_prefix, lang, "bin"), impl=args.dataset_impl, vocab_size=len(vocab)) def consumer(tensor): ds.add_item(tensor) res = Binarizer.binarize(filename, vocab, consumer, append_eos=append_eos, offset=offset, end=end) ds.finalize(dataset_dest_file(args, output_prefix, lang, "idx")) return res def binarize_alignments(args, filename, parse_alignment, output_prefix, offset, end): ds = indexed_dataset.make_builder(dataset_dest_file(args, output_prefix, None, "bin"), impl=args.dataset_impl, vocab_size=None) def consumer(tensor): ds.add_item(tensor) res = Binarizer.binarize_alignments(filename, parse_alignment, consumer, offset=offset, end=end) ds.finalize(dataset_dest_file(args, output_prefix, None, "idx")) return res def dataset_dest_prefix(args, output_prefix, lang): base = "{}/{}".format(args.destdir, output_prefix) if lang is not None: lang_part = ".{}-{}.{}".format(args.source_lang, args.target_lang, lang) elif args.only_source: lang_part = "" else: lang_part = ".{}-{}".format(args.source_lang, args.target_lang) return "{}{}".format(base, lang_part) def dataset_dest_file(args, output_prefix, lang, extension): base = dataset_dest_prefix(args, output_prefix, lang) return "{}.{}".format(base, extension) def get_offsets(input_file, num_workers): return Binarizer.find_offsets(input_file, num_workers) def cli_main(): parser = 
options.get_preprocessing_parser() args = parser.parse_args() main(args) if __name__ == "__main__": cli_main()
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
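`preprocess.py` is the binarization entry point; a sketch of a monolingual run with placeholder file prefixes:

```python
# Placeholder prefixes; --only-source builds a monolingual (LM) dataset.
import sys
from fairseq_cli import preprocess

sys.argv = [
    "fairseq-preprocess", "--only-source",
    "--trainpref", "wiki.train.tokens",
    "--validpref", "wiki.valid.tokens",
    "--destdir", "data-bin/wikitext-103",
    "--workers", "4",
]
preprocess.cli_main()
```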
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/tests/test_sequence_scorer.py
tests/test_sequence_scorer.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import unittest import torch from fairseq.sequence_scorer import SequenceScorer import tests.utils as test_utils class TestSequenceScorer(unittest.TestCase): def test_sequence_scorer(self): # construct dummy dictionary d = test_utils.dummy_dictionary(vocab_size=2) self.assertEqual(d.pad(), 1) self.assertEqual(d.eos(), 2) self.assertEqual(d.unk(), 3) eos = d.eos() w1 = 4 w2 = 5 # construct dataloader data = [ { 'source': torch.LongTensor([w1, w2, eos]), 'target': torch.LongTensor([w1, w2, w1, eos]), }, { 'source': torch.LongTensor([w2, eos]), 'target': torch.LongTensor([w2, w1, eos]), }, { 'source': torch.LongTensor([w2, eos]), 'target': torch.LongTensor([w2, eos]), }, ] data_itr = test_utils.dummy_dataloader(data) # specify expected output probabilities args = argparse.Namespace() unk = 0. args.beam_probs = [ # step 0: torch.FloatTensor([ # eos w1 w2 [0.0, unk, 0.6, 0.4], # sentence 1 [0.0, unk, 0.4, 0.6], # sentence 2 [0.0, unk, 0.7, 0.3], # sentence 3 ]), # step 1: torch.FloatTensor([ # eos w1 w2 [0.0, unk, 0.2, 0.7], # sentence 1 [0.0, unk, 0.8, 0.2], # sentence 2 [0.7, unk, 0.1, 0.2], # sentence 3 ]), # step 2: torch.FloatTensor([ # eos w1 w2 [0.10, unk, 0.50, 0.4], # sentence 1 [0.15, unk, 0.15, 0.7], # sentence 2 [0.00, unk, 0.00, 0.0], # sentence 3 ]), # step 3: torch.FloatTensor([ # eos w1 w2 [0.9, unk, 0.05, 0.05], # sentence 1 [0.0, unk, 0.00, 0.0], # sentence 2 [0.0, unk, 0.00, 0.0], # sentence 3 ]), ] expected_scores = [ [0.6, 0.7, 0.5, 0.9], # sentence 1 [0.6, 0.8, 0.15], # sentence 2 [0.3, 0.7], # sentence 3 ] task = test_utils.TestTranslationTask.setup_task(args, d, d) model = task.build_model(args) scorer = SequenceScorer(task.target_dictionary) for sample in data_itr: hypos = task.inference_step(scorer, [model], sample) for id, hypos_id in zip(sample['id'].tolist(), hypos): self.assertHypoTokens(hypos_id[0], data[id]['target']) self.assertHypoScore(hypos_id[0], expected_scores[id]) def assertHypoTokens(self, hypo, tokens): self.assertTensorEqual(hypo['tokens'], torch.LongTensor(tokens)) def assertHypoScore(self, hypo, pos_probs, normalized=True, lenpen=1.): pos_scores = torch.FloatTensor(pos_probs).log() self.assertAlmostEqual(hypo['positional_scores'], pos_scores) self.assertEqual(pos_scores.numel(), hypo['tokens'].numel()) score = pos_scores.sum() if normalized: score /= pos_scores.numel()**lenpen self.assertLess(abs(score - hypo['score']), 1e-6) def assertAlmostEqual(self, t1, t2): self.assertEqual(t1.size(), t2.size(), "size mismatch") self.assertLess((t1 - t2).abs().max(), 1e-4) def assertTensorEqual(self, t1, t2): self.assertEqual(t1.size(), t2.size(), "size mismatch") self.assertEqual(t1.ne(t2).long().sum(), 0) if __name__ == '__main__': unittest.main()
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
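A small worked check, added here for reference, of how the per-token probabilities above turn into the normalized hypothesis score asserted by assertHypoScore:

# Worked check mirroring assertHypoScore with lenpen = 1: the hypothesis
# score is the mean of the per-token log-probabilities.
import torch

pos_probs = torch.tensor([0.6, 0.7, 0.5, 0.9])  # sentence 1's expected_scores
pos_scores = pos_probs.log()
score = pos_scores.sum() / pos_scores.numel() ** 1.0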
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/tests/test_inference_dropout.py
tests/test_inference_dropout.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import logging
import unittest

from tests.test_sequence_generator import get_dummy_task_and_parser
from fairseq.models.transformer import TransformerModel


class TestInferenceDropout(unittest.TestCase):

    def setUp(self):
        self.task, self.parser = get_dummy_task_and_parser()
        TransformerModel.add_args(self.parser)
        self.args = self.parser.parse_args([])
        self.args.encoder_layers = 2
        self.args.decoder_layers = 1
        logging.disable(logging.CRITICAL)

    def tearDown(self):
        logging.disable(logging.NOTSET)

    def test_sets_inference_dropout_to_true(self):
        self.args.retain_dropout = True
        self.transformer_model = TransformerModel.build_model(self.args, self.task)
        self.transformer_model.prepare_for_inference_(self.args)
        assert self.transformer_model.encoder.dropout_module.apply_during_inference
        assert self.transformer_model.decoder.dropout_module.apply_during_inference
        for layer in self.transformer_model.encoder.layers:
            assert layer.dropout_module.apply_during_inference

    def test_inference_dropout_false_by_default(self):
        self.transformer_model = TransformerModel.build_model(self.args, self.task)
        self.transformer_model.prepare_for_inference_(self.args)
        assert not self.transformer_model.encoder.dropout_module.apply_during_inference
        assert not self.transformer_model.decoder.dropout_module.apply_during_inference
        for layer in self.transformer_model.encoder.layers:
            assert not layer.dropout_module.apply_during_inference
        for layer in self.transformer_model.decoder.layers:
            assert not layer.dropout_module.apply_during_inference

    def test_applies_training_mode(self):
        self.transformer_model = TransformerModel.build_model(self.args, self.task)
        assert self.transformer_model.encoder.dropout_module.training
        for layer in self.transformer_model.encoder.layers:
            assert layer.dropout_module.training

        self.transformer_model.eval()
        assert not self.transformer_model.decoder.dropout_module.training
        for layer in self.transformer_model.encoder.layers:
            assert not layer.dropout_module.training

    def test_retain_modules(self):
        self.args.retain_dropout = True
        self.args.retain_dropout_modules = ['TransformerEncoder', 'TransformerEncoderLayer']
        self.transformer_model = TransformerModel.build_model(self.args, self.task)
        self.transformer_model.prepare_for_inference_(self.args)
        assert self.transformer_model.encoder.dropout_module.apply_during_inference
        assert not self.transformer_model.decoder.dropout_module.apply_during_inference
        for layer in self.transformer_model.decoder.layers:
            assert not layer.dropout_module.apply_during_inference
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/tests/test_metrics.py
tests/test_metrics.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import unittest
import uuid

from fairseq import metrics


class TestMetrics(unittest.TestCase):

    def test_nesting(self):
        with metrics.aggregate() as a:
            metrics.log_scalar('loss', 1)
            with metrics.aggregate() as b:
                metrics.log_scalar('loss', 2)

        self.assertEqual(a.get_smoothed_values()['loss'], 1.5)
        self.assertEqual(b.get_smoothed_values()['loss'], 2)

    def test_new_root(self):
        with metrics.aggregate() as a:
            metrics.log_scalar('loss', 1)
            with metrics.aggregate(new_root=True) as b:
                metrics.log_scalar('loss', 2)

        self.assertEqual(a.get_smoothed_values()['loss'], 1)
        self.assertEqual(b.get_smoothed_values()['loss'], 2)

    def test_nested_new_root(self):
        with metrics.aggregate() as layer1:
            metrics.log_scalar('loss', 1)
            with metrics.aggregate(new_root=True) as layer2:
                metrics.log_scalar('loss', 2)
                with metrics.aggregate() as layer3:
                    metrics.log_scalar('loss', 3)
                    with metrics.aggregate(new_root=True) as layer4:
                        metrics.log_scalar('loss', 4)
            metrics.log_scalar('loss', 1.5)

        self.assertEqual(layer4.get_smoothed_values()['loss'], 4)
        self.assertEqual(layer3.get_smoothed_values()['loss'], 3)
        self.assertEqual(layer2.get_smoothed_values()['loss'], 2.5)
        self.assertEqual(layer1.get_smoothed_values()['loss'], 1.25)

    def test_named(self):
        name = str(uuid.uuid4())
        metrics.reset_meters(name)

        with metrics.aggregate(name):
            metrics.log_scalar('loss', 1)

        metrics.log_scalar('loss', 3)

        with metrics.aggregate(name):
            metrics.log_scalar('loss', 2)

        self.assertEqual(metrics.get_smoothed_values(name)['loss'], 1.5)

    def test_nested_duplicate_names(self):
        name = str(uuid.uuid4())
        metrics.reset_meters(name)

        with metrics.aggregate(name):
            metrics.log_scalar('loss', 1)
            with metrics.aggregate() as other:
                with metrics.aggregate(name):
                    metrics.log_scalar('loss', 2)
            metrics.log_scalar('loss', 6)

        self.assertEqual(metrics.get_smoothed_values(name)['loss'], 3)
        self.assertEqual(other.get_smoothed_values()['loss'], 2)


if __name__ == '__main__':
    unittest.main()
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
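A minimal usage sketch of the aggregation API these tests exercise; every call here comes directly from the tested surface:

from fairseq import metrics

# Scalars logged inside an aggregate() context are averaged per context.
with metrics.aggregate() as agg:
    metrics.log_scalar('loss', 1)
    metrics.log_scalar('loss', 3)
print(agg.get_smoothed_values()['loss'])  # 2.0, the mean of the two values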
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/tests/test_file_io.py
tests/test_file_io.py
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import sys
import tempfile
import os
import shutil
from typing import Optional
import unittest
from unittest.mock import MagicMock


class TestFileIO(unittest.TestCase):

    _tmpdir: Optional[str] = None
    _tmpfile: Optional[str] = None
    _tmpfile_contents = "Hello, World"

    @classmethod
    def setUpClass(cls) -> None:
        cls._tmpdir = tempfile.mkdtemp()
        with open(os.path.join(cls._tmpdir, "test.txt"), "w") as f:
            cls._tmpfile = f.name
            f.write(cls._tmpfile_contents)
            f.flush()

    @classmethod
    def tearDownClass(cls) -> None:
        # Cleanup temp working dir.
        if cls._tmpdir is not None:
            shutil.rmtree(cls._tmpdir)  # type: ignore

    def test_file_io(self):
        from fairseq.file_io import PathManager

        with PathManager.open(os.path.join(self._tmpdir, "test.txt"), "r") as f:
            s = f.read()
            self.assertEqual(s, self._tmpfile_contents)

    def test_file_io_oss(self):
        # Mock fvcore to simulate oss environment.
        sys.modules['fvcore'] = MagicMock()
        from fairseq.file_io import PathManager

        with PathManager.open(os.path.join(self._tmpdir, "test.txt"), "r") as f:
            s = f.read()
            self.assertEqual(s, self._tmpfile_contents)
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/tests/test_sparse_multihead_attention.py
tests/test_sparse_multihead_attention.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch
import unittest

from fairseq.modules.sparse_multihead_attention import SparseMultiheadAttention


class TestSparseMultiheadAttention(unittest.TestCase):

    def test_sparse_multihead_attention(self):
        attn_weights = torch.randn(1, 8, 8)
        bidirectional_sparse_mask = torch.tensor([
            [0, 0, 0, 0, 0, float('-inf'), float('-inf'), 0],
            [0, 0, 0, 0, 0, float('-inf'), float('-inf'), 0],
            [0, 0, 0, 0, 0, float('-inf'), float('-inf'), 0],
            [0, 0, 0, 0, 0, float('-inf'), float('-inf'), 0],
            [float('-inf'), float('-inf'), float('-inf'), 0, 0, 0, 0, 0],
            [float('-inf'), float('-inf'), float('-inf'), 0, 0, 0, 0, 0],
            [float('-inf'), float('-inf'), float('-inf'), 0, 0, 0, 0, 0],
            [float('-inf'), float('-inf'), float('-inf'), 0, 0, 0, 0, 0]
        ])

        bidirectional_attention = SparseMultiheadAttention(
            16, 1, stride=4, expressivity=1, is_bidirectional=True
        )
        bidirectional_attention_sparse_mask = (
            bidirectional_attention.buffered_sparse_mask(attn_weights, 8, 8)
        )
        # assert the comparison so a mismatch actually fails the test
        self.assertTrue(torch.all(
            torch.eq(bidirectional_attention_sparse_mask, bidirectional_sparse_mask)
        ))

        sparse_mask = torch.tensor([
            [0, float('-inf'), float('-inf'), float('-inf'), float('-inf'), float('-inf'), float('-inf'), float('-inf')],
            [0, 0, float('-inf'), float('-inf'), float('-inf'), float('-inf'), float('-inf'), float('-inf')],
            [0, 0, 0, float('-inf'), float('-inf'), float('-inf'), float('-inf'), float('-inf')],
            [0, 0, 0, 0, float('-inf'), float('-inf'), float('-inf'), float('-inf')],
            [0, 0, 0, 0, 0, float('-inf'), float('-inf'), float('-inf')],
            [float('-inf'), float('-inf'), float('-inf'), 0, 0, 0, float('-inf'), float('-inf')],
            [float('-inf'), float('-inf'), float('-inf'), 0, 0, 0, 0, float('-inf')],
            [float('-inf'), float('-inf'), float('-inf'), 0, 0, 0, 0, 0],
        ])

        attention = SparseMultiheadAttention(
            16, 1, stride=4, expressivity=1, is_bidirectional=False
        )
        attention_sparse_mask = attention.buffered_sparse_mask(attn_weights, 8, 8)
        self.assertTrue(torch.all(torch.eq(attention_sparse_mask, sparse_mask)))


if __name__ == '__main__':
    unittest.main()
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
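A short sketch of probing the same module outside the test. The forward signature is assumed to follow fairseq's MultiheadAttention with (T, B, C) inputs; the test itself only exercises the mask construction:

import torch
from fairseq.modules.sparse_multihead_attention import SparseMultiheadAttention

# Same constructor arguments as the test above.
attn = SparseMultiheadAttention(16, 1, stride=4, expressivity=1, is_bidirectional=False)
x = torch.randn(8, 2, 16)  # (seq_len, batch, embed_dim), assumed layout
out, attn_weights = attn(query=x, key=x, value=x)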
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/tests/test_noising.py
tests/test_noising.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import unittest
from typing import Dict, List

import tests.utils as test_utils
import torch
from fairseq import utils
from fairseq.data import (
    Dictionary,
    LanguagePairDataset,
    TransformEosDataset,
    data_utils,
    noising,
)


class TestDataNoising(unittest.TestCase):
    def _get_test_data_with_bpe_cont_marker(self, append_eos=True):
        """
        Args:
            append_eos: if True, each input sentence in the source tokens tensor
                will have an EOS appended to the end.

        Returns:
            vocabs: BPE vocab with continuation markers as suffixes to denote
                non-end of word tokens. This is the standard BPE format used in
                fairseq's preprocessing.
            x: input tensor containing numberized source tokens, with EOS at the
                end if append_eos is true
            src_lengths: and source lengths.
        """
        vocab = Dictionary()
        vocab.add_symbol("he@@")
        vocab.add_symbol("llo")
        vocab.add_symbol("how")
        vocab.add_symbol("are")
        vocab.add_symbol("y@@")
        vocab.add_symbol("ou")
        vocab.add_symbol("n@@")
        vocab.add_symbol("ew")
        vocab.add_symbol("or@@")
        vocab.add_symbol("k")

        src_tokens = [
            ["he@@", "llo", "n@@", "ew", "y@@", "or@@", "k"],
            ["how", "are", "y@@", "ou"],
        ]
        x, src_lengths = self._convert_src_tokens_to_tensor(
            vocab=vocab, src_tokens=src_tokens, append_eos=append_eos
        )
        return vocab, x, src_lengths

    def _get_test_data_with_bpe_end_marker(self, append_eos=True):
        """
        Args:
            append_eos: if True, each input sentence in the source tokens tensor
                will have an EOS appended to the end.

        Returns:
            vocabs: BPE vocab with end-of-word markers as suffixes to denote
                tokens at the end of a word. This is an alternative to fairseq's
                standard preprocessing framework and is not generally supported
                within fairseq.
            x: input tensor containing numberized source tokens, with EOS at the
                end if append_eos is true
            src_lengths: and source lengths.
        """
        vocab = Dictionary()
        vocab.add_symbol("he")
        vocab.add_symbol("llo_EOW")
        vocab.add_symbol("how_EOW")
        vocab.add_symbol("are_EOW")
        vocab.add_symbol("y")
        vocab.add_symbol("ou_EOW")
        vocab.add_symbol("n")
        vocab.add_symbol("ew_EOW")
        vocab.add_symbol("or")
        vocab.add_symbol("k_EOW")

        src_tokens = [
            ["he", "llo_EOW", "n", "ew_EOW", "y", "or", "k_EOW"],
            ["how_EOW", "are_EOW", "y", "ou_EOW"],
        ]
        x, src_lengths = self._convert_src_tokens_to_tensor(
            vocab=vocab, src_tokens=src_tokens, append_eos=append_eos
        )
        return vocab, x, src_lengths

    def _get_test_data_with_word_vocab(self, append_eos=True):
        """
        Args:
            append_eos: if True, each input sentence in the source tokens tensor
                will have an EOS appended to the end.

        Returns:
            vocabs: word vocab
            x: input tensor containing numberized source tokens, with EOS at the
                end if append_eos is true
            src_lengths: and source lengths.
        """
        vocab = Dictionary()
        vocab.add_symbol("hello")
        vocab.add_symbol("how")
        vocab.add_symbol("are")
        vocab.add_symbol("you")
        vocab.add_symbol("new")
        vocab.add_symbol("york")
        src_tokens = [
            ["hello", "new", "york", "you"],
            ["how", "are", "you", "new", "york"],
        ]
        x, src_lengths = self._convert_src_tokens_to_tensor(
            vocab=vocab, src_tokens=src_tokens, append_eos=append_eos
        )
        return vocab, x, src_lengths

    def _convert_src_tokens_to_tensor(
        self, vocab: Dictionary, src_tokens: List[List[str]], append_eos: bool
    ):
        src_len = [len(x) for x in src_tokens]
        # If we have to append EOS, we include EOS in counting src length
        if append_eos:
            src_len = [length + 1 for length in src_len]

        x = torch.LongTensor(len(src_tokens), max(src_len)).fill_(vocab.pad())
        for i in range(len(src_tokens)):
            for j in range(len(src_tokens[i])):
                x[i][j] = vocab.index(src_tokens[i][j])
            if append_eos:
                x[i][j + 1] = vocab.eos()

        x = x.transpose(1, 0)
        return x, torch.LongTensor(src_len)

    def assert_eos_at_end(self, x, x_len, eos):
        """Asserts last token of every sentence in x is EOS"""
        for i in range(len(x_len)):
            self.assertEqual(
                x[x_len[i] - 1][i],
                eos,
                (
                    "Expected eos (token id {eos}) at the end of sentence {i} "
                    "but got {other} instead"
                ).format(i=i, eos=eos, other=x[i][-1]),
            )

    def assert_word_dropout_correct(self, x, x_noised, x_len, l_noised):
        # Expect only the first word (2 bpe tokens) of the first example
        # was dropped out
        self.assertEqual(x_len[0] - 2, l_noised[0])
        for i in range(l_noised[0]):
            self.assertEqual(x_noised[i][0], x[i + 2][0])

    def test_word_dropout_with_eos(self):
        vocab, x, x_len = self._get_test_data_with_bpe_cont_marker(append_eos=True)

        with data_utils.numpy_seed(1234):
            noising_gen = noising.WordDropout(vocab)
            x_noised, l_noised = noising_gen.noising(x, x_len, 0.2)
            self.assert_word_dropout_correct(
                x=x, x_noised=x_noised, x_len=x_len, l_noised=l_noised
            )
            self.assert_eos_at_end(x=x_noised, x_len=l_noised, eos=vocab.eos())

    def assert_word_blanking_correct(self, x, x_noised, x_len, l_noised, unk):
        # Expect only the first word (2 bpe tokens) of the first example
        # was blanked out
        self.assertEqual(x_len[0], l_noised[0])
        for i in range(l_noised[0]):
            if i < 2:
                self.assertEqual(x_noised[i][0], unk)
            else:
                self.assertEqual(x_noised[i][0], x[i][0])

    def test_word_blank_with_eos(self):
        vocab, x, x_len = self._get_test_data_with_bpe_cont_marker(append_eos=True)

        with data_utils.numpy_seed(1234):
            noising_gen = noising.WordDropout(vocab)
            x_noised, l_noised = noising_gen.noising(x, x_len, 0.2, vocab.unk())
            self.assert_word_blanking_correct(
                x=x, x_noised=x_noised, x_len=x_len, l_noised=l_noised, unk=vocab.unk()
            )
            self.assert_eos_at_end(x=x_noised, x_len=l_noised, eos=vocab.eos())

    def generate_unchanged_shuffle_map(self, length):
        return {i: i for i in range(length)}

    def assert_word_shuffle_matches_expected(
        self,
        x,
        x_len,
        max_shuffle_distance: int,
        vocab: Dictionary,
        expected_shufle_maps: List[Dict[int, int]],
        expect_eos_at_end: bool,
        bpe_end_marker=None,
    ):
        """
        This verifies that with a given x, x_len, max_shuffle_distance, and
        vocab, we get the expected shuffle result.

        Args:
            x: Tensor of shape (T x B) = (sequence_length, batch_size)
            x_len: Tensor of length B = batch_size
            max_shuffle_distance: arg to pass to noising
            expected_shuffle_maps: List[mapping] where mapping is a
                Dict[old_index, new_index], mapping x's elements from their
                old positions in x to their new positions in x.
            expect_eos_at_end: if True, check the output to make sure there is
                an EOS at the end.
            bpe_end_marker: str denoting the BPE end token. If this is not
                None, we set the BPE cont token to None in the noising classes.
        """
        bpe_cont_marker = None
        if bpe_end_marker is None:
            bpe_cont_marker = "@@"

        with data_utils.numpy_seed(1234):
            word_shuffle = noising.WordShuffle(
                vocab, bpe_cont_marker=bpe_cont_marker, bpe_end_marker=bpe_end_marker
            )
            x_noised, l_noised = word_shuffle.noising(
                x, x_len, max_shuffle_distance=max_shuffle_distance
            )

        # For every example, we have a different expected shuffle map. We check
        # that each example is shuffled as expected according to each
        # corresponding shuffle map.
        for i in range(len(expected_shufle_maps)):
            shuffle_map = expected_shufle_maps[i]
            for k, v in shuffle_map.items():
                self.assertEqual(x[k][i], x_noised[v][i])

        # Shuffling should not affect the length of each example
        for pre_shuffle_length, post_shuffle_length in zip(x_len, l_noised):
            self.assertEqual(pre_shuffle_length, post_shuffle_length)
        if expect_eos_at_end:
            self.assert_eos_at_end(x=x_noised, x_len=l_noised, eos=vocab.eos())

    def test_word_shuffle_with_eos(self):
        vocab, x, x_len = self._get_test_data_with_bpe_cont_marker(append_eos=True)

        # Assert word shuffle with max shuffle distance 0 causes input to be
        # unchanged
        self.assert_word_shuffle_matches_expected(
            x=x,
            x_len=x_len,
            max_shuffle_distance=0,
            vocab=vocab,
            expected_shufle_maps=[
                self.generate_unchanged_shuffle_map(example_len)
                for example_len in x_len
            ],
            expect_eos_at_end=True,
        )

        # Assert word shuffle with max shuffle distance 3 matches our expected
        # shuffle order
        self.assert_word_shuffle_matches_expected(
            x=x,
            x_len=x_len,
            vocab=vocab,
            max_shuffle_distance=3,
            expected_shufle_maps=[
                self.generate_unchanged_shuffle_map(x_len[0]),
                {0: 0, 1: 3, 2: 1, 3: 2},
            ],
            expect_eos_at_end=True,
        )

    def test_word_shuffle_with_eos_nonbpe(self):
        """The purpose of this is to test shuffling logic with word vocabs"""
        vocab, x, x_len = self._get_test_data_with_word_vocab(append_eos=True)

        # Assert word shuffle with max shuffle distance 0 causes input to be
        # unchanged
        self.assert_word_shuffle_matches_expected(
            x=x,
            x_len=x_len,
            max_shuffle_distance=0,
            vocab=vocab,
            expected_shufle_maps=[
                self.generate_unchanged_shuffle_map(example_len)
                for example_len in x_len
            ],
            expect_eos_at_end=True,
        )

        # Assert word shuffle with max shuffle distance 3 matches our expected
        # shuffle order
        self.assert_word_shuffle_matches_expected(
            x=x,
            x_len=x_len,
            vocab=vocab,
            max_shuffle_distance=3,
            expected_shufle_maps=[
                {0: 0, 1: 1, 2: 3, 3: 2},
                {0: 0, 1: 2, 2: 1, 3: 3, 4: 4},
            ],
            expect_eos_at_end=True,
        )

    def test_word_shuffle_without_eos(self):
        """Same result as word shuffle with eos except no EOS at end"""
        vocab, x, x_len = self._get_test_data_with_bpe_cont_marker(append_eos=False)

        # Assert word shuffle with max shuffle distance 0 causes input to be
        # unchanged
        self.assert_word_shuffle_matches_expected(
            x=x,
            x_len=x_len,
            max_shuffle_distance=0,
            vocab=vocab,
            expected_shufle_maps=[
                self.generate_unchanged_shuffle_map(example_len)
                for example_len in x_len
            ],
            expect_eos_at_end=False,
        )

        # Assert word shuffle with max shuffle distance 3 matches our expected
        # shuffle order
        self.assert_word_shuffle_matches_expected(
            x=x,
            x_len=x_len,
            vocab=vocab,
            max_shuffle_distance=3,
            expected_shufle_maps=[
                self.generate_unchanged_shuffle_map(x_len[0]),
                {0: 0, 1: 3, 2: 1, 3: 2},
            ],
            expect_eos_at_end=False,
        )

    def test_word_shuffle_without_eos_with_bpe_end_marker(self):
        """Same result as word shuffle without eos except using BPE end token"""
        vocab, x, x_len = self._get_test_data_with_bpe_end_marker(append_eos=False)

        # Assert word shuffle with max shuffle distance 0 causes input to be
        # unchanged
        self.assert_word_shuffle_matches_expected(
            x=x,
            x_len=x_len,
            max_shuffle_distance=0,
            vocab=vocab,
            expected_shufle_maps=[
                self.generate_unchanged_shuffle_map(example_len)
                for example_len in x_len
            ],
            expect_eos_at_end=False,
            bpe_end_marker="_EOW",
        )

        # Assert word shuffle with max shuffle distance 3 matches our expected
        # shuffle order
        self.assert_word_shuffle_matches_expected(
            x=x,
            x_len=x_len,
            vocab=vocab,
            max_shuffle_distance=3,
            expected_shufle_maps=[
                self.generate_unchanged_shuffle_map(x_len[0]),
                {0: 0, 1: 3, 2: 1, 3: 2},
            ],
            expect_eos_at_end=False,
            bpe_end_marker="_EOW",
        )

    def assert_no_eos_at_end(self, x, x_len, eos):
        """Asserts that the last token of each sentence in x is not EOS"""
        for i in range(len(x_len)):
            self.assertNotEqual(
                x[x_len[i] - 1][i],
                eos,
                "Expected no eos (token id {eos}) at the end of sentence {i}.".format(
                    eos=eos, i=i
                ),
            )

    def test_word_dropout_without_eos(self):
        """Same result as word dropout with eos except no EOS at end"""
        vocab, x, x_len = self._get_test_data_with_bpe_cont_marker(append_eos=False)

        with data_utils.numpy_seed(1234):
            noising_gen = noising.WordDropout(vocab)
            x_noised, l_noised = noising_gen.noising(x, x_len, 0.2)
            self.assert_word_dropout_correct(
                x=x, x_noised=x_noised, x_len=x_len, l_noised=l_noised
            )
            self.assert_no_eos_at_end(x=x_noised, x_len=l_noised, eos=vocab.eos())

    def test_word_blank_without_eos(self):
        """Same result as word blank with eos except no EOS at end"""
        vocab, x, x_len = self._get_test_data_with_bpe_cont_marker(append_eos=False)

        with data_utils.numpy_seed(1234):
            noising_gen = noising.WordDropout(vocab)
            x_noised, l_noised = noising_gen.noising(x, x_len, 0.2, vocab.unk())
            self.assert_word_blanking_correct(
                x=x, x_noised=x_noised, x_len=x_len, l_noised=l_noised, unk=vocab.unk()
            )
            self.assert_no_eos_at_end(x=x_noised, x_len=l_noised, eos=vocab.eos())

    def _get_noising_dataset_batch(
        self,
        src_tokens_no_pad,
        src_dict,
        append_eos_to_tgt=False,
    ):
        """
        Constructs a NoisingDataset and the corresponding
        ``LanguagePairDataset(NoisingDataset(src), src)``. If
        *append_eos_to_tgt* is True, wrap the source dataset in
        :class:`TransformEosDataset` to append EOS to the clean source when
        using it as the target.
        """
        src_dataset = test_utils.TestDataset(data=src_tokens_no_pad)

        noising_dataset = noising.NoisingDataset(
            src_dataset=src_dataset,
            src_dict=src_dict,
            seed=1234,
            max_word_shuffle_distance=3,
            word_dropout_prob=0.2,
            word_blanking_prob=0.2,
            noising_class=noising.UnsupervisedMTNoising,
        )
        tgt = src_dataset
        language_pair_dataset = LanguagePairDataset(
            src=noising_dataset, tgt=tgt, src_sizes=None, src_dict=src_dict
        )
        language_pair_dataset = TransformEosDataset(
            language_pair_dataset,
            src_dict.eos(),
            append_eos_to_tgt=append_eos_to_tgt,
        )

        dataloader = torch.utils.data.DataLoader(
            dataset=language_pair_dataset,
            batch_size=2,
            collate_fn=language_pair_dataset.collater,
        )
        denoising_batch_result = next(iter(dataloader))
        return denoising_batch_result

    def test_noising_dataset_with_eos(self):
        src_dict, src_tokens, _ = self._get_test_data_with_bpe_cont_marker(
            append_eos=True
        )

        # Format data for src_dataset
        src_tokens = torch.t(src_tokens)
        src_tokens_no_pad = []
        for src_sentence in src_tokens:
            src_tokens_no_pad.append(
                utils.strip_pad(tensor=src_sentence, pad=src_dict.pad())
            )
        denoising_batch_result = self._get_noising_dataset_batch(
            src_tokens_no_pad=src_tokens_no_pad, src_dict=src_dict
        )

        eos, pad = src_dict.eos(), src_dict.pad()

        # Generated noisy source as source
        expected_src = torch.LongTensor(
            [[4, 5, 10, 11, 8, 12, 13, eos], [pad, pad, pad, 6, 8, 9, 7, eos]]
        )
        # Original clean source as target (right-padded)
        expected_tgt = torch.LongTensor(
            [[4, 5, 10, 11, 8, 12, 13, eos], [6, 7, 8, 9, eos, pad, pad, pad]]
        )
        generated_src = denoising_batch_result["net_input"]["src_tokens"]
        tgt_tokens = denoising_batch_result["target"]

        self.assertTensorEqual(expected_src, generated_src)
        self.assertTensorEqual(expected_tgt, tgt_tokens)

    def test_noising_dataset_without_eos(self):
        """
        Similar to test noising dataset with eos except that we have to set
        *append_eos_to_tgt* to ``True``.
        """
        src_dict, src_tokens, _ = self._get_test_data_with_bpe_cont_marker(
            append_eos=False
        )

        # Format data for src_dataset
        src_tokens = torch.t(src_tokens)
        src_tokens_no_pad = []
        for src_sentence in src_tokens:
            src_tokens_no_pad.append(
                utils.strip_pad(tensor=src_sentence, pad=src_dict.pad())
            )
        denoising_batch_result = self._get_noising_dataset_batch(
            src_tokens_no_pad=src_tokens_no_pad,
            src_dict=src_dict,
            append_eos_to_tgt=True,
        )

        eos, pad = src_dict.eos(), src_dict.pad()

        # Generated noisy source as source
        expected_src = torch.LongTensor(
            [[4, 5, 10, 11, 8, 12, 13], [pad, pad, pad, 6, 8, 9, 7]]
        )
        # Original clean source as target (right-padded)
        expected_tgt = torch.LongTensor(
            [[4, 5, 10, 11, 8, 12, 13, eos], [6, 7, 8, 9, eos, pad, pad, pad]]
        )

        generated_src = denoising_batch_result["net_input"]["src_tokens"]
        tgt_tokens = denoising_batch_result["target"]

        self.assertTensorEqual(expected_src, generated_src)
        self.assertTensorEqual(expected_tgt, tgt_tokens)

    def assertTensorEqual(self, t1, t2):
        self.assertEqual(t1.size(), t2.size(), "size mismatch")
        self.assertEqual(t1.ne(t2).long().sum(), 0)


if __name__ == "__main__":
    unittest.main()
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
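For reference, a minimal sketch of calling one noiser directly, built from the same Dictionary and WordShuffle pieces the tests use; the tiny vocabulary is illustrative only:

import torch
from fairseq.data import Dictionary, noising

vocab = Dictionary()
for sym in ("he@@", "llo", "how"):
    vocab.add_symbol(sym)

# One sentence of three tokens, shaped (T, B) as in the tests above.
x = torch.LongTensor([[vocab.index(s)] for s in ("he@@", "llo", "how")])
x_len = torch.LongTensor([3])

shuffler = noising.WordShuffle(vocab, bpe_cont_marker="@@")
x_noised, l_noised = shuffler.noising(x, x_len, max_shuffle_distance=3)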
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/tests/test_convtbc.py
tests/test_convtbc.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch
import unittest
from fairseq.modules import ConvTBC
import torch.nn as nn


class TestConvTBC(unittest.TestCase):

    def test_convtbc(self):
        # ksz, in_channels, out_channels
        conv_tbc = ConvTBC(4, 5, kernel_size=3, padding=1)
        # out_channels, in_channels, ksz
        conv1d = nn.Conv1d(4, 5, kernel_size=3, padding=1)

        conv_tbc.weight.data.copy_(conv1d.weight.data.transpose(0, 2))
        conv_tbc.bias.data.copy_(conv1d.bias.data)

        input_tbc = torch.randn(7, 2, 4, requires_grad=True)
        input1d = input_tbc.data.transpose(0, 1).transpose(1, 2)
        input1d.requires_grad = True

        output_tbc = conv_tbc(input_tbc)
        output1d = conv1d(input1d)

        self.assertAlmostEqual(output_tbc.data.transpose(0, 1).transpose(1, 2), output1d.data)

        grad_tbc = torch.randn(output_tbc.size())
        grad1d = grad_tbc.transpose(0, 1).transpose(1, 2).contiguous()

        output_tbc.backward(grad_tbc)
        output1d.backward(grad1d)

        self.assertAlmostEqual(conv_tbc.weight.grad.data.transpose(0, 2), conv1d.weight.grad.data)
        self.assertAlmostEqual(conv_tbc.bias.grad.data, conv1d.bias.grad.data)
        self.assertAlmostEqual(input_tbc.grad.data.transpose(0, 1).transpose(1, 2), input1d.grad.data)

    def assertAlmostEqual(self, t1, t2):
        self.assertEqual(t1.size(), t2.size(), "size mismatch")
        self.assertLess((t1 - t2).abs().max(), 1e-4)


if __name__ == '__main__':
    unittest.main()
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
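The layout conversion the test performs with chained transposes can also be written as a single permute; a small sketch, not from the repository:

import torch

x_bct = torch.randn(2, 4, 7)                 # Conv1d layout: (batch, channels, time)
x_tbc = x_bct.permute(2, 0, 1).contiguous()  # ConvTBC layout: (time, batch, channels)
assert x_tbc.shape == (7, 2, 4)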
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/tests/test_concat_dataset.py
tests/test_concat_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import unittest

import torch

from fairseq.data import LanguagePairDataset, TokenBlockDataset
from fairseq.data.concat_dataset import ConcatDataset
from tests.test_train import mock_dict


class TestConcatDataset(unittest.TestCase):
    def setUp(self):
        d = mock_dict()
        tokens_1 = torch.LongTensor([1]).view(1, -1)
        tokens_ds1 = TokenBlockDataset(
            tokens_1,
            sizes=[tokens_1.size(-1)],
            block_size=1,
            pad=0,
            eos=1,
            include_targets=False,
        )
        self.dataset_1 = LanguagePairDataset(
            tokens_ds1, tokens_ds1.sizes, d, shuffle=False
        )
        tokens_2 = torch.LongTensor([2]).view(1, -1)
        tokens_ds2 = TokenBlockDataset(
            tokens_2,
            sizes=[tokens_2.size(-1)],
            block_size=1,
            pad=0,
            eos=1,
            include_targets=False,
        )
        self.dataset_2 = LanguagePairDataset(
            tokens_ds2, tokens_ds2.sizes, d, shuffle=False
        )

    def test_concat_dataset_basics(self):
        d = ConcatDataset(
            [self.dataset_1, self.dataset_2]
        )
        assert(len(d) == 2)
        assert(d[0]['source'][0] == 1)
        assert(d[1]['source'][0] == 2)

        d = ConcatDataset(
            [self.dataset_1, self.dataset_2], sample_ratios=[1, 2]
        )
        assert(len(d) == 3)
        assert(d[0]['source'][0] == 1)
        assert(d[1]['source'][0] == 2)
        assert(d[2]['source'][0] == 2)

        d = ConcatDataset(
            [self.dataset_1, self.dataset_2], sample_ratios=[2, 1]
        )
        assert(len(d) == 3)
        assert(d[0]['source'][0] == 1)
        assert(d[1]['source'][0] == 1)
        assert(d[2]['source'][0] == 2)
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/tests/test_export.py
tests/test_export.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import argparse
import tempfile
import unittest

import torch

from fairseq.data.dictionary import Dictionary
from fairseq.models.transformer import TransformerModel
from fairseq.modules import multihead_attention, sinusoidal_positional_embedding
from fairseq.tasks.fairseq_task import LegacyFairseqTask


DEFAULT_TEST_VOCAB_SIZE = 100


class DummyTask(LegacyFairseqTask):
    def __init__(self, args):
        super().__init__(args)
        self.dictionary = get_dummy_dictionary()
        if getattr(self.args, "ctc", False):
            self.dictionary.add_symbol("<ctc_blank>")
        self.src_dict = self.dictionary
        self.tgt_dict = self.dictionary

    @property
    def source_dictionary(self):
        return self.src_dict

    @property
    def target_dictionary(self):
        return self.dictionary


def get_dummy_dictionary(vocab_size=DEFAULT_TEST_VOCAB_SIZE):
    dummy_dict = Dictionary()
    # add dummy symbol to satisfy vocab size
    for id, _ in enumerate(range(vocab_size)):
        dummy_dict.add_symbol("{}".format(id), 1000)
    return dummy_dict


def get_dummy_task_and_parser():
    """
    Return a dummy task and argument parser, which can be used to
    create a model/criterion.
    """
    parser = argparse.ArgumentParser(
        description="test_dummy_s2s_task", argument_default=argparse.SUPPRESS
    )
    DummyTask.add_args(parser)
    args = parser.parse_args([])
    task = DummyTask.setup_task(args)
    return task, parser


def _test_save_and_load(scripted_module):
    with tempfile.NamedTemporaryFile() as f:
        scripted_module.save(f.name)
        torch.jit.load(f.name)


class TestExportModels(unittest.TestCase):

    def test_export_multihead_attention(self):
        module = multihead_attention.MultiheadAttention(embed_dim=8, num_heads=2)
        scripted = torch.jit.script(module)
        _test_save_and_load(scripted)

    def test_incremental_state_multihead_attention(self):
        module1 = multihead_attention.MultiheadAttention(embed_dim=8, num_heads=2)
        module1 = torch.jit.script(module1)
        module2 = multihead_attention.MultiheadAttention(embed_dim=8, num_heads=2)
        module2 = torch.jit.script(module2)

        state = {}
        state = module1.set_incremental_state(state, "key", {"a": torch.tensor([1])})
        state = module2.set_incremental_state(state, "key", {"a": torch.tensor([2])})
        v1 = module1.get_incremental_state(state, "key")["a"]
        v2 = module2.get_incremental_state(state, "key")["a"]

        self.assertEqual(v1, 1)
        self.assertEqual(v2, 2)

    def test_positional_embedding(self):
        module = sinusoidal_positional_embedding.SinusoidalPositionalEmbedding(
            embedding_dim=8, padding_idx=1
        )
        scripted = torch.jit.script(module)
        _test_save_and_load(scripted)

    @unittest.skipIf(
        torch.__version__ < "1.6.0", "Targeting OSS scriptability for the 1.6 release"
    )
    def test_export_transformer(self):
        task, parser = get_dummy_task_and_parser()
        TransformerModel.add_args(parser)
        args = parser.parse_args([])
        model = TransformerModel.build_model(args, task)
        scripted = torch.jit.script(model)
        _test_save_and_load(scripted)


if __name__ == "__main__":
    unittest.main()
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
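A standalone sketch of the TorchScript round trip `_test_save_and_load` performs, using the same MultiheadAttention module as the test:

import tempfile
import torch
from fairseq.modules import multihead_attention

scripted = torch.jit.script(
    multihead_attention.MultiheadAttention(embed_dim=8, num_heads=2)
)
with tempfile.NamedTemporaryFile() as f:
    scripted.save(f.name)
    reloaded = torch.jit.load(f.name)  # round trip succeeds if scripting worked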
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/tests/test_bmuf.py
tests/test_bmuf.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import argparse
from multiprocessing import Manager
import random
import unittest

import torch
import torch.nn as nn

from fairseq import distributed_utils, optim


class Model(nn.Module):
    def __init__(self, input_size, output_size):
        super(Model, self).__init__()
        self.fc = nn.Linear(input_size, output_size)

    def forward(self, input):
        output = self.fc(input)
        return output


def setup_model_loss_criterion(args, rank, is_cuda):
    """
    setup model, criterion and optimizer based on input args
    """
    args.distributed_rank = rank
    if args.distributed_world_size > 1:
        distributed_utils.distributed_init(args)
    torch.manual_seed(1)
    model = Model(args.input_size, args.nb_classes)
    loss_fn = nn.CrossEntropyLoss()
    if is_cuda:
        model = model.cuda()
        loss_fn = loss_fn.cuda()

    optimizer = optim.sgd.SGD(args, model.parameters())
    optimizer = optim.FairseqBMUF(args, optimizer)

    return model, loss_fn, optimizer


def train_step(input, target, model, loss_fn, optimizer, **unused):
    """Do forward, backward and parameter update."""
    model.train()
    output = model(input)
    loss = loss_fn(output, target)
    optimizer.backward(loss)
    optimizer.step()


def single_gpu_training(args, rank, iterations, shared_results):

    is_cuda = torch.cuda.is_available()
    if is_cuda:
        torch.cuda.set_device(rank)

    model, loss_fn, optimizer = setup_model_loss_criterion(args, rank, is_cuda)

    for _ in range(iterations):
        input = torch.randn(1, args.input_size)
        target = torch.empty(args.batch_size, dtype=torch.long).random_(args.nb_classes)
        if is_cuda:
            input = input.cuda()
            target = target.cuda()
        train_step(input, target, model, loss_fn, optimizer)

    results = []
    for param in model.parameters():
        if len(results) == 0:
            results = param.flatten().cpu().data
        else:
            results = torch.cat((results, param.flatten().cpu().data), 0)

    shared_results[rank] = results


def setup_args():
    args = argparse.Namespace()
    args.block_momentum = 0.875
    args.block_lr = 0.5
    args.input_size = 5
    args.nb_classes = 2
    args.batch_size = 1
    args.lr = [1e-3]
    args.momentum = 0
    args.weight_decay = 0
    args.warmup_iterations = 0
    args.use_nbm = True
    args.average_sync = True
    args.global_sync_iter = 1
    args.model_parallel_size = 1
    args.distributed_backend = "gloo"

    args.distributed_world_size = 2
    port = random.randint(10000, 20000)
    args.distributed_init_method = "tcp://localhost:{port}".format(port=port)
    args.distributed_init_host = "localhost"
    args.distributed_port = port + 1
    args.local_world_size = args.distributed_world_size
    return args


@unittest.skipIf(torch.cuda.device_count() < 2, "test requires 2 GPUs")
class TestBMUF(unittest.TestCase):

    def bmuf_process(self, args, iterations):
        processes = []
        results = Manager().dict()
        ctx = torch.multiprocessing.get_context("spawn")
        for rank in range(args.distributed_world_size):
            p = ctx.Process(
                target=single_gpu_training, args=(args, rank, iterations, results)
            )
            p.start()
            processes.append(p)
        for p in processes:
            p.join()
        return results

    def test_bmuf_sync(self):
        # Train model for 1 iteration and do bmuf sync without doing warmup
        args = setup_args()
        iterations = 1
        results = self.bmuf_process(args, iterations)
        # Make sure params in both machines are same
        assert len(results) == 2
        self.assertAlmostEqual(results[0], results[1])

    def test_warmup_sync(self):
        # Train model for 20 iterations and do warmup sync without doing bmuf sync
        args = setup_args()
        args.warmup_iterations = 20
        iterations = 20
        results = self.bmuf_process(args, iterations)
        # Make sure params in both machines are same
        assert len(results) == 2
        self.assertAlmostEqual(results[0], results[1])

    def test_warmup_sync_bmuf_sync(self):
        # Train model for 25 iterations and do warmup sync after 20 iterations
        # and bmuf sync every 5 iterations thereafter
        args = setup_args()
        args.warmup_iterations = 20
        args.global_sync_iter = 5
        iterations = 25
        results = self.bmuf_process(args, iterations)
        # Make sure params in both machines are same
        assert len(results) == 2
        self.assertAlmostEqual(results[0], results[1])

    def test_single_gpu_bmuf(self):
        # Train model for 20 iterations on a single GPU, with warmup sync
        # after 5 iterations
        args = setup_args()
        args.distributed_world_size = 1
        args.warmup_iterations = 5
        iterations = 20
        results = self.bmuf_process(args, iterations)
        assert len(results) == 1

    def assertAlmostEqual(self, t1, t2):
        self.assertEqual(t1.size(), t2.size(), "size mismatch")
        self.assertLess((t1 - t2).abs().max(), 1e-4)


if __name__ == '__main__':
    unittest.main()
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/tests/test_memory_efficient_fp16.py
tests/test_memory_efficient_fp16.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import argparse
import logging
import unittest

import torch

from fairseq.optim.adam import FairseqAdam
from fairseq.optim.fp16_optimizer import MemoryEfficientFP16Optimizer


@unittest.skipIf(not torch.cuda.is_available(), 'test requires a GPU')
class TestMemoryEfficientFP16(unittest.TestCase):

    def setUp(self):
        logging.disable(logging.CRITICAL)

    def tearDown(self):
        logging.disable(logging.NOTSET)

    def test_load_state_dict(self):
        # define simple FP16 model
        model = torch.nn.Linear(5, 5).cuda().half()
        params = list(model.parameters())

        # initialize memory efficient FP16 optimizer
        optimizer = FairseqAdam(
            argparse.Namespace(
                lr=[0.00001],
                adam_betas='(0.9, 0.999)',
                adam_eps=1e-8,
                weight_decay=0.0,
            ),
            params,
        )
        me_optimizer = MemoryEfficientFP16Optimizer(
            argparse.Namespace(
                fp16_init_scale=1,
                fp16_scale_window=1,
                fp16_scale_tolerance=1,
                threshold_loss_scale=1,
                min_loss_scale=1e-4,
            ),
            params,
            optimizer,
        )

        # optimizer state is created in the first step
        loss = model(torch.rand(5).cuda().half()).sum()
        me_optimizer.backward(loss)
        me_optimizer.step()

        # reload state
        state = me_optimizer.state_dict()
        me_optimizer.load_state_dict(state)
        for k, v in me_optimizer.optimizer.state.items():
            self.assertTrue(k.dtype == torch.float16)
            for v_i in v.values():
                if torch.is_tensor(v_i):
                    self.assertTrue(v_i.dtype == torch.float32)


if __name__ == '__main__':
    unittest.main()
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/tests/test_multihead_attention.py
tests/test_multihead_attention.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch
import unittest

from fairseq.modules.multihead_attention import MultiheadAttention


class TestMultiheadAttention(unittest.TestCase):

    def test_append_prev_key_padding_mask(self):
        bsz = 1
        src_len = 4

        cases = [
            # no padding mask
            (None, None, None),
            # current padding mask only
            (
                torch.tensor([[1]]).bool(),
                None,
                torch.tensor([[0, 0, 0, 1]]).bool(),
            ),
            # previous padding mask only
            (
                None,
                torch.tensor([[0, 1, 0]]).bool(),
                torch.tensor([[0, 1, 0, 0]]).bool(),
            ),
            # both padding masks
            (
                torch.tensor([[1]]).bool(),
                torch.tensor([[0, 1, 0]]).bool(),
                torch.tensor([[0, 1, 0, 1]]).bool(),
            ),
        ]
        for c in cases:
            key_padding_mask = MultiheadAttention._append_prev_key_padding_mask(
                c[0],
                c[1],
                batch_size=bsz,
                src_len=src_len,
                static_kv=False,
            )

            if key_padding_mask is not None:
                self.assertTrue(
                    torch.all(torch.eq(key_padding_mask, c[2])),
                    f'Unexpected resultant key padding mask: {key_padding_mask}'
                    f' given current: {c[0]} and previous: {c[1]}',
                )
                self.assertEqual(key_padding_mask.size(0), bsz)
                self.assertEqual(key_padding_mask.size(1), src_len)
            else:
                self.assertIsNone(c[2])


if __name__ == '__main__':
    unittest.main()
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
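A sketch of calling the helper directly on the "both padding masks" case above; the expected output comes straight from that case's table:

import torch
from fairseq.modules.multihead_attention import MultiheadAttention

mask = MultiheadAttention._append_prev_key_padding_mask(
    torch.tensor([[1]]).bool(),        # current step is padded
    torch.tensor([[0, 1, 0]]).bool(),  # previous steps' mask
    batch_size=1,
    src_len=4,
    static_kv=False,
)
# mask == tensor([[False, True, False, True]]), i.e. [0, 1, 0, 1]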
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/tests/test_utils.py
tests/test_utils.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import unittest

import torch

from fairseq import utils


class TestUtils(unittest.TestCase):

    def test_convert_padding_direction(self):
        pad = 1
        left_pad = torch.LongTensor([
            [2, 3, 4, 5, 6],
            [1, 7, 8, 9, 10],
            [1, 1, 1, 11, 12],
        ])
        right_pad = torch.LongTensor([
            [2, 3, 4, 5, 6],
            [7, 8, 9, 10, 1],
            [11, 12, 1, 1, 1],
        ])

        self.assertAlmostEqual(
            right_pad,
            utils.convert_padding_direction(
                left_pad,
                pad,
                left_to_right=True,
            ),
        )
        self.assertAlmostEqual(
            left_pad,
            utils.convert_padding_direction(
                right_pad,
                pad,
                right_to_left=True,
            ),
        )

    def test_make_positions(self):
        pad = 1
        left_pad_input = torch.LongTensor([
            [9, 9, 9, 9, 9],
            [1, 9, 9, 9, 9],
            [1, 1, 1, 9, 9],
        ])
        left_pad_output = torch.LongTensor([
            [2, 3, 4, 5, 6],
            [1, 2, 3, 4, 5],
            [1, 1, 1, 2, 3],
        ])
        right_pad_input = torch.LongTensor([
            [9, 9, 9, 9, 9],
            [9, 9, 9, 9, 1],
            [9, 9, 1, 1, 1],
        ])
        right_pad_output = torch.LongTensor([
            [2, 3, 4, 5, 6],
            [2, 3, 4, 5, 1],
            [2, 3, 1, 1, 1],
        ])

        self.assertAlmostEqual(
            left_pad_output,
            utils.make_positions(left_pad_input, pad),
        )
        self.assertAlmostEqual(
            right_pad_output,
            utils.make_positions(right_pad_input, pad),
        )

    def test_clip_grad_norm_(self):
        params = torch.nn.Parameter(torch.zeros(5)).requires_grad_(False)
        grad_norm = utils.clip_grad_norm_(params, 1.0)
        self.assertTrue(torch.is_tensor(grad_norm))
        self.assertEqual(grad_norm, 0.0)

        params = [torch.nn.Parameter(torch.zeros(5)) for i in range(3)]
        for p in params:
            p.grad = torch.full((5,), fill_value=2.)
        grad_norm = utils.clip_grad_norm_(params, 1.0)
        exp_grad_norm = torch.full((15,), fill_value=2.).norm()
        self.assertTrue(torch.is_tensor(grad_norm))
        self.assertEqual(grad_norm, exp_grad_norm)

        grad_norm = utils.clip_grad_norm_(params, 1.0)
        self.assertAlmostEqual(grad_norm, torch.tensor(1.0))

    def test_resolve_max_positions_with_tuple(self):
        resolved = utils.resolve_max_positions(None, (2000, 100, 2000), 12000)
        self.assertEqual(resolved, (2000, 100, 2000))

    def assertAlmostEqual(self, t1, t2):
        self.assertEqual(t1.size(), t2.size(), "size mismatch")
        self.assertLess(utils.item((t1 - t2).abs().max()), 1e-4)


if __name__ == '__main__':
    unittest.main()
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
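A quick arithmetic check, added for reference, of the expected gradient norm in test_clip_grad_norm_: fifteen gradient entries of 2.0 give a global norm of sqrt(15 * 4):

import math

# Three 5-element parameters with every gradient entry equal to 2.0:
expected = math.sqrt(15 * 2.0 ** 2)  # ~7.746, equals torch.full((15,), 2.).norm()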
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/tests/test_reproducibility.py
tests/test_reproducibility.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import json
import os
import tempfile
import unittest

import torch

from . import test_binaries


class TestReproducibility(unittest.TestCase):

    def _test_reproducibility(
        self,
        name,
        extra_flags=None,
        delta=0.0001,
        resume_checkpoint='checkpoint1.pt',
        max_epoch=3,
    ):
        def get_last_log_stats_containing_string(log_records, search_string):
            for log_record in log_records[::-1]:
                if search_string in log_record.msg:
                    return json.loads(log_record.msg)

        if extra_flags is None:
            extra_flags = []

        with tempfile.TemporaryDirectory(name) as data_dir:
            with self.assertLogs() as logs:
                test_binaries.create_dummy_data(data_dir)
                test_binaries.preprocess_translation_data(data_dir)

            # train epochs 1 and 2 together
            with self.assertLogs() as logs:
                test_binaries.train_translation_model(
                    data_dir, 'fconv_iwslt_de_en', [
                        '--dropout', '0.0',
                        '--log-format', 'json',
                        '--log-interval', '1',
                        '--max-epoch', str(max_epoch),
                    ] + extra_flags,
                )
            train_log = get_last_log_stats_containing_string(logs.records, 'train_loss')
            valid_log = get_last_log_stats_containing_string(logs.records, 'valid_loss')

            # train epoch 2, resuming from previous checkpoint 1
            os.rename(
                os.path.join(data_dir, resume_checkpoint),
                os.path.join(data_dir, 'checkpoint_last.pt'),
            )
            with self.assertLogs() as logs:
                test_binaries.train_translation_model(
                    data_dir, 'fconv_iwslt_de_en', [
                        '--dropout', '0.0',
                        '--log-format', 'json',
                        '--log-interval', '1',
                        '--max-epoch', str(max_epoch),
                    ] + extra_flags,
                )
            train_res_log = get_last_log_stats_containing_string(logs.records, 'train_loss')
            valid_res_log = get_last_log_stats_containing_string(logs.records, 'valid_loss')

            for k in ['train_loss', 'train_ppl', 'train_num_updates', 'train_gnorm']:
                self.assertAlmostEqual(float(train_log[k]), float(train_res_log[k]), delta=delta)
            for k in ['valid_loss', 'valid_ppl', 'valid_num_updates', 'valid_best_loss']:
                self.assertAlmostEqual(float(valid_log[k]), float(valid_res_log[k]), delta=delta)

    def test_reproducibility(self):
        self._test_reproducibility('test_reproducibility')

    @unittest.skipIf(not torch.cuda.is_available(), 'test requires a GPU')
    def test_reproducibility_fp16(self):
        self._test_reproducibility('test_reproducibility_fp16', [
            '--fp16',
            '--fp16-init-scale', '4096',
        ], delta=0.011)

    @unittest.skipIf(not torch.cuda.is_available(), 'test requires a GPU')
    def test_reproducibility_memory_efficient_fp16(self):
        self._test_reproducibility('test_reproducibility_memory_efficient_fp16', [
            '--memory-efficient-fp16',
            '--fp16-init-scale', '4096',
        ])

    def test_mid_epoch_reproducibility(self):
        self._test_reproducibility(
            'test_mid_epoch_reproducibility',
            ['--save-interval-updates', '3'],
            resume_checkpoint='checkpoint_1_3.pt',
            max_epoch=1,
        )


if __name__ == '__main__':
    unittest.main()
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/tests/test_sequence_generator.py
tests/test_sequence_generator.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import argparse
import tempfile
import unittest

import tests.utils as test_utils
import torch
from fairseq import search
from fairseq.data.dictionary import Dictionary
from fairseq.models.transformer import TransformerModel
from fairseq.sequence_generator import SequenceGenerator, EnsembleModel
from fairseq.tasks.fairseq_task import LegacyFairseqTask


DEFAULT_TEST_VOCAB_SIZE = 100


class DummyTask(LegacyFairseqTask):
    def __init__(self, args):
        super().__init__(args)
        self.dictionary = get_dummy_dictionary()
        if getattr(self.args, "ctc", False):
            self.dictionary.add_symbol("<ctc_blank>")
        self.src_dict = self.dictionary
        self.tgt_dict = self.dictionary

    @property
    def source_dictionary(self):
        return self.src_dict

    @property
    def target_dictionary(self):
        return self.dictionary


def get_dummy_dictionary(vocab_size=DEFAULT_TEST_VOCAB_SIZE):
    dummy_dict = Dictionary()
    # add dummy symbol to satisfy vocab size
    for id, _ in enumerate(range(vocab_size)):
        dummy_dict.add_symbol("{}".format(id), 1000)
    return dummy_dict


def get_dummy_task_and_parser():
    """
    To build a fairseq model, we need a dummy parser and task. This function
    is used to create a dummy task and parser to facilitate model/criterion
    tests.

    Note: we use DummyTask as the dummy task here. You may want to use another
    task by providing another function.
    """
    parser = argparse.ArgumentParser(
        description="test_dummy_s2s_task", argument_default=argparse.SUPPRESS
    )
    DummyTask.add_args(parser)
    args = parser.parse_args([])
    task = DummyTask.setup_task(args)
    return task, parser


class TestJitSequenceGeneratorBase(unittest.TestCase):

    def setUp(self):
        self.task, self.parser = get_dummy_task_and_parser()
        eos = self.task.tgt_dict.eos()
        src_tokens = torch.randint(3, 50, (2, 10)).long()
        src_tokens = torch.cat((src_tokens, torch.LongTensor([[eos], [eos]])), -1)
        src_lengths = torch.LongTensor([2, 10])
        self.sample = {
            "net_input": {"src_tokens": src_tokens, "src_lengths": src_lengths}
        }
        TransformerModel.add_args(self.parser)
        args = self.parser.parse_args([])
        args.encoder_layers = 2
        args.decoder_layers = 1
        self.transformer_model = TransformerModel.build_model(args, self.task)

    def assertOutputEqual(self, hypo, pos_probs):
        pos_scores = torch.FloatTensor(pos_probs).log()
        self.assertTensorSizeEqual(hypo["positional_scores"], pos_scores)
        self.assertTensorSizeEqual(pos_scores.numel(), hypo["tokens"].numel())

    def assertTensorSizeEqual(self, t1, t2):
        self.assertEqual(t1.size(), t2.size(), "size mismatch")

    def assertAlmostEqual(self, t1, t2):
        self.assertEqual(t1.size(), t2.size(), "size mismatch")
        self.assertLess((t1 - t2).abs().max(), 1e-4)

    def assertTensorEqual(self, t1, t2):
        self.assertEqual(t1.size(), t2.size(), "size mismatch")
        self.assertEqual(t1.ne(t2).long().sum(), 0)

    def assertHypoEqual(self, h1, h2):
        "Check two hypos are equal"
        self.assertTensorEqual(h1["tokens"], h2["tokens"])
        self.assertAlmostEqual(h1["positional_scores"], h2["positional_scores"])
        self.assertLess(abs(h1["score"] - h2["score"]), 1e-6)
        self.assertAlmostEqual(h1["attention"], h2["attention"])

    def _test_save_and_load(self, scripted_module):
        with tempfile.NamedTemporaryFile() as f:
            scripted_module.save(f.name)
            torch.jit.load(f.name)


class TestJitSequenceGenerator(TestJitSequenceGeneratorBase):

    @unittest.skipIf(
        torch.__version__ < "1.6.0", "Targeting OSS scriptability for the 1.6 release"
    )
    def test_export_transformer(self):
        model = self.transformer_model
        torch.jit.script(model)

    @unittest.skipIf(
        torch.__version__ < "1.6.0", "Targeting OSS scriptability for the 1.6 release"
    )
    def test_ensemble_sequence_generator(self):
        model = self.transformer_model
        generator = SequenceGenerator(
            [model], self.task.tgt_dict, beam_size=2, no_repeat_ngram_size=2
        )
        scripted_model = torch.jit.script(generator)
        self._test_save_and_load(scripted_model)


class TestJitEnsemble(TestJitSequenceGeneratorBase):

    @unittest.skipIf(
        torch.__version__ < "1.6.0", "Targeting OSS scriptability for the 1.6 release"
    )
    def test_export_ensemble_model(self):
        model = self.transformer_model
        ensemble_models = EnsembleModel([model])
        torch.jit.script(ensemble_models)


class TestExportSearch(unittest.TestCase):

    def setUp(self):
        task, _ = get_dummy_task_and_parser()
        self.tgt_dict = task.tgt_dict
        self.min_top1_prob = 0.4

    def test_export_diverse_bs(self):
        search_strategy = search.DiverseBeamSearch(
            self.tgt_dict, num_groups=2, diversity_strength=0.0
        )
        torch.jit.script(search_strategy)

    def test_export_sampling(self):
        low_sampling_topp = self.min_top1_prob / 2.0
        search_strategy = search.Sampling(
            self.tgt_dict, sampling_topp=low_sampling_topp
        )
        torch.jit.script(search_strategy)

    def test_export_diverse_siblings_search(self):
        search_strategy = search.DiverseSiblingsSearch(
            self.tgt_dict, diversity_rate=0.5
        )
        torch.jit.script(search_strategy)


class TestSequenceGeneratorBase(unittest.TestCase):

    def assertHypoTokens(self, hypo, tokens):
        self.assertTensorEqual(hypo["tokens"], torch.LongTensor(tokens))

    def assertHypoScore(self, hypo, pos_probs, normalized=True, lenpen=1.0):
        pos_scores = torch.FloatTensor(pos_probs).log()
        self.assertAlmostEqual(hypo["positional_scores"], pos_scores)
        self.assertEqual(pos_scores.numel(), hypo["tokens"].numel())
        score = pos_scores.sum()
        if normalized:
            score /= pos_scores.numel() ** lenpen
        self.assertLess(abs(score - hypo["score"]), 1e-6)

    def assertAlmostEqual(self, t1, t2):
        self.assertEqual(t1.size(), t2.size(), "size mismatch")
        self.assertLess((t1 - t2).abs().max(), 1e-4)

    def assertTensorEqual(self, t1, t2):
        self.assertEqual(t1.size(), t2.size(), "size mismatch")
        self.assertEqual(t1.ne(t2).long().sum(), 0)


class TestSequenceGenerator(TestSequenceGeneratorBase):

    def setUp(self):
        self.tgt_dict, self.w1, self.w2, src_tokens, src_lengths, self.model = (
            test_utils.sequence_generator_setup()
        )
        self.sample = {
            "net_input": {"src_tokens": src_tokens, "src_lengths": src_lengths}
        }

    def test_with_normalization(self):
        generator = SequenceGenerator([self.model], self.tgt_dict, beam_size=2)
        hypos = generator.forward(self.sample)
        eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2
        # sentence 1, beam 1
        self.assertHypoTokens(hypos[0][0], [w1, eos])
        self.assertHypoScore(hypos[0][0], [0.9, 1.0])
        # sentence 1, beam 2
        self.assertHypoTokens(hypos[0][1], [w2, w1, w2, eos])
        self.assertHypoScore(hypos[0][1], [0.1, 0.9, 0.9, 1.0])
        # sentence 2, beam 1
        self.assertHypoTokens(hypos[1][0], [w1, w2, w1, eos])
        self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.4, 1.0])
        # sentence 2, beam 2
        self.assertHypoTokens(hypos[1][1], [w1, w2, eos])
        self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.6])

    def test_without_normalization(self):
        # Sentence 1: unchanged from the normalized case
        # Sentence 2: beams swap order
        generator = SequenceGenerator(
            [self.model], self.tgt_dict, beam_size=2, normalize_scores=False
        )
        hypos = generator.forward(self.sample)
        eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2
        # sentence 1, beam 1
        self.assertHypoTokens(hypos[0][0], [w1, eos])
        self.assertHypoScore(hypos[0][0], [0.9, 1.0], normalized=False)
        # sentence 1, beam 2
        self.assertHypoTokens(hypos[0][1], [w2, w1, w2, eos])
        self.assertHypoScore(hypos[0][1], [0.1, 0.9, 0.9, 1.0], normalized=False)
        # sentence 2, beam 1
        self.assertHypoTokens(hypos[1][0], [w1, w2, eos])
        self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.6], normalized=False)
        # sentence 2, beam 2
        self.assertHypoTokens(hypos[1][1], [w1, w2, w1, eos])
        self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.4, 1.0], normalized=False)

    def test_with_lenpen_favoring_short_hypos(self):
        lenpen = 0.6
        generator = SequenceGenerator(
            [self.model], self.tgt_dict, beam_size=2, len_penalty=lenpen
        )
        hypos = generator.forward(self.sample)
        eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2
        # sentence 1, beam 1
        self.assertHypoTokens(hypos[0][0], [w1, eos])
        self.assertHypoScore(hypos[0][0], [0.9, 1.0], lenpen=lenpen)
        # sentence 1, beam 2
        self.assertHypoTokens(hypos[0][1], [w2, w1, w2, eos])
        self.assertHypoScore(hypos[0][1], [0.1, 0.9, 0.9, 1.0], lenpen=lenpen)
        # sentence 2, beam 1
        self.assertHypoTokens(hypos[1][0], [w1, w2, eos])
        self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.6], lenpen=lenpen)
        # sentence 2, beam 2
        self.assertHypoTokens(hypos[1][1], [w1, w2, w1, eos])
        self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.4, 1.0], lenpen=lenpen)

    def test_with_lenpen_favoring_long_hypos(self):
        lenpen = 5.0
        generator = SequenceGenerator(
            [self.model], self.tgt_dict, beam_size=2, len_penalty=lenpen
        )
        hypos = generator.forward(self.sample)
        eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2
        # sentence 1, beam 1
        self.assertHypoTokens(hypos[0][0], [w2, w1, w2, eos])
        self.assertHypoScore(hypos[0][0], [0.1, 0.9, 0.9, 1.0], lenpen=lenpen)
        # sentence 1, beam 2
        self.assertHypoTokens(hypos[0][1], [w1, eos])
        self.assertHypoScore(hypos[0][1], [0.9, 1.0], lenpen=lenpen)
        # sentence 2, beam 1
        self.assertHypoTokens(hypos[1][0], [w1, w2, w1, eos])
        self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.4, 1.0], lenpen=lenpen)
        # sentence 2, beam 2
        self.assertHypoTokens(hypos[1][1], [w1, w2, eos])
        self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.6], lenpen=lenpen)

    def test_maxlen(self):
        generator = SequenceGenerator(
            [self.model], self.tgt_dict, beam_size=2, max_len_b=2
        )
        hypos = generator.forward(self.sample)
        eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2
        # sentence 1, beam 1
        self.assertHypoTokens(hypos[0][0], [w1, eos])
        self.assertHypoScore(hypos[0][0], [0.9, 1.0])
        # sentence 1, beam 2
        self.assertHypoTokens(hypos[0][1], [w2, w2, eos])
        self.assertHypoScore(hypos[0][1], [0.1, 0.1, 0.6])
        # sentence 2, beam 1
        self.assertHypoTokens(hypos[1][0], [w1, w2, eos])
        self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.6])
        # sentence 2, beam 2
        self.assertHypoTokens(hypos[1][1], [w2, w2, eos])
        self.assertHypoScore(hypos[1][1], [0.3, 0.9, 0.01])

    def test_encoder_with_different_output_len(self):
        args = self.model.encoder.args
        task = test_utils.TestTranslationTask.setup_task(
            args, self.tgt_dict, self.tgt_dict
        )
        reshaping_model = test_utils.TestReshapingModel.build_model(args, task)
        generator = SequenceGenerator(
            [reshaping_model], self.tgt_dict, beam_size=2, max_len_b=2
        )
        hypos = generator.forward(self.sample)
        for sent in [0, 1]:
            for beam in [0, 1]:
                assert hypos[sent][beam]['attention'] is not None

    def test_generation_with_additional_input(self):
        args = self.model.encoder.args
        task = test_utils.TestTranslationTask.setup_task(
            args, self.tgt_dict, self.tgt_dict
        )
        add_input_model = test_utils.TestAdditionalInputModel.build_model(args, task)
        generator = SequenceGenerator([add_input_model], self.tgt_dict, beam_size=2)
        sample = self.sample.copy()
        sample['net_input']['fancy_other_input'] = sample['net_input']['src_tokens']
        hypos = generator.forward(sample)
        eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2
        # sentence 1, beam 1
        self.assertHypoTokens(hypos[0][0], [w1, eos])
        self.assertHypoScore(hypos[0][0], [0.9, 1.0])


class TestDiverseBeamSearch(TestSequenceGeneratorBase):

    def setUp(self):
        # construct dummy dictionary
        d = test_utils.dummy_dictionary(vocab_size=2)
        self.assertEqual(d.pad(), 1)
        self.assertEqual(d.eos(), 2)
        self.assertEqual(d.unk(), 3)
        self.eos = d.eos()
        self.w1 = 4
        self.w2 = 5

        # construct source data
        self.src_tokens = torch.LongTensor([
            [self.w1, self.w2, self.eos],
            [self.w1, self.w2, self.eos],
        ])
        self.src_lengths = torch.LongTensor([2, 2])

        args = argparse.Namespace()
        unk = 0.
        args.beam_probs = [
            # step 0:
            torch.FloatTensor([
                # eos      w1   w2
                # sentence 1:
                [0.0, unk, 0.9, 0.1],  # beam 1
                [0.0, unk, 0.9, 0.1],  # beam 2
                # sentence 2:
                [0.0, unk, 0.7, 0.3],
                [0.0, unk, 0.7, 0.3],
            ]),
            # step 1:
            torch.FloatTensor([
                # eos      w1   w2
                # sentence 1:
                [0.0, unk, 0.6, 0.4],
                [0.0, unk, 0.6, 0.4],
                # sentence 2:
                [0.25, unk, 0.35, 0.4],
                [0.25, unk, 0.35, 0.4],
            ]),
            # step 2:
            torch.FloatTensor([
                # eos      w1   w2
                # sentence 1:
                [1.0, unk, 0.0, 0.0],
                [1.0, unk, 0.0, 0.0],
                # sentence 2:
                [0.9, unk, 0.1, 0.0],
                [0.9, unk, 0.1, 0.0],
            ]),
        ]

        task = test_utils.TestTranslationTask.setup_task(args, d, d)
        self.model = task.build_model(args)
        self.tgt_dict = task.target_dictionary

    def test_diverse_beam_search(self):
        search_strategy = search.DiverseBeamSearch(
            self.tgt_dict, num_groups=2, diversity_strength=0.
        )
        generator = SequenceGenerator(
            [self.model], self.tgt_dict, beam_size=2, search_strategy=search_strategy,
        )
        sample = {
            'net_input': {
                'src_tokens': self.src_tokens,
                'src_lengths': self.src_lengths,
            }
        }
        hypos = generator.forward(sample)
        eos, w1, w2 = self.eos, self.w1, self.w2
        # sentence 1, beam 1
        self.assertHypoTokens(hypos[0][0], [w1, w1, eos])
        self.assertHypoScore(hypos[0][0], [0.9, 0.6, 1.0])
        # sentence 1, beam 2
        self.assertHypoTokens(hypos[0][1], [w1, w1, eos])
        self.assertHypoScore(hypos[0][1], [0.9, 0.6, 1.0])
        # sentence 2, beam 1
        self.assertHypoTokens(hypos[1][0], [w1, w2, eos])
        self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.9])
        # sentence 2, beam 2
        self.assertHypoTokens(hypos[1][1], [w1, w2, eos])
        self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.9])


class TestDiverseSiblingsSearch(TestDiverseBeamSearch):

    def assertHypoScore(
        self, hypo, pos_probs, sibling_rank, diversity_rate, normalized=True, lenpen=1.0
    ):
        pos_scores = torch.FloatTensor(pos_probs).log()
        pos_scores.sub_(torch.Tensor(sibling_rank) * diversity_rate)
        self.assertAlmostEqual(hypo["positional_scores"], pos_scores)
        self.assertEqual(pos_scores.numel(), hypo["tokens"].numel())
        score = pos_scores.sum()
        if normalized:
            score /= pos_scores.numel() ** lenpen
        self.assertLess(abs(score - hypo["score"]), 1e-6)

    def test_diverse_beam_search(self):
        search_strategy = search.DiverseSiblingsSearch(
            self.tgt_dict, diversity_rate=0.5
        )
        generator = SequenceGenerator(
            [self.model], self.tgt_dict, beam_size=2, search_strategy=search_strategy
        )
        sample = {
            "net_input": {
                "src_tokens": self.src_tokens,
                "src_lengths": self.src_lengths,
            }
        }
        hypos = generator.forward(sample)
        eos, w1, w2 = self.eos, self.w1, self.w2
        # sentence 1, beam 1
        self.assertHypoTokens(hypos[0][0], [w1, w1, eos])
        self.assertHypoScore(hypos[0][0], [0.9, 0.6, 1.0], [0, 1, 1], 0.5)
        # sentence 1, beam 2
        self.assertHypoTokens(hypos[0][1], [w1, w2, eos])
        self.assertHypoScore(hypos[0][1], [0.9, 0.4, 1.0], [0, 2, 1], 0.5)
        # sentence 2, beam 1
        self.assertHypoTokens(hypos[1][0], [w1, w2, eos])
        self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.9], [0, 1, 1], 0.5)
        # sentence 2, beam 2
        self.assertHypoTokens(hypos[1][1], [w1, w1, eos])
        self.assertHypoScore(hypos[1][1], [0.7, 0.35, 0.9], [0, 2, 1], 0.5)


class TestTopPSamplingSearch(TestSequenceGeneratorBase):

    def setUp(self):
        # construct dummy dictionary
        d = test_utils.dummy_dictionary(vocab_size=2)
        self.assertEqual(d.pad(), 1)
        self.assertEqual(d.eos(), 2)
        self.assertEqual(d.unk(), 3)
        self.eos = d.eos()
        self.w1 = 4
        self.w2 = 5

        # construct source data
        self.src_tokens = torch.LongTensor([
            [self.w1, self.w2, self.eos],
            [self.w1, self.w2, self.eos],
        ])
        self.src_lengths = torch.LongTensor([2, 2])

        args = argparse.Namespace()
        unk = 0.
        # The minimal probability of top 2 tokens.
        self.min_top2_prob = 0.75
        # The minimal probability of the top 1 token.
        self.min_top1_prob = 0.4

        w1_prob = self.min_top1_prob
        w2_prob = self.min_top2_prob - self.min_top1_prob
        eos_prob = 1 - self.min_top2_prob

        args.beam_probs = [
            # step 0:
            torch.FloatTensor([
                # eos      w1   w2
                [0.0, unk, 1.0, 0.0],
                [0.0, unk, 1.0, 0.0],
                [0.0, unk, 1.0, 0.0],
                [0.0, unk, 1.0, 0.0],
            ]),
            # step 1:
            torch.FloatTensor([
                # eos           w1       w2
                [eos_prob, unk, w1_prob, w2_prob],
                [eos_prob, unk, w1_prob, w2_prob],
                [eos_prob, unk, w1_prob, w2_prob],
                [eos_prob, unk, w1_prob, w2_prob],
            ]),
            # step 2:
            torch.FloatTensor([
                # eos      w1   w2
                [1.0, unk, 0.0, 0.0],
                [1.0, unk, 0.0, 0.0],
                [1.0, unk, 0.0, 0.0],
                [1.0, unk, 0.0, 0.0],
            ]),
        ]

        task = test_utils.TestTranslationTask.setup_task(args, d, d)
        self.model = task.build_model(args)
        self.tgt_dict = task.target_dictionary

    def test_topp_sampling_search_low_prob(self):
        # Given a prob low enough to top-P sampling, we expect only the top
        # 1 token to be sampled, which always results in the same output.
        low_sampling_topp = self.min_top1_prob / 2.0
        search_strategy = search.Sampling(
            self.tgt_dict, sampling_topp=low_sampling_topp
        )
        generator = SequenceGenerator(
            [self.model], self.tgt_dict, beam_size=2, search_strategy=search_strategy
        )
        sample = {
            'net_input': {
                'src_tokens': self.src_tokens,
                'src_lengths': self.src_lengths,
            }
        }
        hypos = generator.forward(sample)
        eos, w1 = self.eos, self.w1
        # sentence 1, beam 1
        self.assertHypoTokens(hypos[0][0], [w1, w1, eos])
        self.assertHypoScore(hypos[0][0], [1.0, 0.4, 1.0])
        # sentence 1, beam 2
        self.assertHypoTokens(hypos[0][1], [w1, w1, eos])
        self.assertHypoScore(hypos[0][1], [1.0, 0.4, 1.0])
        # sentence 2, beam 1
        self.assertHypoTokens(hypos[1][0], [w1, w1, eos])
        self.assertHypoScore(hypos[1][0], [1.0, 0.4, 1.0])
        # sentence 2, beam 2
        self.assertHypoTokens(hypos[1][1], [w1, w1, eos])
        self.assertHypoScore(hypos[1][1], [1.0, 0.4, 1.0])

    def test_topp_sampling_search_high_prob(self):
        # Given a prob high enough to top-P sampling, any of the top 2
        # tokens could be sampled. This can cause different outputs.
        high_sampling_topp = (self.min_top1_prob + self.min_top2_prob) / 2.0
        search_strategy = search.Sampling(
            self.tgt_dict, sampling_topp=high_sampling_topp
        )
        generator = SequenceGenerator(
            [self.model], self.tgt_dict, beam_size=2, search_strategy=search_strategy
        )
        sample = {
            'net_input': {
                'src_tokens': self.src_tokens,
                'src_lengths': self.src_lengths,
            }
        }
        hypos = generator.forward(sample)
        eos, w1, w2 = self.eos, self.w1, self.w2
        # sentence 1, beam 1
        self.assertTrue(
            self.hypoTokens(hypos[0][0], [w1, w1, eos])
            or self.hypoTokens(hypos[0][0], [w1, w2, eos])
        )
        self.assertTrue(
            self.hypoScore(hypos[0][0], [1.0, 0.4, 1.0])
            or self.hypoScore(hypos[0][0], [1.0, 0.35, 1.0])
        )
        # sentence 1, beam 2
        self.assertTrue(
            self.hypoTokens(hypos[0][1], [w1, w1, eos])
            or self.hypoTokens(hypos[0][1], [w1, w2, eos])
        )
        self.assertTrue(
            self.hypoScore(hypos[0][1], [1.0, 0.4, 1.0])
            or self.hypoScore(hypos[0][1], [1.0, 0.35, 1.0])
        )
        # sentence 2, beam 1
        self.assertTrue(
            self.hypoTokens(hypos[1][0], [w1, w1, eos])
            or self.hypoTokens(hypos[1][0], [w1, w2, eos])
        )
        self.assertTrue(
            self.hypoScore(hypos[1][0], [1.0, 0.4, 1.0])
            or self.hypoScore(hypos[1][0], [1.0, 0.35, 1.0])
        )
        # sentence 2, beam 2
        self.assertTrue(
            self.hypoTokens(hypos[1][1], [w1, w1, eos])
            or self.hypoTokens(hypos[1][1], [w1, w2, eos])
        )
        self.assertTrue(
            self.hypoScore(hypos[1][1], [1.0, 0.4, 1.0])
            or self.hypoScore(hypos[1][1], [1.0, 0.35, 1.0])
        )

    def hypoTokens(self, hypo, tokens):
        return self.tensorEqual(hypo['tokens'], torch.LongTensor(tokens))

    def hypoScore(self, hypo, pos_probs, normalized=True, lenpen=1.):
        pos_scores = torch.FloatTensor(pos_probs).log()
        if not self.almostEqual(hypo['positional_scores'], pos_scores):
            return False
        if pos_scores.numel() != hypo['tokens'].numel():
            return False
        score = pos_scores.sum()
        if normalized:
            score /= pos_scores.numel() ** lenpen
        return abs(score - hypo['score']) < 1e-6

    def almostEqual(self, t1, t2):
        return t1.size() == t2.size() and (t1 - t2).abs().max() < 1e-4

    def tensorEqual(self, t1, t2):
        return t1.size() == t2.size() and t1.ne(t2).long().sum() == 0


if __name__ == "__main__":
    unittest.main()
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
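A note on the top-p tests above: everything hinges on where the sampling_topp budget falls relative to the cumulative probability of the highest-scoring tokens, which is why a budget below min_top1_prob pins the output and a larger budget admits the top-2 set. Below is a minimal sketch of nucleus filtering in plain PyTorch; it illustrates the general technique only, is not fairseq's search.Sampling implementation, and top_p_filter is a hypothetical helper name.

import torch

def top_p_filter(probs: torch.Tensor, p: float) -> torch.Tensor:
    # probs: (vocab,) distribution; keep the smallest set of tokens whose
    # cumulative mass reaches p, then renormalize.
    sorted_probs, sorted_idx = probs.sort(descending=True)
    cumulative = sorted_probs.cumsum(dim=-1)
    # mask tokens that come after the cumulative mass has reached p;
    # subtracting sorted_probs first always keeps at least the top-1 token
    cutoff = cumulative - sorted_probs >= p
    sorted_probs[cutoff] = 0.0
    filtered = torch.zeros_like(probs).scatter_(-1, sorted_idx, sorted_probs)
    return filtered / filtered.sum()

probs = torch.tensor([0.4, 0.35, 0.25])  # mirrors min_top1_prob / min_top2_prob above
print(top_p_filter(probs, 0.2))    # only the 0.4 token survives
print(top_p_filter(probs, 0.575))  # the top-2 tokens survive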
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/tests/test_fp16_optimizer.py
tests/test_fp16_optimizer.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import copy import unittest import torch from fairseq.optim.fp16_optimizer import FP16Optimizer, MemoryEfficientFP16Optimizer @unittest.skipIf(not torch.cuda.is_available(), 'test requires a GPU') class TestGradientScaling(unittest.TestCase): def setUp(self): self.x = torch.tensor([2.0]).cuda().half() weight = 3.0 bias = 5.0 self.error = 1.0 self.target = torch.tensor([self.x * weight + bias + self.error]).cuda().half() self.loss_fn = torch.nn.L1Loss() self.model = torch.nn.Linear(1, 1) self.model.weight.data = torch.tensor([[weight]]) self.model.bias.data = torch.tensor([bias]) self.model.cuda().half() self.params = list(self.model.parameters()) self.namespace_dls = argparse.Namespace( optimizer='adam', lr=[0.1], adam_betas='(0.9, 0.999)', adam_eps=1e-8, weight_decay=0.0, fp16_init_scale=1, fp16_scale_window=1, fp16_scale_tolerance=1, threshold_loss_scale=1, min_loss_scale=1e-4 ) def run_iter(self, model, params, optimizer): optimizer.zero_grad() y = model(self.x) loss = self.loss_fn(y, self.target) optimizer.backward(loss) self.assertEqual(loss, torch.tensor(1., device='cuda:0', dtype=torch.float16)) grad_norm = optimizer.clip_grad_norm(0) self.assertAlmostEqual(grad_norm.item(), 2.2361, 4) optimizer.step() self.assertEqual(model.weight, torch.tensor([[3.0996]], device='cuda:0', dtype=torch.float16, requires_grad=True)) self.assertEqual(model.bias, torch.tensor([5.1016], device='cuda:0', dtype=torch.float16, requires_grad=True)) self.assertEqual(optimizer.scaler.loss_scale, 2.) def test_mixed_precision(self): model = copy.deepcopy(self.model) params = list(model.parameters()) optimizer = FP16Optimizer.build_optimizer(self.namespace_dls, params) self.run_iter(model, params, optimizer) self.assertTrue(torch.all(optimizer.fp32_params.eq(torch.tensor([3.1000, 5.1000], device='cuda:0', requires_grad=True)))) def test_memory_efficient(self): model = copy.deepcopy(self.model) params = list(model.parameters()) optimizer = MemoryEfficientFP16Optimizer.build_optimizer(self.namespace_dls, params) self.run_iter(model, params, optimizer) if __name__ == '__main__': unittest.main()
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
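The FP16 optimizer test above exercises loss scaling end to end: scale the loss before backward, unscale the gradients into an fp32 master copy, clip, and step. As a rough sketch of that recipe, assuming a toy fp32 master-copy setup (fairseq's FP16Optimizer adds overflow detection and dynamic rescaling on top of this idea):

import torch

model = torch.nn.Linear(1, 1)
# fp32 "master" copies of the parameters, updated instead of the model directly
master_params = [p.detach().clone().float().requires_grad_() for p in model.parameters()]
loss_scale = 2.0 ** 7

x, target = torch.tensor([[2.0]]), torch.tensor([[12.0]])
loss = torch.nn.functional.l1_loss(model(x), target)
(loss * loss_scale).backward()               # scaled backward keeps tiny grads representable
for p, mp in zip(model.parameters(), master_params):
    mp.grad = p.grad.float() / loss_scale    # unscale into the fp32 master grads
torch.nn.utils.clip_grad_norm_(master_params, max_norm=0.1)
with torch.no_grad():
    for p, mp in zip(model.parameters(), master_params):
        mp -= 0.1 * mp.grad                  # SGD step on the fp32 master weights
        p.copy_(mp)                          # copy the result back into the model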
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/tests/utils.py
tests/utils.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import os import random import sys import torch import torch.nn.functional as F from io import StringIO from fairseq import options, utils from fairseq.data import Dictionary from fairseq.data.language_pair_dataset import collate from fairseq.models import ( FairseqEncoder, FairseqEncoderDecoderModel, FairseqIncrementalDecoder, ) from fairseq.models.fairseq_encoder import EncoderOut from fairseq.tasks import LegacyFairseqTask from fairseq.tasks import FairseqTask from fairseq_cli import ( generate, interactive, preprocess, train, validate, ) def dummy_dictionary(vocab_size, prefix='token_'): d = Dictionary() for i in range(vocab_size): token = prefix + str(i) d.add_symbol(token) d.finalize(padding_factor=1) # don't add extra padding symbols return d def dummy_dataloader( samples, padding_idx=1, eos_idx=2, batch_size=None, ): if batch_size is None: batch_size = len(samples) # add any missing data to samples for i, sample in enumerate(samples): if 'id' not in sample: sample['id'] = i # create dataloader dataset = TestDataset(samples) dataloader = torch.utils.data.DataLoader( dataset, batch_size=batch_size, collate_fn=(lambda samples: collate(samples, padding_idx, eos_idx)), ) return iter(dataloader) def sequence_generator_setup(): # construct dummy dictionary d = dummy_dictionary(vocab_size=2) eos = d.eos() w1 = 4 w2 = 5 # construct source data src_tokens = torch.LongTensor([[w1, w2, eos], [w1, w2, eos]]) src_lengths = torch.LongTensor([2, 2]) args = argparse.Namespace() unk = 0. args.beam_probs = [ # step 0: torch.FloatTensor([ # eos w1 w2 # sentence 1: [0.0, unk, 0.9, 0.1], # beam 1 [0.0, unk, 0.9, 0.1], # beam 2 # sentence 2: [0.0, unk, 0.7, 0.3], [0.0, unk, 0.7, 0.3], ]), # step 1: torch.FloatTensor([ # eos w1 w2 prefix # sentence 1: [1.0, unk, 0.0, 0.0], # w1: 0.9 (emit: w1 <eos>: 0.9*1.0) [0.0, unk, 0.9, 0.1], # w2: 0.1 # sentence 2: [0.25, unk, 0.35, 0.4], # w1: 0.7 (don't emit: w1 <eos>: 0.7*0.25) [0.00, unk, 0.10, 0.9], # w2: 0.3 ]), # step 2: torch.FloatTensor([ # eos w1 w2 prefix # sentence 1: [0.0, unk, 0.1, 0.9], # w2 w1: 0.1*0.9 [0.6, unk, 0.2, 0.2], # w2 w2: 0.1*0.1 (emit: w2 w2 <eos>: 0.1*0.1*0.6) # sentence 2: [0.60, unk, 0.4, 0.00], # w1 w2: 0.7*0.4 (emit: w1 w2 <eos>: 0.7*0.4*0.6) [0.01, unk, 0.0, 0.99], # w2 w2: 0.3*0.9 ]), # step 3: torch.FloatTensor([ # eos w1 w2 prefix # sentence 1: [1.0, unk, 0.0, 0.0], # w2 w1 w2: 0.1*0.9*0.9 (emit: w2 w1 w2 <eos>: 0.1*0.9*0.9*1.0) [1.0, unk, 0.0, 0.0], # w2 w1 w1: 0.1*0.9*0.1 (emit: w2 w1 w1 <eos>: 0.1*0.9*0.1*1.0) # sentence 2: [0.1, unk, 0.5, 0.4], # w2 w2 w2: 0.3*0.9*0.99 (emit: w2 w2 w2 <eos>: 0.3*0.9*0.99*0.1) [1.0, unk, 0.0, 0.0], # w1 w2 w1: 0.7*0.4*0.4 (emit: w1 w2 w1 <eos>: 0.7*0.4*0.4*1.0) ]), ] task = TestTranslationTask.setup_task(args, d, d) model = task.build_model(args) tgt_dict = task.target_dictionary return tgt_dict, w1, w2, src_tokens, src_lengths, model def create_dummy_data(data_dir, num_examples=100, maxlen=20, alignment=False): def _create_dummy_data(filename): data = torch.rand(num_examples * maxlen) data = 97 + torch.floor(26 * data).int() with open(os.path.join(data_dir, filename), 'w') as h: offset = 0 for _ in range(num_examples): ex_len = random.randint(1, maxlen) ex_str = ' '.join(map(chr, data[offset:offset+ex_len])) print(ex_str, file=h) offset += ex_len def _create_dummy_alignment_data(filename_src, 
filename_tgt, filename): with open(os.path.join(data_dir, filename_src), 'r') as src_f, \ open(os.path.join(data_dir, filename_tgt), 'r') as tgt_f, \ open(os.path.join(data_dir, filename), 'w') as h: for src, tgt in zip(src_f, tgt_f): src_len = len(src.split()) tgt_len = len(tgt.split()) avg_len = (src_len + tgt_len) // 2 num_alignments = random.randint(avg_len // 2, 2 * avg_len) src_indices = torch.floor(torch.rand(num_alignments) * src_len).int() tgt_indices = torch.floor(torch.rand(num_alignments) * tgt_len).int() ex_str = ' '.join(["{}-{}".format(src, tgt) for src, tgt in zip(src_indices, tgt_indices)]) print(ex_str, file=h) _create_dummy_data('train.in') _create_dummy_data('train.out') _create_dummy_data('valid.in') _create_dummy_data('valid.out') _create_dummy_data('test.in') _create_dummy_data('test.out') if alignment: _create_dummy_alignment_data('train.in', 'train.out', 'train.align') _create_dummy_alignment_data('valid.in', 'valid.out', 'valid.align') _create_dummy_alignment_data('test.in', 'test.out', 'test.align') def preprocess_lm_data(data_dir): preprocess_parser = options.get_preprocessing_parser() preprocess_args = preprocess_parser.parse_args([ '--only-source', '--trainpref', os.path.join(data_dir, 'train.out'), '--validpref', os.path.join(data_dir, 'valid.out'), '--testpref', os.path.join(data_dir, 'test.out'), '--destdir', data_dir, ]) preprocess.main(preprocess_args) def preprocess_translation_data(data_dir, extra_flags=None): preprocess_parser = options.get_preprocessing_parser() preprocess_args = preprocess_parser.parse_args( [ '--source-lang', 'in', '--target-lang', 'out', '--trainpref', os.path.join(data_dir, 'train'), '--validpref', os.path.join(data_dir, 'valid'), '--testpref', os.path.join(data_dir, 'test'), '--thresholdtgt', '0', '--thresholdsrc', '0', '--destdir', data_dir, ] + (extra_flags or []), ) preprocess.main(preprocess_args) def train_translation_model(data_dir, arch, extra_flags=None, task='translation', run_validation=False, lang_flags=None, extra_valid_flags=None): if lang_flags is None: lang_flags = [ '--source-lang', 'in', '--target-lang', 'out', ] train_parser = options.get_training_parser() train_args = options.parse_args_and_arch( train_parser, [ '--task', task, data_dir, '--save-dir', data_dir, '--arch', arch, '--optimizer', 'nag', '--lr', '0.05', '--max-tokens', '500', '--max-epoch', '1', '--no-progress-bar', '--distributed-world-size', '1', '--num-workers', 0, ] + lang_flags + (extra_flags or []), ) train.main(train_args) if run_validation: # test validation validate_parser = options.get_validation_parser() validate_args = options.parse_args_and_arch( validate_parser, [ '--task', task, data_dir, '--path', os.path.join(data_dir, 'checkpoint_last.pt'), '--valid-subset', 'valid', '--max-tokens', '500', '--no-progress-bar', ] + lang_flags + (extra_valid_flags or []) ) validate.main(validate_args) def generate_main(data_dir, extra_flags=None): if extra_flags is None: extra_flags = [ '--print-alignment', ] generate_parser = options.get_generation_parser() generate_args = options.parse_args_and_arch( generate_parser, [ data_dir, '--path', os.path.join(data_dir, 'checkpoint_last.pt'), '--beam', '3', '--batch-size', '64', '--max-len-b', '5', '--gen-subset', 'valid', '--no-progress-bar', ] + (extra_flags or []), ) # evaluate model in batch mode generate.main(generate_args) # evaluate model interactively generate_args.buffer_size = 0 generate_args.input = '-' generate_args.max_sentences = None orig_stdin = sys.stdin sys.stdin = StringIO('h e l l o\n') 
interactive.main(generate_args) sys.stdin = orig_stdin class TestDataset(torch.utils.data.Dataset): def __init__(self, data): super().__init__() self.data = data self.sizes = None def __getitem__(self, index): return self.data[index] def __len__(self): return len(self.data) class TestTranslationTask(LegacyFairseqTask): def __init__(self, args, src_dict, tgt_dict, model): super().__init__(args) self.src_dict = src_dict self.tgt_dict = tgt_dict self.model = model @classmethod def setup_task(cls, args, src_dict=None, tgt_dict=None, model=None): return cls(args, src_dict, tgt_dict, model) def build_model(self, args): return TestModel.build_model(args, self) @property def source_dictionary(self): return self.src_dict @property def target_dictionary(self): return self.tgt_dict class TestModel(FairseqEncoderDecoderModel): def __init__(self, encoder, decoder): super().__init__(encoder, decoder) @classmethod def build_model(cls, args, task): encoder = TestEncoder(args, task.source_dictionary) decoder = TestIncrementalDecoder(args, task.target_dictionary) return cls(encoder, decoder) class TestEncoder(FairseqEncoder): def __init__(self, args, dictionary): super().__init__(dictionary) self.args = args def forward(self, src_tokens, src_lengths=None, **kwargs): return EncoderOut( encoder_out=src_tokens, encoder_padding_mask=None, encoder_embedding=None, encoder_states=None, src_tokens=None, src_lengths=None, ) def reorder_encoder_out(self, encoder_out, new_order): return EncoderOut( encoder_out=encoder_out.encoder_out.index_select(0, new_order), encoder_padding_mask=None, encoder_embedding=None, encoder_states=None, src_tokens=None, src_lengths=None, ) class TestIncrementalDecoder(FairseqIncrementalDecoder): def __init__(self, args, dictionary): super().__init__(dictionary) assert hasattr(args, 'beam_probs') or hasattr(args, 'probs') args.max_decoder_positions = getattr(args, 'max_decoder_positions', 100) self.args = args def forward(self, prev_output_tokens, encoder_out=None, incremental_state=None): if incremental_state is not None: prev_output_tokens = prev_output_tokens[:, -1:] bbsz = prev_output_tokens.size(0) vocab = len(self.dictionary) src_len = encoder_out.encoder_out.size(1) tgt_len = prev_output_tokens.size(1) # determine number of steps if incremental_state is not None: # cache step number step = utils.get_incremental_state(self, incremental_state, 'step') if step is None: step = 0 utils.set_incremental_state(self, incremental_state, 'step', step + 1) steps = [step] else: steps = list(range(tgt_len)) # define output in terms of raw probs if hasattr(self.args, 'probs'): assert self.args.probs.dim() == 3, \ 'expected probs to have size bsz*steps*vocab' probs = self.args.probs.index_select(1, torch.LongTensor(steps)) else: probs = torch.FloatTensor(bbsz, len(steps), vocab).zero_() for i, step in enumerate(steps): # args.beam_probs gives the probability for every vocab element, # starting with eos, then unknown, and then the rest of the vocab if step < len(self.args.beam_probs): probs[:, i, self.dictionary.eos():] = self.args.beam_probs[step] else: probs[:, i, self.dictionary.eos()] = 1.0 # random attention attn = torch.rand(bbsz, tgt_len, src_len) dev = prev_output_tokens.device return probs.to(dev), {"attn": [attn.to(dev)]} def get_normalized_probs(self, net_output, log_probs, _): # the decoder returns probabilities directly probs = net_output[0] if log_probs: return probs.log() else: return probs def max_positions(self): return self.args.max_decoder_positions class 
TestReshapingEncoder(FairseqEncoder): def __init__(self, args, dictionary): super().__init__(dictionary) self.args = args def forward(self, src_tokens, src_lengths=None, **kwargs): b_sz, t_sz = src_tokens.shape padding_needed = t_sz % 2 x = src_tokens if padding_needed > 0: padding_needed = 2 - padding_needed x = F.pad(x, (0, padding_needed)) return EncoderOut( encoder_out=x.view(b_sz, -1, 2), encoder_padding_mask=None, encoder_embedding=None, encoder_states=None, src_tokens=None, src_lengths=None, ) def reorder_encoder_out(self, encoder_out, new_order): return EncoderOut( encoder_out=encoder_out.encoder_out.index_select(0, new_order), encoder_padding_mask=None, encoder_embedding=None, encoder_states=None, src_tokens=None, src_lengths=None, ) class TestReshapingModel(FairseqEncoderDecoderModel): def __init__(self, encoder, decoder): super().__init__(encoder, decoder) @classmethod def build_model(cls, args, task): encoder = TestReshapingEncoder(args, task.source_dictionary) decoder = TestIncrementalDecoder(args, task.target_dictionary) return cls(encoder, decoder) class TestAdditionalInputEncoder(FairseqEncoder): def __init__(self, args, dictionary): super().__init__(dictionary) self.args = args def forward(self, src_tokens, src_lengths=None, **kwargs): assert 'fancy_other_input' in kwargs assert kwargs['fancy_other_input'] is not None return EncoderOut( encoder_out=src_tokens, encoder_padding_mask=None, encoder_embedding=None, encoder_states=None, src_tokens=None, src_lengths=None, ) def reorder_encoder_out(self, encoder_out, new_order): return EncoderOut( encoder_out=encoder_out.encoder_out.index_select(0, new_order), encoder_padding_mask=None, encoder_embedding=None, encoder_states=None, src_tokens=None, src_lengths=None, ) class TestAdditionalInputModel(FairseqEncoderDecoderModel): def __init__(self, encoder, decoder): super().__init__(encoder, decoder) @classmethod def build_model(cls, args, task): encoder = TestAdditionalInputEncoder(args, task.source_dictionary) decoder = TestIncrementalDecoder(args, task.target_dictionary) return cls(encoder, decoder) def forward(self, src_tokens, src_lengths, prev_output_tokens, **kwargs): encoder_out = self.encoder( src_tokens, src_lengths=src_lengths, **kwargs) decoder_out = self.decoder( prev_output_tokens, encoder_out=encoder_out, **kwargs) return decoder_out
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
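One convention in tests/utils.py worth making explicit: dummy_dictionary relies on fairseq's Dictionary reserving the special symbols first, which is why tests throughout this suite can hard-code pad() == 1, eos() == 2, unk() == 3 and expect real tokens to start at index 4. A small demonstration using the real Dictionary class; the asserted values follow from its constructor defaults:

from fairseq.data import Dictionary

d = Dictionary()                    # reserves <s>=0, <pad>=1, </s>=2, <unk>=3
d.add_symbol('token_0')
d.add_symbol('token_1')
assert (d.pad(), d.eos(), d.unk()) == (1, 2, 3)
assert d.index('token_0') == 4      # the first user token lands at index 4
assert d.index('never_seen') == d.unk()   # unknown symbols map to unk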
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/tests/test_dictionary.py
tests/test_dictionary.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import io import tempfile import unittest import torch from fairseq.data import Dictionary class TestDictionary(unittest.TestCase): def test_finalize(self): txt = [ 'A B C D', 'B C D', 'C D', 'D', ] ref_ids1 = list(map(torch.IntTensor, [ [4, 5, 6, 7, 2], [5, 6, 7, 2], [6, 7, 2], [7, 2], ])) ref_ids2 = list(map(torch.IntTensor, [ [7, 6, 5, 4, 2], [6, 5, 4, 2], [5, 4, 2], [4, 2], ])) # build dictionary d = Dictionary() for line in txt: d.encode_line(line, add_if_not_exist=True) def get_ids(dictionary): ids = [] for line in txt: ids.append(dictionary.encode_line(line, add_if_not_exist=False)) return ids def assertMatch(ids, ref_ids): for toks, ref_toks in zip(ids, ref_ids): self.assertEqual(toks.size(), ref_toks.size()) self.assertEqual(0, (toks != ref_toks).sum().item()) ids = get_ids(d) assertMatch(ids, ref_ids1) # check finalized dictionary d.finalize() finalized_ids = get_ids(d) assertMatch(finalized_ids, ref_ids2) # write to disk and reload with tempfile.NamedTemporaryFile(mode='w') as tmp_dict: d.save(tmp_dict.name) d = Dictionary.load(tmp_dict.name) reload_ids = get_ids(d) assertMatch(reload_ids, ref_ids2) assertMatch(finalized_ids, reload_ids) def test_overwrite(self): # for example, Camembert overwrites <unk>, <s> and </s> dict_file = io.StringIO( "<unk> 999 #fairseq:overwrite\n" "<s> 999 #fairseq:overwrite\n" "</s> 999 #fairseq:overwrite\n" ", 999\n" "▁de 999\n" ) d = Dictionary() d.add_from_file(dict_file) self.assertEqual(d.index('<pad>'), 1) self.assertEqual(d.index('foo'), 3) self.assertEqual(d.index('<unk>'), 4) self.assertEqual(d.index('<s>'), 5) self.assertEqual(d.index('</s>'), 6) self.assertEqual(d.index(','), 7) self.assertEqual(d.index('▁de'), 8) def test_no_overwrite(self): # for example, Camembert overwrites <unk>, <s> and </s> dict_file = io.StringIO( "<unk> 999\n" "<s> 999\n" "</s> 999\n" ", 999\n" "▁de 999\n" ) d = Dictionary() with self.assertRaisesRegex(RuntimeError, 'Duplicate'): d.add_from_file(dict_file) def test_space(self): # for example, character models treat space as a symbol dict_file = io.StringIO( " 999\n" "a 999\n" "b 999\n" ) d = Dictionary() d.add_from_file(dict_file) self.assertEqual(d.index(' '), 4) self.assertEqual(d.index('a'), 5) self.assertEqual(d.index('b'), 6) if __name__ == '__main__': unittest.main()
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
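The id flip between ref_ids1 and ref_ids2 in test_finalize can be verified by hand: encode_line() assigns ids in first-seen order (A=4, B=5, C=6, D=7), but finalize() re-sorts symbols by frequency, and D is the most frequent token in the four lines. A stdlib-only check of the expected ordering:

from collections import Counter

counts = Counter()
for line in ['A B C D', 'B C D', 'C D', 'D']:
    counts.update(line.split())
print(counts.most_common())  # [('D', 4), ('C', 3), ('B', 2), ('A', 1)]
# so after finalize(): D=4, C=5, B=6, A=7 -- exactly ref_ids2 in the test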
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/tests/test_token_block_dataset.py
tests/test_token_block_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import torch from fairseq.data import TokenBlockDataset import tests.utils as test_utils class TestTokenBlockDataset(unittest.TestCase): def _build_dataset(self, data, **kwargs): sizes = [len(x) for x in data] underlying_ds = test_utils.TestDataset(data) return TokenBlockDataset(underlying_ds, sizes, **kwargs) def test_eos_break_mode(self): data = [ torch.tensor([5, 4, 3, 2, 1], dtype=torch.long), torch.tensor([1], dtype=torch.long), torch.tensor([8, 7, 6, 1], dtype=torch.long), ] ds = self._build_dataset(data, block_size=None, pad=0, eos=1, break_mode='eos') self.assertEqual(ds[0].tolist(), [5, 4, 3, 2, 1]) self.assertEqual(ds[1].tolist(), [1]) self.assertEqual(ds[2].tolist(), [8, 7, 6, 1]) data = [ torch.tensor([5, 4, 3, 2, 1], dtype=torch.long), torch.tensor([8, 7, 6, 1], dtype=torch.long), torch.tensor([1], dtype=torch.long), ] ds = self._build_dataset(data, block_size=None, pad=0, eos=1, break_mode='eos') self.assertEqual(ds[0].tolist(), [5, 4, 3, 2, 1]) self.assertEqual(ds[1].tolist(), [8, 7, 6, 1]) self.assertEqual(ds[2].tolist(), [1]) def test_block_break_mode(self): data = [ torch.tensor([5, 4, 3, 2, 1], dtype=torch.long), torch.tensor([8, 7, 6, 1], dtype=torch.long), torch.tensor([9, 1], dtype=torch.long), ] ds = self._build_dataset(data, block_size=3, pad=0, eos=1, break_mode='none') self.assertEqual(ds[0].tolist(), [5, 4, 3]) self.assertEqual(ds[1].tolist(), [2, 1, 8]) self.assertEqual(ds[2].tolist(), [7, 6, 1]) self.assertEqual(ds[3].tolist(), [9, 1]) def test_complete_break_mode(self): data = [ torch.tensor([5, 4, 3, 2, 1], dtype=torch.long), torch.tensor([8, 7, 6, 1], dtype=torch.long), torch.tensor([9, 1], dtype=torch.long), ] ds = self._build_dataset(data, block_size=6, pad=0, eos=1, break_mode='complete') self.assertEqual(ds[0].tolist(), [5, 4, 3, 2, 1]) self.assertEqual(ds[1].tolist(), [8, 7, 6, 1, 9, 1]) data = [ torch.tensor([4, 3, 2, 1], dtype=torch.long), torch.tensor([5, 1], dtype=torch.long), torch.tensor([1], dtype=torch.long), torch.tensor([6, 1], dtype=torch.long), ] ds = self._build_dataset(data, block_size=3, pad=0, eos=1, break_mode='complete') self.assertEqual(ds[0].tolist(), [4, 3, 2, 1]) self.assertEqual(ds[1].tolist(), [5, 1, 1]) self.assertEqual(ds[2].tolist(), [6, 1]) if __name__ == "__main__": unittest.main()
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
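For test_block_break_mode, the expected slices are easiest to see as an eager re-implementation of break_mode='none': concatenate every sequence and cut fixed-size blocks, ignoring sentence boundaries. In the sketch below, blocks_none is a hypothetical helper, not the fairseq API (TokenBlockDataset indexes lazily over offsets), but it reproduces the asserted outputs:

import torch

def blocks_none(seqs, block_size):
    # concatenate all sequences into one token stream, then slice blocks
    stream = torch.cat(seqs)
    return [stream[i:i + block_size] for i in range(0, len(stream), block_size)]

data = [torch.tensor([5, 4, 3, 2, 1]), torch.tensor([8, 7, 6, 1]), torch.tensor([9, 1])]
for b in blocks_none(data, 3):
    print(b.tolist())  # [5, 4, 3], [2, 1, 8], [7, 6, 1], [9, 1]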
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/tests/test_lstm_jitable.py
tests/test_lstm_jitable.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import argparse
import tempfile
import unittest

import torch
from fairseq.data.dictionary import Dictionary
from fairseq.models.lstm import LSTMModel
from fairseq.tasks.fairseq_task import LegacyFairseqTask

DEFAULT_TEST_VOCAB_SIZE = 100


class DummyTask(LegacyFairseqTask):
    def __init__(self, args):
        super().__init__(args)
        self.dictionary = get_dummy_dictionary()
        if getattr(self.args, "ctc", False):
            self.dictionary.add_symbol("<ctc_blank>")
        self.src_dict = self.dictionary
        self.tgt_dict = self.dictionary

    @property
    def source_dictionary(self):
        return self.src_dict

    @property
    def target_dictionary(self):
        return self.dictionary


def get_dummy_dictionary(vocab_size=DEFAULT_TEST_VOCAB_SIZE):
    dummy_dict = Dictionary()
    # add dummy symbols to satisfy the vocab size
    for id, _ in enumerate(range(vocab_size)):
        dummy_dict.add_symbol("{}".format(id), 1000)
    return dummy_dict


def get_dummy_task_and_parser():
    """
    To build a fairseq model, we need a dummy parser and task. This function
    creates a dummy task and parser to facilitate model/criterion tests.

    Note: we use DummyTask as the dummy task here. You may want to use another
    task by providing another function.
    """
    parser = argparse.ArgumentParser(
        description="test_dummy_s2s_task", argument_default=argparse.SUPPRESS
    )
    DummyTask.add_args(parser)
    args = parser.parse_args([])
    task = DummyTask.setup_task(args)
    return task, parser


class TestJitLSTMModel(unittest.TestCase):
    def _test_save_and_load(self, scripted_module):
        with tempfile.NamedTemporaryFile() as f:
            scripted_module.save(f.name)
            torch.jit.load(f.name)

    def assertTensorEqual(self, t1, t2):
        t1 = t1[~torch.isnan(t1)]  # can cause size mismatch errors if there are NaNs
        t2 = t2[~torch.isnan(t2)]
        self.assertEqual(t1.size(), t2.size(), "size mismatch")
        self.assertEqual(t1.ne(t2).long().sum(), 0)

    def test_jit_and_export_lstm(self):
        task, parser = get_dummy_task_and_parser()
        LSTMModel.add_args(parser)
        args = parser.parse_args([])
        args.criterion = ""
        model = LSTMModel.build_model(args, task)
        scripted_model = torch.jit.script(model)
        self._test_save_and_load(scripted_model)

    def test_assert_jit_vs_nonjit_(self):
        task, parser = get_dummy_task_and_parser()
        LSTMModel.add_args(parser)
        args = parser.parse_args([])
        args.criterion = ""
        model = LSTMModel.build_model(args, task)
        model.eval()
        scripted_model = torch.jit.script(model)
        scripted_model.eval()
        idx = len(task.source_dictionary)
        iter = 100
        # Inject random input and check that outputs match
        seq_len_tensor = torch.randint(1, 10, (iter,))
        num_samples_tensor = torch.randint(1, 10, (iter,))
        for i in range(iter):
            seq_len = seq_len_tensor[i]
            num_samples = num_samples_tensor[i]
            src_token = torch.randint(0, idx, (num_samples, seq_len))
            src_lengths = torch.randint(1, seq_len + 1, (num_samples,))
            src_lengths, _ = torch.sort(src_lengths, descending=True)
            # Force the first sample to have seq_len
            src_lengths[0] = seq_len
            prev_output_token = torch.randint(0, idx, (num_samples, 1))
            result = model(src_token, src_lengths, prev_output_token, None)
            scripted_result = scripted_model(src_token, src_lengths, prev_output_token, None)
            self.assertTensorEqual(result[0], scripted_result[0])
            self.assertTensorEqual(result[1], scripted_result[1])


if __name__ == "__main__":
    unittest.main()
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
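The scripting round-trip in _test_save_and_load above is a generic TorchScript pattern. Here it is on a self-contained module rather than the fairseq LSTMModel, to show what the test is actually checking (Tiny is a hypothetical stand-in module):

import tempfile
import torch

class Tiny(torch.nn.Module):
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return torch.relu(x) + 1.0

scripted = torch.jit.script(Tiny())
with tempfile.NamedTemporaryFile(suffix='.pt') as f:
    scripted.save(f.name)            # serialize the scripted module
    reloaded = torch.jit.load(f.name)  # and load it back
x = torch.randn(3)
assert torch.equal(scripted(x), reloaded(x))  # same outputs after the round trip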
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/tests/test_label_smoothing.py
tests/test_label_smoothing.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import copy import unittest import torch from fairseq.criterions.cross_entropy import CrossEntropyCriterion from fairseq.criterions.label_smoothed_cross_entropy import LabelSmoothedCrossEntropyCriterion import tests.utils as test_utils class TestLabelSmoothing(unittest.TestCase): def setUp(self): # build dictionary self.d = test_utils.dummy_dictionary(3) vocab = len(self.d) self.assertEqual(vocab, 4 + 3) # 4 special + 3 tokens self.assertEqual(self.d.pad(), 1) self.assertEqual(self.d.eos(), 2) self.assertEqual(self.d.unk(), 3) pad, eos, unk, w1, w2, w3 = 1, 2, 3, 4, 5, 6 # noqa: F841 # build dataset self.data = [ # the first batch item has padding {'source': torch.LongTensor([w1, eos]), 'target': torch.LongTensor([w1, eos])}, {'source': torch.LongTensor([w1, eos]), 'target': torch.LongTensor([w1, w1, eos])}, ] self.sample = next(test_utils.dummy_dataloader(self.data)) # build model self.args = argparse.Namespace() self.args.sentence_avg = False self.args.probs = torch.FloatTensor([ # pad eos unk w1 w2 w3 [0.05, 0.05, 0.1, 0.05, 0.3, 0.4, 0.05], [0.05, 0.10, 0.2, 0.05, 0.2, 0.3, 0.10], [0.05, 0.15, 0.3, 0.05, 0.1, 0.2, 0.15], ]).unsqueeze(0).expand(2, 3, 7) # add batch dimension self.task = test_utils.TestTranslationTask.setup_task(self.args, self.d, self.d) self.model = self.task.build_model(self.args) def test_nll_loss(self): self.args.label_smoothing = 0.1 nll_crit = CrossEntropyCriterion.build_criterion(self.args, self.task) smooth_crit = LabelSmoothedCrossEntropyCriterion.build_criterion(self.args, self.task) nll_loss, nll_sample_size, nll_logging_output = nll_crit(self.model, self.sample) smooth_loss, smooth_sample_size, smooth_logging_output = smooth_crit(self.model, self.sample) self.assertLess(abs(nll_loss - nll_logging_output['loss']), 1e-6) self.assertLess(abs(nll_loss - smooth_logging_output['nll_loss']), 1e-6) def test_padding(self): self.args.label_smoothing = 0.1 crit = LabelSmoothedCrossEntropyCriterion.build_criterion(self.args, self.task) loss, _, logging_output = crit(self.model, self.sample) def get_one_no_padding(idx): # create a new sample with just a single batch item so that there's # no padding sample1 = next(test_utils.dummy_dataloader([self.data[idx]])) args1 = copy.copy(self.args) args1.probs = args1.probs[idx, :, :].unsqueeze(0) model1 = self.task.build_model(args1) loss1, _, _ = crit(model1, sample1) return loss1 loss1 = get_one_no_padding(0) loss2 = get_one_no_padding(1) self.assertAlmostEqual(loss, loss1 + loss2) def test_reduction(self): self.args.label_smoothing = 0.1 crit = LabelSmoothedCrossEntropyCriterion.build_criterion(self.args, self.task) loss, _, logging_output = crit(self.model, self.sample, reduce=True) unreduced_loss, _, _ = crit(self.model, self.sample, reduce=False) self.assertAlmostEqual(loss, unreduced_loss.sum()) def test_zero_eps(self): self.args.label_smoothing = 0.0 nll_crit = CrossEntropyCriterion.build_criterion(self.args, self.task) smooth_crit = LabelSmoothedCrossEntropyCriterion.build_criterion(self.args, self.task) nll_loss, nll_sample_size, nll_logging_output = nll_crit(self.model, self.sample) smooth_loss, smooth_sample_size, smooth_logging_output = smooth_crit(self.model, self.sample) self.assertAlmostEqual(nll_loss, smooth_loss) def assertAlmostEqual(self, t1, t2): self.assertEqual(t1.size(), t2.size(), "size mismatch") 
self.assertLess((t1 - t2).abs().max(), 1e-6) if __name__ == '__main__': unittest.main()
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
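The invariants these criterion tests check can be stated compactly: the smoothed loss decomposes into an NLL term plus a smoothing term, and reduces to plain NLL when label_smoothing is 0.0 (the property test_zero_eps asserts). Below is a sketch of one common label-smoothing formulation; fairseq's criterion differs in its exact reduction and padding handling, so treat this as the idea rather than the implementation:

import torch
import torch.nn.functional as F

def smoothed_nll(lprobs, target, eps):
    # loss = (1 - eps) * nll + eps * loss under a uniform target distribution
    nll = -lprobs.gather(dim=-1, index=target.unsqueeze(-1)).squeeze(-1)
    uniform = -lprobs.mean(dim=-1)
    return ((1.0 - eps) * nll + eps * uniform).sum()

lprobs = F.log_softmax(torch.randn(2, 7), dim=-1)
target = torch.tensor([4, 5])
# with eps == 0 this is exactly the (summed) negative log-likelihood
assert torch.allclose(smoothed_nll(lprobs, target, 0.0),
                      F.nll_loss(lprobs, target, reduction='sum'))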
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/tests/__init__.py
tests/__init__.py
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/tests/test_binaries.py
tests/test_binaries.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import contextlib from io import StringIO import logging import os import random import tempfile import unittest import torch from fairseq import options from fairseq_cli import train from fairseq_cli import eval_lm from fairseq_cli import validate from tests.utils import ( create_dummy_data, preprocess_lm_data, preprocess_translation_data, train_translation_model, generate_main, ) class TestTranslation(unittest.TestCase): def setUp(self): logging.disable(logging.CRITICAL) def tearDown(self): logging.disable(logging.NOTSET) def test_fconv(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory('test_fconv') as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model(data_dir, 'fconv_iwslt_de_en') generate_main(data_dir) def test_raw(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory('test_fconv_raw') as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir, ['--dataset-impl', 'raw']) train_translation_model(data_dir, 'fconv_iwslt_de_en', ['--dataset-impl', 'raw']) generate_main(data_dir, ['--dataset-impl', 'raw']) def test_update_freq(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory('test_update_freq') as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model(data_dir, 'fconv_iwslt_de_en', ['--update-freq', '3']) generate_main(data_dir) def test_max_positions(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory('test_max_positions') as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) with self.assertRaises(Exception) as context: train_translation_model( data_dir, 'fconv_iwslt_de_en', ['--max-target-positions', '5'], ) self.assertTrue( 'skip this example with --skip-invalid-size-inputs-valid-test' in str(context.exception) ) train_translation_model( data_dir, 'fconv_iwslt_de_en', ['--max-target-positions', '5', '--skip-invalid-size-inputs-valid-test'], ) with self.assertRaises(Exception) as context: generate_main(data_dir) generate_main(data_dir, ['--skip-invalid-size-inputs-valid-test']) def test_generation(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory('test_sampling') as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model(data_dir, 'fconv_iwslt_de_en') generate_main(data_dir, [ '--sampling', '--temperature', '2', '--beam', '2', '--nbest', '2', ]) generate_main(data_dir, [ '--sampling', '--sampling-topk', '3', '--beam', '2', '--nbest', '2', ]) generate_main(data_dir, [ '--sampling', '--sampling-topp', '0.2', '--beam', '2', '--nbest', '2', ]) generate_main(data_dir, [ '--diversity-rate', '0.5', '--beam', '6', ]) with self.assertRaises(ValueError): generate_main(data_dir, [ '--diverse-beam-groups', '4', '--match-source-len', ]) generate_main(data_dir, ['--prefix-size', '2']) generate_main(data_dir, ['--retain-dropout']) def test_eval_bleu(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory('test_eval_bleu') as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model(data_dir, 'fconv_iwslt_de_en', [ '--eval-bleu', '--eval-bleu-print-samples', '--eval-bleu-remove-bpe', '--eval-bleu-detok', 'space', 
'--eval-bleu-args', '{"beam": 4, "min_len": 10}', ]) def test_lstm(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory('test_lstm') as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model(data_dir, 'lstm_wiseman_iwslt_de_en', [ '--encoder-layers', '2', '--decoder-layers', '2', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8', '--decoder-out-embed-dim', '8', ]) generate_main(data_dir) def test_lstm_bidirectional(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory('test_lstm_bidirectional') as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model(data_dir, 'lstm', [ '--encoder-layers', '2', '--encoder-bidirectional', '--encoder-hidden-size', '16', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8', '--decoder-out-embed-dim', '8', '--decoder-layers', '2', ]) generate_main(data_dir) def test_transformer(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory('test_transformer') as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model(data_dir, 'transformer_iwslt_de_en', [ '--encoder-layers', '2', '--decoder-layers', '2', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8', ], run_validation=True) generate_main(data_dir) def test_multilingual_transformer(self): # test with all combinations of encoder/decoder lang tokens encoder_langtok_flags = [[], ['--encoder-langtok', 'src'], ['--encoder-langtok', 'tgt']] decoder_langtok_flags = [[], ['--decoder-langtok']] with contextlib.redirect_stdout(StringIO()): for i in range(len(encoder_langtok_flags)): for j in range(len(decoder_langtok_flags)): enc_ltok_flag = encoder_langtok_flags[i] dec_ltok_flag = decoder_langtok_flags[j] with tempfile.TemporaryDirectory(f'test_multilingual_transformer_{i}_{j}') as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model( data_dir, arch='multilingual_transformer', task='multilingual_translation', extra_flags=[ '--encoder-layers', '2', '--decoder-layers', '2', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8', ] + enc_ltok_flag + dec_ltok_flag, lang_flags=['--lang-pairs', 'in-out,out-in'], run_validation=True, extra_valid_flags=enc_ltok_flag + dec_ltok_flag, ) generate_main( data_dir, extra_flags=[ '--task', 'multilingual_translation', '--lang-pairs', 'in-out,out-in', '--source-lang', 'in', '--target-lang', 'out', ] + enc_ltok_flag + dec_ltok_flag, ) def test_translation_multi_simple_epoch(self): # test with all combinations of encoder/decoder lang tokens encoder_langtok_flags = [[], ['--encoder-langtok', 'src'], ['--encoder-langtok', 'tgt']] decoder_langtok_flags = [[], ['--decoder-langtok']] with contextlib.redirect_stdout(StringIO()): for i in range(len(encoder_langtok_flags)): for j in range(len(decoder_langtok_flags)): enc_ltok_flag = encoder_langtok_flags[i] dec_ltok_flag = decoder_langtok_flags[j] with tempfile.TemporaryDirectory(f'test_translation_multi_simple_epoch_{i}_{j}') as data_dir: create_dummy_data(data_dir) preprocess_translation_data( data_dir, extra_flags=['--joined-dictionary'] ) train_translation_model( data_dir, arch='transformer', task='translation_multi_simple_epoch', extra_flags=[ '--encoder-layers', '2', '--decoder-layers', '2', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8', '--sampling-method', 'temperature', '--sampling-temperature', '1.5', '--virtual-epoch-size', '1000', ] + enc_ltok_flag 
+ dec_ltok_flag, lang_flags=['--lang-pairs', 'in-out,out-in'], run_validation=True, extra_valid_flags=enc_ltok_flag + dec_ltok_flag, ) generate_main( data_dir, extra_flags=[ '--task', 'translation_multi_simple_epoch', '--lang-pairs', 'in-out,out-in', '--source-lang', 'in', '--target-lang', 'out', ] + enc_ltok_flag + dec_ltok_flag, ) def test_transformer_cross_self_attention(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory('test_transformer_cross_self_attention') as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model(data_dir, 'transformer_iwslt_de_en', [ '--encoder-layers', '2', '--decoder-layers', '2', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8', '--decoder-embed-dim', '8', '--no-cross-attention', '--cross-self-attention', ], run_validation=True) generate_main(data_dir, extra_flags=[]) def test_lightconv(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory('test_lightconv') as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model(data_dir, 'lightconv_iwslt_de_en', [ '--encoder-conv-type', 'lightweight', '--decoder-conv-type', 'lightweight', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8', ]) generate_main(data_dir) def test_dynamicconv(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory('test_dynamicconv') as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model(data_dir, 'lightconv_iwslt_de_en', [ '--encoder-conv-type', 'dynamic', '--decoder-conv-type', 'dynamic', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8', ]) generate_main(data_dir) def test_cmlm_transformer(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory('test_cmlm_transformer') as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir, ['--joined-dictionary']) train_translation_model(data_dir, 'cmlm_transformer', [ '--apply-bert-init', '--criterion', 'nat_loss', '--noise', 'full_mask', '--pred-length-offset', '--length-loss-factor', '0.1' ], task='translation_lev') generate_main(data_dir, [ '--task', 'translation_lev', '--iter-decode-max-iter', '9', '--iter-decode-eos-penalty', '0', '--print-step', ]) def test_nonautoregressive_transformer(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory('test_nonautoregressive_transformer') as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir, ['--joined-dictionary']) train_translation_model(data_dir, 'nonautoregressive_transformer', [ '--apply-bert-init', '--src-embedding-copy', '--criterion', 'nat_loss', '--noise', 'full_mask', '--pred-length-offset', '--length-loss-factor', '0.1' ], task='translation_lev') generate_main(data_dir, [ '--task', 'translation_lev', '--iter-decode-max-iter', '0', '--iter-decode-eos-penalty', '0', '--print-step', ]) # def test_nat_crf_transformer(self): # with contextlib.redirect_stdout(StringIO()): # with tempfile.TemporaryDirectory('test_nat_crf_transformer') as data_dir: # create_dummy_data(data_dir) # preprocess_translation_data(data_dir, ['--joined-dictionary']) # train_translation_model(data_dir, 'nacrf_transformer', [ # '--apply-bert-init', '--criterion', # 'nat_loss', '--noise', 'full_mask', '--pred-length-offset', # '--length-loss-factor', '0.1', # '--word-ins-loss-factor', '0.5', # '--crf-lowrank-approx', '1', # '--crf-beam-approx', '1' # ], task='translation_lev') # 
generate_main(data_dir, [ # '--task', 'translation_lev', # '--iter-decode-max-iter', '0', # '--iter-decode-eos-penalty', '0', # '--print-step', # ]) def test_iterative_nonautoregressive_transformer(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory('test_iterative_nonautoregressive_transformer') as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir, ['--joined-dictionary']) train_translation_model(data_dir, 'iterative_nonautoregressive_transformer', [ '--apply-bert-init', '--src-embedding-copy', '--criterion', 'nat_loss', '--noise', 'full_mask', '--stochastic-approx', '--dae-ratio', '0.5', '--train-step', '3' ], task='translation_lev') generate_main(data_dir, [ '--task', 'translation_lev', '--iter-decode-max-iter', '9', '--iter-decode-eos-penalty', '0', '--print-step', ]) def test_insertion_transformer(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory('test_insertion_transformer') as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir, ['--joined-dictionary']) train_translation_model(data_dir, 'insertion_transformer', [ '--apply-bert-init', '--criterion', 'nat_loss', '--noise', 'random_mask' ], task='translation_lev') generate_main(data_dir, [ '--task', 'translation_lev', '--iter-decode-max-iter', '9', '--iter-decode-eos-penalty', '0', '--print-step', ]) def test_mixture_of_experts(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory('test_moe') as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model(data_dir, 'transformer_iwslt_de_en', [ '--task', 'translation_moe', '--user-dir', 'examples/translation_moe/src', '--method', 'hMoElp', '--mean-pool-gating-network', '--num-experts', '3', '--encoder-layers', '2', '--decoder-layers', '2', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8', ]) generate_main(data_dir, [ '--task', 'translation_moe', '--user-dir', 'examples/translation_moe/src', '--method', 'hMoElp', '--mean-pool-gating-network', '--num-experts', '3', '--gen-expert', '0' ]) def test_alignment(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory('test_alignment') as data_dir: create_dummy_data(data_dir, alignment=True) preprocess_translation_data(data_dir, ['--align-suffix', 'align']) train_translation_model( data_dir, 'transformer_align', [ '--encoder-layers', '2', '--decoder-layers', '2', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8', '--load-alignments', '--alignment-layer', '1', '--criterion', 'label_smoothed_cross_entropy_with_alignment' ], run_validation=True, ) generate_main(data_dir) class TestStories(unittest.TestCase): def setUp(self): logging.disable(logging.CRITICAL) def tearDown(self): logging.disable(logging.NOTSET) def test_fconv_self_att_wp(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory('test_fconv_self_att_wp') as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) config = [ '--encoder-layers', '[(128, 3)] * 2', '--decoder-layers', '[(128, 3)] * 2', '--decoder-attention', 'True', '--encoder-attention', 'False', '--gated-attention', 'True', '--self-attention', 'True', '--project-input', 'True', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8', '--decoder-out-embed-dim', '8', '--multihead-self-attention-nheads', '2' ] train_translation_model(data_dir, 'fconv_self_att_wp', config) generate_main(data_dir) # fusion model os.rename(os.path.join(data_dir, 
'checkpoint_last.pt'), os.path.join(data_dir, 'pretrained.pt')) config.extend([ '--pretrained', 'True', '--pretrained-checkpoint', os.path.join(data_dir, 'pretrained.pt'), '--save-dir', os.path.join(data_dir, 'fusion_model'), ]) train_translation_model(data_dir, 'fconv_self_att_wp', config) class TestLanguageModeling(unittest.TestCase): def setUp(self): logging.disable(logging.CRITICAL) def tearDown(self): logging.disable(logging.NOTSET) def test_fconv_lm(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory('test_fconv_lm') as data_dir: create_dummy_data(data_dir) preprocess_lm_data(data_dir) train_language_model(data_dir, 'fconv_lm', [ '--decoder-layers', '[(850, 3)] * 2 + [(1024,4)]', '--decoder-embed-dim', '280', '--optimizer', 'nag', '--lr', '0.1', ]) eval_lm_main(data_dir) generate_main(data_dir, [ '--task', 'language_modeling', '--sample-break-mode', 'eos', '--tokens-per-sample', '500', ]) def test_transformer_lm(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory('test_transformer_lm') as data_dir: create_dummy_data(data_dir) preprocess_lm_data(data_dir) train_language_model( data_dir, 'transformer_lm', ['--add-bos-token'], run_validation=True, ) eval_lm_main(data_dir) generate_main(data_dir, [ '--task', 'language_modeling', '--sample-break-mode', 'eos', '--tokens-per-sample', '500', ]) def test_lightconv_lm(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory('test_lightconv_lm') as data_dir: create_dummy_data(data_dir) preprocess_lm_data(data_dir) train_language_model( data_dir, 'lightconv_lm', ['--add-bos-token'], run_validation=True, ) eval_lm_main(data_dir) generate_main(data_dir, [ '--task', 'language_modeling', '--sample-break-mode', 'eos', '--tokens-per-sample', '500', ]) def test_lstm_lm(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory('test_lstm_lm') as data_dir: create_dummy_data(data_dir) preprocess_lm_data(data_dir) train_language_model( data_dir, 'lstm_lm', ['--add-bos-token'], run_validation=True, ) eval_lm_main(data_dir) generate_main(data_dir, [ '--task', 'language_modeling', '--sample-break-mode', 'eos', '--tokens-per-sample', '500', ]) def test_lstm_lm_residuals(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory('test_lstm_lm_residuals') as data_dir: create_dummy_data(data_dir) preprocess_lm_data(data_dir) train_language_model( data_dir, 'lstm_lm', ['--add-bos-token', '--residuals'], run_validation=True, ) eval_lm_main(data_dir) generate_main(data_dir, [ '--task', 'language_modeling', '--sample-break-mode', 'eos', '--tokens-per-sample', '500', ]) class TestMaskedLanguageModel(unittest.TestCase): def setUp(self): logging.disable(logging.CRITICAL) def tearDown(self): logging.disable(logging.NOTSET) def test_legacy_masked_lm(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_legacy_mlm") as data_dir: create_dummy_data(data_dir) preprocess_lm_data(data_dir) train_legacy_masked_language_model(data_dir, "masked_lm") def test_roberta_masked_lm(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_roberta_mlm") as data_dir: create_dummy_data(data_dir) preprocess_lm_data(data_dir) train_masked_lm(data_dir, "roberta_base") def test_roberta_sentence_prediction(self): num_classes = 3 with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_roberta_head") as data_dir: create_dummy_roberta_head_data(data_dir, 
num_classes=num_classes) preprocess_lm_data(os.path.join(data_dir, 'input0')) preprocess_lm_data(os.path.join(data_dir, 'label')) train_roberta_head(data_dir, "roberta_base", num_classes=num_classes) def test_roberta_regression_single(self): num_classes = 1 with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_roberta_regression_single") as data_dir: create_dummy_roberta_head_data(data_dir, num_classes=num_classes, regression=True) preprocess_lm_data(os.path.join(data_dir, 'input0')) train_roberta_head(data_dir, "roberta_base", num_classes=num_classes, extra_flags=['--regression-target']) def test_roberta_regression_multiple(self): num_classes = 3 with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_roberta_regression_multiple") as data_dir: create_dummy_roberta_head_data(data_dir, num_classes=num_classes, regression=True) preprocess_lm_data(os.path.join(data_dir, 'input0')) train_roberta_head(data_dir, "roberta_base", num_classes=num_classes, extra_flags=['--regression-target']) def _test_pretrained_masked_lm_for_translation(self, learned_pos_emb, encoder_only): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_mlm") as data_dir: create_dummy_data(data_dir) preprocess_lm_data(data_dir) train_legacy_masked_language_model( data_dir, arch="masked_lm", extra_args=('--encoder-learned-pos',) if learned_pos_emb else () ) with tempfile.TemporaryDirectory( "test_mlm_translation" ) as translation_dir: create_dummy_data(translation_dir) preprocess_translation_data( translation_dir, extra_flags=["--joined-dictionary"] ) # Train transformer with data_dir/checkpoint_last.pt train_translation_model( translation_dir, arch="transformer_from_pretrained_xlm", extra_flags=[ "--decoder-layers", "1", "--decoder-embed-dim", "32", "--decoder-attention-heads", "1", "--decoder-ffn-embed-dim", "32", "--encoder-layers", "1", "--encoder-embed-dim", "32", "--encoder-attention-heads", "1", "--encoder-ffn-embed-dim", "32", "--pretrained-xlm-checkpoint", "{}/checkpoint_last.pt".format(data_dir), "--activation-fn", "gelu", "--max-source-positions", "500", "--max-target-positions", "500", ] + ( ["--encoder-learned-pos", "--decoder-learned-pos"] if learned_pos_emb else [] ) + (['--init-encoder-only'] if encoder_only else []), task="translation_from_pretrained_xlm", ) def test_pretrained_masked_lm_for_translation_learned_pos_emb(self): self._test_pretrained_masked_lm_for_translation(True, False) def test_pretrained_masked_lm_for_translation_sinusoidal_pos_emb(self): self._test_pretrained_masked_lm_for_translation(False, False) def test_pretrained_masked_lm_for_translation_encoder_only(self): self._test_pretrained_masked_lm_for_translation(True, True) def train_legacy_masked_language_model(data_dir, arch, extra_args=()): train_parser = options.get_training_parser() # TODO: langs should be in and out right? 
train_args = options.parse_args_and_arch( train_parser, [ "--task", "cross_lingual_lm", data_dir, "--arch", arch, # Optimizer args "--optimizer", "adam", "--lr-scheduler", "reduce_lr_on_plateau", "--lr-shrink", "0.5", "--lr", "0.0001", "--min-lr", "1e-09", # dropout, attention args "--dropout", "0.1", "--attention-dropout", "0.1", # MLM args "--criterion", "legacy_masked_lm_loss", "--masked-lm-only", "--monolingual-langs", "in,out", "--num-segment", "5", # Transformer args: use a small transformer model for fast training "--encoder-layers", "1", "--encoder-embed-dim", "32", "--encoder-attention-heads", "1", "--encoder-ffn-embed-dim", "32", # Other training args "--max-tokens", "500", "--tokens-per-sample", "500", "--save-dir", data_dir, "--max-epoch", "1", "--no-progress-bar", "--distributed-world-size", "1", "--dataset-impl", "raw", ] + list(extra_args), ) train.main(train_args) class TestOptimizers(unittest.TestCase): def setUp(self): logging.disable(logging.CRITICAL) def tearDown(self): logging.disable(logging.NOTSET) def test_optimizers(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory('test_optimizers') as data_dir: # Use just a bit of data and tiny model to keep this test runtime reasonable
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
true
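Every test in this (truncated) file repeats the same harness: silence the trainer's stdout and run the whole preprocess/train/generate pipeline inside a throwaway directory. Distilled below, with run_pipeline as a hypothetical stand-in for the create_dummy_data / preprocess / train / generate calls, not a fairseq API:

import contextlib
import tempfile
from io import StringIO

def run_quietly(run_pipeline):
    # swallow the pipeline's stdout and give it a self-cleaning work directory
    with contextlib.redirect_stdout(StringIO()):
        with tempfile.TemporaryDirectory('test_example') as data_dir:
            run_pipeline(data_dir)

run_quietly(lambda data_dir: print('noisy output lands in the StringIO, not the terminal'))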
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/tests/test_backtranslation_dataset.py
tests/test_backtranslation_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import torch from fairseq.data import ( BacktranslationDataset, LanguagePairDataset, TransformEosDataset, ) from fairseq.sequence_generator import SequenceGenerator import tests.utils as test_utils class TestBacktranslationDataset(unittest.TestCase): def setUp(self): self.tgt_dict, self.w1, self.w2, self.src_tokens, self.src_lengths, self.model = ( test_utils.sequence_generator_setup() ) dummy_src_samples = self.src_tokens self.tgt_dataset = test_utils.TestDataset(data=dummy_src_samples) self.cuda = torch.cuda.is_available() def _backtranslation_dataset_helper( self, remove_eos_from_input_src, remove_eos_from_output_src, ): tgt_dataset = LanguagePairDataset( src=self.tgt_dataset, src_sizes=self.tgt_dataset.sizes, src_dict=self.tgt_dict, tgt=None, tgt_sizes=None, tgt_dict=None, ) generator = SequenceGenerator( [self.model], tgt_dict=self.tgt_dict, max_len_a=0, max_len_b=200, beam_size=2, unk_penalty=0, ) backtranslation_dataset = BacktranslationDataset( tgt_dataset=TransformEosDataset( dataset=tgt_dataset, eos=self.tgt_dict.eos(), # remove eos from the input src remove_eos_from_src=remove_eos_from_input_src, ), src_dict=self.tgt_dict, backtranslation_fn=( lambda sample: generator.generate([self.model], sample) ), output_collater=TransformEosDataset( dataset=tgt_dataset, eos=self.tgt_dict.eos(), # if we remove eos from the input src, then we need to add it # back to the output tgt append_eos_to_tgt=remove_eos_from_input_src, remove_eos_from_src=remove_eos_from_output_src, ).collater, cuda=self.cuda, ) dataloader = torch.utils.data.DataLoader( backtranslation_dataset, batch_size=2, collate_fn=backtranslation_dataset.collater, ) backtranslation_batch_result = next(iter(dataloader)) eos, pad, w1, w2 = self.tgt_dict.eos(), self.tgt_dict.pad(), self.w1, self.w2 # Note that we sort by src_lengths and add left padding, so actually # ids will look like: [1, 0] expected_src = torch.LongTensor([[w1, w2, w1, eos], [pad, pad, w1, eos]]) if remove_eos_from_output_src: expected_src = expected_src[:, :-1] expected_tgt = torch.LongTensor([[w1, w2, eos], [w1, w2, eos]]) generated_src = backtranslation_batch_result["net_input"]["src_tokens"] tgt_tokens = backtranslation_batch_result["target"] self.assertTensorEqual(expected_src, generated_src) self.assertTensorEqual(expected_tgt, tgt_tokens) def test_backtranslation_dataset_no_eos_in_output_src(self): self._backtranslation_dataset_helper( remove_eos_from_input_src=False, remove_eos_from_output_src=True, ) def test_backtranslation_dataset_with_eos_in_output_src(self): self._backtranslation_dataset_helper( remove_eos_from_input_src=False, remove_eos_from_output_src=False, ) def test_backtranslation_dataset_no_eos_in_input_src(self): self._backtranslation_dataset_helper( remove_eos_from_input_src=True, remove_eos_from_output_src=False, ) def assertTensorEqual(self, t1, t2): self.assertEqual(t1.size(), t2.size(), "size mismatch") self.assertEqual(t1.ne(t2).long().sum(), 0) if __name__ == "__main__": unittest.main()
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
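The expected_src tensor in the test above encodes two conventions: batches are sorted by source length and shorter rows are left-padded. A toy sketch of that left-padded collation in plain torch (not fairseq's collate_tokens; the token ids are arbitrary):

import torch

# Toy sketch of the left-padded collation the test above asserts on:
# shorter sequences are padded on the left so all rows share max length.
def left_pad_collate(seqs, pad_idx):
    max_len = max(len(s) for s in seqs)
    batch = torch.full((len(seqs), max_len), pad_idx, dtype=torch.long)
    for i, s in enumerate(seqs):
        batch[i, max_len - len(s):] = torch.tensor(s)
    return batch

w1, w2, eos, pad = 4, 5, 2, 1  # arbitrary toy ids
print(left_pad_collate([[w1, w2, w1, eos], [w1, eos]], pad))
# tensor([[4, 5, 4, 2],
#         [1, 1, 4, 2]])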
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/tests/test_character_token_embedder.py
tests/test_character_token_embedder.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch
import unittest

from fairseq.data import Dictionary
from fairseq.modules import CharacterTokenEmbedder


class TestCharacterTokenEmbedder(unittest.TestCase):
    def test_character_token_embedder(self):
        vocab = Dictionary()
        vocab.add_symbol('hello')
        vocab.add_symbol('there')

        embedder = CharacterTokenEmbedder(vocab, [(2, 16), (4, 32), (8, 64), (16, 2)], 64, 5, 2)

        test_sents = [['hello', 'unk', 'there'], ['there'], ['hello', 'there']]
        max_len = max(len(s) for s in test_sents)
        input = torch.LongTensor(len(test_sents), max_len + 2).fill_(vocab.pad())
        for i in range(len(test_sents)):
            input[i][0] = vocab.eos()
            for j in range(len(test_sents[i])):
                input[i][j + 1] = vocab.index(test_sents[i][j])
            input[i][j + 2] = vocab.eos()
        embs = embedder(input)

        assert embs.size() == (len(test_sents), max_len + 2, 5)
        self.assertAlmostEqual(embs[0][0], embs[1][0])
        self.assertAlmostEqual(embs[0][0], embs[0][-1])
        self.assertAlmostEqual(embs[0][1], embs[2][1])
        self.assertAlmostEqual(embs[0][3], embs[1][1])

        embs.sum().backward()
        assert embedder.char_embeddings.weight.grad is not None

    def assertAlmostEqual(self, t1, t2):
        self.assertEqual(t1.size(), t2.size(), "size mismatch")
        self.assertLess((t1 - t2).abs().max(), 1e-6)


if __name__ == '__main__':
    unittest.main()
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
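The test above builds its input by wrapping each sentence as [eos, tokens..., eos] inside a pad-filled LongTensor. The same framing, extracted as a toy helper with a plain dict vocab (assumed ids; not fairseq's Dictionary):

import torch

# Sketch of the input framing used in the test above: each sentence is
# wrapped as [eos, w1, ..., wn, eos] inside a pad-filled LongTensor.
def frame_sentences(sents, index, pad, eos):
    max_len = max(len(s) for s in sents)
    out = torch.full((len(sents), max_len + 2), pad, dtype=torch.long)
    for i, sent in enumerate(sents):
        out[i, 0] = eos
        for j, w in enumerate(sent):
            out[i, j + 1] = index.get(w, 3)  # 3 = unk in this toy vocab
        out[i, len(sent) + 1] = eos
    return out

vocab = {"hello": 4, "there": 5}
print(frame_sentences([["hello", "there"], ["there"]], vocab, pad=1, eos=2))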
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/tests/test_average_checkpoints.py
tests/test_average_checkpoints.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import collections
import os
import tempfile
import unittest
import shutil

import numpy as np
import torch
from torch import nn

from scripts.average_checkpoints import average_checkpoints


class ModelWithSharedParameter(nn.Module):
    def __init__(self):
        super(ModelWithSharedParameter, self).__init__()
        self.embedding = nn.Embedding(1000, 200)
        self.FC1 = nn.Linear(200, 200)
        self.FC2 = nn.Linear(200, 200)
        # tie weight in FC2 to FC1
        self.FC2.weight = nn.Parameter(self.FC1.weight)
        self.FC2.bias = nn.Parameter(self.FC1.bias)

        self.relu = nn.ReLU()

    def forward(self, input):
        return self.FC2(self.relu(self.FC1(input))) + self.FC1(input)


class TestAverageCheckpoints(unittest.TestCase):
    def test_average_checkpoints(self):
        params_0 = collections.OrderedDict(
            [
                ('a', torch.DoubleTensor([100.0])),
                ('b', torch.FloatTensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])),
                ('c', torch.IntTensor([7, 8, 9])),
            ]
        )
        params_1 = collections.OrderedDict(
            [
                ('a', torch.DoubleTensor([1.0])),
                ('b', torch.FloatTensor([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]])),
                ('c', torch.IntTensor([2, 2, 2])),
            ]
        )
        params_avg = collections.OrderedDict(
            [
                ('a', torch.DoubleTensor([50.5])),
                ('b', torch.FloatTensor([[1.0, 1.5, 2.0], [2.5, 3.0, 3.5]])),
                # We expect truncation for integer division
                ('c', torch.IntTensor([4, 5, 5])),
            ]
        )

        fd_0, path_0 = tempfile.mkstemp()
        fd_1, path_1 = tempfile.mkstemp()
        torch.save(collections.OrderedDict([('model', params_0)]), path_0)
        torch.save(collections.OrderedDict([('model', params_1)]), path_1)

        output = average_checkpoints([path_0, path_1])['model']

        os.close(fd_0)
        os.remove(path_0)
        os.close(fd_1)
        os.remove(path_1)

        for (k_expected, v_expected), (k_out, v_out) in zip(
                params_avg.items(), output.items()):
            self.assertEqual(
                k_expected, k_out,
                'Key mismatch - expected {} but found {}. '
                '(Expected list of keys: {} vs actual list of keys: {})'.format(
                    k_expected, k_out, params_avg.keys(), output.keys()
                )
            )
            np.testing.assert_allclose(
                v_expected.numpy(),
                v_out.numpy(),
                err_msg='Tensor value mismatch for key {}'.format(k_expected)
            )

    def test_average_checkpoints_with_shared_parameters(self):

        def _construct_model_with_shared_parameters(path, value):
            m = ModelWithSharedParameter()
            nn.init.constant_(m.FC1.weight, value)
            torch.save(
                {'model': m.state_dict()}, path
            )
            return m

        tmpdir = tempfile.mkdtemp()
        paths = []

        path = os.path.join(tmpdir, "m1.pt")
        m1 = _construct_model_with_shared_parameters(path, 1.0)
        paths.append(path)

        path = os.path.join(tmpdir, "m2.pt")
        m2 = _construct_model_with_shared_parameters(path, 2.0)
        paths.append(path)

        path = os.path.join(tmpdir, "m3.pt")
        m3 = _construct_model_with_shared_parameters(path, 3.0)
        paths.append(path)

        new_model = average_checkpoints(paths)
        self.assertTrue(
            torch.equal(
                new_model['model']['embedding.weight'],
                (m1.embedding.weight + m2.embedding.weight + m3.embedding.weight) / 3.0
            )
        )

        self.assertTrue(
            torch.equal(
                new_model['model']['FC1.weight'],
                (m1.FC1.weight + m2.FC1.weight + m3.FC1.weight) / 3.0
            )
        )

        self.assertTrue(
            torch.equal(
                new_model['model']['FC2.weight'],
                (m1.FC2.weight + m2.FC2.weight + m3.FC2.weight) / 3.0
            )
        )
        shutil.rmtree(tmpdir)


if __name__ == '__main__':
    unittest.main()
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
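The expected values in the test above follow from elementwise averaging, with integer tensors truncating on division. A toy restatement of that arithmetic in plain torch (not scripts/average_checkpoints.py itself):

import collections
import torch

# Toy re-implementation of the averaging arithmetic the test asserts:
# float tensors get the true mean, integer tensors truncate on division.
def average_state_dicts(dicts):
    avg = collections.OrderedDict()
    n = len(dicts)
    for k in dicts[0]:
        summed = sum(d[k] for d in dicts)
        if summed.is_floating_point():
            avg[k] = summed / n
        else:
            avg[k] = summed // n  # truncating division, as in the test
    return avg

a = {"c": torch.IntTensor([7, 8, 9])}
b = {"c": torch.IntTensor([2, 2, 2])}
print(average_state_dicts([a, b])["c"])  # tensor([4, 5, 5], dtype=torch.int32)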
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/tests/test_resampling_dataset.py
tests/test_resampling_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import collections
import unittest

import numpy as np

from fairseq.data import ListDataset, ResamplingDataset


class TestResamplingDataset(unittest.TestCase):
    def setUp(self):
        self.strings = ["ab", "c", "def", "ghij"]
        self.weights = [4.0, 2.0, 7.0, 1.5]
        self.size_ratio = 2
        self.dataset = ListDataset(
            self.strings, np.array([len(s) for s in self.strings])
        )

    def _test_common(self, resampling_dataset, iters):
        assert len(self.dataset) == len(self.strings) == len(self.weights)
        assert len(resampling_dataset) == self.size_ratio * len(self.strings)

        results = {"ordered_by_size": True, "max_distribution_diff": 0.0}

        totalfreqs = 0
        freqs = collections.defaultdict(int)

        for epoch_num in range(iters):
            resampling_dataset.set_epoch(epoch_num)

            indices = resampling_dataset.ordered_indices()
            assert len(indices) == len(resampling_dataset)

            prev_size = -1

            for i in indices:
                cur_size = resampling_dataset.size(i)

                # Make sure indices map to same sequences within an epoch
                assert resampling_dataset[i] == resampling_dataset[i]

                # Make sure length of sequence is correct
                assert cur_size == len(resampling_dataset[i])

                freqs[resampling_dataset[i]] += 1
                totalfreqs += 1

                if prev_size > cur_size:
                    results["ordered_by_size"] = False

                prev_size = cur_size

        assert set(freqs.keys()) == set(self.strings)
        for s, weight in zip(self.strings, self.weights):
            freq = freqs[s] / totalfreqs
            expected_freq = weight / sum(self.weights)
            results["max_distribution_diff"] = max(
                results["max_distribution_diff"], abs(expected_freq - freq)
            )

        return results

    def test_resampling_dataset_batch_by_size_false(self):
        resampling_dataset = ResamplingDataset(
            self.dataset,
            self.weights,
            size_ratio=self.size_ratio,
            batch_by_size=False,
            seed=0,
        )

        results = self._test_common(resampling_dataset, iters=1000)

        # For batch_by_size = False, the batches should be returned in
        # arbitrary order of size.
        assert not results["ordered_by_size"]

        # Allow tolerance in distribution error of 2%.
        assert results["max_distribution_diff"] < 0.02

    def test_resampling_dataset_batch_by_size_true(self):
        resampling_dataset = ResamplingDataset(
            self.dataset,
            self.weights,
            size_ratio=self.size_ratio,
            batch_by_size=True,
            seed=0,
        )

        results = self._test_common(resampling_dataset, iters=1000)

        # For batch_by_size = True, the batches should be returned in
        # increasing order of size.
        assert results["ordered_by_size"]

        # Allow tolerance in distribution error of 2%.
        assert results["max_distribution_diff"] < 0.02


if __name__ == "__main__":
    unittest.main()
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
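The distribution check above relies on a standard property: sampling indices with probabilities proportional to the weights makes the empirical frequencies approach weight / sum(weights). A numpy-only sketch of that check, reusing the test's strings and weights (not ResamplingDataset itself):

import collections
import numpy as np

strings = ["ab", "c", "def", "ghij"]
weights = np.array([4.0, 2.0, 7.0, 1.5])
p = weights / weights.sum()

rng = np.random.default_rng(0)
counts = collections.Counter(rng.choice(len(strings), size=100_000, p=p))
for i, s in enumerate(strings):
    # empirical frequency should sit close to the normalized weight
    print(s, round(counts[i] / 100_000, 3), "expected", round(float(p[i]), 3))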
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/tests/test_iterators.py
tests/test_iterators.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import unittest

from fairseq.data import iterators


class TestIterators(unittest.TestCase):
    def test_counting_iterator(self, ref=None, itr=None):
        if ref is None:
            assert itr is None
            ref = list(range(10))
            itr = iterators.CountingIterator(ref)
        else:
            assert len(ref) == 10
            assert itr is not None
        self.assertTrue(itr.has_next())
        self.assertEqual(itr.n, 0)
        self.assertEqual(next(itr), ref[0])
        self.assertEqual(itr.n, 1)
        self.assertEqual(next(itr), ref[1])
        self.assertEqual(itr.n, 2)
        itr.skip(3)
        self.assertEqual(itr.n, 5)
        self.assertEqual(next(itr), ref[5])
        itr.skip(3)
        self.assertEqual(itr.n, 9)
        self.assertEqual(next(itr), ref[9])
        self.assertFalse(itr.has_next())

    def test_grouped_iterator(self):
        # test correctness
        x = list(range(10))
        itr = iterators.GroupedIterator(x, 1)
        self.assertEqual(list(itr), [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9]])
        itr = iterators.GroupedIterator(x, 4)
        self.assertEqual(list(itr), [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]])
        itr = iterators.GroupedIterator(x, 5)
        self.assertEqual(list(itr), [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]])

        # test CountingIterator functionality
        x = list(range(30))
        ref = list(iterators.GroupedIterator(x, 3))
        itr = iterators.GroupedIterator(x, 3)
        self.test_counting_iterator(ref, itr)

    def test_sharded_iterator(self):
        # test correctness
        x = list(range(10))
        itr = iterators.ShardedIterator(x, num_shards=1, shard_id=0)
        self.assertEqual(list(itr), x)
        itr = iterators.ShardedIterator(x, num_shards=2, shard_id=0)
        self.assertEqual(list(itr), [0, 2, 4, 6, 8])
        itr = iterators.ShardedIterator(x, num_shards=2, shard_id=1)
        self.assertEqual(list(itr), [1, 3, 5, 7, 9])
        itr = iterators.ShardedIterator(x, num_shards=3, shard_id=0)
        self.assertEqual(list(itr), [0, 3, 6, 9])
        itr = iterators.ShardedIterator(x, num_shards=3, shard_id=1)
        self.assertEqual(list(itr), [1, 4, 7, None])
        itr = iterators.ShardedIterator(x, num_shards=3, shard_id=2)
        self.assertEqual(list(itr), [2, 5, 8, None])

        # test CountingIterator functionality
        x = list(range(30))
        ref = list(iterators.ShardedIterator(x, num_shards=3, shard_id=0))
        itr = iterators.ShardedIterator(x, num_shards=3, shard_id=0)
        self.test_counting_iterator(ref, itr)

    def test_counting_iterator_take(self):
        ref = list(range(10))
        itr = iterators.CountingIterator(ref)
        itr.take(5)
        self.assertEqual(len(itr), len(list(iter(itr))))
        self.assertEqual(len(itr), 5)

        itr = iterators.CountingIterator(ref)
        itr.take(5)
        self.assertEqual(next(itr), ref[0])
        self.assertEqual(next(itr), ref[1])
        itr.skip(2)
        self.assertEqual(next(itr), ref[4])
        self.assertFalse(itr.has_next())

    def test_counting_iterator_buffered_iterator_take(self):
        ref = list(range(10))
        buffered_itr = iterators.BufferedIterator(2, ref)
        itr = iterators.CountingIterator(buffered_itr)
        itr.take(5)
        self.assertEqual(len(itr), len(list(iter(itr))))
        self.assertEqual(len(itr), 5)

        buffered_itr = iterators.BufferedIterator(2, ref)
        itr = iterators.CountingIterator(buffered_itr)
        itr.take(5)
        self.assertEqual(len(buffered_itr), 5)
        self.assertEqual(len(list(iter(buffered_itr))), 5)

        buffered_itr = iterators.BufferedIterator(2, ref)
        itr = iterators.CountingIterator(buffered_itr)
        itr.take(5)
        self.assertEqual(next(itr), ref[0])
        self.assertEqual(next(itr), ref[1])
        itr.skip(2)
        self.assertEqual(next(itr), ref[4])
        self.assertFalse(itr.has_next())
        self.assertRaises(StopIteration, next, buffered_itr)

        ref = list(range(4, 10))
        buffered_itr = iterators.BufferedIterator(2, ref)
        itr = iterators.CountingIterator(buffered_itr, start=4)
        itr.take(5)
        self.assertEqual(len(itr), 5)
        self.assertEqual(len(buffered_itr), 1)
        self.assertEqual(next(itr), ref[0])
        self.assertFalse(itr.has_next())
        self.assertRaises(StopIteration, next, buffered_itr)


if __name__ == '__main__':
    unittest.main()
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
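The n/skip bookkeeping exercised above is easy to restate: n counts consumed items and skip advances the underlying iterator while counting. A pure-Python stand-in (not fairseq.data.iterators.CountingIterator; take and has_next are omitted):

class TinyCountingIterator:
    """Counts how many items have been consumed; skip() advances and counts."""

    def __init__(self, iterable, start=0):
        self.itr = iter(iterable)
        self.n = start

    def __iter__(self):
        return self

    def __next__(self):
        self.n += 1
        return next(self.itr)

    def skip(self, k):
        for _ in range(k):
            next(self)
        return self

itr = TinyCountingIterator(range(10))
print(next(itr), itr.n)   # 0 1
itr.skip(3)
print(next(itr), itr.n)   # 4 5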
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/tests/test_train.py
tests/test_train.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import contextlib
import logging
import unittest
from io import StringIO
from unittest.mock import MagicMock, patch

import torch

from fairseq import data, checkpoint_utils


def mock_trainer(epoch, num_updates, iterations_in_epoch):
    trainer = MagicMock()
    trainer.load_checkpoint.return_value = {
        'train_iterator': {
            'epoch': epoch,
            'iterations_in_epoch': iterations_in_epoch,
            'shuffle': False,
        },
    }
    trainer.get_num_updates.return_value = num_updates
    return trainer


def mock_dict():
    d = MagicMock()
    d.pad.return_value = 1
    d.eos.return_value = 2
    d.unk.return_value = 3
    return d


def get_trainer_and_epoch_itr(epoch, epoch_size, num_updates, iterations_in_epoch):
    tokens = torch.LongTensor(list(range(epoch_size))).view(1, -1)
    tokens_ds = data.TokenBlockDataset(
        tokens,
        sizes=[tokens.size(-1)],
        block_size=1,
        pad=0,
        eos=1,
        include_targets=False,
    )
    trainer = mock_trainer(epoch, num_updates, iterations_in_epoch)
    dataset = data.LanguagePairDataset(tokens_ds, tokens_ds.sizes, mock_dict(), shuffle=False)
    epoch_itr = data.EpochBatchIterator(
        dataset=dataset,
        collate_fn=dataset.collater,
        batch_sampler=[[i] for i in range(epoch_size)],
    )
    return trainer, epoch_itr


def get_mock_args(finetune_from_model=None):
    args_mock = MagicMock()
    args_mock.optimizer_overrides = '{}'
    args_mock.reset_dataloader = False
    args_mock.reset_meters = False
    args_mock.reset_optimizer = False
    args_mock.reset_lr_scheduler = False
    args_mock.finetune_from_model = finetune_from_model
    args_mock.model_parallel_size = 1
    return args_mock


class TestLoadCheckpoint(unittest.TestCase):
    def setUp(self):
        self.args_mock = get_mock_args()
        self.patches = {
            'os.makedirs': MagicMock(),
            'os.path.join': MagicMock(),
            'os.path.isfile': MagicMock(return_value=True),
            'os.path.isabs': MagicMock(return_value=False),
            'fairseq.file_io.PathManager.exists': MagicMock(return_value=False),
        }
        self.applied_patches = [patch(p, d) for p, d in self.patches.items()]
        [p.start() for p in self.applied_patches]
        logging.disable(logging.CRITICAL)

    def tearDown(self):
        patch.stopall()
        logging.disable(logging.NOTSET)

    def test_load_partial_checkpoint(self):
        with contextlib.redirect_stdout(StringIO()):
            trainer, epoch_itr = get_trainer_and_epoch_itr(2, 150, 200, 50)
            trainer.get_train_iterator = MagicMock(return_value=epoch_itr)

            _, epoch_itr = checkpoint_utils.load_checkpoint(self.args_mock, trainer)

            self.assertEqual(epoch_itr.epoch, 2)
            self.assertEqual(epoch_itr.iterations_in_epoch, 50)

            itr = epoch_itr.next_epoch_itr(shuffle=False)
            self.assertEqual(epoch_itr.epoch, 2)
            self.assertEqual(epoch_itr.iterations_in_epoch, 50)

            self.assertEqual(next(itr)['net_input']['src_tokens'][0].item(), 50)
            self.assertEqual(epoch_itr.iterations_in_epoch, 51)

            for _ in range(150 - 52):
                next(itr)
            self.assertEqual(epoch_itr.iterations_in_epoch, 149)
            self.assertTrue(itr.has_next())
            next(itr)
            self.assertFalse(itr.has_next())

            itr = epoch_itr.next_epoch_itr(shuffle=False)
            self.assertTrue(itr.has_next())
            self.assertEqual(epoch_itr.epoch, 3)
            self.assertEqual(epoch_itr.iterations_in_epoch, 0)

    def test_load_full_checkpoint(self):
        with contextlib.redirect_stdout(StringIO()):
            trainer, epoch_itr = get_trainer_and_epoch_itr(2, 150, 300, 150)
            trainer.get_train_iterator = MagicMock(return_value=epoch_itr)

            _, epoch_itr = checkpoint_utils.load_checkpoint(self.args_mock, trainer)
            itr = epoch_itr.next_epoch_itr(shuffle=False)

            self.assertEqual(epoch_itr.epoch, 3)
            self.assertEqual(epoch_itr.iterations_in_epoch, 0)
            self.assertEqual(next(itr)['net_input']['src_tokens'][0].item(), 0)

    def test_load_no_checkpoint(self):
        with contextlib.redirect_stdout(StringIO()):
            trainer, epoch_itr = get_trainer_and_epoch_itr(1, 150, 0, 0)
            trainer.get_train_iterator = MagicMock(return_value=epoch_itr)
            self.patches['os.path.isfile'].return_value = False

            _, epoch_itr = checkpoint_utils.load_checkpoint(self.args_mock, trainer)
            itr = epoch_itr.next_epoch_itr(shuffle=False)

            self.assertEqual(epoch_itr.epoch, 1)
            self.assertEqual(epoch_itr.iterations_in_epoch, 0)
            self.assertEqual(next(itr)['net_input']['src_tokens'][0].item(), 0)

    def test_finetune_from_model_args_conflict(self):
        with contextlib.redirect_stdout(StringIO()):
            trainer, epoch_itr = get_trainer_and_epoch_itr(1, 150, 0, 0)
            trainer.get_train_iterator = MagicMock(return_value=epoch_itr)

            for arg in ['reset_optimizer', 'reset_lr_scheduler', 'reset_meters', 'reset_dataloader']:
                with self.subTest(arg=arg):
                    args_mock = get_mock_args("/temp/checkpoint_pretrained.pt")
                    setattr(args_mock, arg, True)

                    with self.assertRaises(Exception) as context:
                        _, _ = checkpoint_utils.load_checkpoint(args_mock, trainer)
                    self.assertTrue(
                        "--finetune-from-model can not be set together with either --reset-optimizer"
                        " or reset_lr_scheduler or reset_meters or reset_dataloader"
                        in str(context.exception)
                    )

    def test_finetune_from_model(self):
        with contextlib.redirect_stdout(StringIO()):
            trainer, epoch_itr = get_trainer_and_epoch_itr(1, 150, 0, 0)
            trainer.get_train_iterator = MagicMock(return_value=epoch_itr)
            from_model_path = "/temp/checkpoint_pretrained.pt"

            args_mock = get_mock_args(from_model_path)
            args_mock.restore_file = "checkpoint_last.pt"

            def mock_finetune_exist(path):
                if path == from_model_path:
                    return True
                else:
                    return False

            self.patches['fairseq.file_io.PathManager.exists'].side_effect = mock_finetune_exist
            _, _ = checkpoint_utils.load_checkpoint(args_mock, trainer)
            checkpoint_path, reset_optimizer, reset_lr_scheduler, \
                optimizer_overrides = trainer.load_checkpoint.call_args[0]
            reset_meters = trainer.load_checkpoint.call_args[1]['reset_meters']
            self.assertTrue(reset_optimizer)
            self.assertTrue(reset_lr_scheduler)
            self.assertTrue(reset_meters)

    def test_finetune_from_model_resume(self):
        with contextlib.redirect_stdout(StringIO()):
            trainer, epoch_itr = get_trainer_and_epoch_itr(1, 150, 0, 0)
            trainer.get_train_iterator = MagicMock(return_value=epoch_itr)
            from_model_path = "/temp/checkpoint_pretrained.pt"
            args_mock = get_mock_args(from_model_path)
            args_mock.restore_file = "checkpoint_last.pt"

            # launch second time
            # both restore_file=checkpoint_last.pt and finetune_from_model are set
            def mock_finetune_exist(path):
                if path == from_model_path or path.endswith('checkpoint_last.pt'):
                    return True
                else:
                    return False

            self.patches['fairseq.file_io.PathManager.exists'].side_effect = mock_finetune_exist
            _, _ = checkpoint_utils.load_checkpoint(args_mock, trainer)
            checkpoint_path, reset_optimizer, reset_lr_scheduler, \
                optimizer_overrides = trainer.load_checkpoint.call_args[0]
            reset_meters = trainer.load_checkpoint.call_args[1]['reset_meters']
            self.assertFalse(reset_optimizer)
            self.assertFalse(reset_lr_scheduler)
            self.assertFalse(reset_meters)


if __name__ == '__main__':
    unittest.main()
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
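test_load_partial_checkpoint above boils down to simple resume arithmetic: restoring at iteration 50 of a 150-batch epoch leaves 100 batches, the first of which is batch 50, and finishing them rolls over to epoch 3 at iteration 0. The same arithmetic spelled out in plain Python:

# Toy arithmetic behind the partial-checkpoint test above.
epoch, epoch_size, iterations_in_epoch = 2, 150, 50

remaining = epoch_size - iterations_in_epoch
print("first batch index after resume:", iterations_in_epoch)  # 50
print("batches left in epoch", epoch, ":", remaining)          # 100

# consuming the remainder rolls over to a fresh epoch
epoch, iterations_in_epoch = epoch + 1, 0
print("next epoch:", epoch, "iterations_in_epoch:", iterations_in_epoch)  # 3 0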
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/tests/test_multi_corpus_sampled_dataset.py
tests/test_multi_corpus_sampled_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import unittest
from collections import OrderedDict

import numpy as np
import torch

from fairseq.data import LanguagePairDataset, TokenBlockDataset
from fairseq.data.multi_corpus_sampled_dataset import MultiCorpusSampledDataset
from tests.test_train import mock_dict


class TestMultiCorpusSampledDataset(unittest.TestCase):
    def setUp(self):
        d = mock_dict()
        tokens_1 = torch.LongTensor([1]).view(1, -1)
        tokens_ds1 = TokenBlockDataset(
            tokens_1,
            sizes=[tokens_1.size(-1)],
            block_size=1,
            pad=0,
            eos=1,
            include_targets=False,
        )
        self.dataset_1 = LanguagePairDataset(
            tokens_ds1, tokens_ds1.sizes, d, shuffle=False
        )
        tokens_2 = torch.LongTensor([2]).view(1, -1)
        tokens_ds2 = TokenBlockDataset(
            tokens_2,
            sizes=[tokens_2.size(-1)],
            block_size=1,
            pad=0,
            eos=1,
            include_targets=False,
        )
        self.dataset_2 = LanguagePairDataset(
            tokens_ds2, tokens_ds2.sizes, d, shuffle=False
        )

    def _test_sample_helper(
        self,
        expected_sample_from_first_ds_percentage,
        num_samples=1000,
        sampling_func=None,
    ):
        # To make sure test is not flaky
        np.random.seed(0)
        if sampling_func is None:
            m = MultiCorpusSampledDataset(
                OrderedDict({0: self.dataset_1, 1: self.dataset_2}),
            )
        else:
            m = MultiCorpusSampledDataset(
                OrderedDict({0: self.dataset_1, 1: self.dataset_2}),
                sampling_func=sampling_func,
            )
        m.ordered_indices()
        count_sample_from_first_dataset = 0
        for _ in range(num_samples):
            if m.collater([m[0], m[1]])["net_input"]["src_tokens"][0] == 1:
                count_sample_from_first_dataset += 1
        sample_from_first_ds_percentage = (
            1.0 * count_sample_from_first_dataset / num_samples
        )
        self.assertLess(
            abs(
                sample_from_first_ds_percentage
                - expected_sample_from_first_ds_percentage
            ),
            0.01,
        )

    def test_multi_corpus_sampled_dataset_uniform_sample(self):
        self._test_sample_helper(expected_sample_from_first_ds_percentage=0.5)

    def test_multi_corpus_sampled_dataset_weighted_sample(self):
        def naive_weighted_sample(weights):
            def f(l):
                v = np.random.random()
                agg = 0
                for i, weight in enumerate(weights):
                    agg += weight
                    if agg > v:
                        return i
            return f

        self._test_sample_helper(
            expected_sample_from_first_ds_percentage=0.9,
            sampling_func=naive_weighted_sample(weights=[0.9, 0.1]),
        )
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
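The naive_weighted_sample closure above walks the cumulative weights until they exceed a uniform draw. The same idea stated on its own and checked empirically (numpy only; here the sampler returns a key rather than an index):

import numpy as np

# Cumulative-weight sampling, restated from the test's closure.
def naive_weighted_sample(weights):
    def f(keys):
        v = np.random.random()
        agg = 0.0
        for i, w in enumerate(weights):
            agg += w
            if agg > v:
                return keys[i]
    return f

np.random.seed(0)
sample = naive_weighted_sample([0.9, 0.1])
draws = [sample([0, 1]) for _ in range(10_000)]
print(sum(d == 0 for d in draws) / 10_000)  # close to 0.9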
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/tests/test_constraints.py
tests/test_constraints.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import sys
import torch
import unittest

from typing import List

from fairseq.token_generation_constraints import *


def tensorize(constraints: List[List[int]]) -> List[torch.Tensor]:
    return [torch.tensor(x) for x in constraints]


class TestHelperRoutines(unittest.TestCase):
    def setUp(self):
        self.examples = [
            (
                [[]],
                torch.tensor([[0]])
            ),
            (
                [[], []],
                torch.tensor([[0], [0]])
            ),
            (
                [[torch.tensor([1, 2])], []],
                torch.tensor([[1, 1, 2, 0], [0, 0, 0, 0]])
            ),
            (
                [[torch.tensor([3, 1, 2]), torch.tensor([3]), torch.tensor([4, 5, 6, 7])],
                 [],
                 [torch.tensor([1, 8, 9, 10, 1, 4, 11, 12])]],
                torch.tensor([[3, 3, 1, 2, 0, 3, 0, 4, 5, 6, 7, 0],
                              [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                              [1, 1, 8, 9, 10, 1, 4, 11, 12, 0, 0, 0]])
            )
        ]

    def test_packing(self):
        """Ensures the list of lists of tensors gets packed correctly."""
        for batch_constraints, expected_tensor in self.examples:
            packed = pack_constraints(batch_constraints)
            assert torch.equal(packed, expected_tensor)


class TestUnorderedConstraintState(unittest.TestCase):
    def setUp(self):
        # Tuples of (constraint set, expected printed graph, token counts per node)
        self.examples = [
            (
                tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]),
                "([None].False#6 ([1].True#4 ([2].False#1 [3].True#1) [3].True#1 [4].True#1) ([4].False#2 ([5].True#2 ([6].False#1 [7].True#1))))",
                {1: 4, 2: 1, 3: 2, 4: 3, 5: 2, 6: 1, 7: 1}
            ),
            (
                [],
                "[None].False#0",
                {}
            ),
            (
                tensorize([[0]]),
                "([None].False#1 [0].True#1)",
                {0: 1}
            ),
            (
                tensorize([[100000, 1, 2, 3, 4, 5]]),
                "([None].False#1 ([100000].False#1 ([1].False#1 ([2].False#1 ([3].False#1 ([4].False#1 [5].True#1))))))",
                {100000: 1, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1}
            ),
            (
                tensorize([[1, 2], [1, 2]]),
                "([None].False#2 ([1].False#2 [2].True#2))",
                {1: 2, 2: 2},
            ),
            (
                tensorize([[1, 2], [3, 4]]),
                "([None].False#2 ([1].False#1 [2].True#1) ([3].False#1 [4].True#1))",
                {1: 1, 2: 1, 3: 1, 4: 1},
            ),
        ]

        self.sequences = [
            (
                self.examples[0][0],
                [],
                {"bank": 0, "num_completed": 0, "finished": False, "is_root": True},
            ),
            (
                self.examples[0][0],
                [1, 2],
                {"bank": 2, "num_completed": 0, "finished": False, "is_root": False},
            ),
            (
                self.examples[0][0],
                [1, 2, 94],
                {"bank": 1, "num_completed": 1, "finished": False, "is_root": True},
            ),
            (
                self.examples[0][0],
                [1, 3, 999, 1, 4],
                {"bank": 4, "num_completed": 2, "finished": False, "is_root": False},
            ),
            (
                self.examples[0][0],
                [1, 3, 999, 1, 4, 999],
                {"bank": 4, "num_completed": 2, "finished": False, "is_root": True},
            ),
            (
                self.examples[0][0],
                [4, 5, 6, 8],
                {"bank": 2, "num_completed": 1, "finished": False, "is_root": True},
            ),
            (
                self.examples[0][0],
                # Tricky, because in last three, goes down [1->4] branch, could miss [1] and [4->5]
                # [[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]],
                [1, 2, 3, 1, 3, 1, 4, 4, 5, 6, 7, 1, 4, 5],
                {"bank": 14, "num_completed": 6, "finished": True, "is_root": False},
            ),
            (
                self.examples[0][0],
                [1, 2, 3, 999, 1, 3, 1, 4, 4, 5, 6, 7, 1, 4, 5, 117],
                {"bank": 14, "num_completed": 6, "finished": True, "is_root": True},
            ),
            (
                tensorize([[1], [2, 3]]),
                # Should not be able to get credit for entering 1 a second time
                [1, 1],
                {"bank": 1, "num_completed": 1, "finished": False, "is_root": True},
            ),
            (
                self.examples[4][0],
                [1, 2, 1, 2],
                {"bank": 4, "num_completed": 2, "finished": True, "is_root": False},
            ),
            (
                self.examples[4][0],
                [1, 2, 1, 2, 1],
                {"bank": 4, "num_completed": 2, "finished": True, "is_root": True},
            ),
            (
                self.examples[5][0],
                [1, 2, 3, 4, 5],
                {"bank": 4, "num_completed": 2, "finished": True, "is_root": True},
            ),
        ]

    def test_graphs(self):
        """
        Test whether unordered graph systems are created correctly.
        """
        for example in self.examples:
            constraints, expected, gold_counts = example
            c = ConstraintNode.create(constraints)

            assert ConstraintNode.print_graph(c) == expected, f"got {ConstraintNode.print_graph(c)}, expected {expected}"
            assert c.token_counts() == gold_counts, f"{c} got {c.token_counts()} wanted {gold_counts}"

    def test_next_tokens(self):
        """
        Tests that the set of next tokens is correct.
        """
        for example in self.examples:
            constraints, expected, gold_counts = example
            root = ConstraintNode.create(constraints)

            root_tokens = set(root.children.keys())
            for sequence in constraints:
                state = UnorderedConstraintState(root)
                for token in sequence:
                    all_tokens = root_tokens.union(state.node.children.keys())
                    assert all_tokens == state.next_tokens(), f"ALL {all_tokens} NEXT {state.next_tokens()}"
                    state = state.advance(token)

    def test_sequences(self):
        for constraints, tokens, expected in self.sequences:
            state = UnorderedConstraintState.create(pack_constraints([constraints])[0])
            for token in tokens:
                state = state.advance(token)
            result = {}
            for attr in expected.keys():
                result[attr] = getattr(state, attr)

            assert result == expected, f"TEST({tokens}) GOT: {result} WANTED: {expected}"


class TestOrderedConstraintState(unittest.TestCase):
    def setUp(self):
        self.sequences = [
            (
                tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]),
                [],
                {"bank": 0, "num_completed": 0, "finished": False, "is_root": True},
            ),
            (
                tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]),
                [1, 2],
                {"bank": 2, "num_completed": 0, "finished": False, "is_root": False},
            ),
            (
                tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]),
                [1, 2, 94],
                {"bank": 0, "num_completed": 0, "finished": False, "is_root": True},
            ),
            (
                tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]),
                [1, 3, 999, 1, 4],
                {"bank": 0, "num_completed": 0, "finished": False, "is_root": True},
            ),
            (
                tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]),
                [1, 2, 3, 999, 999],
                {"bank": 3, "num_completed": 1, "finished": False, "is_root": False},
            ),
            (
                tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]),
                [1, 2, 3, 77, 1, 3, 1],
                {"bank": 6, "num_completed": 2, "finished": False, "is_root": False},
            ),
            (
                tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]),
                [1, 2, 3, 1, 3, 1, 4, 4, 5, 6, 7, 1, 4, 5],
                {"bank": 14, "num_completed": 6, "finished": True, "is_root": False},
            ),
            (
                tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]),
                [1, 2, 999, 1, 2, 3, 999, 1, 3, 1, 4, 4, 5, 6, 7, 1, 4, 5, 117],
                {"bank": 14, "num_completed": 6, "finished": True, "is_root": False},
            ),
            (
                tensorize([[1], [2, 3]]),
                [1, 1],
                {"bank": 1, "num_completed": 1, "finished": False, "is_root": False},
            ),
            (
                tensorize([[1, 2], [1, 2]]),
                [1, 2, 1, 2],
                {"bank": 4, "num_completed": 2, "finished": True, "is_root": False},
            ),
            (
                tensorize([[1, 2], [1, 2]]),
                [1, 2, 1, 2, 1],
                {"bank": 4, "num_completed": 2, "finished": True, "is_root": False},
            ),
            (
                tensorize([[1, 2], [3, 4]]),
                [1, 2, 3, 4, 5],
                {"bank": 4, "num_completed": 2, "finished": True, "is_root": False},
            ),
        ]

    def test_sequences(self):
        for i, (constraints, tokens, expected) in enumerate(self.sequences):
            state = OrderedConstraintState.create(pack_constraints([constraints])[0])
            for token in tokens:
                state = state.advance(token)
            result = {}
            for attr in expected.keys():
                result[attr] = getattr(state, attr)
            assert result == expected, f"TEST({tokens}) GOT: {result} WANTED: {expected}"


if __name__ == "__main__":
    unittest.main()
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
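The expected tensors in TestHelperRoutines above imply a packing layout: each row starts with the constraint count, then each constraint's tokens followed by a 0, zero-padded to the widest row in the batch. A toy packer reproducing that layout (not fairseq's pack_constraints):

import torch

# Toy packer matching the layout asserted in TestHelperRoutines:
# row = [num_constraints, tokens of constraint 1, 0, tokens of 2, 0, ...],
# then zero-padded so every row in the batch has the same width.
def toy_pack(batch):
    rows = []
    for constraints in batch:
        row = [len(constraints)]
        for c in constraints:
            row.extend(c)
            row.append(0)
        rows.append(row)
    width = max(len(r) for r in rows)
    return torch.tensor([r + [0] * (width - len(r)) for r in rows])

print(toy_pack([[[1, 2]], []]))
# tensor([[1, 1, 2, 0],
#         [0, 0, 0, 0]])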
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/tests/gpu/test_binaries_gpu.py
tests/gpu/test_binaries_gpu.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import contextlib
import logging
import os
import tempfile
import unittest
from io import StringIO

import torch

from fairseq import options
from fairseq_cli import train
from tests.utils import (
    create_dummy_data,
    generate_main,
    preprocess_lm_data,
    preprocess_translation_data,
    train_translation_model,
)


class TestTranslationGPU(unittest.TestCase):
    def setUp(self):
        logging.disable(logging.CRITICAL)

    def tearDown(self):
        logging.disable(logging.NOTSET)

    @unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU")
    def test_fp16(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory("test_fp16") as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                train_translation_model(data_dir, "fconv_iwslt_de_en", ["--fp16"])
                generate_main(data_dir)

    @unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU")
    def test_memory_efficient_fp16(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory("test_memory_efficient_fp16") as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                train_translation_model(
                    data_dir, "fconv_iwslt_de_en", ["--memory-efficient-fp16"]
                )
                generate_main(data_dir)

    @unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU")
    def test_transformer_fp16(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory("test_transformer") as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                train_translation_model(
                    data_dir,
                    "transformer_iwslt_de_en",
                    [
                        "--encoder-layers",
                        "2",
                        "--decoder-layers",
                        "2",
                        "--encoder-embed-dim",
                        "8",
                        "--decoder-embed-dim",
                        "8",
                        "--fp16",
                    ],
                    run_validation=True,
                )
                generate_main(data_dir)

    @unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU")
    def test_levenshtein_transformer(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory(
                "test_levenshtein_transformer"
            ) as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir, ["--joined-dictionary"])
                train_translation_model(
                    data_dir,
                    "levenshtein_transformer",
                    [
                        "--apply-bert-init",
                        "--early-exit",
                        "6,6,6",
                        "--criterion",
                        "nat_loss",
                    ],
                    task="translation_lev",
                )
                generate_main(
                    data_dir,
                    [
                        "--task",
                        "translation_lev",
                        "--iter-decode-max-iter",
                        "9",
                        "--iter-decode-eos-penalty",
                        "0",
                        "--print-step",
                    ],
                )


def _quantize_language_model(data_dir, arch, extra_flags=None, run_validation=False):
    train_parser = options.get_training_parser()
    train_args = options.parse_args_and_arch(
        train_parser,
        [
            "--task",
            "language_modeling",
            data_dir,
            "--arch",
            arch,
            "--optimizer",
            "adam",
            "--lr",
            "0.0001",
            "--criterion",
            "adaptive_loss",
            "--adaptive-softmax-cutoff",
            "5,10,15",
            "--max-tokens",
            "500",
            "--tokens-per-sample",
            "500",
            "--save-dir",
            data_dir,
            "--max-epoch",
            "1",
            "--no-progress-bar",
            "--distributed-world-size",
            "1",
            "--ddp-backend",
            "no_c10d",
            "--num-workers",
            "0",
        ]
        + (extra_flags or []),
    )
    train.main(train_args)

    # try scalar quantization
    scalar_quant_train_parser = options.get_training_parser()
    scalar_quant_train_args = options.parse_args_and_arch(
        scalar_quant_train_parser,
        [
            "--task",
            "language_modeling",
            data_dir,
            "--arch",
            arch,
            "--optimizer",
            "adam",
            "--lr",
            "0.0001",
            "--criterion",
            "adaptive_loss",
            "--adaptive-softmax-cutoff",
            "5,10,15",
            "--max-tokens",
            "500",
            "--tokens-per-sample",
            "500",
            "--save-dir",
            data_dir,
            "--max-update",
            "3",
            "--no-progress-bar",
            "--distributed-world-size",
            "1",
            "--ddp-backend",
            "no_c10d",
            "--num-workers",
            "0",
            "--quant-noise-scalar",
            "0.5",
        ]
        + (extra_flags or []),
    )
    train.main(scalar_quant_train_args)

    # try iterative PQ quantization
    quantize_parser = options.get_training_parser()
    quantize_args = options.parse_args_and_arch(
        quantize_parser,
        [
            "--task",
            "language_modeling",
            data_dir,
            "--arch",
            arch,
            "--optimizer",
            "adam",
            "--lr",
            "0.0001",
            "--criterion",
            "adaptive_loss",
            "--adaptive-softmax-cutoff",
            "5,10,15",
            "--max-tokens",
            "50",
            "--tokens-per-sample",
            "50",
            "--max-update",
            "6",
            "--no-progress-bar",
            "--distributed-world-size",
            "1",
            "--ddp-backend",
            "no_c10d",
            "--num-workers",
            "0",
            "--restore-file",
            os.path.join(data_dir, "checkpoint_last.pt"),
            "--reset-optimizer",
            "--quantization-config-path",
            os.path.join(
                os.path.dirname(__file__), "transformer_quantization_config.yaml"
            ),
        ]
        + (extra_flags or []),
    )
    train.main(quantize_args)


class TestQuantization(unittest.TestCase):
    def setUp(self):
        logging.disable(logging.CRITICAL)

    def tearDown(self):
        logging.disable(logging.NOTSET)

    @unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU")
    def test_quantization(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory("test_quantization") as data_dir:
                create_dummy_data(data_dir)
                preprocess_lm_data(data_dir)
                # tests both scalar and iterative PQ quantization
                _quantize_language_model(data_dir, "transformer_lm")


class TestOptimizersGPU(unittest.TestCase):
    def setUp(self):
        logging.disable(logging.CRITICAL)

    def tearDown(self):
        logging.disable(logging.NOTSET)

    @unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU")
    def test_flat_grads(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory("test_flat_grads") as data_dir:
                # Use just a bit of data and tiny model to keep this test runtime reasonable
                create_dummy_data(data_dir, num_examples=10, maxlen=5)
                preprocess_translation_data(data_dir)
                with self.assertRaises(RuntimeError):
                    # adafactor isn't compatible with flat grads, which
                    # are used by default with --fp16
                    train_translation_model(
                        data_dir,
                        "lstm",
                        [
                            "--required-batch-size-multiple",
                            "1",
                            "--encoder-layers",
                            "1",
                            "--encoder-hidden-size",
                            "32",
                            "--decoder-layers",
                            "1",
                            "--optimizer",
                            "adafactor",
                            "--fp16",
                        ],
                    )
                # but it should pass once we set --fp16-no-flatten-grads
                train_translation_model(
                    data_dir,
                    "lstm",
                    [
                        "--required-batch-size-multiple",
                        "1",
                        "--encoder-layers",
                        "1",
                        "--encoder-hidden-size",
                        "32",
                        "--decoder-layers",
                        "1",
                        "--optimizer",
                        "adafactor",
                        "--fp16",
                        "--fp16-no-flatten-grads",
                    ],
                )


if __name__ == "__main__":
    unittest.main()
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
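The quantization test above exercises fairseq's scalar and iterative PQ paths end to end. As background only, a generic scalar fake-quantization sketch (the usual round-to-grid idea; this is an assumption about the flavor of quantization involved, not fairseq's implementation):

import torch

# Generic scalar fake-quantization: snap floats onto a uniform 8-bit
# grid and map them back. Sketch only; not fairseq's quantization code.
def fake_quantize(x, bits=8):
    scale = x.abs().max() / (2 ** (bits - 1) - 1)
    return torch.round(x / scale) * scale

x = torch.randn(4)
print(x)
print(fake_quantize(x))  # same shape, values snapped to the grid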
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/tests/gpu/__init__.py
tests/gpu/__init__.py
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/tests/speech_recognition/test_vggtransformer.py
tests/speech_recognition/test_vggtransformer.py
#!/usr/bin/env python3

# import models/encoder/decoder to be tested
from examples.speech_recognition.models.vggtransformer import (
    TransformerDecoder,
    VGGTransformerEncoder,
    VGGTransformerModel,
    vggtransformer_1,
    vggtransformer_2,
    vggtransformer_base,
)

# import base test class
from .asr_test_base import (
    DEFAULT_TEST_VOCAB_SIZE,
    TestFairseqDecoderBase,
    TestFairseqEncoderBase,
    TestFairseqEncoderDecoderModelBase,
    get_dummy_dictionary,
    get_dummy_encoder_output,
    get_dummy_input,
)


class VGGTransformerModelTest_mid(TestFairseqEncoderDecoderModelBase):
    def setUp(self):
        def override_config(args):
            """
            vggtransformer_1 uses 14 transformer layers, which is too
            expensive for testing purposes. For a fast turn-around test,
            reduce the number of layers to 3.
            """
            args.transformer_enc_config = (
                "((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 3"
            )

        super().setUp()
        extra_args_setter = [vggtransformer_1, override_config]

        self.setUpModel(VGGTransformerModel, extra_args_setter)
        self.setUpInput(get_dummy_input(T=50, D=80, B=5, K=DEFAULT_TEST_VOCAB_SIZE))


class VGGTransformerModelTest_big(TestFairseqEncoderDecoderModelBase):
    def setUp(self):
        def override_config(args):
            """
            vggtransformer_2 uses 16 transformer layers, which is too
            expensive for testing purposes. For a fast turn-around test,
            reduce the number of layers to 3.
            """
            args.transformer_enc_config = (
                "((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 3"
            )

        super().setUp()
        extra_args_setter = [vggtransformer_2, override_config]

        self.setUpModel(VGGTransformerModel, extra_args_setter)
        self.setUpInput(get_dummy_input(T=50, D=80, B=5, K=DEFAULT_TEST_VOCAB_SIZE))


class VGGTransformerModelTest_base(TestFairseqEncoderDecoderModelBase):
    def setUp(self):
        def override_config(args):
            """
            vggtransformer_base uses 12 transformer layers, which is too
            expensive for testing purposes. For a fast turn-around test,
            reduce the number of layers to 3.
            """
            args.transformer_enc_config = (
                "((512, 8, 2048, True, 0.15, 0.15, 0.15),) * 3"
            )

        super().setUp()
        extra_args_setter = [vggtransformer_base, override_config]

        self.setUpModel(VGGTransformerModel, extra_args_setter)
        self.setUpInput(get_dummy_input(T=50, D=80, B=5, K=DEFAULT_TEST_VOCAB_SIZE))


class VGGTransformerEncoderTest(TestFairseqEncoderBase):
    def setUp(self):
        super().setUp()
        self.setUpInput(get_dummy_input(T=50, D=80, B=5))

    def test_forward(self):
        print("1. test standard vggtransformer")
        self.setUpEncoder(VGGTransformerEncoder(input_feat_per_channel=80))
        super().test_forward()

        print("2. test vggtransformer with limited right context")
        self.setUpEncoder(
            VGGTransformerEncoder(
                input_feat_per_channel=80, transformer_context=(-1, 5)
            )
        )
        super().test_forward()

        print("3. test vggtransformer with limited left context")
        self.setUpEncoder(
            VGGTransformerEncoder(
                input_feat_per_channel=80, transformer_context=(5, -1)
            )
        )
        super().test_forward()

        print("4. test vggtransformer with limited right context and sampling")
        self.setUpEncoder(
            VGGTransformerEncoder(
                input_feat_per_channel=80,
                transformer_context=(-1, 12),
                transformer_sampling=(2, 2),
            )
        )
        super().test_forward()

        print("5. test vggtransformer with windowed context and sampling")
        self.setUpEncoder(
            VGGTransformerEncoder(
                input_feat_per_channel=80,
                transformer_context=(12, 12),
                transformer_sampling=(2, 2),
            )
        )
        super().test_forward()


class TransformerDecoderTest(TestFairseqDecoderBase):
    def setUp(self):
        super().setUp()

        dict = get_dummy_dictionary(vocab_size=DEFAULT_TEST_VOCAB_SIZE)
        decoder = TransformerDecoder(dict)
        dummy_encoder_output = get_dummy_encoder_output(encoder_out_shape=(50, 5, 256))

        self.setUpDecoder(decoder)
        self.setUpInput(dummy_encoder_output)
        self.setUpPrevOutputTokens()
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
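The transformer_enc_config overrides above are Python expressions stored as strings; evaluating one yields the per-layer spec tuples (the model code is assumed to eval such strings). A sketch, for trusted literals only:

# Evaluating a transformer_enc_config string yields a tuple of per-layer
# specs; here tuple repetition produces 3 identical layers. Use eval only
# on trusted config literals like these.
cfg = "((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 3"
layers = eval(cfg)
print(len(layers))   # 3
print(layers[0])     # (1024, 16, 4096, True, 0.15, 0.15, 0.15)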
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/tests/speech_recognition/test_data_utils.py
tests/speech_recognition/test_data_utils.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import unittest

import torch

from examples.speech_recognition.data import data_utils


class DataUtilsTest(unittest.TestCase):
    def test_normalization(self):
        sample_len1 = torch.tensor(
            [[-0.7661, -1.3889, -2.0972, -0.9134, -0.7071, -0.9765, -0.8700, -0.8283,
              0.7512, 1.3211, 2.1532, 2.1174, 1.2800, 1.2633, 1.6147, 1.6322,
              2.0723, 3.1522, 3.2852, 2.2309, 2.5569, 2.2183, 2.2862, 1.5886,
              0.8773, 0.8725, 1.2662, 0.9899, 1.1069, 1.3926, 1.2795, 1.1199,
              1.1477, 1.2687, 1.3843, 1.1903, 0.8355, 1.1367, 1.2639, 1.4707]]
        )
        out = data_utils.apply_mv_norm(sample_len1)
        assert not torch.isnan(out).any()
        assert (out == sample_len1).all()
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
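The assertion out == sample_len1 above suggests the normalization degrades gracefully when there is no usable variance. A sketch of a mean-variance norm with that guard (an assumption about apply_mv_norm's intent, not its actual code):

import torch

# Mean-variance normalization that returns the input unchanged when
# there is only a single frame, so no variance can be estimated.
def safe_mv_norm(feats):
    if feats.size(0) <= 1:
        return feats
    mean = feats.mean(dim=0, keepdim=True)
    std = feats.std(dim=0, keepdim=True)
    return (feats - mean) / (std + 1e-8)

one_frame = torch.randn(1, 40)
print(torch.equal(safe_mv_norm(one_frame), one_frame))  # True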
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/tests/speech_recognition/asr_test_base.py
tests/speech_recognition/asr_test_base.py
#!/usr/bin/env python3

import argparse
import os
import unittest
from inspect import currentframe, getframeinfo

import numpy as np
import torch

from fairseq.data import data_utils as fairseq_data_utils
from fairseq.data.dictionary import Dictionary
from fairseq.models import (
    BaseFairseqModel,
    FairseqDecoder,
    FairseqEncoder,
    FairseqEncoderDecoderModel,
    FairseqEncoderModel,
    FairseqModel,
)
from fairseq.tasks.fairseq_task import LegacyFairseqTask
from examples.speech_recognition.data.data_utils import lengths_to_encoder_padding_mask

DEFAULT_TEST_VOCAB_SIZE = 100


# ///////////////////////////////////////////////////////////////////////////
# utility function to setup dummy dict/task/input
# ///////////////////////////////////////////////////////////////////////////


def get_dummy_dictionary(vocab_size=DEFAULT_TEST_VOCAB_SIZE):
    dummy_dict = Dictionary()
    # add dummy symbol to satisfy vocab size
    for id, _ in enumerate(range(vocab_size)):
        dummy_dict.add_symbol("{}".format(id), 1000)
    return dummy_dict


class DummyTask(LegacyFairseqTask):
    def __init__(self, args):
        super().__init__(args)
        self.dictionary = get_dummy_dictionary()
        if getattr(self.args, "ctc", False):
            self.dictionary.add_symbol("<ctc_blank>")
        self.tgt_dict = self.dictionary

    @property
    def target_dictionary(self):
        return self.dictionary


def get_dummy_task_and_parser():
    """
    To build a fairseq model, we need a dummy parser and task. This function
    creates a dummy task and parser to facilitate model/criterion tests.

    Note: we use FbSpeechRecognitionTask as the dummy task. You may want to
    use another task by providing a different function.
    """
    parser = argparse.ArgumentParser(
        description="test_dummy_s2s_task", argument_default=argparse.SUPPRESS
    )
    DummyTask.add_args(parser)
    args = parser.parse_args([])
    task = DummyTask.setup_task(args)
    return task, parser


def get_dummy_input(T=100, D=80, B=5, K=100):
    forward_input = {}
    # T max sequence length
    # D feature vector dimension
    # B batch size
    # K target dimension size
    feature = torch.randn(B, T, D)
    # this (B, T, D) layout is just a convention; you can override it by
    # writing your own _prepare_forward_input function
    src_lengths = torch.from_numpy(
        np.random.randint(low=1, high=T, size=B, dtype=np.int64)
    )
    src_lengths[0] = T  # make sure the maximum length matches
    prev_output_tokens = []
    for b in range(B):
        token_length = np.random.randint(low=1, high=src_lengths[b].item() + 1)
        tokens = np.random.randint(low=0, high=K, size=token_length, dtype=np.int64)
        prev_output_tokens.append(torch.from_numpy(tokens))

    prev_output_tokens = fairseq_data_utils.collate_tokens(
        prev_output_tokens,
        pad_idx=1,
        eos_idx=2,
        left_pad=False,
        move_eos_to_beginning=False,
    )
    src_lengths, sorted_order = src_lengths.sort(descending=True)
    forward_input["src_tokens"] = feature.index_select(0, sorted_order)
    forward_input["src_lengths"] = src_lengths
    forward_input["prev_output_tokens"] = prev_output_tokens

    return forward_input


def get_dummy_encoder_output(encoder_out_shape=(100, 80, 5)):
    """
    This only provides an example to generate dummy encoder output
    """
    (T, B, D) = encoder_out_shape
    encoder_out = {}

    encoder_out["encoder_out"] = torch.from_numpy(
        np.random.randn(*encoder_out_shape).astype(np.float32)
    )
    seq_lengths = torch.from_numpy(np.random.randint(low=1, high=T, size=B))
    # some dummy mask
    encoder_out["encoder_padding_mask"] = torch.arange(T).view(1, T).expand(
        B, -1
    ) >= seq_lengths.view(B, 1).expand(-1, T)
    encoder_out["encoder_padding_mask"].t_()

    # encoder_padding_mask is a (T, B) tensor whose (t, b)-th element indicates
    # whether encoder_out[t, b] is valid (=0) or padding (=1)
    return encoder_out


def _current_position_info():
    cf = currentframe()
    frameinfo = " (at {}:{})".format(
        os.path.basename(getframeinfo(cf).filename), cf.f_back.f_lineno
    )
    return frameinfo


def check_encoder_output(encoder_output, batch_size=None):
    """we expect encoder_output to be a dict with the following
    key/value pairs:
    - encoder_out: a Torch.Tensor
    - encoder_padding_mask: a binary Torch.Tensor
    """
    if not isinstance(encoder_output, dict):
        msg = (
            "FairseqEncoderModel.forward(...) must return a dict"
            + _current_position_info()
        )
        return False, msg

    if "encoder_out" not in encoder_output:
        msg = (
            "FairseqEncoderModel.forward(...) must contain encoder_out"
            + _current_position_info()
        )
        return False, msg

    if "encoder_padding_mask" not in encoder_output:
        msg = (
            "FairseqEncoderModel.forward(...) must contain encoder_padding_mask"
            + _current_position_info()
        )
        return False, msg

    if not isinstance(encoder_output["encoder_out"], torch.Tensor):
        msg = "encoder_out must be a torch.Tensor" + _current_position_info()
        return False, msg

    if encoder_output["encoder_out"].dtype != torch.float32:
        msg = "encoder_out must have float32 dtype" + _current_position_info()
        return False, msg

    mask = encoder_output["encoder_padding_mask"]
    if mask is not None:
        if not isinstance(mask, torch.Tensor):
            msg = (
                "encoder_padding_mask must be a torch.Tensor"
                + _current_position_info()
            )
            return False, msg
        if (
            mask.dtype != torch.uint8
            and (not hasattr(torch, 'bool') or mask.dtype != torch.bool)
        ):
            msg = (
                "encoder_padding_mask must have dtype of uint8"
                + _current_position_info()
            )
            return False, msg

        if mask.dim() != 2:
            msg = (
                "we expect encoder_padding_mask to be a 2-d tensor, in shape (T, B)"
                + _current_position_info()
            )
            return False, msg

        if batch_size is not None and mask.size(1) != batch_size:
            msg = (
                "we expect encoder_padding_mask to be a 2-d tensor, with size(1)"
                + " being the batch size"
                + _current_position_info()
            )
            return False, msg
    return True, None


def check_decoder_output(decoder_output):
    """we expect output from a decoder is a tuple with the following constraint:
    - the first element is a torch.Tensor
    - the second element can be anything (reserved for future use)
    """
    if not isinstance(decoder_output, tuple):
        msg = "FairseqDecoder output must be a tuple" + _current_position_info()
        return False, msg

    if len(decoder_output) != 2:
        msg = "FairseqDecoder output must be 2-elem tuple" + _current_position_info()
        return False, msg

    if not isinstance(decoder_output[0], torch.Tensor):
        msg = (
            "FairseqDecoder output[0] must be a torch.Tensor"
            + _current_position_info()
        )
        return False, msg

    return True, None


# ///////////////////////////////////////////////////////////////////////////
# Base Test class
# ///////////////////////////////////////////////////////////////////////////


class TestBaseFairseqModelBase(unittest.TestCase):
    """
    This class is used to facilitate writing unit tests for any class derived
    from `BaseFairseqModel`.
    """

    @classmethod
    def setUpClass(cls):
        if cls is TestBaseFairseqModelBase:
            raise unittest.SkipTest("Skipping test case in base")
        super().setUpClass()

    def setUpModel(self, model):
        self.assertTrue(isinstance(model, BaseFairseqModel))
        self.model = model

    def setupInput(self):
        pass

    def setUp(self):
        self.model = None
        self.forward_input = None


class TestFairseqEncoderDecoderModelBase(TestBaseFairseqModelBase):
    """
    base code to test FairseqEncoderDecoderModel (formerly known as
    `FairseqModel`); tests must be derived from this base class
    """

    @classmethod
    def setUpClass(cls):
        if cls is TestFairseqEncoderDecoderModelBase:
            raise unittest.SkipTest("Skipping test case in base")
        super().setUpClass()

    def setUpModel(self, model_cls, extra_args_setters=None):
        self.assertTrue(
            issubclass(model_cls, (FairseqEncoderDecoderModel, FairseqModel)),
            msg="This class only tests for FairseqModel subclasses",
        )

        task, parser = get_dummy_task_and_parser()
        model_cls.add_args(parser)
        args = parser.parse_args([])
        if extra_args_setters is not None:
            for args_setter in extra_args_setters:
                args_setter(args)
        model = model_cls.build_model(args, task)
        self.model = model

    def setUpInput(self, input=None):
        self.forward_input = get_dummy_input() if input is None else input

    def setUp(self):
        super().setUp()

    def test_forward(self):
        if self.model and self.forward_input:
            forward_output = self.model.forward(**self.forward_input)
            # for FairseqEncoderDecoderModel, forward returns a tuple of two
            # elements, the first one is a Torch.Tensor
            succ, msg = check_decoder_output(forward_output)
            if not succ:
                self.assertTrue(succ, msg=msg)
            self.forward_output = forward_output

    def test_get_normalized_probs(self):
        if self.model and self.forward_input:
            forward_output = self.model.forward(**self.forward_input)
            logprob = self.model.get_normalized_probs(forward_output, log_probs=True)
            prob = self.model.get_normalized_probs(forward_output, log_probs=False)

            # in order for different models/criterion to play with each other
            # we need to know whether the logprob or prob output is batch_first
            # or not. We assume an additional attribute will be attached to logprob
            # or prob. If you find your code fails here, simply override
            # FairseqModel.get_normalized_probs, see example at
            # https://fburl.com/batch_first_example
            self.assertTrue(hasattr(logprob, "batch_first"))
            self.assertTrue(hasattr(prob, "batch_first"))

            self.assertTrue(torch.is_tensor(logprob))
            self.assertTrue(torch.is_tensor(prob))


class TestFairseqEncoderModelBase(TestBaseFairseqModelBase):
    """
    base class to test FairseqEncoderModel
    """

    @classmethod
    def setUpClass(cls):
        if cls is TestFairseqEncoderModelBase:
            raise unittest.SkipTest("Skipping test case in base")
        super().setUpClass()

    def setUpModel(self, model_cls, extra_args_setters=None):
        self.assertTrue(
            issubclass(model_cls, FairseqEncoderModel),
            msg="This class is only used for testing FairseqEncoderModel",
        )
        task, parser = get_dummy_task_and_parser()
        model_cls.add_args(parser)
        args = parser.parse_args([])
        if extra_args_setters is not None:
            for args_setter in extra_args_setters:
                args_setter(args)

        model = model_cls.build_model(args, task)
        self.model = model

    def setUpInput(self, input=None):
        self.forward_input = get_dummy_input() if input is None else input
        # get_dummy_input() is originally for s2s, here we delete extra dict
        # items, so it can be used for EncoderModel / Encoder as well
        self.forward_input.pop("prev_output_tokens", None)

    def setUp(self):
        super().setUp()

    def test_forward(self):
        if self.forward_input and self.model:
            bsz = self.forward_input["src_tokens"].size(0)
            forward_output = self.model.forward(**self.forward_input)

            # we expect forward_output to be a dict with the following
            # key/value pairs:
            # - encoder_out: a Torch.Tensor
            # - encoder_padding_mask: a binary Torch.Tensor
            succ, msg = check_encoder_output(forward_output, batch_size=bsz)
            if not succ:
                self.assertTrue(succ, msg=msg)
            self.forward_output = forward_output

    def test_get_normalized_probs(self):
        if self.model and self.forward_input:
            forward_output = self.model.forward(**self.forward_input)
            logprob = self.model.get_normalized_probs(forward_output, log_probs=True)
            prob = self.model.get_normalized_probs(forward_output, log_probs=False)

            # in order for different models/criterion to play with each other
            # we need to know whether the logprob or prob output is batch_first
            # or not. We assume an additional attribute will be attached to logprob
            # or prob. If you find your code fails here, simply override
            # FairseqModel.get_normalized_probs, see example at
            # https://fburl.com/batch_first_example
            self.assertTrue(hasattr(logprob, "batch_first"))
            self.assertTrue(hasattr(prob, "batch_first"))

            self.assertTrue(torch.is_tensor(logprob))
            self.assertTrue(torch.is_tensor(prob))


class TestFairseqEncoderBase(unittest.TestCase):
    """
    base class to test FairseqEncoder
    """

    @classmethod
    def setUpClass(cls):
        if cls is TestFairseqEncoderBase:
            raise unittest.SkipTest("Skipping test case in base")
        super().setUpClass()

    def setUpEncoder(self, encoder):
        self.assertTrue(
            isinstance(encoder, FairseqEncoder),
            msg="This class is only used for testing FairseqEncoder",
        )
        self.encoder = encoder

    def setUpInput(self, input=None):
        self.forward_input = get_dummy_input() if input is None else input
        # get_dummy_input() is originally for s2s, here we delete extra dict
        # items, so it can be used for EncoderModel / Encoder as well
        self.forward_input.pop("prev_output_tokens", None)

    def setUp(self):
        self.encoder = None
        self.forward_input = None

    def test_forward(self):
        if self.encoder and self.forward_input:
            bsz = self.forward_input["src_tokens"].size(0)

            forward_output = self.encoder.forward(**self.forward_input)
            succ, msg = check_encoder_output(forward_output, batch_size=bsz)
            if not succ:
                self.assertTrue(succ, msg=msg)
            self.forward_output = forward_output


class TestFairseqDecoderBase(unittest.TestCase):
    """
    base class to test FairseqDecoder
    """

    @classmethod
    def setUpClass(cls):
        if cls is TestFairseqDecoderBase:
            raise unittest.SkipTest("Skipping test case in base")
        super().setUpClass()

    def setUpDecoder(self, decoder):
        self.assertTrue(
            isinstance(decoder, FairseqDecoder),
            msg="This class is only used for testing FairseqDecoder",
        )
        self.decoder = decoder

    def setUpInput(self, input=None):
        self.forward_input = get_dummy_encoder_output() if input is None else input

    def setUpPrevOutputTokens(self, tokens=None):
        if tokens is None:
            self.encoder_input = get_dummy_input()
            self.prev_output_tokens = self.encoder_input["prev_output_tokens"]
        else:
            self.prev_output_tokens = tokens

    def setUp(self):
        self.decoder = None
        self.forward_input = None
        self.prev_output_tokens = None

    def test_forward(self):
        if (
            self.decoder is not None
            and self.forward_input is not None
            and self.prev_output_tokens is not None
        ):
            forward_output = self.decoder.forward(
                prev_output_tokens=self.prev_output_tokens,
                encoder_out=self.forward_input,
            )
            succ, msg = check_decoder_output(forward_output)
            if not succ:
                self.assertTrue(succ, msg=msg)
            self.forward_output = forward_output


class DummyEncoderModel(FairseqEncoderModel):
    def __init__(self, encoder):
        super().__init__(encoder)

    @classmethod
    def build_model(cls, args, task):
        return cls(DummyEncoder())

    def get_logits(self, net_output):
        # Inverse of sigmoid to use with BinaryCrossEntropyWithLogitsCriterion as
        # F.binary_cross_entropy_with_logits combines sigmoid and CE
        return torch.log(
            torch.div(net_output["encoder_out"], 1 - net_output["encoder_out"])
        )

    def get_normalized_probs(self, net_output, log_probs, sample=None):
        lprobs = super().get_normalized_probs(net_output, log_probs, sample=sample)
        lprobs.batch_first = True
        return lprobs


class DummyEncoder(FairseqEncoder):
    def __init__(self):
        super().__init__(None)

    def forward(self, src_tokens, src_lengths):
        mask, max_len = lengths_to_encoder_padding_mask(src_lengths)
        return {"encoder_out": src_tokens, "encoder_padding_mask": mask}


class CrossEntropyCriterionTestBase(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        if cls is CrossEntropyCriterionTestBase:
            raise unittest.SkipTest("Skipping base class test case")
        super().setUpClass()

    def setUpArgs(self):
        args = argparse.Namespace()
        args.sentence_avg = False
        args.threshold = 0.1  # to use with BinaryCrossEntropyWithLogitsCriterion
        return args

    def setUp(self):
        args = self.setUpArgs()
        self.model = DummyEncoderModel(encoder=DummyEncoder())
        self.criterion = self.criterion_cls.build_criterion(args=args, task=DummyTask(args))

    def get_src_tokens(self, correct_prediction, aggregate):
        """
        correct_prediction: True if the net_output (src_tokens) should
        predict the correct target
        aggregate: True if the criterion expects net_output (src_tokens)
        aggregated across time axis
        """
        predicted_idx = 0 if correct_prediction else 1
        if aggregate:
            src_tokens = torch.zeros((2, 2), dtype=torch.float)
            for b in range(2):
                src_tokens[b][predicted_idx] = 1.0
        else:
            src_tokens = torch.zeros((2, 10, 2), dtype=torch.float)
            for b in range(2):
                for t in range(10):
                    src_tokens[b][t][predicted_idx] = 1.0
        return src_tokens

    def get_target(self, soft_target):
        if soft_target:
            target = torch.zeros((2, 2), dtype=torch.float)
            for b in range(2):
                target[b][0] = 1.0
        else:
            target = torch.zeros((2, 10), dtype=torch.long)
        return target

    def get_test_sample(self, correct, soft_target, aggregate):
        src_tokens = self.get_src_tokens(correct, aggregate)
        target = self.get_target(soft_target)
        L = src_tokens.size(1)
        return {
            "net_input": {"src_tokens": src_tokens, "src_lengths": torch.tensor([L])},
            "target": target,
            "ntokens": src_tokens.size(0) * src_tokens.size(1),
        }
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/tests/speech_recognition/__init__.py
tests/speech_recognition/__init__.py
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/tests/speech_recognition/test_collaters.py
tests/speech_recognition/test_collaters.py
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import numpy as np import torch from examples.speech_recognition.data.collaters import Seq2SeqCollater class TestSeq2SeqCollator(unittest.TestCase): def test_collate(self): eos_idx = 1 pad_idx = 0 collater = Seq2SeqCollater( feature_index=0, label_index=1, pad_index=pad_idx, eos_index=eos_idx ) # 2 frames in the first sample and 3 frames in the second one frames1 = np.array([[7, 8], [9, 10]]) frames2 = np.array([[1, 2], [3, 4], [5, 6]]) target1 = np.array([4, 2, 3, eos_idx]) target2 = np.array([3, 2, eos_idx]) sample1 = {"id": 0, "data": [frames1, target1]} sample2 = {"id": 1, "data": [frames2, target2]} batch = collater.collate([sample1, sample2]) # collate sort inputs by frame's length before creating the batch self.assertTensorEqual(batch["id"], torch.tensor([1, 0])) self.assertEqual(batch["ntokens"], 7) self.assertTensorEqual( batch["net_input"]["src_tokens"], torch.tensor( [[[1, 2], [3, 4], [5, 6]], [[7, 8], [9, 10], [pad_idx, pad_idx]]] ), ) self.assertTensorEqual( batch["net_input"]["prev_output_tokens"], torch.tensor([[eos_idx, 3, 2, pad_idx], [eos_idx, 4, 2, 3]]), ) self.assertTensorEqual(batch["net_input"]["src_lengths"], torch.tensor([3, 2])) self.assertTensorEqual( batch["target"], torch.tensor([[3, 2, eos_idx, pad_idx], [4, 2, 3, eos_idx]]), ) self.assertEqual(batch["nsentences"], 2) def assertTensorEqual(self, t1, t2): self.assertEqual(t1.size(), t2.size(), "size mismatch") self.assertEqual(t1.ne(t2).long().sum(), 0) if __name__ == "__main__": unittest.main()
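The expected tensors in the test above follow from sort-by-length padding plus an eos right-shift. Below is a minimal sketch of that logic in plain torch; the helper name pad_and_sort is hypothetical (not part of the repo), and it reproduces the test's toy values:

import torch

def pad_and_sort(frames, targets, pad_idx=0, eos_idx=1):
    # Sort samples by frame count, longest first, as the collater does.
    order = sorted(range(len(frames)), key=lambda i: -frames[i].size(0))
    frames = [frames[i] for i in order]
    targets = [targets[i] for i in order]
    max_t = max(f.size(0) for f in frames)
    # Pad shorter feature sequences with pad_idx along the time axis.
    src = torch.stack([
        torch.cat([f, f.new_full((max_t - f.size(0), f.size(1)), pad_idx)])
        for f in frames
    ])
    max_l = max(t.size(0) for t in targets)
    tgt = torch.stack([
        torch.cat([t, t.new_full((max_l - t.size(0),), pad_idx)])
        for t in targets
    ])
    # prev_output_tokens: eos moved to the front, trailing eos dropped, then padded.
    prev = torch.stack([
        torch.cat([t.new_tensor([eos_idx]), t[:-1],
                   t.new_full((max_l - t.size(0),), pad_idx)])
        for t in targets
    ])
    return src, prev, tgt

frames = [torch.tensor([[7., 8.], [9., 10.]]),
          torch.tensor([[1., 2.], [3., 4.], [5., 6.]])]
targets = [torch.tensor([4, 2, 3, 1]), torch.tensor([3, 2, 1])]
src, prev, tgt = pad_and_sort(frames, targets)
print(prev)   # tensor([[1, 3, 2, 0], [1, 4, 2, 3]]), matching the test above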
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/tests/speech_recognition/test_cross_entropy.py
tests/speech_recognition/test_cross_entropy.py
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from examples.speech_recognition.criterions.cross_entropy_acc import CrossEntropyWithAccCriterion from .asr_test_base import CrossEntropyCriterionTestBase class CrossEntropyWithAccCriterionTest(CrossEntropyCriterionTestBase): def setUp(self): self.criterion_cls = CrossEntropyWithAccCriterion super().setUp() def test_cross_entropy_all_correct(self): sample = self.get_test_sample(correct=True, soft_target=False, aggregate=False) loss, sample_size, logging_output = self.criterion( self.model, sample, "sum", log_probs=True ) assert logging_output["correct"] == 20 assert logging_output["total"] == 20 assert logging_output["sample_size"] == 20 assert logging_output["ntokens"] == 20 def test_cross_entropy_all_wrong(self): sample = self.get_test_sample(correct=False, soft_target=False, aggregate=False) loss, sample_size, logging_output = self.criterion( self.model, sample, "sum", log_probs=True ) assert logging_output["correct"] == 0 assert logging_output["total"] == 20 assert logging_output["sample_size"] == 20 assert logging_output["ntokens"] == 20
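Why the assertions above expect 20: get_test_sample builds 2 sentences of 10 frames each, and in the correct case every frame's argmax lands on the all-zeros target. A toy check of that count, with shapes mirroring get_src_tokens(aggregate=False):

import torch

lprobs = torch.zeros(2, 10, 2)
lprobs[:, :, 0] = 1.0                      # "correct" outputs, as in get_src_tokens
target = torch.zeros(2, 10, dtype=torch.long)
print((lprobs.argmax(-1) == target).sum().item())   # 20 = 2 sentences x 10 frames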
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/docs/conf.py
docs/conf.py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # fairseq documentation build configuration file, created by # sphinx-quickstart on Fri Aug 17 21:45:30 2018. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. import os import sys # source code directory, relative to this file, for sphinx-autobuild sys.path.insert(0, os.path.abspath('..')) source_suffix = ['.rst'] # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.viewcode', 'sphinx.ext.napoleon', 'sphinxarg.ext', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The master toctree document. master_doc = 'index' # General information about the project. project = 'fairseq' copyright = '2019, Facebook AI Research (FAIR)' author = 'Facebook AI Research (FAIR)' github_doc_root = 'https://github.com/pytorch/fairseq/tree/master/docs/' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '0.9.0' # The full version, including alpha/beta/rc tags. release = '0.9.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' highlight_language = 'python' # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = 'sphinx_rtd_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # # html_theme_options = {} # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] html_context = { 'css_files': [ '_static/theme_overrides.css', # override wide tables in RTD theme ], } # Custom sidebar templates, must be a dictionary that maps document names # to template names. 
# # This is required for the alabaster theme # refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars #html_sidebars = { # '**': [ # 'about.html', # 'navigation.html', # 'relations.html', # needs 'show_related': True theme option to display # 'searchbox.html', # 'donate.html', # ] #} # Example configuration for intersphinx: refer to the Python standard library. intersphinx_mapping = { 'numpy': ('http://docs.scipy.org/doc/numpy/', None), 'python': ('https://docs.python.org/', None), 'torch': ('https://pytorch.org/docs/master/', None), }
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/__init__.py
examples/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. __version__ = '0.9.0' import examples.noisychannel # noqa
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/byte_level_bpe/get_bitext.py
examples/byte_level_bpe/get_bitext.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import os.path as op import argparse import os from multiprocessing import cpu_count from collections import namedtuple from typing import Optional, List import sentencepiece as sp from fairseq.data.encoders.moses_tokenizer import MosesTokenizer from fairseq.data.encoders.byte_utils import byte_encode from fairseq.data.encoders.sentencepiece_bpe import SentencepieceBPE from fairseq.data.encoders.characters import Characters from fairseq.data.encoders.byte_bpe import ByteBPE from fairseq.data.encoders.bytes import Bytes SPLITS = ['train', 'valid', 'test'] def _convert_xml(in_path: str, out_path: str): with open(in_path) as f, open(out_path, 'w') as f_o: for s in f: ss = s.strip() if not ss.startswith('<seg'): continue ss = ss.replace('</seg>', '').split('">') assert len(ss) == 2 f_o.write(ss[1].strip() + '\n') def _convert_train(in_path: str, out_path: str): with open(in_path) as f, open(out_path, 'w') as f_o: for s in f: ss = s.strip() if ss.startswith('<'): continue f_o.write(ss.strip() + '\n') def _get_bytes(in_path: str, out_path: str): with open(in_path) as f, open(out_path, 'w') as f_o: for s in f: f_o.write(Bytes.encode(s.strip()) + '\n') def _get_chars(in_path: str, out_path: str): with open(in_path) as f, open(out_path, 'w') as f_o: for s in f: f_o.write(Characters.encode(s.strip()) + '\n') def pretokenize(in_path: str, out_path: str, src: str, tgt: str): Args = namedtuple('Args', ['moses_source_lang', 'moses_target_lang', 'moses_no_dash_splits', 'moses_no_escape']) args = Args(moses_source_lang=src, moses_target_lang=tgt, moses_no_dash_splits=False, moses_no_escape=False) pretokenizer = MosesTokenizer(args) with open(in_path) as f, open(out_path, 'w') as f_o: for s in f: f_o.write(pretokenizer.encode(s.strip()) + '\n') def _convert_to_bchar(in_path_prefix: str, src: str, tgt: str, out_path: str): with open(out_path, 'w') as f_o: for lang in [src, tgt]: with open(f'{in_path_prefix}.{lang}') as f: for s in f: f_o.write(byte_encode(s.strip()) + '\n') def _get_bpe(in_path: str, model_prefix: str, vocab_size: int): arguments = [ f'--input={in_path}', f'--model_prefix={model_prefix}', f'--model_type=bpe', f'--vocab_size={vocab_size}', '--character_coverage=1.0', '--normalization_rule_name=identity', f'--num_threads={cpu_count()}' ] sp.SentencePieceTrainer.Train(' '.join(arguments)) def _apply_bbpe(model_path: str, in_path: str, out_path: str): Args = namedtuple('Args', ['sentencepiece_model_path']) args = Args(sentencepiece_model_path=model_path) tokenizer = ByteBPE(args) with open(in_path) as f, open(out_path, 'w') as f_o: for s in f: f_o.write(tokenizer.encode(s.strip()) + '\n') def _apply_bpe(model_path: str, in_path: str, out_path: str): Args = namedtuple('Args', ['sentencepiece_model']) args = Args(sentencepiece_model=model_path) tokenizer = SentencepieceBPE(args) with open(in_path) as f, open(out_path, 'w') as f_o: for s in f: f_o.write(tokenizer.encode(s.strip()) + '\n') def _concat_files(in_paths: List[str], out_path: str): with open(out_path, 'w') as f_o: for p in in_paths: with open(p) as f: for r in f: f_o.write(r) def preprocess_iwslt17(root: str, src: str, tgt: str, bpe_size: Optional[int], need_chars: bool, bbpe_size: Optional[int], need_bytes: bool): # extract bitext in_root = op.join(root, f'{src}-{tgt}') for lang in [src, tgt]: _convert_train( op.join(in_root, f'train.tags.{src}-{tgt}.{lang}'), 
op.join(root, f'train.{lang}') ) _convert_xml( op.join(in_root, f'IWSLT17.TED.dev2010.{src}-{tgt}.{lang}.xml'), op.join(root, f'valid.{lang}') ) _convert_xml( op.join(in_root, f'IWSLT17.TED.tst2015.{src}-{tgt}.{lang}.xml'), op.join(root, f'test.{lang}') ) # pre-tokenize for lang in [src, tgt]: for split in SPLITS: pretokenize(op.join(root, f'{split}.{lang}'), op.join(root, f'{split}.moses.{lang}'), src, tgt) # tokenize with BPE vocabulary if bpe_size is not None: # learn vocabulary concated_train_path = op.join(root, 'train.all') _concat_files( [op.join(root, 'train.moses.fr'), op.join(root, 'train.moses.en')], concated_train_path ) bpe_model_prefix = op.join(root, f'spm_bpe{bpe_size}') _get_bpe(concated_train_path, bpe_model_prefix, bpe_size) os.remove(concated_train_path) # apply for lang in [src, tgt]: for split in SPLITS: _apply_bpe( bpe_model_prefix + '.model', op.join(root, f'{split}.moses.{lang}'), op.join(root, f'{split}.moses.bpe{bpe_size}.{lang}') ) # tokenize with bytes vocabulary if need_bytes: for lang in [src, tgt]: for split in SPLITS: _get_bytes(op.join(root, f'{split}.moses.{lang}'), op.join(root, f'{split}.moses.bytes.{lang}')) # tokenize with characters vocabulary if need_chars: for lang in [src, tgt]: for split in SPLITS: _get_chars(op.join(root, f'{split}.moses.{lang}'), op.join(root, f'{split}.moses.chars.{lang}')) # tokenize with byte-level BPE vocabulary if bbpe_size is not None: # learn vocabulary bchar_path = op.join(root, 'train.bchar') _convert_to_bchar(op.join(root, 'train.moses'), src, tgt, bchar_path) bbpe_model_prefix = op.join(root, f'spm_bbpe{bbpe_size}') _get_bpe(bchar_path, bbpe_model_prefix, bbpe_size) os.remove(bchar_path) # apply for lang in [src, tgt]: for split in SPLITS: _apply_bbpe( bbpe_model_prefix + '.model', op.join(root, f'{split}.moses.{lang}'), op.join(root, f'{split}.moses.bbpe{bbpe_size}.{lang}') ) def main(): parser = argparse.ArgumentParser() parser.add_argument('--root', type=str, default='data') parser.add_argument('--bpe-vocab', default=None, type=int, help='Generate tokenized bitext with BPE of size K.' 'Default to None (disabled).') parser.add_argument('--bbpe-vocab', default=None, type=int, help='Generate tokenized bitext with BBPE of size K.' 'Default to None (disabled).') parser.add_argument('--byte-vocab', action='store_true', help='Generate tokenized bitext with bytes vocabulary') parser.add_argument('--char-vocab', action='store_true', help='Generate tokenized bitext with chars vocabulary') args = parser.parse_args() preprocess_iwslt17(args.root, 'fr', 'en', args.bpe_vocab, args.char_vocab, args.bbpe_vocab, args.byte_vocab) if __name__ == '__main__': main()
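The _get_bpe helper above drives SentencePiece through a single flag string. A runnable sketch of the same call pattern on a made-up toy corpus (the paths and vocab size here are illustrative, not the IWSLT settings):

import os
import tempfile
import sentencepiece as sp

# Write a tiny toy corpus; a real run would use the concatenated train.moses.* files.
tmp = tempfile.mkdtemp()
corpus = os.path.join(tmp, 'toy.txt')
with open(corpus, 'w') as f:
    f.write('the quick brown fox jumps over the lazy dog\n' * 100)

model_prefix = os.path.join(tmp, 'spm_bpe50')
sp.SentencePieceTrainer.Train(' '.join([
    f'--input={corpus}',
    f'--model_prefix={model_prefix}',
    '--model_type=bpe',
    '--vocab_size=50',
    '--character_coverage=1.0',
    '--normalization_rule_name=identity',
]))

spm = sp.SentencePieceProcessor()
spm.Load(model_prefix + '.model')
print(spm.EncodeAsPieces('the quick fox'))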
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/byte_level_bpe/gru_transformer.py
examples/byte_level_bpe/gru_transformer.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch.nn as nn import torch.nn.functional as F from fairseq.models import register_model, register_model_architecture from fairseq.models.transformer import TransformerModel, TransformerEncoder @register_model("gru_transformer") class GRUTransformerModel(TransformerModel): @classmethod def build_encoder(cls, args, src_dict, embed_tokens): return GRUTransformerEncoder(args, src_dict, embed_tokens) class GRUTransformerEncoder(TransformerEncoder): def __init__(self, args, dictionary, embed_tokens): super().__init__(args, dictionary, embed_tokens) self.emb_ctx = nn.GRU(input_size=embed_tokens.embedding_dim, hidden_size=embed_tokens.embedding_dim // 2, num_layers=1, bidirectional=True) def forward_embedding(self, src_tokens): # embed tokens and positions x = embed = self.embed_scale * self.embed_tokens(src_tokens) if self.embed_positions is not None: x = embed + self.embed_positions(src_tokens) # contextualize embeddings x = x.transpose(0, 1) x = self.dropout_module(x) x, _ = self.emb_ctx.forward(x) x = x.transpose(0, 1) if self.layernorm_embedding is not None: x = self.layernorm_embedding(x) x = self.dropout_module(x) return x, embed @register_model_architecture("gru_transformer", "gru_transformer") def gru_transformer_base_architecture(args): args.encoder_embed_path = getattr(args, "encoder_embed_path", None) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048) args.encoder_layers = getattr(args, "encoder_layers", 6) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False) args.decoder_embed_path = getattr(args, "decoder_embed_path", None) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr( args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim ) args.decoder_layers = getattr(args, "decoder_layers", 6) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False) args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) args.attention_dropout = getattr(args, "attention_dropout", 0.0) args.activation_dropout = getattr(args, "activation_dropout", 0.0) args.activation_fn = getattr(args, "activation_fn", "relu") args.dropout = getattr(args, "dropout", 0.1) args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", False ) args.share_all_embeddings = getattr(args, "share_all_embeddings", False) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.adaptive_input = getattr(args, "adaptive_input", False) args.no_cross_attention = getattr(args, "no_cross_attention", False) args.cross_self_attention = getattr(args, "cross_self_attention", False) args.layer_wise_attention = getattr(args, 
"layer_wise_attention", False) args.decoder_output_dim = getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) args.no_scale_embedding = getattr(args, "no_scale_embedding", False) args.layernorm_embedding = getattr(args, "layernorm_embedding", False) @register_model_architecture("gru_transformer", "gru_transformer_big") def gru_transformer_big(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024) args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16) args.dropout = getattr(args, "dropout", 0.3) gru_transformer_base_architecture(args)
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/constrained_decoding/normalize.py
examples/constrained_decoding/normalize.py
#!/usr/bin/env python3 # # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import sys from sacremoses.normalize import MosesPunctNormalizer def main(args): normalizer = MosesPunctNormalizer(lang=args.lang, penn=args.penn) for line in sys.stdin: print(normalizer.normalize(line.rstrip()), flush=True) if __name__ == '__main__': import argparse parser = argparse.ArgumentParser() parser.add_argument('--lang', '-l', default='en') parser.add_argument('--penn', '-p', action='store_true') args = parser.parse_args() main(args)
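For reference, the normalizer call this wrapper makes per line, runnable on a literal toy string:

from sacremoses.normalize import MosesPunctNormalizer

norm = MosesPunctNormalizer(lang='en', penn=False)
print(norm.normalize('Hello  ,  world ...'))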
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/constrained_decoding/tok.py
examples/constrained_decoding/tok.py
#!/usr/bin/env python3 # # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import sys import sacremoses def main(args): """Tokenizes, preserving tabs""" mt = sacremoses.MosesTokenizer(lang=args.lang) def tok(s): return mt.tokenize(s, return_str=True) for line in sys.stdin: parts = list(map(tok, line.split("\t"))) print(*parts, sep="\t", flush=True) if __name__ == '__main__': import argparse parser = argparse.ArgumentParser() parser.add_argument('--lang', '-l', default='en') parser.add_argument('--penn', '-p', action='store_true') parser.add_argument('--fields', '-f', help="fields to tokenize") args = parser.parse_args() main(args)
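A short usage sketch of the tab-preserving idea above: each tab-separated field is tokenized independently and the fields are rejoined, so constraint columns survive Moses tokenization (toy input):

import sacremoses

mt = sacremoses.MosesTokenizer(lang='en')
line = "Don't drop my tabs.\tconstraint one\tconstraint two"
parts = [mt.tokenize(s, return_str=True) for s in line.split('\t')]
print('\t'.join(parts))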
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/paraphraser/paraphrase.py
examples/paraphraser/paraphrase.py
#!/usr/bin/env python3 -u import argparse import fileinput import logging import os import sys from fairseq.models.transformer import TransformerModel logging.getLogger().setLevel(logging.INFO) def main(): parser = argparse.ArgumentParser(description='') parser.add_argument('--en2fr', required=True, help='path to en2fr model') parser.add_argument('--fr2en', required=True, help='path to fr2en mixture of experts model') parser.add_argument('--user-dir', help='path to fairseq examples/translation_moe/src directory') parser.add_argument('--num-experts', type=int, default=10, help='(keep at 10 unless using a different model)') parser.add_argument('files', nargs='*', default=['-'], help='input files to paraphrase; "-" for stdin') args = parser.parse_args() if args.user_dir is None: args.user_dir = os.path.join( os.path.dirname(os.path.dirname(os.path.abspath(__file__))), # examples/ 'translation_moe', 'src', ) if os.path.exists(args.user_dir): logging.info('found user_dir:' + args.user_dir) else: raise RuntimeError( 'cannot find fairseq examples/translation_moe/src ' '(tried looking here: {})'.format(args.user_dir) ) logging.info('loading en2fr model from:' + args.en2fr) en2fr = TransformerModel.from_pretrained( model_name_or_path=args.en2fr, tokenizer='moses', bpe='sentencepiece', ).eval() logging.info('loading fr2en model from:' + args.fr2en) fr2en = TransformerModel.from_pretrained( model_name_or_path=args.fr2en, tokenizer='moses', bpe='sentencepiece', user_dir=args.user_dir, task='translation_moe', ).eval() def gen_paraphrases(en): fr = en2fr.translate(en) return [ fr2en.translate(fr, inference_step_args={'expert': i}) for i in range(args.num_experts) ] logging.info('Type the input sentence and press return:') for line in fileinput.input(args.files): line = line.strip() if len(line) == 0: continue for paraphrase in gen_paraphrases(line): print(paraphrase) if __name__ == '__main__': main()
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/megatron_11b/detok.py
examples/megatron_11b/detok.py
#!/usr/bin/env python3 -u # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import fileinput import sacremoses def main(): parser = argparse.ArgumentParser(description='') parser.add_argument('files', nargs='*', help='input files') args = parser.parse_args() detok = sacremoses.MosesDetokenizer() for line in fileinput.input(args.files, openhook=fileinput.hook_compressed): print(detok.detokenize(line.strip().split(' ')).replace(' @', '').replace('@ ', '').replace(' =', '=').replace('= ', '=').replace(' – ', '–')) if __name__ == '__main__': main()
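A toy trace of the replace chain above, which repairs '@'-style joiner tokens after Moses detokenization (the input line is made up, and the exact detokenizer output may vary slightly):

import sacremoses

detok = sacremoses.MosesDetokenizer()
line = 'life @-@ long learning'
out = detok.detokenize(line.split(' ')).replace(' @', '').replace('@ ', '')
print(out)   # expected: 'life-long learning'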
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/wav2vec/wav2vec_featurize.py
examples/wav2vec/wav2vec_featurize.py
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Helper script to pre-compute embeddings for a wav2letter++ dataset """ import argparse import glob import os from shutil import copy import h5py import soundfile as sf import numpy as np import torch from torch import nn import tqdm from fairseq.models.wav2vec.wav2vec import Wav2VecModel def read_audio(fname): """ Load an audio file and return PCM along with the sample rate """ wav, sr = sf.read(fname) assert sr == 16e3 return wav, 16e3 class PretrainedWav2VecModel(nn.Module): def __init__(self, fname): super().__init__() checkpoint = torch.load(fname) self.args = checkpoint["args"] model = Wav2VecModel.build_model(self.args, None) model.load_state_dict(checkpoint["model"]) model.eval() self.model = model def forward(self, x): with torch.no_grad(): z = self.model.feature_extractor(x) if isinstance(z, tuple): z = z[0] c = self.model.feature_aggregator(z) return z, c class EmbeddingWriterConfig(argparse.ArgumentParser): def __init__(self): super().__init__("Pre-compute embeddings for wav2letter++ datasets") kwargs = {"action": "store", "type": str, "required": True} self.add_argument("--input", "-i", help="Input Directory", **kwargs) self.add_argument("--output", "-o", help="Output Directory", **kwargs) self.add_argument("--model", help="Path to model checkpoint", **kwargs) self.add_argument("--split", help="Dataset Splits", nargs='+', **kwargs) self.add_argument("--ext", default="wav", required=False, help="Audio file extension") self.add_argument("--no-copy-labels", action="store_true", help="Do not copy label files. Useful for large datasets, use --targetdir in wav2letter then.") self.add_argument("--use-feat", action="store_true", help="Use the feature vector ('z') instead of context vector ('c') for features") self.add_argument("--gpu", help="GPU to use", default=0, type=int) class Prediction(): """ Lightweight wrapper around a fairspeech embedding model """ def __init__(self, fname, gpu=0): self.gpu = gpu self.model = PretrainedWav2VecModel(fname).cuda(gpu) def __call__(self, x): x = torch.from_numpy(x).float().cuda(self.gpu) with torch.no_grad(): z, c = self.model(x.unsqueeze(0)) return z.squeeze(0).cpu().numpy(), c.squeeze(0).cpu().numpy() class H5Writer(): """ Write features as hdf5 file in wav2letter++ compatible format """ def __init__(self, fname): self.fname = fname os.makedirs(os.path.dirname(self.fname), exist_ok=True) def write(self, data): channel, T = data.shape with h5py.File(self.fname, "w") as out_ds: data = data.T.flatten() out_ds["features"] = data out_ds["info"] = np.array([16e3 // 160, T, channel]) class EmbeddingDatasetWriter(object): """ Given a model and a wav2letter++ dataset, pre-compute and store embeddings Args: input_root, str : Path to the wav2letter++ dataset output_root, str : Desired output directory. 
Will be created if non-existent split, str : Dataset split """ def __init__(self, input_root, output_root, split, model_fname, extension="wav", gpu=0, verbose=False, use_feat=False, ): assert os.path.exists(model_fname) self.model_fname = model_fname self.model = Prediction(self.model_fname, gpu) self.input_root = input_root self.output_root = output_root self.split = split self.verbose = verbose self.extension = extension self.use_feat = use_feat assert os.path.exists(self.input_path), \ "Input path '{}' does not exist".format(self.input_path) def _progress(self, iterable, **kwargs): if self.verbose: return tqdm.tqdm(iterable, **kwargs) return iterable def require_output_path(self, fname=None): path = self.get_output_path(fname) os.makedirs(path, exist_ok=True) @property def input_path(self): return self.get_input_path() @property def output_path(self): return self.get_output_path() def get_input_path(self, fname=None): if fname is None: return os.path.join(self.input_root, self.split) return os.path.join(self.get_input_path(), fname) def get_output_path(self, fname=None): if fname is None: return os.path.join(self.output_root, self.split) return os.path.join(self.get_output_path(), fname) def copy_labels(self): self.require_output_path() labels = list(filter(lambda x: self.extension not in x, glob.glob(self.get_input_path("*")))) for fname in tqdm.tqdm(labels): copy(fname, self.output_path) @property def input_fnames(self): return sorted(glob.glob(self.get_input_path("*.{}".format(self.extension)))) def __len__(self): return len(self.input_fnames) def write_features(self): paths = self.input_fnames fnames_context = map(lambda x: os.path.join(self.output_path, x.replace("." + self.extension, ".h5context")), \ map(os.path.basename, paths)) for name, target_fname in self._progress(zip(paths, fnames_context), total=len(self)): wav, sr = read_audio(name) z, c = self.model(wav) feat = z if self.use_feat else c writer = H5Writer(target_fname) writer.write(feat) def __repr__(self): return "EmbeddingDatasetWriter ({n_files} files)\n\tinput:\t{input_root}\n\toutput:\t{output_root}\n\tsplit:\t{split})".format( n_files=len(self), **self.__dict__) if __name__ == "__main__": args = EmbeddingWriterConfig().parse_args() for split in args.split: writer = EmbeddingDatasetWriter( input_root=args.input, output_root=args.output, split=split, model_fname=args.model, gpu=args.gpu, extension=args.ext, use_feat=args.use_feat, ) print(writer) writer.require_output_path() print("Writing Features...") writer.write_features() print("Done.") if not args.no_copy_labels: print("Copying label data...") writer.copy_labels() print("Done.")
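H5Writer above stores features time-major and flattened, plus an info triple of (frame rate, T, channel). A round-trip sketch with random stand-in features (the file name and sizes are made up) showing how such a file can be read back:

import h5py
import numpy as np

channel, T = 512, 100
feat = np.random.randn(channel, T).astype(np.float32)

with h5py.File('example.h5context', 'w') as f:
    f['features'] = feat.T.flatten()              # time-major, flattened, as H5Writer does
    f['info'] = np.array([16e3 // 160, T, channel])

with h5py.File('example.h5context', 'r') as f:
    rate, T2, C2 = f['info'][()]
    restored = f['features'][()].reshape(int(T2), int(C2)).T
assert np.allclose(feat, restored)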
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/wav2vec/vq-wav2vec_featurize.py
examples/wav2vec/vq-wav2vec_featurize.py
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Helper script to pre-compute embeddings for a wav2letter++ dataset """ import pprint import glob, os, argparse import torch from torch import nn try: import tqdm except: print("Install tqdm to use --log-format=tqdm") from fairseq.models.wav2vec.wav2vec import Wav2VecModel import tqdm import soundfile as sf from torch.utils.data import DataLoader import os.path as osp class FilesDataset: def __init__(self, files, labels): self.files = files if labels and osp.exists(labels): with open(labels, 'r') as lbl_f: self.labels = [line.rstrip() for line in lbl_f] else: self.labels = labels def __len__(self): return len(self.files) def __getitem__(self, index): fname = self.files[index] wav, sr = sf.read(fname) assert sr == 16000 wav = torch.from_numpy(wav).float() lbls = None if self.labels: if isinstance(self.labels, str): lbl_file = osp.splitext(fname)[0] + "." + self.labels with open(lbl_file, 'r') as lblf: lbls = lblf.readline() assert lbls is not None else: lbls = self.labels[index] return wav, lbls def collate(self, batch): return batch class ArgTypes: @staticmethod def existing_path(arg): arg = str(arg) assert osp.exists(arg), f"File {arg} does not exist" return arg @staticmethod def mkdir(arg): arg = str(arg) os.makedirs(arg, exist_ok=True) return arg class DatasetWriter: def __init__(self): self.args = self.load_config() pprint.pprint(self.args.__dict__) self.model = self.load_model() def __getattr__(self, attr): return getattr(self.args, attr) def read_manifest(self, fname): with open(fname, "r") as fp: lines = fp.read().split("\n") root = lines.pop(0).strip() fnames = [ osp.join(root, line.split("\t")[0]) for line in lines if len(line) > 0 ] return fnames def process_splits(self): if self.args.shard is not None or self.args.num_shards is not None: assert self.args.shard is not None and self.args.num_shards is not None for split in self.splits: print(split) if self.extension == "tsv": datadir = osp.join(self.data_dir, f"{split}.{self.extension}") print("Reading manifest file: ", datadir) files = self.read_manifest(datadir) else: datadir = osp.join(self.data_dir, split, f"**/*.{self.extension}") files = glob.glob(datadir, recursive=True) assert len(files) > 0 if self.args.shard is not None: files = files[self.args.shard::self.args.num_shards] lbls = [] with open(self.data_file(split), 'w') as srcf: for line, lbl in self.iterate(files): print(line, file=srcf) if self.args.labels: lbls.append(lbl + '\n') if self.args.labels: assert all(a is not None for a in lbls) with open(self.lbl_file(split), 'w') as lblf: lblf.writelines(lbls) def iterate(self, files): data = self.load_data(files) for samples in tqdm.tqdm(data, total=len(files)//32): for wav, lbl in samples: x = wav.unsqueeze(0).float().cuda() div = 1 while x.size(-1) // div > self.args.max_size: div += 1 xs = x.chunk(div, dim=-1) result = [] for x in xs: torch.cuda.empty_cache() x = self.model.feature_extractor(x) if self.quantize_location == "encoder": with torch.no_grad(): _, idx = self.model.vector_quantizer.forward_idx(x) idx = idx.squeeze(0).cpu() else: with torch.no_grad(): z = self.model.feature_aggregator(x) _, idx = self.model.vector_quantizer.forward_idx(z) idx = idx.squeeze(0).cpu() result.append(idx) idx = torch.cat(result, dim=0) yield " ".join("-".join(map(str, a.tolist())) for a in idx), lbl def lbl_file(self, name): 
shard_part = "" if self.args.shard is None else f".{self.args.shard}" return osp.join(self.output_dir, f"{name}.lbl{shard_part}") def data_file(self, name): shard_part = "" if self.args.shard is None else f".{self.args.shard}" return osp.join(self.output_dir, f"{name}.src{shard_part}") def var_file(self): return osp.join(self.output_dir, f"vars.pt") def load_config(self): parser = argparse.ArgumentParser("Vector Quantized wav2vec features") # Model Arguments parser.add_argument("--checkpoint", type=ArgTypes.existing_path, required=True) parser.add_argument("--data-parallel", action="store_true") # Output Arguments parser.add_argument("--output-dir", type=ArgTypes.mkdir, required=True) # Data Arguments parser.add_argument("--data-dir", type=ArgTypes.existing_path, required=True) parser.add_argument("--splits", type=str, nargs="+", required=True) parser.add_argument("--extension", type=str, required=True) parser.add_argument("--labels", type=str, required=False) parser.add_argument("--shard", type=int, default=None) parser.add_argument("--num-shards", type=int, default=None) parser.add_argument("--max-size", type=int, default=1300000) # Logger Arguments parser.add_argument( "--log-format", type=str, choices=["none", "simple", "tqdm"] ) return parser.parse_args() def load_data(self, fnames): dataset = FilesDataset(fnames, self.args.labels) loader = DataLoader( dataset, batch_size=32, collate_fn=dataset.collate, num_workers=8 ) return loader def load_model(self): cp = torch.load(self.checkpoint, map_location=lambda x, _: x) model = Wav2VecModel.build_model(cp["args"], None) self.quantize_location = getattr(cp["args"], "vq", "encoder") model.load_state_dict(cp["model"]) model.eval().float() model.cuda() if self.data_parallel: model = nn.DataParallel(model) return model def __call__(self): self.process_splits() if hasattr(self.model.feature_extractor, "vars") and (self.args.shard is None or self.args.shard == 0): vars = ( self.model.feature_extractor.vars.view( self.model.feature_extractor.banks, self.model.feature_extractor.num_vars, -1, ) .cpu() .detach() ) print("writing learned latent variable embeddings: ", vars.shape) torch.save(vars, self.var_file()) if __name__ == "__main__": write_data = DatasetWriter() write_data() print("Done.")
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/wav2vec/wav2vec_manifest.py
examples/wav2vec/wav2vec_manifest.py
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Data pre-processing: build vocabularies and binarize training data. """ import argparse import glob import os import soundfile import random def get_parser(): parser = argparse.ArgumentParser() parser.add_argument('root', metavar='DIR', help='root directory containing flac files to index') parser.add_argument('--valid-percent', default=0.01, type=float, metavar='D', help='percentage of data to use as validation set (between 0 and 1)') parser.add_argument('--dest', default='.', type=str, metavar='DIR', help='output directory') parser.add_argument('--ext', default='flac', type=str, metavar='EXT', help='extension to look for') parser.add_argument('--seed', default=42, type=int, metavar='N', help='random seed') parser.add_argument('--path-must-contain', default=None, type=str, metavar='FRAG', help='if set, path must contain this substring for a file to be included in the manifest') return parser def main(args): assert args.valid_percent >= 0 and args.valid_percent <= 1. dir_path = os.path.realpath(args.root) search_path = os.path.join(dir_path, '**/*.' + args.ext) rand = random.Random(args.seed) with open(os.path.join(args.dest, 'train.tsv'), 'w') as train_f, open( os.path.join(args.dest, 'valid.tsv'), 'w') as valid_f: print(dir_path, file=train_f) print(dir_path, file=valid_f) for fname in glob.iglob(search_path, recursive=True): file_path = os.path.realpath(fname) if args.path_must_contain and args.path_must_contain not in file_path: continue frames = soundfile.info(fname).frames dest = train_f if rand.random() > args.valid_percent else valid_f print('{}\t{}'.format(os.path.relpath(file_path, dir_path), frames), file=dest) if __name__ == '__main__': parser = get_parser() args = parser.parse_args() main(args)
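The train/valid assignment above is one seeded Bernoulli draw per file: rand.random() > valid_percent sends the file to train, otherwise to valid. A toy check of the resulting proportions (the file list is fabricated):

import random

valid_percent = 0.01
rand = random.Random(42)      # mirrors the --seed default above
files = [f'clip_{i:05d}.flac' for i in range(10_000)]
valid = [f for f in files if rand.random() <= valid_percent]
print(f'{len(valid)} of {len(files)} files would land in valid.tsv')   # roughly 1%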
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/wav2vec/libri_labels.py
examples/wav2vec/libri_labels.py
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Helper script to extract word-level (.wrd) and letter-level (.ltr) transcripts from LibriSpeech .trans.txt files for a wav2letter++ dataset """ import argparse import os def main(): parser = argparse.ArgumentParser() parser.add_argument("tsv") parser.add_argument("--output-dir", required=True) parser.add_argument("--output-name", required=True) args = parser.parse_args() os.makedirs(args.output_dir, exist_ok=True) transcriptions = {} with open(args.tsv, "r") as tsv, open( os.path.join(args.output_dir, args.output_name + ".ltr"), "w" ) as ltr_out, open( os.path.join(args.output_dir, args.output_name + ".wrd"), "w" ) as wrd_out: root = next(tsv).strip() for line in tsv: line = line.strip() dir = os.path.dirname(line) if dir not in transcriptions: parts = dir.split("/") trans_path = f"{parts[-2]}-{parts[-1]}.trans.txt" path = os.path.join(root, dir, trans_path) assert os.path.exists(path) texts = {} with open(path, "r") as trans_f: for tline in trans_f: items = tline.strip().split() texts[items[0]] = " ".join(items[1:]) transcriptions[dir] = texts part = os.path.basename(line).split(".")[0] assert part in transcriptions[dir] print(transcriptions[dir][part], file=wrd_out) print( " ".join(list(transcriptions[dir][part].replace(" ", "|"))) + " |", file=ltr_out, ) if __name__ == "__main__": main()
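A toy illustration of the two label formats written above: .wrd keeps the word transcript, while .ltr spells it out letter by letter with '|' marking word boundaries:

text = 'HELLO WORLD'
print(text)                                            # .wrd line: HELLO WORLD
print(' '.join(list(text.replace(' ', '|'))) + ' |')   # .ltr line: H E L L O | W O R L D |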
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/simultaneous_translation/__init__.py
examples/simultaneous_translation/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from . import criterions, models, eval # noqa
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/simultaneous_translation/models/transformer_monotonic_attention.py
examples/simultaneous_translation/models/transformer_monotonic_attention.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import torch.nn as nn import torch.nn.functional as F from fairseq.models import ( register_model, register_model_architecture, ) from fairseq.models.transformer import ( TransformerModel, TransformerEncoder, TransformerDecoder, base_architecture, transformer_iwslt_de_en, transformer_vaswani_wmt_en_de_big, ) from examples.simultaneous_translation.modules.monotonic_transformer_layer import ( TransformerMonotonicDecoderLayer, TransformerMonotonicEncoderLayer ) DEFAULT_MAX_SOURCE_POSITIONS = 1024 DEFAULT_MAX_TARGET_POSITIONS = 1024 @register_model('transformer_unidirectional') class TransformerUnidirectionalModel(TransformerModel): @classmethod def build_encoder(cls, args, src_dict, embed_tokens): return TransformerMonotonicEncoder(args, src_dict, embed_tokens) @register_model('transformer_monotonic') class TransformerMonotonicModel(TransformerModel): @classmethod def build_encoder(cls, args, src_dict, embed_tokens): return TransformerMonotonicEncoder(args, src_dict, embed_tokens) @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): return TransformerMonotonicDecoder(args, tgt_dict, embed_tokens) def _indices_from_states(self, states): if type(states["indices"]["src"]) == list: if next(self.parameters()).is_cuda: tensor = torch.cuda.LongTensor else: tensor = torch.LongTensor src_indices = tensor( [states["indices"]["src"][: 1 + states["steps"]["src"]]] ) tgt_indices = tensor( [ [self.decoder.dictionary.eos()] + states["indices"]["tgt"] ] ) else: src_indices = states["indices"]["src"][: 1 + states["steps"]["src"]] tgt_indices = states["indices"]["tgt"] return src_indices, None, tgt_indices def predict_from_states(self, states): decoder_states = self.decoder.output_layer( states["decoder_features"] ) lprobs = self.get_normalized_probs( [decoder_states[:, -1:]], log_probs=True ) index = lprobs.argmax(dim=-1) token = self.decoder.dictionary.string(index) return token, index[0, 0].item() def decision_from_states(self, states): ''' This funcion take states dictionary as input, and gives the agent a decision of whether read a token from server. 
Moreover, the decoder states are also calculated here so we can directly generate a target token without recompute every thing ''' self.eval() if len(states["tokens"]["src"]) == 0: return 0 src_indices, src_lengths, tgt_indices = self._indices_from_states( states) # Update encoder states if needed if ( "encoder_states" not in states or states["encoder_states"][0].size(1) <= states["steps"]["src"] ): encoder_out_dict = self.encoder(src_indices, src_lengths) states["encoder_states"] = encoder_out_dict else: encoder_out_dict = states["encoder_states"] # online means we still need tokens to feed the model states["model_states"]["online"] = not ( states["finish_read"] and len(states["tokens"]["src"]) == states["steps"]["src"] ) states["model_states"]["steps"] = states["steps"] x, outputs = self.decoder.forward( prev_output_tokens=tgt_indices, encoder_out=encoder_out_dict, incremental_state=states["model_states"], features_only=True, ) states["decoder_features"] = x return outputs["action"] class TransformerMonotonicEncoder(TransformerEncoder): def __init__(self, args, dictionary, embed_tokens): super().__init__(args, dictionary, embed_tokens) self.dictionary = dictionary self.layers = nn.ModuleList([]) self.layers.extend([ TransformerMonotonicEncoderLayer(args) for i in range(args.encoder_layers) ]) class TransformerMonotonicDecoder(TransformerDecoder): """ Transformer decoder consisting of *args.decoder_layers* layers. Each layer is a :class:`TransformerDecoderLayer`. Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): decoding dictionary embed_tokens (torch.nn.Embedding): output embedding no_encoder_attn (bool, optional): whether to attend to encoder outputs (default: False). """ def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False): super().__init__(args, dictionary, embed_tokens, no_encoder_attn=False) self.dictionary = dictionary self.layers = nn.ModuleList([]) self.layers.extend([ TransformerMonotonicDecoderLayer(args, no_encoder_attn) for _ in range(args.decoder_layers) ]) def pre_attention( self, prev_output_tokens, encoder_out_dict, incremental_state=None ): positions = self.embed_positions( prev_output_tokens, incremental_state=incremental_state, ) if self.embed_positions is not None else None if incremental_state is not None: prev_output_tokens = prev_output_tokens[:, -1:] if positions is not None: positions = positions[:, -1:] # embed tokens and positions x = self.embed_scale * self.embed_tokens(prev_output_tokens) if self.project_in_dim is not None: x = self.project_in_dim(x) if positions is not None: x += positions x = self.dropout_module(x) # B x T x C -> T x B x C x = x.transpose(0, 1) encoder_out = encoder_out_dict.encoder_out encoder_padding_mask = encoder_out_dict.encoder_padding_mask return x, encoder_out, encoder_padding_mask def post_attention(self, x): if self.layer_norm: x = self.layer_norm(x) # T x B x C -> B x T x C x = x.transpose(0, 1) if self.project_out_dim is not None: x = self.project_out_dim(x) return x def extract_features( self, prev_output_tokens, encoder_out, incremental_state=None, **unused ): """ Similar to *forward* but only return features. 
Returns: tuple: - the decoder's features of shape `(batch, tgt_len, embed_dim)` - a dictionary with any model-specific outputs """ # incremental_state = None ( x, encoder_outs, encoder_padding_mask ) = self.pre_attention( prev_output_tokens, encoder_out, incremental_state ) attn = None inner_states = [x] attn_list = [] step_list = [] for i, layer in enumerate(self.layers): x, attn, _ = layer( x=x, encoder_out=encoder_outs, encoder_padding_mask=encoder_padding_mask, incremental_state=incremental_state, self_attn_mask=self.buffered_future_mask(x) if incremental_state is None else None, ) inner_states.append(x) attn_list.append(attn) if incremental_state is not None: curr_steps = layer.get_steps(incremental_state) step_list.append(curr_steps) if incremental_state.get("online", False): p_choose = attn["p_choose"].squeeze(0).squeeze(1).gather(1, curr_steps.t()) new_steps = ( curr_steps + (p_choose < 0.5).t().type_as(curr_steps) ) if (new_steps >= incremental_state["steps"]["src"]).any(): # We need to prune the last self_attn saved_state # if model decide not to read # otherwise there will be duplicated saved_state for j in range(i + 1): self.layers[j].prune_incremental_state( incremental_state) return x, {"action": 0} if ( incremental_state is not None and not incremental_state.get("online", False) ): # Here is for fast evaluation fastest_step = torch.max( torch.cat(step_list, dim=1), dim=1, keepdim=True )[0] + 1 if "fastest_step" in incremental_state: incremental_state["fastest_step"] = torch.cat( [incremental_state["fastest_step"], fastest_step], dim=1 ) else: incremental_state["fastest_step"] = fastest_step x = self.post_attention(x) return x, { "action": 1, "attn_list": attn_list, "step_list": step_list, "encoder_out": encoder_out, "encoder_padding_mask": encoder_padding_mask, } def reorder_incremental_state(self, incremental_state, new_order): super().reorder_incremental_state(incremental_state, new_order) if "fastest_step" in incremental_state: incremental_state["fastest_step"] = ( incremental_state["fastest_step"] .index_select(0, new_order) ) @register_model_architecture( 'transformer_monotonic', 'transformer_monotonic' ) def base_monotonic_rchitecture(args): base_architecture(args) args.encoder_unidirectional = getattr( args, 'encoder_unidirectional', False) @register_model_architecture( 'transformer_monotonic', 'transformer_monotonic_iwslt_de_en' ) def transformer_monotonic_iwslt_de_en(args): transformer_iwslt_de_en(args) base_monotonic_rchitecture(args) # parameters used in the "Attention Is All You Need" paper (Vaswani et al., 2017) @register_model_architecture( 'transformer_monotonic', 'transformer_monotonic_vaswani_wmt_en_de_big' ) def transformer_monotonic_vaswani_wmt_en_de_big(args): transformer_vaswani_wmt_en_de_big(args) @register_model_architecture( 'transformer_monotonic', 'transformer_monotonic_vaswani_wmt_en_fr_big' ) def transformer_monotonic_vaswani_wmt_en_fr_big(args): transformer_monotonic_vaswani_wmt_en_fr_big(args) @register_model_architecture( 'transformer_unidirectional', 'transformer_unidirectional_iwslt_de_en' ) def transformer_unidirectional_iwslt_de_en(args): transformer_iwslt_de_en(args)
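A toy tensor sketch of the read/write rule in extract_features above: a stream's monotonic step advances (reads another source token) while its p_choose stays below 0.5, and stands still (writes) otherwise. Shapes and values here are made up for illustration:

import torch

curr_steps = torch.tensor([3, 5])            # current source position per stream
p_choose = torch.tensor([0.2, 0.9])          # probability of writing at that position
new_steps = curr_steps + (p_choose < 0.5).long()
print(new_steps)                             # tensor([4, 5]): stream 0 reads on, stream 1 writes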
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/simultaneous_translation/models/__init__.py
examples/simultaneous_translation/models/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import importlib import os for file in os.listdir(os.path.dirname(__file__)): if file.endswith('.py') and not file.startswith('_'): model_name = file[:file.find('.py')] importlib.import_module('examples.simultaneous_translation.models.' + model_name)
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/simultaneous_translation/utils/latency.py
examples/simultaneous_translation/utils/latency.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch


class LatencyMetric(object):
    @staticmethod
    def length_from_padding_mask(padding_mask, batch_first: bool = False):
        dim = 1 if batch_first else 0
        return padding_mask.size(dim) - padding_mask.sum(dim=dim, keepdim=True)

    def prepare_latency_metric(
        self,
        delays,
        src_lens,
        target_padding_mask=None,
        batch_first: bool = False,
        start_from_zero: bool = True
    ):
        assert len(delays.size()) == 2
        assert len(src_lens.size()) == 2

        if start_from_zero:
            delays = delays + 1

        if batch_first:
            # convert to batch_last
            delays = delays.t()
            src_lens = src_lens.t()

        tgt_len, bsz = delays.size()
        _, bsz_1 = src_lens.size()

        if target_padding_mask is not None:
            target_padding_mask = target_padding_mask.t()
            tgt_len_1, bsz_2 = target_padding_mask.size()
            assert tgt_len == tgt_len_1
            assert bsz == bsz_2

        assert bsz == bsz_1

        if target_padding_mask is None:
            tgt_lens = tgt_len * delays.new_ones([1, bsz]).float()
        else:
            # 1, batch_size
            tgt_lens = self.length_from_padding_mask(target_padding_mask, False).float()
            delays = delays.masked_fill(target_padding_mask, 0)

        return delays, src_lens, tgt_lens, target_padding_mask

    def __call__(
        self,
        delays,
        src_lens,
        target_padding_mask=None,
        batch_first: bool = False,
        start_from_zero: bool = True,
    ):
        delays, src_lens, tgt_lens, target_padding_mask = self.prepare_latency_metric(
            delays,
            src_lens,
            target_padding_mask,
            batch_first,
            start_from_zero
        )
        return self.cal_metric(delays, src_lens, tgt_lens, target_padding_mask)

    @staticmethod
    def cal_metric(delays, src_lens, tgt_lens, target_padding_mask):
        """
        Expected sizes:
            delays: tgt_len, batch_size
            src_lens: 1, batch_size
            target_padding_mask: tgt_len, batch_size
        """
        raise NotImplementedError


class AverageProportion(LatencyMetric):
    """
    Function to calculate Average Proportion from
    Can neural machine translation do simultaneous translation?
    (https://arxiv.org/abs/1606.02012)

    Delays are monotonic steps, range from 1 to src_len.
    Given source x and target y, AP is calculated as:

    AP = 1 / (|x||y|) sum_i^|y| delays_i
    """
    @staticmethod
    def cal_metric(delays, src_lens, tgt_lens, target_padding_mask):
        if target_padding_mask is not None:
            AP = torch.sum(delays.masked_fill(target_padding_mask, 0), dim=0, keepdim=True)
        else:
            AP = torch.sum(delays, dim=0, keepdim=True)

        AP = AP / (src_lens * tgt_lens)
        return AP


class AverageLagging(LatencyMetric):
    """
    Function to calculate Average Lagging from
    STACL: Simultaneous Translation with Implicit Anticipation
    and Controllable Latency using Prefix-to-Prefix Framework
    (https://arxiv.org/abs/1810.08398)

    Delays are monotonic steps, range from 1 to src_len.
    Given source x and target y, AL is calculated as:

    AL = 1 / tau sum_i^tau delays_i - (i - 1) / gamma

    Where
    gamma = |y| / |x|
    tau = argmin_i(delays_i = |x|)
    """
    @staticmethod
    def cal_metric(delays, src_lens, tgt_lens, target_padding_mask):
        # tau = argmin_i(delays_i = |x|)
        tgt_len, bsz = delays.size()
        lagging_padding_mask = delays >= src_lens
        lagging_padding_mask = torch.nn.functional.pad(lagging_padding_mask.t(), (1, 0)).t()[:-1, :]
        gamma = tgt_lens / src_lens
        lagging = delays - torch.arange(delays.size(0)).unsqueeze(1).type_as(delays).expand_as(delays) / gamma
        lagging.masked_fill_(lagging_padding_mask, 0)
        tau = (1 - lagging_padding_mask.type_as(lagging)).sum(dim=0, keepdim=True)
        AL = lagging.sum(dim=0, keepdim=True) / tau
        return AL


class DifferentiableAverageLagging(LatencyMetric):
    """
    Function to calculate Differentiable Average Lagging from
    Monotonic Infinite Lookback Attention for Simultaneous Machine Translation
    (https://arxiv.org/abs/1906.05218)

    Delays are monotonic steps, range from 0 to src_len-1.
    (In the original paper they are from 1 to src_len)
    Given source x and target y, DAL is calculated as:

    DAL = 1 / |y| sum_i^|y| delays'_i - (i - 1) / gamma

    Where
    delays'_i =
        1. delays_i if i == 1
        2. max(delays_i, delays'_{i-1} + 1 / gamma) otherwise
    """
    @staticmethod
    def cal_metric(delays, src_lens, tgt_lens, target_padding_mask):
        tgt_len, bsz = delays.size()
        gamma = tgt_lens / src_lens
        new_delays = torch.zeros_like(delays)

        for i in range(delays.size(0)):
            if i == 0:
                new_delays[i] = delays[i]
            else:
                new_delays[i] = torch.cat(
                    [
                        new_delays[i - 1].unsqueeze(0) + 1 / gamma,
                        delays[i].unsqueeze(0)
                    ],
                    dim=0
                ).max(dim=0)[0]

        DAL = (
            new_delays
            - torch.arange(delays.size(0)).unsqueeze(1).type_as(delays).expand_as(delays)
            / gamma
        )
        if target_padding_mask is not None:
            DAL = DAL.masked_fill(target_padding_mask, 0)

        DAL = DAL.sum(dim=0, keepdim=True) / tgt_lens
        return DAL


class LatencyMetricVariance(LatencyMetric):
    def prepare_latency_metric(
        self,
        delays,
        src_lens,
        target_padding_mask=None,
        batch_first: bool = True,
        start_from_zero: bool = True
    ):
        assert batch_first
        assert len(delays.size()) == 3
        assert len(src_lens.size()) == 2

        if start_from_zero:
            delays = delays + 1

        # convert to batch_last
        bsz, num_heads_x_layers, tgt_len = delays.size()
        bsz_1, _ = src_lens.size()
        assert bsz == bsz_1

        if target_padding_mask is not None:
            bsz_2, tgt_len_1 = target_padding_mask.size()
            assert tgt_len == tgt_len_1
            assert bsz == bsz_2

        if target_padding_mask is None:
            tgt_lens = tgt_len * delays.new_ones([bsz, tgt_len]).float()
        else:
            # batch_size, 1
            tgt_lens = self.length_from_padding_mask(target_padding_mask, True).float()
            delays = delays.masked_fill(target_padding_mask.unsqueeze(1), 0)

        return delays, src_lens, tgt_lens, target_padding_mask


class VarianceDelay(LatencyMetricVariance):
    @staticmethod
    def cal_metric(delays, src_lens, tgt_lens, target_padding_mask):
        """
        delays : bsz, num_heads_x_layers, tgt_len
        src_lens : bsz, 1
        target_lens : bsz, 1
        target_padding_mask: bsz, tgt_len or None
        """
        if delays.size(1) == 1:
            return delays.new_zeros([1])

        variance_delays = delays.var(dim=1)

        if target_padding_mask is not None:
            variance_delays.masked_fill_(target_padding_mask, 0)

        return variance_delays.sum(dim=1, keepdim=True) / tgt_lens


class LatencyInference(object):
    def __init__(self, start_from_zero=True):
        self.metric_calculator = {
            "differentiable_average_lagging": DifferentiableAverageLagging(),
            "average_lagging": AverageLagging(),
            "average_proportion": AverageProportion(),
        }

        self.start_from_zero = start_from_zero

    def __call__(self, monotonic_step, src_lens):
        """
        monotonic_step ranges from 0 to src_len. src_len means eos
        delays: bsz, tgt_len
        src_lens: bsz, 1
        """
        if not self.start_from_zero:
            monotonic_step -= 1

        delays = (
            monotonic_step
            .view(monotonic_step.size(0), -1, monotonic_step.size(-1))
            .max(dim=1)[0]
        )

        delays = (
            delays.masked_fill(delays >= src_lens, 0)
            + (src_lens - 1)
            .expand_as(delays)
            .masked_fill(delays < src_lens, 0)
        )
        return_dict = {}

        for key, func in self.metric_calculator.items():
            return_dict[key] = func(
                delays.float(), src_lens.float(),
                target_padding_mask=None,
                batch_first=True,
                start_from_zero=True
            ).t()

        return return_dict


class LatencyTraining(object):
    def __init__(
        self,
        avg_weight,
        var_weight,
        avg_type,
        var_type,
        stay_on_last_token,
        average_method,
    ):
        self.avg_weight = avg_weight
        self.var_weight = var_weight
        self.avg_type = avg_type
        self.var_type = var_type
        self.stay_on_last_token = stay_on_last_token
        self.average_method = average_method

        self.metric_calculator = {
            "differentiable_average_lagging": DifferentiableAverageLagging(),
            "average_lagging": AverageLagging(),
            "average_proportion": AverageProportion(),
        }

        self.variance_calculator = {
            "variance_delay": VarianceDelay(),
        }

    def expected_delays_from_attention(
        self, attention, source_padding_mask=None, target_padding_mask=None
    ):
        if type(attention) == list:
            # bsz, num_heads, tgt_len, src_len
            bsz, num_heads, tgt_len, src_len = attention[0].size()
            attention = torch.cat(attention, dim=1)
            bsz, num_heads_x_layers, tgt_len, src_len = attention.size()
            # bsz * num_heads * num_layers, tgt_len, src_len
            attention = attention.view(-1, tgt_len, src_len)
        else:
            # bsz * num_heads * num_layers, tgt_len, src_len
            bsz, tgt_len, src_len = attention.size()
            num_heads_x_layers = 1
            attention = attention.view(-1, tgt_len, src_len)

        if not self.stay_on_last_token:
            residual_attention = \
                1 - attention[:, :, :-1].sum(dim=2, keepdim=True)
            attention = torch.cat([attention[:, :, :-1], residual_attention], dim=2)

        # bsz * num_heads_x_num_layers, tgt_len, src_len for MMA
        steps = (
            torch
            .arange(1, 1 + src_len)
            .unsqueeze(0)
            .unsqueeze(1)
            .expand_as(attention)
            .type_as(attention)
        )

        if source_padding_mask is not None:
            src_offset = (
                source_padding_mask.type_as(attention)
                .sum(dim=1, keepdim=True)
                .expand(bsz, num_heads_x_layers)
                .contiguous()
                .view(-1, 1)
            )
            src_lens = src_len - src_offset
            if source_padding_mask[:, 0].any():
                # Pad left
                src_offset = src_offset.view(-1, 1, 1)
                steps = steps - src_offset
                steps = steps.masked_fill(steps <= 0, 0)
        else:
            src_lens = attention.new_ones([bsz, num_heads_x_layers]) * src_len
            src_lens = src_lens.view(-1, 1)

        # bsz * num_heads_num_layers, tgt_len, src_len
        expected_delays = (steps * attention).sum(dim=2).view(
            bsz, num_heads_x_layers, tgt_len
        )

        if target_padding_mask is not None:
            expected_delays.masked_fill_(
                target_padding_mask.unsqueeze(1),
                0
            )

        return expected_delays, src_lens

    def avg_loss(self, expected_delays, src_lens, target_padding_mask):
        bsz, num_heads_x_layers, tgt_len = expected_delays.size()
        target_padding_mask = (
            target_padding_mask
            .unsqueeze(1)
            .expand_as(expected_delays)
            .contiguous()
            .view(-1, tgt_len)
        )

        if self.average_method == "average":
            # bsz * tgt_len
            expected_delays = expected_delays.mean(dim=1)
        elif self.average_method == "weighted_average":
            weights = torch.nn.functional.softmax(expected_delays, dim=1)
            expected_delays = torch.sum(expected_delays * weights, dim=1)
        elif self.average_method == "max":
            # bsz * num_heads_x_num_layers, tgt_len
            expected_delays = expected_delays.max(dim=1)[0]
        else:
            raise RuntimeError(f"{self.average_method} is not supported")

        src_lens = src_lens.view(bsz, -1)[:, :1]
        target_padding_mask = target_padding_mask.view(bsz, -1, tgt_len)[:, 0]

        if self.avg_weight > 0.0:
            if self.avg_type in self.metric_calculator:
                average_delays = self.metric_calculator[self.avg_type](
                    expected_delays, src_lens, target_padding_mask,
                    batch_first=True, start_from_zero=False
                )
            else:
                raise RuntimeError(f"{self.avg_type} is not supported.")

            # bsz * num_heads_x_num_layers, 1
            return self.avg_weight * average_delays.sum()
        else:
            return 0.0

    def var_loss(self, expected_delays, src_lens, target_padding_mask):
        src_lens = src_lens.view(expected_delays.size(0), expected_delays.size(1))[:, :1]
        if self.var_weight > 0.0:
            if self.var_type in self.variance_calculator:
                variance_delays = self.variance_calculator[self.var_type](
                    expected_delays, src_lens, target_padding_mask,
                    batch_first=True, start_from_zero=False
                )
            else:
                raise RuntimeError(f"{self.var_type} is not supported.")

            return self.var_weight * variance_delays.sum()
        else:
            return 0.0

    def loss(self, attention, source_padding_mask=None, target_padding_mask=None):
        expected_delays, src_lens = self.expected_delays_from_attention(
            attention, source_padding_mask, target_padding_mask
        )

        latency_loss = 0

        latency_loss += self.avg_loss(expected_delays, src_lens, target_padding_mask)

        latency_loss += self.var_loss(expected_delays, src_lens, target_padding_mask)

        return latency_loss
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
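The metric classes above share one calling convention; the following is a minimal sketch (delay values made up, import path as in this repo) of computing AL directly and the full metric dict via LatencyInference.

import torch
from examples.simultaneous_translation.utils.latency import (
    AverageLagging, LatencyInference
)

# One sentence, batch-last layout: delays is (tgt_len, bsz), src_lens is (1, bsz).
delays = torch.tensor([[1.], [2.], [4.], [5.]])  # read steps per target token
src_lens = torch.tensor([[5.]])

# Delays here already start from 1, hence start_from_zero=False
al = AverageLagging()(delays, src_lens, batch_first=False, start_from_zero=False)
print(al)  # (1, bsz) tensor of AL values

# LatencyInference wraps all three metrics; it expects batch-first long tensors
scorer = LatencyInference(start_from_zero=False)
results = scorer(delays.t().long(), src_lens.t().long())
print(results["average_lagging"], results["average_proportion"])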
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/simultaneous_translation/utils/__init__.py
examples/simultaneous_translation/utils/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import importlib
import os

# automatically import any Python files in this directory
for file in os.listdir(os.path.dirname(__file__)):
    if file.endswith('.py') and not file.startswith('_'):
        module = file[:file.find('.py')]
        importlib.import_module('examples.simultaneous_translation.utils.' + module)
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/simultaneous_translation/utils/functions.py
examples/simultaneous_translation/utils/functions.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch


def exclusive_cumprod(tensor, dim: int, eps: float = 1e-10):
    """
    Implementing exclusive cumprod.
    There is cumprod in pytorch, however there is no exclusive mode.
    cumprod(x) = [x1, x1x2, x1x2x3, ..., prod_{i=1}^n x_i]
    exclusive means
    cumprod(x) = [1, x1, x1x2, x1x2x3, ..., prod_{i=1}^{n-1} x_i]
    """
    tensor_size = list(tensor.size())
    tensor_size[dim] = 1
    return_tensor = safe_cumprod(
        torch.cat([torch.ones(tensor_size).type_as(tensor), tensor], dim=dim),
        dim=dim,
        eps=eps
    )

    if dim == 0:
        return return_tensor[:-1]
    elif dim == 1:
        return return_tensor[:, :-1]
    elif dim == 2:
        return return_tensor[:, :, :-1]
    else:
        raise RuntimeError("Cumprod on dimension 3 and more is not implemented")


def safe_cumprod(tensor, dim: int, eps: float = 1e-10):
    """
    An implementation of cumprod to prevent precision issue.
    cumprod(x)
    = [x1, x1x2, x1x2x3, ....]
    = [exp(log(x1)), exp(log(x1) + log(x2)), exp(log(x1) + log(x2) + log(x3)), ...]
    = exp(cumsum(log(x)))
    """
    if (tensor + eps < 0).any().item():
        raise RuntimeError(
            "Safe cumprod can only take non-negative tensors as input. "
            "Consider using torch.cumprod if you want to calculate negative values."
        )

    log_tensor = torch.log(tensor + eps)
    cumsum_log_tensor = torch.cumsum(log_tensor, dim)
    exp_cumsum_log_tensor = torch.exp(cumsum_log_tensor)
    return exp_cumsum_log_tensor


def lengths_to_mask(lengths, max_len: int, dim: int = 0, negative_mask: bool = False):
    """
    Convert a tensor of lengths to mask
    For example, lengths = [[2, 3, 4]], max_len = 5
    mask =
       [[1, 1, 1],
        [1, 1, 1],
        [0, 1, 1],
        [0, 0, 1],
        [0, 0, 0]]
    """
    assert len(lengths.size()) <= 2

    if len(lengths.size()) == 2:
        if dim == 1:
            lengths = lengths.t()
    else:
        lengths = lengths.unsqueeze(1)

    # lengths : batch_size, 1
    lengths = lengths.view(-1, 1)

    batch_size = lengths.size(0)
    # batch_size, max_len
    mask = torch.arange(max_len).expand(batch_size, max_len).type_as(lengths) < lengths

    if negative_mask:
        mask = ~mask

    if dim == 0:
        # max_len, batch_size
        mask = mask.t()

    return mask


def moving_sum(x, start_idx: int, end_idx: int):
    """
    From MONOTONIC CHUNKWISE ATTENTION
    https://arxiv.org/pdf/1712.05382.pdf
    Equation (18)

    x = [x_1, x_2, ..., x_N]
    MovingSum(x, start_idx, end_idx)_n = Sigma_{m=n-(start_idx-1)}^{n+end_idx-1} x_m
    for n in {1, 2, 3, ..., N}

    x : src_len, batch_size
    start_idx : start idx
    end_idx : end idx

    Example
    src_len = 5
    batch_size = 3
    x =
       [[ 0,  5, 10],
        [ 1,  6, 11],
        [ 2,  7, 12],
        [ 3,  8, 13],
        [ 4,  9, 14]]

    MovingSum(x, 3, 1) =
       [[ 0,  5, 10],
        [ 1, 11, 21],
        [ 3, 18, 33],
        [ 6, 21, 36],
        [ 9, 24, 39]]

    MovingSum(x, 1, 3) =
       [[ 3, 18, 33],
        [ 6, 21, 36],
        [ 9, 24, 39],
        [ 7, 17, 27],
        [ 4,  9, 14]]
    """
    assert start_idx > 0 and end_idx > 0
    assert len(x.size()) == 2
    src_len, batch_size = x.size()
    # batch_size, 1, src_len
    x = x.t().unsqueeze(1)
    # batch_size, 1, src_len
    moving_sum_weight = x.new_ones([1, 1, end_idx + start_idx - 1])

    moving_sum = torch.nn.functional.conv1d(
        x, moving_sum_weight, padding=start_idx + end_idx - 1
    ).squeeze(1).t()
    moving_sum = moving_sum[end_idx:-start_idx]

    assert src_len == moving_sum.size(0)
    assert batch_size == moving_sum.size(1)

    return moving_sum
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
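A quick sanity check of the two main helpers above; a sketch assuming the module imports as examples.simultaneous_translation.utils.functions (the input tensors are just the docstring examples).

import torch
from examples.simultaneous_translation.utils.functions import (
    exclusive_cumprod, moving_sum
)

x = torch.tensor([[0.5, 0.5, 0.5]])
# Exclusive: first entry is 1, last factor is dropped
print(exclusive_cumprod(x, dim=1))  # approx [[1.0, 0.5, 0.25]]

# moving_sum expects (src_len, batch_size), as in its docstring example
y = torch.arange(15.).view(3, 5).t()
print(moving_sum(y, 3, 1))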
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/simultaneous_translation/criterions/label_smoothed_cross_entropy_latency_augmented.py
examples/simultaneous_translation/criterions/label_smoothed_cross_entropy_latency_augmented.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from fairseq.criterions import register_criterion
from fairseq.criterions.label_smoothed_cross_entropy import (
    LabelSmoothedCrossEntropyCriterion
)

from examples.simultaneous_translation.utils.latency import (
    LatencyTraining
)


@register_criterion('latency_augmented_label_smoothed_cross_entropy')
class LatencyAugmentedLabelSmoothedCrossEntropyCriterion(
    LabelSmoothedCrossEntropyCriterion
):

    def __init__(self, args, task):
        super().__init__(args, task)
        self.eps = args.label_smoothing
        self.latency_weight_avg = args.latency_weight_avg
        self.latency_weight_avg_type = args.latency_weight_avg_type
        self.latency_weight_var = args.latency_weight_var
        self.latency_weight_var_type = args.latency_weight_var_type
        self.mass_preservation = args.mass_preservation
        self.average_method = args.average_method
        self.latency_train = LatencyTraining(
            self.latency_weight_avg,
            self.latency_weight_var,
            self.latency_weight_avg_type,
            self.latency_weight_var_type,
            self.mass_preservation,
            self.average_method,
        )

    @staticmethod
    def add_args(parser):
        """Add criterion-specific arguments to the parser."""
        super(
            LatencyAugmentedLabelSmoothedCrossEntropyCriterion,
            LatencyAugmentedLabelSmoothedCrossEntropyCriterion
        ).add_args(parser)
        # fmt: off
        parser.add_argument("--latency-weight-avg", default=0., type=float, metavar='D',
                            help="Average loss weight")
        parser.add_argument("--latency-weight-var", default=0., type=float, metavar='D',
                            help="Variance loss weight")
        parser.add_argument("--latency-weight-avg-type", default="differentiable_average_lagging",
                            help="Statistics for average loss type")
        parser.add_argument("--latency-weight-var-type", default="variance_delay",
                            help="Statistics for variance loss type")
        parser.add_argument("--average-method", default="weighted_average",
                            help="Average loss type")
        # fmt: on

    def compute_loss(self, model, net_output, sample, reduce=True):
        # Compute cross entropy loss first
        loss, nll_loss = super().compute_loss(model, net_output, sample, reduce)

        # Obtain the expected alignment
        attn_list = [item["alpha"] for item in net_output[-1]["attn_list"]]

        target_padding_mask = model.get_targets(sample, net_output).eq(self.padding_idx)

        source_padding_mask = net_output[-1].get("encoder_padding_mask", None)

        # Get latency loss
        latency_loss = self.latency_train.loss(
            attn_list, source_padding_mask, target_padding_mask)

        loss += latency_loss

        return loss, nll_loss
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
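To see how the criterion's latency term behaves in isolation, here is a small sketch driving LatencyTraining directly with a fake uniform attention tensor (all shapes and values below are made up; in the criterion the input is the model's alpha list).

import torch
from examples.simultaneous_translation.utils.latency import LatencyTraining

latency_train = LatencyTraining(
    avg_weight=1.0, var_weight=0.0,
    avg_type="differentiable_average_lagging",
    var_type="variance_delay",
    stay_on_last_token=True,
    average_method="weighted_average",
)

bsz, tgt_len, src_len = 2, 4, 6
# Uniform "attention": every target step attends equally to all source steps
attention = torch.full((bsz, tgt_len, src_len), 1.0 / src_len)
target_padding_mask = torch.zeros(bsz, tgt_len, dtype=torch.bool)

# Scalar latency penalty, added to the cross-entropy loss in compute_loss
print(latency_train.loss(attention, None, target_padding_mask))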
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/simultaneous_translation/criterions/__init__.py
examples/simultaneous_translation/criterions/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import importlib
import os

for file in os.listdir(os.path.dirname(__file__)):
    if file.endswith(".py") and not file.startswith("_"):
        criterion_name = file[: file.find(".py")]
        importlib.import_module(
            "examples.simultaneous_translation.criterions." + criterion_name
        )
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/simultaneous_translation/eval/client.py
examples/simultaneous_translation/eval/client.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import requests
from typing import Optional

from scorers import build_scorer


class SimulSTEvaluationService(object):
    DEFAULT_HOSTNAME = 'localhost'
    DEFAULT_PORT = 12321

    def __init__(self, hostname=DEFAULT_HOSTNAME, port=DEFAULT_PORT):
        self.hostname = hostname
        self.port = port
        self.base_url = f'http://{self.hostname}:{self.port}'

    def __enter__(self):
        # Return the session so it is usable in a `with` block
        return self.new_session()

    def __exit__(self, exc_type, exc_val, exc_tb):
        pass

    def new_session(self):
        # start eval session
        url = f'{self.base_url}'

        try:
            _ = requests.post(url)
        except Exception as e:
            print(f'Failed to start an evaluation session: {e}')

        print('Evaluation session started.')
        return self

    def get_scores(self):
        # end eval session
        url = f'{self.base_url}/result'
        try:
            r = requests.get(url)
            print('Scores: {}'.format(r.json()))
            print('Evaluation session finished.')
        except Exception as e:
            print(f'Failed to end an evaluation session: {e}')

    def get_src(self, sent_id: int, extra_params: Optional[dict] = None) -> str:
        url = f'{self.base_url}/src'
        params = {"sent_id": sent_id}
        if extra_params is not None:
            for key in extra_params.keys():
                params[key] = extra_params[key]
        try:
            r = requests.get(
                url,
                params=params
            )
        except Exception as e:
            print(f'Failed to request a source segment: {e}')
        return r.json()

    def send_hypo(self, sent_id: int, hypo: str) -> None:
        url = f'{self.base_url}/hypo'
        params = {"sent_id": sent_id}

        try:
            requests.put(url, params=params, data=hypo.encode("utf-8"))
        except Exception as e:
            print(f'Failed to send a translated segment: {e}')

    def corpus_info(self):
        url = f'{self.base_url}'
        try:
            r = requests.get(url)
        except Exception as e:
            print(f'Failed to request corpus information: {e}')
        return r.json()


class SimulSTLocalEvaluationService(object):
    def __init__(self, args):
        self.scorer = build_scorer(args)

    def get_scores(self):
        return self.scorer.score()

    def get_src(self, sent_id: int, extra_params: Optional[dict] = None) -> str:
        if extra_params is not None:
            segment_size = extra_params.get("segment_size", None)
        else:
            segment_size = None

        return self.scorer.send_src(int(sent_id), segment_size)

    def send_hypo(self, sent_id: int, hypo: str) -> None:
        list_of_tokens = hypo.strip().split()
        self.scorer.recv_hyp(sent_id, list_of_tokens)

    def corpus_info(self):
        return self.scorer.get_info()
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
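A usage sketch of the remote client above, assuming an evaluation server is already listening on localhost:12321 (see server.py in this directory); the sentence id and hypothesis token are placeholders.

from client import SimulSTEvaluationService

session = SimulSTEvaluationService('localhost', 12321).new_session()
info = session.corpus_info()       # {"num_sentences": ...}
segment = session.get_src(0)       # first source segment of sentence 0
session.send_hypo(0, "hello")      # send one translated token
session.get_scores()               # fetches and prints corpus-level scores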
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/simultaneous_translation/eval/eval_latency.py
examples/simultaneous_translation/eval/eval_latency.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from examples.simultaneous_translation.utils.latency import LatencyInference
import argparse
import torch
import json

LATENCY_METRICS = [
    'differentiable_average_lagging',
    'average_lagging',
    'average_proportion',
]


class LatencyScorer():
    def __init__(self, start_from_zero=True):
        self.recorder = []
        self.scores = {}
        self.scorer = LatencyInference()
        self.start_from_zero = start_from_zero

    def update_reorder(self, list_of_dict):
        self.recorder = []
        for info in list_of_dict:
            delays = [
                int(x) - int(not self.start_from_zero) for x in info["delays"]
            ]
            delays = torch.LongTensor(delays).unsqueeze(0)
            src_len = torch.LongTensor([info["src_len"]]).unsqueeze(0)
            self.recorder.append(self.scorer(delays, src_len))

    def cal_latency(self):
        self.scores = {}
        for metric in LATENCY_METRICS:
            self.scores[metric] = sum(
                [x[metric][0, 0].item() for x in self.recorder]
            ) / len(self.recorder)
        return self.scores

    @classmethod
    def score(cls, list_of_dict, start_from_zero=True):
        scorer_to_return = cls(start_from_zero)
        scorer_to_return.update_reorder(list_of_dict)
        scorer_to_return.cal_latency()
        return scorer_to_return.scores


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--input", required=True)
    parser.add_argument("--start-from-zero", action="store_true")
    args = parser.parse_args()

    scorer = LatencyInference()
    recorder = []

    with open(args.input, 'r') as f:
        for line in f:
            info = json.loads(line)
            delays = [int(x) - int(not args.start_from_zero) for x in info["delays"]]
            delays = torch.LongTensor(delays).unsqueeze(0)
            src_len = torch.LongTensor([info["src_len"]]).unsqueeze(0)
            recorder.append(scorer(delays, src_len))

    average_results = {}

    for metric in LATENCY_METRICS:
        average_results[metric] = sum(
            [x[metric][0, 0].item() for x in recorder]
        ) / len(recorder)
        print(f"{metric}: {average_results[metric]}")
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
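The __main__ block above expects one JSON object per line ({"src_len": ..., "delays": [...]}); the same scoring is available programmatically via the classmethod, as in this sketch (the delay values are made up).

# Command-line use (file name is a placeholder):
#   python eval_latency.py --input delays.jsonl
from examples.simultaneous_translation.eval.eval_latency import LatencyScorer

scores = LatencyScorer.score(
    [{"src_len": 5, "delays": [1, 2, 4, 5, 5]}],
    start_from_zero=False,
)
print(scores)  # dict keyed by the three LATENCY_METRICS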
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/simultaneous_translation/eval/__init__.py
examples/simultaneous_translation/eval/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/simultaneous_translation/eval/evaluate.py
examples/simultaneous_translation/eval/evaluate.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import argparse

from client import SimulSTEvaluationService, SimulSTLocalEvaluationService
from fairseq.registry import REGISTRIES

from agents import build_agent

DEFAULT_HOSTNAME = 'localhost'
DEFAULT_PORT = 12321


def get_args():
    parser = argparse.ArgumentParser()

    parser.add_argument('--hostname', type=str, default=DEFAULT_HOSTNAME,
                        help='server hostname')
    parser.add_argument('--port', type=int, default=DEFAULT_PORT,
                        help='server port number')
    parser.add_argument('--agent-type', default='simul_trans_text',
                        help='Agent type')
    parser.add_argument('--scorer-type', default='text',
                        help='Scorer type')
    parser.add_argument('--start-idx', type=int, default=0,
                        help='Start index of the sentence to evaluate')
    parser.add_argument('--end-idx', type=int, default=float('inf'),
                        help='End index of the sentence to evaluate')
    parser.add_argument('--scores', action="store_true",
                        help='Request scores from server')
    parser.add_argument('--reset-server', action="store_true",
                        help='Reset the server')
    parser.add_argument('--num-threads', type=int, default=10,
                        help='Number of threads used by agent')
    parser.add_argument('--local', action="store_true", default=False,
                        help='Local evaluation')

    args, _ = parser.parse_known_args()

    for registry_name, REGISTRY in REGISTRIES.items():
        choice = getattr(args, registry_name, None)
        if choice is not None:
            cls = REGISTRY["registry"][choice]
            if hasattr(cls, "add_args"):
                cls.add_args(parser)

    args = parser.parse_args()

    return args


if __name__ == "__main__":
    args = get_args()

    if args.local:
        session = SimulSTLocalEvaluationService(args)
    else:
        session = SimulSTEvaluationService(args.hostname, args.port)
        if args.reset_server:
            session.new_session()

    if args.agent_type is not None:
        agent = build_agent(args)
        agent.decode(session, args.start_idx, args.end_idx, args.num_threads)

    if args.scores:
        # Request the scores once and print them
        print(session.get_scores())
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/simultaneous_translation/eval/server.py
examples/simultaneous_translation/eval/server.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import argparse
import sys
import json

from tornado import web, ioloop

from scorers import build_scorer

DEFAULT_HOSTNAME = 'localhost'
DEFAULT_PORT = 12321


class ScorerHandler(web.RequestHandler):
    def initialize(self, scorer):
        self.scorer = scorer


class EvalSessionHandler(ScorerHandler):
    def post(self):
        self.scorer.reset()

    def get(self):
        r = json.dumps(self.scorer.get_info())
        self.write(r)


class ResultHandler(ScorerHandler):
    def get(self):
        r = json.dumps(self.scorer.score())
        self.write(r)


class SourceHandler(ScorerHandler):
    def get(self):
        sent_id = int(self.get_argument('sent_id'))
        segment_size = None
        if "segment_size" in self.request.arguments:
            string = self.get_argument('segment_size')
            if len(string) > 0:
                segment_size = int(string)

        r = json.dumps(self.scorer.send_src(int(sent_id), segment_size))

        self.write(r)


class HypothesisHandler(ScorerHandler):
    def put(self):
        sent_id = int(self.get_argument('sent_id'))
        list_of_tokens = self.request.body.decode('utf-8').strip().split()
        self.scorer.recv_hyp(sent_id, list_of_tokens)


def add_args():
    parser = argparse.ArgumentParser()
    # fmt: off
    parser.add_argument('--hostname', type=str, default=DEFAULT_HOSTNAME,
                        help='Server hostname')
    parser.add_argument('--port', type=int, default=DEFAULT_PORT,
                        help='Server port number')
    # --debug is consumed by start_server below
    parser.add_argument('--debug', action='store_true',
                        help='Run the server in debug mode')
    args, _ = parser.parse_known_args()
    # fmt: on
    return args


def start_server(scorer, hostname=DEFAULT_HOSTNAME, port=DEFAULT_PORT, debug=False):
    app = web.Application([
        (r'/result', ResultHandler, dict(scorer=scorer)),
        (r'/src', SourceHandler, dict(scorer=scorer)),
        (r'/hypo', HypothesisHandler, dict(scorer=scorer)),
        (r'/', EvalSessionHandler, dict(scorer=scorer)),
    ], debug=debug)
    app.listen(port, max_buffer_size=1024 ** 3)
    sys.stdout.write(f"Evaluation Server Started. Listening to port {port}\n")
    ioloop.IOLoop.current().start()


if __name__ == '__main__':
    args = add_args()
    scorer = build_scorer(args)
    start_server(scorer, args.hostname, args.port, args.debug)
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
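The handlers above map one-to-one onto HTTP endpoints; this sketch exercises them with raw requests, assuming a server is already listening on localhost:12321 (it mirrors what client.py does internally, with a placeholder hypothesis token).

import requests

base = 'http://localhost:12321'
requests.post(base)                                    # reset / new session
print(requests.get(base).json())                       # corpus info
print(requests.get(f'{base}/src', params={'sent_id': 0}).json())
requests.put(f'{base}/hypo', params={'sent_id': 0}, data='hello'.encode('utf-8'))
print(requests.get(f'{base}/result').json())           # final scores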
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/simultaneous_translation/eval/agents/word_splitter.py
examples/simultaneous_translation/eval/agents/word_splitter.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.


class SubwordSplitter(object):
    def process_line(self, string):
        raise NotImplementedError

    def split(self, string):
        raise NotImplementedError


class NoneWordSplitter(object):
    def __init__(self, model):
        pass

    def split(self, string):
        return [string]

    def process_line(self, string):
        return [string]

    def finished_word(self, string):
        return True

    def merge(self, list_of_string):
        return "".join(list_of_string)

    def last_full_word_step(self, tokens, step):
        return len(tokens)

    def end_idx_last_full_word(self, tokens):
        return len(tokens)


class BPEWordSplitter(object):
    # TODO: look back here
    def __init__(self, model_path):
        super().__init__()
        from subword_nmt.apply_bpe import BPE
        with open(model_path) as f:
            self.model = BPE(f)

    def split(self, string):
        return self.model.process_line(string).split()

    def end_idx_last_full_word(self, tokens):
        # Begin-of-word indices
        bow_indices = [0] + [i + 1 for i, t in enumerate(tokens[1:]) if t[-2:] != '@@']

        if len(bow_indices) < 2:
            return 0
        else:
            return bow_indices[-1]

    def merge(self, list_of_string):
        return " ".join([item.replace("@@", "") for item in list_of_string])


class SentencePieceModelWordSplitter(object):
    def __init__(self, model_path):
        super().__init__()
        import sentencepiece as spm
        self.model = spm.SentencePieceProcessor()
        self.model.Load(model_path)

    def split(self, string):
        return self.model.EncodeAsPieces(string)

    def end_idx_last_full_word(self, tokens):
        # Begin-of-word indices
        bow_indices = [i for i, t in enumerate(tokens) if t[0] == '\u2581']

        if len(bow_indices) < 2:
            return 0
        else:
            return bow_indices[-1]

    def merge(self, list_of_string):
        return self.model.DecodePieces(list_of_string)


SPLITTER_DICT = {
    None: NoneWordSplitter,
    "BPE": BPEWordSplitter,
    "SentencePieceModel": SentencePieceModelWordSplitter,
}
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
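A small illustration of the end_idx_last_full_word boundary logic above, using hand-made sentencepiece-style tokens (U+2581 marks a word start) so no model file is needed; the token values are made up.

tokens = ['\u2581ma', 'chine', '\u2581trans', 'lation']
# Same begin-of-word scan as SentencePieceModelWordSplitter.end_idx_last_full_word
bow_indices = [i for i, t in enumerate(tokens) if t[0] == '\u2581']
print(bow_indices)        # [0, 2]
print(bow_indices[-1])    # 2: only tokens[:2] form a finished word so far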
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/simultaneous_translation/eval/agents/__init__.py
examples/simultaneous_translation/eval/agents/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import importlib
import os

from fairseq import registry

build_agent, register_agent, MONOTONIC_AGENT = registry.setup_registry('--agent-type')

DEFAULT_EOS = '</s>'
GET = 0
SEND = 1

for file in os.listdir(os.path.dirname(__file__)):
    if file.endswith('.py') and not file.startswith('_'):
        module = file[:file.find('.py')]
        importlib.import_module('agents.' + module)
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/simultaneous_translation/eval/agents/simul_trans_text_agent.py
examples/simultaneous_translation/eval/agents/simul_trans_text_agent.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from . simul_trans_agent import SimulTransAgent
from . import DEFAULT_EOS, GET
from . import register_agent
from . word_splitter import SPLITTER_DICT


@register_agent("simul_trans_text")
class SimulTransTextAgent(SimulTransAgent):
    def build_word_splitter(self, args):
        self.word_splitter = {}

        self.word_splitter["src"] = SPLITTER_DICT[args.src_splitter_type](
            getattr(args, "src_splitter_path")
        )
        self.word_splitter["tgt"] = SPLITTER_DICT[args.tgt_splitter_type](
            getattr(args, "tgt_splitter_path")
        )

    def load_dictionary(self, task):
        self.dict = {}
        self.dict["tgt"] = task.target_dictionary
        self.dict["src"] = task.source_dictionary

    def update_states(self, states, new_state):
        if states["finish_read"]:
            return states

        new_word = new_state["segment"]

        # Split words and index the tokens
        if new_word not in [DEFAULT_EOS]:
            tokens = self.word_splitter["src"].split(new_word)
            # Get indices from the dictionary
            # You can change this to your own dictionary
            indices = self.dict["src"].encode_line(
                tokens,
                line_tokenizer=lambda x: x,
                add_if_not_exist=False,
                append_eos=False
            ).tolist()
        else:
            tokens = [new_word]
            indices = [self.dict["src"].eos()]
            states["finish_read"] = True

        # Update states
        states["segments"]["src"] += [new_word]
        states["tokens"]["src"] += tokens
        self._append_indices(states, indices, "src")

        return states

    def read_action(self, states):
        # Increase source step by one
        states["steps"]["src"] += 1

        # At least one word is read
        if len(states["tokens"]["src"]) == 0:
            return {'key': GET, 'value': None}

        # Only request a new word if there are no buffered tokens
        if len(states["tokens"]["src"]) <= states["steps"]["src"]:
            return {'key': GET, 'value': None}

        return None

    def finish_read(self, states):
        # The first condition means all segments (full words) have been read from the server
        # The second condition means all tokens (subwords) have been read locally
        return (
            states["finish_read"]
            and len(states["tokens"]["src"]) == states["steps"]["src"]
        )
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/simultaneous_translation/eval/agents/agent.py
examples/simultaneous_translation/eval/agents/agent.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from . import GET, SEND, DEFAULT_EOS

import time
from multiprocessing.pool import ThreadPool as Pool
from functools import partial


class Agent(object):
    "an agent needs to follow this pattern"

    def __init__(self, *args, **kwargs):
        pass

    def init_states(self, *args, **kwargs):
        raise NotImplementedError

    def update_states(self, states, new_state):
        raise NotImplementedError

    def finish_eval(self, states, new_state):
        raise NotImplementedError

    def policy(self, state):
        raise NotImplementedError

    def reset(self):
        raise NotImplementedError

    def decode(self, session, low=0, high=100000, num_thread=10):
        corpus_info = session.corpus_info()
        high = min(corpus_info["num_sentences"] - 1, high)
        if low >= high:
            return

        t0 = time.time()
        if num_thread > 1:
            with Pool(num_thread) as p:
                p.map(
                    partial(self._decode_one, session),
                    [sent_id for sent_id in range(low, high + 1)]
                )
        else:
            for sent_id in range(low, high + 1):
                self._decode_one(session, sent_id)

        print(f'Finished {low} to {high} in {time.time() - t0}s')

    def _decode_one(self, session, sent_id):
        action = {}
        self.reset()
        states = self.init_states()
        while action.get('value', None) != DEFAULT_EOS:
            # take an action
            action = self.policy(states)

            if action['key'] == GET:
                new_states = session.get_src(sent_id, action["value"])
                states = self.update_states(states, new_states)

            elif action['key'] == SEND:
                session.send_hypo(sent_id, action['value'])

        print(" ".join(states["tokens"]["tgt"]))
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
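A toy Agent subclass sketching the GET/SEND protocol that decode() drives; it just echoes one source token back, and it assumes the eval directory is on sys.path so the agents package imports as in evaluate.py (everything here is illustrative, not a real agent).

from agents import GET, SEND, DEFAULT_EOS
from agents.agent import Agent


class EchoAgent(Agent):
    def init_states(self):
        # _decode_one prints states["tokens"]["tgt"] at the end
        return {"tokens": {"src": [], "tgt": []}, "done": False}

    def update_states(self, states, new_state):
        states["tokens"]["src"].append(new_state["segment"])
        return states

    def policy(self, states):
        if not states["tokens"]["src"]:
            return {'key': GET, 'value': None}          # read first
        if states["done"] or states["tokens"]["src"][-1] == DEFAULT_EOS:
            return {'key': SEND, 'value': DEFAULT_EOS}  # finish the hypothesis
        states["done"] = True
        word = states["tokens"]["src"][-1]
        states["tokens"]["tgt"].append(word)
        return {'key': SEND, 'value': word}             # write it back

    def reset(self):
        pass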
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/simultaneous_translation/eval/agents/simul_trans_agent.py
examples/simultaneous_translation/eval/agents/simul_trans_agent.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from . agent import Agent
from . import DEFAULT_EOS, GET, SEND

from fairseq import checkpoint_utils, utils, tasks
import os
import json


class SimulTransAgent(Agent):
    def __init__(self, args):
        # Load Model
        self.load_model(args)

        # Build word splitter
        self.build_word_splitter(args)

        self.max_len = args.max_len

        self.eos = DEFAULT_EOS

    @staticmethod
    def add_args(parser):
        # fmt: off
        parser.add_argument('--model-path', type=str, required=True,
                            help='path to your pretrained model.')
        parser.add_argument("--data-bin", type=str, required=True,
                            help="Path of data binary")
        parser.add_argument("--user-dir", type=str, default="example/simultaneous_translation",
                            help="User directory for simultaneous translation")
        parser.add_argument("--src-splitter-type", type=str, default=None,
                            help="Subword splitter type for source text")
        parser.add_argument("--tgt-splitter-type", type=str, default=None,
                            help="Subword splitter type for target text")
        parser.add_argument("--src-splitter-path", type=str, default=None,
                            help="Subword splitter model path for source text")
        parser.add_argument("--tgt-splitter-path", type=str, default=None,
                            help="Subword splitter model path for target text")
        parser.add_argument("--max-len", type=int, default=150,
                            help="Maximum length difference between source and target prediction")
        parser.add_argument('--model-overrides', default="{}", type=str, metavar='DICT',
                            help='A dictionary used to override model args at generation '
                                 'that were used during model training')
        # fmt: on
        return parser

    def load_dictionary(self, task):
        raise NotImplementedError

    def load_model(self, args):
        args.user_dir = os.path.join(os.path.dirname(__file__), '..', '..')
        utils.import_user_module(args)
        filename = args.model_path
        if not os.path.exists(filename):
            raise IOError("Model file not found: {}".format(filename))

        state = checkpoint_utils.load_checkpoint_to_cpu(filename, json.loads(args.model_overrides))

        saved_args = state["args"]
        saved_args.data = args.data_bin

        task = tasks.setup_task(saved_args)

        # build model for ensemble
        self.model = task.build_model(saved_args)
        self.model.load_state_dict(state["model"], strict=True)

        # Set dictionary
        self.load_dictionary(task)

    def init_states(self):
        return {
            "indices": {"src": [], "tgt": []},
            "tokens": {"src": [], "tgt": []},
            "segments": {"src": [], "tgt": []},
            "steps": {"src": 0, "tgt": 0},
            "finished": False,
            "finish_read": False,
            "model_states": {}
        }

    def update_states(self, states, new_state):
        raise NotImplementedError

    def policy(self, states):
        # Read and Write policy
        action = None

        while action is None:
            if states["finished"]:
                # Finish the hypo by sending eos to the server
                return self.finish_action()

            # The model makes a decision given current states
            decision = self.model.decision_from_states(states)

            if decision == 0 and not self.finish_read(states):
                # READ
                action = self.read_action(states)
            else:
                # WRITE
                action = self.write_action(states)

            # None means we make a decision again without sending the server anything.
            # This happens when reading a buffered token
            # or predicting a subword.

        return action

    def finish_read(self, states):
        raise NotImplementedError

    def write_action(self, states):
        token, index = self.model.predict_from_states(states)

        if index == self.dict["tgt"].eos() or len(states["tokens"]["tgt"]) > self.max_len:
            # Finish this sentence if the model predicts EOS
            # or the length limit is exceeded
            states["finished"] = True
            end_idx_last_full_word = self._target_length(states)
        else:
            states["tokens"]["tgt"] += [token]
            end_idx_last_full_word = (
                self.word_splitter["tgt"]
                .end_idx_last_full_word(states["tokens"]["tgt"])
            )
            self._append_indices(states, [index], "tgt")

        if end_idx_last_full_word > states["steps"]["tgt"]:
            # Only send detokenized full words to the server
            word = self.word_splitter["tgt"].merge(
                states["tokens"]["tgt"][
                    states["steps"]["tgt"]: end_idx_last_full_word
                ]
            )
            states["steps"]["tgt"] = end_idx_last_full_word
            states["segments"]["tgt"] += [word]

            return {'key': SEND, 'value': word}
        else:
            return None

    def read_action(self, states):
        return {'key': GET, 'value': None}

    def finish_action(self):
        return {'key': SEND, 'value': DEFAULT_EOS}

    def reset(self):
        pass

    def finish_eval(self, states, new_state):
        if len(new_state) == 0 and len(states["indices"]["src"]) == 0:
            return True
        return False

    def _append_indices(self, states, new_indices, key):
        states["indices"][key] += new_indices

    def _target_length(self, states):
        return len(states["tokens"]['tgt'])
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/simultaneous_translation/eval/scorers/scorer.py
examples/simultaneous_translation/eval/scorers/scorer.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from vizseq.scorers.bleu import BLEUScorer
from vizseq.scorers.ter import TERScorer
from vizseq.scorers.meteor import METEORScorer
from examples.simultaneous_translation.eval.eval_latency import LatencyScorer
from collections import defaultdict
import json
import os

DEFAULT_EOS = '</s>'


class SimulScorer(object):
    def __init__(self, args):
        self.tokenizer = args.tokenizer
        self.output_dir = args.output
        if args.output is not None:
            self.output_files = {
                "text": os.path.join(args.output, "text"),
                "delay": os.path.join(args.output, "delay"),
                "scores": os.path.join(args.output, "scores")
            }
        else:
            self.output_files = None
        self.eos = DEFAULT_EOS
        self.data = {"tgt": []}
        self.reset()

    def get_info(self):
        return {"num_sentences": len(self)}

    @staticmethod
    def add_args(parser):
        # fmt: off
        parser.add_argument('--src-file', type=str, required=True,
                            help='Source input file')
        parser.add_argument('--tgt-file', type=str, required=True,
                            help='Target reference file')
        parser.add_argument('--tokenizer', default="13a", choices=["none", "13a"],
                            help='Tokenizer used for sacrebleu')
        parser.add_argument('--output', type=str, default=None,
                            help='Path for output directory')
        # fmt: on

    def send_src(self, sent_id, *args):
        raise NotImplementedError

    def recv_hyp(self, sent_id, list_of_tokens):
        for token in list_of_tokens:
            self.translations[sent_id].append(
                (token, self.steps[sent_id])
            )

    def reset(self):
        self.steps = defaultdict(int)
        self.translations = defaultdict(list)

    def src_lengths(self):
        raise NotImplementedError

    def score(self):
        translations = []
        delays = []
        for i in range(1 + max(self.translations.keys())):
            translations += [" ".join(t[0] for t in self.translations[i][:-1])]
            delays += [[t[1] for t in self.translations[i]]]

        bleu_score = BLEUScorer(
            sent_level=False, corpus_level=True,
            extra_args={'bleu_tokenizer': self.tokenizer}
        ).score(translations, [self.data["tgt"]])

        ter_score = TERScorer(sent_level=False, corpus_level=True).score(
            translations, [self.data["tgt"]]
        )
        meteor_score = METEORScorer(sent_level=False, corpus_level=True).score(
            translations, [self.data["tgt"]]
        )

        latency_score = LatencyScorer().score(
            [
                {"src_len": src_len, "delays": delay}
                for src_len, delay in zip(self.src_lengths(), delays)
            ],
            start_from_zero=False
        )

        scores = {
            'BLEU': bleu_score[0],
            'TER': ter_score[0],
            'METEOR': meteor_score[0],
            'DAL': latency_score['differentiable_average_lagging'],
            'AL': latency_score['average_lagging'],
            'AP': latency_score['average_proportion'],
        }

        if self.output_files is not None:
            try:
                os.makedirs(self.output_dir, exist_ok=True)
                self.write_results_to_file(translations, delays, scores)
            except BaseException as be:
                print(f'Failed to write results to {self.output_dir}.')
                print(be)
                print('Skip writing predictions')

        return scores

    def write_results_to_file(self, translations, delays, scores):
        if self.output_files["text"] is not None:
            with open(self.output_files["text"], "w") as f:
                for line in translations:
                    f.write(line + "\n")

        if self.output_files["delay"] is not None:
            with open(self.output_files["delay"], "w") as f:
                for i, delay in enumerate(delays):
                    f.write(
                        json.dumps(
                            {
                                "src_len": self.src_lengths()[i],
                                "delays": delay
                            }
                        ) + "\n"
                    )

        with open(self.output_files["scores"], "w") as f:
            for key, value in scores.items():
                f.write(f"{key}, {value}\n")

    @classmethod
    def _load_text_file(cls, file, split=False):
        with open(file) as f:
            if split:
                return [r.strip().split() for r in f]
            else:
                return [r.strip() for r in f]

    @classmethod
    def _load_text_from_json(cls, file):
        list_to_return = []
        with open(file) as f:
            content = json.load(f)
            for item in content["utts"].values():
                list_to_return.append(item["output"]["text"].strip())
        return list_to_return

    @classmethod
    def _load_wav_info_from_json(cls, file):
        list_to_return = []
        with open(file) as f:
            content = json.load(f)
            for item in content["utts"].values():
                list_to_return.append(
                    {
                        "path": item["input"]["path"].strip(),
                        "length": item["input"]["length_ms"]
                    }
                )
        return list_to_return

    @classmethod
    def _load_wav_info_from_list(cls, file):
        list_to_return = []
        with open(file) as f:
            for line in f:
                list_to_return.append(
                    {
                        "path": line.strip(),
                    }
                )
        return list_to_return

    def __len__(self):
        return len(self.data["tgt"])
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/simultaneous_translation/eval/scorers/text_scorer.py
examples/simultaneous_translation/eval/scorers/text_scorer.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from . scorer import SimulScorer
from . import register_scorer


@register_scorer("text")
class SimulTextScorer(SimulScorer):
    def __init__(self, args):
        super().__init__(args)
        self.data = {
            "src": self._load_text_file(args.src_file, split=True),
            "tgt": self._load_text_file(args.tgt_file, split=False)
        }

    def send_src(self, sent_id, *args):
        if self.steps[sent_id] >= len(self.data["src"][sent_id]):
            dict_to_return = {
                "sent_id": sent_id,
                "segment_id": self.steps[sent_id],
                "segment": self.eos
            }
            # Consider EOS
            self.steps[sent_id] = len(self.data["src"][sent_id]) + 1
        else:
            dict_to_return = {
                "sent_id": sent_id,
                "segment_id": self.steps[sent_id],
                "segment": self.data["src"][sent_id][self.steps[sent_id]]
            }

            self.steps[sent_id] += 1

        return dict_to_return

    def src_lengths(self):
        # +1 for eos
        return [len(sent) + 1 for sent in self.data["src"]]
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/simultaneous_translation/eval/scorers/__init__.py
examples/simultaneous_translation/eval/scorers/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import importlib
import os

from fairseq import registry

(
    build_scorer,
    register_scorer,
    SCORER_REGISTRIES
) = registry.setup_registry('--scorer-type')

for file in os.listdir(os.path.dirname(__file__)):
    if file.endswith('.py') and not file.startswith('_'):
        module = file[:file.find('.py')]
        importlib.import_module('scorers.' + module)
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/simultaneous_translation/modules/monotonic_multihead_attention.py
examples/simultaneous_translation/modules/monotonic_multihead_attention.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import torch import torch.nn.functional as F import torch.nn as nn from fairseq import utils from fairseq.modules import MultiheadAttention from examples.simultaneous_translation.utils.functions import ( exclusive_cumprod, lengths_to_mask ) from fairseq.incremental_decoding_utils import with_incremental_state from fairseq.utils import convert_padding_direction from . import register_monotonic_attention @with_incremental_state class MonotonicAttention(nn.Module): """ Abstract class of monotonic attentions """ def __init__(self, args): self.eps = args.attention_eps self.mass_preservation = args.mass_preservation self.noise_mean = args.noise_mean self.noise_var = args.noise_var self.energy_bias_init = args.energy_bias_init self.energy_bias = ( nn.Parameter(self.energy_bias_init * torch.ones([1])) if args.energy_bias is True else 0 ) @staticmethod def add_args(parser): # fmt: off parser.add_argument('--no-mass-preservation', action="store_false", dest="mass_preservation", help='Do not stay on the last token when decoding') parser.add_argument('--mass-preservation', action="store_true", dest="mass_preservation", help='Stay on the last token when decoding') parser.set_defaults(mass_preservation=True) parser.add_argument('--noise-var', type=float, default=1.0, help='Variance of discretness noise') parser.add_argument('--noise-mean', type=float, default=0.0, help='Mean of discretness noise') parser.add_argument('--energy-bias', action="store_true", default=False, help='Bias for energy') parser.add_argument('--energy-bias-init', type=float, default=-2.0, help='Initial value of the bias for energy') parser.add_argument('--attention-eps', type=float, default=1e-6, help='Epsilon when calculating expected attention') # fmt: on def p_choose(self, *args): raise NotImplementedError def input_projections(self, *args): raise NotImplementedError def attn_energy(self, q_proj, k_proj, key_padding_mask=None): """ Calculating monotonic energies ============================================================ Expected input size q_proj: bsz * num_heads, tgt_len, self.head_dim k_proj: bsz * num_heads, src_len, self.head_dim key_padding_mask: bsz, src_len attn_mask: tgt_len, src_len """ bsz, tgt_len, embed_dim = q_proj.size() bsz = bsz // self.num_heads src_len = k_proj.size(1) attn_energy = torch.bmm(q_proj, k_proj.transpose(1, 2)) + self.energy_bias attn_energy = attn_energy.view(bsz, self.num_heads, tgt_len, src_len) if key_padding_mask is not None: attn_energy = attn_energy.masked_fill( key_padding_mask.unsqueeze(1).unsqueeze(2).bool(), float('-inf'), ) return attn_energy def expected_alignment_train(self, p_choose, key_padding_mask): """ Calculating expected alignment for MMA Mask is not need because p_choose will be 0 if masked q_ij = (1 − p_{ij−1})q_{ij−1} + a+{i−1j} a_ij = p_ij q_ij parellel solution: ai = p_i * cumprod(1 − pi) * cumsum(a_i / cumprod(1 − pi)) ============================================================ Expected input size p_choose: bsz * num_heads, tgt_len, src_len """ # p_choose: bsz * num_heads, tgt_len, src_len bsz_num_heads, tgt_len, src_len = p_choose.size() # cumprod_1mp : bsz * num_heads, tgt_len, src_len cumprod_1mp = exclusive_cumprod(1 - p_choose, dim=2, eps=self.eps) cumprod_1mp_clamp = torch.clamp(cumprod_1mp, self.eps, 1.0) init_attention = p_choose.new_zeros([bsz_num_heads, 1, src_len]) 
init_attention[:, :, 0] = 1.0 previous_attn = [init_attention] for i in range(tgt_len): # p_choose: bsz * num_heads, tgt_len, src_len # cumprod_1mp_clamp : bsz * num_heads, tgt_len, src_len # previous_attn[i]: bsz * num_heads, 1, src_len # alpha_i: bsz * num_heads, src_len alpha_i = ( p_choose[:, i] * cumprod_1mp[:, i] * torch.cumsum( previous_attn[i][:, 0] / cumprod_1mp_clamp[:, i], dim=1 ) ).clamp(0, 1.0) previous_attn.append(alpha_i.unsqueeze(1)) # alpha: bsz * num_heads, tgt_len, src_len alpha = torch.cat(previous_attn[1:], dim=1) if self.mass_preservation: # Last token has the residual probabilities alpha[:, :, -1] = 1 - alpha[:, :, :-1].sum(dim=-1).clamp(0.0, 1.0) assert not torch.isnan(alpha).any(), "NaN detected in alpha." return alpha def expected_alignment_infer(self, p_choose, key_padding_mask, incremental_state): """ Calculating mo alignment for MMA during inference time ============================================================ Expected input size p_choose: bsz * num_heads, tgt_len, src_len key_padding_mask: bsz * src_len incremental_state: dict """ # p_choose: bsz * self.num_heads, src_len bsz_num_heads, tgt_len, src_len = p_choose.size() # One token at a time assert tgt_len == 1 p_choose = p_choose[:, 0, :] monotonic_cache = self._get_monotonic_buffer(incremental_state) # prev_monotonic_step: bsz, num_heads bsz = bsz_num_heads // self.num_heads prev_monotonic_step = monotonic_cache.get( "step", p_choose.new_zeros([bsz, self.num_heads]).long() ) bsz, num_heads = prev_monotonic_step.size() assert num_heads == self.num_heads assert bsz * num_heads == bsz_num_heads # p_choose: bsz, num_heads, src_len p_choose = p_choose.view(bsz, num_heads, src_len) if key_padding_mask is not None: src_lengths = src_len - \ key_padding_mask.sum(dim=1, keepdim=True).long() else: src_lengths = prev_monotonic_step.new_ones(bsz, 1) * src_len # src_lengths: bsz, num_heads src_lengths = src_lengths.expand_as(prev_monotonic_step) # new_monotonic_step: bsz, num_heads new_monotonic_step = prev_monotonic_step step_offset = 0 if key_padding_mask is not None: if key_padding_mask[:, 0].any(): # left_pad_source = True: step_offset = key_padding_mask.sum(dim=-1, keepdim=True) max_steps = ( src_lengths - 1 if self.mass_preservation else src_lengths ) # finish_read: bsz, num_heads finish_read = new_monotonic_step.eq(max_steps) while finish_read.sum().item() < bsz * self.num_heads: # p_choose: bsz * self.num_heads, src_len # only choose the p at monotonic steps # p_choose_i: bsz , self.num_heads p_choose_i = ( p_choose .gather( 2, (step_offset + new_monotonic_step).unsqueeze(2) .clamp(0, src_len - 1) ) ).squeeze(2) action = ( (p_choose_i < 0.5) .type_as(prev_monotonic_step) .masked_fill(finish_read, 0) ) # 1 x bsz # sample actions on unfinished seq # 1 means stay, finish reading # 0 means leave, continue reading # dist = torch.distributions.bernoulli.Bernoulli(p_choose) # action = dist.sample().type_as(finish_read) * (1 - finish_read) new_monotonic_step += action finish_read = new_monotonic_step.eq(max_steps) | (action == 0) # finish_read = (~ (finish_read.sum(dim=1, keepdim=True) < self.num_heads / 2)) | finish_read monotonic_cache["step"] = new_monotonic_step # alpha: bsz * num_heads, 1, src_len # new_monotonic_step: bsz, num_heads alpha = ( p_choose .new_zeros([bsz * self.num_heads, src_len]) .scatter( 1, (step_offset + new_monotonic_step).view(bsz * self.num_heads, 1).clamp(0, src_len - 1), 1 ) ) if not self.mass_preservation: alpha = alpha.masked_fill( (new_monotonic_step == max_steps).view(bsz * 
self.num_heads, 1), 0 ) alpha = alpha.unsqueeze(1) self._set_monotonic_buffer(incremental_state, monotonic_cache) return alpha def v_proj_output(self, value): raise NotImplementedError def forward( self, query, key, value, key_padding_mask=None, incremental_state=None, *args, **kwargs, ): tgt_len, bsz, embed_dim = query.size() src_len = value.size(0) # stepwise prob # p_choose: bsz * self.num_heads, tgt_len, src_len p_choose = self.p_choose(query, key, key_padding_mask) # expected alignment alpha # bsz * self.num_heads, tgt_len, src_len if incremental_state is not None: alpha = self.expected_alignment_infer(p_choose, key_padding_mask, incremental_state) else: alpha = self.expected_alignment_train(p_choose, key_padding_mask) # expected attention beta # bsz * self.num_heads, tgt_len, src_len beta = self.expected_attention(alpha, query, key, value, key_padding_mask, incremental_state) attn_weights = beta v_proj = self.v_proj_output(value) attn = torch.bmm(attn_weights.type_as(v_proj), v_proj) attn = ( attn .transpose(0, 1) .contiguous() .view(tgt_len, bsz, embed_dim) ) attn = self.out_proj(attn) beta = beta.view(bsz, self.num_heads, tgt_len, src_len) alpha = alpha.view(bsz, self.num_heads, tgt_len, src_len) p_choose = p_choose.view(bsz, self.num_heads, tgt_len, src_len) return attn, {"alpha": alpha, "beta": beta, "p_choose": p_choose} def reorder_incremental_state(self, incremental_state, new_order): """Reorder buffered internal state (for incremental generation).""" super().reorder_incremental_state(incremental_state, new_order) input_buffer = self._get_monotonic_buffer(incremental_state) if input_buffer is not None: for k in input_buffer.keys(): input_buffer[k] = input_buffer[k].index_select(0, new_order) self._set_monotonic_buffer(incremental_state, input_buffer) def _get_monotonic_buffer(self, incremental_state): return utils.get_incremental_state( self, incremental_state, 'monotonic', ) or {} def _set_monotonic_buffer(self, incremental_state, buffer): utils.set_incremental_state( self, incremental_state, 'monotonic', buffer, ) def get_pointer(self, incremental_state): return utils.get_incremental_state( self, incremental_state, 'monotonic', ) or {} def get_fastest_pointer(self, incremental_state): return self.get_pointer(incremental_state)["step"].max(0)[0] def set_pointer(self, incremental_state, p_choose): curr_pointer = self.get_pointer(incremental_state) if len(curr_pointer) == 0: buffer = torch.zeros_like(p_choose) else: buffer = self.get_pointer(incremental_state)["step"] buffer += (p_choose < 0.5).type_as(buffer) utils.set_incremental_state( self, incremental_state, 'monotonic', {"step": buffer}, ) @register_monotonic_attention("hard_aligned") class MonotonicMultiheadAttentionHard(MonotonicAttention, MultiheadAttention): def __init__(self, args): MultiheadAttention.__init__( self, embed_dim=args.decoder_embed_dim, num_heads=args.decoder_attention_heads, kdim=getattr(args, 'encoder_embed_dim', None), vdim=getattr(args, 'encoder_embed_dim', None), dropout=args.attention_dropout, encoder_decoder_attention=True ) MonotonicAttention.__init__(self, args) self.k_in_proj = {"monotonic": self.k_proj} self.q_in_proj = {"monotonic": self.q_proj} self.v_in_proj = {"output": self.v_proj} def input_projections(self, query, key, value, name): """ Prepare inputs for multihead attention ============================================================ Expected input size query: tgt_len, bsz, embed_dim key: src_len, bsz, embed_dim value: src_len, bsz, embed_dim name: monotonic or soft """ if query is 
not None: bsz = query.size(1) q = self.q_in_proj[name](query) q *= self.scaling q = q.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1) else: q = None if key is not None: bsz = key.size(1) k = self.k_in_proj[name](key) k = k.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1) else: k = None if value is not None: bsz = value.size(1) v = self.v_in_proj[name](value) v = v.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1) else: v = None return q, k, v def p_choose(self, query, key, key_padding_mask=None): """ Calculating step wise prob for reading and writing 1 to read, 0 to write ============================================================ Expected input size query: bsz, tgt_len, embed_dim key: bsz, src_len, embed_dim value: bsz, src_len, embed_dim key_padding_mask: bsz, src_len attn_mask: bsz, src_len query: bsz, tgt_len, embed_dim """ # prepare inputs q_proj, k_proj, _ = self.input_projections(query, key, None, "monotonic") # attention energy attn_energy = self.attn_energy(q_proj, k_proj, key_padding_mask) noise = 0 if self.training: # add noise here to encourage discretness noise = ( torch .normal(self.noise_mean, self.noise_var, attn_energy.size()) .type_as(attn_energy) .to(attn_energy.device) ) p_choose = torch.sigmoid(attn_energy + noise) _, _, tgt_len, src_len = p_choose.size() # p_choose: bsz * self.num_heads, tgt_len, src_len return p_choose.view(-1, tgt_len, src_len) def expected_attention(self, alpha, *args): ''' For MMA-H, beta = alpha ''' return alpha def v_proj_output(self, value): _, _, v_proj = self.input_projections(None, None, value, "output") return v_proj @register_monotonic_attention("infinite_lookback") class MonotonicMultiheadAttentionInfiniteLookback(MonotonicMultiheadAttentionHard): def __init__(self, args): super().__init__(args) self.init_soft_attention() def init_soft_attention(self): self.k_proj_soft = nn.Linear(self.kdim, self.embed_dim, bias=True) self.q_proj_soft = nn.Linear(self.embed_dim, self.embed_dim, bias=True) self.k_in_proj["soft"] = self.k_proj_soft self.q_in_proj["soft"] = self.q_proj_soft if self.qkv_same_dim: # Empirically observed the convergence to be much better with # the scaled initialization nn.init.xavier_uniform_(self.k_in_proj["soft"].weight, gain=1 / math.sqrt(2)) nn.init.xavier_uniform_(self.q_in_proj["soft"].weight, gain=1 / math.sqrt(2)) else: nn.init.xavier_uniform_(self.k_in_proj["soft"].weight) nn.init.xavier_uniform_(self.q_in_proj["soft"].weight) def expected_attention(self, alpha, query, key, value, key_padding_mask, incremental_state): # monotonic attention, we will calculate milk here bsz_x_num_heads, tgt_len, src_len = alpha.size() bsz = int(bsz_x_num_heads / self.num_heads) q, k, _ = self.input_projections(query, key, None, "soft") soft_energy = self.attn_energy(q, k, key_padding_mask) assert list(soft_energy.size()) == [bsz, self.num_heads, tgt_len, src_len] soft_energy = soft_energy.view(bsz * self.num_heads, tgt_len, src_len) if incremental_state is not None: monotonic_cache = self._get_monotonic_buffer(incremental_state) monotonic_step = monotonic_cache["step"] + 1 step_offset = 0 if key_padding_mask is not None: if key_padding_mask[:, 0].any(): # left_pad_source = True: step_offset = key_padding_mask.sum(dim=-1, keepdim=True) monotonic_step += step_offset mask = lengths_to_mask( monotonic_step.view(-1), soft_energy.size(2), 1).unsqueeze(1) soft_energy = soft_energy.masked_fill(~ mask.bool(), float('-inf')) soft_energy = soft_energy - 
soft_energy.max(dim=2, keepdim=True)[0] exp_soft_energy = torch.exp(soft_energy) exp_soft_energy_sum = exp_soft_energy.sum(dim=2) beta = exp_soft_energy / exp_soft_energy_sum.unsqueeze(2) else: # bsz * num_heads, tgt_len, src_len soft_energy = soft_energy - soft_energy.max(dim=2, keepdim=True)[0] exp_soft_energy = torch.exp(soft_energy) exp_soft_energy_cumsum = torch.cumsum(exp_soft_energy, dim=2) if key_padding_mask is not None: if key_padding_mask.any(): exp_soft_energy_cumsum = ( exp_soft_energy_cumsum.view(-1, self.num_heads, tgt_len, src_len) .masked_fill(key_padding_mask.unsqueeze(1).unsqueeze(1), self.eps) .view(-1, tgt_len, src_len) ) inner_items = alpha / exp_soft_energy_cumsum beta = exp_soft_energy * torch.cumsum(inner_items.flip(dims=[2]), dim=2).flip(dims=[2]) beta = self.dropout_module(beta) assert not torch.isnan(beta).any(), "NaN detected in beta." return beta @register_monotonic_attention("waitk") class MonotonicMultiheadAttentionWaitk(MonotonicMultiheadAttentionInfiniteLookback): def __init__(self, args): super().__init__(args) self.q_in_proj["soft"] = self.q_in_proj["monotonic"] self.k_in_proj["soft"] = self.k_in_proj["monotonic"] self.waitk_lagging = args.waitk_lagging assert self.waitk_lagging > 0, f"Lagging has to be larger than 0, got {self.waitk_lagging}." @staticmethod def add_args(parser): super( MonotonicMultiheadAttentionWaitk, MonotonicMultiheadAttentionWaitk, ).add_args(parser) parser.add_argument('--waitk-lagging', type=int, required=True, help='Wait k lagging') def p_choose(self, query, key, key_padding_mask=None, attn_mask=None, incremental_state=None): """ query: tgt_len, bsz, embed_dim key: src_len, bsz, embed_dim key_padding_mask: bsz, src_len """ src_len, bsz, _ = key.size() tgt_len, bsz, _ = query.size() p_choose = query.new_ones(bsz, tgt_len, src_len) p_choose = torch.tril(p_choose, diagonal=self.waitk_lagging - 1) p_choose = torch.triu(p_choose, diagonal=self.waitk_lagging - 1) if key_padding_mask is not None and key_padding_mask[:, 0].eq(1).any(): # Left pad source # add -1 to the end p_choose = p_choose.masked_fill(key_padding_mask.float().flip(1).unsqueeze(1).bool(), -1) p_choose = convert_padding_direction(p_choose.view(-1, src_len).long(), padding_idx=-1, right_to_left=True) p_choose = p_choose.view(bsz, tgt_len, src_len).type_as(query) # remove -1 p_choose[p_choose.eq(-1)] = 0 # Extend to each head p_choose = ( p_choose.contiguous().unsqueeze(1) .expand(-1, self.num_heads, -1, -1).contiguous() .view(-1, tgt_len, src_len) ) return p_choose
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
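
A minimal standalone sketch (not part of the repository) of the wait-k schedule that MonotonicMultiheadAttentionWaitk.p_choose builds above; the lengths and lagging value are illustrative assumptions, and the padding and per-head expansion handling is omitted.

import torch

tgt_len, src_len, waitk_lagging = 4, 6, 2  # hypothetical sizes

# Keep only the band where source index == target index + (lagging - 1):
# each target step reads until the source is k tokens ahead, then writes.
p_choose = torch.ones(tgt_len, src_len)
p_choose = torch.tril(p_choose, diagonal=waitk_lagging - 1)
p_choose = torch.triu(p_choose, diagonal=waitk_lagging - 1)
print(p_choose)
# tensor([[0., 1., 0., 0., 0., 0.],
#         [0., 0., 1., 0., 0., 0.],
#         [0., 0., 0., 1., 0., 0.],
#         [0., 0., 0., 0., 1., 0.]])
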
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/simultaneous_translation/modules/__init__.py
examples/simultaneous_translation/modules/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import importlib import os from fairseq import registry ( build_monotonic_attention, register_monotonic_attention, MONOTONIC_ATTENTION_REGISTRY ) = registry.setup_registry('--simul-type') for file in os.listdir(os.path.dirname(__file__)): if file.endswith('.py') and not file.startswith('_'): model_name = file[:file.find('.py')] importlib.import_module('examples.simultaneous_translation.modules.' + model_name)
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
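
A hedged sketch of how a new attention variant would plug into the registry created above; the name "my_attention" and the class are hypothetical, and setup_registry is called here without a base class, so any class can register.

import torch.nn as nn
from examples.simultaneous_translation.modules import register_monotonic_attention

@register_monotonic_attention("my_attention")  # hypothetical --simul-type value
class MyAttention(nn.Module):
    def __init__(self, args):
        super().__init__()
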
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/simultaneous_translation/modules/monotonic_transformer_layer.py
examples/simultaneous_translation/modules/monotonic_transformer_layer.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from fairseq.modules import ( LayerNorm, TransformerEncoderLayer, TransformerDecoderLayer ) from . import build_monotonic_attention class TransformerMonotonicEncoderLayer(TransformerEncoderLayer): def forward(self, x, encoder_padding_mask): seq_len, _, _ = x.size() attn_mask = x.new_ones([seq_len, seq_len]).triu(1) attn_mask = attn_mask.masked_fill(attn_mask.bool(), float('-inf')) return super().forward(x, encoder_padding_mask, attn_mask) class TransformerMonotonicDecoderLayer(TransformerDecoderLayer): def __init__(self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False): super().__init__( args, no_encoder_attn=True, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn ) self.encoder_attn = build_monotonic_attention(args) self.encoder_attn_layer_norm = LayerNorm( self.embed_dim, export=getattr(args, 'char_inputs', False) ) def prune_incremental_state(self, incremental_state): def prune(module): input_buffer = module._get_input_buffer(incremental_state) for key in ["prev_key", "prev_value"]: if input_buffer[key].size(2) > 1: input_buffer[key] = input_buffer[key][:, :, :-1, :] else: input_buffer = {} break module._set_input_buffer(incremental_state, input_buffer) prune(self.self_attn) def get_steps(self, incremental_state): return ( self.encoder_attn ._get_monotonic_buffer( incremental_state ).get("step", 0) )
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
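
The causal mask constructed in TransformerMonotonicEncoderLayer.forward above, shown in isolation; seq_len is an arbitrary example value.

import torch

seq_len = 4
attn_mask = torch.ones(seq_len, seq_len).triu(1)
attn_mask = attn_mask.masked_fill(attn_mask.bool(), float('-inf'))
print(attn_mask)
# tensor([[0., -inf, -inf, -inf],
#         [0., 0., -inf, -inf],
#         [0., 0., 0., -inf],
#         [0., 0., 0., 0.]])
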
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/backtranslation/deduplicate_lines.py
examples/backtranslation/deduplicate_lines.py
#!/usr/bin/python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import fileinput import hashlib from multiprocessing import Pool import sys def get_hashes_and_lines(raw_line): hash = hashlib.md5(raw_line).hexdigest() return hash, raw_line def main(): parser = argparse.ArgumentParser() parser.add_argument('--workers', type=int, default=10) parser.add_argument('files', nargs='*', help='input files') args = parser.parse_args() seen = set() with fileinput.input(args.files, mode='rb') as h: pool = Pool(args.workers) results = pool.imap_unordered(get_hashes_and_lines, h, 1000) for i, (hash, raw_line) in enumerate(results): if hash not in seen: seen.add(hash) sys.stdout.buffer.write(raw_line) if i % 1000000 == 0: print(i, file=sys.stderr, end="", flush=True) elif i % 100000 == 0: print(".", file=sys.stderr, end="", flush=True) print(file=sys.stderr, flush=True) if __name__ == '__main__': main()
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
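
The hash-based dedup logic above, reduced to a single-process sketch over an in-memory list; the sample lines are invented.

import hashlib

lines = [b"hello\n", b"world\n", b"hello\n"]
seen = set()
for raw_line in lines:
    digest = hashlib.md5(raw_line).hexdigest()  # same fingerprint as the script
    if digest not in seen:
        seen.add(digest)
        print(raw_line.decode(), end="")
# hello
# world
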
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/backtranslation/extract_bt_data.py
examples/backtranslation/extract_bt_data.py
#!/usr/bin/env python # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import fileinput from tqdm import tqdm def main(): parser = argparse.ArgumentParser(description=( 'Extract back-translations from the stdout of fairseq-generate. ' 'If there are multiple hypotheses for a source, we only keep the first one. ' )) parser.add_argument('--output', required=True, help='output prefix') parser.add_argument('--srclang', required=True, help='source language (extracted from H-* lines)') parser.add_argument('--tgtlang', required=True, help='target language (extracted from S-* lines)') parser.add_argument('--minlen', type=int, help='min length filter') parser.add_argument('--maxlen', type=int, help='max length filter') parser.add_argument('--ratio', type=float, help='ratio filter') parser.add_argument('files', nargs='*', help='input files') args = parser.parse_args() def validate(src, tgt): srclen = len(src.split(' ')) if src != '' else 0 tgtlen = len(tgt.split(' ')) if tgt != '' else 0 if ( (args.minlen is not None and (srclen < args.minlen or tgtlen < args.minlen)) or (args.maxlen is not None and (srclen > args.maxlen or tgtlen > args.maxlen)) or (args.ratio is not None and (max(srclen, tgtlen) / float(min(srclen, tgtlen)) > args.ratio)) ): return False return True def safe_index(toks, index, default): try: return toks[index] except IndexError: return default with open(args.output + '.' + args.srclang, 'w') as src_h, \ open(args.output + '.' + args.tgtlang, 'w') as tgt_h: # no target buffered yet; avoids UnboundLocalError if the stream starts with an H- line tgt = None for line in tqdm(fileinput.input(args.files)): if line.startswith('S-'): tgt = safe_index(line.rstrip().split('\t'), 1, '') elif line.startswith('H-'): if tgt is not None: src = safe_index(line.rstrip().split('\t'), 2, '') if validate(src, tgt): print(src, file=src_h) print(tgt, file=tgt_h) tgt = None if __name__ == '__main__': main()
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
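
The validate() filter above exercised standalone, with the optional thresholds pinned to example values (the script leaves them at None unless flags are passed) and a divide-by-zero guard added for empty sides.

def validate(src, tgt, minlen=1, maxlen=250, ratio=1.5):
    srclen = len(src.split(' ')) if src != '' else 0
    tgtlen = len(tgt.split(' ')) if tgt != '' else 0
    if srclen == 0 or tgtlen == 0:
        return False  # guard not in the original; it would divide by zero
    return (
        minlen <= srclen <= maxlen
        and minlen <= tgtlen <= maxlen
        and max(srclen, tgtlen) / min(srclen, tgtlen) <= ratio
    )

print(validate("ein kurzer Satz", "a short sentence"))     # True
print(validate("kurz", "a very very very long sentence"))  # False: ratio 6.0
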
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/roberta/preprocess_RACE.py
examples/roberta/preprocess_RACE.py
#!/usr/bin/env python # Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. import argparse import json import os import re class InputExample: def __init__(self, paragraph, qa_list, label): self.paragraph = paragraph self.qa_list = qa_list self.label = label def get_examples(data_dir, set_type): """ Extract paragraph and question-answer list from each json file """ examples = [] levels = ["middle", "high"] set_type_c = set_type.split('-') if len(set_type_c) == 2: levels = [set_type_c[1]] set_type = set_type_c[0] for level in levels: cur_dir = os.path.join(data_dir, set_type, level) for filename in os.listdir(cur_dir): cur_path = os.path.join(cur_dir, filename) with open(cur_path, 'r') as f: cur_data = json.load(f) answers = cur_data["answers"] options = cur_data["options"] questions = cur_data["questions"] context = cur_data["article"].replace("\n", " ") context = re.sub(r'\s+', ' ', context) for i in range(len(answers)): label = ord(answers[i]) - ord("A") qa_list = [] question = questions[i] for j in range(4): option = options[i][j] if "_" in question: qa_cat = question.replace("_", option) else: qa_cat = " ".join([question, option]) qa_cat = re.sub(r'\s+', ' ', qa_cat) qa_list.append(qa_cat) examples.append(InputExample(context, qa_list, label)) return examples def main(): """ Helper script to extract paragraphs, questions, and answers from the RACE dataset. """ parser = argparse.ArgumentParser() parser.add_argument( "--input-dir", help='input directory for downloaded RACE dataset', ) parser.add_argument( "--output-dir", help='output directory for extracted data', ) args = parser.parse_args() if not os.path.exists(args.output_dir): os.makedirs(args.output_dir, exist_ok=True) for set_type in ["train", "dev", "test-middle", "test-high"]: examples = get_examples(args.input_dir, set_type) qa_file_paths = [os.path.join(args.output_dir, set_type + ".input" + str(i + 1)) for i in range(4)] qa_files = [open(qa_file_path, 'w') for qa_file_path in qa_file_paths] outf_context_path = os.path.join(args.output_dir, set_type + ".input0") outf_label_path = os.path.join(args.output_dir, set_type + ".label") outf_context = open(outf_context_path, 'w') outf_label = open(outf_label_path, 'w') for example in examples: outf_context.write(example.paragraph + '\n') for i in range(4): qa_files[i].write(example.qa_list[i] + '\n') outf_label.write(str(example.label) + '\n') for f in qa_files: f.close() outf_label.close() outf_context.close() if __name__ == '__main__': main()
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/roberta/multiprocessing_bpe_encoder.py
examples/roberta/multiprocessing_bpe_encoder.py
#!/usr/bin/env python # Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. import argparse import contextlib import sys from collections import Counter from multiprocessing import Pool from fairseq.data.encoders.gpt2_bpe import get_encoder def main(): """ Helper script to encode raw text with the GPT-2 BPE using multiple processes. The encoder.json and vocab.bpe files can be obtained here: - https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/encoder.json - https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/vocab.bpe """ parser = argparse.ArgumentParser() parser.add_argument( "--encoder-json", help='path to encoder.json', ) parser.add_argument( "--vocab-bpe", type=str, help='path to vocab.bpe', ) parser.add_argument( "--inputs", nargs="+", default=['-'], help="input files to filter/encode", ) parser.add_argument( "--outputs", nargs="+", default=['-'], help="path to save encoded outputs", ) parser.add_argument( "--keep-empty", action="store_true", help="keep empty lines", ) parser.add_argument("--workers", type=int, default=20) args = parser.parse_args() assert len(args.inputs) == len(args.outputs), \ "number of input and output paths should match" with contextlib.ExitStack() as stack: inputs = [ stack.enter_context(open(input, "r", encoding="utf-8")) if input != "-" else sys.stdin for input in args.inputs ] outputs = [ stack.enter_context(open(output, "w", encoding="utf-8")) if output != "-" else sys.stdout for output in args.outputs ] encoder = MultiprocessingEncoder(args) pool = Pool(args.workers, initializer=encoder.initializer) encoded_lines = pool.imap(encoder.encode_lines, zip(*inputs), 100) stats = Counter() for i, (filt, enc_lines) in enumerate(encoded_lines, start=1): if filt == "PASS": for enc_line, output_h in zip(enc_lines, outputs): print(enc_line, file=output_h) else: stats["num_filtered_" + filt] += 1 if i % 10000 == 0: print("processed {} lines".format(i), file=sys.stderr) for k, v in stats.most_common(): print("[{}] filtered {} lines".format(k, v), file=sys.stderr) class MultiprocessingEncoder(object): def __init__(self, args): self.args = args def initializer(self): global bpe bpe = get_encoder(self.args.encoder_json, self.args.vocab_bpe) def encode(self, line): global bpe ids = bpe.encode(line) return list(map(str, ids)) def decode(self, tokens): global bpe return bpe.decode(tokens) def encode_lines(self, lines): """ Encode a set of lines. All lines will be encoded together. """ enc_lines = [] for line in lines: line = line.strip() if len(line) == 0 and not self.args.keep_empty: return ["EMPTY", None] tokens = self.encode(line) enc_lines.append(" ".join(tokens)) return ["PASS", enc_lines] def decode_lines(self, lines): dec_lines = [] for line in lines: tokens = map(int, line.strip().split()) dec_lines.append(self.decode(tokens)) return ["PASS", dec_lines] if __name__ == "__main__": main()
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
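
The Pool-initializer pattern used by MultiprocessingEncoder above, with a dummy per-worker encoder standing in for the GPT-2 BPE; everything here is illustrative.

from multiprocessing import Pool

def initializer():
    # runs once per worker, like MultiprocessingEncoder.initializer above
    global encode_fn
    encode_fn = lambda line: " ".join(str(ord(c)) for c in line)

def encode_line(line):
    return encode_fn(line)

if __name__ == "__main__":
    with Pool(2, initializer=initializer) as pool:
        for enc in pool.imap(encode_line, ["ab", "cd"], 2):
            print(enc)  # "97 98" then "99 100"
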
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/roberta/commonsense_qa/commonsense_qa_task.py
examples/roberta/commonsense_qa/commonsense_qa_task.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import json import os import numpy as np import torch from fairseq.data import ( data_utils, Dictionary, encoders, IdDataset, ListDataset, NestedDictionaryDataset, NumSamplesDataset, NumelDataset, RawLabelDataset, RightPadDataset, SortDataset, ) from fairseq.tasks import register_task, LegacyFairseqTask @register_task('commonsense_qa') class CommonsenseQATask(LegacyFairseqTask): """Task to finetune RoBERTa for Commonsense QA.""" @staticmethod def add_args(parser): """Add task-specific arguments to the parser.""" parser.add_argument('data', metavar='DIR', help='path to data directory; we load <split>.jsonl') parser.add_argument('--init-token', type=int, default=None, help='add token at the beginning of each batch item') parser.add_argument('--num-classes', type=int, default=5) def __init__(self, args, vocab): super().__init__(args) self.vocab = vocab self.mask = vocab.add_symbol('<mask>') self.bpe = encoders.build_bpe(args) @classmethod def load_dictionary(cls, filename): """Load the dictionary from the filename Args: filename (str): the filename """ dictionary = Dictionary.load(filename) dictionary.add_symbol('<mask>') return dictionary @classmethod def setup_task(cls, args, **kwargs): assert args.criterion == 'sentence_ranking', 'Must set --criterion=sentence_ranking' # load data and label dictionaries vocab = cls.load_dictionary(os.path.join(args.data, 'dict.txt')) print('| dictionary: {} types'.format(len(vocab))) return cls(args, vocab) def load_dataset(self, split, epoch=1, combine=False, data_path=None, return_only=False, **kwargs): """Load a given dataset split. Args: split (str): name of the split (e.g., train, valid, test) """ def binarize(s, append_bos=False): if self.bpe is not None: s = self.bpe.encode(s) tokens = self.vocab.encode_line( s, append_eos=True, add_if_not_exist=False, ).long() if append_bos and self.args.init_token is not None: tokens = torch.cat([tokens.new([self.args.init_token]), tokens]) return tokens if data_path is None: data_path = os.path.join(self.args.data, split + '.jsonl') if not os.path.exists(data_path): raise FileNotFoundError('Cannot find data: {}'.format(data_path)) src_tokens = [[] for i in range(self.args.num_classes)] src_lengths = [[] for i in range(self.args.num_classes)] labels = [] with open(data_path) as h: for line in h: example = json.loads(line.strip()) if 'answerKey' in example: label = ord(example['answerKey']) - ord('A') labels.append(label) question = example['question']['stem'] assert len(example['question']['choices']) == self.args.num_classes # format: `<s> Q: Where would I not want a fox? 
</s> A: hen house </s>` question = 'Q: ' + question question_toks = binarize(question, append_bos=True) for i, choice in enumerate(example['question']['choices']): src = 'A: ' + choice['text'] src_bin = torch.cat([question_toks, binarize(src)]) src_tokens[i].append(src_bin) src_lengths[i].append(len(src_bin)) assert all(len(src_tokens[0]) == len(src_tokens[i]) for i in range(self.args.num_classes)) assert len(src_tokens[0]) == len(src_lengths[0]) assert len(labels) == 0 or len(labels) == len(src_tokens[0]) for i in range(self.args.num_classes): src_lengths[i] = np.array(src_lengths[i]) src_tokens[i] = ListDataset(src_tokens[i], src_lengths[i]) src_lengths[i] = ListDataset(src_lengths[i]) dataset = { 'id': IdDataset(), 'nsentences': NumSamplesDataset(), 'ntokens': NumelDataset(src_tokens[0], reduce=True), } for i in range(self.args.num_classes): dataset.update({ 'net_input{}'.format(i + 1): { 'src_tokens': RightPadDataset( src_tokens[i], pad_idx=self.source_dictionary.pad(), ), 'src_lengths': src_lengths[i], } }) if len(labels) > 0: dataset.update({'target': RawLabelDataset(labels)}) dataset = NestedDictionaryDataset( dataset, sizes=[np.maximum.reduce([src_token.sizes for src_token in src_tokens])], ) with data_utils.numpy_seed(self.args.seed): dataset = SortDataset( dataset, # shuffle sort_order=[np.random.permutation(len(dataset))], ) print('| Loaded {} with {} samples'.format(split, len(dataset))) self.datasets[split] = dataset return self.datasets[split] def build_model(self, args): from fairseq import models model = models.build_model(args, self) model.register_classification_head( 'sentence_classification_head', num_classes=1, ) return model @property def source_dictionary(self): return self.vocab @property def target_dictionary(self): return self.vocab
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
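
A rough string-level illustration of the `<s> Q: ... </s> A: ... </s>` format assembled in load_dataset() above; BPE encoding and binarization are skipped, and the question and choices are invented.

question = 'Q: ' + 'Where would I not want a fox?'
for choice in ['hen house', 'england', 'mountains']:
    # binarize() would BPE-encode this, prepend <s> via --init-token,
    # and append </s> for both the question and each answer candidate
    print('<s> ' + question + ' </s> ' + 'A: ' + choice + ' </s>')
# <s> Q: Where would I not want a fox? </s> A: hen house </s>
# ...
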
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/roberta/commonsense_qa/__init__.py
examples/roberta/commonsense_qa/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from . import commonsense_qa_task # noqa
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/roberta/wsc/wsc_utils.py
examples/roberta/wsc/wsc_utils.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from functools import lru_cache import json def convert_sentence_to_json(sentence): if '_' in sentence: prefix, rest = sentence.split('_', 1) query, rest = rest.split('_', 1) query_index = len(prefix.rstrip().split(' ')) else: query, query_index = None, None prefix, rest = sentence.split('[', 1) pronoun, rest = rest.split(']', 1) pronoun_index = len(prefix.rstrip().split(' ')) sentence = sentence.replace('_', '').replace('[', '').replace(']', '') return { 'idx': 0, 'text': sentence, 'target': { 'span1_index': query_index, 'span1_text': query, 'span2_index': pronoun_index, 'span2_text': pronoun, }, } def extended_noun_chunks(sentence): noun_chunks = {(np.start, np.end) for np in sentence.noun_chunks} np_start, cur_np = 0, 'NONE' for i, token in enumerate(sentence): np_type = token.pos_ if token.pos_ in {'NOUN', 'PROPN'} else 'NONE' if np_type != cur_np: if cur_np != 'NONE': noun_chunks.add((np_start, i)) if np_type != 'NONE': np_start = i cur_np = np_type if cur_np != 'NONE': noun_chunks.add((np_start, len(sentence))) return [sentence[s:e] for (s, e) in sorted(noun_chunks)] def find_token(sentence, start_pos): found_tok = None for tok in sentence: if tok.idx == start_pos: found_tok = tok break return found_tok def find_span(sentence, search_text, start=0): search_text = search_text.lower() for tok in sentence[start:]: remainder = sentence[tok.i:].text.lower() if remainder.startswith(search_text): len_to_consume = len(search_text) start_idx = tok.idx for next_tok in sentence[tok.i:]: end_idx = next_tok.idx + len(next_tok.text) if end_idx - start_idx == len_to_consume: span = sentence[tok.i:next_tok.i + 1] return span return None @lru_cache(maxsize=1) def get_detokenizer(): from sacremoses import MosesDetokenizer detok = MosesDetokenizer(lang='en') return detok @lru_cache(maxsize=1) def get_spacy_nlp(): import en_core_web_lg nlp = en_core_web_lg.load() return nlp def jsonl_iterator(input_fname, positive_only=False, ngram_order=3, eval=False): detok = get_detokenizer() nlp = get_spacy_nlp() with open(input_fname) as fin: for line in fin: sample = json.loads(line.strip()) if positive_only and 'label' in sample and not sample['label']: # only consider examples where the query is correct continue target = sample['target'] # clean up the query query = target['span1_text'] if query is not None: if '\n' in query: continue if query.endswith('.') or query.endswith(','): query = query[:-1] # split tokens tokens = sample['text'].split(' ') def strip_pronoun(x): return x.rstrip('.,"') # find the pronoun pronoun_idx = target['span2_index'] pronoun = strip_pronoun(target['span2_text']) if strip_pronoun(tokens[pronoun_idx]) != pronoun: # hack: sometimes the index is misaligned if strip_pronoun(tokens[pronoun_idx + 1]) == pronoun: pronoun_idx += 1 else: raise Exception('Misaligned pronoun!') assert strip_pronoun(tokens[pronoun_idx]) == pronoun # split tokens before and after the pronoun before = tokens[:pronoun_idx] after = tokens[pronoun_idx + 1:] # the GPT BPE attaches leading spaces to tokens, so we keep track # of whether we need spaces before or after the pronoun leading_space = ' ' if pronoun_idx > 0 else '' trailing_space = ' ' if len(after) > 0 else '' # detokenize before = detok.detokenize(before, return_str=True) pronoun = detok.detokenize([pronoun], return_str=True) after = detok.detokenize(after, return_str=True) # hack: 
when the pronoun ends in a period (or comma), move the # punctuation to the "after" part if pronoun.endswith('.') or pronoun.endswith(','): after = pronoun[-1] + trailing_space + after pronoun = pronoun[:-1] # hack: when the "after" part begins with a comma or period, remove # the trailing space if after.startswith('.') or after.startswith(','): trailing_space = '' # parse sentence with spacy sentence = nlp(before + leading_space + pronoun + trailing_space + after) # find pronoun span start = len(before + leading_space) first_pronoun_tok = find_token(sentence, start_pos=start) pronoun_span = find_span(sentence, pronoun, start=first_pronoun_tok.i) assert pronoun_span.text == pronoun if eval: # convert to format where pronoun is surrounded by "[]" and # query is surrounded by "_" query_span = find_span(sentence, query) query_with_ws = '_{}_{}'.format( query_span.text, (' ' if query_span.text_with_ws.endswith(' ') else '') ) pronoun_with_ws = '[{}]{}'.format( pronoun_span.text, (' ' if pronoun_span.text_with_ws.endswith(' ') else '') ) if query_span.start < pronoun_span.start: first = (query_span, query_with_ws) second = (pronoun_span, pronoun_with_ws) else: first = (pronoun_span, pronoun_with_ws) second = (query_span, query_with_ws) sentence = ( sentence[:first[0].start].text_with_ws + first[1] + sentence[first[0].end:second[0].start].text_with_ws + second[1] + sentence[second[0].end:].text ) yield sentence, sample.get('label', None) else: yield sentence, pronoun_span, query, sample.get('label', None) def winogrande_jsonl_iterator(input_fname, eval=False): with open(input_fname) as fin: for line in fin: sample = json.loads(line.strip()) sentence, option1, option2 = sample['sentence'], sample['option1'],\ sample['option2'] pronoun_span = (sentence.index('_'), sentence.index('_') + 1) if eval: query, cand = option1, option2 else: query = option1 if sample['answer'] == '1' else option2 cand = option2 if sample['answer'] == '1' else option1 yield sentence, pronoun_span, query, cand def filter_noun_chunks(chunks, exclude_pronouns=False, exclude_query=None, exact_match=False): if exclude_pronouns: chunks = [ np for np in chunks if ( np.lemma_ != '-PRON-' and not all(tok.pos_ == 'PRON' for tok in np) ) ] if exclude_query is not None: excl_txt = [exclude_query.lower()] filtered_chunks = [] for chunk in chunks: lower_chunk = chunk.text.lower() found = False for excl in excl_txt: if ( (not exact_match and (lower_chunk in excl or excl in lower_chunk)) or lower_chunk == excl ): found = True break if not found: filtered_chunks.append(chunk) chunks = filtered_chunks return chunks
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/roberta/wsc/wsc_task.py
examples/roberta/wsc/wsc_task.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import json import os import tempfile import numpy as np import torch import torch.nn.functional as F from fairseq import utils from fairseq.data import ( data_utils, Dictionary, encoders, IdDataset, ListDataset, NestedDictionaryDataset, NumSamplesDataset, NumelDataset, PadDataset, SortDataset, ) from fairseq.tasks import register_task, LegacyFairseqTask from . import wsc_utils @register_task('wsc') class WSCTask(LegacyFairseqTask): """Task to finetune RoBERTa for Winograd Schemas.""" @staticmethod def add_args(parser): """Add task-specific arguments to the parser.""" parser.add_argument('data', metavar='DIR', help='path to data directory; we load <split>.jsonl') parser.add_argument('--init-token', type=int, default=None, help='add token at the beginning of each batch item') def __init__(self, args, vocab): super().__init__(args) self.vocab = vocab self.mask = vocab.add_symbol('<mask>') self.bpe = encoders.build_bpe(args) self.tokenizer = encoders.build_tokenizer(args) # hack to handle GPT-2 BPE, which includes leading spaces if args.bpe == 'gpt2': self.leading_space = True self.trailing_space = False else: self.leading_space = False self.trailing_space = True @classmethod def load_dictionary(cls, filename): """Load the dictionary from the filename Args: filename (str): the filename """ dictionary = Dictionary.load(filename) dictionary.add_symbol('<mask>') return dictionary @classmethod def setup_task(cls, args, **kwargs): assert args.criterion == 'wsc', 'Must set --criterion=wsc' # load data and label dictionaries vocab = cls.load_dictionary(os.path.join(args.data, 'dict.txt')) print('| dictionary: {} types'.format(len(vocab))) return cls(args, vocab) def binarize(self, s: str, append_eos: bool = False): if self.tokenizer is not None: s = self.tokenizer.encode(s) if self.bpe is not None: s = self.bpe.encode(s) tokens = self.vocab.encode_line( s, append_eos=append_eos, add_if_not_exist=False, ).long() if self.args.init_token is not None: tokens = torch.cat([tokens.new([self.args.init_token]), tokens]) return tokens def binarize_with_mask(self, txt, prefix, suffix, leading_space, trailing_space): toks = self.binarize( prefix + leading_space + txt + trailing_space + suffix, append_eos=True, ) mask = torch.zeros_like(toks, dtype=torch.bool) mask_start = len(self.binarize(prefix)) mask_size = len(self.binarize(leading_space + txt)) mask[mask_start:mask_start + mask_size] = 1 return toks, mask def load_dataset(self, split, epoch=1, combine=False, data_path=None, return_only=False, **kwargs): """Load a given dataset split. 
Args: split (str): name of the split (e.g., train, valid, test) """ if data_path is None: data_path = os.path.join(self.args.data, split + '.jsonl') if not os.path.exists(data_path): raise FileNotFoundError('Cannot find data: {}'.format(data_path)) query_tokens = [] query_masks = [] query_lengths = [] candidate_tokens = [] candidate_masks = [] candidate_lengths = [] labels = [] for sentence, pronoun_span, query, label in wsc_utils.jsonl_iterator(data_path): prefix = sentence[:pronoun_span.start].text suffix = sentence[pronoun_span.end:].text_with_ws # spaCy spans include trailing spaces, but we need to know about # leading spaces for the GPT-2 BPE leading_space = ' ' if sentence[:pronoun_span.start].text_with_ws.endswith(' ') else '' trailing_space = ' ' if pronoun_span.text_with_ws.endswith(' ') else '' # get noun phrases, excluding pronouns and anything overlapping with the query cand_spans = wsc_utils.filter_noun_chunks( wsc_utils.extended_noun_chunks(sentence), exclude_pronouns=True, exclude_query=query, exact_match=False, ) if query is not None: query_toks, query_mask = self.binarize_with_mask( query, prefix, suffix, leading_space, trailing_space ) query_len = len(query_toks) else: query_toks, query_mask, query_len = None, None, 0 query_tokens.append(query_toks) query_masks.append(query_mask) query_lengths.append(query_len) cand_toks, cand_masks = [], [] for cand_span in cand_spans: toks, mask = self.binarize_with_mask( cand_span.text, prefix, suffix, leading_space, trailing_space, ) cand_toks.append(toks) cand_masks.append(mask) # collate candidates cand_toks = data_utils.collate_tokens(cand_toks, pad_idx=self.vocab.pad()) cand_masks = data_utils.collate_tokens(cand_masks, pad_idx=0) assert cand_toks.size() == cand_masks.size() candidate_tokens.append(cand_toks) candidate_masks.append(cand_masks) candidate_lengths.append(cand_toks.size(1)) labels.append(label) query_lengths = np.array(query_lengths) query_tokens = ListDataset(query_tokens, query_lengths) query_masks = ListDataset(query_masks, query_lengths) candidate_lengths = np.array(candidate_lengths) candidate_tokens = ListDataset(candidate_tokens, candidate_lengths) candidate_masks = ListDataset(candidate_masks, candidate_lengths) labels = ListDataset(labels, [1]*len(labels)) dataset = { 'id': IdDataset(), 'query_tokens': query_tokens, 'query_masks': query_masks, 'candidate_tokens': candidate_tokens, 'candidate_masks': candidate_masks, 'labels': labels, 'nsentences': NumSamplesDataset(), 'ntokens': NumelDataset(query_tokens, reduce=True), } nested_dataset = NestedDictionaryDataset( dataset, sizes=[query_lengths], ) with data_utils.numpy_seed(self.args.seed): shuffle = np.random.permutation(len(query_tokens)) dataset = SortDataset( nested_dataset, # shuffle sort_order=[shuffle], ) if return_only: return dataset self.datasets[split] = dataset return self.datasets[split] def build_dataset_for_inference(self, sample_json): with tempfile.NamedTemporaryFile(buffering=0) as h: h.write((json.dumps(sample_json) + '\n').encode('utf-8')) dataset = self.load_dataset( 'disambiguate_pronoun', data_path=h.name, return_only=True, ) return dataset def disambiguate_pronoun(self, model, sentence, use_cuda=False): sample_json = wsc_utils.convert_sentence_to_json(sentence) dataset = self.build_dataset_for_inference(sample_json) sample = dataset.collater([dataset[0]]) if use_cuda: sample = utils.move_to_cuda(sample) def get_masked_input(tokens, mask): masked_tokens = tokens.clone() masked_tokens[mask.bool()] = self.mask return masked_tokens def 
get_lprobs(tokens, mask): logits, _ = model(src_tokens=get_masked_input(tokens, mask)) lprobs = F.log_softmax(logits, dim=-1, dtype=torch.float) scores = lprobs.gather(2, tokens.unsqueeze(-1)).squeeze(-1) mask = mask.type_as(scores) scores = (scores * mask).sum(dim=-1) / mask.sum(dim=-1) return scores cand_lprobs = get_lprobs( sample['candidate_tokens'][0], sample['candidate_masks'][0], ) if sample['query_tokens'][0] is not None: query_lprobs = get_lprobs( sample['query_tokens'][0].unsqueeze(0), sample['query_masks'][0].unsqueeze(0), ) return (query_lprobs >= cand_lprobs).all().item() == 1 else: best_idx = cand_lprobs.argmax().item() full_cand = sample['candidate_tokens'][0][best_idx] mask = sample['candidate_masks'][0][best_idx] toks = full_cand[mask.bool()] return self.bpe.decode(self.source_dictionary.string(toks)).strip() @property def source_dictionary(self): return self.vocab @property def target_dictionary(self): return self.vocab @register_task('winogrande') class WinograndeTask(WSCTask): """ Task for WinoGrande dataset. Efficient implementation for Winograd schema tasks with exactly two candidates, one of which is correct. """ @classmethod def setup_task(cls, args, **kwargs): assert args.criterion == 'winogrande', 'Must set --criterion=winogrande' # load data and label dictionaries vocab = cls.load_dictionary(os.path.join(args.data, 'dict.txt')) print('| dictionary: {} types'.format(len(vocab))) return cls(args, vocab) def load_dataset(self, split, epoch=1, combine=False, data_path=None, return_only=False, **kwargs): """Load a given dataset split. Args: split (str): name of the split (e.g., train, valid, test) """ if data_path is None: data_path = os.path.join(self.args.data, split + '.jsonl') if not os.path.exists(data_path): raise FileNotFoundError('Cannot find data: {}'.format(data_path)) query_tokens = [] query_masks = [] query_lengths = [] candidate_tokens = [] candidate_masks = [] candidate_lengths = [] itr = wsc_utils.winogrande_jsonl_iterator(data_path, eval=(split == 'test')) for sample in itr: sentence, pronoun_span, query, cand_text = sample prefix = sentence[:pronoun_span[0]].rstrip() suffix = sentence[pronoun_span[1]:] leading_space = ' ' if sentence[:pronoun_span[0]].endswith(' ') else '' trailing_space = '' if query is not None: query_toks, query_mask = self.binarize_with_mask( query, prefix, suffix, leading_space, trailing_space, ) query_len = len(query_toks) else: query_toks, query_mask, query_len = None, None, 0 query_tokens.append(query_toks) query_masks.append(query_mask) query_lengths.append(query_len) cand_toks, cand_mask = self.binarize_with_mask( cand_text, prefix, suffix, leading_space, trailing_space, ) candidate_tokens.append(cand_toks) candidate_masks.append(cand_mask) candidate_lengths.append(cand_toks.size(0)) query_lengths = np.array(query_lengths) def get_pad_dataset_fn(tokens, length, pad_idx): return PadDataset( ListDataset(tokens, length), pad_idx=pad_idx, left_pad=False, ) query_tokens = get_pad_dataset_fn(query_tokens, query_lengths, self.vocab.pad()) query_masks = get_pad_dataset_fn(query_masks, query_lengths, 0) candidate_lengths = np.array(candidate_lengths) candidate_tokens = get_pad_dataset_fn(candidate_tokens, candidate_lengths, self.vocab.pad()) candidate_masks = get_pad_dataset_fn(candidate_masks, candidate_lengths, 0) dataset = { 'id': IdDataset(), 'query_tokens': query_tokens, 'query_masks': query_masks, 'candidate_tokens': candidate_tokens, 'candidate_masks': candidate_masks, 'nsentences': NumSamplesDataset(), 'ntokens': 
NumelDataset(query_tokens, reduce=True), } nested_dataset = NestedDictionaryDataset( dataset, sizes=[query_lengths], ) with data_utils.numpy_seed(self.args.seed): shuffle = np.random.permutation(len(query_tokens)) dataset = SortDataset( nested_dataset, # shuffle sort_order=[shuffle], ) if return_only: return dataset self.datasets[split] = dataset return self.datasets[split]
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/roberta/wsc/wsc_criterion.py
examples/roberta/wsc/wsc_criterion.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import torch import torch.nn.functional as F from fairseq import utils from fairseq.data import encoders from fairseq.criterions import LegacyFairseqCriterion, register_criterion @register_criterion('wsc') class WSCCriterion(LegacyFairseqCriterion): def __init__(self, args, task): super().__init__(args, task) if self.args.save_predictions is not None: self.prediction_h = open(self.args.save_predictions, 'w') else: self.prediction_h = None self.bpe = encoders.build_bpe(args) self.tokenizer = encoders.build_tokenizer(args) def __del__(self): if self.prediction_h is not None: self.prediction_h.close() @staticmethod def add_args(parser): """Add criterion-specific arguments to the parser.""" parser.add_argument('--wsc-margin-alpha', type=float, metavar='A', default=1.0) parser.add_argument('--wsc-margin-beta', type=float, metavar='B', default=0.0) parser.add_argument('--wsc-cross-entropy', action='store_true', help='use cross entropy formulation instead of margin loss') parser.add_argument('--save-predictions', metavar='FILE', help='file to save predictions to') def get_masked_input(self, tokens, mask): masked_tokens = tokens.clone() masked_tokens[mask] = self.task.mask return masked_tokens def get_lprobs(self, model, tokens, mask): logits, _ = model(src_tokens=self.get_masked_input(tokens, mask)) lprobs = F.log_softmax(logits, dim=-1, dtype=torch.float) scores = lprobs.gather(2, tokens.unsqueeze(-1)).squeeze(-1) mask = mask.type_as(scores) scores = (scores * mask).sum(dim=-1) / mask.sum(dim=-1) return scores def get_loss(self, query_lprobs, cand_lprobs): if self.args.wsc_cross_entropy: return F.cross_entropy( torch.cat([query_lprobs, cand_lprobs]).unsqueeze(0), query_lprobs.new([0]).long(), ) else: return ( - query_lprobs + self.args.wsc_margin_alpha * ( cand_lprobs - query_lprobs + self.args.wsc_margin_beta ).clamp(min=0) ).sum() def forward(self, model, sample, reduce=True): # compute loss and accuracy loss, nloss = 0., 0 ncorrect, nqueries = 0, 0 for i, label in enumerate(sample['labels']): query_lprobs = self.get_lprobs( model, sample['query_tokens'][i].unsqueeze(0), sample['query_masks'][i].unsqueeze(0), ) cand_lprobs = self.get_lprobs( model, sample['candidate_tokens'][i], sample['candidate_masks'][i], ) pred = (query_lprobs >= cand_lprobs).all().item() if label is not None: label = 1 if label else 0 ncorrect += 1 if pred == label else 0 nqueries += 1 if label: # only compute a loss for positive instances nloss += 1 loss += self.get_loss(query_lprobs, cand_lprobs) id = sample['id'][i].item() if self.prediction_h is not None: print('{}\t{}\t{}'.format(id, pred, label), file=self.prediction_h) if nloss == 0: loss = torch.tensor(0.0, requires_grad=True) sample_size = nqueries if nqueries > 0 else 1 logging_output = { 'loss': utils.item(loss.data) if reduce else loss.data, 'ntokens': sample['ntokens'], 'nsentences': sample['nsentences'], 'sample_size': sample_size, 'ncorrect': ncorrect, 'nqueries': nqueries, } return loss, sample_size, logging_output @staticmethod def aggregate_logging_outputs(logging_outputs): """Aggregate logging outputs from data parallel training.""" loss_sum = sum(log.get('loss', 0) for log in logging_outputs) ntokens = sum(log.get('ntokens', 0) for log in logging_outputs) nsentences = sum(log.get('nsentences', 0) for log in logging_outputs) sample_size = sum(log.get('sample_size', 0) 
for log in logging_outputs) agg_output = { 'loss': loss_sum / sample_size / math.log(2), 'ntokens': ntokens, 'nsentences': nsentences, 'sample_size': sample_size, } ncorrect = sum(log.get('ncorrect', 0) for log in logging_outputs) nqueries = sum(log.get('nqueries', 0) for log in logging_outputs) if nqueries > 0: agg_output['accuracy'] = ncorrect / float(nqueries) return agg_output @register_criterion('winogrande') class WinograndeCriterion(WSCCriterion): def forward(self, model, sample, reduce=True): # compute loss and accuracy query_lprobs = self.get_lprobs( model, sample['query_tokens'], sample['query_masks'], ) cand_lprobs = self.get_lprobs( model, sample['candidate_tokens'], sample['candidate_masks'], ) pred = query_lprobs >= cand_lprobs loss = self.get_loss(query_lprobs, cand_lprobs) sample_size = sample['query_tokens'].size(0) ncorrect = pred.sum().item() logging_output = { 'loss': utils.item(loss.data) if reduce else loss.data, 'ntokens': sample['ntokens'], 'nsentences': sample['nsentences'], 'sample_size': sample_size, 'ncorrect': ncorrect, 'nqueries': sample_size, } return loss, sample_size, logging_output
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
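
The margin loss from WSCCriterion.get_loss above, evaluated on scalar example scores; the alpha and beta values are illustrative (the flags default to 1.0 and 0.0).

import torch

query_lprobs = torch.tensor([-1.2])        # score of the correct span
cand_lprobs = torch.tensor([-1.5, -0.9])   # scores of competing spans
alpha, beta = 1.0, 0.4

# penalize any candidate that scores within beta of (or above) the query
loss = (
    -query_lprobs
    + alpha * (cand_lprobs - query_lprobs + beta).clamp(min=0)
).sum()
print(loss)  # tensor(3.2000)
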
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/roberta/wsc/__init__.py
examples/roberta/wsc/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from . import wsc_criterion # noqa from . import wsc_task # noqa
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/unsupervised_quality_estimation/meteor.py
examples/unsupervised_quality_estimation/meteor.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import os import sys import subprocess import tempfile import math from itertools import combinations from collections import defaultdict def read_translations(path, n_repeats): segment_counter = 0 segment_translations = [] translations = defaultdict(list) for line in open(path): segment_translations.append(' '.join(line.split())) if len(segment_translations) == n_repeats: translations[segment_counter] = segment_translations segment_translations = [] segment_counter += 1 return translations def generate_input(translations, n_repeats): _, ref_path = tempfile.mkstemp() _, mt_path = tempfile.mkstemp() ref_fh = open(ref_path, 'w') mt_fh = open(mt_path, 'w') for segid in sorted(translations.keys()): assert len(translations[segid]) == n_repeats indexes = combinations(range(n_repeats), 2) for idx1, idx2 in indexes: mt_fh.write(translations[segid][idx1].strip() + '\n') ref_fh.write(translations[segid][idx2].strip() + '\n') sys.stderr.write('\nSaved translations to %s and %s' % (ref_path, mt_path)) return ref_path, mt_path def run_meteor(ref_path, mt_path, metric_path, lang='en'): _, out_path = tempfile.mkstemp() subprocess.call([ 'java', '-Xmx2G', '-jar', metric_path, mt_path, ref_path, '-p', '0.5 0.2 0.6 0.75', # default parameters, only changed alpha to give equal weight to P and R '-norm', '-l', lang], stdout=open(out_path, 'w')) os.remove(ref_path) os.remove(mt_path) sys.stderr.write('\nSaved Meteor output to %s' % out_path) return out_path def read_output(meteor_output_path, n_repeats): n_combinations = math.factorial(n_repeats)/(math.factorial(2) * math.factorial(n_repeats - 2)) raw_scores = [] average_scores = [] for line in open(meteor_output_path): if not line.startswith('Segment '): continue score = float(line.strip().split('\t')[1]) raw_scores.append(score) if len(raw_scores) == n_combinations: average_scores.append(sum(raw_scores)/n_combinations) raw_scores = [] os.remove(meteor_output_path) return average_scores def main(): parser = argparse.ArgumentParser() parser.add_argument('-i', '--input') parser.add_argument('-n', '--repeat_times', type=int) parser.add_argument('-m', '--meteor') parser.add_argument('-o', '--output') args = parser.parse_args() translations = read_translations(args.input, args.repeat_times) sys.stderr.write('\nGenerating input for Meteor...') ref_path, mt_path = generate_input(translations, args.repeat_times) sys.stderr.write('\nRunning Meteor...') out_path = run_meteor(ref_path, mt_path, args.meteor) sys.stderr.write('\nReading output...') scores = read_output(out_path, args.repeat_times) sys.stderr.write('\nWriting results...') with open(args.output, 'w') as o: for scr in scores: o.write('{}\n'.format(scr)) if __name__ == '__main__': main()
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
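
A quick sanity check (not in the script) that read_output's n_combinations matches the number of pairs generate_input emits per segment:

import math
from itertools import combinations

n = 4  # hypothetical --repeat_times
pairs = list(combinations(range(n), 2))  # what generate_input writes out
n_combinations = math.factorial(n) / (math.factorial(2) * math.factorial(n - 2))
print(len(pairs), n_combinations)  # 6 6.0
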
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/unsupervised_quality_estimation/repeat_lines.py
examples/unsupervised_quality_estimation/repeat_lines.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import sys def _normalize_spaces(line): return ' '.join(line.split()) def main(): parser = argparse.ArgumentParser() parser.add_argument('-i', '--input_file', required=True, type=str) parser.add_argument('-n', '--repeat_times', required=True, type=int) parser.add_argument('-o', '--output_file', required=False, type=str) args = parser.parse_args() stream = open(args.output_file, 'w') if args.output_file else sys.stdout for line in open(args.input_file): for _ in range(args.repeat_times): stream.write(_normalize_spaces(line) + '\n') if __name__ == '__main__': main()
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/unsupervised_quality_estimation/aggregate_scores.py
examples/unsupervised_quality_estimation/aggregate_scores.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import numpy as np import sys aggregate_funcs = { 'std': np.std, 'var': np.var, 'median': np.median, 'mean': np.mean, 'min': np.min, 'max': np.max, } def main(): parser = argparse.ArgumentParser() parser.add_argument('-i', '--input_file', required=True, type=str) parser.add_argument('-n', '--repeat_times', required=True, type=int) parser.add_argument('-o', '--output_file', required=False) parser.add_argument('-f', '--func', required=False, default='mean') args = parser.parse_args() stream = open(args.output_file, 'w') if args.output_file else sys.stdout segment_scores = [] for line in open(args.input_file): segment_scores.append(float(line.strip())) if len(segment_scores) == args.repeat_times: stream.write('{}\n'.format(aggregate_funcs[args.func](segment_scores))) segment_scores = [] if __name__ == '__main__': main()
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/translation_moe/score.py
examples/translation_moe/score.py
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Scoring script for computing pairwise BLEU and multi-ref BLEU over a set of candidate hypotheses. See `"Mixture Models for Diverse Machine Translation: Tricks of the Trade" (Shen et al., 2019) <https://arxiv.org/abs/1902.07816>`_. """ import argparse from itertools import chain import sys import random import numpy as np from sacrebleu import compute_bleu, corpus_bleu as _corpus_bleu def main(): parser = argparse.ArgumentParser(sys.argv[0]) parser.add_argument('--sys', nargs='*', default='', metavar='FILE', help='path to system output') parser.add_argument('--ref', default='', metavar='FILE', help='path to references') parser.add_argument('--output', default='', metavar='FILE', help='print outputs into a pretty format') args = parser.parse_args() if args.sys: src, tgt, hypos, log_probs = load_sys(args.sys) print('pairwise BLEU: %.2f' % pairwise(hypos)) if args.output: merge(src, tgt, hypos, log_probs, args.output) if args.ref: _, _, refs = load_ref(args.ref) if args.sys: multi_ref(refs, hypos) else: intra_ref(refs) def dictolist(d): a = sorted(d.items(), key=lambda i: i[0]) return [i[1] for i in a] def load_sys(paths): src, tgt, hypos, log_probs = {}, {}, {}, {} for path in paths: with open(path) as f: for line in f: line = line.rstrip() # S: source # T: target # D: detokenized system output if line.startswith(('S-', 'T-', 'D-')): i = int(line[line.find('-')+1:line.find('\t')]) if line.startswith('S-'): src[i] = line.split('\t')[1] if line.startswith('T-'): tgt[i] = line.split('\t')[1] if line.startswith('D-'): if i not in hypos: hypos[i] = [] log_probs[i] = [] hypos[i].append(line.split('\t')[2]) log_probs[i].append(float(line.split('\t')[1])) return dictolist(src), dictolist(tgt), dictolist(hypos), dictolist(log_probs) def load_ref(path): with open(path) as f: lines = f.readlines() src, tgt, refs = [], [], [] i = 0 while i < len(lines): if lines[i].startswith('S-'): src.append(lines[i].split('\t')[1].rstrip()) i += 1 elif lines[i].startswith('T-'): tgt.append(lines[i].split('\t')[1].rstrip()) i += 1 else: a = [] while i < len(lines) and lines[i].startswith('R'): a.append(lines[i].split('\t')[1].rstrip()) i += 1 refs.append(a) return src, tgt, refs def merge(src, tgt, hypos, log_probs, path): with open(path, 'w') as f: for s, t, hs, lps in zip(src, tgt, hypos, log_probs): f.write(s + '\n') f.write(t + '\n') f.write('\n') for h, lp in zip(hs, lps): f.write('\t%f\t%s\n' % (lp, h.strip())) f.write('------------------------------------------------------\n') def corpus_bleu(sys_stream, ref_streams): bleu = _corpus_bleu(sys_stream, ref_streams, tokenize='none') return bleu.score def sentence_bleu(hypothesis, reference): bleu = _corpus_bleu(hypothesis, reference) for i in range(1, 4): bleu.counts[i] += 1 bleu.totals[i] += 1 bleu = compute_bleu( bleu.counts, bleu.totals, bleu.sys_len, bleu.ref_len, smooth_method='exp', ) return bleu.score def pairwise(sents): _ref, _hypo = [], [] for s in sents: for i in range(len(s)): for j in range(len(s)): if i != j: _ref.append(s[i]) _hypo.append(s[j]) return corpus_bleu(_hypo, [_ref]) def multi_ref(refs, hypos): _ref, _hypo = [], [] ref_cnt = 0 assert len(refs) == len(hypos) # count number of refs covered for rs, hs in zip(refs, hypos): a = set() for h in hs: s = [sentence_bleu(h, r) for r in rs] j = np.argmax(s) _ref.append(rs[j]) _hypo.append(h) best = [k 
for k in range(len(rs)) if s[k] == s[j]] a.add(random.choice(best)) ref_cnt += len(a) print('#refs covered: %.2f' % (ref_cnt / len(refs))) # transpose refs and hypos refs = list(zip(*refs)) hypos = list(zip(*hypos)) # compute multi-ref corpus BLEU (leave-one-out to be comparable to intra_ref) k = len(hypos) m = len(refs) flat_hypos = [hypos[j][i] for i in range(len(hypos[0])) for j in range(k)] duplicated_refs = [ [ref for ref in refs_i for _ in range(k)] for refs_i in refs ] loo_bleus = [] for held_out_ref in range(m): remaining_refs = duplicated_refs[:held_out_ref] + duplicated_refs[held_out_ref+1:] assert len(remaining_refs) == m - 1 loo_bleus.append(corpus_bleu(flat_hypos, remaining_refs)) print('average multi-reference BLEU (leave-one-out): %.2f' % np.mean(loo_bleus)) def intra_ref(refs): print('ref pairwise BLEU: %.2f' % pairwise(refs)) refs = list(zip(*refs)) m = len(refs) concat_h = [] concat_rest = [[] for j in range(m - 1)] for i, h in enumerate(refs): rest = refs[:i] + refs[i+1:] concat_h.append(h) for j in range(m - 1): concat_rest[j].extend(rest[j]) concat_h = list(chain.from_iterable(concat_h)) bleu = corpus_bleu(concat_h, concat_rest) print('multi-reference BLEU (leave-one-out): %.2f' % bleu) if __name__ == '__main__': main()
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
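
How pairwise() above enumerates hypothesis pairs, on invented strings: every ordered pair of distinct hypotheses per sentence feeds a single corpus-BLEU call.

sents = [["the cat sat", "a cat sat", "the cat slept"]]
_ref, _hypo = [], []
for s in sents:
    for i in range(len(s)):
        for j in range(len(s)):
            if i != j:
                _ref.append(s[i])
                _hypo.append(s[j])
print(len(_hypo))  # 6 ordered pairs from 3 hypotheses per sentence
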
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/translation_moe/src/logsumexp_moe.py
examples/translation_moe/src/logsumexp_moe.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch class LogSumExpMoE(torch.autograd.Function): """Standard LogSumExp forward pass, but use *posterior* for the backward. See `"Mixture Models for Diverse Machine Translation: Tricks of the Trade" (Shen et al., 2019) <https://arxiv.org/abs/1902.07816>`_. """ @staticmethod def forward(ctx, logp, posterior, dim=-1): ctx.save_for_backward(posterior) ctx.dim = dim return torch.logsumexp(logp, dim=dim) @staticmethod def backward(ctx, grad_output): posterior, = ctx.saved_tensors grad_logp = grad_output.unsqueeze(ctx.dim) * posterior return grad_logp, None, None
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
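
A minimal usage sketch of LogSumExpMoE above: the forward value is an ordinary logsumexp over experts, while the backward pass routes gradients through the supplied posterior. The import path assumes the repository root is on PYTHONPATH; otherwise paste the class inline.

import torch
from examples.translation_moe.src.logsumexp_moe import LogSumExpMoE

logp = torch.randn(2, 3, requires_grad=True)      # (batch, num_experts)
posterior = torch.softmax(logp.detach(), dim=-1)  # any expert responsibilities
loss = LogSumExpMoE.apply(logp, posterior, -1).sum()
loss.backward()
print(torch.allclose(logp.grad, posterior))  # True: grads are the posterior
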
ofirpress/shortformer
https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/translation_moe/src/mean_pool_gating_network.py
examples/translation_moe/src/mean_pool_gating_network.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import torch.nn.functional as F class MeanPoolGatingNetwork(torch.nn.Module): """A simple mean-pooling gating network for selecting experts. This module applies mean pooling over an encoder's output and returns responsibilities for each expert. The encoder format is expected to match :class:`fairseq.models.transformer.TransformerEncoder`. """ def __init__(self, embed_dim, num_experts, dropout=None): super().__init__() self.embed_dim = embed_dim self.num_experts = num_experts self.fc1 = torch.nn.Linear(embed_dim, embed_dim) self.dropout = torch.nn.Dropout(dropout) if dropout is not None else None self.fc2 = torch.nn.Linear(embed_dim, num_experts) def forward(self, encoder_out): if not ( hasattr(encoder_out, 'encoder_out') and hasattr(encoder_out, 'encoder_padding_mask') and encoder_out.encoder_out.size(2) == self.embed_dim ): raise ValueError('Unexpected format for encoder_out') # mean pooling over time encoder_padding_mask = encoder_out.encoder_padding_mask # B x T encoder_out = encoder_out.encoder_out.transpose(0, 1) # B x T x C if encoder_padding_mask is not None: encoder_out = encoder_out.clone() # required because of transpose above encoder_out[encoder_padding_mask] = 0 ntokens = torch.sum(~encoder_padding_mask, dim=1, keepdim=True) x = torch.sum(encoder_out, dim=1) / ntokens.type_as(encoder_out) else: x = torch.mean(encoder_out, dim=1) x = torch.tanh(self.fc1(x)) if self.dropout is not None: x = self.dropout(x) x = self.fc2(x) return F.log_softmax(x, dim=-1, dtype=torch.float32).type_as(x)
python
MIT
edc411ff896ae042c01d939a32c1e4a33e238083
2026-01-05T07:14:08.244122Z
false
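
The masked mean pooling at the heart of MeanPoolGatingNetwork.forward above, in isolation; the batch/time/channel sizes are toy values.

import torch

encoder_out = torch.randn(2, 3, 4)                       # B x T x C
padding_mask = torch.tensor([[False, False, True],
                             [False, False, False]])     # True marks padding
encoder_out = encoder_out.clone()
encoder_out[padding_mask] = 0                            # zero out padded steps
ntokens = torch.sum(~padding_mask, dim=1, keepdim=True)  # true lengths
pooled = torch.sum(encoder_out, dim=1) / ntokens.type_as(encoder_out)
print(pooled.shape)  # torch.Size([2, 4])
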