code stringlengths 17 6.64M |
|---|
def cli_main():
    """Score a system output file against a reference file with BLEU.

    Supports three modes: sacrebleu corpus BLEU (--sacrebleu),
    per-sentence BLEU (--sentence-bleu), or fairseq corpus BLEU (default).
    When args.sys is '-', the system output is read from stdin.
    """
    parser = get_parser()
    args = parser.parse_args()
    print(args)
    assert ((args.sys == '-') or os.path.exists(args.sys)), 'System output file {} does not exist'.format(args.sys)
    assert os.path.exists(args.ref), 'Reference file {} does not exist'.format(args.ref)
    # Renamed from `dict` to avoid shadowing the builtin type.
    vocab = dictionary.Dictionary()

    def readlines(fd):
        # Stream lines lazily instead of fd.readlines(), which loaded the
        # whole file into memory; lowercase when --ignore-case is set.
        for line in fd:
            if args.ignore_case:
                yield line.lower()
            else:
                yield line

    if args.sacrebleu:
        import sacrebleu

        def score(fdsys):
            with open(args.ref) as fdref:
                print(sacrebleu.corpus_bleu(fdsys, [fdref]))
    elif args.sentence_bleu:
        def score(fdsys):
            # Print one BLEU line per sentence pair, prefixed by its index.
            with open(args.ref) as fdref:
                scorer = bleu.Scorer(vocab.pad(), vocab.eos(), vocab.unk())
                for (i, (sys_tok, ref_tok)) in enumerate(zip(readlines(fdsys), readlines(fdref))):
                    scorer.reset(one_init=True)
                    sys_tok = vocab.encode_line(sys_tok)
                    ref_tok = vocab.encode_line(ref_tok)
                    scorer.add(ref_tok, sys_tok)
                    print(i, scorer.result_string(args.order))
    else:
        def score(fdsys):
            # Accumulate all sentence pairs, then print one corpus-level score.
            with open(args.ref) as fdref:
                scorer = bleu.Scorer(vocab.pad(), vocab.eos(), vocab.unk())
                for (sys_tok, ref_tok) in zip(readlines(fdsys), readlines(fdref)):
                    sys_tok = vocab.encode_line(sys_tok)
                    ref_tok = vocab.encode_line(ref_tok)
                    scorer.add(ref_tok, sys_tok)
                print(scorer.result_string(args.order))

    if args.sys == '-':
        score(sys.stdin)
    else:
        with open(args.sys, 'r') as f:
            score(f)
|
def main(args, init_distributed=False):
    """Train a model: set up task/model/criterion, then run the epoch loop
    until the LR floor, max-epoch, max-update, or early stopping triggers.

    Args:
        args: parsed fairseq training arguments.
        init_distributed: when True, initialize torch.distributed for this
            worker before building the trainer.
    """
    utils.import_user_module(args)
    assert ((args.max_tokens is not None) or (args.max_sentences is not None)), 'Must specify batch size either with --max-tokens or --max-sentences'
    if (torch.cuda.is_available() and (not args.cpu)):
        torch.cuda.set_device(args.device_id)
    # Seed RNGs for reproducibility.
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if init_distributed:
        args.distributed_rank = distributed_utils.distributed_init(args)
    if distributed_utils.is_master(args):
        # Only the master process checks that the checkpoint dir is writable.
        checkpoint_utils.verify_checkpoint_directory(args.save_dir)
    logger.info(args)
    task = tasks.setup_task(args)
    # Load validation sets up front; training data is loaded via the
    # checkpoint/train-iterator machinery below.
    for valid_sub_split in args.valid_subset.split(','):
        task.load_dataset(valid_sub_split, combine=False, epoch=0)
    model = task.build_model(args)
    criterion = task.build_criterion(args)
    logger.info(model)
    logger.info('model {}, criterion {}'.format(args.arch, criterion.__class__.__name__))
    logger.info('num. model params: {:.2f} M (num. trained: {:.2f} M)'.format((sum((p.numel() for p in model.parameters())) / 1000000.0), (sum((p.numel() for p in model.parameters() if p.requires_grad)) / 1000000.0)))
    trainer = Trainer(args, task, model, criterion)
    logger.info('LR scheduler: {}'.format(trainer.lr_scheduler))
    logger.info('training on {} GPUs with update freq of {}'.format(args.distributed_world_size, args.update_freq))
    logger.info('max tokens per GPU = {} and max sentences per GPU = {}'.format(args.max_tokens, args.max_sentences))
    # Restore trainer state from the latest checkpoint, if any.
    (extra_state, epoch_itr) = checkpoint_utils.load_checkpoint(args, trainer)
    max_epoch = (args.max_epoch or math.inf)
    max_update = (args.max_update or math.inf)
    lr = trainer.get_lr()
    train_meter = StopwatchMeter()
    train_meter.start()
    valid_subsets = args.valid_subset.split(',')
    # Main epoch loop; the _next_epoch_itr check lets a preloaded iterator
    # finish even when the epoch budget is reached.
    while ((lr > args.min_lr) and ((epoch_itr.epoch < max_epoch) or (epoch_itr._next_epoch_itr is not None)) and (trainer.get_num_updates() < max_update)):
        train(args, trainer, task, epoch_itr)
        if ((not args.disable_validation) and ((epoch_itr.epoch % args.validate_interval) == 0)):
            valid_losses = validate(args, trainer, task, epoch_itr, valid_subsets)
        else:
            valid_losses = [None]
        # Step the LR scheduler using the first validation loss.
        lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0])
        if ((epoch_itr.epoch % args.save_interval) == 0):
            checkpoint_utils.save_checkpoint(args, trainer, epoch_itr, valid_losses[0])
        if should_stop_early(args, valid_losses[0]):
            logger.info("early stop since valid performance hasn't improved for last {} runs".format(args.patience))
            break
        # Reload the dataset each epoch only when args.data contains multiple
        # os.pathsep-separated paths (sharded data).
        epoch_itr = trainer.get_train_iterator(epoch_itr.epoch, load_dataset=(os.pathsep in getattr(args, 'data', '')))
    train_meter.stop()
    logger.info('done training in {:.1f} seconds'.format(train_meter.sum))
|
def should_stop_early(args, valid_loss):
    """Return True when validation hasn't improved for more than
    args.patience consecutive runs.

    State (`best`, `num_runs`) is kept as attributes on the function object
    itself, so it persists across calls. patience <= 0 disables the check.
    """
    if args.patience <= 0:
        return False

    def is_better(a, b):
        if args.maximize_best_checkpoint_metric:
            return a > b
        return a < b

    best_so_far = getattr(should_stop_early, 'best', None)
    if best_so_far is not None and not is_better(valid_loss, best_so_far):
        # No improvement: bump the counter and stop once patience is exceeded.
        should_stop_early.num_runs += 1
        return should_stop_early.num_runs > args.patience
    # First call, or a new best: record it and reset the counter.
    should_stop_early.best = valid_loss
    should_stop_early.num_runs = 0
    return False
|
@metrics.aggregate('train')
def train(args, trainer, task, epoch_itr):
    """Train the model for one epoch."""
    # Shuffle only once the curriculum phase is over.
    itr = epoch_itr.next_epoch_itr(fix_batches_to_gpus=args.fix_batches_to_gpus, shuffle=(epoch_itr.epoch >= args.curriculum))
    # Per-epoch gradient-accumulation factor; fall back to the last
    # configured value once the epoch index exceeds the list length.
    update_freq = (args.update_freq[(epoch_itr.epoch - 1)] if (epoch_itr.epoch <= len(args.update_freq)) else args.update_freq[(- 1)])
    itr = iterators.GroupedIterator(itr, update_freq)
    progress = progress_bar.build_progress_bar(args, itr, epoch_itr.epoch, no_progress_bar='simple')
    task.begin_epoch(epoch_itr.epoch, trainer.get_model())
    valid_subsets = args.valid_subset.split(',')
    max_update = (args.max_update or math.inf)
    for samples in progress:
        log_output = trainer.train_step(samples)
        num_updates = trainer.get_num_updates()
        if (log_output is None):
            # Step produced nothing to log (e.g. skipped batch); move on.
            continue
        stats = get_training_stats(metrics.get_smoothed_values('train'))
        progress.log(stats, tag='train', step=num_updates)
        # Mid-epoch validation + checkpoint every --save-interval-updates.
        if ((not args.disable_validation) and (args.save_interval_updates > 0) and ((num_updates % args.save_interval_updates) == 0) and (num_updates > 0)):
            valid_losses = validate(args, trainer, task, epoch_itr, valid_subsets)
            checkpoint_utils.save_checkpoint(args, trainer, epoch_itr, valid_losses[0])
        if (num_updates >= max_update):
            break
    # End-of-epoch summary, then reset meters for the next epoch.
    stats = get_training_stats(metrics.get_smoothed_values('train'))
    progress.print(stats, tag='train', step=num_updates)
    metrics.reset_meters('train')
|
def get_training_stats(stats):
    """Derive perplexity from nll_loss when missing and attach the
    wall-clock time, then return the (mutated) stats dict."""
    has_nll = 'nll_loss' in stats
    if has_nll and 'ppl' not in stats:
        stats['ppl'] = utils.get_perplexity(stats['nll_loss'])
    wall_meter = metrics.get_meter('default', 'wall')
    stats['wall'] = round(wall_meter.elapsed_time, 0)
    return stats
|
def validate(args, trainer, task, epoch_itr, subsets):
    """Evaluate the model on the validation set(s) and return the losses."""
    if (args.fixed_validation_seed is not None):
        # Fix the seed so validation is reproducible across calls.
        utils.set_torch_seed(args.fixed_validation_seed)
    valid_losses = []
    for subset in subsets:
        itr = task.get_batch_iterator(dataset=task.dataset(subset), max_tokens=args.max_tokens_valid, max_sentences=args.max_sentences_valid, max_positions=utils.resolve_max_positions(task.max_positions(), trainer.get_model().max_positions()), ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test, required_batch_size_multiple=args.required_batch_size_multiple, seed=args.seed, num_shards=args.distributed_world_size, shard_id=args.distributed_rank, num_workers=args.num_workers).next_epoch_itr(shuffle=False)
        progress = progress_bar.build_progress_bar(args, itr, epoch_itr.epoch, prefix="valid on '{}' subset".format(subset), no_progress_bar='simple')
        # Aggregate in a fresh metrics root so validation does not pollute
        # the training meters.
        with metrics.aggregate(new_root=True) as agg:
            for sample in progress:
                trainer.valid_step(sample)
        stats = get_valid_stats(args, trainer, agg.get_smoothed_values())
        progress.print(stats, tag=subset, step=trainer.get_num_updates())
        # The returned loss is whatever metric checkpointing selects on.
        valid_losses.append(stats[args.best_checkpoint_metric])
    return valid_losses
|
def get_valid_stats(args, trainer, stats):
    """Augment validation stats with ppl, the update count, and the running
    best value of the checkpoint-selection metric (when one exists)."""
    if 'nll_loss' in stats and 'ppl' not in stats:
        stats['ppl'] = utils.get_perplexity(stats['nll_loss'])
    stats['num_updates'] = trainer.get_num_updates()
    if hasattr(checkpoint_utils.save_checkpoint, 'best'):
        metric = args.best_checkpoint_metric
        chooser = max if args.maximize_best_checkpoint_metric else min
        stats['best_{0}'.format(metric)] = chooser(checkpoint_utils.save_checkpoint.best, stats[metric])
    return stats
|
def distributed_main(i, args, start_rank=0):
    """Worker entry point for torch.multiprocessing.spawn: pins device i,
    derives the global rank if unset, then delegates to main()."""
    args.device_id = i
    if args.distributed_rank is None:
        args.distributed_rank = start_rank + i
    main(args, init_distributed=True)
|
def cli_main(modify_parser=None):
    """Command-line entry point for training: dispatches to single-process,
    multi-process (spawn), or externally-initialized distributed execution."""
    parser = options.get_training_parser()
    args = options.parse_args_and_arch(parser, modify_parser=modify_parser)
    if (args.distributed_init_method is None):
        # Try to infer an init method from the environment (e.g. SLURM).
        distributed_utils.infer_init_method(args)
    if (args.distributed_init_method is not None):
        if ((torch.cuda.device_count() > 1) and (not args.distributed_no_spawn)):
            # Spawn one process per local GPU; each derives its own rank.
            start_rank = args.distributed_rank
            args.distributed_rank = None
            torch.multiprocessing.spawn(fn=distributed_main, args=(args, start_rank), nprocs=torch.cuda.device_count())
        else:
            distributed_main(args.device_id, args)
    elif (args.distributed_world_size > 1):
        # Single-node multi-GPU: rendezvous over a random localhost port.
        assert (args.distributed_world_size <= torch.cuda.device_count())
        port = random.randint(10000, 20000)
        args.distributed_init_method = 'tcp://localhost:{port}'.format(port=port)
        args.distributed_rank = None
        if ((max(args.update_freq) > 1) and (args.ddp_backend != 'no_c10d')):
            logger.info('NOTE: you may get faster training with: --ddp-backend=no_c10d')
        torch.multiprocessing.spawn(fn=distributed_main, args=(args,), nprocs=args.distributed_world_size)
    else:
        # Plain single-GPU / CPU training.
        main(args)
|
def main(args, override_args=None):
    """Validate saved model(s) on args.valid_subset and print aggregated stats.

    Args:
        args: parsed validation arguments (must set --max-tokens or
            --max-sentences; args.path points at the checkpoint).
        override_args: optional namespace whose explicitly-set values (plus
            its model_overrides string) override the stored checkpoint args.
    """
    utils.import_user_module(args)
    assert ((args.max_tokens is not None) or (args.max_sentences is not None)), 'Must specify batch size either with --max-tokens or --max-sentences'
    use_fp16 = args.fp16
    use_cuda = (torch.cuda.is_available() and (not args.cpu))
    if (override_args is not None):
        overrides = vars(override_args)
        # NOTE(security): eval() on --model-overrides executes arbitrary
        # code; only pass trusted strings here.
        overrides.update(eval(getattr(override_args, 'model_overrides', '{}')))
    else:
        overrides = None
    logger.info('loading model(s) from {}'.format(args.path))
    (models, model_args, task) = checkpoint_utils.load_model_ensemble_and_task([args.path], arg_overrides=overrides)
    model = models[0]
    # BUG FIX: the original reused `model` as the loop variable here, so with
    # an ensemble the model validated below silently became the *last* one
    # instead of models[0]. Use a separate name for the dtype/device loop.
    for m in models:
        if use_fp16:
            m.half()
        if use_cuda:
            m.cuda()
    logger.info(model_args)
    criterion = task.build_criterion(model_args)
    criterion.eval()
    for subset in args.valid_subset.split(','):
        try:
            task.load_dataset(subset, combine=False, epoch=0)
            dataset = task.dataset(subset)
        except KeyError:
            raise Exception(('Cannot find dataset: ' + subset))
        itr = task.get_batch_iterator(dataset=dataset, max_tokens=args.max_tokens, max_sentences=args.max_sentences, max_positions=utils.resolve_max_positions(task.max_positions(), *[m.max_positions() for m in models]), ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test, required_batch_size_multiple=args.required_batch_size_multiple, seed=args.seed, num_workers=args.num_workers).next_epoch_itr(shuffle=False)
        progress = progress_bar.build_progress_bar(args, itr, prefix="valid on '{}' subset".format(subset), no_progress_bar='simple')
        log_outputs = []
        for (i, sample) in enumerate(progress):
            sample = (utils.move_to_cuda(sample) if use_cuda else sample)
            (_loss, _sample_size, log_output) = task.valid_step(sample, model, criterion)
            progress.log(log_output, step=i)
            log_outputs.append(log_output)
        # Reduce per-batch logs into one set of smoothed values per subset.
        with metrics.aggregate() as agg:
            task.reduce_metrics(log_outputs, criterion)
            log_output = agg.get_smoothed_values()
        progress.print(log_output, tag=subset, step=i)
|
def cli_main():
    """Parse validation args plus a defaults-suppressed override copy,
    then run main()."""
    parser = options.get_validation_parser()
    args = options.parse_args_and_arch(parser)
    # Second parse with suppressed defaults so only flags the user actually
    # passed are treated as overrides.
    override_parser = options.get_validation_parser()
    overrides = options.parse_args_and_arch(override_parser, suppress_defaults=True)
    main(args, overrides)
|
def run_experiment(args):
    """Launch DeLighT LM training on WikiText-103 for model dimension d_m.

    Builds a fairseq train.py command line from args and runs it through the
    shell, teeing logs into <save_dir>/delight_out_<d_m>/logs.txt.
    """
    max_update = args.max_updates
    warmup_update = args.warmup_updates
    lr_period_updates = (max_update - warmup_update)
    max_tokens = args.max_tokens
    tokens_per_sample = args.tokens_per_sample
    update_freq = args.update_freq
    num_gpus = args.num_gpus
    data_dir = args.data_dir
    results_dir = args.save_dir
    d_m = args.d_m
    if (d_m not in TESTED_DIMS):
        print_warning_message('We have only tested for {}. Got {}'.format(TESTED_DIMS, d_m))
    # Scale the peak LR inversely with model dim, capped at 0.01.
    max_lr = min(round(((1024.0 / d_m) * LR_1024), 3), 0.01)
    job_name = 'delight_out_{}'.format(d_m)
    results_dir = '{}/{}'.format(results_dir, job_name)
    # exist_ok avoids the isdir-then-makedirs race when jobs start concurrently.
    os.makedirs(results_dir, exist_ok=True)
    log_file = '{}/logs.txt'.format(results_dir)
    command = ["python train.py {} --task language_modeling --arch delight_transformer_lm_wiki103 --log-interval 1000 --no-progress-bar --seed 1 --optimizer adam --adam-betas '(0.9, 0.98)' --clip-norm 0.0 --weight-decay 0.0 --criterion adaptive_loss --sample-break-mode none --skip-invalid-size-inputs-valid-test --update-freq {} --keep-last-epochs 10 --ddp-backend=no_c10d --max-tokens {} --tokens-per-sample {} --max-update {} --warmup-updates {} --lr-period-updates {} --lr-scheduler cosine --warmup-init-lr 1e-7 --lr-shrink 1 --max-lr {} --lr 1e-7 --min-lr 1e-9 --t-mult 1 --save-dir {} --distributed-world-size {} --distributed-port 50786 --delight-emb-map-dim 128 --delight-emb-out-dim {} --delight-dec-min-depth 4 --delight-dec-max-depth 12 --delight-dec-width-mult 2 | tee -a {}".format(data_dir, update_freq, max_tokens, tokens_per_sample, max_update, warmup_update, lr_period_updates, max_lr, results_dir, num_gpus, d_m, log_file)]
    print_log_message('Training command: ')
    print(command[0])
    # NOTE(security): os.system interpolates args unquoted through the shell;
    # only pass trusted argument values.
    os.system(command[0])
|
def run_experiment(args):
    """Launch DeLighT WMT'14 En-De training for model dimension d_m.

    Builds a fairseq train.py command line from args and runs it through the
    shell, teeing logs into <save_dir>/delight_out_<d_m>/logs.txt.
    """
    max_update = args.max_updates
    warmup_update = args.warmup_updates
    lr_period_updates = (max_update - warmup_update)
    max_tokens = args.max_tokens
    update_freq = args.update_freq
    num_gpus = args.num_gpus
    data_dir = args.data_dir
    results_dir = args.save_dir
    d_m = args.d_m
    if (d_m not in TESTED_DIMS):
        print_warning_message('We have only tested for {}. Got {}'.format(TESTED_DIMS, d_m))
    # Scale the peak LR inversely with model dim, capped at 0.01.
    max_lr = min(round(((640 / d_m) * LR_640), 3), 0.01)
    job_name = 'delight_out_{}'.format(d_m)
    results_dir = '{}/{}'.format(results_dir, job_name)
    # exist_ok avoids the isdir-then-makedirs race when jobs start concurrently.
    os.makedirs(results_dir, exist_ok=True)
    log_file = '{}/logs.txt'.format(results_dir)
    command = ["python train.py {} --arch delight_transformer_wmt14_en_de --log-interval 1000 --no-progress-bar --optimizer adam --adam-betas '(0.9, 0.98)' --clip-norm 0.0 --weight-decay 0.0 --criterion label_smoothed_cross_entropy --label-smoothing 0.1 --update-freq {} --keep-last-epochs 10 --ddp-backend=no_c10d --max-tokens {} --max-update {} --warmup-updates {} --lr-period-updates {} --lr-scheduler cosine --warmup-init-lr 1e-7 --lr-shrink 1 --max-lr {} --lr 1e-7 --min-lr 1e-9 --t-mult 1 --save-dir {} --distributed-world-size {} --distributed-port 50786 --delight-emb-map-dim 128 --delight-emb-out-dim {} --delight-enc-min-depth 4 --delight-enc-max-depth 8 --delight-enc-width-mult 2 --delight-dec-min-depth 4 --delight-dec-max-depth 8 --delight-dec-width-mult 2 | tee -a {}".format(data_dir, update_freq, max_tokens, max_update, warmup_update, lr_period_updates, max_lr, results_dir, num_gpus, d_m, log_file)]
    print_log_message('Training command: ')
    print(command[0])
    # NOTE(security): os.system interpolates args unquoted through the shell;
    # only pass trusted argument values.
    os.system(command[0])
|
def run_experiment(args):
    """Launch DeLighT WMT'14 En-Fr training for model dimension d_m.

    Builds a fairseq train.py command line from args and runs it through the
    shell, teeing logs into <save_dir>/delight_out_<d_m>/logs.txt. The cosine
    LR period is fixed at 70000 updates for this task.
    """
    max_update = args.max_updates
    warmup_update = args.warmup_updates
    lr_period_updates = 70000
    max_tokens = args.max_tokens
    update_freq = args.update_freq
    num_gpus = args.num_gpus
    data_dir = args.data_dir
    results_dir = args.save_dir
    d_m = args.d_m
    if (d_m not in TESTED_DIMS):
        print_warning_message('We have only tested for {}. Got {}'.format(TESTED_DIMS, d_m))
    # Scale the peak LR inversely with model dim, capped at 0.01.
    max_lr = min(round(((640 / d_m) * LR_640), 3), 0.01)
    job_name = 'delight_out_{}'.format(d_m)
    results_dir = '{}/{}'.format(results_dir, job_name)
    # exist_ok avoids the isdir-then-makedirs race when jobs start concurrently.
    os.makedirs(results_dir, exist_ok=True)
    log_file = '{}/logs.txt'.format(results_dir)
    command = ["python train.py {} --arch delight_transformer_wmt14_en_fr --log-interval 100 --no-progress-bar --optimizer adam --adam-betas '(0.9, 0.98)' --clip-norm 0.0 --weight-decay 0.0 --criterion label_smoothed_cross_entropy --label-smoothing 0.1 --update-freq {} --keep-last-epochs 10 --ddp-backend=no_c10d --max-tokens {} --max-update {} --warmup-updates {} --lr-period-updates {} --lr-scheduler cosine --warmup-init-lr 1e-7 --lr-shrink 1 --max-lr {} --lr 1e-7 --min-lr 1e-9 --t-mult 1 --save-dir {} --distributed-world-size {} --distributed-port 50786 --delight-emb-map-dim 128 --delight-emb-out-dim {} --delight-enc-min-depth 4 --delight-enc-max-depth 8 --delight-enc-width-mult 2 --delight-dec-min-depth 4 --delight-dec-max-depth 8 --delight-dec-width-mult 2 | tee -a {}".format(data_dir, update_freq, max_tokens, max_update, warmup_update, lr_period_updates, max_lr, results_dir, num_gpus, d_m, log_file)]
    print_log_message('Training command: ')
    print(command[0])
    # NOTE(security): os.system interpolates args unquoted through the shell;
    # only pass trusted argument values.
    os.system(command[0])
|
def run_experiment(args):
    """Launch DeLighT WMT'16 En-Ro training for model dimension d_m.

    Builds a fairseq train.py command line (linear LR schedule) from args and
    runs it through the shell, teeing logs into
    <save_dir>/delight_out_<d_m>/logs.txt.
    """
    max_update = args.max_updates
    warmup_update = args.warmup_updates
    max_tokens = args.max_tokens
    update_freq = args.update_freq
    num_gpus = args.num_gpus
    data_dir = args.data_dir
    results_dir = args.save_dir
    d_m = args.d_m
    if (d_m not in TESTED_DIMS):
        print_warning_message('We have only tested for {}. Got {}'.format(TESTED_DIMS, d_m))
    # Scale the peak LR inversely with model dim, capped at 0.01.
    max_lr = min(round(((512.0 / d_m) * LR_512), 4), 0.01)
    job_name = 'delight_out_{}'.format(d_m)
    results_dir = '{}/{}'.format(results_dir, job_name)
    # exist_ok avoids the isdir-then-makedirs race when jobs start concurrently.
    os.makedirs(results_dir, exist_ok=True)
    log_file = '{}/logs.txt'.format(results_dir)
    command = ["python train.py {} --arch delight_transformer_wmt16_en_ro --no-progress-bar --optimizer adam --adam-betas '(0.9, 0.98)' --clip-norm 0.0 --weight-decay 0.0 --criterion label_smoothed_cross_entropy --label-smoothing 0.1 --min-lr 1e-09 --update-freq {} --keep-last-epochs 10 --ddp-backend=no_c10d --max-tokens {} --max-update {} --warmup-updates {} --lr-scheduler linear --warmup-init-lr 1e-7 --lr {} --min-lr 1e-9 --t-mult 1 --save-dir {} --distributed-world-size {} --distributed-port 50786 --delight-emb-map-dim 128 --delight-emb-out-dim {} --delight-enc-min-depth 4 --delight-enc-max-depth 8 --delight-enc-width-mult 2 --delight-dec-min-depth 4 --delight-dec-max-depth 8 --delight-dec-width-mult 2 | tee -a {}".format(data_dir, update_freq, max_tokens, max_update, warmup_update, max_lr, results_dir, num_gpus, d_m, log_file)]
    print_log_message('Training command: ')
    print(command[0])
    # NOTE(security): os.system interpolates args unquoted through the shell;
    # only pass trusted argument values.
    os.system(command[0])
|
def average_checkpoints(inputs):
    """Loads checkpoints from inputs and returns a model with averaged weights.

    Args:
        inputs: An iterable of string paths of checkpoints to load from.

    Returns:
        A dict of string keys mapping to various values. The 'model' key
        from the returned dict should correspond to an OrderedDict mapping
        string parameter names to torch Tensors.

    Raises:
        KeyError: if the checkpoints do not all share the same parameter names.
    """
    params_dict = collections.OrderedDict()
    params_keys = None
    new_state = None
    num_models = len(inputs)
    for f in inputs:
        # Always load onto CPU so GPU-trained checkpoints average anywhere.
        state = torch.load(f, map_location=(lambda s, _: torch.serialization.default_restore_location(s, 'cpu')))
        if (new_state is None):
            # Keep the first checkpoint's metadata (optimizer state, args, ...).
            new_state = state
        model_params = state['model']
        model_params_keys = list(model_params.keys())
        if (params_keys is None):
            params_keys = model_params_keys
        elif (params_keys != model_params_keys):
            raise KeyError('For checkpoint {}, expected list of params: {}, but found: {}'.format(f, params_keys, model_params_keys))
        for k in params_keys:
            p = model_params[k]
            if isinstance(p, torch.HalfTensor):
                # Accumulate in fp32 to avoid precision loss while summing.
                p = p.float()
            if (k not in params_dict):
                params_dict[k] = p.clone()
            else:
                params_dict[k] += p
    averaged_params = collections.OrderedDict()
    for (k, v) in params_dict.items():
        averaged_params[k] = v
        # BUG FIX: in-place div_ fails (or truncates incorrectly) on integer
        # tensors in modern torch; use floor division for integral params.
        if averaged_params[k].is_floating_point():
            averaged_params[k].div_(num_models)
        else:
            averaged_params[k] //= num_models
    new_state['model'] = averaged_params
    return new_state
|
def last_n_checkpoints(paths, n, update_based, upper_bound=None):
assert (len(paths) == 1)
path = paths[0]
if update_based:
pt_regexp = re.compile('checkpoint_\\d+_(\\d+)\\.pt')
else:
pt_regexp = re.compile('checkpoint(\\d+)\\.pt')
files = os.listdir(path)
entries = []
for f in files:
m = pt_regexp.fullmatch(f)
if (m is not None):
sort_key = int(m.group(1))
if ((upper_bound is None) or (sort_key <= upper_bound)):
entries.append((sort_key, m.group(0)))
if (len(entries) < n):
raise Exception('Found {} checkpoint files but need at least {}', len(entries), n)
return [os.path.join(path, x[1]) for x in sorted(entries, reverse=True)[:n]]
|
def main():
    """CLI that averages the weights of several checkpoints into one output
    checkpoint, optionally auto-selecting the last N epoch/update files."""
    parser = argparse.ArgumentParser(description='Tool to average the params of input checkpoints to produce a new checkpoint')
    parser.add_argument('--inputs', required=True, nargs='+', help='Input checkpoint file paths.')
    parser.add_argument('--output', required=True, metavar='FILE', help='Write the new checkpoint containing the averaged weights to this path.')
    num_group = parser.add_mutually_exclusive_group()
    num_group.add_argument('--num-epoch-checkpoints', type=int, help='if set, will try to find checkpoints with names checkpoint_xx.pt in the path specified by input, and average last this many of them.')
    num_group.add_argument('--num-update-checkpoints', type=int, help='if set, will try to find checkpoints with names checkpoint_ee_xx.pt in the path specified by input, and average last this many of them.')
    parser.add_argument('--checkpoint-upper-bound', type=int, help='when using --num-epoch-checkpoints, this will set an upper bound on which checkpoint to use, e.g., with --num-epoch-checkpoints=10 --checkpoint-upper-bound=50, checkpoints 41-50 would be averaged.')
    args = parser.parse_args()
    print(args)
    # Work out whether we should auto-select the last N checkpoints, and
    # whether numbering is update-based or epoch-based.
    num, is_update_based = None, False
    if args.num_update_checkpoints is not None:
        num, is_update_based = args.num_update_checkpoints, True
    elif args.num_epoch_checkpoints is not None:
        num = args.num_epoch_checkpoints
    assert (args.checkpoint_upper_bound is None) or (args.num_epoch_checkpoints is not None), '--checkpoint-upper-bound requires --num-epoch-checkpoints'
    assert (args.num_epoch_checkpoints is None) or (args.num_update_checkpoints is None), 'Cannot combine --num-epoch-checkpoints and --num-update-checkpoints'
    if num is not None:
        args.inputs = last_n_checkpoints(args.inputs, num, is_update_based, upper_bound=args.checkpoint_upper_bound)
        print('averaging checkpoints: ', args.inputs)
    new_state = average_checkpoints(args.inputs)
    torch.save(new_state, args.output)
    print('Finished writing averaged checkpoint to {}.'.format(args.output))
|
def main():
    """Build symmetrized word alignments using fast_align in both directions
    and mosesdecoder's symal, writing intermediate and final files into
    --output_dir."""
    parser = argparse.ArgumentParser(description='symmetric alignment builer')
    parser.add_argument('--fast_align_dir', help='path to fast_align build directory')
    parser.add_argument('--mosesdecoder_dir', help='path to mosesdecoder root directory')
    parser.add_argument('--sym_heuristic', help='heuristic to use for symmetrization', default='grow-diag-final-and')
    parser.add_argument('--source_file', help='path to a file with sentences in the source language')
    parser.add_argument('--target_file', help='path to a file with sentences in the target language')
    parser.add_argument('--output_dir', help='output directory')
    args = parser.parse_args()
    fast_align_bin = os.path.join(args.fast_align_dir, 'fast_align')
    symal_bin = os.path.join(args.mosesdecoder_dir, 'bin', 'symal')
    sym_fast_align_bin = os.path.join(args.mosesdecoder_dir, 'scripts', 'ems', 'support', 'symmetrize-fast-align.perl')
    # Join source/target sentence pairs into fast_align's "src ||| tgt" format.
    joined_file = os.path.join(args.output_dir, 'text.joined')
    with open(args.source_file, 'r', encoding='utf-8') as src, open(args.target_file, 'r', encoding='utf-8') as tgt:
        with open(joined_file, 'w', encoding='utf-8') as joined:
            # NOTE(review): zip_longest yields None if the files differ in
            # length, which would crash on .strip() -- presumably the inputs
            # are guaranteed parallel; confirm upstream.
            for (s, t) in zip_longest(src, tgt):
                print('{} ||| {}'.format(s.strip(), t.strip()), file=joined)
    bwd_align_file = os.path.join(args.output_dir, 'align.backward')
    # Forward alignment (source -> target).
    fwd_align_file = os.path.join(args.output_dir, 'align.forward')
    fwd_fast_align_cmd = '{FASTALIGN} -i {JOINED} -d -o -v > {FWD}'.format(FASTALIGN=fast_align_bin, JOINED=joined_file, FWD=fwd_align_file)
    assert (os.system(fwd_fast_align_cmd) == 0)
    # Backward alignment (target -> source), via fast_align's -r flag.
    bwd_align_file = os.path.join(args.output_dir, 'align.backward')
    bwd_fast_align_cmd = '{FASTALIGN} -i {JOINED} -d -o -v -r > {BWD}'.format(FASTALIGN=fast_align_bin, JOINED=joined_file, BWD=bwd_align_file)
    assert (os.system(bwd_fast_align_cmd) == 0)
    # Symmetrize the two directions with the chosen heuristic.
    sym_out_file = os.path.join(args.output_dir, 'aligned')
    sym_cmd = '{SYMFASTALIGN} {FWD} {BWD} {SRC} {TGT} {OUT} {HEURISTIC} {SYMAL}'.format(SYMFASTALIGN=sym_fast_align_bin, FWD=fwd_align_file, BWD=bwd_align_file, SRC=args.source_file, TGT=args.target_file, OUT=sym_out_file, HEURISTIC=args.sym_heuristic, SYMAL=symal_bin)
    assert (os.system(sym_cmd) == 0)
|
def main():
    """Interactively diff two argparse-style namespaces entered on stdin."""
    # NOTE(security): eval() on raw input executes arbitrary code; this is a
    # developer convenience tool only.
    ns1 = eval(input('Namespace 1: '))
    ns2 = eval(input('Namespace 2: '))

    def keys(ns):
        # Collect public attribute names only.
        return {k for k in dir(ns) if not k.startswith('_')}

    k1 = keys(ns1)
    k2 = keys(ns2)

    def print_keys(ks, ns1, ns2=None):
        # One line per key; two columns for a single namespace, three when
        # comparing values across both.
        for k in ks:
            if ns2 is None:
                print('{}\t{}'.format(k, getattr(ns1, k, None)))
            else:
                print('{}\t{}\t{}'.format(k, getattr(ns1, k, None), getattr(ns2, k, None)))

    print('Keys unique to namespace 1:')
    print_keys(k1 - k2, ns1)
    print()
    print('Keys unique to namespace 2:')
    print_keys(k2 - k1, ns2)
    print()
    print('Overlapping keys with different values:')
    differing = [k for k in (k1 & k2) if getattr(ns1, k, 'None') != getattr(ns2, k, 'None')]
    print_keys(differing, ns1, ns2)
    print()
|
def main():
    """Count blank-line-separated documents in a (possibly gzipped) text file
    and report the average number of lines and tokens per document."""
    parser = argparse.ArgumentParser()
    parser.add_argument('input')
    parser.add_argument('--gzip', action='store_true')
    args = parser.parse_args()

    def gopen():
        # gzip.open in 'r' yields bytes, the plain branch yields str; both
        # work below because only strip()/split() are used on each line.
        if args.gzip:
            return gzip.open(args.input, 'r')
        else:
            return open(args.input, 'r', encoding='utf-8')

    num_lines = []
    num_toks = []
    with gopen() as h:
        num_docs = 1
        num_lines_in_doc = 0
        num_toks_in_doc = 0
        for (i, line) in enumerate(h):
            if (len(line.strip()) == 0):
                # Blank line terminates the current document.
                num_docs += 1
                num_lines.append(num_lines_in_doc)
                num_toks.append(num_toks_in_doc)
                num_lines_in_doc = 0
                num_toks_in_doc = 0
            else:
                num_lines_in_doc += 1
                num_toks_in_doc += len(line.rstrip().split())
            # Progress dots on stderr for large files.
            if ((i % 1000000) == 0):
                print(i, file=sys.stderr, end='', flush=True)
            elif ((i % 100000) == 0):
                print('.', file=sys.stderr, end='', flush=True)
        # BUG FIX: the original dropped the final document's stats when the
        # file does not end with a blank line.
        if num_lines_in_doc > 0:
            num_lines.append(num_lines_in_doc)
            num_toks.append(num_toks_in_doc)
    print(file=sys.stderr, flush=True)
    print('found {} docs'.format(num_docs))
    print('average num lines per doc: {}'.format(np.mean(num_lines)))
    print('average num toks per doc: {}'.format(np.mean(num_toks)))
|
def get_parser():
    """Build the argument parser for dumping a binarized dataset as text."""
    parser = argparse.ArgumentParser(description='writes text from binarized file to stdout')
    parser.add_argument('--dataset-impl', choices=indexed_dataset.get_available_dataset_impl(), help='dataset implementation')
    parser.add_argument('--dict', default=None, metavar='FP', help='dictionary containing known words')
    parser.add_argument('--input', required=True, metavar='FP', help='binarized file to read')
    return parser
|
def main():
    """Print a binarized dataset to stdout, one sentence per line, using the
    dictionary for detokenization when one is supplied (raw ids otherwise)."""
    args = get_parser().parse_args()
    vocab = Dictionary.load(args.dict) if args.dict is not None else None
    dataset = data_utils.load_indexed_dataset(args.input, vocab, dataset_impl=args.dataset_impl, default='lazy')
    for tensor_line in dataset:
        if vocab is None:
            # No dictionary: emit the raw integer token ids.
            print(' '.join(str(int(tok)) for tok in tensor_line))
        else:
            print(vocab.string(tensor_line))
|
def parse_checkpoints(files):
    """Map checkpoint filenames to (sort_key, filename) pairs.

    Tries the module-level epoch-based regex first, then the update-based
    one; filenames matching neither are skipped.
    """
    entries = []
    for fname in files:
        match = pt_regexp_epoch_based.fullmatch(fname) or pt_regexp_update_based.fullmatch(fname)
        if match is not None:
            entries.append((int(match.group(1)), match.group(0)))
    return entries
|
def last_n_checkpoints(files, n):
    """Return the filenames of the n most recent checkpoints, newest first."""
    ranked = sorted(parse_checkpoints(files), reverse=True)
    return [name for _, name in ranked[:n]]
|
def every_n_checkpoints(files, n):
    """Return every n-th checkpoint filename, counted back from the newest,
    in ascending order."""
    entries = sorted(parse_checkpoints(files))
    kept = entries[::-n]  # every n-th entry, starting from the newest
    return [name for _, name in sorted(kept)]
|
def main():
    """Interactively delete old checkpoint files under the given root dirs,
    preserving checkpoint_best.pt / checkpoint_last.pt (unless overridden)
    plus any checkpoints selected by --save-last / --save-every."""
    parser = argparse.ArgumentParser(description='Recursively delete checkpoint files from `root_dir`, but preserve checkpoint_best.pt and checkpoint_last.pt')
    parser.add_argument('root_dirs', nargs='*')
    parser.add_argument('--save-last', type=int, default=0, help='number of last checkpoints to save')
    parser.add_argument('--save-every', type=int, default=0, help='interval of checkpoints to save')
    parser.add_argument('--preserve-test', action='store_true', help='preserve checkpoints in dirs that start with test_ prefix (default: delete them)')
    parser.add_argument('--delete-best', action='store_true', help='delete checkpoint_best.pt')
    parser.add_argument('--delete-last', action='store_true', help='delete checkpoint_last.pt')
    parser.add_argument('--no-dereference', action='store_true', help="don't dereference symlinks")
    args = parser.parse_args()
    files_to_desymlink = []
    files_to_preserve = []
    files_to_delete = []
    for root_dir in args.root_dirs:
        for (root, _subdirs, files) in os.walk(root_dir):
            # Checkpoints to keep in this directory per --save-last/--save-every.
            if (args.save_last > 0):
                to_save = last_n_checkpoints(files, args.save_last)
            else:
                to_save = []
            if (args.save_every > 0):
                to_save += every_n_checkpoints(files, args.save_every)
            for file in files:
                if (not pt_regexp.fullmatch(file)):
                    continue
                full_path = os.path.join(root, file)
                # Keep best/last (unless their deletion was requested) and any
                # selected checkpoints; test_* dirs only with --preserve-test.
                if (((not os.path.basename(root).startswith('test_')) or args.preserve_test) and (((file == 'checkpoint_last.pt') and (not args.delete_last)) or ((file == 'checkpoint_best.pt') and (not args.delete_best)) or (file in to_save))):
                    if (os.path.islink(full_path) and (not args.no_dereference)):
                        # Preserved symlinks get replaced by real copies below.
                        files_to_desymlink.append(full_path)
                    else:
                        files_to_preserve.append(full_path)
                else:
                    files_to_delete.append(full_path)
    if ((len(files_to_desymlink) == 0) and (len(files_to_delete) == 0)):
        print('Nothing to do.')
        sys.exit(0)
    files_to_desymlink = sorted(files_to_desymlink)
    files_to_preserve = sorted(files_to_preserve)
    files_to_delete = sorted(files_to_delete)
    # Show the full plan before touching the filesystem.
    print('Operations to perform (in order):')
    if (len(files_to_desymlink) > 0):
        for file in files_to_desymlink:
            print((' - preserve (and dereference symlink): ' + file))
    if (len(files_to_preserve) > 0):
        for file in files_to_preserve:
            print((' - preserve: ' + file))
    if (len(files_to_delete) > 0):
        for file in files_to_delete:
            print((' - delete: ' + file))
    # Require explicit confirmation since deletions are irreversible.
    while True:
        resp = input('Continue? (Y/N): ')
        if (resp.strip().lower() == 'y'):
            break
        elif (resp.strip().lower() == 'n'):
            sys.exit(0)
    print('Executing...')
    if (len(files_to_desymlink) > 0):
        for file in files_to_desymlink:
            # Replace each preserved symlink with a copy of its target so it
            # survives deletion of the target elsewhere.
            realpath = os.path.realpath(file)
            print(('rm ' + file))
            os.remove(file)
            print('cp {} {}'.format(realpath, file))
            shutil.copyfile(realpath, file)
    if (len(files_to_delete) > 0):
        for file in files_to_delete:
            print(('rm ' + file))
            os.remove(file)
|
def main():
    """Split a blank-line-delimited document file round-robin into
    --num-shards files named <input>.shard<i>."""
    parser = argparse.ArgumentParser()
    parser.add_argument('input')
    parser.add_argument('--num-shards', type=int)
    args = parser.parse_args()
    assert ((args.num_shards is not None) and (args.num_shards > 1))
    with open(args.input, 'r', encoding='utf-8') as h:
        with contextlib.ExitStack() as stack:
            # One output handle per shard; the stack closes them all together.
            outputs = [stack.enter_context(open(((args.input + '.shard') + str(i)), 'w', encoding='utf-8')) for i in range(args.num_shards)]
            doc = []
            first_doc = ([True] * args.num_shards)

            def output_doc(i):
                # Separate documents within a shard by a blank line, but not
                # before the shard's first document.
                if (not first_doc[i]):
                    outputs[i].write('\n')
                first_doc[i] = False
                for line in doc:
                    outputs[i].write(line)
                doc.clear()

            num_docs = 0
            for line in h:
                if (line.strip() == ''):
                    # Blank line terminates a document; assign it round-robin.
                    output_doc((num_docs % args.num_shards))
                    num_docs += 1
                else:
                    doc.append(line)
            # Flush the trailing document when the file does not end with a
            # blank line (no-op if doc is empty except for the separator rule).
            output_doc((num_docs % args.num_shards))
|
def main():
    """Reservoir-sample exactly k documents (or lines with --lines) from the
    input into sample_output; everything displaced or skipped goes to
    remainder_output."""
    parser = argparse.ArgumentParser()
    parser.add_argument('input')
    parser.add_argument('sample_output', help='train output file')
    parser.add_argument('remainder_output', help='valid output file')
    parser.add_argument('-k', type=int, help='remainder size')
    parser.add_argument('--lines', action='store_true', help='split lines instead of docs')
    args = parser.parse_args()
    assert (args.k is not None)
    sample = []
    remainder = []
    # Wrapped in a one-element list so the closure below can mutate it.
    num_docs = [0]

    def update_sample(doc):
        # Standard reservoir sampling: keep the first k docs, then replace a
        # random slot with probability k/(i+1); displaced or rejected docs
        # are appended to the remainder.
        if (len(sample) < args.k):
            sample.append(doc.copy())
        else:
            i = num_docs[0]
            j = random.randrange((i + 1))
            if (j < args.k):
                remainder.append(sample[j])
                sample[j] = doc.copy()
            else:
                remainder.append(doc.copy())
        num_docs[0] += 1
        doc.clear()

    with open(args.input, 'r', encoding='utf-8') as h:
        doc = []
        for (i, line) in enumerate(h):
            if (line.strip() == ''):
                # Blank line terminates a document.
                update_sample(doc)
            else:
                doc.append(line)
                if args.lines:
                    # In --lines mode every non-blank line is its own "doc".
                    update_sample(doc)
            # Progress dots on stderr for large files.
            if ((i % 1000000) == 0):
                print(i, file=sys.stderr, end='', flush=True)
            elif ((i % 100000) == 0):
                print('.', file=sys.stderr, end='', flush=True)
        if (len(doc) > 0):
            # Trailing doc when the file does not end with a blank line.
            update_sample(doc)
    print(file=sys.stderr, flush=True)
    assert (len(sample) == args.k)
    with open(args.sample_output, 'w', encoding='utf-8') as out:
        first = True
        for doc in sample:
            # Blank-line separator between docs (not needed in --lines mode).
            if ((not first) and (not args.lines)):
                out.write('\n')
            first = False
            for line in doc:
                out.write(line)
    with open(args.remainder_output, 'w', encoding='utf-8') as out:
        first = True
        for doc in remainder:
            if ((not first) and (not args.lines)):
                out.write('\n')
            first = False
            for line in doc:
                out.write(line)
|
def main():
    """Decode a file of SentencePiece pieces or ids back to raw text."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', required=True, help='sentencepiece model to use for decoding')
    parser.add_argument('--input', required=True, help='input file to decode')
    parser.add_argument('--input_format', choices=['piece', 'id'], default='piece')
    args = parser.parse_args()

    sp = spm.SentencePieceProcessor()
    sp.Load(args.model)

    if args.input_format == 'piece':
        def decode(tokens):
            return ''.join(sp.DecodePieces(tokens))
    elif args.input_format == 'id':
        def decode(tokens):
            return ''.join(sp.DecodeIds(tokens))
    else:
        raise NotImplementedError

    def tok2int(tok):
        # '<<unk>>' marks an unknown on the reference side; map it to id 0
        return int(tok) if tok != '<<unk>>' else 0

    with open(args.input, 'r', encoding='utf-8') as h:
        for line in h:
            tokens = line.rstrip().split()
            if args.input_format == 'id':
                print(decode([tok2int(tok) for tok in tokens]))
            elif args.input_format == 'piece':
                print(decode(tokens))
|
def main():
    """Encode parallel text files with a SentencePiece model.

    Reads one or more input files in lockstep, encodes each line to pieces
    or ids, and writes the encoded lines to the corresponding outputs.  A
    whole line tuple is dropped when any side is empty or falls outside the
    --min-len/--max-len bounds (bounds are counted in encoded tokens).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', required=True, help='sentencepiece model to use for encoding')
    parser.add_argument('--inputs', nargs='+', default=['-'], help='input files to filter/encode')
    parser.add_argument('--outputs', nargs='+', default=['-'], help='path to save encoded outputs')
    parser.add_argument('--output_format', choices=['piece', 'id'], default='piece')
    parser.add_argument('--min-len', type=int, metavar='N', help='filter sentence pairs with fewer than N tokens')
    parser.add_argument('--max-len', type=int, metavar='N', help='filter sentence pairs with more than N tokens')
    args = parser.parse_args()
    assert (len(args.inputs) == len(args.outputs)), 'number of input and output paths should match'

    sp = spm.SentencePieceProcessor()
    sp.Load(args.model)

    if (args.output_format == 'piece'):
        def encode(line):
            return sp.EncodeAsPieces(line)
    elif (args.output_format == 'id'):
        def encode(line):
            return list(map(str, sp.EncodeAsIds(line)))
    else:
        raise NotImplementedError

    if ((args.min_len is not None) or (args.max_len is not None)):
        def valid(line):
            # `line` is the encoded token list, so bounds are in tokens.
            return (((args.min_len is None) or (len(line) >= args.min_len))
                    and ((args.max_len is None) or (len(line) <= args.max_len)))
    else:
        # Fix: parameter was misleadingly named `lines` in this branch even
        # though it receives a single encoded line, like the branch above.
        def valid(line):
            return True

    with contextlib.ExitStack() as stack:
        # '-' selects stdin/stdout instead of a file.
        inputs = [(stack.enter_context(open(path, 'r', encoding='utf-8')) if (path != '-') else sys.stdin) for path in args.inputs]
        outputs = [(stack.enter_context(open(path, 'w', encoding='utf-8')) if (path != '-') else sys.stdout) for path in args.outputs]
        stats = {'num_empty': 0, 'num_filtered': 0}

        def encode_line(line):
            # Returns the encoded token list, or None when the line is empty
            # or rejected by the length bounds.
            line = line.strip()
            if (len(line) > 0):
                line = encode(line)
                if valid(line):
                    return line
                else:
                    stats['num_filtered'] += 1
            else:
                stats['num_empty'] += 1
            return None

        for (i, lines) in enumerate(zip(*inputs), start=1):
            enc_lines = list(map(encode_line, lines))
            # Keep the tuple only if every parallel side survived.
            if (not any(((enc_line is None) for enc_line in enc_lines))):
                for (enc_line, output_h) in zip(enc_lines, outputs):
                    print(' '.join(enc_line), file=output_h)
            if ((i % 10000) == 0):
                print('processed {} lines'.format(i), file=sys.stderr)

        print('skipped {} empty lines'.format(stats['num_empty']), file=sys.stderr)
        print('filtered {} lines'.format(stats['num_filtered']), file=sys.stderr)
|
def read_audio(fname):
    """Load an audio file and return PCM along with the sample rate.

    The wav2vec feature pipeline requires 16 kHz audio; anything else is
    rejected.
    """
    (wav, sr) = sf.read(fname)
    assert (sr == 16000.0), 'expected 16 kHz audio, got {} Hz in {}'.format(sr, fname)
    # Fix: return the actual sample rate instead of a hard-coded constant so
    # the value stays correct if the check above is ever relaxed.
    return (wav, sr)
|
class PretrainedWav2VecModel(nn.Module):
    """Frozen wrapper around a wav2vec checkpoint.

    Loads the weights from `fname`, switches the model to eval mode and
    exposes (feature, context) vectors through :meth:`forward`.
    """

    def __init__(self, fname):
        super().__init__()
        state = torch.load(fname)
        self.args = state['args']
        wav2vec = Wav2VecModel.build_model(self.args, None)
        wav2vec.load_state_dict(state['model'])
        wav2vec.eval()
        self.model = wav2vec

    def forward(self, x):
        # Feature extraction only — no gradients needed.
        with torch.no_grad():
            z = self.model.feature_extractor(x)
            if isinstance(z, tuple):
                z = z[0]
            c = self.model.feature_aggregator(z)
        return z, c
|
class EmbeddingWriterConfig(argparse.ArgumentParser):
    """Argument parser for the wav2letter++ embedding pre-computation tool."""

    def __init__(self):
        super().__init__('Pre-compute embeddings for wav2letter++ datasets')
        # Shared settings for the mandatory string options below.
        required_str = {'action': 'store', 'type': str, 'required': True}
        self.add_argument('--input', '-i', help='Input Directory', **required_str)
        self.add_argument('--output', '-o', help='Output Directory', **required_str)
        self.add_argument('--model', help='Path to model checkpoint', **required_str)
        self.add_argument('--split', help='Dataset Splits', nargs='+', **required_str)
        self.add_argument('--ext', default='wav', required=False, help='Audio file extension')
        self.add_argument('--no-copy-labels', action='store_true', help='Do not copy label files. Useful for large datasets, use --targetdir in wav2letter then.')
        self.add_argument('--use-feat', action='store_true', help="Use the feature vector ('z') instead of context vector ('c') for features")
        self.add_argument('--gpu', help='GPU to use', default=0, type=int)
|
class Prediction():
    """ Lightweight wrapper around a fairspeech embedding model """

    def __init__(self, fname, gpu=0):
        self.gpu = gpu
        self.model = PretrainedWav2VecModel(fname).cuda(gpu)

    def __call__(self, x):
        # x: numpy PCM array → batched float tensor on the configured GPU.
        batch = torch.from_numpy(x).float().cuda(self.gpu)
        with torch.no_grad():
            z, c = self.model(batch.unsqueeze(0))
        return z.squeeze(0).cpu().numpy(), c.squeeze(0).cpu().numpy()
|
class H5Writer():
    """ Write features as hdf5 file in wav2letter++ compatible format """

    def __init__(self, fname):
        self.fname = fname
        os.makedirs(os.path.dirname(self.fname), exist_ok=True)

    def write(self, data):
        channel, T = data.shape
        with h5py.File(self.fname, 'w') as out_ds:
            # wav2letter++ expects time-major flattened features plus an
            # info vector of (frequency, time steps, channels).
            out_ds['features'] = data.T.flatten()
            out_ds['info'] = np.array([(16000.0 // 160), T, channel])
|
class EmbeddingDatasetWriter(object):
    """Given a model and a wav2letter++ dataset, pre-compute and store embeddings

    Args:
        input_root, str :
            Path to the wav2letter++ dataset
        output_root, str :
            Desired output directory. Will be created if non-existent
        split, str :
            Dataset split
    """

    def __init__(self, input_root, output_root, split, model_fname, extension='wav', gpu=0, verbose=False, use_feat=False):
        assert os.path.exists(model_fname)
        self.model_fname = model_fname
        self.model = Prediction(self.model_fname, gpu)
        self.input_root = input_root
        self.output_root = output_root
        self.split = split
        self.verbose = verbose
        self.extension = extension
        self.use_feat = use_feat
        assert os.path.exists(self.input_path), "Input path '{}' does not exist".format(self.input_path)

    def _progress(self, iterable, **kwargs):
        # Progress bar only in verbose mode.
        return tqdm.tqdm(iterable, **kwargs) if self.verbose else iterable

    def require_output_path(self, fname=None):
        os.makedirs(self.get_output_path(fname), exist_ok=True)

    @property
    def input_path(self):
        return self.get_input_path()

    @property
    def output_path(self):
        return self.get_output_path()

    def get_input_path(self, fname=None):
        root = os.path.join(self.input_root, self.split)
        return root if fname is None else os.path.join(root, fname)

    def get_output_path(self, fname=None):
        root = os.path.join(self.output_root, self.split)
        return root if fname is None else os.path.join(root, fname)

    def copy_labels(self):
        self.require_output_path()
        # NOTE: label files are identified as any path that does NOT contain
        # the audio extension as a substring.
        labels = [f for f in glob.glob(self.get_input_path('*')) if self.extension not in f]
        for fname in tqdm.tqdm(labels):
            copy(fname, self.output_path)

    @property
    def input_fnames(self):
        return sorted(glob.glob(self.get_input_path('*.{}'.format(self.extension))))

    def __len__(self):
        return len(self.input_fnames)

    def write_features(self):
        paths = self.input_fnames
        # One '.h5context' output per input audio file, same basename.
        target_fnames = [
            os.path.join(self.output_path, os.path.basename(p).replace('.' + self.extension, '.h5context'))
            for p in paths
        ]
        for fname, target_fname in self._progress(zip(paths, target_fnames), total=len(self)):
            wav, sr = read_audio(fname)
            z, c = self.model(wav)
            feat = z if self.use_feat else c
            H5Writer(target_fname).write(feat)

    def __repr__(self):
        return 'EmbeddingDatasetWriter ({n_files} files)\n\tinput:\t{input_root}\n\toutput:\t{output_root}\n\tsplit:\t{split})'.format(n_files=len(self), **self.__dict__)
|
def get_parser():
    """Build the argument parser for the wav2vec manifest-creation script."""
    parser = argparse.ArgumentParser()
    parser.add_argument('root', metavar='DIR',
                        help='root directory containing flac files to index')
    parser.add_argument('--valid-percent', type=float, default=0.01, metavar='D',
                        help='percentage of data to use as validation set (between 0 and 1)')
    parser.add_argument('--dest', type=str, default='.', metavar='DIR',
                        help='output directory')
    parser.add_argument('--ext', type=str, default='flac', metavar='EXT',
                        help='extension to look for')
    parser.add_argument('--seed', type=int, default=42, metavar='N',
                        help='random seed')
    parser.add_argument('--path-must-contain', type=str, default=None, metavar='FRAG',
                        help='if set, path must contain this substring for a file to be included in the manifest')
    return parser
|
def main(args):
    """Write train/valid manifest TSVs for all audio files under args.root.

    Each manifest starts with the resolved root directory, followed by
    `relative_path<TAB>num_frames` lines.  A file goes to the valid
    manifest with probability ``args.valid_percent``.
    """
    assert 0 <= args.valid_percent <= 1.0
    dir_path = os.path.realpath(args.root)
    search_path = os.path.join(dir_path, '**/*.' + args.ext)
    rand = random.Random(args.seed)
    # Robustness fix: create the destination directory if missing instead of
    # failing on open().
    os.makedirs(args.dest, exist_ok=True)
    with open(os.path.join(args.dest, 'train.tsv'), 'w') as train_f, open(os.path.join(args.dest, 'valid.tsv'), 'w') as valid_f:
        print(dir_path, file=train_f)
        print(dir_path, file=valid_f)
        for fname in glob.iglob(search_path, recursive=True):
            file_path = os.path.realpath(fname)
            if args.path_must_contain and (args.path_must_contain not in file_path):
                continue
            frames = soundfile.info(fname).frames
            dest = train_f if rand.random() > args.valid_percent else valid_f
            print('{}\t{}'.format(os.path.relpath(file_path, dir_path), frames), file=dest)
|
class NumpyExtension(Extension):
    """Source: https://stackoverflow.com/a/54128391

    Extension subclass that appends numpy's include directory on read, so
    numpy does not need to be importable until `include_dirs` is accessed.
    """

    def __init__(self, *args, **kwargs):
        # Initialize the private store *before* calling the base constructor:
        # Extension.__init__ assigns `include_dirs`, which dispatches to the
        # property setter below.
        self.__include_dirs = []
        super().__init__(*args, **kwargs)

    @property
    def include_dirs(self):
        # Import numpy lazily, only when the include dirs are actually read.
        import numpy
        return (self.__include_dirs + [numpy.get_include()])

    @include_dirs.setter
    def include_dirs(self, dirs):
        self.__include_dirs = dirs
|
def get_dummy_dictionary(vocab_size=DEFAULT_TEST_VOCAB_SIZE):
    """Build a Dictionary of `vocab_size` dummy symbols '0'..'vocab_size-1'.

    Each symbol is registered with a fixed count of 1000.
    """
    dummy_dict = Dictionary()
    # Plain range iteration: the previous enumerate(range(...)) was redundant
    # and shadowed the builtin `id`.
    for idx in range(vocab_size):
        dummy_dict.add_symbol('{}'.format(idx), 1000)
    return dummy_dict
|
class DummyTask(FairseqTask):
    """Minimal task exposing a dummy dictionary, for model/criterion tests."""

    def __init__(self, args):
        super().__init__(args)
        self.dictionary = get_dummy_dictionary()
        # Reserve a CTC blank symbol when the test args ask for it.
        ctc_enabled = getattr(self.args, 'ctc', False)
        if ctc_enabled:
            self.dictionary.add_symbol('<ctc_blank>')
        self.tgt_dict = self.dictionary

    @property
    def target_dictionary(self):
        return self.dictionary
|
def get_dummy_task_and_parser():
    """Create a dummy task plus the parser used to build it.

    Building a fairseq model requires a task and parsed args; this helper
    provides throwaway instances of both for model/criterion tests.
    """
    parser = argparse.ArgumentParser(description='test_dummy_s2s_task', argument_default=argparse.SUPPRESS)
    DummyTask.add_args(parser)
    dummy_args = parser.parse_args([])
    task = DummyTask.setup_task(dummy_args)
    return task, parser
|
def get_dummy_input(T=100, D=80, B=5, K=100):
    """Build a dummy forward-input dict: (B, T, D) features sorted by length,
    descending src_lengths, and collated prev_output_tokens over vocab K."""
    feature = torch.randn(B, T, D)
    src_lengths = torch.from_numpy(np.random.randint(low=1, high=T, size=B, dtype=np.int64))
    src_lengths[0] = T  # make sure at least one sequence uses the full length
    prev_output_tokens = []
    for b in range(B):
        token_length = np.random.randint(low=1, high=src_lengths[b].item() + 1)
        tokens = np.random.randint(low=0, high=K, size=token_length, dtype=np.int64)
        prev_output_tokens.append(torch.from_numpy(tokens))
    prev_output_tokens = fairseq_data_utils.collate_tokens(
        prev_output_tokens, pad_idx=1, eos_idx=2, left_pad=False, move_eos_to_beginning=False)
    src_lengths, sorted_order = src_lengths.sort(descending=True)
    return {
        'src_tokens': feature.index_select(0, sorted_order),
        'src_lengths': src_lengths,
        'prev_output_tokens': prev_output_tokens,
    }
|
def get_dummy_encoder_output(encoder_out_shape=(100, 80, 5)):
    """Example dummy encoder output: random (T, B, D) float32 `encoder_out`
    plus a time-major (T, B) boolean `encoder_padding_mask`."""
    T, B, D = encoder_out_shape
    out = torch.from_numpy(np.random.randn(*encoder_out_shape).astype(np.float32))
    seq_lengths = torch.from_numpy(np.random.randint(low=1, high=T, size=B))
    # position >= length marks padding; built as (B, T) then transposed
    # in place to (T, B).
    positions = torch.arange(T).view(1, T).expand(B, -1)
    padding_mask = positions >= seq_lengths.view(B, 1).expand(-1, T)
    padding_mask.t_()
    return {'encoder_out': out, 'encoder_padding_mask': padding_mask}
|
def _current_postion_info():
cf = currentframe()
frameinfo = ' (at {}:{})'.format(os.path.basename(getframeinfo(cf).filename), cf.f_back.f_lineno)
return frameinfo
|
def check_encoder_output(encoder_output, batch_size=None):
    """we expect encoder_output to be a dict with the following
    key/value pairs:
    - encoder_out: a Torch.Tensor
    - encoder_padding_mask: a binary Torch.Tensor

    Returns:
        (True, None) when every check passes, otherwise (False, message)
        where the message includes the failing check's source location.
    """
    if (not isinstance(encoder_output, dict)):
        msg = ('FairseqEncoderModel.forward(...) must be a dict' + _current_postion_info())
        return (False, msg)
    if ('encoder_out' not in encoder_output):
        msg = ('FairseqEncoderModel.forward(...) must contain encoder_out' + _current_postion_info())
        return (False, msg)
    if ('encoder_padding_mask' not in encoder_output):
        msg = ('FairseqEncoderModel.forward(...) must contain encoder_padding_mask' + _current_postion_info())
        return (False, msg)
    if (not isinstance(encoder_output['encoder_out'], torch.Tensor)):
        msg = ('encoder_out must be a torch.Tensor' + _current_postion_info())
        return (False, msg)
    if (encoder_output['encoder_out'].dtype != torch.float32):
        msg = ('encoder_out must have float32 dtype' + _current_postion_info())
        return (False, msg)
    # A None mask is allowed (no padding); validate type/shape only when set.
    mask = encoder_output['encoder_padding_mask']
    if (mask is not None):
        if (not isinstance(mask, torch.Tensor)):
            msg = ('encoder_padding_mask must be a torch.Tensor' + _current_postion_info())
            return (False, msg)
        # uint8 accepted for torch versions that predate torch.bool.
        if ((mask.dtype != torch.uint8) and ((not hasattr(torch, 'bool')) or (mask.dtype != torch.bool))):
            msg = ('encoder_padding_mask must have dtype of uint8' + _current_postion_info())
            return (False, msg)
        if (mask.dim() != 2):
            msg = ('we expect encoder_padding_mask to be a 2-d tensor, in shape (T, B)' + _current_postion_info())
            return (False, msg)
        if ((batch_size is not None) and (mask.size(1) != batch_size)):
            msg = (('we expect encoder_padding_mask to be a 2-d tensor, with size(1)' + ' being the batch size') + _current_postion_info())
            return (False, msg)
    return (True, None)
|
def check_decoder_output(decoder_output):
    """we expect output from a decoder is a tuple with the following constraint:
    - the first element is a torch.Tensor
    - the second element can be anything (reserved for future use)

    Returns:
        (True, None) when every check passes, otherwise (False, message).
    """
    if (not isinstance(decoder_output, tuple)):
        msg = ('FariseqDecoder output must be a tuple' + _current_postion_info())
        return (False, msg)
    if (len(decoder_output) != 2):
        msg = ('FairseqDecoder output must be 2-elem tuple' + _current_postion_info())
        return (False, msg)
    if (not isinstance(decoder_output[0], torch.Tensor)):
        msg = ('FariseqDecoder output[0] must be a torch.Tensor' + _current_postion_info())
        return (False, msg)
    return (True, None)
|
class TestBaseFairseqModelBase(unittest.TestCase):
    """Common scaffolding for unittests of `BaseFairseqModel` subclasses."""

    @classmethod
    def setUpClass(cls):
        # Only concrete subclasses should execute; the base itself is skipped.
        if cls is TestBaseFairseqModelBase:
            raise unittest.SkipTest('Skipping test case in base')
        super().setUpClass()

    def setUpModel(self, model):
        self.assertTrue(isinstance(model, BaseFairseqModel))
        self.model = model

    def setupInput(self):
        pass

    def setUp(self):
        self.model = None
        self.forward_input = None
|
class TestFairseqEncoderDecoderModelBase(TestBaseFairseqModelBase):
    """Scaffolding for tests of FairseqEncoderDecoderModel (formerly known as
    `FairseqModel`); concrete tests must derive from this class."""

    @classmethod
    def setUpClass(cls):
        if cls is TestFairseqEncoderDecoderModelBase:
            raise unittest.SkipTest('Skipping test case in base')
        super().setUpClass()

    def setUpModel(self, model_cls, extra_args_setters=None):
        self.assertTrue(issubclass(model_cls, (FairseqEncoderDecoderModel, FairseqModel)), msg='This class only tests for FairseqModel subclasses')
        task, parser = get_dummy_task_and_parser()
        model_cls.add_args(parser)
        model_args = parser.parse_args([])
        # Optional hooks let individual tests tweak the default args.
        if extra_args_setters is not None:
            for apply_setter in extra_args_setters:
                apply_setter(model_args)
        self.model = model_cls.build_model(model_args, task)

    def setUpInput(self, input=None):
        self.forward_input = get_dummy_input() if input is None else input

    def setUp(self):
        super().setUp()

    def test_forward(self):
        if self.model and self.forward_input:
            forward_output = self.model.forward(**self.forward_input)
            succ, msg = check_decoder_output(forward_output)
            if not succ:
                self.assertTrue(succ, msg=msg)
            self.forward_output = forward_output

    def test_get_normalized_probs(self):
        if self.model and self.forward_input:
            forward_output = self.model.forward(**self.forward_input)
            logprob = self.model.get_normalized_probs(forward_output, log_probs=True)
            prob = self.model.get_normalized_probs(forward_output, log_probs=False)
            # Both outputs must be tensors carrying a `batch_first` attribute.
            self.assertTrue(hasattr(logprob, 'batch_first'))
            self.assertTrue(hasattr(prob, 'batch_first'))
            self.assertTrue(torch.is_tensor(logprob))
            self.assertTrue(torch.is_tensor(prob))
|
class TestFairseqEncoderModelBase(TestBaseFairseqModelBase):
    """Scaffolding for tests of FairseqEncoderModel subclasses."""

    @classmethod
    def setUpClass(cls):
        if cls is TestFairseqEncoderModelBase:
            raise unittest.SkipTest('Skipping test case in base')
        super().setUpClass()

    def setUpModel(self, model_cls, extra_args_setters=None):
        self.assertTrue(issubclass(model_cls, FairseqEncoderModel), msg='This class is only used for testing FairseqEncoderModel')
        task, parser = get_dummy_task_and_parser()
        model_cls.add_args(parser)
        model_args = parser.parse_args([])
        if extra_args_setters is not None:
            for apply_setter in extra_args_setters:
                apply_setter(model_args)
        self.model = model_cls.build_model(model_args, task)

    def setUpInput(self, input=None):
        self.forward_input = get_dummy_input() if input is None else input
        # Encoder-only models take no decoder history.
        self.forward_input.pop('prev_output_tokens', None)

    def setUp(self):
        super().setUp()

    def test_forward(self):
        if self.forward_input and self.model:
            bsz = self.forward_input['src_tokens'].size(0)
            forward_output = self.model.forward(**self.forward_input)
            succ, msg = check_encoder_output(forward_output, batch_size=bsz)
            if not succ:
                self.assertTrue(succ, msg=msg)
            self.forward_output = forward_output

    def test_get_normalized_probs(self):
        if self.model and self.forward_input:
            forward_output = self.model.forward(**self.forward_input)
            logprob = self.model.get_normalized_probs(forward_output, log_probs=True)
            prob = self.model.get_normalized_probs(forward_output, log_probs=False)
            self.assertTrue(hasattr(logprob, 'batch_first'))
            self.assertTrue(hasattr(prob, 'batch_first'))
            self.assertTrue(torch.is_tensor(logprob))
            self.assertTrue(torch.is_tensor(prob))
|
class TestFairseqEncoderBase(unittest.TestCase):
    """Scaffolding for FairseqEncoder tests; concrete tests derive from this."""

    @classmethod
    def setUpClass(cls):
        if cls is TestFairseqEncoderBase:
            raise unittest.SkipTest('Skipping test case in base')
        super().setUpClass()

    def setUpEncoder(self, encoder):
        self.assertTrue(isinstance(encoder, FairseqEncoder), msg='This class is only used for test FairseqEncoder')
        self.encoder = encoder

    def setUpInput(self, input=None):
        self.forward_input = get_dummy_input() if input is None else input
        # Encoders take no decoder history.
        self.forward_input.pop('prev_output_tokens', None)

    def setUp(self):
        self.encoder = None
        self.forward_input = None

    def test_forward(self):
        if self.encoder and self.forward_input:
            bsz = self.forward_input['src_tokens'].size(0)
            forward_output = self.encoder.forward(**self.forward_input)
            succ, msg = check_encoder_output(forward_output, batch_size=bsz)
            if not succ:
                self.assertTrue(succ, msg=msg)
            self.forward_output = forward_output
|
class TestFairseqDecoderBase(unittest.TestCase):
    """
    base class to test FairseqDecoder
    """

    @classmethod
    def setUpClass(cls):
        if cls is TestFairseqDecoderBase:
            raise unittest.SkipTest('Skipping test case in base')
        super().setUpClass()

    def setUpDecoder(self, decoder):
        self.assertTrue(isinstance(decoder, FairseqDecoder), msg='This class is only used for test FairseqDecoder')
        self.decoder = decoder

    def setUpInput(self, input=None):
        # Decoders consume an encoder-output dict rather than raw features.
        self.forward_input = get_dummy_encoder_output() if input is None else input

    def setUpPrevOutputTokens(self, tokens=None):
        if tokens is None:
            self.encoder_input = get_dummy_input()
            self.prev_output_tokens = self.encoder_input['prev_output_tokens']
        else:
            self.prev_output_tokens = tokens

    def setUp(self):
        self.decoder = None
        self.forward_input = None
        self.prev_output_tokens = None

    def test_forward(self):
        if (self.decoder is not None) and (self.forward_input is not None) and (self.prev_output_tokens is not None):
            forward_output = self.decoder.forward(prev_output_tokens=self.prev_output_tokens, encoder_out=self.forward_input)
            succ, msg = check_decoder_output(forward_output)
            if not succ:
                self.assertTrue(succ, msg=msg)
            # Consistency fix: store the result in `forward_output` like the
            # sibling base classes; previously this clobbered `forward_input`.
            self.forward_output = forward_output
|
class DummyEncoderModel(FairseqEncoderModel):
    """Encoder model stub whose outputs are interpreted as probabilities."""

    def __init__(self, encoder):
        super().__init__(encoder)

    @classmethod
    def build_model(cls, args, task):
        return cls(DummyEncoder())

    def get_logits(self, net_output):
        # Inverse sigmoid: treat encoder_out as probabilities, recover logits.
        probs = net_output['encoder_out']
        return torch.log(torch.div(probs, 1 - probs))
|
class DummyEncoder(FairseqEncoder):
    """Pass-through encoder: echoes its input plus a length-derived mask."""

    def __init__(self):
        super().__init__(None)

    def forward(self, src_tokens, src_lengths):
        mask, _ = lengths_to_encoder_padding_mask(src_lengths)
        return {'encoder_out': src_tokens, 'encoder_padding_mask': mask}
|
class CrossEntropyCriterionTestBase(unittest.TestCase):
    """Shared fixtures for cross-entropy criterion tests.

    Subclasses must set `self.criterion_cls` before calling super().setUp().
    """

    @classmethod
    def setUpClass(cls):
        if cls is CrossEntropyCriterionTestBase:
            raise unittest.SkipTest('Skipping base class test case')
        super().setUpClass()

    def setUpArgs(self):
        args = argparse.Namespace()
        args.sentence_avg = False
        args.threshold = 0.1
        return args

    def setUp(self):
        args = self.setUpArgs()
        self.model = DummyEncoderModel(encoder=DummyEncoder())
        self.criterion = self.criterion_cls(args=args, task=DummyTask(args))

    def get_src_tokens(self, correct_prediction, aggregate):
        """
        correct_prediction: True if the net_output (src_tokens) should
        predict the correct target
        aggregate: True if the criterion expects net_output (src_tokens)
        aggregated across time axis
        """
        predicted_idx = 0 if correct_prediction else 1
        if aggregate:
            src_tokens = torch.zeros((2, 2), dtype=torch.float)
            src_tokens[:, predicted_idx] = 1.0
        else:
            src_tokens = torch.zeros((2, 10, 2), dtype=torch.float)
            src_tokens[:, :, predicted_idx] = 1.0
        return src_tokens

    def get_target(self, soft_target):
        if soft_target:
            # one-hot over two classes, class 0 is correct
            target = torch.zeros((2, 2), dtype=torch.float)
            target[:, 0] = 1.0
        else:
            target = torch.zeros((2, 10), dtype=torch.long)
        return target

    def get_test_sample(self, correct, soft_target, aggregate):
        src_tokens = self.get_src_tokens(correct, aggregate)
        target = self.get_target(soft_target)
        L = src_tokens.size(1)
        return {
            'net_input': {'src_tokens': src_tokens, 'src_lengths': torch.tensor([L])},
            'target': target,
            'ntokens': src_tokens.size(0) * src_tokens.size(1),
        }
|
class TestSeq2SeqCollator(unittest.TestCase):
    """Unit test for Seq2SeqCollater batching of (frames, target) samples."""

    def test_collate(self):
        eos_idx = 1
        pad_idx = 0
        collater = Seq2SeqCollater(feature_index=0, label_index=1, pad_index=pad_idx, eos_index=eos_idx)
        frames1 = np.array([[7, 8], [9, 10]])
        frames2 = np.array([[1, 2], [3, 4], [5, 6]])
        target1 = np.array([4, 2, 3, eos_idx])
        target2 = np.array([3, 2, eos_idx])
        batch = collater.collate([
            {'id': 0, 'data': [frames1, target1]},
            {'id': 1, 'data': [frames2, target2]},
        ])
        # Samples come back sorted by source length, so sample 1 (3 frames)
        # precedes sample 0 (2 frames, padded).
        self.assertTensorEqual(batch['id'], torch.tensor([1, 0]))
        self.assertEqual(batch['ntokens'], 7)
        self.assertTensorEqual(batch['net_input']['src_tokens'], torch.tensor([[[1, 2], [3, 4], [5, 6]], [[7, 8], [9, 10], [pad_idx, pad_idx]]]))
        self.assertTensorEqual(batch['net_input']['prev_output_tokens'], torch.tensor([[eos_idx, 3, 2, pad_idx], [eos_idx, 4, 2, 3]]))
        self.assertTensorEqual(batch['net_input']['src_lengths'], torch.tensor([3, 2]))
        self.assertTensorEqual(batch['target'], torch.tensor([[3, 2, eos_idx, pad_idx], [4, 2, 3, eos_idx]]))
        self.assertEqual(batch['nsentences'], 2)

    def assertTensorEqual(self, t1, t2):
        self.assertEqual(t1.size(), t2.size(), 'size mismatch')
        # all elements equal <=> no element differs
        self.assertEqual(t1.ne(t2).long().sum(), 0)
|
class CrossEntropyWithAccCriterionTest(CrossEntropyCriterionTestBase):
    """Checks the accuracy bookkeeping of CrossEntropyWithAccCriterion."""

    def setUp(self):
        self.criterion_cls = CrossEntropyWithAccCriterion
        super().setUp()

    def _check_logging_output(self, logging_output, expected_correct):
        # 2 sentences x 10 time steps = 20 tokens in every sample.
        assert logging_output['correct'] == expected_correct
        assert logging_output['total'] == 20
        assert logging_output['sample_size'] == 20
        assert logging_output['ntokens'] == 20

    def test_cross_entropy_all_correct(self):
        sample = self.get_test_sample(correct=True, soft_target=False, aggregate=False)
        loss, sample_size, logging_output = self.criterion(self.model, sample, 'sum', log_probs=True)
        self._check_logging_output(logging_output, 20)

    def test_cross_entropy_all_wrong(self):
        sample = self.get_test_sample(correct=False, soft_target=False, aggregate=False)
        loss, sample_size, logging_output = self.criterion(self.model, sample, 'sum', log_probs=True)
        self._check_logging_output(logging_output, 0)
|
class VGGTransformerModelTest_mid(TestFairseqEncoderDecoderModelBase):
    """vggtransformer_1 variant, trimmed to 3 transformer layers for speed."""

    def setUp(self):
        def override_config(args):
            # vggtransformer_1 normally uses 14 transformer layers, which is
            # too expensive for a unit test; cut it down to 3.
            args.transformer_enc_config = '((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 3'

        super().setUp()
        self.setUpModel(VGGTransformerModel, [vggtransformer_1, override_config])
        self.setUpInput(get_dummy_input(T=50, D=80, B=5, K=DEFAULT_TEST_VOCAB_SIZE))
|
class VGGTransformerModelTest_big(TestFairseqEncoderDecoderModelBase):
    """vggtransformer_2 variant, trimmed to 3 transformer layers for speed."""

    def setUp(self):
        def override_config(args):
            # vggtransformer_2 normally uses 16 transformer layers, which is
            # too expensive for a unit test; cut it down to 3.
            args.transformer_enc_config = '((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 3'

        super().setUp()
        self.setUpModel(VGGTransformerModel, [vggtransformer_2, override_config])
        self.setUpInput(get_dummy_input(T=50, D=80, B=5, K=DEFAULT_TEST_VOCAB_SIZE))
|
class VGGTransformerModelTest_base(TestFairseqEncoderDecoderModelBase):
    """vggtransformer_base variant, trimmed to 3 transformer layers for speed."""

    def setUp(self):
        def override_config(args):
            # vggtransformer_base normally uses 12 transformer layers, which
            # is too expensive for a unit test; cut it down to 3.
            args.transformer_enc_config = '((512, 8, 2048, True, 0.15, 0.15, 0.15),) * 3'

        super().setUp()
        self.setUpModel(VGGTransformerModel, [vggtransformer_base, override_config])
        self.setUpInput(get_dummy_input(T=50, D=80, B=5, K=DEFAULT_TEST_VOCAB_SIZE))
|
class VGGTransformerEncoderTest(TestFairseqEncoderBase):
    """Runs the base encoder checks over several VGGTransformerEncoder
    context/sampling configurations."""

    def setUp(self):
        super().setUp()
        self.setUpInput(get_dummy_input(T=50, D=80, B=5))

    def test_forward(self):
        print('1. test standard vggtransformer')
        self.setUpEncoder(VGGTransformerEncoder(input_feat_per_channel=80))
        super().test_forward()
        print('2. test vggtransformer with limited right context')
        self.setUpEncoder(VGGTransformerEncoder(input_feat_per_channel=80, transformer_context=((- 1), 5)))
        super().test_forward()
        print('3. test vggtransformer with limited left context')
        self.setUpEncoder(VGGTransformerEncoder(input_feat_per_channel=80, transformer_context=(5, (- 1))))
        super().test_forward()
        print('4. test vggtransformer with limited right context and sampling')
        self.setUpEncoder(VGGTransformerEncoder(input_feat_per_channel=80, transformer_context=((- 1), 12), transformer_sampling=(2, 2)))
        super().test_forward()
        print('5. test vggtransformer with windowed context and sampling')
        self.setUpEncoder(VGGTransformerEncoder(input_feat_per_channel=80, transformer_context=(12, 12), transformer_sampling=(2, 2)))
        # Bug fix: configuration 5 previously set up the encoder but never ran
        # the forward check; exercise it like the other configurations.
        super().test_forward()
|
class TransformerDecoderTest(TestFairseqDecoderBase):
    """Wires a TransformerDecoder with dummy inputs for the base checks."""

    def setUp(self):
        super().setUp()
        # `vocab` replaces a local that shadowed the builtin `dict`.
        vocab = get_dummy_dictionary(vocab_size=DEFAULT_TEST_VOCAB_SIZE)
        decoder = TransformerDecoder(vocab)
        dummy_encoder_output = get_dummy_encoder_output(encoder_out_shape=(50, 5, 256))
        self.setUpDecoder(decoder)
        self.setUpInput(dummy_encoder_output)
        self.setUpPrevOutputTokens()
|
class ModelWithSharedParameter(nn.Module):
    """Small model where FC2 shares FC1's weight and bias, used to exercise
    checkpoint averaging in the presence of shared parameters."""

    def __init__(self):
        super(ModelWithSharedParameter, self).__init__()
        self.embedding = nn.Embedding(1000, 200)
        self.FC1 = nn.Linear(200, 200)
        self.FC2 = nn.Linear(200, 200)
        # Tie FC2's parameters to FC1's.
        self.FC2.weight = nn.Parameter(self.FC1.weight)
        self.FC2.bias = nn.Parameter(self.FC1.bias)
        self.relu = nn.ReLU()

    def forward(self, input):
        # Bug fix: this previously referenced `self.ReLU`, which does not
        # exist (the module is registered as `self.relu`), so calling
        # forward() raised AttributeError.
        return self.FC2(self.relu(self.FC1(input))) + self.FC1(input)
|
class TestAverageCheckpoints(unittest.TestCase):
    """Tests for `average_checkpoints`."""

    def test_average_checkpoints(self):
        # Two checkpoints with double, float and int tensors.  The expected
        # int result [4, 5, 5] reflects truncating integer division, e.g.
        # (7 + 2) // 2 == 4.
        params_0 = collections.OrderedDict([('a', torch.DoubleTensor([100.0])), ('b', torch.FloatTensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])), ('c', torch.IntTensor([7, 8, 9]))])
        params_1 = collections.OrderedDict([('a', torch.DoubleTensor([1.0])), ('b', torch.FloatTensor([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]])), ('c', torch.IntTensor([2, 2, 2]))])
        params_avg = collections.OrderedDict([('a', torch.DoubleTensor([50.5])), ('b', torch.FloatTensor([[1.0, 1.5, 2.0], [2.5, 3.0, 3.5]])), ('c', torch.IntTensor([4, 5, 5]))])
        (fd_0, path_0) = tempfile.mkstemp()
        (fd_1, path_1) = tempfile.mkstemp()
        torch.save(collections.OrderedDict([('model', params_0)]), path_0)
        torch.save(collections.OrderedDict([('model', params_1)]), path_1)
        output = average_checkpoints([path_0, path_1])['model']
        # Remove the temp checkpoint files before asserting.
        os.close(fd_0)
        os.remove(path_0)
        os.close(fd_1)
        os.remove(path_1)
        for ((k_expected, v_expected), (k_out, v_out)) in zip(params_avg.items(), output.items()):
            self.assertEqual(k_expected, k_out, 'Key mismatch - expected {} but found {}. (Expected list of keys: {} vs actual list of keys: {})'.format(k_expected, k_out, params_avg.keys(), output.keys()))
            np.testing.assert_allclose(v_expected.numpy(), v_out.numpy(), err_msg='Tensor value mismatch for key {}'.format(k_expected))

    def test_average_checkpoints_with_shared_parameters(self):
        # Averaging must still work when several parameters share storage
        # (FC1/FC2 in ModelWithSharedParameter).
        def _construct_model_with_shared_parameters(path, value):
            # Save a ModelWithSharedParameter whose FC1 weight is constant.
            m = ModelWithSharedParameter()
            nn.init.constant_(m.FC1.weight, value)
            torch.save({'model': m.state_dict()}, path)
            return m
        tmpdir = tempfile.mkdtemp()
        paths = []
        path = os.path.join(tmpdir, 'm1.pt')
        m1 = _construct_model_with_shared_parameters(path, 1.0)
        paths.append(path)
        path = os.path.join(tmpdir, 'm2.pt')
        m2 = _construct_model_with_shared_parameters(path, 2.0)
        paths.append(path)
        path = os.path.join(tmpdir, 'm3.pt')
        m3 = _construct_model_with_shared_parameters(path, 3.0)
        paths.append(path)
        new_model = average_checkpoints(paths)
        self.assertTrue(torch.equal(new_model['model']['embedding.weight'], (((m1.embedding.weight + m2.embedding.weight) + m3.embedding.weight) / 3.0)))
        self.assertTrue(torch.equal(new_model['model']['FC1.weight'], (((m1.FC1.weight + m2.FC1.weight) + m3.FC1.weight) / 3.0)))
        self.assertTrue(torch.equal(new_model['model']['FC2.weight'], (((m1.FC2.weight + m2.FC2.weight) + m3.FC2.weight) / 3.0)))
        shutil.rmtree(tmpdir)
|
class TestTranslation(unittest.TestCase):
    """End-to-end smoke tests for translation models.

    Each test builds a tiny random corpus, binarizes it, trains the model for
    one epoch via the real CLI entry points, and (usually) runs generation.
    These only check that the pipelines run without crashing, not quality.
    """

    def setUp(self):
        # Silence logging; train/generate are extremely chatty.
        logging.disable(logging.CRITICAL)

    def tearDown(self):
        logging.disable(logging.NOTSET)

    def test_fconv(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_fconv') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                train_translation_model(data_dir, 'fconv_iwslt_de_en')
                generate_main(data_dir)

    def test_raw(self):
        # Same pipeline but with the raw (non-binarized) dataset backend.
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_fconv_raw') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir, ['--dataset-impl', 'raw'])
                train_translation_model(data_dir, 'fconv_iwslt_de_en', ['--dataset-impl', 'raw'])
                generate_main(data_dir, ['--dataset-impl', 'raw'])

    @unittest.skipIf((not torch.cuda.is_available()), 'test requires a GPU')
    def test_fp16(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_fp16') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                train_translation_model(data_dir, 'fconv_iwslt_de_en', ['--fp16'])
                generate_main(data_dir)

    @unittest.skipIf((not torch.cuda.is_available()), 'test requires a GPU')
    def test_memory_efficient_fp16(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_memory_efficient_fp16') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                train_translation_model(data_dir, 'fconv_iwslt_de_en', ['--memory-efficient-fp16'])
                generate_main(data_dir)

    def test_update_freq(self):
        # Gradient accumulation over 3 mini-batches per update.
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_update_freq') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                train_translation_model(data_dir, 'fconv_iwslt_de_en', ['--update-freq', '3'])
                generate_main(data_dir)

    def test_max_positions(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_max_positions') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                # Training with a tiny max target length must fail with a
                # message pointing at --skip-invalid-size-inputs-valid-test...
                with self.assertRaises(Exception) as context:
                    train_translation_model(data_dir, 'fconv_iwslt_de_en', ['--max-target-positions', '5'])
                self.assertTrue(('skip this example with --skip-invalid-size-inputs-valid-test' in str(context.exception)))
                # ...and succeed once that flag is supplied.
                train_translation_model(data_dir, 'fconv_iwslt_de_en', ['--max-target-positions', '5', '--skip-invalid-size-inputs-valid-test'])
                with self.assertRaises(Exception) as context:
                    generate_main(data_dir)
                generate_main(data_dir, ['--skip-invalid-size-inputs-valid-test'])

    def test_generation(self):
        # Exercise the various decoding strategies on one trained model.
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_sampling') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                train_translation_model(data_dir, 'fconv_iwslt_de_en')
                generate_main(data_dir, ['--sampling', '--temperature', '2', '--beam', '2', '--nbest', '2'])
                generate_main(data_dir, ['--sampling', '--sampling-topk', '3', '--beam', '2', '--nbest', '2'])
                generate_main(data_dir, ['--sampling', '--sampling-topp', '0.2', '--beam', '2', '--nbest', '2'])
                generate_main(data_dir, ['--diversity-rate', '0.5', '--beam', '6'])
                # Diverse beam search is incompatible with --match-source-len.
                with self.assertRaises(ValueError):
                    generate_main(data_dir, ['--diverse-beam-groups', '4', '--match-source-len'])
                generate_main(data_dir, ['--prefix-size', '2'])

    def test_eval_bleu(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_eval_bleu') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                train_translation_model(data_dir, 'fconv_iwslt_de_en', ['--eval-bleu', '--eval-bleu-print-samples', '--eval-bleu-remove-bpe', '--eval-bleu-detok', 'space', '--eval-bleu-args', '{"beam": 4, "min_len": 10}'])

    def test_lstm(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_lstm') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                train_translation_model(data_dir, 'lstm_wiseman_iwslt_de_en', ['--encoder-layers', '2', '--decoder-layers', '2', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8', '--decoder-out-embed-dim', '8'])
                generate_main(data_dir)

    def test_lstm_bidirectional(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_lstm_bidirectional') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                train_translation_model(data_dir, 'lstm', ['--encoder-layers', '2', '--encoder-bidirectional', '--encoder-hidden-size', '16', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8', '--decoder-out-embed-dim', '8', '--decoder-layers', '2'])
                generate_main(data_dir)

    def test_transformer(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_transformer') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                train_translation_model(data_dir, 'transformer_iwslt_de_en', ['--encoder-layers', '2', '--decoder-layers', '2', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8'], run_validation=True)
                generate_main(data_dir)

    def test_multilingual_transformer(self):
        # Sweep all combinations of encoder/decoder language-token flags.
        encoder_langtok_flags = [[], ['--encoder-langtok', 'src'], ['--encoder-langtok', 'tgt']]
        decoder_langtok_flags = [[], ['--decoder-langtok']]
        with contextlib.redirect_stdout(StringIO()):
            for i in range(len(encoder_langtok_flags)):
                for j in range(len(decoder_langtok_flags)):
                    enc_ltok_flag = encoder_langtok_flags[i]
                    dec_ltok_flag = decoder_langtok_flags[j]
                    with tempfile.TemporaryDirectory(f'test_multilingual_transformer_{i}_{j}') as data_dir:
                        create_dummy_data(data_dir)
                        preprocess_translation_data(data_dir)
                        train_translation_model(data_dir, arch='multilingual_transformer', task='multilingual_translation', extra_flags=((['--encoder-layers', '2', '--decoder-layers', '2', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8'] + enc_ltok_flag) + dec_ltok_flag), lang_flags=['--lang-pairs', 'in-out,out-in'], run_validation=True, extra_valid_flags=(enc_ltok_flag + dec_ltok_flag))
                        generate_main(data_dir, extra_flags=((['--task', 'multilingual_translation', '--lang-pairs', 'in-out,out-in', '--source-lang', 'in', '--target-lang', 'out'] + enc_ltok_flag) + dec_ltok_flag))

    def test_transformer_cross_self_attention(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_transformer_cross_self_attention') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                # NOTE(review): '--decoder-embed-dim' appears twice below;
                # argparse keeps the last value so it is harmless, but the
                # duplicate is likely unintended.
                train_translation_model(data_dir, 'transformer_iwslt_de_en', ['--encoder-layers', '2', '--decoder-layers', '2', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8', '--decoder-embed-dim', '8', '--no-cross-attention', '--cross-self-attention', '--layer-wise-attention'], run_validation=True)
                generate_main(data_dir, extra_flags=[])

    def test_lightconv(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_lightconv') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                train_translation_model(data_dir, 'lightconv_iwslt_de_en', ['--encoder-conv-type', 'lightweight', '--decoder-conv-type', 'lightweight', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8'])
                generate_main(data_dir)

    def test_dynamicconv(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_dynamicconv') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                train_translation_model(data_dir, 'lightconv_iwslt_de_en', ['--encoder-conv-type', 'dynamic', '--decoder-conv-type', 'dynamic', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8'])
                generate_main(data_dir)

    def test_cmlm_transformer(self):
        # Non-autoregressive models below use the translation_lev task and
        # iterative refinement at decoding time.
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_cmlm_transformer') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir, ['--joined-dictionary'])
                train_translation_model(data_dir, 'cmlm_transformer', ['--apply-bert-init', '--criterion', 'nat_loss', '--noise', 'full_mask', '--pred-length-offset', '--length-loss-factor', '0.1'], task='translation_lev')
                generate_main(data_dir, ['--task', 'translation_lev', '--iter-decode-max-iter', '9', '--iter-decode-eos-penalty', '0', '--print-step'])

    def test_levenshtein_transformer(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_levenshtein_transformer') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir, ['--joined-dictionary'])
                train_translation_model(data_dir, 'levenshtein_transformer', ['--apply-bert-init', '--early-exit', '6,6,6', '--criterion', 'nat_loss'], task='translation_lev')
                generate_main(data_dir, ['--task', 'translation_lev', '--iter-decode-max-iter', '9', '--iter-decode-eos-penalty', '0', '--print-step'])

    def test_nonautoregressive_transformer(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_nonautoregressive_transformer') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir, ['--joined-dictionary'])
                train_translation_model(data_dir, 'nonautoregressive_transformer', ['--apply-bert-init', '--src-embedding-copy', '--criterion', 'nat_loss', '--noise', 'full_mask', '--pred-length-offset', '--length-loss-factor', '0.1'], task='translation_lev')
                generate_main(data_dir, ['--task', 'translation_lev', '--iter-decode-max-iter', '0', '--iter-decode-eos-penalty', '0', '--print-step'])

    def test_iterative_nonautoregressive_transformer(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_iterative_nonautoregressive_transformer') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir, ['--joined-dictionary'])
                train_translation_model(data_dir, 'iterative_nonautoregressive_transformer', ['--apply-bert-init', '--src-embedding-copy', '--criterion', 'nat_loss', '--noise', 'full_mask', '--stochastic-approx', '--dae-ratio', '0.5', '--train-step', '3'], task='translation_lev')
                generate_main(data_dir, ['--task', 'translation_lev', '--iter-decode-max-iter', '9', '--iter-decode-eos-penalty', '0', '--print-step'])

    def test_insertion_transformer(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_insertion_transformer') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir, ['--joined-dictionary'])
                train_translation_model(data_dir, 'insertion_transformer', ['--apply-bert-init', '--criterion', 'nat_loss', '--noise', 'random_mask'], task='translation_lev')
                generate_main(data_dir, ['--task', 'translation_lev', '--iter-decode-max-iter', '9', '--iter-decode-eos-penalty', '0', '--print-step'])

    def test_mixture_of_experts(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_moe') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                train_translation_model(data_dir, 'transformer_iwslt_de_en', ['--task', 'translation_moe', '--method', 'hMoElp', '--mean-pool-gating-network', '--num-experts', '3', '--encoder-layers', '2', '--decoder-layers', '2', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8'])
                generate_main(data_dir, ['--task', 'translation_moe', '--method', 'hMoElp', '--mean-pool-gating-network', '--num-experts', '3', '--gen-expert', '0'])

    def test_alignment(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_alignment') as data_dir:
                create_dummy_data(data_dir, alignment=True)
                preprocess_translation_data(data_dir, ['--align-suffix', 'align'])
                train_translation_model(data_dir, 'transformer_align', ['--encoder-layers', '2', '--decoder-layers', '2', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8', '--load-alignments', '--alignment-layer', '1', '--criterion', 'label_smoothed_cross_entropy_with_alignment'], run_validation=True)
                generate_main(data_dir)
|
class TestStories(unittest.TestCase):
    """Smoke test for the fconv self-attention story-generation model,
    including the two-stage pretrained/fusion training setup."""

    def setUp(self):
        logging.disable(logging.CRITICAL)

    def tearDown(self):
        logging.disable(logging.NOTSET)

    def test_fconv_self_att_wp(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_fconv_self_att_wp') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                config = ['--encoder-layers', '[(128, 3)] * 2', '--decoder-layers', '[(128, 3)] * 2', '--decoder-attention', 'True', '--encoder-attention', 'False', '--gated-attention', 'True', '--self-attention', 'True', '--project-input', 'True', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8', '--decoder-out-embed-dim', '8', '--multihead-self-attention-nheads', '2']
                train_translation_model(data_dir, 'fconv_self_att_wp', config)
                generate_main(data_dir)
                # Fusion model training: reuse the checkpoint just trained as
                # the "pretrained" model and train again on top of it.
                os.rename(os.path.join(data_dir, 'checkpoint_last.pt'), os.path.join(data_dir, 'pretrained.pt'))
                config.extend(['--pretrained', 'True', '--pretrained-checkpoint', os.path.join(data_dir, 'pretrained.pt'), '--save-dir', os.path.join(data_dir, 'fusion_model')])
                train_translation_model(data_dir, 'fconv_self_att_wp', config)
|
class TestLanguageModeling(unittest.TestCase):
    """Smoke tests for language models: preprocess a monolingual dummy
    corpus, train one epoch, then score with eval_lm and (for some archs)
    run generation in language_modeling mode."""

    def setUp(self):
        logging.disable(logging.CRITICAL)

    def tearDown(self):
        logging.disable(logging.NOTSET)

    def test_fconv_lm(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_fconv_lm') as data_dir:
                create_dummy_data(data_dir)
                preprocess_lm_data(data_dir)
                train_language_model(data_dir, 'fconv_lm', ['--decoder-layers', '[(850, 3)] * 2 + [(1024,4)]', '--decoder-embed-dim', '280', '--optimizer', 'nag', '--lr', '0.1'])
                eval_lm_main(data_dir)

    def test_transformer_lm(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_transformer_lm') as data_dir:
                create_dummy_data(data_dir)
                preprocess_lm_data(data_dir)
                train_language_model(data_dir, 'transformer_lm', ['--add-bos-token'], run_validation=True)
                eval_lm_main(data_dir)
                generate_main(data_dir, ['--task', 'language_modeling', '--sample-break-mode', 'eos', '--tokens-per-sample', '500'])

    def test_lightconv_lm(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_lightconv_lm') as data_dir:
                create_dummy_data(data_dir)
                preprocess_lm_data(data_dir)
                train_language_model(data_dir, 'lightconv_lm', ['--add-bos-token'], run_validation=True)
                eval_lm_main(data_dir)
                generate_main(data_dir, ['--task', 'language_modeling', '--sample-break-mode', 'eos', '--tokens-per-sample', '500'])

    def test_lstm_lm(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_lstm_lm') as data_dir:
                create_dummy_data(data_dir)
                preprocess_lm_data(data_dir)
                train_language_model(data_dir, 'lstm_lm', ['--add-bos-token'], run_validation=True)
                eval_lm_main(data_dir)
                generate_main(data_dir, ['--task', 'language_modeling', '--sample-break-mode', 'eos', '--tokens-per-sample', '500'])
|
class TestMaskedLanguageModel(unittest.TestCase):
    """Smoke tests for the legacy masked LM task and for warm-starting a
    translation model from a pretrained XLM-style checkpoint."""

    def setUp(self):
        logging.disable(logging.CRITICAL)

    def tearDown(self):
        logging.disable(logging.NOTSET)

    def test_legacy_masked_lm(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_legacy_mlm') as data_dir:
                create_dummy_data(data_dir)
                preprocess_lm_data(data_dir)
                train_legacy_masked_language_model(data_dir, 'masked_lm')

    def _test_pretrained_masked_lm_for_translation(self, learned_pos_emb, encoder_only):
        # Stage 1: pretrain a masked LM; stage 2: initialize a translation
        # transformer from that checkpoint and train it.
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_mlm') as data_dir:
                create_dummy_data(data_dir)
                preprocess_lm_data(data_dir)
                train_legacy_masked_language_model(data_dir, arch='masked_lm', extra_args=(('--encoder-learned-pos',) if learned_pos_emb else ()))
                with tempfile.TemporaryDirectory('test_mlm_translation') as translation_dir:
                    create_dummy_data(translation_dir)
                    preprocess_translation_data(translation_dir, extra_flags=['--joined-dictionary'])
                    train_translation_model(translation_dir, arch='transformer_from_pretrained_xlm', extra_flags=((['--decoder-layers', '1', '--decoder-embed-dim', '32', '--decoder-attention-heads', '1', '--decoder-ffn-embed-dim', '32', '--encoder-layers', '1', '--encoder-embed-dim', '32', '--encoder-attention-heads', '1', '--encoder-ffn-embed-dim', '32', '--pretrained-xlm-checkpoint', '{}/checkpoint_last.pt'.format(data_dir), '--activation-fn', 'gelu', '--max-source-positions', '500', '--max-target-positions', '500'] + (['--encoder-learned-pos', '--decoder-learned-pos'] if learned_pos_emb else [])) + (['--init-encoder-only'] if encoder_only else [])), task='translation_from_pretrained_xlm')

    def test_pretrained_masked_lm_for_translation_learned_pos_emb(self):
        self._test_pretrained_masked_lm_for_translation(True, False)

    def test_pretrained_masked_lm_for_translation_sinusoidal_pos_emb(self):
        self._test_pretrained_masked_lm_for_translation(False, False)

    def test_pretrained_masked_lm_for_translation_encoder_only(self):
        self._test_pretrained_masked_lm_for_translation(True, True)
|
def train_legacy_masked_language_model(data_dir, arch, extra_args=()):
    """Train *arch* for one epoch on the cross_lingual_lm task over the raw
    monolingual data in *data_dir*, appending any *extra_args* CLI flags."""
    base_flags = [
        '--task', 'cross_lingual_lm',
        data_dir,
        '--arch', arch,
        '--optimizer', 'adam',
        '--lr-scheduler', 'reduce_lr_on_plateau',
        '--lr-shrink', '0.5',
        '--lr', '0.0001',
        '--min-lr', '1e-09',
        '--dropout', '0.1',
        '--attention-dropout', '0.1',
        '--criterion', 'legacy_masked_lm_loss',
        '--masked-lm-only',
        '--monolingual-langs', 'in,out',
        '--num-segment', '5',
        # Keep the model tiny so the test stays fast.
        '--encoder-layers', '1',
        '--encoder-embed-dim', '32',
        '--encoder-attention-heads', '1',
        '--encoder-ffn-embed-dim', '32',
        '--max-tokens', '500',
        '--tokens-per-sample', '500',
        '--save-dir', data_dir,
        '--max-epoch', '1',
        '--no-progress-bar',
        '--distributed-world-size', '1',
        '--dataset-impl', 'raw',
    ]
    parser = options.get_training_parser()
    parsed_args = options.parse_args_and_arch(parser, base_flags + list(extra_args))
    train.main(parsed_args)
|
class TestOptimizers(unittest.TestCase):
    """Smoke tests sweeping every supported optimizer, plus the fp16
    flat-grads failure mode of adafactor."""

    def setUp(self):
        logging.disable(logging.CRITICAL)

    def tearDown(self):
        logging.disable(logging.NOTSET)

    def test_optimizers(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_optimizers') as data_dir:
                create_dummy_data(data_dir, num_examples=10, maxlen=5)
                preprocess_translation_data(data_dir)
                optimizers = ['adafactor', 'adam', 'nag', 'adagrad', 'sgd', 'adadelta']
                last_checkpoint = os.path.join(data_dir, 'checkpoint_last.pt')
                for optimizer in optimizers:
                    # Remove the previous run's checkpoint so each optimizer
                    # starts training from scratch rather than resuming.
                    if os.path.exists(last_checkpoint):
                        os.remove(last_checkpoint)
                    train_translation_model(data_dir, 'lstm', ['--required-batch-size-multiple', '1', '--encoder-layers', '1', '--encoder-hidden-size', '32', '--decoder-layers', '1', '--optimizer', optimizer])
                    generate_main(data_dir)

    @unittest.skipIf((not torch.cuda.is_available()), 'test requires a GPU')
    def test_flat_grads(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_flat_grads') as data_dir:
                create_dummy_data(data_dir, num_examples=10, maxlen=5)
                preprocess_translation_data(data_dir)
                # adafactor cannot work with flattened fp16 grads...
                with self.assertRaises(RuntimeError):
                    train_translation_model(data_dir, 'lstm', ['--required-batch-size-multiple', '1', '--encoder-layers', '1', '--encoder-hidden-size', '32', '--decoder-layers', '1', '--optimizer', 'adafactor', '--fp16'])
                # ...but succeeds with --fp16-no-flatten-grads.
                train_translation_model(data_dir, 'lstm', ['--required-batch-size-multiple', '1', '--encoder-layers', '1', '--encoder-hidden-size', '32', '--decoder-layers', '1', '--optimizer', 'adafactor', '--fp16', '--fp16-no-flatten-grads'])
|
def create_dummy_data(data_dir, num_examples=100, maxlen=20, alignment=False):
    """Write dummy parallel corpora into *data_dir*.

    Creates ``{train,valid,test}.{in,out}``, each with *num_examples* lines of
    1..*maxlen* random lowercase ascii letters separated by spaces. With
    ``alignment=True``, also writes ``*.align`` files containing random
    "srcidx-tgtidx" pairs for each sentence pair.
    """
    def _create_dummy_data(filename):
        # Random codepoints in 'a'..'z': 97 + floor(26 * U[0, 1)).
        data = torch.rand(num_examples * maxlen)
        data = 97 + torch.floor(26 * data).int()
        with open(os.path.join(data_dir, filename), 'w') as h:
            offset = 0
            for _ in range(num_examples):
                ex_len = random.randint(1, maxlen)
                ex_str = ' '.join(map(chr, data[offset:(offset + ex_len)]))
                print(ex_str, file=h)
                offset += ex_len

    def _create_dummy_alignment_data(filename_src, filename_tgt, filename):
        with open(os.path.join(data_dir, filename_src), 'r') as src_f, \
                open(os.path.join(data_dir, filename_tgt), 'r') as tgt_f, \
                open(os.path.join(data_dir, filename), 'w') as h:
            for (src, tgt) in zip(src_f, tgt_f):
                src_len = len(src.split())
                tgt_len = len(tgt.split())
                avg_len = ((src_len + tgt_len) // 2)
                num_alignments = random.randint((avg_len // 2), (2 * avg_len))
                src_indices = torch.floor((torch.rand(num_alignments) * src_len)).int()
                tgt_indices = torch.floor((torch.rand(num_alignments) * tgt_len)).int()
                # FIX: loop variables renamed (s, t) — the original reused
                # (src, tgt) here, shadowing the sentence pair from the outer
                # loop inside the comprehension.
                ex_str = ' '.join(['{}-{}'.format(s, t) for (s, t) in zip(src_indices, tgt_indices)])
                print(ex_str, file=h)

    _create_dummy_data('train.in')
    _create_dummy_data('train.out')
    _create_dummy_data('valid.in')
    _create_dummy_data('valid.out')
    _create_dummy_data('test.in')
    _create_dummy_data('test.out')
    if alignment:
        _create_dummy_alignment_data('train.in', 'train.out', 'train.align')
        _create_dummy_alignment_data('valid.in', 'valid.out', 'valid.align')
        _create_dummy_alignment_data('test.in', 'test.out', 'test.align')
|
def preprocess_translation_data(data_dir, extra_flags=None):
    """Binarize the in->out dummy corpus living in *data_dir* in place."""
    flags = [
        '--source-lang', 'in',
        '--target-lang', 'out',
        '--trainpref', os.path.join(data_dir, 'train'),
        '--validpref', os.path.join(data_dir, 'valid'),
        '--testpref', os.path.join(data_dir, 'test'),
        '--thresholdtgt', '0',
        '--thresholdsrc', '0',
        '--destdir', data_dir,
    ]
    if extra_flags:
        flags.extend(extra_flags)
    parser = options.get_preprocessing_parser()
    preprocess.main(parser.parse_args(flags))
|
def train_translation_model(data_dir, arch, extra_flags=None, task='translation', run_validation=False, lang_flags=None, extra_valid_flags=None):
    """Train *arch* for one epoch on the binarized data in *data_dir*.

    Optionally validates the resulting ``checkpoint_last.pt`` afterwards.
    *lang_flags* defaults to the in->out pair used by the dummy corpora.
    """
    if (lang_flags is None):
        lang_flags = ['--source-lang', 'in', '--target-lang', 'out']
    train_parser = options.get_training_parser()
    train_args = options.parse_args_and_arch(
        train_parser,
        ([
            '--task', task,
            data_dir,
            '--save-dir', data_dir,
            '--arch', arch,
            '--lr', '0.05',
            '--max-tokens', '500',
            '--max-epoch', '1',
            '--no-progress-bar',
            '--distributed-world-size', '1',
            # BUG FIX: argv entries must be strings; passing the bare int 0
            # here can make argparse raise "'int' object is not subscriptable".
            '--num-workers', '0',
        ] + lang_flags) + (extra_flags or []),
    )
    train.main(train_args)
    if run_validation:
        # Validate the last checkpoint on the valid subset.
        validate_parser = options.get_validation_parser()
        validate_args = options.parse_args_and_arch(validate_parser, ((['--task', task, data_dir, '--path', os.path.join(data_dir, 'checkpoint_last.pt'), '--valid-subset', 'valid', '--max-tokens', '500', '--no-progress-bar'] + lang_flags) + (extra_valid_flags or [])))
        validate.main(validate_args)
|
def generate_main(data_dir, extra_flags=None):
    """Run generate.py over the last checkpoint in *data_dir*, then re-run
    the same configuration through interactive.py with one stdin sentence."""
    if (extra_flags is None):
        extra_flags = ['--print-alignment']
    generate_parser = options.get_generation_parser()
    generate_args = options.parse_args_and_arch(generate_parser, ([data_dir, '--path', os.path.join(data_dir, 'checkpoint_last.pt'), '--beam', '3', '--batch-size', '64', '--max-len-b', '5', '--gen-subset', 'valid', '--no-progress-bar'] + (extra_flags or [])))
    generate.main(generate_args)
    # Evaluate the same model through interactive.py, reading from stdin.
    generate_args.buffer_size = 0
    generate_args.input = '-'
    generate_args.max_sentences = None
    orig_stdin = sys.stdin
    sys.stdin = StringIO('h e l l o\n')
    try:
        interactive.main(generate_args)
    finally:
        # BUG FIX: restore stdin even if interactive.main raises; otherwise a
        # failure here leaves sys.stdin replaced for all subsequent tests.
        sys.stdin = orig_stdin
|
def preprocess_lm_data(data_dir):
    """Binarize the monolingual '*.out' files in *data_dir* for LM tasks."""
    parser = options.get_preprocessing_parser()
    parsed = parser.parse_args([
        '--only-source',
        '--trainpref', os.path.join(data_dir, 'train.out'),
        '--validpref', os.path.join(data_dir, 'valid.out'),
        '--testpref', os.path.join(data_dir, 'test.out'),
        '--destdir', data_dir,
    ])
    preprocess.main(parsed)
|
def train_language_model(data_dir, arch, extra_flags=None, run_validation=False):
    """Train a language model of type *arch* for one epoch on *data_dir*,
    optionally validating the resulting last checkpoint."""
    train_flags = [
        '--task', 'language_modeling',
        data_dir,
        '--arch', arch,
        '--optimizer', 'adam',
        '--lr', '0.0001',
        '--criterion', 'adaptive_loss',
        '--adaptive-softmax-cutoff', '5,10,15',
        '--max-tokens', '500',
        '--tokens-per-sample', '500',
        '--save-dir', data_dir,
        '--max-epoch', '1',
        '--no-progress-bar',
        '--distributed-world-size', '1',
        '--ddp-backend', 'no_c10d',
    ]
    if extra_flags:
        train_flags = train_flags + extra_flags
    parser = options.get_training_parser()
    train.main(options.parse_args_and_arch(parser, train_flags))
    if run_validation:
        # Score checkpoint_last.pt on the valid subset.
        val_parser = options.get_validation_parser()
        val_args = options.parse_args_and_arch(val_parser, [
            '--task', 'language_modeling',
            data_dir,
            '--path', os.path.join(data_dir, 'checkpoint_last.pt'),
            '--valid-subset', 'valid',
            '--max-tokens', '500',
            '--no-progress-bar',
        ])
        validate.main(val_args)
|
def eval_lm_main(data_dir):
    """Score the last checkpoint in *data_dir* with eval_lm."""
    parser = options.get_eval_lm_parser()
    parsed = options.parse_args_and_arch(
        parser,
        [data_dir, '--path', os.path.join(data_dir, 'checkpoint_last.pt'), '--no-progress-bar'],
    )
    eval_lm.main(parsed)
|
def train_masked_language_model(data_dir, arch, extra_args=()):
    """Train *arch* for one epoch on the cross_lingual_lm task with the
    (non-legacy) masked_lm_loss criterion over raw data in *data_dir*."""
    base_flags = [
        '--task', 'cross_lingual_lm',
        data_dir,
        '--arch', arch,
        '--optimizer', 'adam',
        '--lr-scheduler', 'reduce_lr_on_plateau',
        '--lr-shrink', '0.5',
        '--lr', '0.0001',
        '--min-lr', '1e-09',
        '--dropout', '0.1',
        '--attention-dropout', '0.1',
        '--criterion', 'masked_lm_loss',
        '--masked-lm-only',
        '--monolingual-langs', 'in,out',
        '--num-segment', '5',
        # Tiny model so the test stays fast.
        '--encoder-layers', '1',
        '--encoder-embed-dim', '32',
        '--encoder-attention-heads', '1',
        '--encoder-ffn-embed-dim', '32',
        '--max-tokens', '500',
        '--tokens-per-sample', '500',
        '--save-dir', data_dir,
        '--max-epoch', '1',
        '--no-progress-bar',
        '--distributed-world-size', '1',
        '--dataset-impl', 'raw',
    ]
    parser = options.get_training_parser()
    train.main(options.parse_args_and_arch(parser, base_flags + list(extra_args)))
|
class Model(nn.Module):
    """Minimal single-layer linear classifier used by the BMUF tests."""

    def __init__(self, input_size, output_size):
        super(Model, self).__init__()
        # One fully-connected layer mapping input_size -> output_size logits.
        self.fc = nn.Linear(input_size, output_size)

    def forward(self, input):
        return self.fc(input)
|
def setup_model_loss_criterion(args, rank, is_cuda):
    """Set up the model, criterion and BMUF-wrapped optimizer for one worker
    based on the input args; joins the process group as *rank*."""
    args.distributed_rank = rank
    distributed_utils.distributed_init(args)
    # Identical seed on every rank so all workers start from the same weights.
    torch.manual_seed(1)
    model = Model(args.input_size, args.nb_classes)
    loss_fn = nn.CrossEntropyLoss()
    if is_cuda:
        model = model.cuda()
        loss_fn = loss_fn.cuda()
    # Base SGD optimizer wrapped in BMUF (block-wise model update filtering).
    optimizer = optim.sgd.SGD(args, model.parameters())
    optimizer = optim.FairseqBMUF(args, optimizer)
    return (model, loss_fn, optimizer)
|
def train_step(input, target, model, loss_fn, optimizer):
    """Do forward, backward and parameter update."""
    model.train()
    predictions = model(input)
    training_loss = loss_fn(predictions, target)
    # The fairseq optimizer owns the backward pass as well as the step.
    optimizer.backward(training_loss)
    optimizer.step()
|
def single_gpu_training(args, rank, iterations, shared_results):
    """Run *iterations* BMUF training steps on worker *rank* and store the
    model's flattened parameters in ``shared_results[rank]``."""
    is_cuda = torch.cuda.is_available()
    if is_cuda:
        torch.cuda.set_device(rank)
    (model, loss_fn, optimizer) = setup_model_loss_criterion(args, rank, is_cuda)
    for _ in range(iterations):
        batch = torch.randn(1, args.input_size)
        labels = torch.empty(args.batch_size, dtype=torch.long).random_(args.nb_classes)
        if is_cuda:
            batch = batch.cuda()
            labels = labels.cuda()
        train_step(batch, labels, model, loss_fn, optimizer)
    # Flatten all parameters into a single 1-D CPU tensor so the parent
    # process can compare results across ranks.
    flats = [p.flatten().cpu().data for p in model.parameters()]
    shared_results[rank] = torch.cat(flats, 0) if flats else []
|
def setup_args():
    """Build the Namespace of hyper-parameters used by the BMUF tests.

    Returns an argparse.Namespace configured for a 2-worker gloo group on
    localhost, with a randomized init port to avoid collisions between runs.
    """
    args = argparse.Namespace()
    # BMUF settings. FIX: the original assigned global_sync_iter twice
    # (20, then 1); only the final value ever took effect, so the dead
    # first assignment is removed and the effective value (1) kept.
    args.global_sync_iter = 1
    args.block_momentum = 0.875
    args.block_lr = 0.5
    # Model / data sizes.
    args.input_size = 5
    args.nb_classes = 2
    args.batch_size = 1
    # Optimizer settings; lr is a list, matching fairseq's LR-schedule API.
    args.lr = [0.001]
    args.momentum = 0
    args.weight_decay = 0
    args.warmup_iterations = 0
    args.use_nbm = True
    args.average_sync = True
    # Distributed setup: two workers over gloo on localhost.
    args.distributed_backend = 'gloo'
    args.distributed_world_size = 2
    port = random.randint(10000, 20000)
    args.distributed_init_method = 'tcp://localhost:{port}'.format(port=port)
    args.distributed_init_host = 'localhost'
    args.distributed_port = (port + 1)
    args.local_world_size = args.distributed_world_size
    return args
|
@unittest.skipIf((torch.cuda.device_count() < 2), 'test requires 2 GPUs')
class TestBMUF(unittest.TestCase):
    """Tests that BMUF keeps two GPU workers' parameters in sync: spawn one
    process per rank, train, and check both ranks end with (nearly) equal
    flattened parameters."""

    def bmuf_process(self, args, iterations):
        processes = []
        # Manager dict shared across the spawned worker processes.
        results = Manager().dict()
        ctx = torch.multiprocessing.get_context('spawn')
        for rank in range(args.distributed_world_size):
            p = ctx.Process(target=single_gpu_training, args=(args, rank, iterations, results))
            p.start()
            processes.append(p)
        for p in processes:
            p.join()
        assert (len(results) == 2)
        self.assertAlmostEqual(results[0], results[1])

    def test_bmuf_sync(self):
        # Train for 1 iteration and check sync works correctly.
        args = setup_args()
        iterations = 1
        self.bmuf_process(args, iterations)

    def test_warmup_sync(self):
        # Train for 20 iterations with warmup 20 and check sync works correctly.
        args = setup_args()
        args.warmup_iterations = 20
        iterations = 20
        self.bmuf_process(args, iterations)

    def test_warmup_sync_bmuf_sync(self):
        # Warmup for 20 iterations, then BMUF-sync every 5, for 25 total.
        args = setup_args()
        args.warmup_iterations = 20
        args.global_sync_iter = 5
        iterations = 25
        self.bmuf_process(args, iterations)

    def assertAlmostEqual(self, t1, t2):
        # Tensor-aware near-equality: same shape, max abs diff < 1e-4.
        self.assertEqual(t1.size(), t2.size(), 'size mismatch')
        self.assertLess((t1 - t2).abs().max(), 0.0001)
|
class TestCharacterTokenEmbedder(unittest.TestCase):
    """Checks that CharacterTokenEmbedder produces per-position embeddings
    that depend only on the token at that position, and that gradients flow
    back to the character embeddings."""

    def test_character_token_embedder(self):
        vocab = Dictionary()
        vocab.add_symbol('hello')
        vocab.add_symbol('there')
        embedder = CharacterTokenEmbedder(vocab, [(2, 16), (4, 32), (8, 64), (16, 2)], 64, 5, 2)
        test_sents = [['hello', 'unk', 'there'], ['there'], ['hello', 'there']]
        max_len = max((len(s) for s in test_sents))
        # Batch padded to max_len + 2 to leave room for leading/trailing eos.
        input = torch.LongTensor(len(test_sents), (max_len + 2)).fill_(vocab.pad())
        for i in range(len(test_sents)):
            input[i][0] = vocab.eos()
            for j in range(len(test_sents[i])):
                input[i][(j + 1)] = vocab.index(test_sents[i][j])
            input[i][(j + 2)] = vocab.eos()
        embs = embedder(input)
        assert (embs.size() == (len(test_sents), (max_len + 2), 5))
        # Same token => same embedding, regardless of sentence or position.
        self.assertAlmostEqual(embs[0][0], embs[1][0])
        self.assertAlmostEqual(embs[0][0], embs[0][(- 1)])
        self.assertAlmostEqual(embs[0][1], embs[2][1])
        self.assertAlmostEqual(embs[0][3], embs[1][1])
        embs.sum().backward()
        assert (embedder.char_embeddings.weight.grad is not None)

    def assertAlmostEqual(self, t1, t2):
        # Tensor-aware near-equality: same shape, max abs diff < 1e-6.
        self.assertEqual(t1.size(), t2.size(), 'size mismatch')
        self.assertLess((t1 - t2).abs().max(), 1e-06)
|
class TestConcatDataset(unittest.TestCase):
    """Checks ConcatDataset length and element order, including upsampling
    of its constituent datasets via sample_ratios."""

    def setUp(self):
        # Two single-sentence LanguagePairDatasets: tokens [1] and [2].
        d = mock_dict()
        tokens_1 = torch.LongTensor([1]).view(1, (- 1))
        tokens_ds1 = TokenBlockDataset(tokens_1, sizes=[tokens_1.size((- 1))], block_size=1, pad=0, eos=1, include_targets=False)
        self.dataset_1 = LanguagePairDataset(tokens_ds1, tokens_ds1.sizes, d, shuffle=False)
        tokens_2 = torch.LongTensor([2]).view(1, (- 1))
        tokens_ds2 = TokenBlockDataset(tokens_2, sizes=[tokens_2.size((- 1))], block_size=1, pad=0, eos=1, include_targets=False)
        self.dataset_2 = LanguagePairDataset(tokens_ds2, tokens_ds2.sizes, d, shuffle=False)

    def test_concat_dataset_basics(self):
        d = ConcatDataset([self.dataset_1, self.dataset_2])
        assert (len(d) == 2)
        assert (d[0]['source'][0] == 1)
        assert (d[1]['source'][0] == 2)
        # sample_ratios upsample each dataset; order is per-dataset blocks.
        d = ConcatDataset([self.dataset_1, self.dataset_2], sample_ratios=[1, 2])
        assert (len(d) == 3)
        assert (d[0]['source'][0] == 1)
        assert (d[1]['source'][0] == 2)
        assert (d[2]['source'][0] == 2)
        d = ConcatDataset([self.dataset_1, self.dataset_2], sample_ratios=[2, 1])
        assert (len(d) == 3)
        assert (d[0]['source'][0] == 1)
        assert (d[1]['source'][0] == 1)
        assert (d[2]['source'][0] == 2)
|
class TestConvTBC(unittest.TestCase):
    """Checks that ConvTBC (time x batch x channel layout) matches
    nn.Conv1d in both forward outputs and gradients when given the same
    weights and transposed inputs."""

    def test_convtbc(self):
        # ksize=3, padding=1: out = kernel(in)
        conv_tbc = ConvTBC(4, 5, kernel_size=3, padding=1)
        conv1d = nn.Conv1d(4, 5, kernel_size=3, padding=1)
        # Copy conv1d's weights into the TBC layout (transpose dims 0 and 2).
        conv_tbc.weight.data.copy_(conv1d.weight.data.transpose(0, 2))
        conv_tbc.bias.data.copy_(conv1d.bias.data)
        input_tbc = torch.randn(7, 2, 4, requires_grad=True)
        input1d = input_tbc.data.transpose(0, 1).transpose(1, 2)
        input1d.requires_grad = True
        output_tbc = conv_tbc(input_tbc)
        output1d = conv1d(input1d)
        self.assertAlmostEqual(output_tbc.data.transpose(0, 1).transpose(1, 2), output1d.data)
        grad_tbc = torch.randn(output_tbc.size())
        grad1d = grad_tbc.transpose(0, 1).transpose(1, 2).contiguous()
        output_tbc.backward(grad_tbc)
        output1d.backward(grad1d)
        self.assertAlmostEqual(conv_tbc.weight.grad.data.transpose(0, 2), conv1d.weight.grad.data)
        self.assertAlmostEqual(conv_tbc.bias.grad.data, conv1d.bias.grad.data)
        self.assertAlmostEqual(input_tbc.grad.data.transpose(0, 1).transpose(1, 2), input1d.grad.data)

    def assertAlmostEqual(self, t1, t2):
        # Tensor-aware near-equality: same shape, max abs diff < 1e-4.
        self.assertEqual(t1.size(), t2.size(), 'size mismatch')
        self.assertLess((t1 - t2).abs().max(), 0.0001)
|
class TestDictionary(unittest.TestCase):
    """Checks Dictionary.finalize(): symbol ids are reassigned by descending
    frequency, and the finalized dictionary round-trips through save/load."""

    def test_finalize(self):
        txt = ['A B C D', 'B C D', 'C D', 'D']
        # Before finalize: ids in insertion order (A=4, B=5, C=6, D=7; eos=2).
        ref_ids1 = list(map(torch.IntTensor, [[4, 5, 6, 7, 2], [5, 6, 7, 2], [6, 7, 2], [7, 2]]))
        # After finalize: ids sorted by frequency (D=4, C=5, B=6, A=7).
        ref_ids2 = list(map(torch.IntTensor, [[7, 6, 5, 4, 2], [6, 5, 4, 2], [5, 4, 2], [4, 2]]))
        d = Dictionary()
        for line in txt:
            d.encode_line(line, add_if_not_exist=True)

        def get_ids(dictionary):
            ids = []
            for line in txt:
                ids.append(dictionary.encode_line(line, add_if_not_exist=False))
            return ids

        def assertMatch(ids, ref_ids):
            for (toks, ref_toks) in zip(ids, ref_ids):
                self.assertEqual(toks.size(), ref_toks.size())
                self.assertEqual(0, (toks != ref_toks).sum().item())

        ids = get_ids(d)
        assertMatch(ids, ref_ids1)
        d.finalize()
        finalized_ids = get_ids(d)
        assertMatch(finalized_ids, ref_ids2)
        # The finalized ids must survive a save/load round trip.
        with tempfile.NamedTemporaryFile(mode='w') as tmp_dict:
            d.save(tmp_dict.name)
            d = Dictionary.load(tmp_dict.name)
            reload_ids = get_ids(d)
            assertMatch(reload_ids, ref_ids2)
            assertMatch(finalized_ids, reload_ids)
|
class DummyTask(FairseqTask):
    """Minimal FairseqTask exposing one shared dummy dictionary as both the
    source and target dictionary; used to instantiate models/criteria in
    unit tests without real data."""

    def __init__(self, args):
        super().__init__(args)
        self.dictionary = get_dummy_dictionary()
        # Optionally add a CTC blank symbol when the args request it.
        if getattr(self.args, 'ctc', False):
            self.dictionary.add_symbol('<ctc_blank>')
        self.src_dict = self.dictionary
        self.tgt_dict = self.dictionary

    @property
    def source_dictionary(self):
        return self.src_dict

    @property
    def target_dictionary(self):
        return self.dictionary
|
def get_dummy_dictionary(vocab_size=DEFAULT_TEST_VOCAB_SIZE):
    """Build a Dictionary populated with *vocab_size* numeric string symbols.

    Each symbol ("0", "1", ...) is added with a count of 1000 so none of them
    would be dropped by frequency-based pruning.

    Args:
        vocab_size: number of symbols to add.

    Returns:
        Dictionary: the populated dummy dictionary.
    """
    dummy_dict = Dictionary()
    # range() already yields the symbol ids; the original wrapped it in a
    # redundant enumerate() and shadowed the builtin ``id``.
    for symbol_id in range(vocab_size):
        dummy_dict.add_symbol('{}'.format(symbol_id), 1000)
    return dummy_dict
|
def get_dummy_task_and_parser():
    """Return a dummy task and argument parser, which can be used to
    create a model/criterion.
    """
    parser = argparse.ArgumentParser(
        description='test_dummy_s2s_task', argument_default=argparse.SUPPRESS
    )
    DummyTask.add_args(parser)
    dummy_args = parser.parse_args([])
    return DummyTask.setup_task(dummy_args), parser
|
class TestExportModels(unittest.TestCase):
    """TorchScript export tests for core fairseq modules."""

    def _test_save_and_load(self, scripted_module):
        """Round-trip a scripted module through torch.jit save/load."""
        with tempfile.NamedTemporaryFile() as f:
            scripted_module.save(f.name)
            torch.jit.load(f.name)

    def test_export_multihead_attention(self):
        module = multihead_attention.MultiheadAttention(embed_dim=8, num_heads=2)
        scripted = torch.jit.script(module)
        self._test_save_and_load(scripted)

    def test_incremental_state_multihead_attention(self):
        module1 = multihead_attention.MultiheadAttention(embed_dim=8, num_heads=2)
        module1 = torch.jit.script(module1)
        module2 = multihead_attention.MultiheadAttention(embed_dim=8, num_heads=2)
        module2 = torch.jit.script(module2)
        state = {}
        state = module1.set_incremental_state(state, 'key', {'a': torch.tensor([1])})
        state = module2.set_incremental_state(state, 'key', {'a': torch.tensor([2])})
        v1 = module1.get_incremental_state(state, 'key')['a']
        v2 = module2.get_incremental_state(state, 'key')['a']
        # Each module keys its incremental state with a unique id, so the two
        # writes must not clobber each other.
        self.assertEqual(v1, 1)
        self.assertEqual(v2, 2)

    def test_positional_embedding(self):
        module = sinusoidal_positional_embedding.SinusoidalPositionalEmbedding(
            embedding_dim=8, padding_idx=1
        )
        scripted = torch.jit.script(module)
        self._test_save_and_load(scripted)

    # BUGFIX: compare numeric (major, minor) instead of raw strings -- the
    # original lexicographic compare treats '1.10.0' as < '1.5.0' and would
    # wrongly skip this test on newer torch releases. The '+' split strips
    # local build suffixes like '+cu113'.
    @unittest.skipIf(
        tuple(int(p) for p in torch.__version__.split('+')[0].split('.')[:2]) < (1, 5),
        'Targeting OSS scriptability for the 1.5 release',
    )
    def test_export_transformer(self):
        (task, parser) = get_dummy_task_and_parser()
        TransformerModel.add_args(parser)
        args = parser.parse_args([])
        model = TransformerModel.build_model(args, task)
        scripted = torch.jit.script(model)
        self._test_save_and_load(scripted)
|
class TestFileIO(unittest.TestCase):
    """Tests for fairseq.file_io.PathManager with and without fvcore installed."""

    _tmpdir: Optional[str] = None
    _tmpfile: Optional[str] = None
    _tmpfile_contents = 'Hello, World'

    @classmethod
    def setUpClass(cls) -> None:
        # One shared temp file for all tests in this class.
        cls._tmpdir = tempfile.mkdtemp()
        with open(os.path.join(cls._tmpdir, 'test.txt'), 'w') as f:
            cls._tmpfile = f.name
            f.write(cls._tmpfile_contents)
            f.flush()

    @classmethod
    def tearDownClass(cls) -> None:
        if cls._tmpdir is not None:
            shutil.rmtree(cls._tmpdir)

    def test_file_io(self):
        from fairseq.file_io import PathManager
        with PathManager.open(os.path.join(self._tmpdir, 'test.txt'), 'r') as f:
            self.assertEqual(f.read(), self._tmpfile_contents)

    def test_file_io_oss(self):
        # Simulate an fvcore installation so the fvcore-backed path is taken.
        sys.modules['fvcore'] = MagicMock()
        from fairseq.file_io import PathManager
        with PathManager.open(os.path.join(self._tmpdir, 'test.txt'), 'r') as f:
            self.assertEqual(f.read(), self._tmpfile_contents)
|
class TestIterators(unittest.TestCase):

    def test_counting_iterator(self):
        """CountingIterator must interleave next(), skip() and has_next() correctly."""
        data = list(range(10))
        itr = iterators.CountingIterator(data)
        self.assertTrue(itr.has_next())
        self.assertEqual(next(itr), 0)
        self.assertEqual(next(itr), 1)
        itr.skip(3)
        self.assertEqual(next(itr), 5)
        itr.skip(3)
        self.assertEqual(next(itr), 9)
        # All ten elements consumed.
        self.assertFalse(itr.has_next())
|
@unittest.skipIf(not torch.cuda.is_available(), 'test requires a GPU')
class TestMemoryEfficientFP16(unittest.TestCase):
    """Tests for the memory-efficient FP16 optimizer wrapper."""

    def setUp(self):
        logging.disable(logging.CRITICAL)

    def tearDown(self):
        logging.disable(logging.NOTSET)

    def test_load_state_dict(self):
        """state_dict round-trip keeps fp16 params and fp32 optimizer statistics."""
        model = torch.nn.Linear(5, 5).cuda().half()
        params = list(model.parameters())
        adam_args = argparse.Namespace(
            lr=[1e-05], adam_betas='(0.9, 0.999)', adam_eps=1e-08, weight_decay=0.0
        )
        optimizer = FairseqAdam(adam_args, params)
        fp16_args = argparse.Namespace(
            fp16_init_scale=1,
            fp16_scale_window=1,
            fp16_scale_tolerance=1,
            threshold_loss_scale=1,
            min_loss_scale=0.0001,
        )
        me_optimizer = MemoryEfficientFP16Optimizer(fp16_args, params, optimizer)

        # One forward/backward/step so the optimizer state is populated.
        loss = model(torch.rand(5).cuda().half()).sum()
        me_optimizer.backward(loss)
        me_optimizer.step()

        state = me_optimizer.state_dict()
        me_optimizer.load_state_dict(state)
        for param, param_state in me_optimizer.optimizer.state.items():
            # Parameters stay half precision; statistics stay single precision.
            self.assertTrue(param.dtype == torch.float16)
            for value in param_state.values():
                if torch.is_tensor(value):
                    self.assertTrue(value.dtype == torch.float32)
|
class TestMetrics(unittest.TestCase):
    """Tests for the metrics aggregation context managers."""

    def test_nesting(self):
        with metrics.aggregate() as outer:
            metrics.log_scalar('loss', 1)
            with metrics.aggregate() as inner:
                metrics.log_scalar('loss', 2)
        # The outer aggregator sees both values; the inner only the second.
        self.assertEqual(outer.get_smoothed_values()['loss'], 1.5)
        self.assertEqual(inner.get_smoothed_values()['loss'], 2)

    def test_new_root(self):
        with metrics.aggregate() as outer:
            metrics.log_scalar('loss', 1)
            with metrics.aggregate(new_root=True) as root:
                metrics.log_scalar('loss', 2)
        # new_root=True detaches the inner aggregator from the outer one.
        self.assertEqual(outer.get_smoothed_values()['loss'], 1)
        self.assertEqual(root.get_smoothed_values()['loss'], 2)

    def test_nested_new_root(self):
        with metrics.aggregate() as layer1:
            metrics.log_scalar('loss', 1)
            with metrics.aggregate(new_root=True) as layer2:
                metrics.log_scalar('loss', 2)
                with metrics.aggregate() as layer3:
                    metrics.log_scalar('loss', 3)
                    with metrics.aggregate(new_root=True) as layer4:
                        metrics.log_scalar('loss', 4)
            metrics.log_scalar('loss', 1.5)
        # layer1: (1 + 1.5) / 2; layer2: (2 + 3) / 2; layer3 and layer4 see
        # only their own value because of the new_root boundaries.
        self.assertEqual(layer4.get_smoothed_values()['loss'], 4)
        self.assertEqual(layer3.get_smoothed_values()['loss'], 3)
        self.assertEqual(layer2.get_smoothed_values()['loss'], 2.5)
        self.assertEqual(layer1.get_smoothed_values()['loss'], 1.25)

    def test_named(self):
        name = str(uuid.uuid4())
        metrics.reset_meters(name)
        with metrics.aggregate(name):
            metrics.log_scalar('loss', 1)
        # Logged outside the named context: must not count toward it.
        metrics.log_scalar('loss', 3)
        with metrics.aggregate(name):
            metrics.log_scalar('loss', 2)
        self.assertEqual(metrics.get_smoothed_values(name)['loss'], 1.5)

    def test_nested_duplicate_names(self):
        name = str(uuid.uuid4())
        metrics.reset_meters(name)
        with metrics.aggregate(name):
            metrics.log_scalar('loss', 1)
            with metrics.aggregate() as other:
                with metrics.aggregate(name):
                    metrics.log_scalar('loss', 2)
            metrics.log_scalar('loss', 6)
        # The named meter accumulates across both of its contexts: (1+2+6)/3.
        self.assertEqual(metrics.get_smoothed_values(name)['loss'], 3)
        self.assertEqual(other.get_smoothed_values()['loss'], 2)
|
class TestMultiCorpusSampledDataset(unittest.TestCase):
    """Tests for MultiCorpusSampledDataset's per-example corpus sampling."""

    def setUp(self):
        d = mock_dict()
        # Dataset 1 yields token id 1; dataset 2 yields token id 2, so the
        # sampled source token identifies which corpus an example came from.
        tokens_1 = torch.LongTensor([1]).view(1, -1)
        tokens_ds1 = TokenBlockDataset(
            tokens_1, sizes=[tokens_1.size(-1)], block_size=1, pad=0, eos=1, include_targets=False
        )
        self.dataset_1 = LanguagePairDataset(tokens_ds1, tokens_ds1.sizes, d, shuffle=False)
        tokens_2 = torch.LongTensor([2]).view(1, -1)
        tokens_ds2 = TokenBlockDataset(
            tokens_2, sizes=[tokens_2.size(-1)], block_size=1, pad=0, eos=1, include_targets=False
        )
        self.dataset_2 = LanguagePairDataset(tokens_ds2, tokens_ds2.sizes, d, shuffle=False)

    def _test_sample_helper(self, expected_sample_from_first_ds_percentage, num_samples=1000, sampling_func=None):
        """Draw *num_samples* batches and check the fraction drawn from dataset 1.

        Args:
            expected_sample_from_first_ds_percentage: expected fraction in [0, 1].
            num_samples: number of batches to draw.
            sampling_func: optional custom corpus-sampling function.
        """
        np.random.seed(0)
        if sampling_func is None:
            m = MultiCorpusSampledDataset(OrderedDict({0: self.dataset_1, 1: self.dataset_2}))
        else:
            m = MultiCorpusSampledDataset(
                OrderedDict({0: self.dataset_1, 1: self.dataset_2}), sampling_func=sampling_func
            )
        m.ordered_indices()
        count_sample_from_first_dataset = 0
        for _ in range(num_samples):
            if m.collater([m[0], m[1]])['net_input']['src_tokens'][0] == 1:
                count_sample_from_first_dataset += 1
        sample_from_first_ds_percentage = 1.0 * count_sample_from_first_dataset / num_samples
        self.assertLess(
            abs(sample_from_first_ds_percentage - expected_sample_from_first_ds_percentage), 0.01
        )

    def test_multi_corpus_sampled_dataset_uniform_sample(self):
        self._test_sample_helper(expected_sample_from_first_ds_percentage=0.5)

    def test_multi_corpus_sampled_dataset_weighted_sample(self):
        def naive_weighted_sample(weights):
            def f(l):
                v = np.random.random()
                agg = 0
                for i, weight in enumerate(weights):
                    agg += weight
                    if agg > v:
                        return i
                # BUGFIX: the original fell off the loop and implicitly
                # returned None when float round-off left agg <= v; always
                # fall back to the last corpus index instead.
                return len(weights) - 1
            return f

        self._test_sample_helper(
            expected_sample_from_first_ds_percentage=0.9,
            sampling_func=naive_weighted_sample(weights=[0.9, 0.1]),
        )
|
class TestMultiheadAttention(unittest.TestCase):

    def test_append_prev_key_padding_mask(self):
        """_append_prev_key_padding_mask must pad/concatenate masks to src_len."""
        bsz = 1
        src_len = 4
        # Each case is (current mask, previous mask, expected combined mask).
        cases = [
            (None, None, None),
            (torch.tensor([[1]]).bool(), None, torch.tensor([[0, 0, 0, 1]]).bool()),
            (None, torch.tensor([[0, 1, 0]]).bool(), torch.tensor([[0, 1, 0, 0]]).bool()),
            (
                torch.tensor([[1]]).bool(),
                torch.tensor([[0, 1, 0]]).bool(),
                torch.tensor([[0, 1, 0, 1]]).bool(),
            ),
        ]
        for c in cases:
            key_padding_mask = MultiheadAttention._append_prev_key_padding_mask(
                c[0], c[1], batch_size=bsz, src_len=src_len, static_kv=False
            )
            if key_padding_mask is None:
                self.assertIsNone(c[2])
            else:
                self.assertTrue(
                    torch.all(torch.eq(key_padding_mask, c[2])),
                    f'Unexpected resultant key padding mask: {key_padding_mask} given current: {c[0]} and previous: {c[1]}',
                )
                self.assertEqual(key_padding_mask.size(0), bsz)
                self.assertEqual(key_padding_mask.size(1), src_len)
|
class TestDataNoising(unittest.TestCase):
    """Tests for the noising module: word dropout, word blanking and word shuffle."""

    def _get_test_data_with_bpe_cont_marker(self, append_eos=True):
        """
        Args:
            append_eos: if True, each input sentence in the source tokens tensor
                will have an EOS appended to the end.

        Returns:
            vocabs: BPE vocab with continuation markers as suffixes to denote
                non-end of word tokens. This is the standard BPE format used in
                fairseq's preprocessing.
            x: input tensor containing numberized source tokens, with EOS at the
                end if append_eos is true
            src_lengths: and source lengths.
        """
        vocab = Dictionary()
        vocab.add_symbol('he@@')
        vocab.add_symbol('llo')
        vocab.add_symbol('how')
        vocab.add_symbol('are')
        vocab.add_symbol('y@@')
        vocab.add_symbol('ou')
        vocab.add_symbol('n@@')
        vocab.add_symbol('ew')
        vocab.add_symbol('or@@')
        vocab.add_symbol('k')
        src_tokens = [['he@@', 'llo', 'n@@', 'ew', 'y@@', 'or@@', 'k'], ['how', 'are', 'y@@', 'ou']]
        # BUGFIX: the original duplicated the "(x, src_lengths) =" target here.
        (x, src_lengths) = self._convert_src_tokens_to_tensor(
            vocab=vocab, src_tokens=src_tokens, append_eos=append_eos
        )
        return (vocab, x, src_lengths)

    def _get_test_data_with_bpe_end_marker(self, append_eos=True):
        """
        Args:
            append_eos: if True, each input sentence in the source tokens tensor
                will have an EOS appended to the end.

        Returns:
            vocabs: BPE vocab with end-of-word markers as suffixes to denote
                tokens at the end of a word. This is an alternative to fairseq's
                standard preprocessing framework and is not generally supported
                within fairseq.
            x: input tensor containing numberized source tokens, with EOS at the
                end if append_eos is true
            src_lengths: and source lengths.
        """
        vocab = Dictionary()
        vocab.add_symbol('he')
        vocab.add_symbol('llo_EOW')
        vocab.add_symbol('how_EOW')
        vocab.add_symbol('are_EOW')
        vocab.add_symbol('y')
        vocab.add_symbol('ou_EOW')
        vocab.add_symbol('n')
        vocab.add_symbol('ew_EOW')
        vocab.add_symbol('or')
        vocab.add_symbol('k_EOW')
        src_tokens = [['he', 'llo_EOW', 'n', 'ew_EOW', 'y', 'or', 'k_EOW'], ['how_EOW', 'are_EOW', 'y', 'ou_EOW']]
        # BUGFIX: the original duplicated the "(x, src_lengths) =" target here.
        (x, src_lengths) = self._convert_src_tokens_to_tensor(
            vocab=vocab, src_tokens=src_tokens, append_eos=append_eos
        )
        return (vocab, x, src_lengths)

    def _get_test_data_with_word_vocab(self, append_eos=True):
        """
        Args:
            append_eos: if True, each input sentence in the source tokens tensor
                will have an EOS appended to the end.

        Returns:
            vocabs: word vocab
            x: input tensor containing numberized source tokens, with EOS at the
                end if append_eos is true
            src_lengths: and source lengths.
        """
        vocab = Dictionary()
        vocab.add_symbol('hello')
        vocab.add_symbol('how')
        vocab.add_symbol('are')
        vocab.add_symbol('you')
        vocab.add_symbol('new')
        vocab.add_symbol('york')
        src_tokens = [['hello', 'new', 'york', 'you'], ['how', 'are', 'you', 'new', 'york']]
        (x, src_lengths) = self._convert_src_tokens_to_tensor(
            vocab=vocab, src_tokens=src_tokens, append_eos=append_eos
        )
        return (vocab, x, src_lengths)

    def _convert_src_tokens_to_tensor(self, vocab: Dictionary, src_tokens: List[List[str]], append_eos: bool):
        """Numberize *src_tokens* into a (T x B) LongTensor padded with vocab.pad().

        Returns the token tensor and a LongTensor of per-sentence lengths
        (lengths include the EOS when *append_eos* is True).
        """
        src_len = [len(x) for x in src_tokens]
        if append_eos:
            src_len = [length + 1 for length in src_len]
        x = torch.LongTensor(len(src_tokens), max(src_len)).fill_(vocab.pad())
        for i in range(len(src_tokens)):
            for j in range(len(src_tokens[i])):
                x[i][j] = vocab.index(src_tokens[i][j])
            if append_eos:
                # j is the last in-sentence position after the inner loop.
                x[i][j + 1] = vocab.eos()
        # Transpose to the (time, batch) layout the noising classes expect.
        x = x.transpose(1, 0)
        return (x, torch.LongTensor(src_len))

    def assert_eos_at_end(self, x, x_len, eos):
        """Asserts last token of every sentence in x is EOS."""
        for i in range(len(x_len)):
            # BUGFIX: the failure message previously reported x[i][-1], which
            # is not the token being compared; report the actual last token.
            self.assertEqual(
                x[x_len[i] - 1][i],
                eos,
                'Expected eos (token id {eos}) at the end of sentence {i} but got {other} instead'.format(
                    i=i, eos=eos, other=x[x_len[i] - 1][i]
                ),
            )

    def assert_word_dropout_correct(self, x, x_noised, x_len, l_noised):
        # With the fixed seed, the first two words of the first sentence are
        # dropped; every remaining token shifts up by two positions.
        self.assertEqual(x_len[0] - 2, l_noised[0])
        for i in range(l_noised[0]):
            self.assertEqual(x_noised[i][0], x[i + 2][0])

    def test_word_dropout_with_eos(self):
        (vocab, x, x_len) = self._get_test_data_with_bpe_cont_marker(append_eos=True)
        with data_utils.numpy_seed(1234):
            noising_gen = noising.WordDropout(vocab)
            (x_noised, l_noised) = noising_gen.noising(x, x_len, 0.2)
            self.assert_word_dropout_correct(x=x, x_noised=x_noised, x_len=x_len, l_noised=l_noised)
            self.assert_eos_at_end(x=x_noised, x_len=l_noised, eos=vocab.eos())

    def assert_word_blanking_correct(self, x, x_noised, x_len, l_noised, unk):
        # Blanking replaces tokens with <unk> in place, so lengths are kept.
        self.assertEqual(x_len[0], l_noised[0])
        for i in range(l_noised[0]):
            if i < 2:
                self.assertEqual(x_noised[i][0], unk)
            else:
                self.assertEqual(x_noised[i][0], x[i][0])

    def test_word_blank_with_eos(self):
        (vocab, x, x_len) = self._get_test_data_with_bpe_cont_marker(append_eos=True)
        with data_utils.numpy_seed(1234):
            noising_gen = noising.WordDropout(vocab)
            (x_noised, l_noised) = noising_gen.noising(x, x_len, 0.2, vocab.unk())
            self.assert_word_blanking_correct(
                x=x, x_noised=x_noised, x_len=x_len, l_noised=l_noised, unk=vocab.unk()
            )
            self.assert_eos_at_end(x=x_noised, x_len=l_noised, eos=vocab.eos())

    def generate_unchanged_shuffle_map(self, length):
        """Identity mapping: every position maps to itself."""
        return {i: i for i in range(length)}

    def assert_word_shuffle_matches_expected(self, x, x_len, max_shuffle_distance: int, vocab: Dictionary, expected_shufle_maps: List[Dict[(int, int)]], expect_eos_at_end: bool, bpe_end_marker=None):
        """
        This verifies that with a given x, x_len, max_shuffle_distance, and
        vocab, we get the expected shuffle result.

        Args:
            x: Tensor of shape (T x B) = (sequence_length, batch_size)
            x_len: Tensor of length B = batch_size
            max_shuffle_distance: arg to pass to noising
            expected_shuffle_maps: List[mapping] where mapping is a
                Dict[old_index, new_index], mapping x's elements from their
                old positions in x to their new positions in x.
            expect_eos_at_end: if True, check the output to make sure there is
                an EOS at the end.
            bpe_end_marker: str denoting the BPE end token. If this is not None, we
                set the BPE cont token to None in the noising classes.
        """
        bpe_cont_marker = None
        if bpe_end_marker is None:
            bpe_cont_marker = '@@'
        with data_utils.numpy_seed(1234):
            word_shuffle = noising.WordShuffle(
                vocab, bpe_cont_marker=bpe_cont_marker, bpe_end_marker=bpe_end_marker
            )
            (x_noised, l_noised) = word_shuffle.noising(
                x, x_len, max_shuffle_distance=max_shuffle_distance
            )
            for i in range(len(expected_shufle_maps)):
                shuffle_map = expected_shufle_maps[i]
                for k, v in shuffle_map.items():
                    self.assertEqual(x[k][i], x_noised[v][i])
            # Shuffling never changes sentence lengths.
            for pre_shuffle_length, post_shuffle_length in zip(x_len, l_noised):
                self.assertEqual(pre_shuffle_length, post_shuffle_length)
            if expect_eos_at_end:
                self.assert_eos_at_end(x=x_noised, x_len=l_noised, eos=vocab.eos())

    def test_word_shuffle_with_eos(self):
        (vocab, x, x_len) = self._get_test_data_with_bpe_cont_marker(append_eos=True)
        # Distance 0 must be a no-op.
        self.assert_word_shuffle_matches_expected(
            x=x,
            x_len=x_len,
            max_shuffle_distance=0,
            vocab=vocab,
            expected_shufle_maps=[
                self.generate_unchanged_shuffle_map(example_len) for example_len in x_len
            ],
            expect_eos_at_end=True,
        )
        self.assert_word_shuffle_matches_expected(
            x=x,
            x_len=x_len,
            vocab=vocab,
            max_shuffle_distance=3,
            expected_shufle_maps=[
                self.generate_unchanged_shuffle_map(x_len[0]),
                {0: 0, 1: 3, 2: 1, 3: 2},
            ],
            expect_eos_at_end=True,
        )

    def test_word_shuffle_with_eos_nonbpe(self):
        """The purpose of this is to test permute logic with word vocabs."""
        (vocab, x, x_len) = self._get_test_data_with_word_vocab(append_eos=True)
        self.assert_word_shuffle_matches_expected(
            x=x,
            x_len=x_len,
            max_shuffle_distance=0,
            vocab=vocab,
            expected_shufle_maps=[
                self.generate_unchanged_shuffle_map(example_len) for example_len in x_len
            ],
            expect_eos_at_end=True,
        )
        self.assert_word_shuffle_matches_expected(
            x=x,
            x_len=x_len,
            vocab=vocab,
            max_shuffle_distance=3,
            expected_shufle_maps=[
                {0: 0, 1: 1, 2: 3, 3: 2},
                {0: 0, 1: 2, 2: 1, 3: 3, 4: 4},
            ],
            expect_eos_at_end=True,
        )

    def test_word_shuffle_without_eos(self):
        """Same result as word shuffle with eos except no EOS at end."""
        (vocab, x, x_len) = self._get_test_data_with_bpe_cont_marker(append_eos=False)
        self.assert_word_shuffle_matches_expected(
            x=x,
            x_len=x_len,
            max_shuffle_distance=0,
            vocab=vocab,
            expected_shufle_maps=[
                self.generate_unchanged_shuffle_map(example_len) for example_len in x_len
            ],
            expect_eos_at_end=False,
        )
        self.assert_word_shuffle_matches_expected(
            x=x,
            x_len=x_len,
            vocab=vocab,
            max_shuffle_distance=3,
            expected_shufle_maps=[
                self.generate_unchanged_shuffle_map(x_len[0]),
                {0: 0, 1: 3, 2: 1, 3: 2},
            ],
            expect_eos_at_end=False,
        )

    def test_word_shuffle_without_eos_with_bpe_end_marker(self):
        """Same result as word shuffle without eos except using BPE end token."""
        (vocab, x, x_len) = self._get_test_data_with_bpe_end_marker(append_eos=False)
        self.assert_word_shuffle_matches_expected(
            x=x,
            x_len=x_len,
            max_shuffle_distance=0,
            vocab=vocab,
            expected_shufle_maps=[
                self.generate_unchanged_shuffle_map(example_len) for example_len in x_len
            ],
            expect_eos_at_end=False,
            bpe_end_marker='_EOW',
        )
        self.assert_word_shuffle_matches_expected(
            x=x,
            x_len=x_len,
            vocab=vocab,
            max_shuffle_distance=3,
            expected_shufle_maps=[
                self.generate_unchanged_shuffle_map(x_len[0]),
                {0: 0, 1: 3, 2: 1, 3: 2},
            ],
            expect_eos_at_end=False,
            bpe_end_marker='_EOW',
        )

    def assert_no_eos_at_end(self, x, x_len, eos):
        """Asserts that the last token of each sentence in x is not EOS."""
        for i in range(len(x_len)):
            self.assertNotEqual(
                x[x_len[i] - 1][i],
                eos,
                'Expected no eos (token id {eos}) at the end of sentence {i}.'.format(eos=eos, i=i),
            )

    def test_word_dropout_without_eos(self):
        """Same result as word dropout with eos except no EOS at end."""
        (vocab, x, x_len) = self._get_test_data_with_bpe_cont_marker(append_eos=False)
        with data_utils.numpy_seed(1234):
            noising_gen = noising.WordDropout(vocab)
            (x_noised, l_noised) = noising_gen.noising(x, x_len, 0.2)
            self.assert_word_dropout_correct(x=x, x_noised=x_noised, x_len=x_len, l_noised=l_noised)
            self.assert_no_eos_at_end(x=x_noised, x_len=l_noised, eos=vocab.eos())

    def test_word_blank_without_eos(self):
        """Same result as word blank with eos except no EOS at end."""
        (vocab, x, x_len) = self._get_test_data_with_bpe_cont_marker(append_eos=False)
        with data_utils.numpy_seed(1234):
            noising_gen = noising.WordDropout(vocab)
            (x_noised, l_noised) = noising_gen.noising(x, x_len, 0.2, vocab.unk())
            self.assert_word_blanking_correct(
                x=x, x_noised=x_noised, x_len=x_len, l_noised=l_noised, unk=vocab.unk()
            )
            self.assert_no_eos_at_end(x=x_noised, x_len=l_noised, eos=vocab.eos())

    def _get_noising_dataset_batch(self, src_tokens_no_pad, src_dict, append_eos_to_tgt=False):
        """
        Constructs a NoisingDataset and the corresponding
        ``LanguagePairDataset(NoisingDataset(src), src)``. If
        *append_eos_to_tgt* is True, wrap the source dataset in
        :class:`TransformEosDataset` to append EOS to the clean source when
        using it as the target.
        """
        src_dataset = test_utils.TestDataset(data=src_tokens_no_pad)
        noising_dataset = noising.NoisingDataset(
            src_dataset=src_dataset,
            src_dict=src_dict,
            seed=1234,
            max_word_shuffle_distance=3,
            word_dropout_prob=0.2,
            word_blanking_prob=0.2,
            noising_class=noising.UnsupervisedMTNoising,
        )
        tgt = src_dataset
        language_pair_dataset = LanguagePairDataset(
            src=noising_dataset, tgt=tgt, src_sizes=None, src_dict=src_dict
        )
        language_pair_dataset = TransformEosDataset(
            language_pair_dataset, src_dict.eos(), append_eos_to_tgt=append_eos_to_tgt
        )
        dataloader = torch.utils.data.DataLoader(
            dataset=language_pair_dataset, batch_size=2, collate_fn=language_pair_dataset.collater
        )
        denoising_batch_result = next(iter(dataloader))
        return denoising_batch_result

    def test_noising_dataset_with_eos(self):
        (src_dict, src_tokens, _) = self._get_test_data_with_bpe_cont_marker(append_eos=True)
        # Transpose to (B x T) and strip padding per sentence.
        src_tokens = torch.t(src_tokens)
        src_tokens_no_pad = []
        for src_sentence in src_tokens:
            src_tokens_no_pad.append(utils.strip_pad(tensor=src_sentence, pad=src_dict.pad()))
        denoising_batch_result = self._get_noising_dataset_batch(
            src_tokens_no_pad=src_tokens_no_pad, src_dict=src_dict
        )
        (eos, pad) = (src_dict.eos(), src_dict.pad())
        expected_src = torch.LongTensor(
            [[4, 5, 10, 11, 8, 12, 13, eos], [pad, pad, pad, 6, 8, 9, 7, eos]]
        )
        expected_tgt = torch.LongTensor(
            [[4, 5, 10, 11, 8, 12, 13, eos], [6, 7, 8, 9, eos, pad, pad, pad]]
        )
        generated_src = denoising_batch_result['net_input']['src_tokens']
        tgt_tokens = denoising_batch_result['target']
        self.assertTensorEqual(expected_src, generated_src)
        self.assertTensorEqual(expected_tgt, tgt_tokens)

    def test_noising_dataset_without_eos(self):
        """
        Similar to test noising dataset with eos except that we have to set
        *append_eos_to_tgt* to ``True``.
        """
        (src_dict, src_tokens, _) = self._get_test_data_with_bpe_cont_marker(append_eos=False)
        src_tokens = torch.t(src_tokens)
        src_tokens_no_pad = []
        for src_sentence in src_tokens:
            src_tokens_no_pad.append(utils.strip_pad(tensor=src_sentence, pad=src_dict.pad()))
        denoising_batch_result = self._get_noising_dataset_batch(
            src_tokens_no_pad=src_tokens_no_pad, src_dict=src_dict, append_eos_to_tgt=True
        )
        (eos, pad) = (src_dict.eos(), src_dict.pad())
        expected_src = torch.LongTensor(
            [[4, 5, 10, 11, 8, 12, 13], [pad, pad, pad, 6, 8, 9, 7]]
        )
        expected_tgt = torch.LongTensor(
            [[4, 5, 10, 11, 8, 12, 13, eos], [6, 7, 8, 9, eos, pad, pad, pad]]
        )
        generated_src = denoising_batch_result['net_input']['src_tokens']
        tgt_tokens = denoising_batch_result['target']
        self.assertTensorEqual(expected_src, generated_src)
        self.assertTensorEqual(expected_tgt, tgt_tokens)

    def assertTensorEqual(self, t1, t2):
        """Assert two tensors are element-wise identical (and same shape)."""
        self.assertEqual(t1.size(), t2.size(), 'size mismatch')
        self.assertEqual(t1.ne(t2).long().sum(), 0)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.