repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/translation_moe/src/translation_moe.py | examples/translation_moe/src/translation_moe.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from fairseq import metrics, utils
from fairseq.tasks import register_task
from fairseq.tasks.translation import TranslationTask
from .logsumexp_moe import LogSumExpMoE
from .mean_pool_gating_network import MeanPoolGatingNetwork
@register_task('translation_moe')
class TranslationMoETask(TranslationTask):
"""
Translation task for Mixture of Experts (MoE) models.
See `"Mixture Models for Diverse Machine Translation: Tricks of the Trade"
(Shen et al., 2019) <https://arxiv.org/abs/1902.07816>`_.
Args:
src_dict (~fairseq.data.Dictionary): dictionary for the source language
tgt_dict (~fairseq.data.Dictionary): dictionary for the target language
.. note::
The translation task is compatible with :mod:`fairseq-train`,
:mod:`fairseq-generate` and :mod:`fairseq-interactive`.
The translation task provides the following additional command-line
arguments:
.. argparse::
:ref: fairseq.tasks.translation_parser
:prog:
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
# fmt: off
TranslationTask.add_args(parser)
parser.add_argument('--method', default='hMoEup',
choices=['sMoElp', 'sMoEup', 'hMoElp', 'hMoEup'])
parser.add_argument('--num-experts', default=3, type=int, metavar='N',
help='number of experts')
parser.add_argument('--mean-pool-gating-network', action='store_true',
help='use a simple mean-pooling gating network')
parser.add_argument('--mean-pool-gating-network-dropout', type=float,
help='dropout for mean-pooling gating network')
parser.add_argument('--mean-pool-gating-network-encoder-dim', type=float,
help='encoder output dim for mean-pooling gating network')
parser.add_argument('--gen-expert', type=int, default=0,
help='which expert to use for generation')
# fmt: on
def __init__(self, args, src_dict, tgt_dict):
if args.method == 'sMoElp':
# soft MoE with learned prior
self.uniform_prior = False
self.hard_selection = False
elif args.method == 'sMoEup':
# soft MoE with uniform prior
self.uniform_prior = True
self.hard_selection = False
elif args.method == 'hMoElp':
# hard MoE with learned prior
self.uniform_prior = False
self.hard_selection = True
elif args.method == 'hMoEup':
# hard MoE with uniform prior
self.uniform_prior = True
self.hard_selection = True
# add indicator tokens for each expert
for i in range(args.num_experts):
# add to both dictionaries in case we're sharing embeddings
src_dict.add_symbol('<expert_{}>'.format(i))
tgt_dict.add_symbol('<expert_{}>'.format(i))
super().__init__(args, src_dict, tgt_dict)
def build_model(self, args):
from fairseq import models
model = models.build_model(args, self)
if not self.uniform_prior and not hasattr(model, 'gating_network'):
if self.args.mean_pool_gating_network:
if getattr(args, 'mean_pool_gating_network_encoder_dim', None):
encoder_dim = args.mean_pool_gating_network_encoder_dim
elif getattr(args, 'encoder_embed_dim', None):
# assume that encoder_embed_dim is the encoder's output dimension
encoder_dim = args.encoder_embed_dim
else:
raise ValueError('Must specify --mean-pool-gating-network-encoder-dim')
if getattr(args, 'mean_pool_gating_network_dropout', None):
dropout = args.mean_pool_gating_network_dropout
elif getattr(args, 'dropout', None):
dropout = args.dropout
else:
raise ValueError('Must specify --mean-pool-gating-network-dropout')
model.gating_network = MeanPoolGatingNetwork(
encoder_dim, args.num_experts, dropout,
)
else:
raise ValueError(
'translation_moe task with learned prior requires the model to '
'have a gating network; try using --mean-pool-gating-network'
)
return model
def expert_index(self, i):
return i + self.tgt_dict.index('<expert_0>')
def _get_loss(self, sample, model, criterion):
assert hasattr(criterion, 'compute_loss'), \
'translation_moe task requires the criterion to implement the compute_loss() method'
k = self.args.num_experts
bsz = sample['target'].size(0)
def get_lprob_y(encoder_out, prev_output_tokens_k):
net_output = model.decoder(
prev_output_tokens=prev_output_tokens_k,
encoder_out=encoder_out,
)
loss, _ = criterion.compute_loss(model, net_output, sample, reduce=False)
loss = loss.view(bsz, -1)
return -loss.sum(dim=1, keepdim=True) # -> B x 1
def get_lprob_yz(winners=None):
encoder_out = model.encoder(
src_tokens=sample['net_input']['src_tokens'],
src_lengths=sample['net_input']['src_lengths'],
)
if winners is None:
lprob_y = []
for i in range(k):
prev_output_tokens_k = sample['net_input']['prev_output_tokens'].clone()
assert not prev_output_tokens_k.requires_grad
prev_output_tokens_k[:, 0] = self.expert_index(i)
lprob_y.append(get_lprob_y(encoder_out, prev_output_tokens_k))
lprob_y = torch.cat(lprob_y, dim=1) # -> B x K
else:
prev_output_tokens_k = sample['net_input']['prev_output_tokens'].clone()
prev_output_tokens_k[:, 0] = self.expert_index(winners)
lprob_y = get_lprob_y(encoder_out, prev_output_tokens_k) # -> B
if self.uniform_prior:
lprob_yz = lprob_y
else:
lprob_z = model.gating_network(encoder_out) # B x K
if winners is not None:
lprob_z = lprob_z.gather(dim=1, index=winners.unsqueeze(-1))
lprob_yz = lprob_y + lprob_z.type_as(lprob_y) # B x K
return lprob_yz
# compute responsibilities without dropout
with utils.model_eval(model): # disable dropout
with torch.no_grad(): # disable autograd
lprob_yz = get_lprob_yz() # B x K
prob_z_xy = torch.nn.functional.softmax(lprob_yz, dim=1)
assert not prob_z_xy.requires_grad
# compute loss with dropout
if self.hard_selection:
winners = prob_z_xy.max(dim=1)[1]
loss = -get_lprob_yz(winners)
else:
lprob_yz = get_lprob_yz() # B x K
loss = -LogSumExpMoE.apply(lprob_yz, prob_z_xy, 1)
loss = loss.sum()
sample_size = sample['target'].size(0) if self.args.sentence_avg else sample['ntokens']
logging_output = {
'loss': utils.item(loss.data),
'ntokens': sample['ntokens'],
'nsentences': bsz,
'sample_size': sample_size,
'posterior': prob_z_xy.float().sum(dim=0).cpu(),
}
return loss, sample_size, logging_output
def train_step(self, sample, model, criterion, optimizer, update_num, ignore_grad=False):
model.train()
loss, sample_size, logging_output = self._get_loss(sample, model, criterion)
if ignore_grad:
loss *= 0
optimizer.backward(loss)
return loss, sample_size, logging_output
def valid_step(self, sample, model, criterion):
model.eval()
with torch.no_grad():
loss, sample_size, logging_output = self._get_loss(sample, model, criterion)
return loss, sample_size, logging_output
def inference_step(self, generator, models, sample, prefix_tokens=None, expert=None, constraints=None):
expert = expert or self.args.gen_expert
with torch.no_grad():
return generator.generate(
models,
sample,
prefix_tokens=prefix_tokens,
constraints=constraints,
bos_token=self.expert_index(expert),
)
def reduce_metrics(self, logging_outputs, criterion):
super().reduce_metrics(logging_outputs, criterion)
metrics.log_scalar(
'posterior',
sum(log['posterior'] for log in logging_outputs if 'posterior' in log)
)
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/translation_moe/src/__init__.py | examples/translation_moe/src/__init__.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import translation_moe # noqa
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/speech_recognition/infer.py | examples/speech_recognition/infer.py | #!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Run inference for pre-processed data with a trained model.
"""
import editdistance
import logging
import math
import os
import sys
import numpy as np
import torch
from fairseq import checkpoint_utils, options, progress_bar, utils, tasks
from fairseq.logging.meters import StopwatchMeter, TimeMeter
from fairseq.data.data_utils import post_process
logging.basicConfig()
logging.root.setLevel(logging.INFO)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def add_asr_eval_argument(parser):
parser.add_argument("--kspmodel", default=None, help="sentence piece model")
parser.add_argument(
"--wfstlm", default=None, help="wfstlm on dictonary output units"
)
parser.add_argument(
"--rnnt_decoding_type",
default="greedy",
help="wfstlm on dictonary\
output units",
)
parser.add_argument(
"--lm-weight",
"--lm_weight",
type=float,
default=0.2,
help="weight for lm while interpolating with neural score",
)
parser.add_argument(
"--rnnt_len_penalty", default=-0.5, help="rnnt length penalty on word level"
)
parser.add_argument(
"--w2l-decoder", choices=["viterbi", "kenlm", "fairseqlm"], help="use a w2l decoder"
)
parser.add_argument("--lexicon", help="lexicon for w2l decoder")
parser.add_argument("--unit-lm", action='store_true', help="if using a unit lm")
parser.add_argument("--kenlm-model", "--lm-model", help="lm model for w2l decoder")
parser.add_argument("--beam-threshold", type=float, default=25.0)
parser.add_argument("--beam-size-token", type=float, default=100)
parser.add_argument("--word-score", type=float, default=1.0)
parser.add_argument("--unk-weight", type=float, default=-math.inf)
parser.add_argument("--sil-weight", type=float, default=0.0)
parser.add_argument(
"--dump-emissions",
type=str,
default=None,
help="if present, dumps emissions into this file and exits",
)
parser.add_argument(
"--dump-features",
type=str,
default=None,
help="if present, dumps features into this file and exits",
)
parser.add_argument(
"--load-emissions",
type=str,
default=None,
help="if present, loads emissions from this file",
)
return parser
def check_args(args):
# assert args.path is not None, "--path required for generation!"
# assert args.results_path is not None, "--results_path required for generation!"
assert (
not args.sampling or args.nbest == args.beam
), "--sampling requires --nbest to be equal to --beam"
assert (
args.replace_unk is None or args.raw_text
), "--replace-unk requires a raw text dataset (--raw-text)"
def get_dataset_itr(args, task, models):
return task.get_batch_iterator(
dataset=task.dataset(args.gen_subset),
max_tokens=args.max_tokens,
max_sentences=args.max_sentences,
max_positions=(sys.maxsize, sys.maxsize),
ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=args.required_batch_size_multiple,
num_shards=args.num_shards,
shard_id=args.shard_id,
num_workers=args.num_workers,
data_buffer_size=args.data_buffer_size,
).next_epoch_itr(shuffle=False)
def process_predictions(
args, hypos, sp, tgt_dict, target_tokens, res_files, speaker, id
):
for hypo in hypos[: min(len(hypos), args.nbest)]:
hyp_pieces = tgt_dict.string(hypo["tokens"].int().cpu())
if "words" in hypo:
hyp_words = " ".join(hypo["words"])
else:
hyp_words = post_process(hyp_pieces, args.remove_bpe)
if res_files is not None:
print(
"{} ({}-{})".format(hyp_pieces, speaker, id), file=res_files["hypo.units"]
)
print("{} ({}-{})".format(hyp_words, speaker, id), file=res_files["hypo.words"])
tgt_pieces = tgt_dict.string(target_tokens)
tgt_words = post_process(tgt_pieces, args.remove_bpe)
if res_files is not None:
print("{} ({}-{})".format(tgt_pieces, speaker, id), file=res_files["ref.units"])
print("{} ({}-{})".format(tgt_words, speaker, id), file=res_files["ref.words"])
# only score top hypothesis
if not args.quiet:
logger.debug("HYPO:" + hyp_words)
logger.debug("TARGET:" + tgt_words)
logger.debug("___________________")
hyp_words = hyp_words.split()
tgt_words = tgt_words.split()
return editdistance.eval(hyp_words, tgt_words), len(tgt_words)
def prepare_result_files(args):
def get_res_file(file_prefix):
if args.num_shards > 1:
file_prefix = f'{args.shard_id}_{file_prefix}'
path = os.path.join(
args.results_path,
"{}-{}-{}.txt".format(
file_prefix, os.path.basename(args.path), args.gen_subset
),
)
return open(path, "w", buffering=1)
if not args.results_path:
return None
return {
"hypo.words": get_res_file("hypo.word"),
"hypo.units": get_res_file("hypo.units"),
"ref.words": get_res_file("ref.word"),
"ref.units": get_res_file("ref.units"),
}
def load_models_and_criterions(filenames, data_path, arg_overrides=None, task=None, model_state=None):
models = []
criterions = []
if arg_overrides is None:
arg_overrides = {}
arg_overrides['wer_args'] = None
arg_overrides['data'] = data_path
if filenames is None:
assert model_state is not None
filenames = [0]
else:
filenames = filenames.split(":")
for filename in filenames:
if model_state is None:
if not os.path.exists(filename):
raise IOError("Model file not found: {}".format(filename))
state = checkpoint_utils.load_checkpoint_to_cpu(filename, arg_overrides)
else:
state = model_state
args = state["args"]
if task is None:
task = tasks.setup_task(args)
model = task.build_model(args)
model.load_state_dict(state["model"], strict=True)
models.append(model)
criterion = task.build_criterion(args)
if "criterion" in state:
criterion.load_state_dict(state["criterion"], strict=True)
criterions.append(criterion)
return models, criterions, args
def optimize_models(args, use_cuda, models):
"""Optimize ensemble for generation
"""
for model in models:
model.make_generation_fast_(
beamable_mm_beam_size=None if args.no_beamable_mm else args.beam,
need_attn=args.print_alignment,
)
if args.fp16:
model.half()
if use_cuda:
model.cuda()
class ExistingEmissionsDecoder(object):
def __init__(self, decoder, emissions):
self.decoder = decoder
self.emissions = emissions
def generate(self, models, sample, **unused):
ids = sample["id"].cpu().numpy()
try:
emissions = np.stack(self.emissions[ids])
except:
print([x.shape for x in self.emissions[ids]])
raise Exception('invalid sizes')
emissions = torch.from_numpy(emissions)
return self.decoder.decode(emissions)
def main(args, task=None, model_state=None):
check_args(args)
if args.max_tokens is None and args.max_sentences is None:
args.max_tokens = 4000000
logger.info(args)
use_cuda = torch.cuda.is_available() and not args.cpu
if task is None:
# Load dataset splits
task = tasks.setup_task(args)
task.load_dataset(args.gen_subset)
logger.info(
"| {} {} {} examples".format(
args.data, args.gen_subset, len(task.dataset(args.gen_subset))
)
)
# Set dictionary
tgt_dict = task.target_dictionary
logger.info("| decoding with criterion {}".format(args.criterion))
# Load ensemble
if args.load_emissions:
models, criterions = [], []
else:
logger.info("| loading model(s) from {}".format(args.path))
models, criterions, _ = load_models_and_criterions(
args.path,
data_path=args.data,
arg_overrides=eval(args.model_overrides), # noqa
task=task,
model_state=model_state,
)
optimize_models(args, use_cuda, models)
# hack to pass transitions to W2lDecoder
if args.criterion == "asg_loss":
trans = criterions[0].asg.trans.data
args.asg_transitions = torch.flatten(trans).tolist()
# Load dataset (possibly sharded)
itr = get_dataset_itr(args, task, models)
# Initialize generator
gen_timer = StopwatchMeter()
def build_generator(args):
w2l_decoder = getattr(args, "w2l_decoder", None)
if w2l_decoder == "viterbi":
from examples.speech_recognition.w2l_decoder import W2lViterbiDecoder
return W2lViterbiDecoder(args, task.target_dictionary)
elif w2l_decoder == "kenlm":
from examples.speech_recognition.w2l_decoder import W2lKenLMDecoder
return W2lKenLMDecoder(args, task.target_dictionary)
elif w2l_decoder == "fairseqlm":
from examples.speech_recognition.w2l_decoder import W2lFairseqLMDecoder
return W2lFairseqLMDecoder(args, task.target_dictionary)
else:
return super().build_generator(args)
generator = build_generator(args)
if args.load_emissions:
generator = ExistingEmissionsDecoder(
generator, np.load(args.load_emissions, allow_pickle=True)
)
logger.info("loaded emissions from " + args.load_emissions)
num_sentences = 0
if args.results_path is not None and not os.path.exists(args.results_path):
os.makedirs(args.results_path)
max_source_pos = (
utils.resolve_max_positions(
task.max_positions(), *[model.max_positions() for model in models]
),
)
if max_source_pos is not None:
max_source_pos = max_source_pos[0]
if max_source_pos is not None:
max_source_pos = max_source_pos[0] - 1
if args.dump_emissions:
emissions = {}
if args.dump_features:
features = {}
models[0].bert.proj = None
else:
res_files = prepare_result_files(args)
errs_t = 0
lengths_t = 0
with progress_bar.build_progress_bar(args, itr) as t:
wps_meter = TimeMeter()
for sample in t:
sample = utils.move_to_cuda(sample) if use_cuda else sample
if "net_input" not in sample:
continue
prefix_tokens = None
if args.prefix_size > 0:
prefix_tokens = sample["target"][:, : args.prefix_size]
gen_timer.start()
if args.dump_emissions:
with torch.no_grad():
encoder_out = models[0](**sample["net_input"])
emm = models[0].get_normalized_probs(encoder_out, log_probs=True)
emm = emm.transpose(0, 1).cpu().numpy()
for i, id in enumerate(sample["id"]):
emissions[id.item()] = emm[i]
continue
elif args.dump_features:
with torch.no_grad():
encoder_out = models[0](**sample["net_input"])
feat = encoder_out["encoder_out"].transpose(0, 1).cpu().numpy()
for i, id in enumerate(sample["id"]):
padding = encoder_out["encoder_padding_mask"][i].cpu().numpy() if encoder_out["encoder_padding_mask"] is not None else None
features[id.item()] = (feat[i], padding)
continue
hypos = task.inference_step(generator, models, sample, prefix_tokens)
num_generated_tokens = sum(len(h[0]["tokens"]) for h in hypos)
gen_timer.stop(num_generated_tokens)
for i, sample_id in enumerate(sample["id"].tolist()):
speaker = None
# id = task.dataset(args.gen_subset).ids[int(sample_id)]
id = sample_id
toks = sample["target"][i, :] if 'target_label' not in sample else sample["target_label"][i, :]
target_tokens = (
utils.strip_pad(toks, tgt_dict.pad()).int().cpu()
)
# Process top predictions
errs, length = process_predictions(
args, hypos[i], None, tgt_dict, target_tokens, res_files, speaker, id
)
errs_t += errs
lengths_t += length
wps_meter.update(num_generated_tokens)
t.log({"wps": round(wps_meter.avg)})
num_sentences += sample["nsentences"] if "nsentences" in sample else sample["id"].numel()
wer = None
if args.dump_emissions:
emm_arr = []
for i in range(len(emissions)):
emm_arr.append(emissions[i])
np.save(args.dump_emissions, emm_arr)
logger.info(f"saved {len(emissions)} emissions to {args.dump_emissions}")
elif args.dump_features:
feat_arr = []
for i in range(len(features)):
feat_arr.append(features[i])
np.save(args.dump_features, feat_arr)
logger.info(f"saved {len(features)} emissions to {args.dump_features}")
else:
if lengths_t > 0:
wer = errs_t * 100.0 / lengths_t
logger.info(f"WER: {wer}")
logger.info(
"| Processed {} sentences ({} tokens) in {:.1f}s ({:.2f}"
"sentences/s, {:.2f} tokens/s)".format(
num_sentences,
gen_timer.n,
gen_timer.sum,
num_sentences / gen_timer.sum,
1.0 / gen_timer.avg,
)
)
logger.info("| Generate {} with beam={}".format(args.gen_subset, args.beam))
return task, wer
def make_parser():
parser = options.get_generation_parser()
parser = add_asr_eval_argument(parser)
return parser
def cli_main():
parser = make_parser()
args = options.parse_args_and_arch(parser)
main(args)
if __name__ == "__main__":
cli_main()
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/speech_recognition/__init__.py | examples/speech_recognition/__init__.py | from . import tasks, criterions, models # noqa
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/speech_recognition/w2l_decoder.py | examples/speech_recognition/w2l_decoder.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Wav2letter decoders.
"""
from collections import namedtuple, deque
import gc
import itertools as it
import numpy as np
import torch
import os.path as osp
import warnings
from fairseq import tasks
from fairseq.utils import apply_to_sample
from examples.speech_recognition.data.replabels import unpack_replabels
try:
from wav2letter.common import create_word_dict, load_words
from wav2letter.criterion import CpuViterbiPath, get_data_ptr_as_bytes
from wav2letter.decoder import (
CriterionType,
DecoderOptions,
KenLM,
LM,
LMState,
SmearingMode,
Trie,
LexiconDecoder,
LexiconFreeDecoder,
)
except:
warnings.warn(
"wav2letter python bindings are required to use this functionality. Please install from https://github.com/facebookresearch/wav2letter/wiki/Python-bindings"
)
LM = object
LMState = object
class W2lDecoder(object):
def __init__(self, args, tgt_dict):
self.tgt_dict = tgt_dict
self.vocab_size = len(tgt_dict)
self.nbest = args.nbest
# criterion-specific init
if args.criterion == "ctc":
self.criterion_type = CriterionType.CTC
self.blank = (
tgt_dict.index("<ctc_blank>")
if "<ctc_blank>" in tgt_dict.indices
else tgt_dict.bos()
)
self.asg_transitions = None
elif args.criterion == "asg_loss":
self.criterion_type = CriterionType.ASG
self.blank = -1
self.asg_transitions = args.asg_transitions
self.max_replabel = args.max_replabel
assert len(self.asg_transitions) == self.vocab_size ** 2
else:
raise RuntimeError(f"unknown criterion: {args.criterion}")
def generate(self, models, sample, **unused):
"""Generate a batch of inferences."""
# model.forward normally channels prev_output_tokens into the decoder
# separately, but SequenceGenerator directly calls model.encoder
encoder_input = {
k: v for k, v in sample["net_input"].items() if k != "prev_output_tokens"
}
emissions = self.get_emissions(models, encoder_input)
return self.decode(emissions)
def get_emissions(self, models, encoder_input):
"""Run encoder and normalize emissions"""
# encoder_out = models[0].encoder(**encoder_input)
encoder_out = models[0](**encoder_input)
if self.criterion_type == CriterionType.CTC:
emissions = models[0].get_normalized_probs(encoder_out, log_probs=True)
elif self.criterion_type == CriterionType.ASG:
emissions = encoder_out["encoder_out"]
return emissions.transpose(0, 1).float().cpu().contiguous()
def get_tokens(self, idxs):
"""Normalize tokens by handling CTC blank, ASG replabels, etc."""
idxs = (g[0] for g in it.groupby(idxs))
if self.criterion_type == CriterionType.CTC:
idxs = filter(lambda x: x != self.blank, idxs)
elif self.criterion_type == CriterionType.ASG:
idxs = filter(lambda x: x >= 0, idxs)
idxs = unpack_replabels(list(idxs), self.tgt_dict, self.max_replabel)
return torch.LongTensor(list(idxs))
class W2lViterbiDecoder(W2lDecoder):
def __init__(self, args, tgt_dict):
super().__init__(args, tgt_dict)
def decode(self, emissions):
B, T, N = emissions.size()
hypos = []
if self.asg_transitions is None:
transitions = torch.FloatTensor(N, N).zero_()
else:
transitions = torch.FloatTensor(self.asg_transitions).view(N, N)
viterbi_path = torch.IntTensor(B, T)
workspace = torch.ByteTensor(CpuViterbiPath.get_workspace_size(B, T, N))
CpuViterbiPath.compute(
B,
T,
N,
get_data_ptr_as_bytes(emissions),
get_data_ptr_as_bytes(transitions),
get_data_ptr_as_bytes(viterbi_path),
get_data_ptr_as_bytes(workspace),
)
return [
[{"tokens": self.get_tokens(viterbi_path[b].tolist()), "score": 0}]
for b in range(B)
]
class W2lKenLMDecoder(W2lDecoder):
def __init__(self, args, tgt_dict):
super().__init__(args, tgt_dict)
self.silence = (
tgt_dict.index("<ctc_blank>")
if "<ctc_blank>" in tgt_dict.indices
else tgt_dict.bos()
)
self.lexicon = load_words(args.lexicon)
self.word_dict = create_word_dict(self.lexicon)
self.unk_word = self.word_dict.get_index("<unk>")
self.lm = KenLM(args.kenlm_model, self.word_dict)
self.trie = Trie(self.vocab_size, self.silence)
start_state = self.lm.start(False)
for i, (word, spellings) in enumerate(self.lexicon.items()):
word_idx = self.word_dict.get_index(word)
_, score = self.lm.score(start_state, word_idx)
for spelling in spellings:
spelling_idxs = [tgt_dict.index(token) for token in spelling]
assert (
tgt_dict.unk() not in spelling_idxs
), f"{spelling} {spelling_idxs}"
self.trie.insert(spelling_idxs, word_idx, score)
self.trie.smear(SmearingMode.MAX)
self.decoder_opts = DecoderOptions(
args.beam,
int(getattr(args, "beam_size_token", len(tgt_dict))),
args.beam_threshold,
args.lm_weight,
args.word_score,
args.unk_weight,
args.sil_weight,
0,
False,
self.criterion_type,
)
if self.asg_transitions is None:
N = 768
# self.asg_transitions = torch.FloatTensor(N, N).zero_()
self.asg_transitions = []
self.decoder = LexiconDecoder(
self.decoder_opts,
self.trie,
self.lm,
self.silence,
self.blank,
self.unk_word,
self.asg_transitions,
False,
)
def decode(self, emissions):
B, T, N = emissions.size()
hypos = []
for b in range(B):
emissions_ptr = emissions.data_ptr() + 4 * b * emissions.stride(0)
results = self.decoder.decode(emissions_ptr, T, N)
nbest_results = results[: self.nbest]
hypos.append(
[
{
"tokens": self.get_tokens(result.tokens),
"score": result.score,
"words": [
self.word_dict.get_entry(x) for x in result.words if x >= 0
],
}
for result in nbest_results
]
)
return hypos
FairseqLMState = namedtuple("FairseqLMState", ["prefix", "incremental_state", "probs"])
class FairseqLM(LM):
def __init__(self, dictionary, model):
LM.__init__(self)
self.dictionary = dictionary
self.model = model
self.unk = self.dictionary.unk()
self.save_incremental = False # this currently does not work properly
self.max_cache = 20_000
model.cuda()
model.eval()
model.make_generation_fast_()
self.states = {}
self.stateq = deque()
def start(self, start_with_nothing):
state = LMState()
prefix = torch.LongTensor([[self.dictionary.eos()]])
incremental_state = {} if self.save_incremental else None
with torch.no_grad():
res = self.model(prefix.cuda(), incremental_state=incremental_state)
probs = self.model.get_normalized_probs(res, log_probs=True, sample=None)
if incremental_state is not None:
incremental_state = apply_to_sample(lambda x: x.cpu(), incremental_state)
self.states[state] = FairseqLMState(
prefix.numpy(), incremental_state, probs[0, -1].cpu().numpy()
)
self.stateq.append(state)
return state
def score(self, state: LMState, token_index: int, no_cache: bool = False):
"""
Evaluate language model based on the current lm state and new word
Parameters:
-----------
state: current lm state
token_index: index of the word
(can be lexicon index then you should store inside LM the
mapping between indices of lexicon and lm, or lm index of a word)
Returns:
--------
(LMState, float): pair of (new state, score for the current word)
"""
curr_state = self.states[state]
def trim_cache(targ_size):
while len(self.stateq) > targ_size:
rem_k = self.stateq.popleft()
rem_st = self.states[rem_k]
rem_st = FairseqLMState(rem_st.prefix, None, None)
self.states[rem_k] = rem_st
if curr_state.probs is None:
new_incremental_state = (
curr_state.incremental_state.copy()
if curr_state.incremental_state is not None
else None
)
with torch.no_grad():
if new_incremental_state is not None:
new_incremental_state = apply_to_sample(
lambda x: x.cuda(), new_incremental_state
)
elif self.save_incremental:
new_incremental_state = {}
res = self.model(
torch.from_numpy(curr_state.prefix).cuda(),
incremental_state=new_incremental_state,
)
probs = self.model.get_normalized_probs(
res, log_probs=True, sample=None
)
if new_incremental_state is not None:
new_incremental_state = apply_to_sample(
lambda x: x.cpu(), new_incremental_state
)
curr_state = FairseqLMState(
curr_state.prefix, new_incremental_state, probs[0, -1].cpu().numpy()
)
if not no_cache:
self.states[state] = curr_state
self.stateq.append(state)
score = curr_state.probs[token_index].item()
trim_cache(self.max_cache)
outstate = state.child(token_index)
if outstate not in self.states and not no_cache:
prefix = np.concatenate(
[curr_state.prefix, torch.LongTensor([[token_index]])], -1
)
incr_state = curr_state.incremental_state
self.states[outstate] = FairseqLMState(prefix, incr_state, None)
if token_index == self.unk:
score = float("-inf")
return outstate, score
def finish(self, state: LMState):
"""
Evaluate eos for language model based on the current lm state
Returns:
--------
(LMState, float): pair of (new state, score for the current word)
"""
return self.score(state, self.dictionary.eos())
def empty_cache(self):
self.states = {}
self.stateq = deque()
gc.collect()
class W2lFairseqLMDecoder(W2lDecoder):
def __init__(self, args, tgt_dict):
super().__init__(args, tgt_dict)
self.silence = tgt_dict.bos()
self.unit_lm = getattr(args, "unit_lm", False)
self.lexicon = load_words(args.lexicon) if args.lexicon else None
self.idx_to_wrd = {}
checkpoint = torch.load(args.kenlm_model, map_location="cpu")
lm_args = checkpoint["args"]
lm_args.data = osp.dirname(args.kenlm_model)
print(lm_args)
task = tasks.setup_task(lm_args)
model = task.build_model(lm_args)
model.load_state_dict(checkpoint["model"], strict=False)
self.trie = Trie(self.vocab_size, self.silence)
self.word_dict = task.dictionary
self.unk_word = self.word_dict.unk()
self.lm = FairseqLM(self.word_dict, model)
self.decoder_opts = DecoderOptions(
args.beam,
int(getattr(args, "beam_size_token", len(tgt_dict))),
args.beam_threshold,
args.lm_weight,
args.word_score,
args.unk_weight,
args.sil_weight,
0,
False,
self.criterion_type,
)
if self.lexicon:
start_state = self.lm.start(False)
for i, (word, spellings) in enumerate(self.lexicon.items()):
if self.unit_lm:
word_idx = i
self.idx_to_wrd[i] = word
score = 0
else:
word_idx = self.word_dict.index(word)
_, score = self.lm.score(start_state, word_idx, no_cache=True)
for spelling in spellings:
spelling_idxs = [tgt_dict.index(token) for token in spelling]
assert (
tgt_dict.unk() not in spelling_idxs
), f"{spelling} {spelling_idxs}"
self.trie.insert(spelling_idxs, word_idx, score)
self.trie.smear(SmearingMode.MAX)
self.decoder = LexiconDecoder(
self.decoder_opts,
self.trie,
self.lm,
self.silence,
self.blank,
self.unk_word,
[],
self.unit_lm,
)
else:
self.decoder = LexiconFreeDecoder(
self.decoder_opts, self.lm, self.silence, self.blank, []
)
def decode(self, emissions):
B, T, N = emissions.size()
hypos = []
def idx_to_word(idx):
if self.unit_lm:
return self.idx_to_wrd[idx]
else:
return self.word_dict[idx]
def make_hypo(result):
hypo = {"tokens": self.get_tokens(result.tokens), "score": result.score}
if self.lexicon:
hypo["words"] = [idx_to_word(x) for x in result.words if x >= 0]
return hypo
for b in range(B):
emissions_ptr = emissions.data_ptr() + 4 * b * emissions.stride(0)
results = self.decoder.decode(emissions_ptr, T, N)
nbest_results = results[: self.nbest]
hypos.append([make_hypo(result) for result in nbest_results])
self.lm.empty_cache()
return hypos
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/speech_recognition/tasks/speech_recognition.py | examples/speech_recognition/tasks/speech_recognition.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import os
import re
import sys
import torch
from fairseq.data import Dictionary
from fairseq.tasks import register_task, LegacyFairseqTask
from examples.speech_recognition.data import AsrDataset
from examples.speech_recognition.data.replabels import replabel_symbol
def get_asr_dataset_from_json(data_json_path, tgt_dict):
"""
Parse data json and create dataset.
See scripts/asr_prep_json.py which pack json from raw files
Json example:
{
"utts": {
"4771-29403-0025": {
"input": {
"length_ms": 170,
"path": "/tmp/file1.flac"
},
"output": {
"text": "HELLO \n",
"token": "HE LLO",
"tokenid": "4815, 861"
}
},
"1564-142299-0096": {
...
}
}
"""
if not os.path.isfile(data_json_path):
raise FileNotFoundError("Dataset not found: {}".format(data_json_path))
with open(data_json_path, "rb") as f:
data_samples = json.load(f)["utts"]
assert len(data_samples) != 0
sorted_samples = sorted(
data_samples.items(),
key=lambda sample: int(sample[1]["input"]["length_ms"]),
reverse=True,
)
aud_paths = [s[1]["input"]["path"] for s in sorted_samples]
ids = [s[0] for s in sorted_samples]
speakers = []
for s in sorted_samples:
m = re.search("(.+?)-(.+?)-(.+?)", s[0])
speakers.append(m.group(1) + "_" + m.group(2))
frame_sizes = [s[1]["input"]["length_ms"] for s in sorted_samples]
tgt = [
[int(i) for i in s[1]["output"]["tokenid"].split(", ")]
for s in sorted_samples
]
# append eos
tgt = [[*t, tgt_dict.eos()] for t in tgt]
return AsrDataset(aud_paths, frame_sizes, tgt, tgt_dict, ids, speakers)
@register_task("speech_recognition")
class SpeechRecognitionTask(LegacyFairseqTask):
"""
Task for training speech recognition model.
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
parser.add_argument("data", help="path to data directory")
parser.add_argument(
"--silence-token", default="\u2581", help="token for silence (used by w2l)"
)
parser.add_argument('--max-source-positions', default=sys.maxsize, type=int, metavar='N',
help='max number of frames in the source sequence')
parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N',
help='max number of tokens in the target sequence')
def __init__(self, args, tgt_dict):
super().__init__(args)
self.tgt_dict = tgt_dict
@classmethod
def setup_task(cls, args, **kwargs):
"""Setup the task (e.g., load dictionaries)."""
dict_path = os.path.join(args.data, "dict.txt")
if not os.path.isfile(dict_path):
raise FileNotFoundError("Dict not found: {}".format(dict_path))
tgt_dict = Dictionary.load(dict_path)
if args.criterion == "ctc_loss":
tgt_dict.add_symbol("<ctc_blank>")
elif args.criterion == "asg_loss":
for i in range(1, args.max_replabel + 1):
tgt_dict.add_symbol(replabel_symbol(i))
print("| dictionary: {} types".format(len(tgt_dict)))
return cls(args, tgt_dict)
def load_dataset(self, split, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
data_json_path = os.path.join(self.args.data, "{}.json".format(split))
self.datasets[split] = get_asr_dataset_from_json(data_json_path, self.tgt_dict)
def build_generator(self, models, args):
w2l_decoder = getattr(args, "w2l_decoder", None)
if w2l_decoder == "viterbi":
from examples.speech_recognition.w2l_decoder import W2lViterbiDecoder
return W2lViterbiDecoder(args, self.target_dictionary)
elif w2l_decoder == "kenlm":
from examples.speech_recognition.w2l_decoder import W2lKenLMDecoder
return W2lKenLMDecoder(args, self.target_dictionary)
else:
return super().build_generator(models, args)
@property
def target_dictionary(self):
"""Return the :class:`~fairseq.data.Dictionary` for the language
model."""
return self.tgt_dict
@property
def source_dictionary(self):
"""Return the source :class:`~fairseq.data.Dictionary` (if applicable
for this task)."""
return None
def max_positions(self):
"""Return the max speech and sentence length allowed by the task."""
return (self.args.max_source_positions, self.args.max_target_positions)
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/speech_recognition/tasks/__init__.py | examples/speech_recognition/tasks/__init__.py | import importlib
import os
for file in os.listdir(os.path.dirname(__file__)):
if file.endswith('.py') and not file.startswith('_'):
task_name = file[:file.find('.py')]
importlib.import_module('examples.speech_recognition.tasks.' + task_name)
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/speech_recognition/datasets/asr_prep_json.py | examples/speech_recognition/datasets/asr_prep_json.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
from collections import namedtuple
import concurrent.futures
from itertools import chain
import argparse
import os
import json
import sentencepiece as spm
import multiprocessing
from fairseq.data import Dictionary
MILLISECONDS_TO_SECONDS = 0.001
def process_sample(aud_path, lable, utt_id, sp, tgt_dict):
import torchaudio
input = {}
output = {}
si, ei = torchaudio.info(aud_path)
input["length_ms"] = int(si.length / si.channels / si.rate / MILLISECONDS_TO_SECONDS)
input["path"] = aud_path
token = " ".join(sp.EncodeAsPieces(lable))
ids = tgt_dict.encode_line(token, append_eos=False)
output["text"] = lable
output["token"] = token
output["tokenid"] = ', '.join(map(str, [t.tolist() for t in ids]))
return {utt_id: {"input": input, "output": output}}
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--audio-dirs", nargs="+", default=['-'], required=True,
help="input directories with audio files")
parser.add_argument("--labels", required=True,
help="aggregated input labels with format <ID LABEL> per line",
type=argparse.FileType('r', encoding='UTF-8'))
parser.add_argument("--spm-model", required=True,
help="sentencepiece model to use for encoding",
type=argparse.FileType('r', encoding='UTF-8'))
parser.add_argument("--dictionary", required=True,
help="file to load fairseq dictionary from",
type=argparse.FileType('r', encoding='UTF-8'))
parser.add_argument("--audio-format", choices=["flac", "wav"], default="wav")
parser.add_argument("--output", required=True, type=argparse.FileType('w'),
help="path to save json output")
args = parser.parse_args()
sp = spm.SentencePieceProcessor()
sp.Load(args.spm_model.name)
tgt_dict = Dictionary.load(args.dictionary)
labels = {}
for line in args.labels:
(utt_id, label) = line.split(" ", 1)
labels[utt_id] = label
if len(labels) == 0:
raise Exception('No labels found in ', args.labels_path)
Sample = namedtuple('Sample', 'aud_path utt_id')
samples = []
for path, _, files in chain.from_iterable(os.walk(path) for path in args.audio_dirs):
for f in files:
if f.endswith(args.audio_format):
if len(os.path.splitext(f)) != 2:
raise Exception('Expect <utt_id.extension> file name. Got: ', f)
utt_id = os.path.splitext(f)[0]
if utt_id not in labels:
continue
samples.append(Sample(os.path.join(path, f), utt_id))
utts = {}
num_cpu = multiprocessing.cpu_count()
with concurrent.futures.ThreadPoolExecutor(max_workers=num_cpu) as executor:
future_to_sample = {executor.submit(process_sample, s.aud_path, labels[s.utt_id], s.utt_id, sp, tgt_dict): s for s in samples}
for future in concurrent.futures.as_completed(future_to_sample):
try:
data = future.result()
except Exception as exc:
print('generated an exception: ', exc)
else:
utts.update(data)
json.dump({"utts": utts}, args.output, indent=4)
if __name__ == "__main__":
main()
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/speech_recognition/models/w2l_conv_glu_enc.py | examples/speech_recognition/models/w2l_conv_glu_enc.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq.models import (
FairseqEncoder,
FairseqEncoderModel,
register_model,
register_model_architecture,
)
from fairseq.modules.fairseq_dropout import FairseqDropout
default_conv_enc_config = """[
(400, 13, 170, 0.2),
(440, 14, 0, 0.214),
(484, 15, 0, 0.22898),
(532, 16, 0, 0.2450086),
(584, 17, 0, 0.262159202),
(642, 18, 0, 0.28051034614),
(706, 19, 0, 0.30014607037),
(776, 20, 0, 0.321156295296),
(852, 21, 0, 0.343637235966),
(936, 22, 0, 0.367691842484),
(1028, 23, 0, 0.393430271458),
(1130, 24, 0, 0.42097039046),
(1242, 25, 0, 0.450438317792),
(1366, 26, 0, 0.481969000038),
(1502, 27, 0, 0.51570683004),
(1652, 28, 0, 0.551806308143),
(1816, 29, 0, 0.590432749713),
]"""
@register_model("asr_w2l_conv_glu_encoder")
class W2lConvGluEncoderModel(FairseqEncoderModel):
def __init__(self, encoder):
super().__init__(encoder)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
parser.add_argument(
"--input-feat-per-channel",
type=int,
metavar="N",
help="encoder input dimension per input channel",
)
parser.add_argument(
"--in-channels",
type=int,
metavar="N",
help="number of encoder input channels",
)
parser.add_argument(
"--conv-enc-config",
type=str,
metavar="EXPR",
help="""
an array of tuples each containing the configuration of one conv layer
[(out_channels, kernel_size, padding, dropout), ...]
""",
)
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
conv_enc_config = getattr(args, "conv_enc_config", default_conv_enc_config)
encoder = W2lConvGluEncoder(
vocab_size=len(task.target_dictionary),
input_feat_per_channel=args.input_feat_per_channel,
in_channels=args.in_channels,
conv_enc_config=eval(conv_enc_config),
)
return cls(encoder)
def get_normalized_probs(self, net_output, log_probs, sample=None):
lprobs = super().get_normalized_probs(net_output, log_probs, sample)
lprobs.batch_first = False
return lprobs
class W2lConvGluEncoder(FairseqEncoder):
def __init__(
self, vocab_size, input_feat_per_channel, in_channels, conv_enc_config
):
super().__init__(None)
self.input_dim = input_feat_per_channel
if in_channels != 1:
raise ValueError("only 1 input channel is currently supported")
self.conv_layers = nn.ModuleList()
self.linear_layers = nn.ModuleList()
self.dropouts = []
cur_channels = input_feat_per_channel
for out_channels, kernel_size, padding, dropout in conv_enc_config:
layer = nn.Conv1d(cur_channels, out_channels, kernel_size, padding=padding)
layer.weight.data.mul_(math.sqrt(3)) # match wav2letter init
self.conv_layers.append(nn.utils.weight_norm(layer))
self.dropouts.append(
FairseqDropout(dropout, module_name=self.__class__.__name__)
)
if out_channels % 2 != 0:
raise ValueError("odd # of out_channels is incompatible with GLU")
cur_channels = out_channels // 2 # halved by GLU
for out_channels in [2 * cur_channels, vocab_size]:
layer = nn.Linear(cur_channels, out_channels)
layer.weight.data.mul_(math.sqrt(3))
self.linear_layers.append(nn.utils.weight_norm(layer))
cur_channels = out_channels // 2
def forward(self, src_tokens, src_lengths, **kwargs):
"""
src_tokens: padded tensor (B, T, C * feat)
src_lengths: tensor of original lengths of input utterances (B,)
"""
B, T, _ = src_tokens.size()
x = src_tokens.transpose(1, 2).contiguous() # (B, feat, T) assuming C == 1
for layer_idx in range(len(self.conv_layers)):
x = self.conv_layers[layer_idx](x)
x = F.glu(x, dim=1)
x = self.dropouts[layer_idx](x)
x = x.transpose(1, 2).contiguous() # (B, T, 908)
x = self.linear_layers[0](x)
x = F.glu(x, dim=2)
x = self.dropouts[-1](x)
x = self.linear_layers[1](x)
assert x.size(0) == B
assert x.size(1) == T
encoder_out = x.transpose(0, 1) # (T, B, vocab_size)
# need to debug this -- find a simpler/elegant way in pytorch APIs
encoder_padding_mask = (
torch.arange(T).view(1, T).expand(B, -1).to(x.device)
>= src_lengths.view(B, 1).expand(-1, T)
).t() # (B x T) -> (T x B)
return {
"encoder_out": encoder_out, # (T, B, vocab_size)
"encoder_padding_mask": encoder_padding_mask, # (T, B)
}
def reorder_encoder_out(self, encoder_out, new_order):
encoder_out["encoder_out"] = encoder_out["encoder_out"].index_select(
1, new_order
)
encoder_out["encoder_padding_mask"] = encoder_out[
"encoder_padding_mask"
].index_select(1, new_order)
return encoder_out
def max_positions(self):
"""Maximum input length supported by the encoder."""
return (1e6, 1e6) # an arbitrary large number
@register_model_architecture("asr_w2l_conv_glu_encoder", "w2l_conv_glu_enc")
def w2l_conv_glu_enc(args):
args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80)
args.in_channels = getattr(args, "in_channels", 1)
args.conv_enc_config = getattr(args, "conv_enc_config", default_conv_enc_config)
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/speech_recognition/models/vggtransformer.py | examples/speech_recognition/models/vggtransformer.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import math
from collections.abc import Iterable
import torch
import torch.nn as nn
from fairseq import utils
from fairseq.models import (
FairseqEncoder,
FairseqEncoderModel,
FairseqIncrementalDecoder,
FairseqEncoderDecoderModel,
register_model,
register_model_architecture,
)
from fairseq.modules import LinearizedConvolution
from examples.speech_recognition.data.data_utils import lengths_to_encoder_padding_mask
from fairseq.modules import TransformerDecoderLayer, TransformerEncoderLayer, VGGBlock
@register_model("asr_vggtransformer")
class VGGTransformerModel(FairseqEncoderDecoderModel):
"""
Transformers with convolutional context for ASR
https://arxiv.org/abs/1904.11660
"""
def __init__(self, encoder, decoder):
super().__init__(encoder, decoder)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
parser.add_argument(
"--input-feat-per-channel",
type=int,
metavar="N",
help="encoder input dimension per input channel",
)
parser.add_argument(
"--vggblock-enc-config",
type=str,
metavar="EXPR",
help="""
an array of tuples each containing the configuration of one vggblock:
[(out_channels,
conv_kernel_size,
pooling_kernel_size,
num_conv_layers,
use_layer_norm), ...])
""",
)
parser.add_argument(
"--transformer-enc-config",
type=str,
metavar="EXPR",
help=""""
a tuple containing the configuration of the encoder transformer layers
configurations:
[(input_dim,
num_heads,
ffn_dim,
normalize_before,
dropout,
attention_dropout,
relu_dropout), ...]')
""",
)
parser.add_argument(
"--enc-output-dim",
type=int,
metavar="N",
help="""
encoder output dimension, can be None. If specified, projecting the
transformer output to the specified dimension""",
)
parser.add_argument(
"--in-channels",
type=int,
metavar="N",
help="number of encoder input channels",
)
parser.add_argument(
"--tgt-embed-dim",
type=int,
metavar="N",
help="embedding dimension of the decoder target tokens",
)
parser.add_argument(
"--transformer-dec-config",
type=str,
metavar="EXPR",
help="""
a tuple containing the configuration of the decoder transformer layers
configurations:
[(input_dim,
num_heads,
ffn_dim,
normalize_before,
dropout,
attention_dropout,
relu_dropout), ...]
""",
)
parser.add_argument(
"--conv-dec-config",
type=str,
metavar="EXPR",
help="""
an array of tuples for the decoder 1-D convolution config
[(out_channels, conv_kernel_size, use_layer_norm), ...]""",
)
@classmethod
def build_encoder(cls, args, task):
return VGGTransformerEncoder(
input_feat_per_channel=args.input_feat_per_channel,
vggblock_config=eval(args.vggblock_enc_config),
transformer_config=eval(args.transformer_enc_config),
encoder_output_dim=args.enc_output_dim,
in_channels=args.in_channels,
)
@classmethod
def build_decoder(cls, args, task):
return TransformerDecoder(
dictionary=task.target_dictionary,
embed_dim=args.tgt_embed_dim,
transformer_config=eval(args.transformer_dec_config),
conv_config=eval(args.conv_dec_config),
encoder_output_dim=args.enc_output_dim,
)
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure that all args are properly defaulted
# (in case there are any new ones)
base_architecture(args)
encoder = cls.build_encoder(args, task)
decoder = cls.build_decoder(args, task)
return cls(encoder, decoder)
def get_normalized_probs(self, net_output, log_probs, sample=None):
# net_output['encoder_out'] is a (B, T, D) tensor
lprobs = super().get_normalized_probs(net_output, log_probs, sample)
lprobs.batch_first = True
return lprobs
DEFAULT_ENC_VGGBLOCK_CONFIG = ((32, 3, 2, 2, False),) * 2
DEFAULT_ENC_TRANSFORMER_CONFIG = ((256, 4, 1024, True, 0.2, 0.2, 0.2),) * 2
# 256: embedding dimension
# 4: number of heads
# 1024: FFN
# True: apply layerNorm before (dropout + resiaul) instead of after
# 0.2 (dropout): dropout after MultiheadAttention and second FC
# 0.2 (attention_dropout): dropout in MultiheadAttention
# 0.2 (relu_dropout): dropout after ReLu
DEFAULT_DEC_TRANSFORMER_CONFIG = ((256, 2, 1024, True, 0.2, 0.2, 0.2),) * 2
DEFAULT_DEC_CONV_CONFIG = ((256, 3, True),) * 2
# TODO: repace transformer encoder config from one liner
# to explicit args to get rid of this transformation
def prepare_transformer_encoder_params(
input_dim,
num_heads,
ffn_dim,
normalize_before,
dropout,
attention_dropout,
relu_dropout,
):
args = argparse.Namespace()
args.encoder_embed_dim = input_dim
args.encoder_attention_heads = num_heads
args.attention_dropout = attention_dropout
args.dropout = dropout
args.activation_dropout = relu_dropout
args.encoder_normalize_before = normalize_before
args.encoder_ffn_embed_dim = ffn_dim
return args
def prepare_transformer_decoder_params(
input_dim,
num_heads,
ffn_dim,
normalize_before,
dropout,
attention_dropout,
relu_dropout,
):
args = argparse.Namespace()
args.decoder_embed_dim = input_dim
args.decoder_attention_heads = num_heads
args.attention_dropout = attention_dropout
args.dropout = dropout
args.activation_dropout = relu_dropout
args.decoder_normalize_before = normalize_before
args.decoder_ffn_embed_dim = ffn_dim
return args
class VGGTransformerEncoder(FairseqEncoder):
"""VGG + Transformer encoder"""
def __init__(
self,
input_feat_per_channel,
vggblock_config=DEFAULT_ENC_VGGBLOCK_CONFIG,
transformer_config=DEFAULT_ENC_TRANSFORMER_CONFIG,
encoder_output_dim=512,
in_channels=1,
transformer_context=None,
transformer_sampling=None,
):
"""constructor for VGGTransformerEncoder
Args:
- input_feat_per_channel: feature dim (not including stacked,
just base feature)
- in_channel: # input channels (e.g., if stack 8 feature vector
together, this is 8)
- vggblock_config: configuration of vggblock, see comments on
DEFAULT_ENC_VGGBLOCK_CONFIG
- transformer_config: configuration of transformer layer, see comments
on DEFAULT_ENC_TRANSFORMER_CONFIG
- encoder_output_dim: final transformer output embedding dimension
- transformer_context: (left, right) if set, self-attention will be focused
on (t-left, t+right)
- transformer_sampling: an iterable of int, must match with
len(transformer_config), transformer_sampling[i] indicates sampling
factor for i-th transformer layer, after multihead att and feedfoward
part
"""
super().__init__(None)
self.num_vggblocks = 0
if vggblock_config is not None:
if not isinstance(vggblock_config, Iterable):
raise ValueError("vggblock_config is not iterable")
self.num_vggblocks = len(vggblock_config)
self.conv_layers = nn.ModuleList()
self.in_channels = in_channels
self.input_dim = input_feat_per_channel
if vggblock_config is not None:
for _, config in enumerate(vggblock_config):
(
out_channels,
conv_kernel_size,
pooling_kernel_size,
num_conv_layers,
layer_norm,
) = config
self.conv_layers.append(
VGGBlock(
in_channels,
out_channels,
conv_kernel_size,
pooling_kernel_size,
num_conv_layers,
input_dim=input_feat_per_channel,
layer_norm=layer_norm,
)
)
in_channels = out_channels
input_feat_per_channel = self.conv_layers[-1].output_dim
transformer_input_dim = self.infer_conv_output_dim(
self.in_channels, self.input_dim
)
# transformer_input_dim is the output dimension of VGG part
self.validate_transformer_config(transformer_config)
self.transformer_context = self.parse_transformer_context(transformer_context)
self.transformer_sampling = self.parse_transformer_sampling(
transformer_sampling, len(transformer_config)
)
self.transformer_layers = nn.ModuleList()
if transformer_input_dim != transformer_config[0][0]:
self.transformer_layers.append(
Linear(transformer_input_dim, transformer_config[0][0])
)
self.transformer_layers.append(
TransformerEncoderLayer(
prepare_transformer_encoder_params(*transformer_config[0])
)
)
for i in range(1, len(transformer_config)):
if transformer_config[i - 1][0] != transformer_config[i][0]:
self.transformer_layers.append(
Linear(transformer_config[i - 1][0], transformer_config[i][0])
)
self.transformer_layers.append(
TransformerEncoderLayer(
prepare_transformer_encoder_params(*transformer_config[i])
)
)
self.encoder_output_dim = encoder_output_dim
self.transformer_layers.extend(
[
Linear(transformer_config[-1][0], encoder_output_dim),
LayerNorm(encoder_output_dim),
]
)
def forward(self, src_tokens, src_lengths, **kwargs):
"""
src_tokens: padded tensor (B, T, C * feat)
src_lengths: tensor of original lengths of input utterances (B,)
"""
bsz, max_seq_len, _ = src_tokens.size()
x = src_tokens.view(bsz, max_seq_len, self.in_channels, self.input_dim)
x = x.transpose(1, 2).contiguous()
# (B, C, T, feat)
for layer_idx in range(len(self.conv_layers)):
x = self.conv_layers[layer_idx](x)
bsz, _, output_seq_len, _ = x.size()
# (B, C, T, feat) -> (B, T, C, feat) -> (T, B, C, feat) -> (T, B, C * feat)
x = x.transpose(1, 2).transpose(0, 1)
x = x.contiguous().view(output_seq_len, bsz, -1)
subsampling_factor = int(max_seq_len * 1.0 / output_seq_len + 0.5)
# TODO: shouldn't subsampling_factor determined in advance ?
input_lengths = (src_lengths.float() / subsampling_factor).ceil().long()
encoder_padding_mask, _ = lengths_to_encoder_padding_mask(
input_lengths, batch_first=True
)
if not encoder_padding_mask.any():
encoder_padding_mask = None
attn_mask = self.lengths_to_attn_mask(input_lengths, subsampling_factor)
transformer_layer_idx = 0
for layer_idx in range(len(self.transformer_layers)):
if isinstance(self.transformer_layers[layer_idx], TransformerEncoderLayer):
x = self.transformer_layers[layer_idx](
x, encoder_padding_mask, attn_mask
)
if self.transformer_sampling[transformer_layer_idx] != 1:
sampling_factor = self.transformer_sampling[transformer_layer_idx]
x, encoder_padding_mask, attn_mask = self.slice(
x, encoder_padding_mask, attn_mask, sampling_factor
)
transformer_layer_idx += 1
else:
x = self.transformer_layers[layer_idx](x)
# encoder_padding_maks is a (T x B) tensor, its [t, b] elements indicate
# whether encoder_output[t, b] is valid or not (valid=0, invalid=1)
return {
"encoder_out": x, # (T, B, C)
"encoder_padding_mask": encoder_padding_mask.t()
if encoder_padding_mask is not None
else None,
# (B, T) --> (T, B)
}
def infer_conv_output_dim(self, in_channels, input_dim):
sample_seq_len = 200
sample_bsz = 10
x = torch.randn(sample_bsz, in_channels, sample_seq_len, input_dim)
for i, _ in enumerate(self.conv_layers):
x = self.conv_layers[i](x)
x = x.transpose(1, 2)
mb, seq = x.size()[:2]
return x.contiguous().view(mb, seq, -1).size(-1)
def validate_transformer_config(self, transformer_config):
for config in transformer_config:
input_dim, num_heads = config[:2]
if input_dim % num_heads != 0:
msg = (
"ERROR in transformer config {}:".format(config)
+ "input dimension {} ".format(input_dim)
+ "not dividable by number of heads".format(num_heads)
)
raise ValueError(msg)
def parse_transformer_context(self, transformer_context):
"""
transformer_context can be the following:
- None; indicates no context is used, i.e.,
transformer can access full context
- a tuple/list of two int; indicates left and right context,
any number <0 indicates infinite context
* e.g., (5, 6) indicates that for query at x_t, transformer can
access [t-5, t+6] (inclusive)
* e.g., (-1, 6) indicates that for query at x_t, transformer can
access [0, t+6] (inclusive)
"""
if transformer_context is None:
return None
if not isinstance(transformer_context, Iterable):
raise ValueError("transformer context must be Iterable if it is not None")
if len(transformer_context) != 2:
raise ValueError("transformer context must have length 2")
left_context = transformer_context[0]
if left_context < 0:
left_context = None
right_context = transformer_context[1]
if right_context < 0:
right_context = None
if left_context is None and right_context is None:
return None
return (left_context, right_context)
def parse_transformer_sampling(self, transformer_sampling, num_layers):
"""
parsing transformer sampling configuration
Args:
- transformer_sampling, accepted input:
* None, indicating no sampling
* an Iterable with int (>0) as element
- num_layers, expected number of transformer layers, must match with
the length of transformer_sampling if it is not None
Returns:
- A tuple with length num_layers
"""
if transformer_sampling is None:
return (1,) * num_layers
if not isinstance(transformer_sampling, Iterable):
raise ValueError(
"transformer_sampling must be an iterable if it is not None"
)
if len(transformer_sampling) != num_layers:
raise ValueError(
"transformer_sampling {} does not match with the number "
+ "of layers {}".format(transformer_sampling, num_layers)
)
for layer, value in enumerate(transformer_sampling):
if not isinstance(value, int):
raise ValueError("Invalid value in transformer_sampling: ")
if value < 1:
raise ValueError(
"{} layer's subsampling is {}.".format(layer, value)
+ " This is not allowed! "
)
return transformer_sampling
def slice(self, embedding, padding_mask, attn_mask, sampling_factor):
"""
embedding is a (T, B, D) tensor
padding_mask is a (B, T) tensor or None
attn_mask is a (T, T) tensor or None
"""
embedding = embedding[::sampling_factor, :, :]
if padding_mask is not None:
padding_mask = padding_mask[:, ::sampling_factor]
if attn_mask is not None:
attn_mask = attn_mask[::sampling_factor, ::sampling_factor]
return embedding, padding_mask, attn_mask
def lengths_to_attn_mask(self, input_lengths, subsampling_factor=1):
"""
create attention mask according to sequence lengths and transformer
context
Args:
- input_lengths: (B, )-shape Int/Long tensor; input_lengths[b] is
the length of b-th sequence
- subsampling_factor: int
* Note that the left_context and right_context is specified in
the input frame-level while input to transformer may already
go through subsampling (e.g., the use of striding in vggblock)
we use subsampling_factor to scale the left/right context
Return:
- a (T, T) binary tensor or None, where T is max(input_lengths)
* if self.transformer_context is None, None
* if left_context is None,
* attn_mask[t, t + right_context + 1:] = 1
* others = 0
* if right_context is None,
* attn_mask[t, 0:t - left_context] = 1
* others = 0
* elsif
* attn_mask[t, t - left_context: t + right_context + 1] = 0
* others = 1
"""
if self.transformer_context is None:
return None
maxT = torch.max(input_lengths).item()
attn_mask = torch.zeros(maxT, maxT)
left_context = self.transformer_context[0]
right_context = self.transformer_context[1]
if left_context is not None:
left_context = math.ceil(self.transformer_context[0] / subsampling_factor)
if right_context is not None:
right_context = math.ceil(self.transformer_context[1] / subsampling_factor)
for t in range(maxT):
if left_context is not None:
st = 0
en = max(st, t - left_context)
attn_mask[t, st:en] = 1
if right_context is not None:
st = t + right_context + 1
st = min(st, maxT - 1)
attn_mask[t, st:] = 1
return attn_mask.to(input_lengths.device)
def reorder_encoder_out(self, encoder_out, new_order):
encoder_out["encoder_out"] = encoder_out["encoder_out"].index_select(
1, new_order
)
if encoder_out["encoder_padding_mask"] is not None:
encoder_out["encoder_padding_mask"] = encoder_out[
"encoder_padding_mask"
].index_select(1, new_order)
return encoder_out
class TransformerDecoder(FairseqIncrementalDecoder):
"""
Transformer decoder consisting of *args.decoder_layers* layers. Each layer
is a :class:`TransformerDecoderLayer`.
Args:
args (argparse.Namespace): parsed command-line arguments
dictionary (~fairseq.data.Dictionary): decoding dictionary
embed_tokens (torch.nn.Embedding): output embedding
no_encoder_attn (bool, optional): whether to attend to encoder outputs.
Default: ``False``
left_pad (bool, optional): whether the input is left-padded. Default:
``False``
"""
def __init__(
self,
dictionary,
embed_dim=512,
transformer_config=DEFAULT_ENC_TRANSFORMER_CONFIG,
conv_config=DEFAULT_DEC_CONV_CONFIG,
encoder_output_dim=512,
):
super().__init__(dictionary)
vocab_size = len(dictionary)
self.padding_idx = dictionary.pad()
self.embed_tokens = Embedding(vocab_size, embed_dim, self.padding_idx)
self.conv_layers = nn.ModuleList()
for i in range(len(conv_config)):
out_channels, kernel_size, layer_norm = conv_config[i]
if i == 0:
conv_layer = LinearizedConv1d(
embed_dim, out_channels, kernel_size, padding=kernel_size - 1
)
else:
conv_layer = LinearizedConv1d(
conv_config[i - 1][0],
out_channels,
kernel_size,
padding=kernel_size - 1,
)
self.conv_layers.append(conv_layer)
if layer_norm:
self.conv_layers.append(nn.LayerNorm(out_channels))
self.conv_layers.append(nn.ReLU())
self.layers = nn.ModuleList()
if conv_config[-1][0] != transformer_config[0][0]:
self.layers.append(Linear(conv_config[-1][0], transformer_config[0][0]))
self.layers.append(TransformerDecoderLayer(
prepare_transformer_decoder_params(*transformer_config[0])
))
for i in range(1, len(transformer_config)):
if transformer_config[i - 1][0] != transformer_config[i][0]:
self.layers.append(
Linear(transformer_config[i - 1][0], transformer_config[i][0])
)
self.layers.append(TransformerDecoderLayer(
prepare_transformer_decoder_params(*transformer_config[i])
))
self.fc_out = Linear(transformer_config[-1][0], vocab_size)
def forward(self, prev_output_tokens, encoder_out=None, incremental_state=None):
"""
Args:
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for input feeding/teacher forcing
encoder_out (Tensor, optional): output from the encoder, used for
encoder-side attention
incremental_state (dict): dictionary used for storing state during
:ref:`Incremental decoding`
Returns:
tuple:
- the last decoder layer's output of shape `(batch, tgt_len,
vocab)`
- the last decoder layer's attention weights of shape `(batch,
tgt_len, src_len)`
"""
target_padding_mask = (
(prev_output_tokens == self.padding_idx).to(prev_output_tokens.device)
if incremental_state is None
else None
)
if incremental_state is not None:
prev_output_tokens = prev_output_tokens[:, -1:]
# embed tokens
x = self.embed_tokens(prev_output_tokens)
# B x T x C -> T x B x C
x = self._transpose_if_training(x, incremental_state)
for layer in self.conv_layers:
if isinstance(layer, LinearizedConvolution):
x = layer(x, incremental_state)
else:
x = layer(x)
# B x T x C -> T x B x C
x = self._transpose_if_inference(x, incremental_state)
# decoder layers
for layer in self.layers:
if isinstance(layer, TransformerDecoderLayer):
x, *_ = layer(
x,
(encoder_out["encoder_out"] if encoder_out is not None else None),
(
encoder_out["encoder_padding_mask"].t()
if encoder_out["encoder_padding_mask"] is not None
else None
),
incremental_state,
self_attn_mask=(
self.buffered_future_mask(x)
if incremental_state is None
else None
),
self_attn_padding_mask=(
target_padding_mask if incremental_state is None else None
),
)
else:
x = layer(x)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
x = self.fc_out(x)
return x, None
def buffered_future_mask(self, tensor):
dim = tensor.size(0)
if (
not hasattr(self, "_future_mask")
or self._future_mask is None
or self._future_mask.device != tensor.device
):
self._future_mask = torch.triu(
utils.fill_with_neg_inf(tensor.new(dim, dim)), 1
)
if self._future_mask.size(0) < dim:
self._future_mask = torch.triu(
utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1
)
return self._future_mask[:dim, :dim]
def _transpose_if_training(self, x, incremental_state):
if incremental_state is None:
x = x.transpose(0, 1)
return x
def _transpose_if_inference(self, x, incremental_state):
if incremental_state:
x = x.transpose(0, 1)
return x
@register_model("asr_vggtransformer_encoder")
class VGGTransformerEncoderModel(FairseqEncoderModel):
def __init__(self, encoder):
super().__init__(encoder)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
parser.add_argument(
"--input-feat-per-channel",
type=int,
metavar="N",
help="encoder input dimension per input channel",
)
parser.add_argument(
"--vggblock-enc-config",
type=str,
metavar="EXPR",
help="""
an array of tuples each containing the configuration of one vggblock
[(out_channels, conv_kernel_size, pooling_kernel_size,num_conv_layers), ...]
""",
)
parser.add_argument(
"--transformer-enc-config",
type=str,
metavar="EXPR",
help="""
a tuple containing the configuration of the Transformer layers
configurations:
[(input_dim,
num_heads,
ffn_dim,
normalize_before,
dropout,
attention_dropout,
relu_dropout), ]""",
)
parser.add_argument(
"--enc-output-dim",
type=int,
metavar="N",
help="encoder output dimension, projecting the LSTM output",
)
parser.add_argument(
"--in-channels",
type=int,
metavar="N",
help="number of encoder input channels",
)
parser.add_argument(
"--transformer-context",
type=str,
metavar="EXPR",
help="""
either None or a tuple of two ints, indicating left/right context a
transformer can have access to""",
)
parser.add_argument(
"--transformer-sampling",
type=str,
metavar="EXPR",
help="""
either None or a tuple of ints, indicating sampling factor in each layer""",
)
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
base_architecture_enconly(args)
encoder = VGGTransformerEncoderOnly(
vocab_size=len(task.target_dictionary),
input_feat_per_channel=args.input_feat_per_channel,
vggblock_config=eval(args.vggblock_enc_config),
transformer_config=eval(args.transformer_enc_config),
encoder_output_dim=args.enc_output_dim,
in_channels=args.in_channels,
transformer_context=eval(args.transformer_context),
transformer_sampling=eval(args.transformer_sampling),
)
return cls(encoder)
def get_normalized_probs(self, net_output, log_probs, sample=None):
# net_output['encoder_out'] is a (T, B, D) tensor
lprobs = super().get_normalized_probs(net_output, log_probs, sample)
# lprobs is a (T, B, D) tensor
# we need to transoose to get (B, T, D) tensor
lprobs = lprobs.transpose(0, 1).contiguous()
lprobs.batch_first = True
return lprobs
class VGGTransformerEncoderOnly(VGGTransformerEncoder):
def __init__(
self,
vocab_size,
input_feat_per_channel,
vggblock_config=DEFAULT_ENC_VGGBLOCK_CONFIG,
transformer_config=DEFAULT_ENC_TRANSFORMER_CONFIG,
encoder_output_dim=512,
in_channels=1,
transformer_context=None,
transformer_sampling=None,
):
super().__init__(
input_feat_per_channel=input_feat_per_channel,
vggblock_config=vggblock_config,
transformer_config=transformer_config,
encoder_output_dim=encoder_output_dim,
in_channels=in_channels,
transformer_context=transformer_context,
transformer_sampling=transformer_sampling,
)
self.fc_out = Linear(self.encoder_output_dim, vocab_size)
def forward(self, src_tokens, src_lengths, **kwargs):
"""
src_tokens: padded tensor (B, T, C * feat)
src_lengths: tensor of original lengths of input utterances (B,)
"""
enc_out = super().forward(src_tokens, src_lengths)
x = self.fc_out(enc_out["encoder_out"])
# x = F.log_softmax(x, dim=-1)
# Note: no need this line, because model.get_normalized_prob will call
# log_softmax
return {
"encoder_out": x, # (T, B, C)
"encoder_padding_mask": enc_out["encoder_padding_mask"], # (T, B)
}
def max_positions(self):
"""Maximum input length supported by the encoder."""
return (1e6, 1e6) # an arbitrary large number
def Embedding(num_embeddings, embedding_dim, padding_idx):
m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
# nn.init.uniform_(m.weight, -0.1, 0.1)
# nn.init.constant_(m.weight[padding_idx], 0)
return m
def Linear(in_features, out_features, bias=True, dropout=0):
"""Linear layer (input: N x T x C)"""
m = nn.Linear(in_features, out_features, bias=bias)
# m.weight.data.uniform_(-0.1, 0.1)
# if bias:
# m.bias.data.uniform_(-0.1, 0.1)
return m
def LinearizedConv1d(in_channels, out_channels, kernel_size, dropout=0, **kwargs):
"""Weight-normalized Conv1d layer optimized for decoding"""
m = LinearizedConvolution(in_channels, out_channels, kernel_size, **kwargs)
std = math.sqrt((4 * (1.0 - dropout)) / (m.kernel_size[0] * in_channels))
nn.init.normal_(m.weight, mean=0, std=std)
nn.init.constant_(m.bias, 0)
return nn.utils.weight_norm(m, dim=2)
def LayerNorm(embedding_dim):
m = nn.LayerNorm(embedding_dim)
return m
# seq2seq models
def base_architecture(args):
args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 40)
args.vggblock_enc_config = getattr(
args, "vggblock_enc_config", DEFAULT_ENC_VGGBLOCK_CONFIG
)
args.transformer_enc_config = getattr(
args, "transformer_enc_config", DEFAULT_ENC_TRANSFORMER_CONFIG
)
args.enc_output_dim = getattr(args, "enc_output_dim", 512)
args.in_channels = getattr(args, "in_channels", 1)
args.tgt_embed_dim = getattr(args, "tgt_embed_dim", 128)
args.transformer_dec_config = getattr(
args, "transformer_dec_config", DEFAULT_ENC_TRANSFORMER_CONFIG
)
args.conv_dec_config = getattr(args, "conv_dec_config", DEFAULT_DEC_CONV_CONFIG)
args.transformer_context = getattr(args, "transformer_context", "None")
@register_model_architecture("asr_vggtransformer", "vggtransformer_1")
def vggtransformer_1(args):
args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80)
args.vggblock_enc_config = getattr(
args, "vggblock_enc_config", "[(64, 3, 2, 2, True), (128, 3, 2, 2, True)]"
)
args.transformer_enc_config = getattr(
args,
"transformer_enc_config",
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | true |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/speech_recognition/models/__init__.py | examples/speech_recognition/models/__init__.py | import importlib
import os
for file in os.listdir(os.path.dirname(__file__)):
if file.endswith('.py') and not file.startswith('_'):
model_name = file[:file.find('.py')]
importlib.import_module('examples.speech_recognition.models.' + model_name)
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/speech_recognition/utils/wer_utils.py | examples/speech_recognition/utils/wer_utils.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import re
from collections import deque
from enum import Enum
import numpy as np
"""
Utility modules for computation of Word Error Rate,
Alignments, as well as more granular metrics like
deletion, insersion and substitutions.
"""
class Code(Enum):
match = 1
substitution = 2
insertion = 3
deletion = 4
class Token(object):
def __init__(self, lbl="", st=np.nan, en=np.nan):
if np.isnan(st):
self.label, self.start, self.end = "", 0.0, 0.0
else:
self.label, self.start, self.end = lbl, st, en
class AlignmentResult(object):
def __init__(self, refs, hyps, codes, score):
self.refs = refs # std::deque<int>
self.hyps = hyps # std::deque<int>
self.codes = codes # std::deque<Code>
self.score = score # float
def coordinate_to_offset(row, col, ncols):
return int(row * ncols + col)
def offset_to_row(offset, ncols):
return int(offset / ncols)
def offset_to_col(offset, ncols):
return int(offset % ncols)
def trimWhitespace(str):
return re.sub(" +", " ", re.sub(" *$", "", re.sub("^ *", "", str)))
def str2toks(str):
pieces = trimWhitespace(str).split(" ")
toks = []
for p in pieces:
toks.append(Token(p, 0.0, 0.0))
return toks
class EditDistance(object):
def __init__(self, time_mediated):
self.time_mediated_ = time_mediated
self.scores_ = np.nan # Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic>
self.backtraces_ = (
np.nan
) # Eigen::Matrix<size_t, Eigen::Dynamic, Eigen::Dynamic> backtraces_;
self.confusion_pairs_ = {}
def cost(self, ref, hyp, code):
if self.time_mediated_:
if code == Code.match:
return abs(ref.start - hyp.start) + abs(ref.end - hyp.end)
elif code == Code.insertion:
return hyp.end - hyp.start
elif code == Code.deletion:
return ref.end - ref.start
else: # substitution
return abs(ref.start - hyp.start) + abs(ref.end - hyp.end) + 0.1
else:
if code == Code.match:
return 0
elif code == Code.insertion or code == Code.deletion:
return 3
else: # substitution
return 4
def get_result(self, refs, hyps):
res = AlignmentResult(refs=deque(), hyps=deque(), codes=deque(), score=np.nan)
num_rows, num_cols = self.scores_.shape
res.score = self.scores_[num_rows - 1, num_cols - 1]
curr_offset = coordinate_to_offset(num_rows - 1, num_cols - 1, num_cols)
while curr_offset != 0:
curr_row = offset_to_row(curr_offset, num_cols)
curr_col = offset_to_col(curr_offset, num_cols)
prev_offset = self.backtraces_[curr_row, curr_col]
prev_row = offset_to_row(prev_offset, num_cols)
prev_col = offset_to_col(prev_offset, num_cols)
res.refs.appendleft(curr_row - 1) # Note: this was .push_front() in C++
res.hyps.appendleft(curr_col - 1)
if curr_row - 1 == prev_row and curr_col == prev_col:
res.codes.appendleft(Code.deletion)
elif curr_row == prev_row and curr_col - 1 == prev_col:
res.codes.appendleft(Code.insertion)
else:
# assert(curr_row - 1 == prev_row and curr_col - 1 == prev_col)
ref_str = refs[res.refs[0]].label
hyp_str = hyps[res.hyps[0]].label
if ref_str == hyp_str:
res.codes.appendleft(Code.match)
else:
res.codes.appendleft(Code.substitution)
confusion_pair = "%s -> %s" % (ref_str, hyp_str)
if confusion_pair not in self.confusion_pairs_:
self.confusion_pairs_[confusion_pair] = 1
else:
self.confusion_pairs_[confusion_pair] += 1
curr_offset = prev_offset
return res
def align(self, refs, hyps):
if len(refs) == 0 and len(hyps) == 0:
return np.nan
# NOTE: we're not resetting the values in these matrices because every value
# will be overridden in the loop below. If this assumption doesn't hold,
# be sure to set all entries in self.scores_ and self.backtraces_ to 0.
self.scores_ = np.zeros((len(refs) + 1, len(hyps) + 1))
self.backtraces_ = np.zeros((len(refs) + 1, len(hyps) + 1))
num_rows, num_cols = self.scores_.shape
for i in range(num_rows):
for j in range(num_cols):
if i == 0 and j == 0:
self.scores_[i, j] = 0.0
self.backtraces_[i, j] = 0
continue
if i == 0:
self.scores_[i, j] = self.scores_[i, j - 1] + self.cost(
None, hyps[j - 1], Code.insertion
)
self.backtraces_[i, j] = coordinate_to_offset(i, j - 1, num_cols)
continue
if j == 0:
self.scores_[i, j] = self.scores_[i - 1, j] + self.cost(
refs[i - 1], None, Code.deletion
)
self.backtraces_[i, j] = coordinate_to_offset(i - 1, j, num_cols)
continue
# Below here both i and j are greater than 0
ref = refs[i - 1]
hyp = hyps[j - 1]
best_score = self.scores_[i - 1, j - 1] + (
self.cost(ref, hyp, Code.match)
if (ref.label == hyp.label)
else self.cost(ref, hyp, Code.substitution)
)
prev_row = i - 1
prev_col = j - 1
ins = self.scores_[i, j - 1] + self.cost(None, hyp, Code.insertion)
if ins < best_score:
best_score = ins
prev_row = i
prev_col = j - 1
delt = self.scores_[i - 1, j] + self.cost(ref, None, Code.deletion)
if delt < best_score:
best_score = delt
prev_row = i - 1
prev_col = j
self.scores_[i, j] = best_score
self.backtraces_[i, j] = coordinate_to_offset(
prev_row, prev_col, num_cols
)
return self.get_result(refs, hyps)
class WERTransformer(object):
def __init__(self, hyp_str, ref_str, verbose=True):
self.ed_ = EditDistance(False)
self.id2oracle_errs_ = {}
self.utts_ = 0
self.words_ = 0
self.insertions_ = 0
self.deletions_ = 0
self.substitutions_ = 0
self.process(["dummy_str", hyp_str, ref_str])
if verbose:
print("'%s' vs '%s'" % (hyp_str, ref_str))
self.report_result()
def process(self, input): # std::vector<std::string>&& input
if len(input) < 3:
print(
"Input must be of the form <id> ... <hypo> <ref> , got ",
len(input),
" inputs:",
)
return None
# Align
# std::vector<Token> hyps;
# std::vector<Token> refs;
hyps = str2toks(input[-2])
refs = str2toks(input[-1])
alignment = self.ed_.align(refs, hyps)
if alignment is None:
print("Alignment is null")
return np.nan
# Tally errors
ins = 0
dels = 0
subs = 0
for code in alignment.codes:
if code == Code.substitution:
subs += 1
elif code == Code.insertion:
ins += 1
elif code == Code.deletion:
dels += 1
# Output
row = input
row.append(str(len(refs)))
row.append(str(ins))
row.append(str(dels))
row.append(str(subs))
# print(row)
# Accumulate
kIdIndex = 0
kNBestSep = "/"
pieces = input[kIdIndex].split(kNBestSep)
if len(pieces) == 0:
print(
"Error splitting ",
input[kIdIndex],
" on '",
kNBestSep,
"', got empty list",
)
return np.nan
id = pieces[0]
if id not in self.id2oracle_errs_:
self.utts_ += 1
self.words_ += len(refs)
self.insertions_ += ins
self.deletions_ += dels
self.substitutions_ += subs
self.id2oracle_errs_[id] = [ins, dels, subs]
else:
curr_err = ins + dels + subs
prev_err = np.sum(self.id2oracle_errs_[id])
if curr_err < prev_err:
self.id2oracle_errs_[id] = [ins, dels, subs]
return 0
def report_result(self):
# print("---------- Summary ---------------")
if self.words_ == 0:
print("No words counted")
return
# 1-best
best_wer = (
100.0
* (self.insertions_ + self.deletions_ + self.substitutions_)
/ self.words_
)
print(
"\tWER = %0.2f%% (%i utts, %i words, %0.2f%% ins, "
"%0.2f%% dels, %0.2f%% subs)"
% (
best_wer,
self.utts_,
self.words_,
100.0 * self.insertions_ / self.words_,
100.0 * self.deletions_ / self.words_,
100.0 * self.substitutions_ / self.words_,
)
)
def wer(self):
if self.words_ == 0:
wer = np.nan
else:
wer = (
100.0
* (self.insertions_ + self.deletions_ + self.substitutions_)
/ self.words_
)
return wer
def stats(self):
if self.words_ == 0:
stats = {}
else:
wer = (
100.0
* (self.insertions_ + self.deletions_ + self.substitutions_)
/ self.words_
)
stats = dict(
{
"wer": wer,
"utts": self.utts_,
"numwords": self.words_,
"ins": self.insertions_,
"dels": self.deletions_,
"subs": self.substitutions_,
"confusion_pairs": self.ed_.confusion_pairs_,
}
)
return stats
def calc_wer(hyp_str, ref_str):
t = WERTransformer(hyp_str, ref_str, verbose=0)
return t.wer()
def calc_wer_stats(hyp_str, ref_str):
t = WERTransformer(hyp_str, ref_str, verbose=0)
return t.stats()
def get_wer_alignment_codes(hyp_str, ref_str):
"""
INPUT: hypothesis string, reference string
OUTPUT: List of alignment codes (intermediate results from WER computation)
"""
t = WERTransformer(hyp_str, ref_str, verbose=0)
return t.ed_.align(str2toks(ref_str), str2toks(hyp_str)).codes
def merge_counts(x, y):
# Merge two hashes which have 'counts' as their values
# This can be used for example to merge confusion pair counts
# conf_pairs = merge_counts(conf_pairs, stats['confusion_pairs'])
for k, v in y.items():
if k not in x:
x[k] = 0
x[k] += v
return x
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/speech_recognition/criterions/ASG_loss.py | examples/speech_recognition/criterions/ASG_loss.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from fairseq import utils
from fairseq.criterions import FairseqCriterion, register_criterion
from examples.speech_recognition.data.replabels import pack_replabels
@register_criterion("asg_loss")
class ASGCriterion(FairseqCriterion):
@staticmethod
def add_args(parser):
group = parser.add_argument_group("ASG Loss")
group.add_argument(
"--asg-transitions-init",
help="initial diagonal value of transition matrix",
type=float,
default=0.0,
)
group.add_argument(
"--max-replabel", help="maximum # of replabels", type=int, default=2
)
group.add_argument(
"--linseg-updates",
help="# of training updates to use LinSeg initialization",
type=int,
default=0,
)
group.add_argument(
"--hide-linseg-messages",
help="hide messages about LinSeg initialization",
action="store_true",
)
def __init__(
self,
task,
silence_token,
asg_transitions_init,
max_replabel,
linseg_updates,
hide_linseg_messages,
):
from wav2letter.criterion import ASGLoss, CriterionScaleMode
super().__init__(task)
self.tgt_dict = task.target_dictionary
self.eos = self.tgt_dict.eos()
self.silence = (
self.tgt_dict.index(silence_token)
if silence_token in self.tgt_dict
else None
)
self.max_replabel = max_replabel
num_labels = len(self.tgt_dict)
self.asg = ASGLoss(num_labels, scale_mode=CriterionScaleMode.TARGET_SZ_SQRT)
self.asg.trans = torch.nn.Parameter(
asg_transitions_init * torch.eye(num_labels), requires_grad=True
)
self.linseg_progress = torch.nn.Parameter(
torch.tensor([0], dtype=torch.int), requires_grad=False
)
self.linseg_maximum = linseg_updates
self.linseg_message_state = "none" if hide_linseg_messages else "start"
@classmethod
def build_criterion(cls, args, task):
return cls(
task,
args.silence_token,
args.asg_transitions_init,
args.max_replabel,
args.linseg_updates,
args.hide_linseg_messages,
)
def linseg_step(self):
if not self.training:
return False
if self.linseg_progress.item() < self.linseg_maximum:
if self.linseg_message_state == "start":
print("| using LinSeg to initialize ASG")
self.linseg_message_state = "finish"
self.linseg_progress.add_(1)
return True
elif self.linseg_message_state == "finish":
print("| finished LinSeg initialization")
self.linseg_message_state = "none"
return False
def replace_eos_with_silence(self, tgt):
if tgt[-1] != self.eos:
return tgt
elif self.silence is None or (len(tgt) > 1 and tgt[-2] == self.silence):
return tgt[:-1]
else:
return tgt[:-1] + [self.silence]
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
net_output = model(**sample["net_input"])
emissions = net_output["encoder_out"].transpose(0, 1).contiguous()
B = emissions.size(0)
T = emissions.size(1)
device = emissions.device
target = torch.IntTensor(B, T)
target_size = torch.IntTensor(B)
using_linseg = self.linseg_step()
for b in range(B):
initial_target_size = sample["target_lengths"][b].item()
if initial_target_size == 0:
raise ValueError("target size cannot be zero")
tgt = sample["target"][b, :initial_target_size].tolist()
tgt = self.replace_eos_with_silence(tgt)
tgt = pack_replabels(tgt, self.tgt_dict, self.max_replabel)
tgt = tgt[:T]
if using_linseg:
tgt = [tgt[t * len(tgt) // T] for t in range(T)]
target[b][: len(tgt)] = torch.IntTensor(tgt)
target_size[b] = len(tgt)
loss = self.asg.forward(emissions, target.to(device), target_size.to(device))
if reduce:
loss = torch.sum(loss)
sample_size = (
sample["target"].size(0) if self.args.sentence_avg else sample["ntokens"]
)
logging_output = {
"loss": utils.item(loss.data) if reduce else loss.data,
"ntokens": sample["ntokens"],
"nsentences": sample["target"].size(0),
"sample_size": sample_size,
}
return loss, sample_size, logging_output
@staticmethod
def aggregate_logging_outputs(logging_outputs):
"""Aggregate logging outputs from data parallel training."""
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
nsentences = sum(log.get("nsentences", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
agg_output = {
"loss": loss_sum / nsentences,
"ntokens": ntokens,
"nsentences": nsentences,
"sample_size": sample_size,
}
return agg_output
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/speech_recognition/criterions/cross_entropy_acc.py | examples/speech_recognition/criterions/cross_entropy_acc.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import math
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.criterions import FairseqCriterion, register_criterion
@register_criterion("cross_entropy_acc")
class CrossEntropyWithAccCriterion(FairseqCriterion):
def __init__(self, task, sentence_avg):
super().__init__(task)
self.sentence_avg = sentence_avg
def compute_loss(self, model, net_output, target, reduction, log_probs):
# N, T -> N * T
target = target.view(-1)
lprobs = model.get_normalized_probs(net_output, log_probs=log_probs)
if not hasattr(lprobs, "batch_first"):
logging.warning(
"ERROR: we need to know whether "
"batch first for the net output; "
"you need to set batch_first attribute for the return value of "
"model.get_normalized_probs. Now, we assume this is true, but "
"in the future, we will raise exception instead. "
)
batch_first = getattr(lprobs, "batch_first", True)
if not batch_first:
lprobs = lprobs.transpose(0, 1)
# N, T, D -> N * T, D
lprobs = lprobs.view(-1, lprobs.size(-1))
loss = F.nll_loss(
lprobs, target, ignore_index=self.padding_idx, reduction=reduction
)
return lprobs, loss
def get_logging_output(self, sample, target, lprobs, loss):
target = target.view(-1)
mask = target != self.padding_idx
correct = torch.sum(
lprobs.argmax(1).masked_select(mask) == target.masked_select(mask)
)
total = torch.sum(mask)
sample_size = (
sample["target"].size(0) if self.sentence_avg else sample["ntokens"]
)
logging_output = {
"loss": utils.item(loss.data), # * sample['ntokens'],
"ntokens": sample["ntokens"],
"nsentences": sample["target"].size(0),
"sample_size": sample_size,
"correct": utils.item(correct.data),
"total": utils.item(total.data),
"nframes": torch.sum(sample["net_input"]["src_lengths"]).item(),
}
return sample_size, logging_output
def forward(self, model, sample, reduction="sum", log_probs=True):
"""Computes the cross entropy with accuracy metric for the given sample.
This is similar to CrossEntropyCriterion in fairseq, but also
computes accuracy metrics as part of logging
Args:
logprobs (Torch.tensor) of shape N, T, D i.e.
batchsize, timesteps, dimensions
targets (Torch.tensor) of shape N, T i.e batchsize, timesteps
Returns:
tuple: With three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
TODO:
* Currently this Criterion will only work with LSTMEncoderModels or
FairseqModels which have decoder, or Models which return TorchTensor
as net_output.
We need to make a change to support all FairseqEncoder models.
"""
net_output = model(**sample["net_input"])
target = model.get_targets(sample, net_output)
lprobs, loss = self.compute_loss(
model, net_output, target, reduction, log_probs
)
sample_size, logging_output = self.get_logging_output(
sample, target, lprobs, loss
)
return loss, sample_size, logging_output
@staticmethod
def aggregate_logging_outputs(logging_outputs):
"""Aggregate logging outputs from data parallel training."""
correct_sum = sum(log.get("correct", 0) for log in logging_outputs)
total_sum = sum(log.get("total", 0) for log in logging_outputs)
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
nsentences = sum(log.get("nsentences", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
nframes = sum(log.get("nframes", 0) for log in logging_outputs)
agg_output = {
"loss": loss_sum / sample_size / math.log(2) if sample_size > 0 else 0.0,
# if args.sentence_avg, then sample_size is nsentences, then loss
# is per-sentence loss; else sample_size is ntokens, the loss
# becomes per-output token loss
"ntokens": ntokens,
"nsentences": nsentences,
"nframes": nframes,
"sample_size": sample_size,
"acc": correct_sum * 100.0 / total_sum if total_sum > 0 else 0.0,
"correct": correct_sum,
"total": total_sum,
# total is the number of validate tokens
}
if sample_size != ntokens:
agg_output["nll_loss"] = loss_sum / ntokens / math.log(2)
# loss: per output token loss
# nll_loss: per sentence loss
return agg_output
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/speech_recognition/criterions/__init__.py | examples/speech_recognition/criterions/__init__.py | import importlib
import os
# ASG loss requires wav2letter
files_to_skip = set()
try:
import wav2letter
except ImportError:
files_to_skip.add("ASG_loss.py")
for file in os.listdir(os.path.dirname(__file__)):
if file.endswith(".py") and not file.startswith("_") and file not in files_to_skip:
criterion_name = file[: file.find(".py")]
importlib.import_module(
"examples.speech_recognition.criterions." + criterion_name
)
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/speech_recognition/data/data_utils.py | examples/speech_recognition/data/data_utils.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
def calc_mean_invstddev(feature):
if len(feature.size()) != 2:
raise ValueError("We expect the input feature to be 2-D tensor")
mean = feature.mean(0)
var = feature.var(0)
# avoid division by ~zero
eps = 1e-8
if (var < eps).any():
return mean, 1.0 / (torch.sqrt(var) + eps)
return mean, 1.0 / torch.sqrt(var)
def apply_mv_norm(features):
# If there is less than 2 spectrograms, the variance cannot be computed (is NaN)
# and normalization is not possible, so return the item as it is
if features.size(0) < 2:
return features
mean, invstddev = calc_mean_invstddev(features)
res = (features - mean) * invstddev
return res
def lengths_to_encoder_padding_mask(lengths, batch_first=False):
"""
convert lengths (a 1-D Long/Int tensor) to 2-D binary tensor
Args:
lengths: a (B, )-shaped tensor
Return:
max_length: maximum length of B sequences
encoder_padding_mask: a (max_length, B) binary mask, where
[t, b] = 0 for t < lengths[b] and 1 otherwise
TODO:
kernelize this function if benchmarking shows this function is slow
"""
max_lengths = torch.max(lengths).item()
bsz = lengths.size(0)
encoder_padding_mask = torch.arange(
max_lengths
).to( # a (T, ) tensor with [0, ..., T-1]
lengths.device
).view( # move to the right device
1, max_lengths
).expand( # reshape to (1, T)-shaped tensor
bsz, -1
) >= lengths.view( # expand to (B, T)-shaped tensor
bsz, 1
).expand(
-1, max_lengths
)
if not batch_first:
return encoder_padding_mask.t(), max_lengths
else:
return encoder_padding_mask, max_lengths
def encoder_padding_mask_to_lengths(
encoder_padding_mask, max_lengths, batch_size, device
):
"""
convert encoder_padding_mask (2-D binary tensor) to a 1-D tensor
Conventionally, encoder output contains a encoder_padding_mask, which is
a 2-D mask in a shape (T, B), whose (t, b) element indicate whether
encoder_out[t, b] is a valid output (=0) or not (=1). Occasionally, we
need to convert this mask tensor to a 1-D tensor in shape (B, ), where
[b] denotes the valid length of b-th sequence
Args:
encoder_padding_mask: a (T, B)-shaped binary tensor or None; if None,
indicating all are valid
Return:
seq_lengths: a (B,)-shaped tensor, where its (b, )-th element is the
number of valid elements of b-th sequence
max_lengths: maximum length of all sequence, if encoder_padding_mask is
not None, max_lengths must equal to encoder_padding_mask.size(0)
batch_size: batch size; if encoder_padding_mask is
not None, max_lengths must equal to encoder_padding_mask.size(1)
device: which device to put the result on
"""
if encoder_padding_mask is None:
return torch.Tensor([max_lengths] * batch_size).to(torch.int32).to(device)
assert encoder_padding_mask.size(0) == max_lengths, "max_lengths does not match"
assert encoder_padding_mask.size(1) == batch_size, "batch_size does not match"
return max_lengths - torch.sum(encoder_padding_mask, dim=0)
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/speech_recognition/data/replabels.py | examples/speech_recognition/data/replabels.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Replabel transforms for use with wav2letter's ASG criterion.
"""
def replabel_symbol(i):
"""
Replabel symbols used in wav2letter, currently just "1", "2", ...
This prevents training with numeral tokens, so this might change in the future
"""
return str(i)
def pack_replabels(tokens, dictionary, max_reps):
"""
Pack a token sequence so that repeated symbols are replaced by replabels
"""
if len(tokens) == 0 or max_reps <= 0:
return tokens
replabel_value_to_idx = [0] * (max_reps + 1)
for i in range(1, max_reps + 1):
replabel_value_to_idx[i] = dictionary.index(replabel_symbol(i))
result = []
prev_token = -1
num_reps = 0
for token in tokens:
if token == prev_token and num_reps < max_reps:
num_reps += 1
else:
if num_reps > 0:
result.append(replabel_value_to_idx[num_reps])
num_reps = 0
result.append(token)
prev_token = token
if num_reps > 0:
result.append(replabel_value_to_idx[num_reps])
return result
def unpack_replabels(tokens, dictionary, max_reps):
"""
Unpack a token sequence so that replabels are replaced by repeated symbols
"""
if len(tokens) == 0 or max_reps <= 0:
return tokens
replabel_idx_to_value = {}
for i in range(1, max_reps + 1):
replabel_idx_to_value[dictionary.index(replabel_symbol(i))] = i
result = []
prev_token = -1
for token in tokens:
try:
for _ in range(replabel_idx_to_value[token]):
result.append(prev_token)
prev_token = -1
except KeyError:
result.append(token)
prev_token = token
return result
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/speech_recognition/data/asr_dataset.py | examples/speech_recognition/data/asr_dataset.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import numpy as np
from fairseq.data import FairseqDataset
from . import data_utils
from .collaters import Seq2SeqCollater
class AsrDataset(FairseqDataset):
"""
A dataset representing speech and corresponding transcription.
Args:
aud_paths: (List[str]): A list of str with paths to audio files.
aud_durations_ms (List[int]): A list of int containing the durations of
audio files.
tgt (List[torch.LongTensor]): A list of LongTensors containing the indices
of target transcriptions.
tgt_dict (~fairseq.data.Dictionary): target vocabulary.
ids (List[str]): A list of utterance IDs.
speakers (List[str]): A list of speakers corresponding to utterances.
num_mel_bins (int): Number of triangular mel-frequency bins (default: 80)
frame_length (float): Frame length in milliseconds (default: 25.0)
frame_shift (float): Frame shift in milliseconds (default: 10.0)
"""
def __init__(
self, aud_paths, aud_durations_ms, tgt,
tgt_dict, ids, speakers,
num_mel_bins=80, frame_length=25.0, frame_shift=10.0
):
assert frame_length > 0
assert frame_shift > 0
assert all(x > frame_length for x in aud_durations_ms)
self.frame_sizes = [
int(1 + (d - frame_length) / frame_shift)
for d in aud_durations_ms
]
assert len(aud_paths) > 0
assert len(aud_paths) == len(aud_durations_ms)
assert len(aud_paths) == len(tgt)
assert len(aud_paths) == len(ids)
assert len(aud_paths) == len(speakers)
self.aud_paths = aud_paths
self.tgt_dict = tgt_dict
self.tgt = tgt
self.ids = ids
self.speakers = speakers
self.num_mel_bins = num_mel_bins
self.frame_length = frame_length
self.frame_shift = frame_shift
self.s2s_collater = Seq2SeqCollater(
0, 1, pad_index=self.tgt_dict.pad(),
eos_index=self.tgt_dict.eos(), move_eos_to_beginning=True
)
def __getitem__(self, index):
import torchaudio
import torchaudio.compliance.kaldi as kaldi
tgt_item = self.tgt[index] if self.tgt is not None else None
path = self.aud_paths[index]
if not os.path.exists(path):
raise FileNotFoundError("Audio file not found: {}".format(path))
sound, sample_rate = torchaudio.load_wav(path)
output = kaldi.fbank(
sound,
num_mel_bins=self.num_mel_bins,
frame_length=self.frame_length,
frame_shift=self.frame_shift
)
output_cmvn = data_utils.apply_mv_norm(output)
return {"id": index, "data": [output_cmvn.detach(), tgt_item]}
def __len__(self):
return len(self.aud_paths)
def collater(self, samples):
"""Merge a list of samples to form a mini-batch.
Args:
samples (List[int]): sample indices to collate
Returns:
dict: a mini-batch suitable for forwarding with a Model
"""
return self.s2s_collater.collate(samples)
def num_tokens(self, index):
return self.frame_sizes[index]
def size(self, index):
"""Return an example's size as a float or tuple. This value is used when
filtering a dataset with ``--max-positions``."""
return (
self.frame_sizes[index],
len(self.tgt[index]) if self.tgt is not None else 0,
)
def ordered_indices(self):
"""Return an ordered list of indices. Batches will be constructed based
on this order."""
return np.arange(len(self))
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/speech_recognition/data/collaters.py | examples/speech_recognition/data/collaters.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
This module contains collection of classes which implement
collate functionalities for various tasks.
Collaters should know what data to expect for each sample
and they should pack / collate them into batches
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import torch
from fairseq.data import data_utils as fairseq_data_utils
class Seq2SeqCollater(object):
"""
Implements collate function mainly for seq2seq tasks
This expects each sample to contain feature (src_tokens) and
targets.
This collator is also used for aligned training task.
"""
def __init__(
self,
feature_index=0,
label_index=1,
pad_index=1,
eos_index=2,
move_eos_to_beginning=True,
):
self.feature_index = feature_index
self.label_index = label_index
self.pad_index = pad_index
self.eos_index = eos_index
self.move_eos_to_beginning = move_eos_to_beginning
def _collate_frames(self, frames):
"""Convert a list of 2d frames into a padded 3d tensor
Args:
frames (list): list of 2d frames of size L[i]*f_dim. Where L[i] is
length of i-th frame and f_dim is static dimension of features
Returns:
3d tensor of size len(frames)*len_max*f_dim where len_max is max of L[i]
"""
len_max = max(frame.size(0) for frame in frames)
f_dim = frames[0].size(1)
res = frames[0].new(len(frames), len_max, f_dim).fill_(0.0)
for i, v in enumerate(frames):
res[i, : v.size(0)] = v
return res
def collate(self, samples):
"""
utility function to collate samples into batch for speech recognition.
"""
if len(samples) == 0:
return {}
# parse samples into torch tensors
parsed_samples = []
for s in samples:
# skip invalid samples
if s["data"][self.feature_index] is None:
continue
source = s["data"][self.feature_index]
if isinstance(source, (np.ndarray, np.generic)):
source = torch.from_numpy(source)
target = s["data"][self.label_index]
if isinstance(target, (np.ndarray, np.generic)):
target = torch.from_numpy(target).long()
elif isinstance(target, list):
target = torch.LongTensor(target)
parsed_sample = {"id": s["id"], "source": source, "target": target}
parsed_samples.append(parsed_sample)
samples = parsed_samples
id = torch.LongTensor([s["id"] for s in samples])
frames = self._collate_frames([s["source"] for s in samples])
# sort samples by descending number of frames
frames_lengths = torch.LongTensor([s["source"].size(0) for s in samples])
frames_lengths, sort_order = frames_lengths.sort(descending=True)
id = id.index_select(0, sort_order)
frames = frames.index_select(0, sort_order)
target = None
target_lengths = None
prev_output_tokens = None
if samples[0].get("target", None) is not None:
ntokens = sum(len(s["target"]) for s in samples)
target = fairseq_data_utils.collate_tokens(
[s["target"] for s in samples],
self.pad_index,
self.eos_index,
left_pad=False,
move_eos_to_beginning=False,
)
target = target.index_select(0, sort_order)
target_lengths = torch.LongTensor(
[s["target"].size(0) for s in samples]
).index_select(0, sort_order)
prev_output_tokens = fairseq_data_utils.collate_tokens(
[s["target"] for s in samples],
self.pad_index,
self.eos_index,
left_pad=False,
move_eos_to_beginning=self.move_eos_to_beginning,
)
prev_output_tokens = prev_output_tokens.index_select(0, sort_order)
else:
ntokens = sum(len(s["source"]) for s in samples)
batch = {
"id": id,
"ntokens": ntokens,
"net_input": {"src_tokens": frames, "src_lengths": frames_lengths},
"target": target,
"target_lengths": target_lengths,
"nsentences": len(samples),
}
if prev_output_tokens is not None:
batch["net_input"]["prev_output_tokens"] = prev_output_tokens
return batch
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/speech_recognition/data/__init__.py | examples/speech_recognition/data/__init__.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .asr_dataset import AsrDataset
__all__ = [
'AsrDataset',
]
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/noisychannel/rerank_generate.py | examples/noisychannel/rerank_generate.py | #!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Generate n-best translations using a trained model.
"""
from contextlib import redirect_stdout
import os
import subprocess
from fairseq import options
from fairseq_cli import generate, preprocess
from . import rerank_options, rerank_utils
def gen_and_reprocess_nbest(args):
if args.score_dict_dir is None:
args.score_dict_dir = args.data
if args.prefix_len is not None:
assert args.right_to_left1 is False, "prefix length not compatible with right to left models"
assert args.right_to_left2 is False, "prefix length not compatible with right to left models"
if args.nbest_list is not None:
assert args.score_model2 is None
if args.backwards1:
scorer1_src = args.target_lang
scorer1_tgt = args.source_lang
else:
scorer1_src = args.source_lang
scorer1_tgt = args.target_lang
store_data = os.path.join(os.path.dirname(__file__))+"/rerank_data/"+args.data_dir_name
if not os.path.exists(store_data):
os.makedirs(store_data)
pre_gen, left_to_right_preprocessed_dir, right_to_left_preprocessed_dir, \
backwards_preprocessed_dir, lm_preprocessed_dir = \
rerank_utils.get_directories(args.data_dir_name, args.num_rescore, args.gen_subset,
args.gen_model_name, args.shard_id, args.num_shards,
args.sampling, args.prefix_len, args.target_prefix_frac,
args.source_prefix_frac)
assert not (args.right_to_left1 and args.backwards1), "backwards right to left not supported"
assert not (args.right_to_left2 and args.backwards2), "backwards right to left not supported"
assert not (args.prefix_len is not None and args.target_prefix_frac is not None), \
"target prefix frac and target prefix len incompatible"
# make directory to store generation results
if not os.path.exists(pre_gen):
os.makedirs(pre_gen)
rerank1_is_gen = args.gen_model == args.score_model1 and args.source_prefix_frac is None
rerank2_is_gen = args.gen_model == args.score_model2 and args.source_prefix_frac is None
if args.nbest_list is not None:
rerank2_is_gen = True
# make directories to store preprossed nbest list for reranking
if not os.path.exists(left_to_right_preprocessed_dir):
os.makedirs(left_to_right_preprocessed_dir)
if not os.path.exists(right_to_left_preprocessed_dir):
os.makedirs(right_to_left_preprocessed_dir)
if not os.path.exists(lm_preprocessed_dir):
os.makedirs(lm_preprocessed_dir)
if not os.path.exists(backwards_preprocessed_dir):
os.makedirs(backwards_preprocessed_dir)
score1_file = rerank_utils.rescore_file_name(pre_gen, args.prefix_len, args.model1_name,
target_prefix_frac=args.target_prefix_frac,
source_prefix_frac=args.source_prefix_frac,
backwards=args.backwards1)
if args.score_model2 is not None:
score2_file = rerank_utils.rescore_file_name(pre_gen, args.prefix_len, args.model2_name,
target_prefix_frac=args.target_prefix_frac,
source_prefix_frac=args.source_prefix_frac,
backwards=args.backwards2)
predictions_bpe_file = pre_gen+"/generate_output_bpe.txt"
using_nbest = args.nbest_list is not None
if using_nbest:
print("Using predefined n-best list from interactive.py")
predictions_bpe_file = args.nbest_list
else:
if not os.path.isfile(predictions_bpe_file):
print("STEP 1: generate predictions using the p(T|S) model with bpe")
print(args.data)
param1 = [args.data,
"--path", args.gen_model,
"--shard-id", str(args.shard_id),
"--num-shards", str(args.num_shards),
"--nbest", str(args.num_rescore),
"--batch-size", str(args.batch_size),
"--beam", str(args.num_rescore),
"--max-sentences", str(args.num_rescore),
"--gen-subset", args.gen_subset,
"--source-lang", args.source_lang,
"--target-lang", args.target_lang]
if args.sampling:
param1 += ["--sampling"]
gen_parser = options.get_generation_parser()
input_args = options.parse_args_and_arch(gen_parser, param1)
print(input_args)
with open(predictions_bpe_file, 'w') as f:
with redirect_stdout(f):
generate.main(input_args)
gen_output = rerank_utils.BitextOutputFromGen(predictions_bpe_file, bpe_symbol=args.remove_bpe,
nbest=using_nbest, prefix_len=args.prefix_len,
target_prefix_frac=args.target_prefix_frac)
if args.diff_bpe:
rerank_utils.write_reprocessed(gen_output.no_bpe_source, gen_output.no_bpe_hypo,
gen_output.no_bpe_target, pre_gen+"/source_gen_bpe."+args.source_lang,
pre_gen+"/target_gen_bpe."+args.target_lang,
pre_gen+"/reference_gen_bpe."+args.target_lang)
bitext_bpe = args.rescore_bpe_code
bpe_src_param = ["-c", bitext_bpe,
"--input", pre_gen+"/source_gen_bpe."+args.source_lang,
"--output", pre_gen+"/rescore_data."+args.source_lang]
bpe_tgt_param = ["-c", bitext_bpe,
"--input", pre_gen+"/target_gen_bpe."+args.target_lang,
"--output", pre_gen+"/rescore_data."+args.target_lang]
subprocess.call(["python",
os.path.join(os.path.dirname(__file__),
"subword-nmt/subword_nmt/apply_bpe.py")] + bpe_src_param,
shell=False)
subprocess.call(["python",
os.path.join(os.path.dirname(__file__),
"subword-nmt/subword_nmt/apply_bpe.py")] + bpe_tgt_param,
shell=False)
if (not os.path.isfile(score1_file) and not rerank1_is_gen) or \
(args.score_model2 is not None and not os.path.isfile(score2_file) and not rerank2_is_gen):
print("STEP 2: process the output of generate.py so we have clean text files with the translations")
rescore_file = "/rescore_data"
if args.prefix_len is not None:
prefix_len_rescore_file = rescore_file + "prefix"+str(args.prefix_len)
if args.target_prefix_frac is not None:
target_prefix_frac_rescore_file = rescore_file + "target_prefix_frac"+str(args.target_prefix_frac)
if args.source_prefix_frac is not None:
source_prefix_frac_rescore_file = rescore_file + "source_prefix_frac"+str(args.source_prefix_frac)
if not args.right_to_left1 or not args.right_to_left2:
if not args.diff_bpe:
rerank_utils.write_reprocessed(gen_output.source, gen_output.hypo, gen_output.target,
pre_gen+rescore_file+"."+args.source_lang,
pre_gen+rescore_file+"."+args.target_lang,
pre_gen+"/reference_file", bpe_symbol=args.remove_bpe)
if args.prefix_len is not None:
bw_rescore_file = prefix_len_rescore_file
rerank_utils.write_reprocessed(gen_output.source, gen_output.hypo, gen_output.target,
pre_gen+prefix_len_rescore_file+"."+args.source_lang,
pre_gen+prefix_len_rescore_file+"."+args.target_lang,
pre_gen+"/reference_file", prefix_len=args.prefix_len,
bpe_symbol=args.remove_bpe)
elif args.target_prefix_frac is not None:
bw_rescore_file = target_prefix_frac_rescore_file
rerank_utils.write_reprocessed(gen_output.source, gen_output.hypo, gen_output.target,
pre_gen+target_prefix_frac_rescore_file+"."+args.source_lang,
pre_gen+target_prefix_frac_rescore_file+"."+args.target_lang,
pre_gen+"/reference_file", bpe_symbol=args.remove_bpe,
target_prefix_frac=args.target_prefix_frac)
else:
bw_rescore_file = rescore_file
if args.source_prefix_frac is not None:
fw_rescore_file = source_prefix_frac_rescore_file
rerank_utils.write_reprocessed(gen_output.source, gen_output.hypo, gen_output.target,
pre_gen+source_prefix_frac_rescore_file+"."+args.source_lang,
pre_gen+source_prefix_frac_rescore_file+"."+args.target_lang,
pre_gen+"/reference_file", bpe_symbol=args.remove_bpe,
source_prefix_frac=args.source_prefix_frac)
else:
fw_rescore_file = rescore_file
if args.right_to_left1 or args.right_to_left2:
rerank_utils.write_reprocessed(gen_output.source, gen_output.hypo, gen_output.target,
pre_gen+"/right_to_left_rescore_data."+args.source_lang,
pre_gen+"/right_to_left_rescore_data."+args.target_lang,
pre_gen+"/right_to_left_reference_file",
right_to_left=True, bpe_symbol=args.remove_bpe)
print("STEP 3: binarize the translations")
if not args.right_to_left1 or args.score_model2 is not None and not args.right_to_left2 or not rerank1_is_gen:
if args.backwards1 or args.backwards2:
if args.backwards_score_dict_dir is not None:
bw_dict = args.backwards_score_dict_dir
else:
bw_dict = args.score_dict_dir
bw_preprocess_param = ["--source-lang", scorer1_src,
"--target-lang", scorer1_tgt,
"--trainpref", pre_gen+bw_rescore_file,
"--srcdict", bw_dict + "/dict." + scorer1_src + ".txt",
"--tgtdict", bw_dict + "/dict." + scorer1_tgt + ".txt",
"--destdir", backwards_preprocessed_dir]
preprocess_parser = options.get_preprocessing_parser()
input_args = preprocess_parser.parse_args(bw_preprocess_param)
preprocess.main(input_args)
preprocess_param = ["--source-lang", scorer1_src,
"--target-lang", scorer1_tgt,
"--trainpref", pre_gen+fw_rescore_file,
"--srcdict", args.score_dict_dir+"/dict."+scorer1_src+".txt",
"--tgtdict", args.score_dict_dir+"/dict."+scorer1_tgt+".txt",
"--destdir", left_to_right_preprocessed_dir]
preprocess_parser = options.get_preprocessing_parser()
input_args = preprocess_parser.parse_args(preprocess_param)
preprocess.main(input_args)
if args.right_to_left1 or args.right_to_left2:
preprocess_param = ["--source-lang", scorer1_src,
"--target-lang", scorer1_tgt,
"--trainpref", pre_gen+"/right_to_left_rescore_data",
"--srcdict", args.score_dict_dir+"/dict."+scorer1_src+".txt",
"--tgtdict", args.score_dict_dir+"/dict."+scorer1_tgt+".txt",
"--destdir", right_to_left_preprocessed_dir]
preprocess_parser = options.get_preprocessing_parser()
input_args = preprocess_parser.parse_args(preprocess_param)
preprocess.main(input_args)
return gen_output
def cli_main():
parser = rerank_options.get_reranking_parser()
args = options.parse_args_and_arch(parser)
gen_and_reprocess_nbest(args)
if __name__ == '__main__':
cli_main()
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/noisychannel/rerank_score_lm.py | examples/noisychannel/rerank_score_lm.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from fairseq import options
from . import rerank_options, rerank_utils
def score_lm(args):
using_nbest = args.nbest_list is not None
pre_gen, left_to_right_preprocessed_dir, right_to_left_preprocessed_dir, \
backwards_preprocessed_dir, lm_preprocessed_dir = \
rerank_utils.get_directories(args.data_dir_name, args.num_rescore, args.gen_subset,
args.gen_model_name, args.shard_id, args.num_shards,
args.sampling, args.prefix_len, args.target_prefix_frac,
args.source_prefix_frac)
predictions_bpe_file = pre_gen+"/generate_output_bpe.txt"
if using_nbest:
print("Using predefined n-best list from interactive.py")
predictions_bpe_file = args.nbest_list
gen_output = rerank_utils.BitextOutputFromGen(predictions_bpe_file, bpe_symbol=args.remove_bpe, nbest=using_nbest)
if args.language_model is not None:
lm_score_file = rerank_utils.rescore_file_name(pre_gen, args.prefix_len, args.lm_name, lm_file=True)
if args.language_model is not None and not os.path.isfile(lm_score_file):
print("STEP 4.5: language modeling for P(T)")
if args.lm_bpe_code is None:
bpe_status = "no bpe"
elif args.lm_bpe_code == "shared":
bpe_status = "shared"
else:
bpe_status = "different"
rerank_utils.lm_scoring(lm_preprocessed_dir, bpe_status, gen_output, pre_gen,
args.lm_dict, args.lm_name, args.language_model,
args.lm_bpe_code, 128, lm_score_file, args.target_lang,
args.source_lang, prefix_len=args.prefix_len)
def cli_main():
parser = rerank_options.get_reranking_parser()
args = options.parse_args_and_arch(parser)
score_lm(args)
if __name__ == '__main__':
cli_main()
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/noisychannel/rerank_score_bw.py | examples/noisychannel/rerank_score_bw.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from contextlib import redirect_stdout
import os
from fairseq import options
from fairseq_cli import generate
from . import rerank_options, rerank_utils
def score_bw(args):
if args.backwards1:
scorer1_src = args.target_lang
scorer1_tgt = args.source_lang
else:
scorer1_src = args.source_lang
scorer1_tgt = args.target_lang
if args.score_model2 is not None:
if args.backwards2:
scorer2_src = args.target_lang
scorer2_tgt = args.source_lang
else:
scorer2_src = args.source_lang
scorer2_tgt = args.target_lang
rerank1_is_gen = args.gen_model == args.score_model1 and args.source_prefix_frac is None
rerank2_is_gen = args.gen_model == args.score_model2 and args.source_prefix_frac is None
pre_gen, left_to_right_preprocessed_dir, right_to_left_preprocessed_dir, \
backwards_preprocessed_dir, lm_preprocessed_dir = \
rerank_utils.get_directories(args.data_dir_name, args.num_rescore, args.gen_subset,
args.gen_model_name, args.shard_id, args.num_shards,
args.sampling, args.prefix_len, args.target_prefix_frac,
args.source_prefix_frac)
score1_file = rerank_utils.rescore_file_name(pre_gen, args.prefix_len, args.model1_name,
target_prefix_frac=args.target_prefix_frac,
source_prefix_frac=args.source_prefix_frac,
backwards=args.backwards1)
if args.score_model2 is not None:
score2_file = rerank_utils.rescore_file_name(pre_gen, args.prefix_len, args.model2_name,
target_prefix_frac=args.target_prefix_frac,
source_prefix_frac=args.source_prefix_frac,
backwards=args.backwards2)
if args.right_to_left1:
rerank_data1 = right_to_left_preprocessed_dir
elif args.backwards1:
rerank_data1 = backwards_preprocessed_dir
else:
rerank_data1 = left_to_right_preprocessed_dir
gen_param = ["--batch-size", str(128), "--score-reference", "--gen-subset", "train"]
if not rerank1_is_gen and not os.path.isfile(score1_file):
print("STEP 4: score the translations for model 1")
model_param1 = ["--path", args.score_model1, "--source-lang", scorer1_src, "--target-lang", scorer1_tgt]
gen_model1_param = [rerank_data1] + gen_param + model_param1
gen_parser = options.get_generation_parser()
input_args = options.parse_args_and_arch(gen_parser, gen_model1_param)
with open(score1_file, 'w') as f:
with redirect_stdout(f):
generate.main(input_args)
if args.score_model2 is not None and not os.path.isfile(score2_file) and not rerank2_is_gen:
print("STEP 4: score the translations for model 2")
if args.right_to_left2:
rerank_data2 = right_to_left_preprocessed_dir
elif args.backwards2:
rerank_data2 = backwards_preprocessed_dir
else:
rerank_data2 = left_to_right_preprocessed_dir
model_param2 = ["--path", args.score_model2, "--source-lang", scorer2_src, "--target-lang", scorer2_tgt]
gen_model2_param = [rerank_data2] + gen_param + model_param2
gen_parser = options.get_generation_parser()
input_args = options.parse_args_and_arch(gen_parser, gen_model2_param)
with open(score2_file, 'w') as f:
with redirect_stdout(f):
generate.main(input_args)
def cli_main():
parser = rerank_options.get_reranking_parser()
args = options.parse_args_and_arch(parser)
score_bw(args)
if __name__ == '__main__':
cli_main()
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/noisychannel/rerank_utils.py | examples/noisychannel/rerank_utils.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from contextlib import redirect_stdout
import math
import os
import re
import subprocess
from fairseq import options
from fairseq_cli import eval_lm, preprocess
def reprocess(fle):
# takes in a file of generate.py translation generate_output
# returns a source dict and hypothesis dict, where keys are the ID num (as a string)
# and values and the corresponding source and translation. There may be several translations
# per source, so the values for hypothesis_dict are lists.
# parses output of generate.py
with open(fle, 'r') as f:
txt = f.read()
"""reprocess generate.py output"""
p = re.compile(r"[STHP][-]\d+\s*")
hp = re.compile(r"(\s*[-]?\d+[.]?\d+\s*)|(\s*(-inf)\s*)")
source_dict = {}
hypothesis_dict = {}
score_dict = {}
target_dict = {}
pos_score_dict = {}
lines = txt.split("\n")
for line in lines:
line += "\n"
prefix = re.search(p, line)
if prefix is not None:
assert len(prefix.group()) > 2, "prefix id not found"
_, j = prefix.span()
id_num = prefix.group()[2:]
id_num = int(id_num)
line_type = prefix.group()[0]
if line_type == "H":
h_txt = line[j:]
hypo = re.search(hp, h_txt)
assert hypo is not None, ("regular expression failed to find the hypothesis scoring")
_, i = hypo.span()
score = hypo.group()
if id_num in hypothesis_dict:
hypothesis_dict[id_num].append(h_txt[i:])
score_dict[id_num].append(float(score))
else:
hypothesis_dict[id_num] = [h_txt[i:]]
score_dict[id_num] = [float(score)]
elif line_type == "S":
source_dict[id_num] = (line[j:])
elif line_type == "T":
target_dict[id_num] = (line[j:])
elif line_type == "P":
pos_scores = (line[j:]).split()
pos_scores = [float(x) for x in pos_scores]
if id_num in pos_score_dict:
pos_score_dict[id_num].append(pos_scores)
else:
pos_score_dict[id_num] = [pos_scores]
return source_dict, hypothesis_dict, score_dict, target_dict, pos_score_dict
def reprocess_nbest(fle):
"""reprocess interactive.py output"""
with open(fle, 'r') as f:
txt = f.read()
source_dict = {}
hypothesis_dict = {}
score_dict = {}
target_dict = {}
pos_score_dict = {}
lines = txt.split("\n")
hp = re.compile(r'[-]?\d+[.]?\d+')
j = -1
for _i, line in enumerate(lines):
line += "\n"
line_type = line[0]
if line_type == "H":
hypo = re.search(hp, line)
_, start_index = hypo.span()
score = hypo.group()
if j in score_dict:
score_dict[j].append(float(score))
hypothesis_dict[j].append(line[start_index:].strip("\t"))
else:
score_dict[j] = [float(score)]
hypothesis_dict[j] = [line[start_index:].strip("\t")]
elif line_type == "O":
j += 1
source_dict[j] = line[2:]
# we don't have the targets for interactive.py
target_dict[j] = "filler"
elif line_type == "P":
pos_scores = [float(pos_score) for pos_score in line.split()[1:]]
if j in pos_score_dict:
pos_score_dict[j].append(pos_scores)
else:
pos_score_dict[j] = [pos_scores]
assert source_dict.keys() == hypothesis_dict.keys()
assert source_dict.keys() == pos_score_dict.keys()
assert source_dict.keys() == score_dict.keys()
return source_dict, hypothesis_dict, score_dict, target_dict, pos_score_dict
def write_reprocessed(sources, hypos, targets, source_outfile,
hypo_outfile, target_outfile, right_to_left=False,
prefix_len=None, bpe_symbol=None,
target_prefix_frac=None, source_prefix_frac=None):
"""writes nbest hypothesis for rescoring"""
assert not (prefix_len is not None and target_prefix_frac is not None), \
"in writing reprocessed, only one type of prefix may be used"
assert not (prefix_len is not None and source_prefix_frac is not None), \
"in writing reprocessed, only one type of prefix may be used"
assert not (target_prefix_frac is not None and source_prefix_frac is not None), \
"in writing reprocessed, only one type of prefix may be used"
with open(source_outfile, 'w') as source_file, \
open(hypo_outfile, 'w') as hypo_file, \
open(target_outfile, 'w') as target_file:
assert len(sources) == len(hypos), "sources and hypos list length mismatch"
if right_to_left:
for i in range(len(sources)):
for j in range(len(hypos[i])):
if prefix_len is None:
hypo_file.write(make_right_to_left(hypos[i][j])+"\n")
else:
raise NotImplementedError()
source_file.write(make_right_to_left(sources[i])+"\n")
target_file.write(make_right_to_left(targets[i])+"\n")
else:
for i in sorted(sources.keys()):
for j in range(len(hypos[i])):
if prefix_len is not None:
shortened = get_prefix_no_bpe(hypos[i][j], bpe_symbol, prefix_len)+"\n"
hypo_file.write(shortened)
source_file.write(sources[i])
target_file.write(targets[i])
elif target_prefix_frac is not None:
num_words, shortened, num_bpe_tokens = \
calc_length_from_frac(hypos[i][j], target_prefix_frac, bpe_symbol)
shortened += "\n"
hypo_file.write(shortened)
source_file.write(sources[i])
target_file.write(targets[i])
elif source_prefix_frac is not None:
num_words, shortened, num_bpe_tokensn = \
calc_length_from_frac(sources[i], source_prefix_frac, bpe_symbol)
shortened += "\n"
hypo_file.write(hypos[i][j])
source_file.write(shortened)
target_file.write(targets[i])
else:
hypo_file.write(hypos[i][j])
source_file.write(sources[i])
target_file.write(targets[i])
def calc_length_from_frac(bpe_sentence, prefix_frac, bpe_symbol):
# return number of words, (not bpe tokens) that we want
no_bpe_sen = remove_bpe(bpe_sentence, bpe_symbol)
len_sen = len(no_bpe_sen.split())
num_words = math.ceil(len_sen * prefix_frac)
prefix = get_prefix_no_bpe(bpe_sentence, bpe_symbol, num_words)
num_bpe_tokens = len(prefix.split())
return num_words, prefix, num_bpe_tokens
def get_prefix(sentence, prefix_len):
"""assuming no bpe, gets the prefix of the sentence with prefix_len words"""
tokens = sentence.strip("\n").split()
if prefix_len >= len(tokens):
return sentence.strip("\n")
else:
return " ".join(tokens[:prefix_len])
def get_prefix_no_bpe(sentence, bpe_symbol, prefix_len):
if bpe_symbol is None:
return get_prefix(sentence, prefix_len)
else:
return " ".join(get_prefix_from_len(sentence.split(), bpe_symbol, prefix_len))
def get_prefix_from_len(sentence, bpe_symbol, prefix_len):
"""get the prefix of sentence with bpe, with prefix len in terms of words, not bpe tokens"""
bpe_count = sum([bpe_symbol.strip(" ") in t for t in sentence[:prefix_len]])
if bpe_count == 0:
return sentence[:prefix_len]
else:
return sentence[:prefix_len]+get_prefix_from_len(sentence[prefix_len:], bpe_symbol, bpe_count)
def get_num_bpe_tokens_from_len(sentence, bpe_symbol, prefix_len):
"""given a prefix length in terms of words, return the number of bpe tokens"""
prefix = get_prefix_no_bpe(sentence, bpe_symbol, prefix_len)
assert len(remove_bpe(prefix, bpe_symbol).split()) <= prefix_len
return len(prefix.split(" "))
def make_right_to_left(line):
tokens = line.split()
tokens.reverse()
new_line = " ".join(tokens)
return new_line
def remove_bpe(line, bpe_symbol):
line = line.replace("\n", '')
line = (line + ' ').replace(bpe_symbol, '').rstrip()
return line+("\n")
def remove_bpe_dict(pred_dict, bpe_symbol):
new_dict = {}
for i in pred_dict:
if type(pred_dict[i]) == list:
new_list = [remove_bpe(elem, bpe_symbol) for elem in pred_dict[i]]
new_dict[i] = new_list
else:
new_dict[i] = remove_bpe(pred_dict[i], bpe_symbol)
return new_dict
def parse_bleu_scoring(line):
p = re.compile(r'(BLEU4 = )\d+[.]\d+')
res = re.search(p, line)
assert res is not None, line
return float(res.group()[8:])
def get_full_from_prefix(hypo_prefix, hypos):
"""given a hypo prefix, recover the first hypo from the list of complete hypos beginning with that prefix"""
for hypo in hypos:
hypo_prefix = hypo_prefix.strip("\n")
len_prefix = len(hypo_prefix)
if hypo[:len_prefix] == hypo_prefix:
return hypo
# no match found
raise Exception()
def get_score(a, b, c, target_len, bitext_score1, bitext_score2=None, lm_score=None,
lenpen=None, src_len=None, tgt_len=None, bitext1_backwards=False,
bitext2_backwards=False, normalize=False):
if bitext1_backwards:
bitext1_norm = src_len
else:
bitext1_norm = tgt_len
if bitext_score2 is not None:
if bitext2_backwards:
bitext2_norm = src_len
else:
bitext2_norm = tgt_len
else:
bitext2_norm = 1
bitext_score2 = 0
if normalize:
score = a*bitext_score1/bitext1_norm + b*bitext_score2/bitext2_norm+c*lm_score/src_len
else:
score = a*bitext_score1 + b*bitext_score2+c*lm_score
if lenpen is not None:
score /= (target_len) ** float(lenpen)
return score
class BitextOutput(object):
def __init__(self, output_file, backwards, right_to_left, bpe_symbol,
prefix_len=None, target_prefix_frac=None, source_prefix_frac=None):
"""process output from rescoring"""
source, hypo, score, target, pos_score = reprocess(output_file)
if backwards:
self.hypo_fracs = source_prefix_frac
else:
self.hypo_fracs = target_prefix_frac
# remove length penalty so we can use raw scores
score, num_bpe_tokens = get_score_from_pos(pos_score, prefix_len, hypo, bpe_symbol, self.hypo_fracs, backwards)
source_lengths = {}
target_lengths = {}
assert hypo.keys() == source.keys(), "key mismatch"
if backwards:
tmp = hypo
hypo = source
source = tmp
for i in source:
# since we are reranking, there should only be one hypo per source sentence
if backwards:
len_src = len(source[i][0].split())
# record length without <eos>
if len_src == num_bpe_tokens[i][0] - 1:
source_lengths[i] = num_bpe_tokens[i][0] - 1
else:
source_lengths[i] = num_bpe_tokens[i][0]
target_lengths[i] = len(hypo[i].split())
source[i] = remove_bpe(source[i][0], bpe_symbol)
target[i] = remove_bpe(target[i], bpe_symbol)
hypo[i] = remove_bpe(hypo[i], bpe_symbol)
score[i] = float(score[i][0])
pos_score[i] = pos_score[i][0]
else:
len_tgt = len(hypo[i][0].split())
# record length without <eos>
if len_tgt == num_bpe_tokens[i][0] - 1:
target_lengths[i] = num_bpe_tokens[i][0] - 1
else:
target_lengths[i] = num_bpe_tokens[i][0]
source_lengths[i] = len(source[i].split())
if right_to_left:
source[i] = remove_bpe(make_right_to_left(source[i]), bpe_symbol)
target[i] = remove_bpe(make_right_to_left(target[i]), bpe_symbol)
hypo[i] = remove_bpe(make_right_to_left(hypo[i][0]), bpe_symbol)
score[i] = float(score[i][0])
pos_score[i] = pos_score[i][0]
else:
assert len(hypo[i]) == 1, "expected only one hypothesis per source sentence"
source[i] = remove_bpe(source[i], bpe_symbol)
target[i] = remove_bpe(target[i], bpe_symbol)
hypo[i] = remove_bpe(hypo[i][0], bpe_symbol)
score[i] = float(score[i][0])
pos_score[i] = pos_score[i][0]
self.rescore_source = source
self.rescore_hypo = hypo
self.rescore_score = score
self.rescore_target = target
self.rescore_pos_score = pos_score
self.backwards = backwards
self.right_to_left = right_to_left
self.target_lengths = target_lengths
self.source_lengths = source_lengths
class BitextOutputFromGen(object):
def __init__(self, predictions_bpe_file, bpe_symbol=None, nbest=False, prefix_len=None, target_prefix_frac=None):
if nbest:
pred_source, pred_hypo, pred_score, pred_target, pred_pos_score = reprocess_nbest(predictions_bpe_file)
else:
pred_source, pred_hypo, pred_score, pred_target, pred_pos_score = reprocess(predictions_bpe_file)
assert len(pred_source) == len(pred_hypo)
assert len(pred_source) == len(pred_score)
assert len(pred_source) == len(pred_target)
assert len(pred_source) == len(pred_pos_score)
# remove length penalty so we can use raw scores
pred_score, num_bpe_tokens = get_score_from_pos(pred_pos_score, prefix_len, pred_hypo,
bpe_symbol, target_prefix_frac, False)
self.source = pred_source
self.target = pred_target
self.score = pred_score
self.pos_score = pred_pos_score
self.hypo = pred_hypo
self.target_lengths = {}
self.source_lengths = {}
self.no_bpe_source = remove_bpe_dict(pred_source.copy(), bpe_symbol)
self.no_bpe_hypo = remove_bpe_dict(pred_hypo.copy(), bpe_symbol)
self.no_bpe_target = remove_bpe_dict(pred_target.copy(), bpe_symbol)
# indexes to match those from the rescoring models
self.rescore_source = {}
self.rescore_target = {}
self.rescore_pos_score = {}
self.rescore_hypo = {}
self.rescore_score = {}
self.num_hypos = {}
self.backwards = False
self.right_to_left = False
index = 0
for i in sorted(pred_source.keys()):
for j in range(len(pred_hypo[i])):
self.target_lengths[index] = len(self.hypo[i][j].split())
self.source_lengths[index] = len(self.source[i].split())
self.rescore_source[index] = self.no_bpe_source[i]
self.rescore_target[index] = self.no_bpe_target[i]
self.rescore_hypo[index] = self.no_bpe_hypo[i][j]
self.rescore_score[index] = float(pred_score[i][j])
self.rescore_pos_score[index] = pred_pos_score[i][j]
self.num_hypos[index] = len(pred_hypo[i])
index += 1
def get_score_from_pos(pos_score_dict, prefix_len, hypo_dict, bpe_symbol, hypo_frac, backwards):
score_dict = {}
num_bpe_tokens_dict = {}
assert prefix_len is None or hypo_frac is None
for key in pos_score_dict:
score_dict[key] = []
num_bpe_tokens_dict[key] = []
for i in range(len(pos_score_dict[key])):
if prefix_len is not None and not backwards:
num_bpe_tokens = get_num_bpe_tokens_from_len(hypo_dict[key][i], bpe_symbol, prefix_len)
score_dict[key].append(sum(pos_score_dict[key][i][:num_bpe_tokens]))
num_bpe_tokens_dict[key].append(num_bpe_tokens)
elif hypo_frac is not None:
num_words, shortened, hypo_prefix_len = calc_length_from_frac(hypo_dict[key][i], hypo_frac, bpe_symbol)
score_dict[key].append(sum(pos_score_dict[key][i][:hypo_prefix_len]))
num_bpe_tokens_dict[key].append(hypo_prefix_len)
else:
score_dict[key].append(sum(pos_score_dict[key][i]))
num_bpe_tokens_dict[key].append(len(pos_score_dict[key][i]))
return score_dict, num_bpe_tokens_dict
class LMOutput(object):
def __init__(self, lm_score_file, lm_dict=None, prefix_len=None, bpe_symbol=None, target_prefix_frac=None):
lm_sentences, lm_sen_scores, lm_sen_pos_scores, lm_no_bpe_sentences, lm_bpe_tokens = \
parse_lm(lm_score_file, prefix_len=prefix_len,
bpe_symbol=bpe_symbol, target_prefix_frac=target_prefix_frac)
self.sentences = lm_sentences
self.score = lm_sen_scores
self.pos_score = lm_sen_pos_scores
self.lm_dict = lm_dict
self.no_bpe_sentences = lm_no_bpe_sentences
self.bpe_tokens = lm_bpe_tokens
def parse_lm(input_file, prefix_len=None, bpe_symbol=None, target_prefix_frac=None):
"""parse output of eval_lm"""
with open(input_file, 'r') as f:
text = f.readlines()
text = text[7:]
cleaned_text = text[:-2]
sentences = {}
sen_scores = {}
sen_pos_scores = {}
no_bpe_sentences = {}
num_bpe_tokens_dict = {}
for _i, line in enumerate(cleaned_text):
tokens = line.split()
if tokens[0].isdigit():
line_id = int(tokens[0])
scores = [float(x[1:-1]) for x in tokens[2::2]]
sentences[line_id] = " ".join(tokens[1::2][:-1])+"\n"
if bpe_symbol is not None:
# exclude <eos> symbol to match output from generate.py
bpe_sen = " ".join(tokens[1::2][:-1])+"\n"
no_bpe_sen = remove_bpe(bpe_sen, bpe_symbol)
no_bpe_sentences[line_id] = no_bpe_sen
if prefix_len is not None:
num_bpe_tokens = get_num_bpe_tokens_from_len(bpe_sen, bpe_symbol, prefix_len)
sen_scores[line_id] = sum(scores[:num_bpe_tokens])
num_bpe_tokens_dict[line_id] = num_bpe_tokens
elif target_prefix_frac is not None:
num_words, shortened, target_prefix_len = calc_length_from_frac(bpe_sen, target_prefix_frac,
bpe_symbol)
sen_scores[line_id] = sum(scores[:target_prefix_len])
num_bpe_tokens_dict[line_id] = target_prefix_len
else:
sen_scores[line_id] = sum(scores)
num_bpe_tokens_dict[line_id] = len(scores)
sen_pos_scores[line_id] = scores
return sentences, sen_scores, sen_pos_scores, no_bpe_sentences, num_bpe_tokens_dict
def get_directories(data_dir_name, num_rescore, gen_subset,
fw_name, shard_id, num_shards,
sampling=False, prefix_len=None,
target_prefix_frac=None, source_prefix_frac=None):
nbest_file_id = "nbest_" + str(num_rescore) + \
"_subset_" + gen_subset + \
"_fw_name_" + fw_name + \
"_shard_" + str(shard_id) + \
"_of_" + str(num_shards)
if sampling:
nbest_file_id += "_sampling"
# the directory containing all information for this nbest list
pre_gen = os.path.join(os.path.dirname(__file__))+"/rerank_data/"+data_dir_name+"/"+nbest_file_id
# the directory to store the preprocessed nbest list, for left to right rescoring
left_to_right_preprocessed_dir = pre_gen+"/left_to_right_preprocessed"
if source_prefix_frac is not None:
left_to_right_preprocessed_dir = left_to_right_preprocessed_dir + "/prefix_frac" + str(source_prefix_frac)
# the directory to store the preprocessed nbest list, for right to left rescoring
right_to_left_preprocessed_dir = pre_gen+"/right_to_left_preprocessed"
# the directory to store the preprocessed nbest list, for backwards rescoring
backwards_preprocessed_dir = pre_gen+"/backwards"
if target_prefix_frac is not None:
backwards_preprocessed_dir = backwards_preprocessed_dir+"/prefix_frac"+str(target_prefix_frac)
elif prefix_len is not None:
backwards_preprocessed_dir = backwards_preprocessed_dir+"/prefix_"+str(prefix_len)
# the directory to store the preprocessed nbest list, for rescoring with P(T)
lm_preprocessed_dir = pre_gen+"/lm_preprocessed"
return pre_gen, left_to_right_preprocessed_dir, right_to_left_preprocessed_dir, \
backwards_preprocessed_dir, lm_preprocessed_dir
def lm_scoring(preprocess_directory, bpe_status, gen_output, pre_gen,
cur_lm_dict, cur_lm_name, cur_language_model, cur_lm_bpe_code,
batch_size, lm_score_file, target_lang, source_lang, prefix_len=None):
if prefix_len is not None:
assert bpe_status == "different", "bpe status must be different to use prefix len"
if bpe_status == "no bpe":
# run lm on output without bpe
write_reprocessed(gen_output.no_bpe_source, gen_output.no_bpe_hypo,
gen_output.no_bpe_target, pre_gen+"/rescore_data_no_bpe.de",
pre_gen+"/rescore_data_no_bpe.en", pre_gen+"/reference_file_no_bpe")
preprocess_lm_param = ["--only-source",
"--trainpref", pre_gen+"/rescore_data_no_bpe."+target_lang,
"--srcdict", cur_lm_dict,
"--destdir", preprocess_directory]
preprocess_parser = options.get_preprocessing_parser()
input_args = preprocess_parser.parse_args(preprocess_lm_param)
preprocess.main(input_args)
eval_lm_param = [preprocess_directory,
"--path", cur_language_model,
"--output-word-probs",
"--batch-size", str(batch_size),
"--max-tokens", "1024",
"--sample-break-mode", "eos",
"--gen-subset", "train"]
eval_lm_parser = options.get_eval_lm_parser()
input_args = options.parse_args_and_arch(eval_lm_parser, eval_lm_param)
with open(lm_score_file, 'w') as f:
with redirect_stdout(f):
eval_lm.main(input_args)
elif bpe_status == "shared":
preprocess_lm_param = ["--only-source",
"--trainpref", pre_gen+"/rescore_data."+target_lang,
"--srcdict", cur_lm_dict,
"--destdir", preprocess_directory]
preprocess_parser = options.get_preprocessing_parser()
input_args = preprocess_parser.parse_args(preprocess_lm_param)
preprocess.main(input_args)
eval_lm_param = [preprocess_directory,
"--path", cur_language_model,
"--output-word-probs",
"--batch-size", str(batch_size),
"--sample-break-mode", "eos",
"--gen-subset", "train"]
eval_lm_parser = options.get_eval_lm_parser()
input_args = options.parse_args_and_arch(eval_lm_parser, eval_lm_param)
with open(lm_score_file, 'w') as f:
with redirect_stdout(f):
eval_lm.main(input_args)
elif bpe_status == "different":
rescore_file = pre_gen+"/rescore_data_no_bpe"
rescore_bpe = pre_gen+"/rescore_data_new_bpe"
rescore_file += "."
rescore_bpe += "."
write_reprocessed(gen_output.no_bpe_source, gen_output.no_bpe_hypo,
gen_output.no_bpe_target, rescore_file+source_lang,
rescore_file+target_lang, pre_gen+"/reference_file_no_bpe",
bpe_symbol=None)
# apply LM bpe to nbest list
bpe_src_param = ["-c", cur_lm_bpe_code,
"--input", rescore_file+target_lang,
"--output", rescore_bpe+target_lang]
subprocess.call(["python",
os.path.join(os.path.dirname(__file__),
"subword-nmt/subword_nmt/apply_bpe.py")] + bpe_src_param,
shell=False)
# uncomment to use fastbpe instead of subword-nmt bpe
# bpe_src_param = [rescore_bpe+target_lang, rescore_file+target_lang, cur_lm_bpe_code]
# subprocess.call(["/private/home/edunov/fastBPE/fast", "applybpe"] + bpe_src_param, shell=False)
preprocess_dir = preprocess_directory
preprocess_lm_param = ["--only-source",
"--trainpref", rescore_bpe+target_lang,
"--srcdict", cur_lm_dict,
"--destdir", preprocess_dir]
preprocess_parser = options.get_preprocessing_parser()
input_args = preprocess_parser.parse_args(preprocess_lm_param)
preprocess.main(input_args)
eval_lm_param = [preprocess_dir,
"--path", cur_language_model,
"--output-word-probs",
"--batch-size", str(batch_size),
"--max-tokens", "1024",
"--sample-break-mode", "eos",
"--gen-subset", "train"]
eval_lm_parser = options.get_eval_lm_parser()
input_args = options.parse_args_and_arch(eval_lm_parser, eval_lm_param)
with open(lm_score_file, 'w') as f:
with redirect_stdout(f):
eval_lm.main(input_args)
def rescore_file_name(nbest_dir, prefix_len, scorer_name, lm_file=False,
target_prefix_frac=None, source_prefix_frac=None, backwards=None):
if lm_file:
score_file = nbest_dir+"/lm_score_translations_model_"+scorer_name+".txt"
else:
score_file = nbest_dir+"/"+scorer_name+"_score_translations.txt"
if backwards:
if prefix_len is not None:
score_file += "prefix_len"+str(prefix_len)
elif target_prefix_frac is not None:
score_file += "target_prefix_frac"+str(target_prefix_frac)
else:
if source_prefix_frac is not None:
score_file += "source_prefix_frac"+str(source_prefix_frac)
return score_file
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/noisychannel/rerank.py | examples/noisychannel/rerank.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from multiprocessing import Pool
import numpy as np
from fairseq import options
from fairseq.data import dictionary
from fairseq.scoring import bleu
from . import (
rerank_generate,
rerank_score_bw,
rerank_score_lm,
rerank_options,
rerank_utils,
)
def score_target_hypo(args, a, b, c, lenpen, target_outfile, hypo_outfile, write_hypos, normalize):
print("lenpen", lenpen, "weight1", a, "weight2", b, "weight3", c)
gen_output_lst, bitext1_lst, bitext2_lst, lm_res_lst = load_score_files(args)
dict = dictionary.Dictionary()
scorer = bleu.Scorer(dict.pad(), dict.eos(), dict.unk())
ordered_hypos = {}
ordered_targets = {}
for shard_id in range(len(bitext1_lst)):
bitext1 = bitext1_lst[shard_id]
bitext2 = bitext2_lst[shard_id]
gen_output = gen_output_lst[shard_id]
lm_res = lm_res_lst[shard_id]
total = len(bitext1.rescore_source.keys())
source_lst = []
hypo_lst = []
score_lst = []
reference_lst = []
j = 1
best_score = -math.inf
for i in range(total):
# length is measured in terms of words, not bpe tokens, since models may not share the same bpe
target_len = len(bitext1.rescore_hypo[i].split())
if lm_res is not None:
lm_score = lm_res.score[i]
else:
lm_score = 0
if bitext2 is not None:
bitext2_score = bitext2.rescore_score[i]
bitext2_backwards = bitext2.backwards
else:
bitext2_score = None
bitext2_backwards = None
score = rerank_utils.get_score(a, b, c, target_len,
bitext1.rescore_score[i], bitext2_score, lm_score=lm_score,
lenpen=lenpen, src_len=bitext1.source_lengths[i],
tgt_len=bitext1.target_lengths[i], bitext1_backwards=bitext1.backwards,
bitext2_backwards=bitext2_backwards, normalize=normalize)
if score > best_score:
best_score = score
best_hypo = bitext1.rescore_hypo[i]
if j == gen_output.num_hypos[i] or j == args.num_rescore:
j = 1
hypo_lst.append(best_hypo)
score_lst.append(best_score)
source_lst.append(bitext1.rescore_source[i])
reference_lst.append(bitext1.rescore_target[i])
best_score = -math.inf
best_hypo = ""
else:
j += 1
gen_keys = list(sorted(gen_output.no_bpe_target.keys()))
for key in range(len(gen_keys)):
if args.prefix_len is None:
assert hypo_lst[key] in gen_output.no_bpe_hypo[gen_keys[key]], (
"pred and rescore hypo mismatch: i: " + str(key) + ", "
+ str(hypo_lst[key]) + str(gen_keys[key])
+ str(gen_output.no_bpe_hypo[key])
)
sys_tok = dict.encode_line(hypo_lst[key])
ref_tok = dict.encode_line(gen_output.no_bpe_target[gen_keys[key]])
scorer.add(ref_tok, sys_tok)
else:
full_hypo = rerank_utils.get_full_from_prefix(hypo_lst[key], gen_output.no_bpe_hypo[gen_keys[key]])
sys_tok = dict.encode_line(full_hypo)
ref_tok = dict.encode_line(gen_output.no_bpe_target[gen_keys[key]])
scorer.add(ref_tok, sys_tok)
# if only one set of hyper parameters is provided, write the predictions to a file
if write_hypos:
# recover the orinal ids from n best list generation
for key in range(len(gen_output.no_bpe_target)):
if args.prefix_len is None:
assert hypo_lst[key] in gen_output.no_bpe_hypo[gen_keys[key]], \
"pred and rescore hypo mismatch:"+"i:"+str(key)+str(hypo_lst[key]) + str(gen_output.no_bpe_hypo[key])
ordered_hypos[gen_keys[key]] = hypo_lst[key]
ordered_targets[gen_keys[key]] = gen_output.no_bpe_target[gen_keys[key]]
else:
full_hypo = rerank_utils.get_full_from_prefix(hypo_lst[key], gen_output.no_bpe_hypo[gen_keys[key]])
ordered_hypos[gen_keys[key]] = full_hypo
ordered_targets[gen_keys[key]] = gen_output.no_bpe_target[gen_keys[key]]
# write the hypos in the original order from nbest list generation
if args.num_shards == (len(bitext1_lst)):
with open(target_outfile, 'w') as t:
with open(hypo_outfile, 'w') as h:
for key in range(len(ordered_hypos)):
t.write(ordered_targets[key])
h.write(ordered_hypos[key])
res = scorer.result_string(4)
if write_hypos:
print(res)
score = rerank_utils.parse_bleu_scoring(res)
return score
def match_target_hypo(args, target_outfile, hypo_outfile):
"""combine scores from the LM and bitext models, and write the top scoring hypothesis to a file"""
if len(args.weight1) == 1:
res = score_target_hypo(args, args.weight1[0], args.weight2[0],
args.weight3[0], args.lenpen[0], target_outfile,
hypo_outfile, True, args.normalize)
rerank_scores = [res]
else:
print("launching pool")
with Pool(32) as p:
rerank_scores = p.starmap(score_target_hypo,
[(args, args.weight1[i], args.weight2[i], args.weight3[i],
args.lenpen[i], target_outfile, hypo_outfile,
False, args.normalize) for i in range(len(args.weight1))])
if len(rerank_scores) > 1:
best_index = np.argmax(rerank_scores)
best_score = rerank_scores[best_index]
print("best score", best_score)
print("best lenpen", args.lenpen[best_index])
print("best weight1", args.weight1[best_index])
print("best weight2", args.weight2[best_index])
print("best weight3", args.weight3[best_index])
return args.lenpen[best_index], args.weight1[best_index], \
args.weight2[best_index], args.weight3[best_index], best_score
else:
return args.lenpen[0], args.weight1[0], args.weight2[0], args.weight3[0], rerank_scores[0]
def load_score_files(args):
if args.all_shards:
shard_ids = list(range(args.num_shards))
else:
shard_ids = [args.shard_id]
gen_output_lst = []
bitext1_lst = []
bitext2_lst = []
lm_res1_lst = []
for shard_id in shard_ids:
using_nbest = args.nbest_list is not None
pre_gen, left_to_right_preprocessed_dir, right_to_left_preprocessed_dir, \
backwards_preprocessed_dir, lm_preprocessed_dir = \
rerank_utils.get_directories(args.data_dir_name, args.num_rescore, args.gen_subset,
args.gen_model_name, shard_id, args.num_shards, args.sampling,
args.prefix_len, args.target_prefix_frac, args.source_prefix_frac)
rerank1_is_gen = args.gen_model == args.score_model1 and args.source_prefix_frac is None
rerank2_is_gen = args.gen_model == args.score_model2 and args.source_prefix_frac is None
score1_file = rerank_utils.rescore_file_name(pre_gen, args.prefix_len, args.model1_name,
target_prefix_frac=args.target_prefix_frac,
source_prefix_frac=args.source_prefix_frac,
backwards=args.backwards1)
if args.score_model2 is not None:
score2_file = rerank_utils.rescore_file_name(pre_gen, args.prefix_len, args.model2_name,
target_prefix_frac=args.target_prefix_frac,
source_prefix_frac=args.source_prefix_frac,
backwards=args.backwards2)
if args.language_model is not None:
lm_score_file = rerank_utils.rescore_file_name(pre_gen, args.prefix_len, args.lm_name, lm_file=True)
# get gen output
predictions_bpe_file = pre_gen+"/generate_output_bpe.txt"
if using_nbest:
print("Using predefined n-best list from interactive.py")
predictions_bpe_file = args.nbest_list
gen_output = rerank_utils.BitextOutputFromGen(predictions_bpe_file, bpe_symbol=args.remove_bpe,
nbest=using_nbest, prefix_len=args.prefix_len,
target_prefix_frac=args.target_prefix_frac)
if rerank1_is_gen:
bitext1 = gen_output
else:
bitext1 = rerank_utils.BitextOutput(score1_file, args.backwards1, args.right_to_left1,
args.remove_bpe, args.prefix_len, args.target_prefix_frac,
args.source_prefix_frac)
if args.score_model2 is not None or args.nbest_list is not None:
if rerank2_is_gen:
bitext2 = gen_output
else:
bitext2 = rerank_utils.BitextOutput(score2_file, args.backwards2, args.right_to_left2,
args.remove_bpe, args.prefix_len, args.target_prefix_frac,
args.source_prefix_frac)
assert bitext2.source_lengths == bitext1.source_lengths, \
"source lengths for rescoring models do not match"
assert bitext2.target_lengths == bitext1.target_lengths, \
"target lengths for rescoring models do not match"
else:
if args.diff_bpe:
assert args.score_model2 is None
bitext2 = gen_output
else:
bitext2 = None
if args.language_model is not None:
lm_res1 = rerank_utils.LMOutput(lm_score_file, args.lm_dict, args.prefix_len,
args.remove_bpe, args.target_prefix_frac)
else:
lm_res1 = None
gen_output_lst.append(gen_output)
bitext1_lst.append(bitext1)
bitext2_lst.append(bitext2)
lm_res1_lst.append(lm_res1)
return gen_output_lst, bitext1_lst, bitext2_lst, lm_res1_lst
def rerank(args):
if type(args.lenpen) is not list:
args.lenpen = [args.lenpen]
if type(args.weight1) is not list:
args.weight1 = [args.weight1]
if type(args.weight2) is not list:
args.weight2 = [args.weight2]
if type(args.weight3) is not list:
args.weight3 = [args.weight3]
if args.all_shards:
shard_ids = list(range(args.num_shards))
else:
shard_ids = [args.shard_id]
for shard_id in shard_ids:
pre_gen, left_to_right_preprocessed_dir, right_to_left_preprocessed_dir, \
backwards_preprocessed_dir, lm_preprocessed_dir = \
rerank_utils.get_directories(args.data_dir_name, args.num_rescore, args.gen_subset,
args.gen_model_name, shard_id, args.num_shards, args.sampling,
args.prefix_len, args.target_prefix_frac, args.source_prefix_frac)
rerank_generate.gen_and_reprocess_nbest(args)
rerank_score_bw.score_bw(args)
rerank_score_lm.score_lm(args)
if args.write_hypos is None:
write_targets = pre_gen+"/matched_targets"
write_hypos = pre_gen+"/matched_hypos"
else:
write_targets = args.write_hypos+"_targets" + args.gen_subset
write_hypos = args.write_hypos+"_hypos" + args.gen_subset
if args.all_shards:
write_targets += "_all_shards"
write_hypos += "_all_shards"
best_lenpen, best_weight1, best_weight2, best_weight3, best_score = \
match_target_hypo(args, write_targets, write_hypos)
return best_lenpen, best_weight1, best_weight2, best_weight3, best_score
def cli_main():
parser = rerank_options.get_reranking_parser()
args = options.parse_args_and_arch(parser)
rerank(args)
if __name__ == '__main__':
cli_main()
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/noisychannel/rerank_options.py | examples/noisychannel/rerank_options.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairseq import options
def get_reranking_parser(default_task='translation'):
parser = options.get_parser('Generation and reranking', default_task)
add_reranking_args(parser)
return parser
def get_tuning_parser(default_task='translation'):
parser = options.get_parser('Reranking tuning', default_task)
add_reranking_args(parser)
add_tuning_args(parser)
return parser
def add_reranking_args(parser):
group = parser.add_argument_group("Reranking")
# fmt: off
group.add_argument('--score-model1', '-s1', type=str, metavar='FILE', required=True,
help='path to first model or ensemble of models for rescoring')
group.add_argument('--score-model2', '-s2', type=str, metavar='FILE', required=False,
help='path to second model or ensemble of models for rescoring')
group.add_argument('--num-rescore', '-n', type=int, metavar='N', default=10,
help='the number of candidate hypothesis to rescore')
group.add_argument('-bz', '--batch-size', type=int, metavar='N', default=128,
help='batch size for generating the nbest list')
group.add_argument('--gen-subset', default='test', metavar='SET', choices=['test', 'train', 'valid'],
help='data subset to generate (train, valid, test)')
group.add_argument('--gen-model', default=None, metavar='FILE',
help='the model to generate translations')
group.add_argument('-b1', '--backwards1', action='store_true',
help='whether or not the first model group is backwards')
group.add_argument('-b2', '--backwards2', action='store_true',
help='whether or not the second model group is backwards')
group.add_argument('-a', '--weight1', default=1, nargs='+', type=float,
help='the weight(s) of the first model')
group.add_argument('-b', '--weight2', default=1, nargs='+', type=float,
help='the weight(s) of the second model, or the gen model if using nbest from interactive.py')
group.add_argument('-c', '--weight3', default=1, nargs='+', type=float,
help='the weight(s) of the third model')
# lm arguments
group.add_argument('-lm', '--language-model', default=None, metavar='FILE',
help='language model for target language to rescore translations')
group.add_argument('--lm-dict', default=None, metavar='FILE',
help='the dict of the language model for the target language')
group.add_argument('--lm-name', default=None,
help='the name of the language model for the target language')
group.add_argument('--lm-bpe-code', default=None, metavar='FILE',
help='the bpe code for the language model for the target language')
group.add_argument('--data-dir-name', default=None,
help='name of data directory')
group.add_argument('--lenpen', default=1, nargs='+', type=float,
help='length penalty: <1.0 favors shorter, >1.0 favors longer sentences')
group.add_argument('--score-dict-dir', default=None,
help='the directory with dictionaries for the scoring models')
group.add_argument('--right-to-left1', action='store_true',
help='whether the first model group is a right to left model')
group.add_argument('--right-to-left2', action='store_true',
help='whether the second model group is a right to left model')
group.add_argument('--remove-bpe', '--post-process', default='@@ ',
help='the bpe symbol, used for the bitext and LM')
group.add_argument('--prefix-len', default=None, type=int,
help='the length of the target prefix to use in rescoring (in terms of words wo bpe)')
group.add_argument('--sampling', action='store_true',
help='use sampling instead of beam search for generating n best list')
group.add_argument('--diff-bpe', action='store_true',
help='bpe for rescoring and nbest list not the same')
group.add_argument('--rescore-bpe-code', default=None,
help='bpe code for rescoring models')
group.add_argument('--nbest-list', default=None,
help='use predefined nbest list in interactive.py format')
group.add_argument('--write-hypos', default=None,
help='filename prefix to write hypos to')
group.add_argument('--ref-translation', default=None,
help='reference translation to use with nbest list from interactive.py')
group.add_argument('--backwards-score-dict-dir', default=None,
help='the directory with dictionaries for the backwards model,'
'if None then it is assumed the fw and backwards models share dictionaries')
# extra scaling args
group.add_argument('--gen-model-name', default=None,
help='the name of the models that generated the nbest list')
group.add_argument('--model1-name', default=None,
help='the name of the set for model1 group ')
group.add_argument('--model2-name', default=None,
help='the name of the set for model2 group')
group.add_argument('--shard-id', default=0, type=int,
help='the id of the shard to generate')
group.add_argument('--num-shards', default=1, type=int,
help='the number of shards to generate across')
group.add_argument('--all-shards', action='store_true',
help='use all shards')
group.add_argument('--target-prefix-frac', default=None, type=float,
help='the fraction of the target prefix to use in rescoring (in terms of words wo bpe)')
group.add_argument('--source-prefix-frac', default=None, type=float,
help='the fraction of the source prefix to use in rescoring (in terms of words wo bpe)')
group.add_argument('--normalize', action='store_true',
help='whether to normalize by src and target len')
return group
def add_tuning_args(parser):
group = parser.add_argument_group("Tuning")
group.add_argument('--lower-bound', default=[-0.7], nargs='+', type=float,
help='lower bound of search space')
group.add_argument('--upper-bound', default=[3], nargs='+', type=float,
help='upper bound of search space')
group.add_argument('--tune-param', default=['lenpen'], nargs='+',
choices=['lenpen', 'weight1', 'weight2', 'weight3'],
help='the parameter(s) to tune')
group.add_argument('--tune-subset', default='valid', choices=['valid', 'test', 'train'],
help='the subset to tune on ')
group.add_argument('--num-trials', default=1000, type=int,
help='number of trials to do for random search')
group.add_argument('--share-weights', action='store_true',
help='share weight2 and weight 3')
return group
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/noisychannel/__init__.py | examples/noisychannel/__init__.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .rerank_options import * # noqa
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/examples/noisychannel/rerank_tune.py | examples/noisychannel/rerank_tune.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import random
import numpy as np
from fairseq import options
from . import rerank, rerank_options
def random_search(args):
param_values = []
tuneable_parameters = ['lenpen', 'weight1', 'weight2', 'weight3']
initial_params = [args.lenpen, args.weight1, args.weight2, args.weight3]
for i, elem in enumerate(initial_params):
if type(elem) is not list:
initial_params[i] = [elem]
else:
initial_params[i] = elem
tune_parameters = args.tune_param.copy()
for i in range(len(args.tune_param)):
assert args.upper_bound[i] >= args.lower_bound[i]
index = tuneable_parameters.index(args.tune_param[i])
del tuneable_parameters[index]
del initial_params[index]
tune_parameters += tuneable_parameters
param_values += initial_params
random.seed(args.seed)
random_params = np.array([
[random.uniform(args.lower_bound[i], args.upper_bound[i]) for i in range(len(args.tune_param))]
for k in range(args.num_trials)
])
set_params = np.array([
[initial_params[i][0] for i in range(len(tuneable_parameters))]
for k in range(args.num_trials)
])
random_params = np.concatenate((random_params, set_params), 1)
rerank_args = vars(args).copy()
if args.nbest_list:
rerank_args['gen_subset'] = 'test'
else:
rerank_args['gen_subset'] = args.tune_subset
for k in range(len(tune_parameters)):
rerank_args[tune_parameters[k]] = list(random_params[:, k])
if args.share_weights:
k = tune_parameters.index('weight2')
rerank_args['weight3'] = list(random_params[:, k])
rerank_args = argparse.Namespace(**rerank_args)
best_lenpen, best_weight1, best_weight2, best_weight3, best_score = rerank.rerank(rerank_args)
rerank_args = vars(args).copy()
rerank_args['lenpen'] = [best_lenpen]
rerank_args['weight1'] = [best_weight1]
rerank_args['weight2'] = [best_weight2]
rerank_args['weight3'] = [best_weight3]
# write the hypothesis from the valid set from the best trial
if args.gen_subset != "valid":
rerank_args['gen_subset'] = "valid"
rerank_args = argparse.Namespace(**rerank_args)
rerank.rerank(rerank_args)
# test with the best hyperparameters on gen subset
rerank_args = vars(args).copy()
rerank_args['gen_subset'] = args.gen_subset
rerank_args['lenpen'] = [best_lenpen]
rerank_args['weight1'] = [best_weight1]
rerank_args['weight2'] = [best_weight2]
rerank_args['weight3'] = [best_weight3]
rerank_args = argparse.Namespace(**rerank_args)
rerank.rerank(rerank_args)
def cli_main():
parser = rerank_options.get_tuning_parser()
args = options.parse_args_and_arch(parser)
random_search(args)
if __name__ == '__main__':
cli_main()
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/checkpoint_utils.py | fairseq/checkpoint_utils.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import collections
import logging
import os
import re
import traceback
from collections import OrderedDict
from typing import Union
import torch
from fairseq.file_io import PathManager
from fairseq.models import FairseqDecoder, FairseqEncoder
from torch.serialization import default_restore_location
logger = logging.getLogger(__name__)
def save_checkpoint(args, trainer, epoch_itr, val_loss):
from fairseq import distributed_utils, meters
# only one worker should attempt to create the required dir
if args.distributed_rank == 0:
os.makedirs(args.save_dir, exist_ok=True)
prev_best = getattr(save_checkpoint, "best", val_loss)
if val_loss is not None:
best_function = max if args.maximize_best_checkpoint_metric else min
save_checkpoint.best = best_function(val_loss, prev_best)
if args.no_save:
return
trainer.consolidate_optimizer()
if not trainer.is_data_parallel_master:
return
def is_better(a, b):
return a >= b if args.maximize_best_checkpoint_metric else a <= b
write_timer = meters.StopwatchMeter()
write_timer.start()
epoch = epoch_itr.epoch
end_of_epoch = epoch_itr.end_of_epoch()
updates = trainer.get_num_updates()
suffix = getattr(args, "checkpoint_suffix", "")
checkpoint_conds = collections.OrderedDict()
checkpoint_conds["checkpoint{}{}.pt".format(epoch, suffix)] = (
end_of_epoch
and not args.no_epoch_checkpoints
and epoch % args.save_interval == 0
)
checkpoint_conds["checkpoint_{}_{}{}.pt".format(epoch, updates, suffix)] = (
not end_of_epoch
and args.save_interval_updates > 0
and updates % args.save_interval_updates == 0
)
checkpoint_conds["checkpoint_best{}.pt".format(suffix)] = val_loss is not None and (
not hasattr(save_checkpoint, "best")
or is_better(val_loss, save_checkpoint.best)
)
if val_loss is not None and args.keep_best_checkpoints > 0:
checkpoint_conds["checkpoint.best_{}_{:.2f}.pt".format(
args.best_checkpoint_metric, val_loss)] = (
not hasattr(save_checkpoint, "best")
or is_better(val_loss, save_checkpoint.best)
)
checkpoint_conds["checkpoint_last{}.pt".format(suffix)] = not args.no_last_checkpoints
extra_state = {"train_iterator": epoch_itr.state_dict(), "val_loss": val_loss}
if hasattr(save_checkpoint, "best"):
extra_state.update({"best": save_checkpoint.best})
checkpoints = [
os.path.join(args.save_dir, fn) for fn, cond in checkpoint_conds.items() if cond
]
if len(checkpoints) > 0:
trainer.save_checkpoint(checkpoints[0], extra_state)
for cp in checkpoints[1:]:
PathManager.copy(checkpoints[0], cp, overwrite=True)
write_timer.stop()
logger.info(
"saved checkpoint {} (epoch {} @ {} updates, score {}) (writing took {} seconds)".format(
checkpoints[0], epoch, updates, val_loss, write_timer.sum
)
)
if not end_of_epoch and args.keep_interval_updates > 0:
# remove old checkpoints; checkpoints are sorted in descending order
checkpoints = checkpoint_paths(
args.save_dir, pattern=r"checkpoint_\d+_(\d+)\.pt"
)
for old_chk in checkpoints[args.keep_interval_updates :]:
if os.path.lexists(old_chk):
os.remove(old_chk)
if args.keep_last_epochs > 0:
# remove old epoch checkpoints; checkpoints are sorted in descending order
checkpoints = checkpoint_paths(args.save_dir, pattern=r"checkpoint(\d+)\.pt")
for old_chk in checkpoints[args.keep_last_epochs :]:
if os.path.lexists(old_chk):
os.remove(old_chk)
if args.keep_best_checkpoints > 0:
# only keep the best N checkpoints according to validation metric
checkpoints = checkpoint_paths(
args.save_dir, pattern=r"checkpoint\.best_{}_(\d+\.?\d*)\.pt".format(args.best_checkpoint_metric))
if not args.maximize_best_checkpoint_metric:
checkpoints = checkpoints[::-1]
for old_chk in checkpoints[args.keep_best_checkpoints:]:
if os.path.lexists(old_chk):
os.remove(old_chk)
def load_checkpoint(args, trainer, **passthrough_args):
"""
Load a checkpoint and restore the training iterator.
*passthrough_args* will be passed through to
``trainer.get_train_iterator``.
"""
reset_optimizer = args.reset_optimizer
reset_lr_scheduler = args.reset_lr_scheduler
optimizer_overrides = eval(args.optimizer_overrides)
reset_meters = args.reset_meters
reset_dataloader = args.reset_dataloader
if getattr(args, 'finetune_from_model', None) is not None \
and (reset_optimizer or reset_lr_scheduler or reset_meters or reset_dataloader):
raise ValueError("--finetune-from-model can not be set together with either --reset-optimizer"
" or reset_lr_scheduler or reset_meters or reset_dataloader")
suffix = getattr(args, "checkpoint_suffix", "")
if args.restore_file == "checkpoint_last.pt": # default value of restore_file is 'checkpoint_last.pt'
checkpoint_path = os.path.join(args.save_dir, "checkpoint_last{}.pt".format(suffix))
first_launch = not PathManager.exists(checkpoint_path)
if getattr(args, 'finetune_from_model', None) is not None and first_launch:
# if there is no last checkpoint to restore, start the finetune from pretrained model
# else just use usual logic to load checkpoint, e.g. restart from last checkpoint and etc.
if PathManager.exists(args.finetune_from_model):
checkpoint_path = args.finetune_from_model
reset_optimizer = True
reset_lr_scheduler = True
reset_meters = True
reset_dataloader = True
logger.info(f'loading pretrained model from {checkpoint_path}: '
'optimizer, lr scheduler, meters, dataloader will be reset')
else:
raise ValueError(f'--funetune-from-model {args.finetune_from_model} does not exist')
elif getattr(args, "model_parallel_size", 1) > 1:
checkpoint_path = args.restore_file.replace(".pt", suffix + ".pt")
else:
checkpoint_path = args.restore_file
if args.restore_file != "checkpoint_last.pt" and getattr(args, 'finetune_from_model', None):
raise ValueError(
'--finetune-from-model and --restore-file (non-default value) '
'can not be specified together: ' + str(args))
extra_state = trainer.load_checkpoint(
checkpoint_path,
reset_optimizer,
reset_lr_scheduler,
optimizer_overrides,
reset_meters=reset_meters,
)
if (
extra_state is not None
and "best" in extra_state
and not reset_optimizer
and not reset_meters
):
save_checkpoint.best = extra_state["best"]
if extra_state is not None and not reset_dataloader:
# restore iterator from checkpoint
itr_state = extra_state["train_iterator"]
epoch_itr = trainer.get_train_iterator(
epoch=itr_state["epoch"], load_dataset=True, **passthrough_args
)
epoch_itr.load_state_dict(itr_state)
else:
epoch_itr = trainer.get_train_iterator(
epoch=1, load_dataset=True, **passthrough_args
)
trainer.lr_step(epoch_itr.epoch)
return extra_state, epoch_itr
def load_checkpoint_to_cpu(path, arg_overrides=None):
"""Loads a checkpoint to CPU (with upgrading for backward compatibility)."""
with PathManager.open(path, "rb") as f:
state = torch.load(
f, map_location=lambda s, l: default_restore_location(s, "cpu")
)
args = state["args"]
if arg_overrides is not None:
for arg_name, arg_val in arg_overrides.items():
setattr(args, arg_name, arg_val)
state = _upgrade_state_dict(state)
return state
def load_model_ensemble(filenames, arg_overrides=None, task=None, strict=True, suffix=''):
"""Loads an ensemble of models.
Args:
filenames (List[str]): checkpoint files to load
arg_overrides (Dict[str,Any], optional): override model args that
were used during model training
task (fairseq.tasks.FairseqTask, optional): task to use for loading
"""
ensemble, args, _task = load_model_ensemble_and_task(
filenames, arg_overrides, task, strict, suffix,
)
return ensemble, args
def load_model_ensemble_and_task(filenames, arg_overrides=None, task=None, strict=True, suffix=''):
from fairseq import tasks
ensemble = []
for filename in filenames:
filename = filename.replace(".pt", suffix + ".pt")
if not PathManager.exists(filename):
raise IOError("Model file not found: {}".format(filename))
state = load_checkpoint_to_cpu(filename, arg_overrides)
args = state["args"]
if task is None:
task = tasks.setup_task(args)
# build model for ensemble
model = task.build_model(args)
model.load_state_dict(state["model"], strict=strict, args=args)
ensemble.append(model)
return ensemble, args, task
def checkpoint_paths(path, pattern=r"checkpoint(\d+)\.pt"):
"""Retrieves all checkpoints found in `path` directory.
Checkpoints are identified by matching filename to the specified pattern. If
the pattern contains groups, the result will be sorted by the first group in
descending order.
"""
pt_regexp = re.compile(pattern)
files = os.listdir(path)
entries = []
for i, f in enumerate(files):
m = pt_regexp.fullmatch(f)
if m is not None:
idx = float(m.group(1)) if len(m.groups()) > 0 else i
entries.append((idx, m.group(0)))
return [os.path.join(path, x[1]) for x in sorted(entries, reverse=True)]
def torch_persistent_save(obj, f):
if isinstance(f, str):
with PathManager.open(f, "wb") as h:
torch_persistent_save(obj, h)
return
for i in range(3):
try:
return torch.save(obj, f)
except Exception:
if i == 2:
logger.error(traceback.format_exc())
def save_state(
filename,
args,
model_state_dict,
criterion,
optimizer,
lr_scheduler,
num_updates,
optim_history=None,
extra_state=None,
):
from fairseq import utils
if optim_history is None:
optim_history = []
if extra_state is None:
extra_state = {}
state_dict = {
"args": args,
"model": model_state_dict or {},
"optimizer_history": optim_history
+ [
{
"criterion_name": criterion.__class__.__name__,
"optimizer_name": optimizer.__class__.__name__,
"lr_scheduler_state": lr_scheduler.state_dict(),
"num_updates": num_updates,
}
],
"extra_state": extra_state,
}
if utils.has_parameters(criterion):
state_dict["criterion"] = criterion.state_dict()
if not args.no_save_optimizer_state:
state_dict["last_optimizer_state"] = optimizer.state_dict()
# convert all state to CPU
state_dict = utils.move_to_cpu(state_dict)
with PathManager.open(filename, "wb") as f:
torch_persistent_save(state_dict, f)
def _upgrade_state_dict(state):
"""Helper for upgrading old model checkpoints."""
from fairseq import models, registry, tasks
# add optimizer_history
if "optimizer_history" not in state:
state["optimizer_history"] = [
{"criterion_name": "CrossEntropyCriterion", "best_loss": state["best_loss"]}
]
state["last_optimizer_state"] = state["optimizer"]
del state["optimizer"]
del state["best_loss"]
# move extra_state into sub-dictionary
if "epoch" in state and "extra_state" not in state:
state["extra_state"] = {
"epoch": state["epoch"],
"batch_offset": state["batch_offset"],
"val_loss": state["val_loss"],
}
del state["epoch"]
del state["batch_offset"]
del state["val_loss"]
# reduce optimizer history's memory usage (only keep the last state)
if "optimizer" in state["optimizer_history"][-1]:
state["last_optimizer_state"] = state["optimizer_history"][-1]["optimizer"]
for optim_hist in state["optimizer_history"]:
del optim_hist["optimizer"]
# record the optimizer class name
if "optimizer_name" not in state["optimizer_history"][-1]:
state["optimizer_history"][-1]["optimizer_name"] = "FairseqNAG"
# move best_loss into lr_scheduler_state
if "lr_scheduler_state" not in state["optimizer_history"][-1]:
state["optimizer_history"][-1]["lr_scheduler_state"] = {
"best": state["optimizer_history"][-1]["best_loss"]
}
del state["optimizer_history"][-1]["best_loss"]
# keep track of number of updates
if "num_updates" not in state["optimizer_history"][-1]:
state["optimizer_history"][-1]["num_updates"] = 0
# old model checkpoints may not have separate source/target positions
if hasattr(state["args"], "max_positions") and not hasattr(
state["args"], "max_source_positions"
):
state["args"].max_source_positions = state["args"].max_positions
state["args"].max_target_positions = state["args"].max_positions
# use stateful training data iterator
if "train_iterator" not in state["extra_state"]:
state["extra_state"]["train_iterator"] = {
"epoch": state["extra_state"]["epoch"],
"iterations_in_epoch": state["extra_state"].get("batch_offset", 0),
}
# default to translation task
if not hasattr(state["args"], "task"):
state["args"].task = "translation"
# --raw-text and --lazy-load are deprecated
if getattr(state["args"], "raw_text", False):
state["args"].dataset_impl = "raw"
elif getattr(state["args"], "lazy_load", False):
state["args"].dataset_impl = "lazy"
# epochs start at 1
if state["extra_state"]["train_iterator"] is not None:
state["extra_state"]["train_iterator"]["epoch"] = max(
state["extra_state"]["train_iterator"].get("epoch", 1),
1,
)
# set any missing default values in the task, model or other registries
registry.set_defaults(state["args"], tasks.TASK_REGISTRY[state["args"].task])
registry.set_defaults(state["args"], models.ARCH_MODEL_REGISTRY[state["args"].arch])
for registry_name, REGISTRY in registry.REGISTRIES.items():
choice = getattr(state["args"], registry_name, None)
if choice is not None:
cls = REGISTRY["registry"][choice]
registry.set_defaults(state["args"], cls)
return state
def prune_state_dict(state_dict, args):
"""Prune the given state_dict if desired for LayerDrop
(https://arxiv.org/abs/1909.11556).
Training with LayerDrop allows models to be robust to pruning at inference
time. This function prunes state_dict to allow smaller models to be loaded
from a larger model and re-maps the existing state_dict for this to occur.
It's called by functions that load models from checkpoints and does not
need to be called directly.
"""
if not args or args.arch == "ptt_transformer":
# args should not be none, but don't crash if it is.
return state_dict
encoder_layers_to_keep = (
args.encoder_layers_to_keep if "encoder_layers_to_keep" in vars(args) else None
)
decoder_layers_to_keep = (
args.decoder_layers_to_keep if "decoder_layers_to_keep" in vars(args) else None
)
if not encoder_layers_to_keep and not decoder_layers_to_keep:
return state_dict
# apply pruning
logger.info(
"Pruning model to specified layer configuration - this works best if the model was trained with LayerDrop"
)
def create_pruning_pass(layers_to_keep, layer_name):
keep_layers = sorted(
[int(layer_string) for layer_string in layers_to_keep.split(",")]
)
mapping_dict = {}
for i in range(len(keep_layers)):
mapping_dict[str(keep_layers[i])] = str(i)
regex = re.compile(r"^{layer}.*\.layers\.(\d+)".format(layer=layer_name))
return {"substitution_regex": regex, "mapping_dict": mapping_dict}
pruning_passes = []
if encoder_layers_to_keep:
pruning_passes.append(create_pruning_pass(encoder_layers_to_keep, "encoder"))
if decoder_layers_to_keep:
pruning_passes.append(create_pruning_pass(decoder_layers_to_keep, "decoder"))
new_state_dict = {}
for layer_name in state_dict.keys():
match = re.search(r"\.layers\.(\d+)\.", layer_name)
# if layer has no number in it, it is a supporting layer, such as an
# embedding
if not match:
new_state_dict[layer_name] = state_dict[layer_name]
continue
# otherwise, layer should be pruned.
original_layer_number = match.group(1)
# figure out which mapping dict to replace from
for pruning_pass in pruning_passes:
if original_layer_number in pruning_pass["mapping_dict"] and pruning_pass[
"substitution_regex"
].search(layer_name):
new_layer_number = pruning_pass["mapping_dict"][original_layer_number]
substitution_match = pruning_pass["substitution_regex"].search(
layer_name
)
new_state_key = (
layer_name[: substitution_match.start(1)]
+ new_layer_number
+ layer_name[substitution_match.end(1) :]
)
new_state_dict[new_state_key] = state_dict[layer_name]
# Since layers are now pruned, *_layers_to_keep are no longer needed.
# This is more of "It would make it work fix" rather than a proper fix.
if "encoder_layers_to_keep" in vars(args):
args.encoder_layers_to_keep = None
if "decoder_layers_to_keep" in vars(args):
args.decoder_layers_to_keep = None
return new_state_dict
def load_pretrained_component_from_model(
component: Union[FairseqEncoder, FairseqDecoder], checkpoint: str
):
"""
Load a pretrained FairseqEncoder or FairseqDecoder from checkpoint into the
provided `component` object. If state_dict fails to load, there may be a
mismatch in the architecture of the corresponding `component` found in the
`checkpoint` file.
"""
if not PathManager.exists(checkpoint):
raise IOError("Model file not found: {}".format(checkpoint))
state = load_checkpoint_to_cpu(checkpoint)
if isinstance(component, FairseqEncoder):
component_type = "encoder"
elif isinstance(component, FairseqDecoder):
component_type = "decoder"
else:
raise ValueError(
"component to load must be either a FairseqEncoder or "
"FairseqDecoder. Loading other component types are not supported."
)
component_state_dict = OrderedDict()
for key in state["model"].keys():
if key.startswith(component_type):
# encoder.input_layers.0.0.weight --> input_layers.0.0.weight
component_subkey = key[len(component_type) + 1 :]
component_state_dict[component_subkey] = state["model"][key]
component.load_state_dict(component_state_dict, strict=True)
return component
def verify_checkpoint_directory(save_dir: str) -> None:
if not os.path.exists(save_dir):
os.makedirs(save_dir, exist_ok=True)
temp_file_path = os.path.join(save_dir, "dummy")
try:
with open(temp_file_path, "w"):
pass
except OSError as e:
logger.warning("Unable to access checkpoint save directory: {}".format(save_dir))
raise e
else:
os.remove(temp_file_path)
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/search.py | fairseq/search.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import Optional, List
import torch
import torch.nn as nn
from torch import Tensor
from fairseq.token_generation_constraints import ConstraintState, UnorderedConstraintState, OrderedConstraintState
class Search(nn.Module):
def __init__(self, tgt_dict):
super().__init__()
self.pad = tgt_dict.pad()
self.unk = tgt_dict.unk()
self.eos = tgt_dict.eos()
self.vocab_size = len(tgt_dict)
self.src_lengths = torch.tensor(-1)
self.supports_constraints = False
def step(self, step, lprobs, scores):
"""Take a single search step.
Args:
step: the current search step, starting at 0
lprobs: (bsz x input_beam_size x vocab_size)
the model's log-probabilities over the vocabulary at the current step
scores: (bsz x input_beam_size x step)
the historical model scores of each hypothesis up to this point
Return: A tuple of (scores, indices, beams) where:
scores: (bsz x output_beam_size)
the scores of the chosen elements; output_beam_size can be
larger than input_beam_size, e.g., we may return
2*input_beam_size to account for EOS
indices: (bsz x output_beam_size)
the indices of the chosen elements
beams: (bsz x output_beam_size)
the hypothesis ids of the chosen elements, in the range [0, input_beam_size)
"""
raise NotImplementedError
@torch.jit.export
def set_src_lengths(self, src_lengths):
self.src_lengths = src_lengths
@torch.jit.export
def init_constraints(self, batch_constraints: Optional[Tensor], beam_size: int):
"""Initialize constraint states for constrained decoding (if supported).
Args:
batch_constraints: (torch.Tensor, optional)
the list of constraints, in packed form
beam_size: (int)
the beam size
Returns:
*encoder_out* rearranged according to *new_order*
"""
pass
def prune_sentences(self, batch_idxs: Tensor):
"""
Removes constraint states for completed sentences (if supported).
This is called from sequence_generator._generate() when sentences are
deleted from the batch.
Args:
batch_idxs: Indices of *sentences* whose constraint state should be *kept*.
"""
pass
def update_constraints(self, active_hypos: Tensor):
"""
Updates the constraint states by selecting the beam items that are retained.
This is called at each time step of sequence_generator._generate() when
the set of 2 * {beam_size} candidate hypotheses are reduced to the beam size.
Args:
active_hypos: (batch size, beam size)
list of integers denoting, for each sentence, which beam candidate items
should be kept.
"""
pass
class BeamSearch(Search):
def __init__(self, tgt_dict):
super().__init__(tgt_dict)
self.constraint_states = None
@torch.jit.export
def step(self, step: int, lprobs, scores: Optional[Tensor]):
bsz, beam_size, vocab_size = lprobs.size()
if step == 0:
# at the first step all hypotheses are equally likely, so use
# only the first beam
lprobs = lprobs[:, ::beam_size, :].contiguous()
else:
# make probs contain cumulative scores for each hypothesis
assert scores is not None
lprobs = lprobs + scores[:, :, step - 1].unsqueeze(-1)
top_prediction = torch.topk(
lprobs.view(bsz, -1),
k=min(
# Take the best 2 x beam_size predictions. We'll choose the first
# beam_size of these which don't predict eos to continue with.
beam_size * 2,
lprobs.view(bsz, -1).size(1) - 1, # -1 so we never select pad
),
)
scores_buf = top_prediction[0]
indices_buf = top_prediction[1]
# Project back into relative indices and beams
beams_buf = indices_buf // vocab_size
indices_buf = indices_buf.fmod(vocab_size)
# At this point, beams_buf and indices_buf are single-dim and contain relative indices
return scores_buf, indices_buf, beams_buf
class LexicallyConstrainedBeamSearch(Search):
"""Implements lexically constrained beam search as described in
Fast Lexically Constrained Decoding with Dynamic Beam
Allocation for Neural Machine Translation. Post & Vilar,
NAACL 2018. https://www.aclweb.org/anthology/N18-1119/
and
Improved Lexically Constrained Decoding for Translation and
Monolingual Rewriting. Hu et al, NAACL
2019. https://www.aclweb.org/anthology/N19-1090/
This is accomplished by maintaining, for each beam hypothesis, a
ConstraintState object (see constraints.py) that tracks which
constraints have been generated and using this information to
shape the beam for each input sentence.
"""
def __init__(self, tgt_dict, representation):
super().__init__(tgt_dict)
self.representation = representation
self.vocab_size = len(tgt_dict)
self.num_cands = 0
self.supports_constraints = True
@torch.jit.export
def init_constraints(self, batch_constraints: Optional[Tensor], beam_size: int):
self.constraint_states = []
for constraint_tensor in batch_constraints:
if self.representation == "ordered":
constraint_state = OrderedConstraintState.create(constraint_tensor)
elif self.representation == "unordered":
constraint_state = UnorderedConstraintState.create(constraint_tensor)
self.constraint_states.append([constraint_state for i in range(beam_size)])
@torch.jit.export
def prune_sentences(self, batch_idxs: Tensor):
self.constraint_states = [self.constraint_states[i] for i in batch_idxs.tolist()]
@torch.jit.export
def update_constraints(self, active_hypos: Tensor):
if self.constraint_states:
batch_size = active_hypos.size(0)
for sentid in range(batch_size):
self.constraint_states[sentid] = [self.constraint_states[sentid][i] for i in active_hypos[sentid]]
@torch.jit.export
def step(self, step: int, lprobs: Tensor, scores: Optional[Tensor]):
"""
A constrained step builds a large candidates list from the following:
- the top 2 * {beam_size} items over the whole beam
- for each item in the beam
- the top {each_k} (default 1)
- all next constraints
We then compute the constrained state of each beam item, and assign
stripe codes: 0 to the best in each bank, 1 to the 2nd-best, and so
on. We then sort by (stripe, score), and truncate the list at
2 * beam size.
Args:
step: the decoder step
lprobs: (batch size, beam size, target vocab)
the target-vocab distributions for each item in the beam.
Retrun: A tuple of (scores, indices, beams, constraints) where:
scores: (batch, output beam size)
the scores of the chosen elements
indices: (batch, output beam size)
the target vocab indices of the chosen elements
beams: (batch, output beam size)
the 0-indexed hypothesis ids of the chosen elements
constraints: (batch, output beam size)
the new constraint states
"""
each_k = 1
device = lprobs.device
batch_size, beam_size, vocab_size = lprobs.size()
self.num_cands = min(
# Just take the k-best. We'll get another k from the 1-best from each
# row, plus more from the constraints
beam_size * 2,
lprobs.view(batch_size, -1).size(1) - 1, # -1 so we never select pad
)
# STEP 0: Preliminary. Prevent EOS for unfinished hyps across all batch items
constraint_states = self.constraint_states
if constraint_states and step > 0:
not_finished_indices = []
for sentno, sent_constraints in enumerate(constraint_states):
for beamno, state in enumerate(sent_constraints):
index = sentno * beam_size + beamno
if not state.finished:
not_finished_indices.append(index)
not_finished_indices = torch.tensor(not_finished_indices)
if not_finished_indices.numel() > 0:
lprobs.view(batch_size * beam_size, -1)[not_finished_indices, self.eos] = -math.inf
if step == 0:
# at the first step all hypotheses are equally likely, so use
# only the first beam entry for each batch item
lprobs = lprobs[:, ::beam_size, :].contiguous()
else:
# make probs contain cumulative scores for each hypothesis
assert scores is not None
lprobs = lprobs + scores[:, :, step - 1].unsqueeze(-1)
top_prediction = torch.topk(
lprobs.view(batch_size, -1),
self.num_cands,
)
scores_buf, indices_buf = top_prediction
# Project back into relative indices and beams
beams_buf = indices_buf // vocab_size
indices_buf = indices_buf.fmod(vocab_size)
# Short circuit if there are no constraints in this batch
if not constraint_states:
return scores_buf, indices_buf, beams_buf
# STEP 1: get top-1 from each hypothesis across all sentences in the batch
if step > 0:
top_scores, top_indices = torch.topk(
lprobs.view(batch_size * beam_size, -1),
k=each_k,
dim=1,
)
top_scores = top_scores.view(batch_size, -1)
top_indices = top_indices.view(batch_size, -1)
scores_buf = torch.cat((scores_buf, top_scores), dim=1)
indices_buf = torch.cat((indices_buf, top_indices), dim=1)
new_beams = torch.arange(0, beam_size, device=device).repeat(batch_size, 1)
beams_buf = torch.cat((beams_buf, new_beams), dim=1)
# Now, process sentences in the batch one by one.
new_scores_buf = torch.zeros((batch_size, 2 * beam_size), device=device)
new_indices_buf = torch.zeros((batch_size, 2 * beam_size), device=device).long()
new_beams_buf = torch.zeros((batch_size, 2 * beam_size), device=device).long()
for sentno, states in enumerate(constraint_states):
scores, indices, beams, new_states = self.step_sentence(step,
sentno,
lprobs[sentno],
constraint_states[sentno],
beams_buf[sentno].clone(),
indices_buf[sentno].clone(),
scores_buf[sentno].clone())
new_scores_buf[sentno] = scores
new_indices_buf[sentno] = indices
new_beams_buf[sentno] = beams
self.constraint_states[sentno] = new_states
return new_scores_buf, new_indices_buf, new_beams_buf
@torch.jit.export
def step_sentence(self,
step: int,
sentno: int,
lprobs: Tensor,
constraint_states: List[List[ConstraintState]],
beams_buf: Tensor,
indices_buf: Tensor,
scores_buf: Tensor):
"""Does per-sentence processing. Adds all constraints for each
hypothesis to the list of candidates; then removes duplicates,
sorts, and dynamically stripes across the banks. All tensor inputs
are collapsed to those pertaining to a single input sentence.
"""
device = lprobs.device
# STEP 2: Add all constraints for each beam item
for beamno, state in enumerate(constraint_states):
next_tokens = torch.tensor(list(state.next_tokens()), device=device).long()
if next_tokens.numel() != 0:
indices_buf = torch.cat((indices_buf, next_tokens))
next_beams = torch.tensor(beamno, device=device).repeat(next_tokens.size(0)).long()
beams_buf = torch.cat((beams_buf, next_beams))
next_values = lprobs[beamno].take(next_tokens.view(-1))
scores_buf = torch.cat((scores_buf, next_values))
# At the 0th time step, there is just one beam item
if step == 0:
break
# STEP 3: Compute the "bank" for each candidate. This is the
# number of constraints it's generated. We need this so that
# we can do round-robin allocation of the beam across these
# banks. If C is the number of constraints, we select the best
# item in bank C, then the best in bank C-1, etc, followed by
# the 2nd-best in bank C, the 2nd-best in bank C-1, etc, and so
# on, until the maximum beam size. We accomplish this by
# creating a sort key and striping across the banks.
# Compute the new states for all candidates
cands_size = indices_buf.size(0)
constraint_states = [constraint_states[beams_buf[i]].advance(indices_buf[i])
for i in range(cands_size)]
banks = torch.tensor([state.bank for state in constraint_states], device=device)
# STEP 4: Sort
num_constraint_tokens = len(state.tokens)
# Sort by keys (bank, score) (i.e., sort banks together, and scores
# within banks). AFAIK pytorch doesn't support either stable sort or
# multi-key sorting, so we have to hack this.
MAX_SCORE = -100
sort_key = (num_constraint_tokens - banks) * MAX_SCORE + scores_buf
sort_values, sort_indices = sort_key.sort(dim=0, descending=True)
scores_buf = scores_buf[sort_indices]
indices_buf = indices_buf[sort_indices]
beams_buf = beams_buf[sort_indices]
banks = banks[sort_indices]
# Sort the constraints to follow suit
constraint_states = [constraint_states[i] for i in sort_indices]
# STEP 5: Remove duplicates. The topk calls (overall and
# per-row) plus the per-row generation of constraints will
# produce duplicates. Here we remove them.
def roll(t):
"""Rolls a 1d tensor left by 1.
[0, 1, 2, 3, 4] becomes [4, 0, 1, 2, 3]
"""
return torch.cat((t[-1].unsqueeze(0), t[0:-1]), dim=0)
# We map candidates (beam, token_id) to a single dimension.
# This is then shifted by 1. We can then easily identify
# duplicates and create a mask that identifies unique
# extensions.
uniques_mask = (beams_buf * (self.vocab_size + 1) + indices_buf)
uniques_mask = roll(uniques_mask) != uniques_mask
# Use the mask to pare down the data structures
scores_buf = torch.masked_select(scores_buf, uniques_mask)
indices_buf = torch.masked_select(indices_buf, uniques_mask)
beams_buf = torch.masked_select(beams_buf, uniques_mask)
banks = torch.masked_select(banks, uniques_mask)
i = 1
for mask in uniques_mask[1:]:
if not mask:
constraint_states.pop(i)
i += mask
# STEP 6: Assign IDs round-robin across banks, sort, and
# truncate. Now that the candidates are sorted by (bank,
# score) and uniqed, we dynamically allocate the {beam_size}
# beam by striping across the candidates. These stripes will
# be used as sort keys to do round-robin selection. This is
# accomplished in a single pass with offsets. Sorting by
# highest-banks (furthest-along hypotheses) first ensures
# progress through the constraints.
#
# e.g., BANKS: 3 3 3 2 2 2 2 1 1 1 0 0
# OLD STRIPES: 0 1 2 0 1 2 3 0 1 2 0 1
# NEW STRIPES: 0 1+4 2+8 0+1 1+5 2+9 3+11 0+2 1+6 2+10 0+3 1+7
# = 0 5 10 1 6 11 13 2 7 12 3 8
#
# Sorting by this then gives the following banks:
#
# 3 2 1 0 3 2 1 0 3 2 1 2
#
# We'll take the top {beam_size} of these.
stripe_offsets = [offset * (len(banks) + 1) for offset in range(len(banks) + 1)]
stripes = torch.zeros_like(banks)
cur_bank_count = -1
cur_bank = banks[0]
for i, bank in enumerate(banks):
if bank != cur_bank:
cur_bank_count = 0
cur_bank = bank
else:
cur_bank_count += 1
stripes[i] = num_constraint_tokens - bank + stripe_offsets[cur_bank_count]
# STEP 7: Sort by the stripes values
sort_values, sort_indices = stripes.sort(dim=0)
scores_buf = scores_buf[sort_indices]
indices_buf = indices_buf[sort_indices]
beams_buf = beams_buf[sort_indices]
constraint_states = [constraint_states[i] for i in sort_indices]
# STEP 8: Truncate to the candidates size!
scores_buf = scores_buf[:self.num_cands]
indices_buf = indices_buf[:self.num_cands]
beams_buf = beams_buf[:self.num_cands]
return scores_buf, indices_buf, beams_buf, constraint_states
class LengthConstrainedBeamSearch(Search):
def __init__(self, tgt_dict, min_len_a, min_len_b, max_len_a, max_len_b):
super().__init__(tgt_dict)
self.min_len_a = min_len_a
self.min_len_b = min_len_b
self.max_len_a = max_len_a
self.max_len_b = max_len_b
self.beam = BeamSearch(tgt_dict)
self.needs_src_lengths = True
def step(self, step: int, lprobs, scores):
min_lens = self.min_len_a * self.src_lengths + self.min_len_b
max_lens = self.max_len_a * self.src_lengths + self.max_len_b
lprobs[step < min_lens, :, self.eos] = -math.inf
lprobs[step >= max_lens, :, self.eos] = 0
return self.beam.step(step, lprobs, scores)
class DiverseBeamSearch(Search):
"""Diverse Beam Search.
See "Diverse Beam Search: Decoding Diverse Solutions from Neural Sequence
Models" for details.
We only implement the Hamming Diversity penalty here, which performed best
in the original paper.
"""
def __init__(self, tgt_dict, num_groups, diversity_strength):
super().__init__(tgt_dict)
self.num_groups = num_groups
self.diversity_strength = -diversity_strength
self.beam = BeamSearch(tgt_dict)
@torch.jit.export
def step(self, step: int, lprobs, scores):
bsz, beam_size, vocab_size = lprobs.size()
if beam_size % self.num_groups != 0:
raise ValueError(
"DiverseBeamSearch requires --beam to be divisible by the number of groups"
)
# initialize diversity penalty
diversity_buf = torch.zeros(lprobs[:, 0, :].size()).to(lprobs)
scores_G, indices_G, beams_G = [], [], []
for g in range(self.num_groups):
lprobs_g = lprobs[:, g :: self.num_groups, :]
scores_g = scores[:, g :: self.num_groups, :] if step > 0 else None
# apply diversity penalty
if g > 0:
lprobs_g = torch.add(
lprobs_g,
other=diversity_buf.unsqueeze(1),
alpha=self.diversity_strength,
)
else:
lprobs_g = lprobs_g.contiguous()
scores_buf, indices_buf, beams_buf = self.beam.step(
step, lprobs_g, scores_g
)
beams_buf.mul_(self.num_groups).add_(g)
scores_G.append(scores_buf.clone())
indices_G.append(indices_buf.clone())
beams_G.append(beams_buf.clone())
# update diversity penalty
diversity_buf.scatter_add_(
1, indices_buf, torch.ones(indices_buf.size()).to(diversity_buf)
)
# interleave results from different groups
scores_buf = torch.stack(scores_G, dim=2).view(bsz, -1)
indices_buf = torch.stack(indices_G, dim=2).view(bsz, -1)
beams_buf = torch.stack(beams_G, dim=2).view(bsz, -1)
return scores_buf, indices_buf, beams_buf
class Sampling(Search):
sampling_topk: int
sampling_topp: float
def __init__(self, tgt_dict, sampling_topk=-1, sampling_topp=-1.0):
super().__init__(tgt_dict)
self.sampling_topk = sampling_topk
self.sampling_topp = sampling_topp
def _sample_topp(self, lprobs):
"""Sample among the smallest set of elements whose cumulative probability mass exceeds p.
See `"The Curious Case of Neural Text Degeneration"
(Holtzman et al., 2019) <https://arxiv.org/abs/1904.09751>`_.
Args:
lprobs: (bsz x input_beam_size x vocab_size)
the model's log-probabilities over the vocabulary at the current step
Return: A tuple of (trimed_probs, truncated_indices) where:
trimed_probs: (bsz x input_beam_size x ?)
the model's probabilities over the elements selected to sample from. The
width of the third dimension is determined by top-P.
truncated_indices: (bsz x input_beam_size x ?)
the indices of the chosen elements.
"""
probs = lprobs.exp_()
# sort the last dimension (vocab dimension) in descending order
sorted_probs, sorted_indices = probs.sort(descending=True)
# compute a mask to indicate the words to be included in the top-P set.
cumsum_probs = sorted_probs.cumsum(dim=2)
mask = cumsum_probs.lt(self.sampling_topp)
# note that mask was computed by 'lt'. One more word needs to be included
# so that the cumulative probability mass can exceed p.
cumsum_mask = mask.cumsum(dim=2)
last_included = cumsum_mask[:, :, -1:]
last_included.clamp_(0, mask.size()[2] - 1)
mask = mask.scatter_(2, last_included, 1)
# truncate unnecessary dims.
max_dim = last_included.max()
truncated_mask = mask[:, :, : max_dim + 1]
truncated_probs = sorted_probs[:, :, : max_dim + 1]
truncated_indices = sorted_indices[:, :, : max_dim + 1]
# trim the words that are not in top-P by setting their probabilities
# to 0, so that they would not be sampled later.
trim_mask = ~truncated_mask
trimed_probs = truncated_probs.masked_fill_(trim_mask, 0)
return trimed_probs, truncated_indices
@torch.jit.export
def step(self, step: int, lprobs, scores):
bsz, beam_size, vocab_size = lprobs.size()
if step == 0:
# at the first step all hypotheses are equally likely, so use
# only the first beam
lprobs = lprobs[:, ::beam_size, :].contiguous()
if self.sampling_topp > 0:
# only sample from the smallest set of words whose cumulative probability mass exceeds p
probs, top_indices = self._sample_topp(lprobs)
elif self.sampling_topk > 0:
# only sample from top-k candidates
lprobs, top_indices = lprobs.topk(self.sampling_topk)
probs = lprobs.exp_()
else:
probs = lprobs.exp_()
# dummy data to be consistent with true branch for type check
top_indices = torch.empty(0).to(probs)
# sample
if step == 0:
indices_buf = torch.multinomial(
probs.view(bsz, -1), beam_size, replacement=True,
).view(bsz, beam_size)
else:
indices_buf = torch.multinomial(
probs.view(bsz * beam_size, -1),
1,
replacement=True,
).view(bsz, beam_size)
if step == 0:
# expand to beam size
probs = probs.expand(bsz, beam_size, -1)
# gather scores
scores_buf = torch.gather(
probs, dim=2, index=indices_buf.unsqueeze(-1)
)
scores_buf = scores_buf.log_().view(bsz, -1)
# remap indices if using top-k or top-P sampling
if self.sampling_topk > 0 or self.sampling_topp > 0:
indices_buf = torch.gather(
top_indices.expand(bsz, beam_size, -1),
dim=2,
index=indices_buf.unsqueeze(-1),
).squeeze(2)
if step == 0:
beams_buf = indices_buf.new_zeros(bsz, beam_size)
else:
beams_buf = torch.arange(0, beam_size).to(indices_buf).repeat(bsz, 1)
# make scores cumulative
scores_buf.add_(
torch.gather(scores[:, :, step - 1], dim=1, index=beams_buf)
)
return scores_buf, indices_buf, beams_buf
class DiverseSiblingsSearch(Search):
"""
Beam search with diverse siblings.
See "A Simple, Fast Diverse Decoding Algorithm for Neural Generation" for details.
https://arxiv.org/abs/1611.08562
1/ Calculate hypotheses for each beam
2/ Intra-sibling ordering
3/ Rewrite scores
4/ Choose top K hypotheses
if diversity_rate == 0 is equivalent to BeamSearch
"""
def __init__(self, tgt_dict, diversity_rate):
super().__init__(tgt_dict)
self.diversity_rate = diversity_rate
self.beam = BeamSearch(tgt_dict)
def step(self, step: int, lprobs, scores):
bsz, beam_size, vocab_size = lprobs.size()
k = min(
# Take the best 2 x beam_size predictions. We'll choose the first
# beam_size of these which don't predict eos to continue with.
beam_size * 2,
lprobs.view(bsz, -1).size(1) - 1, # -1 so we never select pad
)
s_list: List[Tensor]
i_list: List[Tensor]
s_list = [torch.empty(0).to(lprobs) for i in range(beam_size)]
i_list = [torch.LongTensor().to(device=lprobs.device) for i in range(beam_size)]
sibling_score = torch.arange(1, k + 1).to(lprobs) * self.diversity_rate
if step == 0:
return self.beam.step(step, lprobs, scores)
lprobs.add_(scores[:, :, step - 1].unsqueeze(-1))
# 1/ Calculate hypotheses for each beam
for i in range(beam_size):
torch.topk(lprobs[:, i, :].view(bsz, -1), k, out=(s_list[i], i_list[i]))
i_list[i].fmod_(vocab_size)
# 2/ Intra-sibling ordering by default from topk + 3/ Rewrite scores
s_list[i].sub_(sibling_score)
# 4/ Choose top K hypotheses
indices = torch.stack(i_list, dim=1).view(bsz, -1)
final_scores = torch.empty(0).to(lprobs)
final_indices = torch.LongTensor().to(device=lprobs.device)
final_beams = torch.LongTensor().to(device=lprobs.device)
(final_scores, final_indices) = torch.topk(
torch.stack(s_list, dim=1).view(bsz, -1),
k,
)
final_beams = final_indices // k
for i in range(bsz):
final_indices[i] = indices[i][final_indices[i]]
return final_scores, final_indices, final_beams
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/file_utils.py | fairseq/file_utils.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Utilities for working with the local dataset cache.
This file is adapted from `AllenNLP <https://github.com/allenai/allennlp>`_.
and `huggingface <https://github.com/huggingface>`_.
"""
import fnmatch
from functools import wraps, partial
from hashlib import sha256
from io import open
import json
import logging
import os
import shutil
import tarfile
import tempfile
try:
from torch.hub import _get_torch_home
torch_cache_home = _get_torch_home()
except ImportError:
torch_cache_home = os.path.expanduser(
os.getenv('TORCH_HOME', os.path.join(
os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch')))
default_cache_path = os.path.join(torch_cache_home, 'pytorch_fairseq')
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
try:
from pathlib import Path
PYTORCH_FAIRSEQ_CACHE = Path(
os.getenv('PYTORCH_FAIRSEQ_CACHE', default_cache_path))
except (AttributeError, ImportError):
PYTORCH_FAIRSEQ_CACHE = os.getenv(
'PYTORCH_FAIRSEQ_CACHE', default_cache_path)
CONFIG_NAME = "config.json"
WEIGHTS_NAME = "pytorch_model.bin"
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def load_archive_file(archive_file):
# redirect to the cache, if necessary
try:
resolved_archive_file = cached_path(archive_file, cache_dir=None)
except EnvironmentError:
logger.info(
"Archive name '{}' was not found in archive name list. "
"We assumed '{}' was a path or URL but couldn't find any file "
"associated to this path or URL.".format(
archive_file,
archive_file,
)
)
return None
if resolved_archive_file == archive_file:
logger.info("loading archive file {}".format(archive_file))
else:
logger.info("loading archive file {} from cache at {}".format(
archive_file, resolved_archive_file))
# Extract archive to temp dir and replace .tar.bz2 if necessary
tempdir = None
if not os.path.isdir(resolved_archive_file):
tempdir = tempfile.mkdtemp()
logger.info("extracting archive file {} to temp dir {}".format(
resolved_archive_file, tempdir))
ext = os.path.splitext(archive_file)[1][1:]
with tarfile.open(resolved_archive_file, 'r:' + ext) as archive:
top_dir = os.path.commonprefix(archive.getnames())
archive.extractall(tempdir)
os.remove(resolved_archive_file)
shutil.move(os.path.join(tempdir, top_dir), resolved_archive_file)
shutil.rmtree(tempdir)
return resolved_archive_file
def url_to_filename(url, etag=None):
"""
Convert `url` into a hashed filename in a repeatable way.
If `etag` is specified, append its hash to the URL's, delimited
by a period.
"""
url_bytes = url.encode('utf-8')
url_hash = sha256(url_bytes)
filename = url_hash.hexdigest()
if etag:
etag_bytes = etag.encode('utf-8')
etag_hash = sha256(etag_bytes)
filename += '.' + etag_hash.hexdigest()
return filename
def filename_to_url(filename, cache_dir=None):
"""
Return the url and etag (which may be ``None``) stored for `filename`.
Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist.
"""
if cache_dir is None:
cache_dir = PYTORCH_FAIRSEQ_CACHE
if isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
cache_path = os.path.join(cache_dir, filename)
if not os.path.exists(cache_path):
raise EnvironmentError("file {} not found".format(cache_path))
meta_path = cache_path + '.json'
if not os.path.exists(meta_path):
raise EnvironmentError("file {} not found".format(meta_path))
with open(meta_path, encoding="utf-8") as meta_file:
metadata = json.load(meta_file)
url = metadata['url']
etag = metadata['etag']
return url, etag
def cached_path(url_or_filename, cache_dir=None):
"""
Given something that might be a URL (or might be a local path),
determine which. If it's a URL, download the file and cache it, and
return the path to the cached file. If it's already a local path,
make sure the file exists and then return the path.
"""
if cache_dir is None:
cache_dir = PYTORCH_FAIRSEQ_CACHE
if isinstance(url_or_filename, Path):
url_or_filename = str(url_or_filename)
if isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
parsed = urlparse(url_or_filename)
if parsed.scheme in ('http', 'https', 's3'):
# URL, so get it from the cache (downloading if necessary)
return get_from_cache(url_or_filename, cache_dir)
elif os.path.exists(url_or_filename):
# File, and it exists.
return url_or_filename
elif parsed.scheme == '':
# File, but it doesn't exist.
raise EnvironmentError("file {} not found".format(url_or_filename))
else:
# Something unknown
raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
def split_s3_path(url):
"""Split a full s3 path into the bucket name and path."""
parsed = urlparse(url)
if not parsed.netloc or not parsed.path:
raise ValueError("bad s3 path {}".format(url))
bucket_name = parsed.netloc
s3_path = parsed.path
# Remove '/' at beginning of path.
if s3_path.startswith("/"):
s3_path = s3_path[1:]
return bucket_name, s3_path
def s3_request(func):
"""
Wrapper function for s3 requests in order to create more helpful error
messages.
"""
@wraps(func)
def wrapper(url, *args, **kwargs):
from botocore.exceptions import ClientError
try:
return func(url, *args, **kwargs)
except ClientError as exc:
if int(exc.response["Error"]["Code"]) == 404:
raise EnvironmentError("file {} not found".format(url))
else:
raise
return wrapper
@s3_request
def s3_etag(url):
"""Check ETag on S3 object."""
import boto3
s3_resource = boto3.resource("s3")
bucket_name, s3_path = split_s3_path(url)
s3_object = s3_resource.Object(bucket_name, s3_path)
return s3_object.e_tag
@s3_request
def s3_get(url, temp_file):
"""Pull a file directly from S3."""
import boto3
s3_resource = boto3.resource("s3")
bucket_name, s3_path = split_s3_path(url)
s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)
def request_wrap_timeout(func, url):
import requests
for attempt, timeout in enumerate([10, 20, 40, 60, 60]):
try:
return func(timeout=timeout)
except requests.exceptions.Timeout as e:
logger.warning("Request for %s timed-out (attempt %d). Retrying with a timeout of %d secs",
url, attempt, timeout, exc_info=e)
continue
raise RuntimeError(f"Unable to fetch file {url}")
def http_get(url, temp_file):
import requests
from tqdm import tqdm
req = request_wrap_timeout(partial(requests.get, url, stream=True), url)
content_length = req.headers.get('Content-Length')
total = int(content_length) if content_length is not None else None
progress = tqdm(unit="B", total=total)
for chunk in req.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
progress.update(len(chunk))
temp_file.write(chunk)
progress.close()
def get_from_cache(url, cache_dir=None):
"""
Given a URL, look for the corresponding dataset in the local cache.
If it's not there, download it. Then return the path to the cached file.
"""
if cache_dir is None:
cache_dir = PYTORCH_FAIRSEQ_CACHE
if isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
# Get eTag to add to filename, if it exists.
if url.startswith("s3://"):
etag = s3_etag(url)
else:
try:
import requests
response = request_wrap_timeout(partial(requests.head, url, allow_redirects=True), url)
if response.status_code != 200:
etag = None
else:
etag = response.headers.get("ETag")
except EnvironmentError:
etag = None
filename = url_to_filename(url, etag)
# get cache path to put the file
cache_path = os.path.join(cache_dir, filename)
# If we don't have a connection (etag is None) and can't identify the file
# try to get the last downloaded one
if not os.path.exists(cache_path) and etag is None:
matching_files = fnmatch.filter(os.listdir(cache_dir), filename + '.*')
matching_files = list(filter(lambda s: not s.endswith('.json'), matching_files))
if matching_files:
cache_path = os.path.join(cache_dir, matching_files[-1])
if not os.path.exists(cache_path):
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with tempfile.NamedTemporaryFile() as temp_file:
logger.info("%s not found in cache, downloading to %s", url, temp_file.name)
# GET file object
if url.startswith("s3://"):
s3_get(url, temp_file)
else:
http_get(url, temp_file)
# we are copying the file before closing it, so flush to avoid truncation
temp_file.flush()
# shutil.copyfileobj() starts at the current position, so go to the start
temp_file.seek(0)
logger.info("copying %s to cache at %s", temp_file.name, cache_path)
with open(cache_path, 'wb') as cache_file:
shutil.copyfileobj(temp_file, cache_file)
logger.info("creating metadata file for %s", cache_path)
meta = {'url': url, 'etag': etag}
meta_path = cache_path + '.json'
with open(meta_path, 'w') as meta_file:
output_string = json.dumps(meta)
meta_file.write(output_string)
logger.info("removing temp file %s", temp_file.name)
return cache_path
def read_set_from_file(filename):
'''
Extract a de-duped collection (set) of text from a file.
Expected file format is one item per line.
'''
collection = set()
with open(filename, 'r', encoding='utf-8') as file_:
for line in file_:
collection.add(line.rstrip())
return collection
def get_file_extension(path, dot=True, lower=True):
ext = os.path.splitext(path)[1]
ext = ext if dot else ext[1:]
return ext.lower() if lower else ext
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/registry.py | fairseq/registry.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
REGISTRIES = {}
def setup_registry(
registry_name: str,
base_class=None,
default=None,
required=False,
):
assert registry_name.startswith('--')
registry_name = registry_name[2:].replace('-', '_')
REGISTRY = {}
REGISTRY_CLASS_NAMES = set()
# maintain a registry of all registries
if registry_name in REGISTRIES:
return # registry already exists
REGISTRIES[registry_name] = {
'registry': REGISTRY,
'default': default,
}
def build_x(args, *extra_args, **extra_kwargs):
choice = getattr(args, registry_name, None)
if choice is None:
if required:
raise ValueError('--{} is required!'.format(registry_name))
return None
cls = REGISTRY[choice]
if hasattr(cls, 'build_' + registry_name):
builder = getattr(cls, 'build_' + registry_name)
else:
builder = cls
set_defaults(args, cls)
return builder(args, *extra_args, **extra_kwargs)
def register_x(name):
def register_x_cls(cls):
if name in REGISTRY:
raise ValueError('Cannot register duplicate {} ({})'.format(registry_name, name))
if cls.__name__ in REGISTRY_CLASS_NAMES:
raise ValueError(
'Cannot register {} with duplicate class name ({})'.format(
registry_name, cls.__name__,
)
)
if base_class is not None and not issubclass(cls, base_class):
raise ValueError('{} must extend {}'.format(cls.__name__, base_class.__name__))
REGISTRY[name] = cls
REGISTRY_CLASS_NAMES.add(cls.__name__)
return cls
return register_x_cls
return build_x, register_x, REGISTRY
def set_defaults(args, cls):
"""Helper to set default arguments based on *add_args*."""
if not hasattr(cls, 'add_args'):
return
parser = argparse.ArgumentParser(argument_default=argparse.SUPPRESS, allow_abbrev=False)
cls.add_args(parser)
# copied from argparse.py:
defaults = argparse.Namespace()
for action in parser._actions:
if action.dest is not argparse.SUPPRESS:
if not hasattr(defaults, action.dest):
if action.default is not argparse.SUPPRESS:
setattr(defaults, action.dest, action.default)
for key, default_value in vars(defaults).items():
if not hasattr(args, key):
setattr(args, key, default_value)
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/incremental_decoding_utils.py | fairseq/incremental_decoding_utils.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, Optional
import uuid
from torch import Tensor
class FairseqIncrementalState(object):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.init_incremental_state()
def init_incremental_state(self):
self._incremental_state_id = str(uuid.uuid4())
def _get_full_incremental_state_key(self, key: str) -> str:
return "{}.{}".format(self._incremental_state_id, key)
def get_incremental_state(
self,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],
key: str,
) -> Optional[Dict[str, Optional[Tensor]]]:
"""Helper for getting incremental state for an nn.Module."""
full_key = self._get_full_incremental_state_key(key)
if incremental_state is None or full_key not in incremental_state:
return None
return incremental_state[full_key]
def set_incremental_state(
self,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],
key: str,
value: Dict[str, Optional[Tensor]],
) -> Optional[Dict[str, Dict[str, Optional[Tensor]]]]:
"""Helper for setting incremental state for an nn.Module."""
if incremental_state is not None:
full_key = self._get_full_incremental_state_key(key)
incremental_state[full_key] = value
return incremental_state
def with_incremental_state(cls):
cls.__bases__ = (FairseqIncrementalState,) + tuple(b for b in cls.__bases__ if b != FairseqIncrementalState)
return cls
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/nan_detector.py | fairseq/nan_detector.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import torch
logger = logging.getLogger(__name__)
class NanDetector:
"""
Detects the first NaN or Inf in forward and/or backward pass and logs, together with the module name
"""
def __init__(self, model, forward=True, backward=True):
self.bhooks = []
self.fhooks = []
self.forward = forward
self.backward = backward
self.model = model
self.reset()
for name, mod in model.named_modules():
mod.__module_name = name
self.add_hooks(mod)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
# Dump out all model gnorms to enable better debugging
norm = {}
gradients = {}
for name, param in self.model.named_parameters():
grad_norm = torch.norm(param.grad.data, p=2, dtype=torch.float32)
norm[name] = grad_norm.item()
if torch.isnan(grad_norm).any() or torch.isinf(grad_norm).any():
gradients[name] = param.grad.data
if len(gradients) > 0:
logger.info("Detected nan/inf grad norm, dumping norms...")
logger.info(f"norms: {norm}")
logger.info(f"gradients: {gradients}")
self.close()
def add_hooks(self, module):
if self.forward:
self.fhooks.append(module.register_forward_hook(self.fhook_fn))
if self.backward:
self.bhooks.append(module.register_backward_hook(self.bhook_fn))
def reset(self):
self.has_printed_f = False
self.has_printed_b = False
def _detect(self, tensor, name, backward):
err = None
if (
torch.is_floating_point(tensor)
# single value tensors (like the loss) will not provide much info
and tensor.numel() >= 2
):
with torch.no_grad():
if torch.isnan(tensor).any():
err = "NaN"
elif torch.isinf(tensor).any():
err = "Inf"
if err is not None:
err = f"{err} detected in output of {name}, shape: {tensor.shape}, {'backward' if backward else 'forward'}"
return err
def _apply(self, module, inp, x, backward):
if torch.is_tensor(x):
if isinstance(inp, tuple) and len(inp) > 0:
inp = inp[0]
err = self._detect(x, module.__module_name, backward)
if err is not None:
if torch.is_tensor(inp) and not backward:
err += (
f" input max: {inp.max().item()}, input min: {inp.min().item()}"
)
has_printed_attr = 'has_printed_b' if backward else 'has_printed_f'
logger.warning(err)
setattr(self, has_printed_attr, True)
elif isinstance(x, dict):
for v in x.values():
self._apply(module, inp, v, backward)
elif isinstance(x, list) or isinstance(x, tuple):
for v in x:
self._apply(module, inp, v, backward)
def fhook_fn(self, module, inp, output):
if not self.has_printed_f:
self._apply(module, inp, output, backward=False)
def bhook_fn(self, module, inp, output):
if not self.has_printed_b:
self._apply(module, inp, output, backward=True)
def close(self):
for hook in self.fhooks + self.bhooks:
hook.remove()
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/token_generation_constraints.py | fairseq/token_generation_constraints.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Implements tracking of constraints for a beam item.
A list of constraints is given as a list of one or more token
sequences, each of length at least one token. For example, for an input sentence
> Die maschinelle Übersetzung ist schwer zu kontrollieren.
We could have the constraints:
* to influence
* hard
There are two implementations:
* OrderedConstraintState: Tracks progress through an ordered list of multitoken constraints.
* UnorderedConstraintState: Tracks progress through an unordered list of multitoken constraints.
The difference is that in the first, the constraints are assumed to be
in order; the algorithm will permit zero or more tokens between them.
In the second, the constraints are not ordered, so many orderings will
be explored.
The same sequence can be present any number of times, and will appear
that many times in the output.
"""
import torch
from collections import Counter
from typing import Tuple, List, Optional, Set
class ConstraintState:
def __init__(self):
pass
def pack_constraints(batch_constraints: List[List[torch.Tensor]]) -> torch.Tensor:
"""Takes a list of list of constraints in tensor form (a list of
tensor constraints for each sentence) and transforms it into a
packed Tensor. For example, here is a batch of size 3 with 3, 0,
and 1 constraints:
[ [ [3 1 2], [3], [4 5 6 7], ]
[],
[ [1 8 9 10 1 4 11 12], ]
]
Its corresponding packed structure is:
[ [ 3 3 1 2 0 3 0 4 5 6 7 0],
[ 0 0 0 0 0 0 0 0 0 0 0 0],
[ 1 1 8 9 10 1 4 11 12 0 0 0] ]
The packed tensor has shape (batch size, maxlen), where
maxlen is defined below. Each row contains concatenated
constraint tokens for that sentence, with 0 appended after
each constraint. The first item in each row is the number
of constraints for that sentence. So maxlen is the maximum
of
(number of constraints) + (sum length of constraints) + 1.
across all sentences in the batch.
"""
# The maximum word length of concatenated constraints for any sentence
max_constraints_len = 1
for sentence_constraints in batch_constraints:
if len(sentence_constraints):
# number of constraints, plus sum of constrain lens, plus a zero after each
constraints_len = 1 + sum([c.size(0) for c in sentence_constraints]) + len(sentence_constraints)
max_constraints_len = max(max_constraints_len, constraints_len)
batch_size = len(batch_constraints)
constraints_tensor = torch.zeros((batch_size, max_constraints_len)).long()
for i, sentence_constraints in enumerate(batch_constraints):
constraints_tensor[i, 0] = len(sentence_constraints)
offset = 1
for j, constraint in enumerate(sentence_constraints):
this_len = constraint.size(0)
constraints_tensor[i, offset:offset+this_len] = constraint
offset += this_len + 1
return constraints_tensor.long()
def unpack_constraints(constraint_tensor: torch.Tensor) -> List[torch.Tensor]:
"""
Transforms *one row* of a packed constraint tensor (e.g., for one
sentence in the batch) into a list of constraint tensors.
"""
constraint_list = []
num_constraints = constraint_tensor[0]
constraints = constraint_tensor.tolist()
offset = 1
for i in range(num_constraints):
where = constraints.index(0, offset)
constraint_list.append(constraint_tensor[offset:where])
offset = where + 1
return constraint_list
class ConstraintNode:
"""
Represents a node in a trie managing unordered constraints.
"""
def __init__(self, token: int = None, parent=None):
# The token associate with this node (None for the root)
self.token = int(token) if token is not None else None
# The parent (None at the root)
self.parent = parent
# Whether this node is a completed constraint
self.terminal = 0
# List of child nodes
self.children = {}
# The cumulative number of constraints from this point in the
# trie forward
self.num_constraints = 0
@property
def id(self):
return self.token
def __str__(self):
term = self.terminal != 0
return f"[{self.token}].{term}#{self.num_constraints}"
def __getitem__(self, key: int):
return self.children.get(key, None)
def next_tokens(self) -> Set[int]:
"""The set of child labels."""
return set(self.children.keys())
@staticmethod
def create(constraints: List[List[int]]):
root = ConstraintNode()
for sequence in constraints:
root.add_sequence(sequence)
return root
@staticmethod
def print_graph(node: "ConstraintNode"):
if len(node.children) == 0:
return str(node)
else:
s = f"({node}"
for child in node.children.values():
s += " " + ConstraintNode.print_graph(child)
s += ")"
return s
def token_counts(self) -> Counter:
"""Returns a counter of the number of times each token is used
in a constraint.
"""
token_counts = Counter()
kids = list(self.children.values())
while len(kids) > 0:
kid = kids.pop()
token_counts[kid.id] += kid.num_constraints
kids += list(kid.children.values())
return token_counts
def tokens(self) -> Set[int]:
"""Returns the set of tokens in constraints."""
return set(self.token_counts().keys())
def add_sequence(self, sequence: List[int]):
"""Adds a constraint, represented as a list of integers, to
the trie."""
assert len(sequence) > 0
token = int(sequence[0])
if token not in self.children:
self.children[token] = ConstraintNode(token, parent=self)
node = self.children[token]
if len(sequence) == 1:
node.terminal += 1
node.num_constraints += 1
parent = node.parent
while parent is not None:
parent.num_constraints += 1
parent = parent.parent
else:
node.add_sequence(sequence[1:])
class UnorderedConstraintState(ConstraintState):
"""
Records progress through the set of constraints for each item in the beam
using a trie.
"""
def __init__(self,
node: ConstraintNode,
copy_from: "ConstraintState" = None):
self.node = node
if copy_from is None:
# The root node
self.root = node
# The set of states in the graph that have been completed
self.completed = Counter()
# The...
self.generated = Counter()
# The list of tokens we need to generate
self.needed_tokens = self.root.tokens()
else:
self.completed = Counter(copy_from.completed)
self.generated = Counter(copy_from.generated)
self.root = copy_from.root
# Mark the node as generated
if self.node != self.root:
self.generated[node] += 1
@staticmethod
def create(constraint_tensor: torch.Tensor):
constraint_list = unpack_constraints(constraint_tensor)
constraint_trie_root = ConstraintNode.create(constraint_list)
return UnorderedConstraintState(constraint_trie_root)
def __str__(self):
gen_str = ",".join([str(node) for node in self.generated])
return f"{self.name}/{self.bank}({gen_str})x{self.num_completed}"
def __copy__(self):
copied_state = UnorderedConstraintState(self.node, copy_from=self)
return copied_state
def copy(self):
return self.__copy__()
@property
def name(self):
if self.node.id is None:
return "ROOT"
else:
return str(self.node.id)
@property
def is_root(self):
return self.node == self.root
@property
def bank(self):
return sum(self.generated.values())
@property
def num_completed(self):
"""The number of constraints (not constraint tokens) that are completed.
In addition to the already-completed states, we need to account for the
current state, which might get marked as completed when another token
is generated.
"""
in_final = self.node.terminal and self.completed[self.node] < self.node.terminal
return sum(self.completed.values()) + in_final
@property
def finished(self):
return self.root.num_constraints - self.num_completed == 0
@property
def token_counts(self):
return self.root.token_counts()
@property
def tokens(self):
return self.root.tokens()
@property
def num_constraint_tokens(self):
return sum(self.token_counts.values())
def next_tokens(self) -> Set[int]:
"""Returns the list of tokens that could come next.
These are (a) all tokens extending the root state and, for
non-root states, additionally all tokens extending the current
state."""
if self.node != self.root:
return self.root.next_tokens().union(self.node.next_tokens())
else:
return self.root.next_tokens()
def advance(self, token: int):
"""Reads in a token and advances the state. Here's how it works.
We can advance to the next state if:
- there is a matching child
- its path isn't blocked
A path is blocked when all constraints that are descendants of
that node have already been generated, in the current state.
If we are not able to advance from the current state, we "fall
off the graph" and return to the root state. There, we again
try to advance, checking the same criteria.
In any case, when falling off the graph, we need to do some
bookkeeping. We:
- check whether any constraints were met (all prefixes of
current state)
- if one is found, mark it as completed
- adjust visited nodes accordingly
"""
token = int(token)
next_state = None
child = self.node[token]
if child is not None and self.generated[child] < child.num_constraints:
next_state = UnorderedConstraintState(child, copy_from=self)
def rewind():
"""If we're mid-trie and an "illegal" token is chosen next, we need
to reset our state to the root state. However, along the way, we need
to check whether a prefix of the current trie state represents a state
we could mark as completed.
"""
node = self.node
while node != self.root:
if node.terminal and self.completed[node] < node.terminal:
next_state.completed[node] += 1
return
next_state.generated[node] -= 1
node = node.parent
# Fall off the graph, check the root
if next_state is None and token in self.root.next_tokens():
child = self.root[token]
# We can only traverse this edge if it's not saturated
if self.generated[child] < child.num_constraints:
next_state = UnorderedConstraintState(child, copy_from=self)
else:
next_state = UnorderedConstraintState(self.root, copy_from=self)
# Rewind
rewind()
elif next_state is None:
next_state = UnorderedConstraintState(self.root, copy_from=self)
# Rewind
rewind()
return next_state
class ConstraintSequence:
def __init__(self, sequences: List[List[int]]):
"""Represents a set of possibly multitoken constraints by
concatenating them and internally recording the end points.
"""
self.sequences = []
self.endpoints = []
self.num_tokens = 0
self.tokens = set()
for sequence in sequences:
for token in sequence:
self.tokens.add(token)
self.num_tokens += len(sequence)
self.endpoints += [False for x in range(len(sequence) - 1)] + [True]
self.sequences += sequence
def __getitem__(self, key: int):
return self.sequences[key]
def __len__(self):
return len(self.sequences)
def __str__(self):
return str(self.sequences)
class OrderedConstraintState(ConstraintState):
"""
Records progress through the set of linear nonbranching constraints with gaps.
"""
def __init__(self,
sequence: ConstraintSequence,
state: int = -1):
self.sequence = sequence
self.state = state
@staticmethod
def create(constraint_tensor: torch.Tensor):
constraint_list = unpack_constraints(constraint_tensor)
return OrderedConstraintState(ConstraintSequence(constraint_list), -1)
def __str__(self):
return f"{self.state}/{self.bank}x{self.num_completed}"
def __copy__(self):
return OrderedConstraintState(self.sequence, self.state)
def copy(self):
return self.__copy__()
@property
def num_completed(self):
if self.state == -1:
return 0
count = len(list(filter(lambda x: x, self.sequence.endpoints[0:self.state+1])))
return count
@property
def is_root(self):
return self.state == -1
@property
def name(self):
if self.state == -1:
return "ROOT"
else:
return str(self.sequence[self.state])
@property
def bank(self) -> int:
return self.state + 1
@property
def finished(self):
return self.state + 1 == len(self.sequence)
@property
def token_counts(self):
return self.sequence.token_counts()
@property
def tokens(self):
return self.sequence.tokens
@property
def num_constraint_tokens(self):
return sum(self.token_counts.values())
def next_tokens(self) -> Set[int]:
"""Returns the list of tokens that could come next.
These are (a) all tokens extending the root state and, for
non-root states, additionally all tokens extending the current
state."""
tokens = set()
if self.state > 0:
tokens.add(self.sequence[0])
if not self.finished:
tokens.add(self.sequence[self.state + 1])
return tokens
def advance(self, token: int):
"""Reads in a token and advances the state. Here's how it works.
We can advance to the next state if:
- there is a matching child
- its path isn't blocked
A path is blocked when all constraints that are descendants of
that node have already been generated, in the current state.
If we are not able to advance from the current state, we "fall
off the graph" and return to the root state. There, we again
try to advance, checking the same criteria.
In any case, when falling off the graph, we need to do some
bookkeeping. We:
- check whether any constraints were met (all prefixes of
current state)
- if one is found, mark it as completed
- adjust visited nodes accordingly
"""
token = int(token)
# print(f"{self} ADVANCE({token}) {self.sequence} -> ", end="")
if self.finished:
# Accept anything
next_state = self.copy()
elif self.sequence[self.state + 1] == token:
# Advance to the next token
next_state = OrderedConstraintState(self.sequence, self.state + 1)
elif self.sequence.endpoints[self.state]:
# Accept anything between constraints (*)
next_state = self.copy()
elif token == self.sequence[0]:
# Start over having generated the first token
next_state = OrderedConstraintState(self.sequence, 0)
else:
# Start over from the root
next_state = OrderedConstraintState(self.sequence, -1)
return next_state
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/distributed_utils.py | fairseq/distributed_utils.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import pickle
import random
import socket
import struct
import subprocess
import warnings
from collections import OrderedDict
from typing import Any, Dict, Mapping
import torch
import torch.distributed as dist
from fairseq import utils
logger = logging.getLogger(__name__)
def is_master(args):
return args.distributed_rank == 0
def infer_init_method(args, force_distributed=False):
if args.distributed_init_method is not None or getattr(args, 'tpu', False):
return
if args.pipeline_model_parallel:
if args.pipeline_balance is None:
raise ValueError('--pipeline-balance is currently required for pipeline model parallelism')
if args.pipeline_devices is None:
raise ValueError('--pipeline-devices is currently required for pipeline model parallelism')
gpus_per_node = torch.cuda.device_count()
num_pipeline_devices = len(set(args.pipeline_devices))
assert gpus_per_node >= num_pipeline_devices and gpus_per_node % num_pipeline_devices == 0, (
'the number of unique device IDs in --pipeline-devices must evenly divide '
'the number of GPUs per node (multi-node pipelining is not yet supported)'
)
num_pipelines_per_node = gpus_per_node // num_pipeline_devices
# support torch.distributed.launch
if all(key in os.environ for key in [
'MASTER_ADDR', 'MASTER_PORT', 'WORLD_SIZE', 'RANK'
]):
args.distributed_init_method = 'env://'
args.distributed_world_size = int(os.environ['WORLD_SIZE'])
args.distributed_rank = int(os.environ['RANK'])
# processes are created by torch.distributed.launch
args.distributed_no_spawn = True
# we can determine the init method automatically for Slurm
elif args.distributed_port > 0:
node_list = os.environ.get('SLURM_STEP_NODELIST')
if node_list is None:
node_list = os.environ.get('SLURM_JOB_NODELIST')
if node_list is not None:
try:
hostnames = subprocess.check_output(['scontrol', 'show', 'hostnames', node_list])
args.distributed_init_method = 'tcp://{host}:{port}'.format(
host=hostnames.split()[0].decode('utf-8'),
port=args.distributed_port,
)
nnodes = int(os.environ.get('SLURM_NNODES'))
ntasks_per_node = os.environ.get('SLURM_NTASKS_PER_NODE')
if ntasks_per_node is not None:
ntasks_per_node = int(ntasks_per_node)
else:
ntasks = int(os.environ.get('SLURM_NTASKS'))
nnodes = int(os.environ.get('SLURM_NNODES'))
assert ntasks % nnodes == 0
ntasks_per_node = int(ntasks / nnodes)
if ntasks_per_node == 1:
gpus_per_node = torch.cuda.device_count()
node_id = int(os.environ.get('SLURM_NODEID'))
args.distributed_rank = node_id * gpus_per_node
args.distributed_world_size = nnodes * gpus_per_node
elif args.pipeline_model_parallel:
assert ntasks_per_node == num_pipelines_per_node, (
'SLURM --ntasks-per-node must match number of pipelines per '
'node (={})'.format(num_pipelines_per_node)
)
args.distributed_no_spawn = True
# For 4-way MP on nodes with 8 GPUs, ranks will be [0, 1] on
# the first node, [1, 2] on the second node, etc. This
# matches torch.distributed.launch.
node_id = int(os.environ.get('SLURM_NODEID'))
local_id = int(os.environ.get('SLURM_LOCALID'))
args.distributed_rank = node_id * num_pipelines_per_node + local_id
# In the above example, device_id will always be in [0, 1],
# which also matches torch.distributed.launch.
args.device_id = local_id
# We also want to set distributed_world_size to be the total
# number of pipelines across all nodes.
args.distributed_world_size = nnodes * num_pipelines_per_node
else:
assert ntasks_per_node == args.distributed_world_size // nnodes
args.distributed_no_spawn = True
args.distributed_rank = int(os.environ.get('SLURM_PROCID'))
args.device_id = int(os.environ.get('SLURM_LOCALID'))
except subprocess.CalledProcessError as e: # scontrol failed
raise e
except FileNotFoundError: # Slurm is not installed
pass
elif args.distributed_world_size > 1 or force_distributed:
# fallback for single node with multiple GPUs
assert args.distributed_world_size <= torch.cuda.device_count()
port = random.randint(10000, 20000)
args.distributed_init_method = 'tcp://localhost:{port}'.format(port=port)
if args.pipeline_model_parallel:
if not args.distributed_no_spawn:
# When distributed_no_spawn is False, we expect distributed_rank and
# distributed_world_size to be based on the total number of GPUs, so
# we need to correct them to be based on the number of pipelines.
assert args.distributed_world_size % num_pipeline_devices == 0
args.distributed_world_size = args.distributed_world_size // num_pipeline_devices
# In the case of 4-way MP on nodes with 8 GPUs, we want
# distributed_rank to be the starting GPU index for each pipeline
# i.e., 0, 2, ...
assert args.distributed_rank % gpus_per_node == 0
assert args.distributed_rank % num_pipeline_devices == 0
args.distributed_rank = args.distributed_rank // num_pipeline_devices
# launch one process per pipeline
args.distributed_num_procs = num_pipelines_per_node
# if we have 4-way MP on a node with 8 GPUs, we want device_ids to be 0
# and 4, indicating the starting device IDs for each pipeline
args.device_id *= num_pipeline_devices
if args.device_id > 0:
# if there's multiple pipelines on a node (e.g., 4-way MP on an 8
# GPU node), we need to adjust pipeline_devices accordingly
logger.debug(
"setting CUDA device={} on rank {}"
.format(args.device_id, args.distributed_rank)
)
torch.cuda.set_device(args.device_id)
args.pipeline_devices = [args.device_id + d for d in args.pipeline_devices]
logger.info(
"setting pipeline_devices={} on rank {}"
.format(args.pipeline_devices, args.distributed_rank),
)
elif not args.distributed_no_spawn:
args.distributed_num_procs = min(
torch.cuda.device_count(),
args.distributed_world_size,
)
def distributed_init(args):
if not getattr(args, 'tpu', False):
if torch.distributed.is_initialized():
warnings.warn('Distributed is already initialized, cannot initialize twice!')
else:
logger.info('distributed init (rank {}): {}'.format(
args.distributed_rank, args.distributed_init_method,
))
dist.init_process_group(
backend=args.distributed_backend,
init_method=args.distributed_init_method,
world_size=args.distributed_world_size,
rank=args.distributed_rank,
)
logger.info('initialized host {} as rank {}'.format(
socket.gethostname(), args.distributed_rank,
))
# perform a dummy all-reduce to initialize the NCCL communicator
if torch.cuda.is_available():
dist.all_reduce(torch.zeros(1).cuda())
args.distributed_rank = torch.distributed.get_rank()
else:
import torch_xla.core.xla_model as xm
assert xm.xrt_world_size() == args.distributed_world_size
args.device_id = xm.get_local_ordinal()
args.distributed_rank = xm.get_ordinal()
xm.rendezvous('distributed_init') # wait for all workers
xm.mark_step()
if not is_master(args):
logging.getLogger().setLevel(logging.WARNING)
if args.model_parallel_size > 1:
try:
from fairseq.model_parallel.megatron.mpu import (
get_model_parallel_rank,
initialize_model_parallel,
model_parallel_cuda_manual_seed,
)
except ImportError:
raise ImportError(
'\n\nPlease install the megatron submodule:'
'\n\n git submodule update --init '
'fairseq/model_parallel/megatron'
)
initialize_model_parallel(args.model_parallel_size)
model_parallel_cuda_manual_seed(args.seed)
model_part_number = get_model_parallel_rank()
args.checkpoint_suffix += '-model_part-{0}'.format(model_part_number)
return args.distributed_rank
def distributed_main(i, main, args, kwargs):
args.device_id = i
if torch.cuda.is_available() and not args.cpu and not getattr(args, "tpu", False):
torch.cuda.set_device(args.device_id)
if args.distributed_rank is None: # torch.multiprocessing.spawn
args.distributed_rank = kwargs.pop('start_rank', 0) + i
args.distributed_rank = distributed_init(args)
after_distributed_init_fn = kwargs.pop('after_distributed_init_fn', None)
if after_distributed_init_fn:
args = after_distributed_init_fn(args)
main(args, **kwargs)
def call_main(args, main, **kwargs):
if args.distributed_init_method is None:
infer_init_method(args)
if args.distributed_init_method is not None:
# distributed training
if not args.distributed_no_spawn:
start_rank = args.distributed_rank
args.distributed_rank = None # assign automatically
kwargs['start_rank'] = start_rank
torch.multiprocessing.spawn(
fn=distributed_main,
args=(main, args, kwargs),
nprocs=args.distributed_num_procs,
)
else:
distributed_main(args.device_id, main, args, kwargs)
elif getattr(args, "tpu", False):
import torch_xla.distributed.xla_multiprocessing as xmp
torch.multiprocessing.set_sharing_strategy("file_system")
xmp.spawn(
fn=distributed_main,
args=(main, args, kwargs),
nprocs=8, # use all 8 TPU cores
)
else:
# single GPU main
main(args, **kwargs)
def get_rank():
return dist.get_rank()
def get_world_size():
return dist.get_world_size()
def get_default_group():
return dist.group.WORLD
def all_reduce(tensor, group=None):
if isinstance(group, tuple) and group[0] == 'tpu':
import torch_xla.core.xla_model as xm
return xm.all_reduce('sum', [tensor], groups=group[1])
else:
if group is None:
group = get_default_group()
return dist.all_reduce(tensor, group=group)
def all_gather_list(data, group=None, max_size=16384):
"""Gathers arbitrary data from all nodes into a list.
Similar to :func:`~torch.distributed.all_gather` but for arbitrary Python
data. Note that *data* must be picklable.
Args:
data (Any): data from the local worker to be gathered on other workers
group (optional): group of the collective
max_size (int, optional): maximum size of the data to be gathered
across workers
"""
rank = get_rank()
world_size = get_world_size()
buffer_size = max_size * world_size
if not hasattr(all_gather_list, '_buffer') or \
all_gather_list._buffer.numel() < buffer_size:
all_gather_list._buffer = torch.cuda.ByteTensor(buffer_size)
all_gather_list._cpu_buffer = torch.ByteTensor(max_size).pin_memory()
buffer = all_gather_list._buffer
buffer.zero_()
cpu_buffer = all_gather_list._cpu_buffer
data = utils.move_to_cpu(data)
enc = pickle.dumps(data)
enc_size = len(enc)
header_size = 4 # size of header that contains the length of the encoded data
size = header_size + enc_size
if size > max_size:
raise ValueError('encoded data size ({}) exceeds max_size ({})'.format(size, max_size))
header = struct.pack(">I", enc_size)
cpu_buffer[:size] = torch.ByteTensor(list(header + enc))
start = rank * max_size
buffer[start:start + size].copy_(cpu_buffer[:size])
all_reduce(buffer, group=group)
buffer = buffer.cpu()
try:
result = []
for i in range(world_size):
out_buffer = buffer[i * max_size:(i + 1) * max_size]
enc_size, = struct.unpack(">I", bytes(out_buffer[:header_size].tolist()))
if enc_size > 0:
result.append(pickle.loads(bytes(out_buffer[header_size:header_size + enc_size].tolist())))
return result
except pickle.UnpicklingError:
raise Exception(
'Unable to unpickle data from other workers. all_gather_list requires all '
'workers to enter the function together, so this error usually indicates '
'that the workers have fallen out of sync somehow. Workers can fall out of '
'sync if one of them runs out of memory, or if there are other conditions '
'in your training script that can cause one worker to finish an epoch '
'while other workers are still iterating over their portions of the data. '
'Try rerunning with --ddp-backend=no_c10d and see if that helps.'
)
def all_reduce_dict(
data: Mapping[str, Any],
device,
group=None,
) -> Dict[str, Any]:
"""
AllReduce a dictionary of values across workers. We separately
reduce items that are already on the device and items on CPU for
better performance.
Args:
data (Mapping[str, Any]): dictionary of data to all-reduce, but
cannot be a nested dictionary
device (torch.device): device for the reduction
group (optional): group of the collective
"""
data_keys = list(data.keys())
# We want to separately reduce items that are already on the
# device and items on CPU for performance reasons.
cpu_data = OrderedDict()
device_data = OrderedDict()
for k in data_keys:
t = data[k]
if not torch.is_tensor(t):
cpu_data[k] = torch.tensor(t, dtype=torch.double)
elif t.device.type != device.type:
cpu_data[k] = t.to(dtype=torch.double)
else:
device_data[k] = t.to(dtype=torch.double)
def _all_reduce_dict(data: OrderedDict):
if len(data) == 0:
return data
buf = torch.stack(list(data.values())).to(device=device)
all_reduce(buf, group=group)
return {k: buf[i] for i, k in enumerate(data)}
cpu_data = _all_reduce_dict(cpu_data)
device_data = _all_reduce_dict(device_data)
def get_from_stack(key):
if key in cpu_data:
return cpu_data[key]
elif key in device_data:
return device_data[key]
raise KeyError
return OrderedDict([(key, get_from_stack(key)) for key in data_keys])
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/utils.py | fairseq/utils.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
import copy
import importlib.util
import logging
import math
import os
import sys
import warnings
from collections import defaultdict
from itertools import accumulate
from typing import Callable, Dict, List, Optional
import numpy as np
import torch
import torch.nn.functional as F
from fairseq.data import iterators
from fairseq.logging.meters import safe_round
from fairseq.modules import gelu, gelu_accurate
from fairseq.modules.multihead_attention import MultiheadAttention
from torch import Tensor
try:
from amp_C import multi_tensor_l2norm
multi_tensor_l2norm_available = True
except ImportError:
multi_tensor_l2norm_available = False
logger = logging.getLogger(__name__)
MANIFOLD_PATH_SEP = "|"
def split_paths(paths: str) -> List[str]:
return paths.split(os.pathsep) if "://" not in paths else paths.split(MANIFOLD_PATH_SEP)
def load_ensemble_for_inference(filenames, task, model_arg_overrides=None):
from fairseq import checkpoint_utils
deprecation_warning(
"utils.load_ensemble_for_inference is deprecated. "
"Please use checkpoint_utils.load_model_ensemble instead."
)
return checkpoint_utils.load_model_ensemble(
filenames, arg_overrides=model_arg_overrides, task=task
)
def apply_to_sample(f, sample):
if hasattr(sample, '__len__') and len(sample) == 0:
return {}
def _apply(x):
if torch.is_tensor(x):
return f(x)
elif isinstance(x, dict):
return {key: _apply(value) for key, value in x.items()}
elif isinstance(x, list):
return [_apply(x) for x in x]
elif isinstance(x, tuple):
return tuple(_apply(x) for x in x)
elif isinstance(x, set):
return {_apply(x) for x in x}
else:
return x
return _apply(sample)
def move_to_cuda(sample, device=None):
device = device or torch.cuda.current_device()
def _move_to_cuda(tensor):
# non_blocking is ignored if tensor is not pinned, so we can always set
# to True (see github.com/PyTorchLightning/pytorch-lightning/issues/620)
return tensor.cuda(device=device, non_blocking=True)
return apply_to_sample(_move_to_cuda, sample)
def move_to_cpu(sample):
def _move_to_cpu(tensor):
# PyTorch has poor support for half tensors (float16) on CPU.
# Move any such tensors to float32.
if tensor.dtype in {torch.bfloat16, torch.float16}:
tensor = tensor.to(dtype=torch.float32)
return tensor.cpu()
return apply_to_sample(_move_to_cpu, sample)
def get_incremental_state(
module: MultiheadAttention,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],
key: str,
) -> Optional[Dict[str, Optional[Tensor]]]:
"""Helper for getting incremental state for an nn.Module."""
return module.get_incremental_state(incremental_state, key)
def set_incremental_state(
module: MultiheadAttention,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],
key: str,
value: Dict[str, Optional[Tensor]],
) -> Optional[Dict[str, Dict[str, Optional[Tensor]]]]:
"""Helper for setting incremental state for an nn.Module."""
if incremental_state is not None:
result = module.set_incremental_state(incremental_state, key, value)
if result is not None:
incremental_state = result
return incremental_state
def load_align_dict(replace_unk):
if replace_unk is None:
align_dict = None
elif isinstance(replace_unk, str) and len(replace_unk) > 0:
# Load alignment dictionary for unknown word replacement if it was passed as an argument.
align_dict = {}
with open(replace_unk, "r") as f:
for line in f:
cols = line.split()
align_dict[cols[0]] = cols[1]
else:
# No alignment dictionary provided but we still want to perform unknown word replacement by copying the
# original source word.
align_dict = {}
return align_dict
def print_embed_overlap(embed_dict, vocab_dict):
embed_keys = set(embed_dict.keys())
vocab_keys = set(vocab_dict.symbols)
overlap = len(embed_keys & vocab_keys)
logger.info("found {}/{} types in embedding file".format(overlap, len(vocab_dict)))
def parse_embedding(embed_path):
"""Parse embedding text file into a dictionary of word and embedding tensors.
The first line can have vocabulary size and dimension. The following lines
should contain word and embedding separated by spaces.
Example:
2 5
the -0.0230 -0.0264 0.0287 0.0171 0.1403
at -0.0395 -0.1286 0.0275 0.0254 -0.0932
"""
embed_dict = {}
with open(embed_path) as f_embed:
next(f_embed) # skip header
for line in f_embed:
pieces = line.rstrip().split(" ")
embed_dict[pieces[0]] = torch.Tensor(
[float(weight) for weight in pieces[1:]]
)
return embed_dict
def load_embedding(embed_dict, vocab, embedding):
for idx in range(len(vocab)):
token = vocab[idx]
if token in embed_dict:
embedding.weight.data[idx] = embed_dict[token]
return embedding
def replace_unk(hypo_str, src_str, alignment, align_dict, unk):
from fairseq import tokenizer
# Tokens are strings here
hypo_tokens = tokenizer.tokenize_line(hypo_str)
# TODO: Very rare cases where the replacement is '<eos>' should be handled gracefully
src_tokens = tokenizer.tokenize_line(src_str) + ["<eos>"]
for i, ht in enumerate(hypo_tokens):
if ht == unk:
src_token = src_tokens[alignment[i]]
# Either take the corresponding value in the aligned dictionary or just copy the original value.
hypo_tokens[i] = align_dict.get(src_token, src_token)
return " ".join(hypo_tokens)
def post_process_prediction(
hypo_tokens, src_str, alignment, align_dict, tgt_dict, remove_bpe=None, extra_symbols_to_ignore=None
):
hypo_str = tgt_dict.string(hypo_tokens, remove_bpe, extra_symbols_to_ignore=extra_symbols_to_ignore)
if align_dict is not None:
hypo_str = replace_unk(
hypo_str, src_str, alignment, align_dict, tgt_dict.unk_string()
)
if align_dict is not None or remove_bpe is not None:
# Convert back to tokens for evaluating with unk replacement or without BPE
# Note that the dictionary can be modified inside the method.
hypo_tokens = tgt_dict.encode_line(hypo_str, add_if_not_exist=True)
return hypo_tokens, hypo_str, alignment
def make_positions(tensor, padding_idx: int, onnx_trace: bool = False):
"""Replace non-padding symbols with their position numbers.
Position numbers begin at padding_idx+1. Padding symbols are ignored.
"""
# The series of casts and type-conversions here are carefully
# balanced to both work with ONNX export and XLA. In particular XLA
# prefers ints, cumsum defaults to output longs, and ONNX doesn't know
# how to handle the dtype kwarg in cumsum.
mask = tensor.ne(padding_idx).int()
return (torch.cumsum(mask, dim=1).type_as(mask) * mask).long() + padding_idx
def strip_pad(tensor, pad):
return tensor[tensor.ne(pad)]
def buffered_arange(max):
if not hasattr(buffered_arange, "buf"):
buffered_arange.buf = torch.LongTensor()
if max > buffered_arange.buf.numel():
buffered_arange.buf.resize_(max)
torch.arange(max, out=buffered_arange.buf)
return buffered_arange.buf[:max]
def convert_padding_direction(
src_tokens, padding_idx, right_to_left: bool = False, left_to_right: bool = False
):
assert right_to_left ^ left_to_right
pad_mask = src_tokens.eq(padding_idx)
if not pad_mask.any():
# no padding, return early
return src_tokens
if left_to_right and not pad_mask[:, 0].any():
# already right padded
return src_tokens
if right_to_left and not pad_mask[:, -1].any():
# already left padded
return src_tokens
max_len = src_tokens.size(1)
buffered = torch.empty(0).long()
if max_len > 0:
torch.arange(max_len, out=buffered)
range = buffered.type_as(src_tokens).expand_as(src_tokens)
num_pads = pad_mask.long().sum(dim=1, keepdim=True)
if right_to_left:
index = torch.remainder(range - num_pads, max_len)
else:
index = torch.remainder(range + num_pads, max_len)
return src_tokens.gather(1, index)
def item(tensor):
if hasattr(tensor, "item"):
return tensor.item()
if hasattr(tensor, "__getitem__"):
return tensor[0]
return tensor
def multi_tensor_total_norm(grads, chunk_size=2048*32) -> torch.Tensor:
per_device_grads = {}
norms = []
for grad in grads:
device = grad.device
cur_device_grads = per_device_grads.get(device)
if cur_device_grads is None:
cur_device_grads = []
per_device_grads[device] = cur_device_grads
cur_device_grads.append(grad)
for device in per_device_grads.keys():
cur_device_grads = per_device_grads[device]
if device.type == "cuda":
# TODO(msb) return has_inf
has_inf = torch.zeros((1, 1), dtype=torch.int, device=device)
with torch.cuda.device(device):
norm = multi_tensor_l2norm(chunk_size, has_inf, [cur_device_grads], False)
norms.append(norm[0].to(torch.cuda.current_device()))
else:
norms += [torch.norm(g, p=2, dtype=torch.float32) for g in cur_device_grads]
total_norm = torch.norm(torch.stack(norms))
return total_norm
def clip_grad_norm_(params, max_norm, aggregate_norm_fn=None) -> torch.Tensor:
if isinstance(params, torch.Tensor):
params = [params]
params = list(params)
grads = [p.grad.detach() for p in filter(lambda p: p.grad is not None, params)]
if len(grads) == 0:
if len(params) > 0:
return params[0].new_tensor(0.)
else:
return torch.tensor(0.)
if len(grads) == 1:
total_norm = torch.norm(grads[0], p=2, dtype=torch.float32)
else:
if multi_tensor_l2norm_available:
total_norm = multi_tensor_total_norm(grads)
else:
device = torch.device('cpu')
if torch.cuda.is_available():
warnings.warn(
"amp_C fused kernels unavailable, disabling multi_tensor_l2norm; "
"you may get better performance by installing NVIDIA's apex library"
)
device = torch.cuda.current_device()
total_norm = torch.norm(
torch.stack([torch.norm(g, p=2, dtype=torch.float32).to(device) for g in grads])
)
if aggregate_norm_fn is not None:
total_norm = aggregate_norm_fn(total_norm)
if max_norm > 0:
max_norm = float(max_norm)
clip_coef = (max_norm / (total_norm + 1e-6)).clamp_(max=1)
for g in grads:
g.mul_(clip_coef)
return total_norm
def fill_with_neg_inf(t):
"""FP16-compatible function that fills a tensor with -inf."""
return t.float().fill_(float("-inf")).type_as(t)
def _match_types(arg1, arg2):
"""Convert the numerical argument to the same type as the other argument"""
def upgrade(arg_number, arg_structure):
if isinstance(arg_structure, tuple):
return tuple([arg_number] * len(arg_structure))
elif isinstance(arg_structure, dict):
arg = copy.deepcopy(arg_structure)
for k in arg:
arg[k] = upgrade(arg_number, arg_structure[k])
return arg
else:
return arg_number
if isinstance(arg1, float) or isinstance(arg1, int):
return upgrade(arg1, arg2), arg2
elif isinstance(arg2, float) or isinstance(arg2, int):
return arg1, upgrade(arg2, arg1)
return arg1, arg2
def resolve_max_positions(*args):
"""Resolve max position constraints from multiple sources."""
def map_value_update(d1, d2):
updated_value = copy.deepcopy(d1)
for key in d2:
if key not in updated_value:
updated_value[key] = d2[key]
else:
updated_value[key] = min(d1[key], d2[key])
return updated_value
def nullsafe_min(l):
minim = None
for item in l:
if minim is None:
minim = item
elif item is not None and item < minim:
minim = item
return minim
max_positions = None
for arg in args:
if max_positions is None:
max_positions = arg
elif arg is not None:
max_positions, arg = _match_types(max_positions, arg)
if isinstance(arg, float) or isinstance(arg, int):
max_positions = min(max_positions, arg)
elif isinstance(arg, dict):
max_positions = map_value_update(max_positions, arg)
else:
max_positions = tuple(map(nullsafe_min, zip(max_positions, arg)))
return max_positions
def import_user_module(args):
module_path = getattr(args, "user_dir", None)
if module_path is not None:
module_path = os.path.abspath(args.user_dir)
if not os.path.exists(module_path):
fairseq_rel_path = os.path.join(
os.path.dirname(__file__), "..", args.user_dir
)
if os.path.exists(fairseq_rel_path):
module_path = fairseq_rel_path
module_parent, module_name = os.path.split(module_path)
if module_name not in sys.modules:
sys.path.insert(0, module_parent)
importlib.import_module(module_name)
def softmax(x, dim: int, onnx_trace: bool = False):
if onnx_trace:
return F.softmax(x.float(), dim=dim)
else:
return F.softmax(x, dim=dim, dtype=torch.float32)
def log_softmax(x, dim: int, onnx_trace: bool = False):
if onnx_trace:
return F.log_softmax(x.float(), dim=dim)
else:
return F.log_softmax(x, dim=dim, dtype=torch.float32)
def get_perplexity(loss, round=2, base=2):
if loss is None:
return 0.
try:
return safe_round(base ** loss, round)
except OverflowError:
return float('inf')
def deprecation_warning(message, stacklevel=3):
# don't use DeprecationWarning, since it's ignored by default
warnings.warn(message, stacklevel=stacklevel)
def get_activation_fn(activation: str) -> Callable:
""" Returns the activation function corresponding to `activation` """
if activation == "relu":
return F.relu
elif activation == "gelu":
return gelu
elif activation == "gelu_fast":
deprecation_warning(
"--activation-fn=gelu_fast has been renamed to gelu_accurate"
)
return gelu_accurate
elif activation == "gelu_accurate":
return gelu_accurate
elif activation == "tanh":
return torch.tanh
elif activation == "linear":
return lambda x: x
else:
raise RuntimeError("--activation-fn {} not supported".format(activation))
def get_available_activation_fns() -> List:
return [
"relu",
"gelu",
"gelu_fast", # deprecated
"gelu_accurate",
"tanh",
"linear",
]
@contextlib.contextmanager
def model_eval(model):
is_training = model.training
model.eval()
yield
model.train(is_training)
def has_parameters(module):
try:
next(module.parameters())
return True
except StopIteration:
return False
def set_torch_seed(seed):
# Set seed based on args.seed and the update number so that we get
# reproducible results when resuming from checkpoints
assert isinstance(seed, int)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
@contextlib.contextmanager
def with_torch_seed(seed):
assert isinstance(seed, int)
rng_state = torch.get_rng_state()
cuda_rng_state = torch.cuda.get_rng_state()
set_torch_seed(seed)
yield
torch.set_rng_state(rng_state)
torch.cuda.set_rng_state(cuda_rng_state)
def parse_alignment(line):
"""
Parses a single line from the alingment file.
Args:
line (str): String containing the alignment of the format:
<src_idx_1>-<tgt_idx_1> <src_idx_2>-<tgt_idx_2> ..
<src_idx_m>-<tgt_idx_m>. All indices are 0 indexed.
Returns:
torch.IntTensor: packed alignments of shape (2 * m).
"""
alignments = line.strip().split()
parsed_alignment = torch.IntTensor(2 * len(alignments))
for idx, alignment in enumerate(alignments):
src_idx, tgt_idx = alignment.split("-")
parsed_alignment[2 * idx] = int(src_idx)
parsed_alignment[2 * idx + 1] = int(tgt_idx)
return parsed_alignment
def get_token_to_word_mapping(tokens, exclude_list):
n = len(tokens)
word_start = [int(token not in exclude_list) for token in tokens]
word_idx = list(accumulate(word_start))
token_to_word = {i: word_idx[i] for i in range(n)}
return token_to_word
def extract_hard_alignment(attn, src_sent, tgt_sent, pad, eos):
tgt_valid = ((tgt_sent != pad) & (tgt_sent != eos)).nonzero(as_tuple=False).squeeze(dim=-1)
src_invalid = ((src_sent == pad) | (src_sent == eos)).nonzero(as_tuple=False).squeeze(dim=-1)
src_token_to_word = get_token_to_word_mapping(src_sent, [eos, pad])
tgt_token_to_word = get_token_to_word_mapping(tgt_sent, [eos, pad])
alignment = []
if len(tgt_valid) != 0 and len(src_invalid) < len(src_sent):
attn_valid = attn[tgt_valid]
attn_valid[:, src_invalid] = float("-inf")
_, src_indices = attn_valid.max(dim=1)
for tgt_idx, src_idx in zip(tgt_valid, src_indices):
alignment.append(
(
src_token_to_word[src_idx.item()] - 1,
tgt_token_to_word[tgt_idx.item()] - 1,
)
)
return alignment
def new_arange(x, *size):
"""
Return a Tensor of `size` filled with a range function on the device of x.
If size is empty, using the size of the variable x.
"""
if len(size) == 0:
size = x.size()
return torch.arange(size[-1], device=x.device).expand(*size).contiguous()
def get_tpu_device(args):
import torch_xla.core.xla_model as xm
return xm.xla_device()
def tpu_data_loader(itr):
import torch_xla.core.xla_model as xm
import torch_xla.distributed.parallel_loader as pl
xm.rendezvous("tpu_data_loader") # wait for all workers
xm.mark_step()
device = xm.xla_device()
return iterators.CountingIterator(
pl.ParallelLoader(itr, [device]).per_device_loader(device),
start=getattr(itr, "n", 0),
total=len(itr),
)
class CudaEnvironment(object):
def __init__(self):
cur_device = torch.cuda.current_device()
prop = torch.cuda.get_device_properties("cuda:{}".format(cur_device))
self.name = prop.name
self.major = prop.major
self.minor = prop.minor
self.total_memory_in_GB = prop.total_memory / 1024 / 1024 / 1024
@staticmethod
def pretty_print_cuda_env_list(cuda_env_list):
"""
Given a list of CudaEnviorments, pretty print them
"""
num_workers = len(cuda_env_list)
center = "CUDA enviroments for all {} workers".format(num_workers)
banner_len = 40 - len(center) // 2
first_line = "*" * banner_len + center + "*" * banner_len
logger.info(first_line)
for r, env in enumerate(cuda_env_list):
logger.info(
"rank {:3d}: ".format(r)
+ "capabilities = {:2d}.{:<2d} ; ".format(env.major, env.minor)
+ "total memory = {:.3f} GB ; ".format(env.total_memory_in_GB)
+ "name = {:40s}".format(env.name)
)
logger.info(first_line)
def csv_str_list(x):
return x.split(',')
def eval_str_list(x, type=float):
if x is None:
return None
if isinstance(x, str):
x = eval(x)
try:
return list(map(type, x))
except TypeError:
return [type(x)]
def eval_str_dict(x, type=dict):
if x is None:
return None
if isinstance(x, str):
x = eval(x)
return x
def eval_bool(x, default=False):
if x is None:
return default
try:
return bool(eval(x))
except TypeError:
return default
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/hub_utils.py | fairseq/hub_utils.py | #!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import copy
import logging
import os
from typing import List, Dict, Iterator, Tuple, Any
import torch
from torch import nn
from fairseq import utils
from fairseq.data import encoders
logger = logging.getLogger(__name__)
def from_pretrained(
model_name_or_path,
checkpoint_file='model.pt',
data_name_or_path='.',
archive_map=None,
**kwargs
):
from fairseq import checkpoint_utils, file_utils
if archive_map is not None:
if model_name_or_path in archive_map:
model_name_or_path = archive_map[model_name_or_path]
if data_name_or_path is not None and data_name_or_path in archive_map:
data_name_or_path = archive_map[data_name_or_path]
# allow archive_map to set default arg_overrides (e.g., tokenizer, bpe)
# for each model
if isinstance(model_name_or_path, dict):
for k, v in model_name_or_path.items():
if k == 'checkpoint_file':
checkpoint_file = v
elif (
k != 'path'
# only set kwargs that don't already have overrides
and k not in kwargs
):
kwargs[k] = v
model_name_or_path = model_name_or_path['path']
model_path = file_utils.load_archive_file(model_name_or_path)
# convenience hack for loading data and BPE codes from model archive
if data_name_or_path.startswith('.'):
kwargs['data'] = os.path.abspath(os.path.join(model_path, data_name_or_path))
else:
kwargs['data'] = file_utils.load_archive_file(data_name_or_path)
for file, arg in {
'code': 'bpe_codes',
'bpecodes': 'bpe_codes',
'sentencepiece.bpe.model': 'sentencepiece_model',
}.items():
path = os.path.join(model_path, file)
if os.path.exists(path):
kwargs[arg] = path
if 'user_dir' in kwargs:
utils.import_user_module(argparse.Namespace(user_dir=kwargs['user_dir']))
models, args, task = checkpoint_utils.load_model_ensemble_and_task(
[os.path.join(model_path, cpt) for cpt in checkpoint_file.split(os.pathsep)],
arg_overrides=kwargs,
)
return {
'args': args,
'task': task,
'models': models,
}
class GeneratorHubInterface(nn.Module):
"""
PyTorch Hub interface for generating sequences from a pre-trained
translation or language model.
"""
def __init__(self, args, task, models):
super().__init__()
self.args = args
self.task = task
self.models = nn.ModuleList(models)
self.src_dict = task.source_dictionary
self.tgt_dict = task.target_dictionary
# optimize model for generation
for model in self.models:
model.prepare_for_inference_(args)
# Load alignment dictionary for unknown word replacement
# (None if no unknown word replacement, empty if no path to align dictionary)
self.align_dict = utils.load_align_dict(getattr(args, 'replace_unk', None))
self.tokenizer = encoders.build_tokenizer(args)
self.bpe = encoders.build_bpe(args)
self.max_positions = utils.resolve_max_positions(
self.task.max_positions(), *[model.max_positions() for model in models]
)
# this is useful for determining the device
self.register_buffer('_float_tensor', torch.tensor([0], dtype=torch.float))
@property
def device(self):
return self._float_tensor.device
def translate(self, sentences: List[str], beam: int = 5, verbose: bool = False, **kwargs) -> List[str]:
return self.sample(sentences, beam, verbose, **kwargs)
def sample(self, sentences: List[str], beam: int = 1, verbose: bool = False, **kwargs) -> List[str]:
if isinstance(sentences, str):
return self.sample([sentences], beam=beam, verbose=verbose, **kwargs)[0]
tokenized_sentences = [self.encode(sentence) for sentence in sentences]
batched_hypos = self.generate(tokenized_sentences, beam, verbose, **kwargs)
return [self.decode(hypos[0]['tokens']) for hypos in batched_hypos]
def score(self, sentences: List[str], **kwargs):
if isinstance(sentences, str):
return self.score([sentences], **kwargs)[0]
# NOTE: this doesn't support translation tasks currently
tokenized_sentences = [self.encode(sentence) for sentence in sentences]
return [hypos[0] for hypos in self.generate(tokenized_sentences, score_reference=True, **kwargs)]
def generate(
self,
tokenized_sentences: List[torch.LongTensor],
beam: int = 5,
verbose: bool = False,
skip_invalid_size_inputs=False,
inference_step_args=None,
**kwargs
) -> List[List[Dict[str, torch.Tensor]]]:
if torch.is_tensor(tokenized_sentences) and tokenized_sentences.dim() == 1:
return self.generate(
tokenized_sentences.unsqueeze(0), beam=beam, verbose=verbose, **kwargs
)[0]
# build generator using current args as well as any kwargs
gen_args = copy.copy(self.args)
gen_args.beam = beam
for k, v in kwargs.items():
setattr(gen_args, k, v)
generator = self.task.build_generator(self.models, gen_args)
inference_step_args = inference_step_args or {}
results = []
for batch in self._build_batches(tokenized_sentences, skip_invalid_size_inputs):
batch = utils.apply_to_sample(lambda t: t.to(self.device), batch)
translations = self.task.inference_step(
generator, self.models, batch, **inference_step_args
)
for id, hypos in zip(batch["id"].tolist(), translations):
results.append((id, hypos))
# sort output to match input order
outputs = [hypos for _, hypos in sorted(results, key=lambda x: x[0])]
if verbose:
def getarg(name, default):
return getattr(gen_args, name, getattr(self.args, name, default))
for source_tokens, target_hypotheses in zip(tokenized_sentences, outputs):
src_str_with_unk = self.string(source_tokens)
logger.info('S\t{}'.format(src_str_with_unk))
for hypo in target_hypotheses:
hypo_str = self.decode(hypo['tokens'])
logger.info('H\t{}\t{}'.format(hypo['score'], hypo_str))
logger.info('P\t{}'.format(
' '.join(map(lambda x: '{:.4f}'.format(x), hypo['positional_scores'].tolist()))
))
if hypo['alignment'] is not None and getarg('print_alignment', False):
logger.info('A\t{}'.format(
' '.join(['{}-{}'.format(src_idx, tgt_idx) for src_idx, tgt_idx in hypo['alignment']])
))
return outputs
def encode(self, sentence: str) -> torch.LongTensor:
sentence = self.tokenize(sentence)
sentence = self.apply_bpe(sentence)
return self.binarize(sentence)
def decode(self, tokens: torch.LongTensor) -> str:
sentence = self.string(tokens)
sentence = self.remove_bpe(sentence)
return self.detokenize(sentence)
def tokenize(self, sentence: str) -> str:
if self.tokenizer is not None:
sentence = self.tokenizer.encode(sentence)
return sentence
def detokenize(self, sentence: str) -> str:
if self.tokenizer is not None:
sentence = self.tokenizer.decode(sentence)
return sentence
def apply_bpe(self, sentence: str) -> str:
if self.bpe is not None:
sentence = self.bpe.encode(sentence)
return sentence
def remove_bpe(self, sentence: str) -> str:
if self.bpe is not None:
sentence = self.bpe.decode(sentence)
return sentence
def binarize(self, sentence: str) -> torch.LongTensor:
return self.src_dict.encode_line(sentence, add_if_not_exist=False).long()
def string(self, tokens: torch.LongTensor) -> str:
return self.tgt_dict.string(tokens)
def _build_batches(
self, tokens: List[List[int]], skip_invalid_size_inputs: bool
) -> Iterator[Dict[str, Any]]:
lengths = torch.LongTensor([t.numel() for t in tokens])
batch_iterator = self.task.get_batch_iterator(
dataset=self.task.build_dataset_for_inference(tokens, lengths),
max_tokens=self.args.max_tokens,
max_sentences=self.args.max_sentences,
max_positions=self.max_positions,
ignore_invalid_inputs=skip_invalid_size_inputs,
disable_iterator_cache=True,
).next_epoch_itr(shuffle=False)
return batch_iterator
class BPEHubInterface(object):
"""PyTorch Hub interface for Byte-Pair Encoding (BPE)."""
def __init__(self, bpe, **kwargs):
super().__init__()
args = argparse.Namespace(bpe=bpe, **kwargs)
self.bpe = encoders.build_bpe(args)
assert self.bpe is not None
def encode(self, sentence: str) -> str:
return self.bpe.encode(sentence)
def decode(self, sentence: str) -> str:
return self.bpe.decode(sentence)
class TokenizerHubInterface(object):
"""PyTorch Hub interface for tokenization."""
def __init__(self, tokenizer, **kwargs):
super().__init__()
args = argparse.Namespace(tokenizer=tokenizer, **kwargs)
self.tokenizer = encoders.build_tokenizer(args)
assert self.tokenizer is not None
def encode(self, sentence: str) -> str:
return self.tokenizer.encode(sentence)
def decode(self, sentence: str) -> str:
return self.tokenizer.decode(sentence)
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/binarizer.py | fairseq/binarizer.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from collections import Counter
from fairseq.tokenizer import tokenize_line
import torch
from fairseq.file_io import PathManager
def safe_readline(f):
pos = f.tell()
while True:
try:
return f.readline()
except UnicodeDecodeError:
pos -= 1
f.seek(pos) # search where this character begins
class Binarizer:
@staticmethod
def binarize(
filename,
dict,
consumer,
tokenize=tokenize_line,
append_eos=True,
reverse_order=False,
offset=0,
end=-1,
already_numberized=False,
):
nseq, ntok = 0, 0
replaced = Counter()
def replaced_consumer(word, idx):
if idx == dict.unk_index and word != dict.unk_word:
replaced.update([word])
with open(PathManager.get_local_path(filename), "r", encoding="utf-8") as f:
f.seek(offset)
# next(f) breaks f.tell(), hence readline() must be used
line = safe_readline(f)
while line:
if end > 0 and f.tell() > end:
break
if already_numberized:
id_strings = line.strip().split()
id_list = [int(id_string) for id_string in id_strings]
if reverse_order:
id_list.reverse()
if append_eos:
id_list.append(dict.eos())
ids = torch.IntTensor(id_list)
else:
ids = dict.encode_line(
line=line,
line_tokenizer=tokenize,
add_if_not_exist=False,
consumer=replaced_consumer,
append_eos=append_eos,
reverse_order=reverse_order,
)
nseq += 1
ntok += len(ids)
consumer(ids)
line = f.readline()
return {
"nseq": nseq,
"nunk": sum(replaced.values()),
"ntok": ntok,
"replaced": replaced,
}
@staticmethod
def binarize_alignments(filename, alignment_parser, consumer, offset=0, end=-1):
nseq = 0
with open(PathManager.get_local_path(filename), "r") as f:
f.seek(offset)
line = safe_readline(f)
while line:
if end > 0 and f.tell() > end:
break
ids = alignment_parser(line)
nseq += 1
consumer(ids)
line = f.readline()
return {"nseq": nseq}
@staticmethod
def find_offsets(filename, num_chunks):
with open(PathManager.get_local_path(filename), "r", encoding="utf-8") as f:
size = os.fstat(f.fileno()).st_size
chunk_size = size // num_chunks
offsets = [0 for _ in range(num_chunks + 1)]
for i in range(1, num_chunks):
f.seek(chunk_size * i)
safe_readline(f)
offsets[i] = f.tell()
return offsets
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/trainer.py | fairseq/trainer.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Train a network across multiple GPUs.
"""
import contextlib
from itertools import chain
import logging
import sys
import time
from typing import Any, Dict, List
import torch
from fairseq import checkpoint_utils, distributed_utils, models, optim, utils
from fairseq.file_io import PathManager
from fairseq.logging import meters, metrics
from fairseq.nan_detector import NanDetector
from fairseq.optim import lr_scheduler
logger = logging.getLogger(__name__)
class Trainer(object):
"""Main class for data parallel training.
This class supports synchronous distributed data parallel training,
where multiple workers each have a full model replica and gradients
are accumulated across workers before each update. We use
:class:`~torch.nn.parallel.DistributedDataParallel` to handle
communication of the gradients across workers.
"""
def __init__(self, args, task, model, criterion, quantizer=None):
self.args = args
self.task = task
# catalog shared parameters
shared_params = _catalog_shared_params(model)
self.tpu = getattr(args, 'tpu', False)
self.cuda = torch.cuda.is_available() and not args.cpu and not self.tpu
if self.cuda:
self.device = torch.device('cuda')
elif self.tpu:
self.device = utils.get_tpu_device(args)
else:
self.device = torch.device('cpu')
# copy model and criterion to current device/dtype
self._criterion = criterion
self._model = model
if self.tpu:
import torch_xla.core.xla_model as xm
self._model = xm.send_cpu_data_to_device(self._model, self.device)
if args.fp16:
self._criterion = self._criterion.half()
self._model = self._model.half()
elif args.bf16:
self._criterion = self._criterion.to(dtype=torch.bfloat16)
self._model = self._model.to(dtype=torch.bfloat16)
if not args.pipeline_model_parallel:
self._criterion = self._criterion.to(device=self.device)
self._model = self._model.to(device=self.device)
self.pipeline_model_parallel = args.pipeline_model_parallel
self.last_device = None
if self.cuda and self.pipeline_model_parallel:
self.last_device = torch.device(args.pipeline_devices[-1])
# check that shared parameters are preserved after device transfer
for shared_param in shared_params:
ref = _get_module_by_path(self._model, shared_param[0])
for path in shared_param[1:]:
logger.info(
'detected shared parameter: {} <- {}'.format(shared_param[0], path)
)
_set_module_by_path(self._model, path, ref)
self._dummy_batch = "DUMMY" # indicates we don't have a dummy batch at first
self._lr_scheduler = None
self._num_updates = 0
self._num_xla_compiles = 0 # for TPUs
self._optim_history = None
self._optimizer = None
self._warn_once = set()
self._wrapped_criterion = None
self._wrapped_model = None
# TODO(myleott): support tpu
if self.cuda and self.data_parallel_world_size > 1:
self._grad_norm_buf = torch.cuda.DoubleTensor(self.data_parallel_world_size)
else:
self._grad_norm_buf = None
self.quantizer = quantizer
if self.quantizer is not None:
self.quantizer.set_trainer(self)
# get detailed cuda environment
if self.cuda:
self.cuda_env = utils.CudaEnvironment()
if self.data_parallel_world_size > 1:
self.cuda_env_arr = distributed_utils.all_gather_list(self.cuda_env)
else:
self.cuda_env_arr = [self.cuda_env]
if self.data_parallel_rank == 0:
utils.CudaEnvironment.pretty_print_cuda_env_list(self.cuda_env_arr)
else:
self.cuda_env = None
self.cuda_env_arr = None
metrics.log_start_time("wall", priority=790, round=0)
self._start_time = time.time()
self._previous_training_time = 0
self._cumulative_training_time = None
def reinitialize(self):
"""Reinitialize the Trainer, typically after model params change."""
self._lr_scheduler = None
self._optimizer = None
self._wrapped_criterion = None
self._wrapped_model = None
@property
def data_parallel_world_size(self):
return self.args.distributed_world_size
@property
def data_parallel_process_group(self):
if self.tpu:
return ('tpu', None)
else:
return None
@property
def data_parallel_rank(self):
return self.args.distributed_rank
@property
def is_data_parallel_master(self):
return distributed_utils.is_master(self.args)
@property
def criterion(self):
if self._wrapped_criterion is None:
if (
utils.has_parameters(self._criterion)
and self.data_parallel_world_size > 1
and not self.args.use_bmuf
and not self.tpu
):
self._wrapped_criterion = models.DistributedFairseqModel(
self.args, self._criterion,
process_group=self.data_parallel_process_group
)
else:
self._wrapped_criterion = self._criterion
return self._wrapped_criterion
@property
def model(self):
if self._wrapped_model is None:
if (
self.data_parallel_world_size > 1
and not self.args.use_bmuf
and not self.tpu
):
self._wrapped_model = models.DistributedFairseqModel(
self.args, self._model,
process_group=self.data_parallel_process_group
)
else:
self._wrapped_model = self._model
return self._wrapped_model
@property
def optimizer(self):
if self._optimizer is None:
self._build_optimizer()
return self._optimizer
@property
def lr_scheduler(self):
if self._lr_scheduler is None:
self._build_optimizer() # this will initialize self._lr_scheduler
return self._lr_scheduler
def _build_optimizer(self):
params = list(
filter(
lambda p: p.requires_grad,
chain(self.model.parameters(), self.criterion.parameters()),
)
)
if self.args.fp16 or self.args.bf16:
if self.cuda and torch.cuda.get_device_capability(0)[0] < 7:
logger.info(
"NOTE: your device does NOT support faster training with --fp16, "
"please switch to FP32 which is likely to be faster"
)
if self.args.memory_efficient_fp16 or self.args.memory_efficient_bf16:
self._optimizer = optim.MemoryEfficientFP16Optimizer.build_optimizer(
self.args, params
)
else:
self._optimizer = optim.FP16Optimizer.build_optimizer(self.args, params)
else:
if self.cuda and torch.cuda.get_device_capability(0)[0] >= 7:
logger.info("NOTE: your device may support faster training with --fp16")
self._optimizer = optim.build_optimizer(self.args, params)
if self.args.use_bmuf:
self._optimizer = optim.FairseqBMUF(self.args, self._optimizer)
if self.args.zero_sharding == 'os':
if (self.args.fp16
and not self.args.memory_efficient_fp16
and not self.args.memory_efficient_bf16
) and not self.args.fp16_no_flatten_grads:
raise ValueError(
"ZeRO is incomptabile with fp16 and flattened grads. "
"Please use --fp16-no-flatten-grads"
)
else:
optim.shard_(self.args, self._optimizer)
# We should initialize the learning rate scheduler immediately after
# building the optimizer, so that the initial learning rate is set.
self._lr_scheduler = lr_scheduler.build_lr_scheduler(self.args, self.optimizer)
self._lr_scheduler.step_update(0)
def consolidate_optimizer(self):
"""For OSS, we need to consolidate the state dict."""
if hasattr(self.optimizer.optimizer, "consolidate_state_dict"):
self.optimizer.optimizer.consolidate_state_dict()
def save_checkpoint(self, filename, extra_state):
"""Save all training state in a checkpoint file."""
if self.is_data_parallel_master: # only save one checkpoint
extra_state["metrics"] = metrics.state_dict()
extra_state["previous_training_time"] = self.cumulative_training_time()
checkpoint_utils.save_state(
filename,
self.args,
self.get_model().state_dict(),
self.get_criterion(),
self.optimizer,
self.lr_scheduler,
self.get_num_updates(),
self._optim_history,
extra_state,
)
def load_checkpoint(
self,
filename,
reset_optimizer=False,
reset_lr_scheduler=False,
optimizer_overrides=None,
reset_meters=False,
):
"""Load all training state from a checkpoint file."""
extra_state, self._optim_history, last_optim_state = None, [], None
bexists = PathManager.isfile(filename)
if bexists:
state = checkpoint_utils.load_checkpoint_to_cpu(filename)
# load model parameters
try:
self.get_model().load_state_dict(
state["model"], strict=True, args=self.args
)
if utils.has_parameters(self.get_criterion()):
self.get_criterion().load_state_dict(
state["criterion"], strict=True
)
except Exception:
raise Exception(
"Cannot load model parameters from checkpoint {}; "
"please ensure that the architectures match.".format(filename)
)
extra_state = state["extra_state"]
self._optim_history = state["optimizer_history"]
last_optim_state = state.get("last_optimizer_state", None)
if last_optim_state is not None and not reset_optimizer:
# rebuild optimizer after loading model, since params may have changed
self._build_optimizer()
# only reload optimizer and lr_scheduler if they match
last_optim = self._optim_history[-1]
assert (
last_optim["criterion_name"] == self.get_criterion().__class__.__name__
), "Criterion does not match; please reset the optimizer (--reset-optimizer)."
assert (
last_optim["optimizer_name"] == self.optimizer.__class__.__name__
), "Optimizer does not match; please reset the optimizer (--reset-optimizer)."
if not reset_lr_scheduler:
self.lr_scheduler.load_state_dict(last_optim["lr_scheduler_state"])
self.optimizer.load_state_dict(last_optim_state, optimizer_overrides)
self.set_num_updates(last_optim["num_updates"])
if extra_state is not None:
epoch = extra_state["train_iterator"]["epoch"]
logger.info(
"loaded checkpoint {} (epoch {} @ {} updates)".format(
filename, epoch, self.get_num_updates()
)
)
if "previous_training_time" in extra_state:
self._previous_training_time = extra_state["previous_training_time"]
self._start_time = time.time()
self.lr_step(epoch)
if "metrics" in extra_state and not reset_meters:
metrics.load_state_dict(extra_state["metrics"])
# reset TimeMeters, since their start times don't make sense anymore
for meter in metrics.get_meters("default"):
if isinstance(meter, meters.TimeMeter):
meter.reset()
else:
logger.info("no existing checkpoint found {}".format(filename))
return extra_state
def get_train_iterator(
self,
epoch,
combine=True,
load_dataset=True,
data_selector=None,
shard_batch_itr=True,
disable_iterator_cache=False,
):
"""Return an EpochBatchIterator over the training set for a given epoch."""
if load_dataset:
logger.info("loading train data for epoch {}".format(epoch))
self.task.load_dataset(
self.args.train_subset,
epoch=epoch,
combine=combine,
data_selector=data_selector,
)
return self.task.get_batch_iterator(
dataset=self.task.dataset(self.args.train_subset),
max_tokens=self.args.max_tokens,
max_sentences=self.args.max_sentences,
max_positions=utils.resolve_max_positions(
self.task.max_positions(),
self.model.max_positions(),
self.args.max_tokens,
),
ignore_invalid_inputs=True,
required_batch_size_multiple=self.args.required_batch_size_multiple,
seed=self.args.seed,
num_shards=self.data_parallel_world_size if shard_batch_itr else 1,
shard_id=self.data_parallel_rank if shard_batch_itr else 0,
num_workers=self.args.num_workers,
epoch=epoch,
data_buffer_size=self.args.data_buffer_size,
disable_iterator_cache=disable_iterator_cache,
)
def get_valid_iterator(
self,
subset,
disable_iterator_cache=False,
):
"""Return an EpochBatchIterator over given validation subset for a given epoch."""
return self.task.get_batch_iterator(
dataset=self.task.dataset(subset),
max_tokens=self.args.max_tokens_valid,
max_sentences=self.args.max_sentences_valid,
max_positions=utils.resolve_max_positions(
self.task.max_positions(),
self.model.max_positions(),
),
ignore_invalid_inputs=self.args.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=self.args.required_batch_size_multiple,
seed=self.args.seed,
num_shards=self.data_parallel_world_size,
shard_id=self.data_parallel_rank,
num_workers=self.args.num_workers,
data_buffer_size=self.args.data_buffer_size,
disable_iterator_cache=disable_iterator_cache,
)
def begin_epoch(self, epoch):
"""Called at the beginning of each epoch."""
logger.info("begin training epoch {}".format(epoch))
if self.quantizer is not None:
self.quantizer.begin_epoch(epoch)
# task specific setup per epoch
self.task.begin_epoch(epoch, self.get_model())
if self.tpu:
import torch_xla.core.xla_model as xm
xm.rendezvous('begin_epoch') # wait for all workers
xm.mark_step()
@metrics.aggregate("train")
def train_step(self, samples, raise_oom=False):
"""Do forward, backward and parameter update."""
if self._dummy_batch == "DUMMY":
self._dummy_batch = samples[0]
self._set_seed()
self.model.train()
self.criterion.train()
self.zero_grad()
metrics.log_start_time("train_wall", priority=800, round=0)
# forward and backward pass
logging_outputs, sample_size, ooms = [], 0, 0
for i, sample in enumerate(samples):
sample = self._prepare_sample(sample)
if sample is None:
# when sample is None, run forward/backward on a dummy batch
# and ignore the resulting gradients
sample = self._prepare_sample(self._dummy_batch)
is_dummy_batch = True
else:
is_dummy_batch = False
def maybe_no_sync():
"""
Whenever *samples* contains more than one mini-batch, we
want to accumulate gradients locally and only call
all-reduce in the last backwards pass.
"""
if (
self.data_parallel_world_size > 1
and hasattr(self.model, "no_sync")
and i < len(samples) - 1
):
return self.model.no_sync()
else:
return contextlib.ExitStack() # dummy contextmanager
try:
with maybe_no_sync():
# forward and backward
loss, sample_size_i, logging_output = self.task.train_step(
sample=sample,
model=self.model,
criterion=self.criterion,
optimizer=self.optimizer,
update_num=self.get_num_updates(),
ignore_grad=is_dummy_batch,
)
del loss
logging_outputs.append(logging_output)
sample_size += sample_size_i
# emptying the CUDA cache after the first step can
# reduce the chance of OOM
if self.cuda and self.get_num_updates() == 0:
torch.cuda.empty_cache()
except RuntimeError as e:
if "out of memory" in str(e):
self._log_oom(e)
if raise_oom:
raise e
logger.warning(
"attempting to recover from OOM in forward/backward pass"
)
ooms += 1
self.zero_grad()
if self.cuda:
torch.cuda.empty_cache()
if self.args.distributed_world_size == 1:
return None
else:
raise e
if self.tpu and i < len(samples) - 1:
# tpu-comment: every XLA operation before marking step is
# appended to the IR graph, and processing too many batches
# before marking step can lead to OOM errors.
# To handle gradient accumulation use case, we explicitly
# mark step here for every forward pass without a backward pass
import torch_xla.core.xla_model as xm
xm.mark_step()
if is_dummy_batch:
if torch.is_tensor(sample_size):
sample_size.zero_()
else:
sample_size *= 0.
if torch.is_tensor(sample_size):
sample_size = sample_size.float()
else:
sample_size = float(sample_size)
# gather logging outputs from all replicas
if self._sync_stats():
train_time = self._local_cumulative_training_time()
logging_outputs, (sample_size, ooms, total_train_time) = self._aggregate_logging_outputs(
logging_outputs, sample_size, ooms, train_time, ignore=is_dummy_batch,
)
self._cumulative_training_time = total_train_time / self.data_parallel_world_size
if hasattr(self.model, 'all_reduce'):
self.model.all_reduce()
overflow = False
try:
if self.tpu and self.data_parallel_world_size > 1:
import torch_xla.core.xla_model as xm
gradients = xm._fetch_gradients(self.optimizer.optimizer)
xm.all_reduce('sum', gradients, scale=1.0 / self.data_parallel_world_size)
with torch.autograd.profiler.record_function("multiply-grads"):
# multiply gradients by (# GPUs / sample_size) since DDP
# already normalizes by the number of GPUs. Thus we get
# (sum_of_gradients / sample_size).
if not self.args.use_bmuf:
self.optimizer.multiply_grads(self.data_parallel_world_size / sample_size)
elif sample_size > 0: # BMUF needs to check sample size
num = self.data_parallel_world_size if self._sync_stats() else 1
self.optimizer.multiply_grads(num / sample_size)
with torch.autograd.profiler.record_function("clip-grads"):
# clip grads
grad_norm = self.clip_grad_norm(self.args.clip_norm)
# check that grad norms are consistent across workers
if (
not self.args.use_bmuf
and self.args.distributed_wrapper != 'SlowMo'
and not self.tpu
):
self._check_grad_norms(grad_norm)
with torch.autograd.profiler.record_function("optimizer"):
# take an optimization step
self.optimizer.step()
except FloatingPointError:
# re-run the forward and backward pass with hooks attached to print
# out where it fails
with NanDetector(self.model):
self.task.train_step(
sample, self.model, self.criterion, self.optimizer, self.get_num_updates(),
ignore_grad=False
)
raise
except OverflowError as e:
overflow = True
logger.info("NOTE: overflow detected, " + str(e))
grad_norm = torch.tensor(0.).cuda()
self.zero_grad()
except RuntimeError as e:
if "out of memory" in str(e):
self._log_oom(e)
logger.error("OOM during optimization, irrecoverable")
raise e
# Some distributed wrappers (e.g., SlowMo) need access to the optimizer after the step
if hasattr(self.model, 'perform_additional_optimizer_actions'):
if hasattr(self.optimizer, 'fp32_params'):
self.model.perform_additional_optimizer_actions(self.optimizer.optimizer, self.optimizer.fp32_params)
else:
self.model.perform_additional_optimizer_actions(self.optimizer.optimizer)
if not overflow or self.args.distributed_wrapper == 'SlowMo':
self.set_num_updates(self.get_num_updates() + 1)
if self.tpu:
# mark step on TPUs
import torch_xla.core.xla_model as xm
xm.mark_step()
# only log stats every log_interval steps
# this causes wps to be misreported when log_interval > 1
logging_output = {}
if self.get_num_updates() % self.args.log_interval == 0:
logging_output = self._reduce_and_log_stats(
logging_outputs, sample_size, grad_norm,
)
# log whenever there's an XLA compilation, since these
# slow down training and may indicate opportunities for
# optimization
self._check_xla_compilation()
else:
# log stats
logging_output = self._reduce_and_log_stats(
logging_outputs, sample_size, grad_norm,
)
# clear CUDA cache to reduce memory fragmentation
if (
self.cuda
and self.args.empty_cache_freq > 0
and (
(self.get_num_updates() + self.args.empty_cache_freq - 1)
% self.args.empty_cache_freq
) == 0
):
torch.cuda.empty_cache()
if self.args.fp16:
metrics.log_scalar(
"loss_scale",
self.optimizer.scaler.loss_scale,
priority=700,
round=4,
weight=0,
)
metrics.log_stop_time("train_wall")
return logging_output
@metrics.aggregate("valid")
def valid_step(self, sample, raise_oom=False):
"""Do forward pass in evaluation mode."""
if self._dummy_batch == "DUMMY":
self._dummy_batch = sample
if self.tpu:
import torch_xla.core.xla_model as xm
xm.rendezvous('valid_step') # wait for all workers
xm.mark_step()
with torch.no_grad():
self.model.eval()
self.criterion.eval()
sample = self._prepare_sample(sample)
if sample is None:
sample = self._prepare_sample(self._dummy_batch)
is_dummy_batch = True
else:
is_dummy_batch = False
try:
_loss, sample_size, logging_output = self.task.valid_step(
sample, self.model, self.criterion
)
except RuntimeError as e:
if "out of memory" in str(e):
self._log_oom(e)
if not raise_oom:
logger.warning(
"ran out of memory in validation step, retrying batch"
)
for p in self.model.parameters():
if p.grad is not None:
p.grad = None # free some memory
if self.cuda:
torch.cuda.empty_cache()
return self.valid_step(sample, raise_oom=True)
raise e
logging_outputs = [logging_output]
if is_dummy_batch:
if torch.is_tensor(sample_size):
sample_size.zero_()
else:
sample_size *= 0.
# gather logging outputs from all replicas
if self.data_parallel_world_size > 1:
logging_outputs, (sample_size, ) = self._aggregate_logging_outputs(
logging_outputs, sample_size, ignore=is_dummy_batch,
)
# log validation stats
logging_output = self._reduce_and_log_stats(logging_outputs, sample_size)
return logging_output
def zero_grad(self):
self.optimizer.zero_grad()
def lr_step(self, epoch, val_loss=None):
"""Adjust the learning rate at the end of the epoch."""
self.lr_scheduler.step(epoch, val_loss)
# prefer updating the LR based on the number of steps
return self.lr_step_update()
def lr_step_update(self):
"""Update the learning rate after each update."""
new_lr = self.lr_scheduler.step_update(self.get_num_updates())
metrics.log_scalar("lr", new_lr, weight=0, priority=300)
return new_lr
def get_lr(self):
"""Get the current learning rate."""
return self.optimizer.get_lr()
def get_model(self):
"""Get the (non-wrapped) model instance."""
return self._model
def get_criterion(self):
"""Get the (non-wrapped) criterion instance."""
return self._criterion
def get_meter(self, name):
"""[deprecated] Get a specific meter by name."""
from fairseq import meters
if 'get_meter' not in self._warn_once:
self._warn_once.add('get_meter')
utils.deprecation_warning(
'Trainer.get_meter is deprecated. Please use fairseq.metrics instead.'
)
train_meters = metrics.get_meters("train")
if train_meters is None:
train_meters = {}
if name == "train_loss" and "loss" in train_meters:
return train_meters["loss"]
elif name == "train_nll_loss":
# support for legacy train.py, which assumed this meter is
# always initialized
m = train_meters.get("nll_loss", None)
return m or meters.AverageMeter()
elif name == "wall":
# support for legacy train.py, which assumed this meter is
# always initialized
m = metrics.get_meter("default", "wall")
return m or meters.TimeMeter()
elif name == "wps":
m = metrics.get_meter("train", "wps")
return m or meters.TimeMeter()
elif name in {"valid_loss", "valid_nll_loss"}:
# support for legacy train.py, which assumed these meters
# are always initialized
k = name[len("valid_"):]
m = metrics.get_meter("valid", k)
return m or meters.AverageMeter()
elif name == "oom":
return meters.AverageMeter()
elif name in train_meters:
return train_meters[name]
return None
def get_num_updates(self):
"""Get the number of parameters updates."""
return self._num_updates
def set_num_updates(self, num_updates):
"""Set the number of parameters updates."""
self._num_updates = num_updates
self.lr_step_update()
if self.quantizer:
self.quantizer.step_update(self._num_updates)
metrics.log_scalar("num_updates", self._num_updates, weight=0, priority=200)
def clip_grad_norm(self, clip_norm):
return self.optimizer.clip_grad_norm(clip_norm, aggregate_norm_fn=None)
def cumulative_training_time(self):
if self._cumulative_training_time is None:
# single GPU
return self._local_cumulative_training_time()
else:
return self._cumulative_training_time
def _local_cumulative_training_time(self):
"""Aggregate training time in seconds."""
return time.time() - self._start_time + self._previous_training_time
def _prepare_sample(self, sample):
if sample == "DUMMY":
raise Exception(
"Trying to use an uninitialized 'dummy' batch. This usually indicates "
"that the total number of batches is smaller than the number of "
"participating GPUs. Try reducing the batch size or using fewer GPUs."
)
if sample is None or len(sample) == 0:
return None
if self.cuda:
if self.pipeline_model_parallel:
if 'target' in sample:
sample['target'] = utils.move_to_cuda(sample['target'], device=self.last_device)
else:
sample = utils.move_to_cuda(sample)
def apply_half(t):
if t.dtype is torch.float32:
return t.half()
return t
def apply_bfloat16(t):
if t.dtype is torch.float32:
return t.to(dtype=torch.bfloat16)
return t
if self.args.fp16:
sample = utils.apply_to_sample(apply_half, sample)
if self.args.bf16:
sample = utils.apply_to_sample(apply_bfloat16, sample)
return sample
def _set_seed(self):
# Set seed based on args.seed and the update number so that we get
# reproducible results when resuming from checkpoints
seed = self.args.seed + self.get_num_updates()
utils.set_torch_seed(seed)
def _sync_stats(self):
# Return True if it's using multiple GPUs and DDP or multiple GPUs with
# BMUF and it's a bmuf sync with warmup iterations completed before.
if self.data_parallel_world_size == 1:
return False
elif self.args.use_bmuf:
return (
(self.get_num_updates() + 1) % self.args.global_sync_iter == 0
and (self.get_num_updates() + 1) > self.args.warmup_iterations
)
else:
return True
def _log_oom(self, exc):
msg = "OOM: Ran out of memory with exception: {}".format(exc)
logger.warning(msg)
if torch.cuda.is_available() and hasattr(torch.cuda, "memory_summary"):
for device_idx in range(torch.cuda.device_count()):
logger.warning(torch.cuda.memory_summary(device=device_idx))
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | true |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/legacy_distributed_data_parallel.py | fairseq/legacy_distributed_data_parallel.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
A modified version of the legacy DistributedDataParallel module that uses c10d
communication primitives. This version is simpler than the latest PyTorch
version and is useful for debugging. Notably it does not overlap gradient
communication with the backward pass, which makes it slower but more robust
than the PyTorch version.
This version also supports the *no_sync* context manager, which allows faster
training with `--update-freq`.
"""
from collections import OrderedDict
from contextlib import contextmanager
import copy
import torch
from torch import nn
from torch.autograd import Variable
from . import distributed_utils
class LegacyDistributedDataParallel(nn.Module):
"""Implements distributed data parallelism at the module level.
A simplified version of :class:`torch.nn.parallel.DistributedDataParallel`.
This version uses a c10d process group for communication and does not
broadcast buffers.
Args:
module (~torch.nn.Module): module to be parallelized
world_size (int): number of parallel workers
process_group (optional): the c10d process group to be used for
distributed data all-reduction. If None, the default process group
will be used.
buffer_size (int, optional): number of elements to buffer before
performing all-reduce (default: 256M).
"""
def __init__(self, module, world_size, process_group=None, buffer_size=2**28):
super().__init__()
self.module = module
self.world_size = world_size
self.process_group = process_group
# Never use a bigger buffer than the number of model params
self.buffer_size = min(buffer_size, sum(p.numel() for p in module.parameters()))
self.buffer = None
# We can also forcibly accumulate grads locally and only do the
# all-reduce at some later time
self.accumulate_grads = False
# make per-device lists of parameters
paramlists = OrderedDict()
for param in self.module.parameters():
device = param.device
if paramlists.get(device) is None:
paramlists[device] = []
paramlists[device] += [param]
self.per_device_params = list(paramlists.values())
def __getstate__(self):
attrs = copy.copy(self.__dict__)
return attrs
def __setstate__(self, state):
super().__setstate__(state)
@contextmanager
def no_sync(self):
"""A context manager to disable gradient synchronization."""
old_accumulate_grads = self.accumulate_grads
self.accumulate_grads = True
yield
self.accumulate_grads = old_accumulate_grads
def forward(self, *inputs, **kwargs):
return self.module(*inputs, **kwargs)
def all_reduce(self):
"""
This function must be called explicitly after backward to reduce
gradients. There is no automatic hook like c10d.
"""
def all_reduce_params(params):
buffer = self.buffer
nonzero_buffer = False
if len(params) > 1:
offset = 0
for p in params:
sz = p.numel()
if p.grad is not None:
buffer[offset:offset+sz].copy_(p.grad.data.view(-1))
nonzero_buffer = True
else:
buffer[offset:offset+sz].zero_()
offset += sz
else:
# we only have a single grad to all-reduce
p = params[0]
if p.grad is not None:
buffer = p.grad.data
nonzero_buffer = True
elif p.numel() <= self.buffer.numel():
buffer = buffer[:p.numel()]
buffer.zero_()
else:
buffer = torch.zeros_like(p)
if nonzero_buffer:
buffer.div_(self.world_size)
distributed_utils.all_reduce(buffer, self.process_group)
# copy all-reduced grads back into their original place
offset = 0
for p in params:
sz = p.numel()
if p.grad is not None:
p.grad.data.copy_(buffer[offset:offset+sz].view_as(p))
else:
p.grad = buffer[offset:offset+sz].view_as(p).clone()
offset += sz
def reduction_fn():
# This function only needs to be called once
if self.accumulate_grads:
return
if self.buffer is None:
self.buffer = next(self.module.parameters()).new(self.buffer_size)
for params in self.per_device_params:
# All-reduce the gradients in buckets
offset = 0
buffered_params = []
for param in params:
if not param.requires_grad:
continue
if param.grad is None:
param.grad = torch.zeros_like(param)
if param.grad.requires_grad:
raise RuntimeError("DistributedDataParallel only works "
"with gradients that don't require "
"grad")
sz = param.numel()
if sz > self.buffer.numel():
# all-reduce big params directly
all_reduce_params([param])
else:
if offset + sz > self.buffer.numel():
all_reduce_params(buffered_params)
offset = 0
buffered_params.clear()
buffered_params.append(param)
offset += sz
if len(buffered_params) > 0:
all_reduce_params(buffered_params)
reduction_fn()
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/__init__.py | fairseq/__init__.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
__all__ = ['pdb']
__version__ = '0.9.0'
import sys
# backwards compatibility to support `from fairseq.meters import AverageMeter`
from fairseq.logging import meters, metrics, progress_bar # noqa
sys.modules['fairseq.meters'] = meters
sys.modules['fairseq.metrics'] = metrics
sys.modules['fairseq.progress_bar'] = progress_bar
import fairseq.criterions # noqa
import fairseq.models # noqa
import fairseq.modules # noqa
import fairseq.optim # noqa
import fairseq.optim.lr_scheduler # noqa
import fairseq.pdb # noqa
import fairseq.scoring # noqa
import fairseq.tasks # noqa
import fairseq.token_generation_constraints # noqa
import fairseq.benchmark # noqa
import fairseq.model_parallel # noqa
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/tokenizer.py | fairseq/tokenizer.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import re
SPACE_NORMALIZER = re.compile(r"\s+")
def tokenize_line(line):
line = SPACE_NORMALIZER.sub(" ", line)
line = line.strip()
return line.split()
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/sequence_generator.py | fairseq/sequence_generator.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import Dict, List, Optional
import torch
import torch.nn as nn
from fairseq import search, utils
from fairseq.data import data_utils
from fairseq.models import FairseqIncrementalDecoder
from fairseq.models.fairseq_encoder import EncoderOut
from torch import Tensor
class SequenceGenerator(nn.Module):
def __init__(
self,
models,
tgt_dict,
beam_size=1,
max_len_a=0,
max_len_b=200,
min_len=1,
normalize_scores=True,
len_penalty=1.0,
unk_penalty=0.0,
temperature=1.0,
match_source_len=False,
no_repeat_ngram_size=0,
search_strategy=None,
eos=None,
symbols_to_strip_from_output=None,
):
"""Generates translations of a given source sentence.
Args:
models (List[~fairseq.models.FairseqModel]): ensemble of models,
currently support fairseq.models.TransformerModel for scripting
beam_size (int, optional): beam width (default: 1)
max_len_a/b (int, optional): generate sequences of maximum length
ax + b, where x is the source length
min_len (int, optional): the minimum length of the generated output
(not including end-of-sentence)
normalize_scores (bool, optional): normalize scores by the length
of the output (default: True)
len_penalty (float, optional): length penalty, where <1.0 favors
shorter, >1.0 favors longer sentences (default: 1.0)
unk_penalty (float, optional): unknown word penalty, where <0
produces more unks, >0 produces fewer (default: 0.0)
temperature (float, optional): temperature, where values
>1.0 produce more uniform samples and values <1.0 produce
sharper samples (default: 1.0)
match_source_len (bool, optional): outputs should match the source
length (default: False)
"""
super().__init__()
if isinstance(models, EnsembleModel):
self.model = models
else:
self.model = EnsembleModel(models)
self.tgt_dict = tgt_dict
self.pad = tgt_dict.pad()
self.unk = tgt_dict.unk()
self.eos = tgt_dict.eos() if eos is None else eos
self.symbols_to_strip_from_output = (
symbols_to_strip_from_output.union({self.eos})
if symbols_to_strip_from_output is not None else {self.eos})
self.vocab_size = len(tgt_dict)
self.beam_size = beam_size
# the max beam size is the dictionary size - 1, since we never select pad
self.beam_size = min(beam_size, self.vocab_size - 1)
self.max_len_a = max_len_a
self.max_len_b = max_len_b
self.min_len = min_len
self.normalize_scores = normalize_scores
self.len_penalty = len_penalty
self.unk_penalty = unk_penalty
self.temperature = temperature
self.match_source_len = match_source_len
self.no_repeat_ngram_size = no_repeat_ngram_size
assert temperature > 0, "--temperature must be greater than 0"
self.search = (
search.BeamSearch(tgt_dict) if search_strategy is None else search_strategy
)
# We only need to set src_lengths in LengthConstrainedBeamSearch.
# As a module attribute, setting it would break in multithread
# settings when the model is shared.
self.should_set_src_lengths = hasattr(self.search, 'needs_src_lengths') and self.search.needs_src_lengths
self.model.eval()
def cuda(self):
self.model.cuda()
return self
@torch.no_grad()
def forward(
self,
sample: Dict[str, Dict[str, Tensor]],
prefix_tokens: Optional[Tensor] = None,
bos_token: Optional[int] = None,
):
"""Generate a batch of translations.
Args:
sample (dict): batch
prefix_tokens (torch.LongTensor, optional): force decoder to begin
with these tokens
bos_token (int, optional): beginning of sentence token
(default: self.eos)
"""
return self._generate(sample, prefix_tokens, bos_token=bos_token)
# TODO(myleott): unused, deprecate after pytorch-translate migration
def generate_batched_itr(self, data_itr, beam_size=None, cuda=False, timer=None):
"""Iterate over a batched dataset and yield individual translations.
Args:
cuda (bool, optional): use GPU for generation
timer (StopwatchMeter, optional): time generations
"""
for sample in data_itr:
s = utils.move_to_cuda(sample) if cuda else sample
if "net_input" not in s:
continue
input = s["net_input"]
# model.forward normally channels prev_output_tokens into the decoder
# separately, but SequenceGenerator directly calls model.encoder
encoder_input = {
k: v for k, v in input.items() if k != "prev_output_tokens"
}
if timer is not None:
timer.start()
with torch.no_grad():
hypos = self.generate(encoder_input)
if timer is not None:
timer.stop(sum(len(h[0]["tokens"]) for h in hypos))
for i, id in enumerate(s["id"].data):
# remove padding
src = utils.strip_pad(input["src_tokens"].data[i, :], self.pad)
ref = (
utils.strip_pad(s["target"].data[i, :], self.pad)
if s["target"] is not None
else None
)
yield id, src, ref, hypos[i]
@torch.no_grad()
def generate(self, models, sample: Dict[str, Dict[str, Tensor]], **kwargs):
"""Generate translations. Match the api of other fairseq generators.
Args:
models (List[~fairseq.models.FairseqModel]): ensemble of models
sample (dict): batch
prefix_tokens (torch.LongTensor, optional): force decoder to begin
with these tokens
constraints (torch.LongTensor, optional): force decoder to include
the list of constraints
bos_token (int, optional): beginning of sentence token
(default: self.eos)
"""
return self._generate(sample, **kwargs)
def _generate(
self,
sample: Dict[str, Dict[str, Tensor]],
prefix_tokens: Optional[Tensor] = None,
constraints: Optional[Tensor] = None,
bos_token: Optional[int] = None,
):
incremental_states = torch.jit.annotate(
List[Dict[str, Dict[str, Optional[Tensor]]]],
[
torch.jit.annotate(Dict[str, Dict[str, Optional[Tensor]]], {})
for i in range(self.model.models_size)
],
)
net_input = sample["net_input"]
if 'src_tokens' in net_input:
src_tokens = net_input['src_tokens']
# length of the source text being the character length except EndOfSentence and pad
src_lengths = (src_tokens.ne(self.eos) & src_tokens.ne(self.pad)).long().sum(dim=1)
elif 'source' in net_input:
src_tokens = net_input['source']
src_lengths = (
net_input['padding_mask'].size(-1) - net_input['padding_mask'].sum(-1)
if net_input['padding_mask'] is not None
else torch.tensor(src_tokens.size(-1)).to(src_tokens)
)
else:
raise Exception('expected src_tokens or source in net input')
# bsz: total number of sentences in beam
# Note that src_tokens may have more than 2 dimenions (i.e. audio features)
bsz, src_len = src_tokens.size()[:2]
beam_size = self.beam_size
if constraints is not None and not self.search.supports_constraints:
raise NotImplementedError("Target-side constraints were provided, but search method doesn't support them")
# Initialize constraints, when active
self.search.init_constraints(constraints, beam_size)
max_len: int = -1
if self.match_source_len:
max_len = src_lengths.max().item()
else:
max_len = min(
int(self.max_len_a * src_len + self.max_len_b),
# exclude the EOS marker
self.model.max_decoder_positions() - 1,
)
assert (
self.min_len <= max_len
), "min_len cannot be larger than max_len, please adjust these!"
# compute the encoder output for each beam
encoder_outs = self.model.forward_encoder(net_input)
# placeholder of indices for bsz * beam_size to hold tokens and accumulative scores
new_order = torch.arange(bsz).view(-1, 1).repeat(1, beam_size).view(-1)
new_order = new_order.to(src_tokens.device).long()
encoder_outs = self.model.reorder_encoder_out(encoder_outs, new_order)
# ensure encoder_outs is a List.
assert encoder_outs is not None
# initialize buffers
scores = (
torch.zeros(bsz * beam_size, max_len + 1).to(src_tokens).float()
) # +1 for eos; pad is never chosen for scoring
tokens = (
torch.zeros(bsz * beam_size, max_len + 2)
.to(src_tokens)
.long()
.fill_(self.pad)
) # +2 for eos and pad
tokens[:, 0] = self.eos if bos_token is None else bos_token
attn: Optional[Tensor] = None
# A list that indicates candidates that should be ignored.
# For example, suppose we're sampling and have already finalized 2/5
# samples. Then cands_to_ignore would mark 2 positions as being ignored,
# so that we only finalize the remaining 3 samples.
cands_to_ignore = (
torch.zeros(bsz, beam_size).to(src_tokens).eq(-1)
) # forward and backward-compatible False mask
# list of completed sentences
finalized = torch.jit.annotate(
List[List[Dict[str, Tensor]]],
[torch.jit.annotate(List[Dict[str, Tensor]], []) for i in range(bsz)],
) # contains lists of dictionaries of infomation about the hypothesis being finalized at each step
finished = [
False for i in range(bsz)
] # a boolean array indicating if the sentence at the index is finished or not
num_remaining_sent = bsz # number of sentences remaining
# number of candidate hypos per step
cand_size = 2 * beam_size # 2 x beam size in case half are EOS
# offset arrays for converting between different indexing schemes
bbsz_offsets = (torch.arange(0, bsz) * beam_size).unsqueeze(1).type_as(tokens)
cand_offsets = torch.arange(0, cand_size).type_as(tokens)
reorder_state: Optional[Tensor] = None
batch_idxs: Optional[Tensor] = None
for step in range(max_len + 1): # one extra step for EOS marker
# reorder decoder internal states based on the prev choice of beams
# print(f'step: {step}')
if reorder_state is not None:
if batch_idxs is not None:
# update beam indices to take into account removed sentences
corr = batch_idxs - torch.arange(batch_idxs.numel()).type_as(
batch_idxs
)
reorder_state.view(-1, beam_size).add_(
corr.unsqueeze(-1) * beam_size
)
self.model.reorder_incremental_state(incremental_states, reorder_state)
encoder_outs = self.model.reorder_encoder_out(
encoder_outs, reorder_state
)
lprobs, avg_attn_scores = self.model.forward_decoder(
tokens[:, : step + 1],
encoder_outs,
incremental_states,
self.temperature,
)
lprobs[lprobs != lprobs] = torch.tensor(-math.inf).to(lprobs)
lprobs[:, self.pad] = -math.inf # never select pad
lprobs[:, self.unk] -= self.unk_penalty # apply unk penalty
# handle max length constraint
if step >= max_len:
lprobs[:, : self.eos] = -math.inf
lprobs[:, self.eos + 1 :] = -math.inf
# handle prefix tokens (possibly with different lengths)
if (
prefix_tokens is not None
and step < prefix_tokens.size(1)
and step < max_len
):
lprobs, tokens, scores = self._prefix_tokens(
step, lprobs, scores, tokens, prefix_tokens, beam_size
)
elif step < self.min_len:
# minimum length constraint (does not apply if using prefix_tokens)
lprobs[:, self.eos] = -math.inf
# Record attention scores, only support avg_attn_scores is a Tensor
if avg_attn_scores is not None:
if attn is None:
attn = torch.empty(
bsz * beam_size, avg_attn_scores.size(1), max_len + 2
).to(scores)
attn[:, :, step + 1].copy_(avg_attn_scores)
scores = scores.type_as(lprobs)
eos_bbsz_idx = torch.empty(0).to(
tokens
) # indices of hypothesis ending with eos (finished sentences)
eos_scores = torch.empty(0).to(
scores
) # scores of hypothesis ending with eos (finished sentences)
if self.should_set_src_lengths:
self.search.set_src_lengths(src_lengths)
if self.no_repeat_ngram_size > 0:
lprobs = self._no_repeat_ngram(tokens, lprobs, bsz, beam_size, step)
# Shape: (batch, cand_size)
cand_scores, cand_indices, cand_beams = self.search.step(
step,
lprobs.view(bsz, -1, self.vocab_size),
scores.view(bsz, beam_size, -1)[:, :, :step],
)
# cand_bbsz_idx contains beam indices for the top candidate
# hypotheses, with a range of values: [0, bsz*beam_size),
# and dimensions: [bsz, cand_size]
cand_bbsz_idx = cand_beams.add(bbsz_offsets)
# finalize hypotheses that end in eos
# Shape of eos_mask: (batch size, beam size)
eos_mask = cand_indices.eq(self.eos) & cand_scores.ne(-math.inf)
eos_mask[:, :beam_size][cands_to_ignore] = torch.tensor(0).to(eos_mask)
# only consider eos when it's among the top beam_size indices
# Now we know what beam item(s) to finish
# Shape: 1d list of absolute-numbered
eos_bbsz_idx = torch.masked_select(
cand_bbsz_idx[:, :beam_size], mask=eos_mask[:, :beam_size]
)
finalized_sents: List[int] = []
if eos_bbsz_idx.numel() > 0:
eos_scores = torch.masked_select(
cand_scores[:, :beam_size], mask=eos_mask[:, :beam_size]
)
finalized_sents = self.finalize_hypos(
step,
eos_bbsz_idx,
eos_scores,
tokens,
scores,
finalized,
finished,
beam_size,
attn,
src_lengths,
max_len,
)
num_remaining_sent -= len(finalized_sents)
assert num_remaining_sent >= 0
if num_remaining_sent == 0:
break
assert step < max_len
# Remove finalized sentences (ones for which {beam_size}
# finished hypotheses have been generated) from the batch.
if len(finalized_sents) > 0:
new_bsz = bsz - len(finalized_sents)
# construct batch_idxs which holds indices of batches to keep for the next pass
batch_mask = torch.ones(bsz, dtype=torch.bool, device=cand_indices.device)
batch_mask[finalized_sents] = False
# TODO replace `nonzero(as_tuple=False)` after TorchScript supports it
batch_idxs = torch.arange(bsz, device=cand_indices.device).masked_select(batch_mask)
# Choose the subset of the hypothesized constraints that will continue
self.search.prune_sentences(batch_idxs)
eos_mask = eos_mask[batch_idxs]
cand_beams = cand_beams[batch_idxs]
bbsz_offsets.resize_(new_bsz, 1)
cand_bbsz_idx = cand_beams.add(bbsz_offsets)
cand_scores = cand_scores[batch_idxs]
cand_indices = cand_indices[batch_idxs]
if prefix_tokens is not None:
prefix_tokens = prefix_tokens[batch_idxs]
src_lengths = src_lengths[batch_idxs]
cands_to_ignore = cands_to_ignore[batch_idxs]
scores = scores.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
tokens = tokens.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
if attn is not None:
attn = attn.view(bsz, -1)[batch_idxs].view(
new_bsz * beam_size, attn.size(1), -1
)
bsz = new_bsz
else:
batch_idxs = None
# Set active_mask so that values > cand_size indicate eos hypos
# and values < cand_size indicate candidate active hypos.
# After, the min values per row are the top candidate active hypos
# Rewrite the operator since the element wise or is not supported in torchscript.
eos_mask[:, :beam_size] = ~((~cands_to_ignore) & (~eos_mask[:, :beam_size]))
active_mask = torch.add(
eos_mask.type_as(cand_offsets) * cand_size,
cand_offsets[: eos_mask.size(1)],
)
# get the top beam_size active hypotheses, which are just
# the hypos with the smallest values in active_mask.
# {active_hypos} indicates which {beam_size} hypotheses
# from the list of {2 * beam_size} candidates were
# selected. Shapes: (batch size, beam size)
new_cands_to_ignore, active_hypos = torch.topk(
active_mask, k=beam_size, dim=1, largest=False
)
# update cands_to_ignore to ignore any finalized hypos.
cands_to_ignore = new_cands_to_ignore.ge(cand_size)[:, :beam_size]
# Make sure there is at least one active item for each sentence in the batch.
assert (~cands_to_ignore).any(dim=1).all()
# update cands_to_ignore to ignore any finalized hypos
# {active_bbsz_idx} denotes which beam number is continued for each new hypothesis (a beam
# can be selected more than once).
active_bbsz_idx = torch.gather(cand_bbsz_idx, dim=1, index=active_hypos)
active_scores = torch.gather(cand_scores, dim=1, index=active_hypos)
active_bbsz_idx = active_bbsz_idx.view(-1)
active_scores = active_scores.view(-1)
# copy tokens and scores for active hypotheses
# Set the tokens for each beam (can select the same row more than once)
tokens[:, : step + 1] = torch.index_select(
tokens[:, : step + 1], dim=0, index=active_bbsz_idx
)
# Select the next token for each of them
tokens.view(bsz, beam_size, -1)[:, :, step + 1] = torch.gather(
cand_indices, dim=1, index=active_hypos
)
if step > 0:
scores[:, :step] = torch.index_select(
scores[:, :step], dim=0, index=active_bbsz_idx
)
scores.view(bsz, beam_size, -1)[:, :, step] = torch.gather(
cand_scores, dim=1, index=active_hypos
)
# Update constraints based on which candidates were selected for the next beam
self.search.update_constraints(active_hypos)
# copy attention for active hypotheses
if attn is not None:
attn[:, :, : step + 2] = torch.index_select(
attn[:, :, : step + 2], dim=0, index=active_bbsz_idx
)
# reorder incremental state in decoder
reorder_state = active_bbsz_idx
# sort by score descending
for sent in range(len(finalized)):
scores = torch.tensor([float(elem["score"].item()) for elem in finalized[sent]])
_, sorted_scores_indices = torch.sort(scores, descending=True)
finalized[sent] = [finalized[sent][ssi] for ssi in sorted_scores_indices]
finalized[sent] = torch.jit.annotate(List[Dict[str, Tensor]], finalized[sent])
return finalized
def _prefix_tokens(
self, step: int, lprobs, scores, tokens, prefix_tokens, beam_size: int
):
"""Handle prefix tokens"""
prefix_toks = prefix_tokens[:, step].unsqueeze(-1).repeat(1, beam_size).view(-1)
prefix_lprobs = lprobs.gather(-1, prefix_toks.unsqueeze(-1))
prefix_mask = prefix_toks.ne(self.pad)
lprobs[prefix_mask] = torch.tensor(-math.inf).to(lprobs)
lprobs[prefix_mask] = lprobs[prefix_mask].scatter(
-1, prefix_toks[prefix_mask].unsqueeze(-1), prefix_lprobs[prefix_mask]
)
# if prefix includes eos, then we should make sure tokens and
# scores are the same across all beams
eos_mask = prefix_toks.eq(self.eos)
if eos_mask.any():
# validate that the first beam matches the prefix
first_beam = tokens[eos_mask].view(-1, beam_size, tokens.size(-1))[
:, 0, 1 : step + 1
]
eos_mask_batch_dim = eos_mask.view(-1, beam_size)[:, 0]
target_prefix = prefix_tokens[eos_mask_batch_dim][:, :step]
assert (first_beam == target_prefix).all()
# copy tokens, scores and lprobs from the first beam to all beams
tokens = self.replicate_first_beam(tokens, eos_mask_batch_dim, beam_size)
scores = self.replicate_first_beam(scores, eos_mask_batch_dim, beam_size)
lprobs = self.replicate_first_beam(lprobs, eos_mask_batch_dim, beam_size)
return lprobs, tokens, scores
def replicate_first_beam(self, tensor, mask, beam_size: int):
tensor = tensor.view(-1, beam_size, tensor.size(-1))
tensor[mask] = tensor[mask][:, :1, :]
return tensor.view(-1, tensor.size(-1))
def finalize_hypos(
self,
step: int,
bbsz_idx,
eos_scores,
tokens,
scores,
finalized: List[List[Dict[str, Tensor]]],
finished: List[bool],
beam_size: int,
attn: Optional[Tensor],
src_lengths,
max_len: int,
):
"""Finalize hypothesis, store finalized information in `finalized`, and change `finished` accordingly.
A sentence is finalized when {beam_size} finished items have been collected for it.
Returns number of sentences (not beam items) being finalized.
These will be removed from the batch and not processed further.
Args:
bbsz_idx (Tensor):
"""
assert bbsz_idx.numel() == eos_scores.numel()
# clone relevant token and attention tensors.
# tokens is (batch * beam, max_len). So the index_select
# gets the newly EOS rows, then selects cols 1..{step + 2}
tokens_clone = tokens.index_select(0, bbsz_idx)[
:, 1 : step + 2
] # skip the first index, which is EOS
tokens_clone[:, step] = self.eos
attn_clone = (
attn.index_select(0, bbsz_idx)[:, :, 1 : step + 2]
if attn is not None
else None
)
# compute scores per token position
pos_scores = scores.index_select(0, bbsz_idx)[:, : step + 1]
pos_scores[:, step] = eos_scores
# convert from cumulative to per-position scores
pos_scores[:, 1:] = pos_scores[:, 1:] - pos_scores[:, :-1]
# normalize sentence-level scores
if self.normalize_scores:
eos_scores /= (step + 1) ** self.len_penalty
# cum_unfin records which sentences in the batch are finished.
# It helps match indexing between (a) the original sentences
# in the batch and (b) the current, possibly-reduced set of
# sentences.
cum_unfin: List[int] = []
prev = 0
for f in finished:
if f:
prev += 1
else:
cum_unfin.append(prev)
# set() is not supported in script export
# The keys here are of the form "{sent}_{unfin_idx}", where
# "unfin_idx" is the index in the current (possibly reduced)
# list of sentences, and "sent" is the index in the original,
# unreduced batch
sents_seen: Dict[str, Optional[Tensor]] = {}
# For every finished beam item
for i in range(bbsz_idx.size()[0]):
idx = bbsz_idx[i]
score = eos_scores[i]
# sentence index in the current (possibly reduced) batch
unfin_idx = idx // beam_size
# sentence index in the original (unreduced) batch
sent = unfin_idx + cum_unfin[unfin_idx]
# print(f"{step} FINISHED {idx} {score} {sent}={unfin_idx} {cum_unfin}")
# Cannot create dict for key type '(int, int)' in torchscript.
# The workaround is to cast int to string
seen = str(sent.item()) + "_" + str(unfin_idx.item())
if seen not in sents_seen:
sents_seen[seen] = None
if self.match_source_len and step > src_lengths[unfin_idx]:
score = torch.tensor(-math.inf).to(score)
# An input sentence (among those in a batch) is finished when
# beam_size hypotheses have been collected for it
if len(finalized[sent]) < beam_size:
if attn_clone is not None:
# remove padding tokens from attn scores
hypo_attn = attn_clone[i]
else:
hypo_attn = torch.empty(0)
finalized[sent].append(
{
"tokens": tokens_clone[i],
"score": score,
"attention": hypo_attn, # src_len x tgt_len
"alignment": torch.empty(0),
"positional_scores": pos_scores[i],
}
)
newly_finished: List[int] = []
for seen in sents_seen.keys():
# check termination conditions for this sentence
sent: int = int(float(seen.split("_")[0]))
unfin_idx: int = int(float(seen.split("_")[1]))
if not finished[sent] and self.is_finished(
step, unfin_idx, max_len, len(finalized[sent]), beam_size
):
finished[sent] = True
newly_finished.append(unfin_idx)
return newly_finished
def is_finished(
self,
step: int,
unfin_idx: int,
max_len: int,
finalized_sent_len: int,
beam_size: int,
):
"""
Check whether decoding for a sentence is finished, which
occurs when the list of finalized sentences has reached the
beam size, or when we reach the maximum length.
"""
assert finalized_sent_len <= beam_size
if finalized_sent_len == beam_size or step == max_len:
return True
return False
def calculate_banned_tokens(
self,
tokens,
step: int,
gen_ngrams: List[Dict[str, List[int]]],
no_repeat_ngram_size: int,
bbsz_idx: int,
):
tokens_list: List[int] = tokens[
bbsz_idx, step + 2 - no_repeat_ngram_size : step + 1
].tolist()
# before decoding the next token, prevent decoding of ngrams that have already appeared
ngram_index = ",".join([str(x) for x in tokens_list])
return gen_ngrams[bbsz_idx].get(ngram_index, torch.jit.annotate(List[int], []))
def transpose_list(self, l: List[List[int]]):
# GeneratorExp aren't supported in TS so ignoring the lint
min_len = min([len(x) for x in l]) # noqa
l2 = [[row[i] for row in l] for i in range(min_len)]
return l2
def _no_repeat_ngram(self, tokens, lprobs, bsz: int, beam_size: int, step: int):
# for each beam and batch sentence, generate a list of previous ngrams
gen_ngrams: List[Dict[str, List[int]]] = [
torch.jit.annotate(Dict[str, List[int]], {})
for bbsz_idx in range(bsz * beam_size)
]
cpu_tokens = tokens.cpu()
for bbsz_idx in range(bsz * beam_size):
gen_tokens: List[int] = cpu_tokens[bbsz_idx].tolist()
for ngram in self.transpose_list(
[gen_tokens[i:] for i in range(self.no_repeat_ngram_size)]
):
key = ",".join([str(x) for x in ngram[:-1]])
gen_ngrams[bbsz_idx][key] = gen_ngrams[bbsz_idx].get(
key, torch.jit.annotate(List[int], [])
) + [ngram[-1]]
if step + 2 - self.no_repeat_ngram_size >= 0:
# no banned tokens if we haven't generated no_repeat_ngram_size tokens yet
banned_tokens = [
self.calculate_banned_tokens(
tokens, step, gen_ngrams, self.no_repeat_ngram_size, bbsz_idx
)
for bbsz_idx in range(bsz * beam_size)
]
else:
banned_tokens = [
torch.jit.annotate(List[int], []) for bbsz_idx in range(bsz * beam_size)
]
for bbsz_idx in range(bsz * beam_size):
lprobs[bbsz_idx][
torch.tensor(banned_tokens[bbsz_idx]).long()
] = torch.tensor(-math.inf).to(lprobs)
return lprobs
class EnsembleModel(nn.Module):
"""A wrapper around an ensemble of models."""
def __init__(self, models):
super().__init__()
self.models_size = len(models)
# method '__len__' is not supported in ModuleList for torch script
self.single_model = models[0]
self.models = nn.ModuleList(models)
self.has_incremental: bool = False
if all(
hasattr(m, "decoder") and isinstance(m.decoder, FairseqIncrementalDecoder)
for m in models
):
self.has_incremental = True
def forward(self):
pass
def has_encoder(self):
return hasattr(self.single_model, "encoder")
def has_incremental_states(self):
return self.has_incremental
def max_decoder_positions(self):
return min([m.max_decoder_positions() for m in self.models])
@torch.jit.export
def forward_encoder(self, net_input: Dict[str, Tensor]):
if not self.has_encoder():
return None
return [
model.encoder.forward_torchscript(net_input)
for model in self.models
]
@torch.jit.export
def forward_decoder(
self,
tokens,
encoder_outs: List[EncoderOut],
incremental_states: List[Dict[str, Dict[str, Optional[Tensor]]]],
temperature: float = 1.0,
):
log_probs = []
avg_attn: Optional[Tensor] = None
encoder_out: Optional[EncoderOut] = None
for i, model in enumerate(self.models):
if self.has_encoder():
encoder_out = encoder_outs[i]
# decode each model
if self.has_incremental_states():
decoder_out = model.decoder.forward(
tokens,
encoder_out=encoder_out,
incremental_state=incremental_states[i],
)
else:
decoder_out = model.decoder.forward(tokens, encoder_out=encoder_out)
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | true |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/file_io.py | fairseq/file_io.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import shutil
from typing import List, Optional
try:
from fvcore.common.file_io import PathManager as FVCorePathManager
except ImportError:
FVCorePathManager = None
class PathManager:
"""
Wrapper for insulating OSS I/O (using Python builtin operations) from
fvcore's PathManager abstraction (for transparently handling various
internal backends).
"""
@staticmethod
def open(
path: str,
mode: str = "r",
buffering: int = -1,
encoding: Optional[str] = None,
errors: Optional[str] = None,
newline: Optional[str] = None,
):
if FVCorePathManager:
return FVCorePathManager.open(
path=path,
mode=mode,
buffering=buffering,
encoding=encoding,
errors=errors,
newline=newline,
)
return open(
path,
mode=mode,
buffering=buffering,
encoding=encoding,
errors=errors,
newline=newline,
)
@staticmethod
def copy(src_path: str, dst_path: str, overwrite: bool = False) -> bool:
if FVCorePathManager:
return FVCorePathManager.copy(
src_path=src_path, dst_path=dst_path, overwrite=overwrite
)
return shutil.copyfile(src_path, dst_path)
@staticmethod
def get_local_path(path: str, **kwargs) -> str:
if FVCorePathManager:
return FVCorePathManager.get_local_path(path, **kwargs)
return path
@staticmethod
def exists(path: str) -> bool:
if FVCorePathManager:
return FVCorePathManager.exists(path)
return os.path.exists(path)
@staticmethod
def isfile(path: str) -> bool:
if FVCorePathManager:
return FVCorePathManager.isfile(path)
return os.path.isfile(path)
@staticmethod
def ls(path: str) -> List[str]:
if FVCorePathManager:
return FVCorePathManager.ls(path)
return os.listdir(path)
@staticmethod
def mkdirs(path: str) -> None:
if FVCorePathManager:
return FVCorePathManager.mkdirs(path)
os.makedirs(path, exist_ok=True)
@staticmethod
def rm(path: str) -> None:
if FVCorePathManager:
return FVCorePathManager.rm(path)
os.remove(path)
@staticmethod
def chmod(path: str, mode: int) -> None:
if "manifold" not in path:
os.chmod(path, mode)
@staticmethod
def register_handler(handler) -> None:
if FVCorePathManager:
return FVCorePathManager.register_handler(handler=handler)
@staticmethod
def copy_from_local(
local_path: str, dst_path: str, overwrite: bool = False, **kwargs
) -> None:
if FVCorePathManager:
return FVCorePathManager.copy_from_local(
local_path=local_path, dst_path=dst_path, overwrite=overwrite, **kwargs
)
return shutil.copyfile(local_path, dst_path)
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/quantization_utils.py | fairseq/quantization_utils.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from fairseq.modules.quantization import pq, quantization_options, scalar
logger = logging.getLogger(__name__)
def quantize_model_scalar(model, args):
quant_noise_scalar = getattr(args, 'quant_noise_scalar', 0)
if quant_noise_scalar > 0:
# quantize_model edits the model in place
scalar.quantize_model_(model, p=quant_noise_scalar, bits=8, update_step=1000)
return model
class Quantizer(object):
def __init__(self, config_path, max_epoch, max_update):
try:
import yaml
except ImportError:
raise ImportError('Please install yaml with: pip install yaml')
# parse config
if config_path:
with open(config_path) as config_file:
config = quantization_options.parse_config_yaml(
yaml.safe_load(config_file)
)
else:
config = quantization_options.parse_config_yaml({})
self.n_centroids_config = config["n_centroids"]
self.block_sizes_config = config["block_sizes"]
self.layers_to_quantize = config["layers_to_quantize"]
# We assume that training will run for a fixed number of epochs
# (or updates) and that we should train for equal durations
# between iterations of PQ.
num_iterations = len(self.layers_to_quantize)
if max_epoch > 0:
assert max_epoch % num_iterations == 0, (
'for iterative PQ, --max-epoch (={}) must be evenly divisible by '
'len(layers_to_quantize) (={})'.format(max_epoch, num_iterations)
)
self.epoch_schedule = max_epoch // num_iterations
else:
self.epoch_schedule = None
if max_update > 0:
assert max_update % num_iterations == 0, (
'for iterative PQ, --max-update (={}) must be evenly divisible by '
'len(layers_to_quantize) (={})'.format(max_update, num_iterations)
)
self.update_schedule = max_update // num_iterations
else:
self.update_schedule = None
assert (self.epoch_schedule is not None) ^ (self.update_schedule is not None), \
'for iterative PQ, cannot specify both --max-update and --max-epoch'
# 0 is a special value for quantization step, which will force
# the first call to begin_epoch() to call step()
self.quantization_step = 0
def set_trainer(self, trainer):
self.trainer = trainer
self.size_tracker = pq.SizeTracker(self.trainer.get_model())
def step(self):
"""Move to the next stage of quantization."""
if self.quantization_step >= len(self.layers_to_quantize):
# Maybe we just finished the last training step or we loaded
# a checkpoint for an iterative PQ model which previously
# finished training. Either way, don't quantize again.
return
logger.info(
'quantizing model (step={}; layers_to_quantize[step]={})'.format(
self.quantization_step, self.layers_to_quantize[self.quantization_step]
)
)
quantized_layers = pq.quantize_model_(
self.trainer.get_model(),
self.size_tracker,
self.layers_to_quantize,
self.block_sizes_config,
self.n_centroids_config,
step=self.quantization_step,
)
logger.info('quantized layers: {}'.format(quantized_layers))
logger.info(self.size_tracker)
self.quantization_step += 1
# reintialize the Trainer since model parameters have changed
self.trainer.reinitialize()
def begin_epoch(self, epoch):
"""Called at the beginning of each epoch (epochs start at 1)."""
if (
(
self.epoch_schedule is not None
and epoch > 0
and (epoch - 1) % self.epoch_schedule == 0
)
# we always step once in the beginning, even if using
# update-based quantization
or self.quantization_step == 0
):
self.step()
def step_update(self, num_updates):
"""Called at the end of each step."""
if (
self.update_schedule is not None
and num_updates > 0
and num_updates % self.update_schedule == 0
):
self.step()
def state_dict(self):
return {
'n_centroids_config': self.n_centroids_config,
'block_sizes_config': self.block_sizes_config,
'layers_to_quantize': self.layers_to_quantize,
'epoch_schedule': self.epoch_schedule,
'update_schedule': self.update_schedule,
'quantization_step': self.quantization_step,
}
def load_state_dict(self, state_dict):
self.n_centroids_config = state_dict['n_centroids_config']
self.block_sizes_config = state_dict['block_sizes_config']
self.layers_to_quantize = state_dict['layers_to_quantize']
self.epoch_schedule = state_dict['epoch_schedule']
self.update_schedule = state_dict['update_schedule']
self.quantization_step = state_dict['quantization_step']
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/pdb.py | fairseq/pdb.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import multiprocessing
import os
import pdb
import sys
__all__ = ['set_trace']
_stdin = [None]
_stdin_lock = multiprocessing.Lock()
try:
_stdin_fd = sys.stdin.fileno()
except Exception:
_stdin_fd = None
class MultiprocessingPdb(pdb.Pdb):
"""A Pdb wrapper that works in a multiprocessing environment.
Usage: `from fairseq import pdb; pdb.set_trace()`
"""
def __init__(self):
pdb.Pdb.__init__(self, nosigint=True)
def _cmdloop(self):
stdin_bak = sys.stdin
with _stdin_lock:
try:
if _stdin_fd is not None:
if not _stdin[0]:
_stdin[0] = os.fdopen(_stdin_fd)
sys.stdin = _stdin[0]
self.cmdloop()
finally:
sys.stdin = stdin_bak
def set_trace():
pdb = MultiprocessingPdb()
pdb.set_trace(sys._getframe().f_back)
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/iterative_refinement_generator.py | fairseq/iterative_refinement_generator.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import namedtuple
import torch
import numpy as np
from fairseq import utils
DecoderOut = namedtuple('IterativeRefinementDecoderOut', [
'output_tokens',
'output_scores',
'attn',
'step',
'max_step',
'history'
])
class IterativeRefinementGenerator(object):
def __init__(
self,
tgt_dict,
models=None,
eos_penalty=0.0,
max_iter=10,
max_ratio=2,
beam_size=1,
decoding_format=None,
retain_dropout=False,
adaptive=True,
retain_history=False,
reranking=False,
):
"""
Generates translations based on iterative refinement.
Args:
tgt_dict: target dictionary
eos_penalty: if > 0.0, it penalized early-stopping in decoding
max_iter: maximum number of refinement iterations
max_ratio: generate sequences of maximum length ax, where x is the source length
decoding_format: decoding mode in {'unigram', 'ensemble', 'vote', 'dp', 'bs'}
retain_dropout: retaining dropout in the inference
adaptive: decoding with early stop
"""
self.bos = tgt_dict.bos()
self.pad = tgt_dict.pad()
self.unk = tgt_dict.unk()
self.eos = tgt_dict.eos()
self.vocab_size = len(tgt_dict)
self.eos_penalty = eos_penalty
self.max_iter = max_iter
self.max_ratio = max_ratio
self.beam_size = beam_size
self.reranking = reranking
self.decoding_format = decoding_format
self.retain_dropout = retain_dropout
self.retain_history = retain_history
self.adaptive = adaptive
self.models = models
def generate_batched_itr(
self,
data_itr,
maxlen_a=None,
maxlen_b=None,
cuda=False,
timer=None,
prefix_size=0,
):
"""Iterate over a batched dataset and yield individual translations.
Args:
maxlen_a/b: generate sequences of maximum length ax + b,
where x is the source sentence length.
cuda: use GPU for generation
timer: StopwatchMeter for timing generations.
"""
for sample in data_itr:
if "net_input" not in sample:
continue
if timer is not None:
timer.start()
with torch.no_grad():
hypos = self.generate(
self.models,
sample,
prefix_tokens=sample["target"][:, :prefix_size]
if prefix_size > 0
else None,
)
if timer is not None:
timer.stop(sample["ntokens"])
for i, id in enumerate(sample["id"]):
# remove padding
src = utils.strip_pad(sample["net_input"]["src_tokens"][i, :], self.pad)
ref = utils.strip_pad(sample["target"][i, :], self.pad)
yield id, src, ref, hypos[i]
@torch.no_grad()
def generate(self, models, sample, prefix_tokens=None, constraints=None):
if constraints is not None:
raise NotImplementedError("Constrained decoding with the IterativeRefinementGenerator is not supported")
# TODO: iterative refinement generator does not support ensemble for now.
if not self.retain_dropout:
for model in models:
model.eval()
model, reranker = models[0], None
if self.reranking:
assert len(models) > 1, "Assuming the last checkpoint is the reranker"
assert self.beam_size > 1, "Reranking requires multiple translation for each example"
reranker = models[-1]
models = models[:-1]
if len(models) > 1 and hasattr(model, 'enable_ensemble'):
assert model.allow_ensemble, "{} does not support ensembling".format(model.__class__.__name__)
model.enable_ensemble(models)
# TODO: better encoder inputs?
src_tokens = sample["net_input"]["src_tokens"]
src_lengths = sample["net_input"]["src_lengths"]
bsz, src_len = src_tokens.size()
# initialize
encoder_out = model.forward_encoder([src_tokens, src_lengths])
prev_decoder_out = model.initialize_output_tokens(encoder_out, src_tokens)
if self.beam_size > 1:
assert model.allow_length_beam, \
"{} does not support decoding with length beam.".format(model.__class__.__name__)
# regenerate data based on length-beam
length_beam_order = utils.new_arange(src_tokens, self.beam_size, bsz).t().reshape(-1)
encoder_out = model.encoder.reorder_encoder_out(encoder_out, length_beam_order)
prev_decoder_out = model.regenerate_length_beam(prev_decoder_out, self.beam_size)
bsz = bsz * self.beam_size
sent_idxs = torch.arange(bsz)
prev_output_tokens = prev_decoder_out.output_tokens.clone()
if self.retain_history:
prev_decoder_out = prev_decoder_out._replace(history=[prev_output_tokens])
finalized = [[] for _ in range(bsz)]
def is_a_loop(x, y, s, a):
b, l_x, l_y = x.size(0), x.size(1), y.size(1)
if l_x > l_y:
y = torch.cat([y, x.new_zeros(b, l_x - l_y).fill_(self.pad)], 1)
s = torch.cat([s, s.new_zeros(b, l_x - l_y)], 1)
if a is not None:
a = torch.cat([a, a.new_zeros(b, l_x - l_y, a.size(2))], 1)
elif l_x < l_y:
x = torch.cat([x, y.new_zeros(b, l_y - l_x).fill_(self.pad)], 1)
return (x == y).all(1), y, s, a
def finalized_hypos(step, prev_out_token, prev_out_score, prev_out_attn):
cutoff = prev_out_token.ne(self.pad)
tokens = prev_out_token[cutoff]
if prev_out_score is None:
scores, score = None, None
else:
scores = prev_out_score[cutoff]
score = scores.mean()
if prev_out_attn is None:
hypo_attn, alignment = None, None
else:
hypo_attn = prev_out_attn[cutoff]
alignment = hypo_attn.max(dim=1)[1]
return {
"steps": step,
"tokens": tokens,
"positional_scores": scores,
"score": score,
"hypo_attn": hypo_attn,
"alignment": alignment,
}
for step in range(self.max_iter + 1):
decoder_options = {
"eos_penalty": self.eos_penalty,
"max_ratio": self.max_ratio,
"decoding_format": self.decoding_format,
}
prev_decoder_out = prev_decoder_out._replace(
step=step,
max_step=self.max_iter + 1,
)
decoder_out = model.forward_decoder(
prev_decoder_out, encoder_out, **decoder_options
)
if self.adaptive:
# terminate if there is a loop
terminated, out_tokens, out_scores, out_attn = is_a_loop(
prev_output_tokens, decoder_out.output_tokens, decoder_out.output_scores, decoder_out.attn
)
decoder_out = decoder_out._replace(
output_tokens=out_tokens,
output_scores=out_scores,
attn=out_attn,
)
else:
terminated = decoder_out.output_tokens.new_zeros(decoder_out.output_tokens.size(0)).bool()
if step == self.max_iter: # reach last iteration, terminate
terminated.fill_(1)
# collect finalized sentences
finalized_idxs = sent_idxs[terminated]
finalized_tokens = decoder_out.output_tokens[terminated]
finalized_scores = decoder_out.output_scores[terminated]
finalized_attn = (
None if (decoder_out.attn is None or decoder_out.attn.size(0) == 0) else decoder_out.attn[terminated]
)
if self.retain_history:
finalized_history_tokens = [h[terminated] for h in decoder_out.history]
for i in range(finalized_idxs.size(0)):
finalized[finalized_idxs[i]] = [
finalized_hypos(
step,
finalized_tokens[i],
finalized_scores[i],
None if finalized_attn is None else finalized_attn[i],
)
]
if self.retain_history:
finalized[finalized_idxs[i]][0]['history'] = []
for j in range(len(finalized_history_tokens)):
finalized[finalized_idxs[i]][0]['history'].append(
finalized_hypos(
step,
finalized_history_tokens[j][i],
None, None
)
)
# check if all terminated
if terminated.sum() == terminated.size(0):
break
# for next step
not_terminated = ~terminated
prev_decoder_out = decoder_out._replace(
output_tokens=decoder_out.output_tokens[not_terminated],
output_scores=decoder_out.output_scores[not_terminated],
attn=decoder_out.attn[not_terminated]
if (decoder_out.attn is not None and decoder_out.attn.size(0) > 0)
else None,
history=[h[not_terminated] for h in decoder_out.history]
if decoder_out.history is not None
else None,
)
encoder_out = model.encoder.reorder_encoder_out(encoder_out, not_terminated.nonzero(as_tuple=False).squeeze())
sent_idxs = sent_idxs[not_terminated]
prev_output_tokens = prev_decoder_out.output_tokens.clone()
if self.beam_size > 1:
if reranker is not None:
finalized = self.rerank(
reranker, finalized, [src_tokens, src_lengths], self.beam_size
)
# aggregate information from length beam
finalized = [
finalized[np.argmax(
[finalized[self.beam_size * i + j][0]['score'] for j in range(self.beam_size)]
) + self.beam_size * i] for i in range(len(finalized) // self.beam_size)
]
return finalized
def rerank(self, reranker, finalized, encoder_input, beam_size):
def rebuild_batch(finalized):
finalized_tokens = [f[0]['tokens'] for f in finalized]
finalized_maxlen = max(f.size(0) for f in finalized_tokens)
final_output_tokens = finalized_tokens[0].new_zeros(len(finalized_tokens), finalized_maxlen).fill_(self.pad)
for i, f in enumerate(finalized_tokens):
final_output_tokens[i, :f.size(0)] = f
return final_output_tokens
final_output_tokens = rebuild_batch(finalized)
final_output_tokens[:, 0] = self.eos # autoregressive model assumes starting with EOS
reranker_encoder_out = reranker.encoder(*encoder_input)
length_beam_order = utils.new_arange(
final_output_tokens, beam_size, reranker_encoder_out.encoder_out.size(1)).t().reshape(-1)
reranker_encoder_out = reranker.encoder.reorder_encoder_out(reranker_encoder_out, length_beam_order)
reranking_scores = reranker.get_normalized_probs(
reranker.decoder(final_output_tokens[:, :-1], reranker_encoder_out), True, None)
reranking_scores = reranking_scores.gather(2, final_output_tokens[:, 1:, None])
reranking_masks = final_output_tokens[:, 1:].ne(self.pad)
reranking_scores = reranking_scores[:, :, 0].masked_fill_(~reranking_masks, 0).sum(1)
reranking_scores = reranking_scores / reranking_masks.sum(1).type_as(reranking_scores)
for i in range(len(finalized)):
finalized[i][0]['score'] = reranking_scores[i]
return finalized
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/sequence_scorer.py | fairseq/sequence_scorer.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import sys
from fairseq import utils
class SequenceScorer(object):
"""Scores the target for a given source sentence."""
def __init__(
self, tgt_dict, softmax_batch=None, compute_alignment=False, eos=None,
symbols_to_strip_from_output=None,
):
self.pad = tgt_dict.pad()
self.eos = tgt_dict.eos() if eos is None else eos
self.softmax_batch = softmax_batch or sys.maxsize
assert self.softmax_batch > 0
self.compute_alignment = compute_alignment
self.symbols_to_strip_from_output = (
symbols_to_strip_from_output.union({self.eos})
if symbols_to_strip_from_output is not None else {self.eos})
@torch.no_grad()
def generate(self, models, sample, **kwargs):
"""Score a batch of translations."""
net_input = sample['net_input']
def batch_for_softmax(dec_out, target):
# assumes decoder_out[0] is the only thing needed (may not be correct for future models!)
first, rest = dec_out[0], dec_out[1:]
bsz, tsz, dim = first.shape
if bsz * tsz < self.softmax_batch:
yield dec_out, target, True
else:
flat = first.contiguous().view(1, -1, dim)
flat_tgt = target.contiguous().view(flat.shape[:-1])
s = 0
while s < flat.size(1):
e = s + self.softmax_batch
yield (flat[:, s:e],) + rest, flat_tgt[:, s:e], False
s = e
def gather_target_probs(probs, target):
probs = probs.gather(
dim=2,
index=target.unsqueeze(-1),
)
return probs
orig_target = sample['target']
# compute scores for each model in the ensemble
avg_probs = None
avg_attn = None
for model in models:
model.eval()
decoder_out = model(**net_input)
attn = decoder_out[1] if len(decoder_out) > 1 else None
if type(attn) is dict:
attn = attn.get('attn', None)
batched = batch_for_softmax(decoder_out, orig_target)
probs, idx = None, 0
for bd, tgt, is_single in batched:
sample['target'] = tgt
curr_prob = model.get_normalized_probs(bd, log_probs=len(models) == 1, sample=sample).data
if is_single:
probs = gather_target_probs(curr_prob, orig_target)
else:
if probs is None:
probs = curr_prob.new(orig_target.numel())
step = curr_prob.size(0) * curr_prob.size(1)
end = step + idx
tgt_probs = gather_target_probs(curr_prob.view(tgt.shape + (curr_prob.size(-1),)), tgt)
probs[idx:end] = tgt_probs.view(-1)
idx = end
sample['target'] = orig_target
probs = probs.view(sample['target'].shape)
if avg_probs is None:
avg_probs = probs
else:
avg_probs.add_(probs)
if attn is not None and torch.is_tensor(attn):
attn = attn.data
if avg_attn is None:
avg_attn = attn
else:
avg_attn.add_(attn)
if len(models) > 1:
avg_probs.div_(len(models))
avg_probs.log_()
if avg_attn is not None:
avg_attn.div_(len(models))
bsz = avg_probs.size(0)
hypos = []
start_idxs = sample['start_indices'] if 'start_indices' in sample else [0] * bsz
for i in range(bsz):
# remove padding from ref
ref = utils.strip_pad(sample['target'][i, start_idxs[i]:], self.pad) \
if sample['target'] is not None else None
tgt_len = ref.numel()
avg_probs_i = avg_probs[i][start_idxs[i]:start_idxs[i] + tgt_len]
score_i = avg_probs_i.sum() / tgt_len
if avg_attn is not None:
avg_attn_i = avg_attn[i]
if self.compute_alignment:
alignment = utils.extract_hard_alignment(
avg_attn_i,
sample['net_input']['src_tokens'][i],
sample['target'][i],
self.pad,
self.eos,
)
else:
alignment = None
else:
avg_attn_i = alignment = None
hypos.append([{
'tokens': ref,
'score': score_i,
'attention': avg_attn_i,
'alignment': alignment,
'positional_scores': avg_probs_i,
}])
return hypos
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/options.py | fairseq/options.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import sys
from typing import Callable, List, Optional
import torch
# this import is for backward compatibility
from fairseq.utils import csv_str_list, eval_str_list, eval_str_dict, eval_bool # noqa
from fairseq import utils
from fairseq.data.indexed_dataset import get_available_dataset_impl
def get_preprocessing_parser(default_task="translation"):
parser = get_parser("Preprocessing", default_task)
add_preprocess_args(parser)
return parser
def get_training_parser(default_task="translation"):
parser = get_parser("Trainer", default_task)
add_dataset_args(parser, train=True)
add_distributed_training_args(parser)
add_model_args(parser)
add_optimization_args(parser)
add_checkpoint_args(parser)
return parser
def get_generation_parser(interactive=False, default_task="translation"):
parser = get_parser("Generation", default_task)
add_dataset_args(parser, gen=True)
add_distributed_training_args(parser, default_world_size=1)
add_generation_args(parser)
if interactive:
add_interactive_args(parser)
return parser
def get_interactive_generation_parser(default_task="translation"):
return get_generation_parser(interactive=True, default_task=default_task)
def get_eval_lm_parser(default_task="language_modeling"):
parser = get_parser("Evaluate Language Model", default_task)
add_dataset_args(parser, gen=True)
add_distributed_training_args(parser, default_world_size=1)
add_eval_lm_args(parser)
return parser
def get_validation_parser(default_task=None):
parser = get_parser("Validation", default_task)
add_dataset_args(parser, train=True)
add_distributed_training_args(parser, default_world_size=1)
group = parser.add_argument_group("Evaluation")
add_common_eval_args(group)
return parser
def parse_args_and_arch(
parser: argparse.ArgumentParser,
input_args: List[str] = None,
parse_known: bool = False,
suppress_defaults: bool = False,
modify_parser: Optional[Callable[[argparse.ArgumentParser], None]] = None,
):
"""
Args:
parser (ArgumentParser): the parser
input_args (List[str]): strings to parse, defaults to sys.argv
parse_known (bool): only parse known arguments, similar to
`ArgumentParser.parse_known_args`
suppress_defaults (bool): parse while ignoring all default values
modify_parser (Optional[Callable[[ArgumentParser], None]]):
function to modify the parser, e.g., to set default values
"""
if suppress_defaults:
# Parse args without any default values. This requires us to parse
# twice, once to identify all the necessary task/model args, and a second
# time with all defaults set to None.
args = parse_args_and_arch(
parser,
input_args=input_args,
parse_known=parse_known,
suppress_defaults=False,
)
suppressed_parser = argparse.ArgumentParser(add_help=False, parents=[parser])
suppressed_parser.set_defaults(**{k: None for k, v in vars(args).items()})
args = suppressed_parser.parse_args(input_args)
return argparse.Namespace(
**{k: v for k, v in vars(args).items() if v is not None}
)
from fairseq.models import ARCH_MODEL_REGISTRY, ARCH_CONFIG_REGISTRY
# Before creating the true parser, we need to import optional user module
# in order to eagerly import custom tasks, optimizers, architectures, etc.
usr_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
usr_parser.add_argument("--user-dir", default=None)
usr_args, _ = usr_parser.parse_known_args(input_args)
utils.import_user_module(usr_args)
if modify_parser is not None:
modify_parser(parser)
# The parser doesn't know about model/criterion/optimizer-specific args, so
# we parse twice. First we parse the model/criterion/optimizer, then we
# parse a second time after adding the *-specific arguments.
# If input_args is given, we will parse those args instead of sys.argv.
args, _ = parser.parse_known_args(input_args)
# Add model-specific args to parser.
if hasattr(args, "arch"):
model_specific_group = parser.add_argument_group(
"Model-specific configuration",
# Only include attributes which are explicitly given as command-line
# arguments or which have default values.
argument_default=argparse.SUPPRESS,
)
ARCH_MODEL_REGISTRY[args.arch].add_args(model_specific_group)
# Add *-specific args to parser.
from fairseq.registry import REGISTRIES
for registry_name, REGISTRY in REGISTRIES.items():
choice = getattr(args, registry_name, None)
if choice is not None:
cls = REGISTRY["registry"][choice]
if hasattr(cls, "add_args"):
cls.add_args(parser)
if hasattr(args, "task"):
from fairseq.tasks import TASK_REGISTRY
TASK_REGISTRY[args.task].add_args(parser)
if getattr(args, "use_bmuf", False):
# hack to support extra args for block distributed data parallelism
from fairseq.optim.bmuf import FairseqBMUF
FairseqBMUF.add_args(parser)
# Modify the parser a second time, since defaults may have been reset
if modify_parser is not None:
modify_parser(parser)
# Parse a second time.
if parse_known:
args, extra = parser.parse_known_args(input_args)
else:
args = parser.parse_args(input_args)
extra = None
# Post-process args.
if hasattr(args, "max_sentences_valid") and args.max_sentences_valid is None:
args.max_sentences_valid = args.max_sentences
if hasattr(args, "max_tokens_valid") and args.max_tokens_valid is None:
args.max_tokens_valid = args.max_tokens
if getattr(args, "memory_efficient_fp16", False):
args.fp16 = True
if getattr(args, "memory_efficient_bf16", False):
args.bf16 = True
args.tpu = getattr(args, "tpu", False)
args.bf16 = getattr(args, "bf16", False)
if args.bf16:
args.tpu = True
if args.tpu and args.fp16:
raise ValueError("Cannot combine --fp16 and --tpu, use --bf16 on TPUs")
if getattr(args, "seed", None) is None:
args.seed = 1 # default seed for training
args.no_seed_provided = True
else:
args.no_seed_provided = False
# Apply architecture configuration.
if hasattr(args, "arch"):
ARCH_CONFIG_REGISTRY[args.arch](args)
if parse_known:
return args, extra
else:
return args
def get_parser(desc, default_task="translation"):
# Before creating the true parser, we need to import optional user module
# in order to eagerly import custom tasks, optimizers, architectures, etc.
usr_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
usr_parser.add_argument("--user-dir", default=None)
usr_args, _ = usr_parser.parse_known_args()
utils.import_user_module(usr_args)
parser = argparse.ArgumentParser(allow_abbrev=False)
# fmt: off
parser.add_argument('--no-progress-bar', action='store_true', help='disable progress bar')
parser.add_argument('--log-interval', type=int, default=100, metavar='N',
help='log progress every N batches (when progress bar is disabled)')
parser.add_argument('--log-format', default=None, help='log format to use',
choices=['json', 'none', 'simple', 'tqdm'])
parser.add_argument('--tensorboard-logdir', metavar='DIR', default='',
help='path to save logs for tensorboard, should match --logdir '
'of running tensorboard (default: no tensorboard logging)')
parser.add_argument('--seed', default=None, type=int, metavar='N',
help='pseudo random number generator seed')
parser.add_argument('--cpu', action='store_true', help='use CPU instead of CUDA')
parser.add_argument('--tpu', action='store_true', help='use TPU instead of CUDA')
parser.add_argument('--bf16', action='store_true', help='use bfloat16; implies --tpu')
parser.add_argument('--fp16', action='store_true', help='use FP16')
parser.add_argument('--memory-efficient-bf16', action='store_true',
help='use a memory-efficient version of BF16 training; implies --bf16')
parser.add_argument('--memory-efficient-fp16', action='store_true',
help='use a memory-efficient version of FP16 training; implies --fp16')
parser.add_argument('--fp16-no-flatten-grads', action='store_true',
help='don\'t flatten FP16 grads tensor')
parser.add_argument('--fp16-init-scale', default=2 ** 7, type=int,
help='default FP16 loss scale')
parser.add_argument('--fp16-scale-window', type=int,
help='number of updates before increasing loss scale')
parser.add_argument('--fp16-scale-tolerance', default=0.0, type=float,
help='pct of updates that can overflow before decreasing the loss scale')
parser.add_argument('--min-loss-scale', default=1e-4, type=float, metavar='D',
help='minimum FP16 loss scale, after which training is stopped')
parser.add_argument('--threshold-loss-scale', type=float,
help='threshold FP16 loss scale from below')
parser.add_argument('--user-dir', default=None,
help='path to a python module containing custom extensions (tasks and/or architectures)')
parser.add_argument('--empty-cache-freq', default=0, type=int,
help='how often to clear the PyTorch CUDA cache (0 to disable)')
parser.add_argument('--all-gather-list-size', default=16384, type=int,
help='number of bytes reserved for gathering stats from workers')
parser.add_argument('--model-parallel-size', type=int, metavar='N',
default=1,
help='total number of GPUs to parallelize model over')
parser.add_argument('--checkpoint-suffix', default='',
help='suffix to add to the checkpoint file name')
parser.add_argument('--quantization-config-path', default=None,
help='path to quantization config file')
parser.add_argument('--profile', action='store_true', help='enable autograd profiler emit_nvtx')
from fairseq.registry import REGISTRIES
for registry_name, REGISTRY in REGISTRIES.items():
parser.add_argument(
'--' + registry_name.replace('_', '-'),
default=REGISTRY['default'],
choices=REGISTRY['registry'].keys(),
)
# Task definitions can be found under fairseq/tasks/
from fairseq.tasks import TASK_REGISTRY
parser.add_argument('--task', metavar='TASK', default=default_task,
choices=TASK_REGISTRY.keys(),
help='task')
# fmt: on
return parser
def add_preprocess_args(parser):
group = parser.add_argument_group("Preprocessing")
# fmt: off
group.add_argument("-s", "--source-lang", default=None, metavar="SRC",
help="source language")
group.add_argument("-t", "--target-lang", default=None, metavar="TARGET",
help="target language")
group.add_argument("--trainpref", metavar="FP", default=None,
help="train file prefix")
group.add_argument("--validpref", metavar="FP", default=None,
help="comma separated, valid file prefixes")
group.add_argument("--testpref", metavar="FP", default=None,
help="comma separated, test file prefixes")
group.add_argument("--align-suffix", metavar="FP", default=None,
help="alignment file suffix")
group.add_argument("--destdir", metavar="DIR", default="data-bin",
help="destination dir")
group.add_argument("--thresholdtgt", metavar="N", default=0, type=int,
help="map words appearing less than threshold times to unknown")
group.add_argument("--thresholdsrc", metavar="N", default=0, type=int,
help="map words appearing less than threshold times to unknown")
group.add_argument("--tgtdict", metavar="FP",
help="reuse given target dictionary")
group.add_argument("--srcdict", metavar="FP",
help="reuse given source dictionary")
group.add_argument("--nwordstgt", metavar="N", default=-1, type=int,
help="number of target words to retain")
group.add_argument("--nwordssrc", metavar="N", default=-1, type=int,
help="number of source words to retain")
group.add_argument("--alignfile", metavar="ALIGN", default=None,
help="an alignment file (optional)")
parser.add_argument('--dataset-impl', metavar='FORMAT', default='mmap',
choices=get_available_dataset_impl(),
help='output dataset implementation')
group.add_argument("--joined-dictionary", action="store_true",
help="Generate joined dictionary")
group.add_argument("--only-source", action="store_true",
help="Only process the source language")
group.add_argument("--padding-factor", metavar="N", default=8, type=int,
help="Pad dictionary size to be multiple of N")
group.add_argument("--workers", metavar="N", default=1, type=int,
help="number of parallel workers")
# fmt: on
return parser
def add_dataset_args(parser, train=False, gen=False):
group = parser.add_argument_group("Dataset and data loading")
# fmt: off
group.add_argument('--num-workers', default=1, type=int, metavar='N',
help='how many subprocesses to use for data loading')
group.add_argument('--skip-invalid-size-inputs-valid-test', action='store_true',
help='ignore too long or too short lines in valid and test set')
group.add_argument('--max-tokens', type=int, metavar='N',
help='maximum number of tokens in a batch')
group.add_argument('--max-sentences', '--batch-size', type=int, metavar='N',
help='maximum number of sentences in a batch')
group.add_argument('--required-batch-size-multiple', default=8, type=int, metavar='N',
help='batch size will either be less than this value, '
'or a multiple of this value')
parser.add_argument('--dataset-impl', metavar='FORMAT',
choices=get_available_dataset_impl(),
help='output dataset implementation')
group.add_argument('--data-buffer-size', default=10, type=int, metavar='N',
help='number of batches to preload')
if train:
group.add_argument('--train-subset', default='train', metavar='SPLIT',
help='data subset to use for training (e.g. train, valid, test)')
group.add_argument('--valid-subset', default='valid', metavar='SPLIT',
help='comma separated list of data subsets to use for validation'
' (e.g. train, valid, test)')
group.add_argument('--validate-interval', type=int, default=1, metavar='N',
help='validate every N epochs')
group.add_argument('--validate-interval-updates', type=int, default=0, metavar='N',
help='validate every N updates')
group.add_argument('--validate-after-updates', type=int, default=0, metavar='N',
help='dont validate until reaching this many updates')
group.add_argument('--fixed-validation-seed', default=None, type=int, metavar='N',
help='specified random seed for validation')
group.add_argument('--disable-validation', action='store_true',
help='disable validation')
group.add_argument('--max-tokens-valid', type=int, metavar='N',
help='maximum number of tokens in a validation batch'
' (defaults to --max-tokens)')
group.add_argument('--max-sentences-valid', type=int, metavar='N',
help='maximum number of sentences in a validation batch'
' (defaults to --max-sentences)')
group.add_argument('--curriculum', default=0, type=int, metavar='N',
help='don\'t shuffle batches for first N epochs')
if gen:
group.add_argument('--gen-subset', default='test', metavar='SPLIT',
help='data subset to generate (train, valid, test)')
group.add_argument('--num-shards', default=1, type=int, metavar='N',
help='shard generation over N shards')
group.add_argument('--shard-id', default=0, type=int, metavar='ID',
help='id of the shard to generate (id < num_shards)')
# fmt: on
return group
def add_distributed_training_args(parser, default_world_size=None):
group = parser.add_argument_group("Distributed training")
# fmt: off
if default_world_size is None:
default_world_size = max(1, torch.cuda.device_count())
group.add_argument('--distributed-world-size', type=int, metavar='N',
default=default_world_size,
help='total number of GPUs across all nodes (default: all visible GPUs)')
group.add_argument('--distributed-rank', default=0, type=int,
help='rank of the current worker')
group.add_argument('--distributed-backend', default='nccl', type=str,
help='distributed backend')
group.add_argument('--distributed-init-method', default=None, type=str,
help='typically tcp://hostname:port that will be used to '
'establish initial connetion')
group.add_argument('--distributed-port', default=-1, type=int,
help='port number (not required if using --distributed-init-method)')
group.add_argument('--device-id', '--local_rank', default=0, type=int,
help='which GPU to use (usually configured automatically)')
group.add_argument('--distributed-no-spawn', action='store_true',
help='do not spawn multiple processes even if multiple GPUs are visible')
group.add_argument('--distributed-num-procs', default=None, type=int,
help='number of processes to spawn (usually configured automatically)')
# "c10d" is PyTorch's DDP implementation and provides the fastest
# training. "no_c10d" is a more robust, but slightly slower DDP
# implementation. Try this if you get warning messages about
# inconsistent gradients between workers, or if some of your model
# parameters are not always used.
group.add_argument('--ddp-backend', default='c10d', type=str,
choices=['c10d', 'no_c10d'],
help='DistributedDataParallel backend')
group.add_argument('--bucket-cap-mb', default=25, type=int, metavar='MB',
help='bucket size for reduction')
group.add_argument('--fix-batches-to-gpus', action='store_true',
help='don\'t shuffle batches between GPUs; this reduces overall '
'randomness and may affect precision but avoids the cost of '
're-reading the data')
group.add_argument('--find-unused-parameters', default=False, action='store_true',
help='disable unused parameter detection (not applicable to '
'no_c10d ddp-backend')
group.add_argument('--fast-stat-sync', default=False, action='store_true',
help='[deprecated] this is now defined per Criterion')
group.add_argument('--broadcast-buffers', default=False, action='store_true',
help='Copy non-trainable parameters between GPUs, such as '
'batchnorm population statistics')
group.add_argument('--distributed-wrapper', default='DDP', type=str,
choices=['DDP', 'SlowMo'],
help='DistributedDataParallel backend')
# Add arguments for SlowMo - these will be used when SlowMo is enabled via above
group.add_argument('--slowmo-momentum', default=None, type=float,
help='SlowMo momentum term; by default use 0.0 for 16 GPUs, '
'0.2 for 32 GPUs; 0.5 for 64 GPUs, 0.6 for > 64 GPUs')
group.add_argument('--slowmo-algorithm', default='LocalSGD', choices=['LocalSGD', 'SGP'],
help='whether to use LocalSGD or SGP')
group.add_argument('--localsgd-frequency', default=3, type=int,
help='Local SGD allreduce frequency')
group.add_argument('--nprocs-per-node', type=int, metavar='N',
default=max(1, torch.cuda.device_count()),
help='number of GPUs in each node. An allreduce operation across GPUs in '
'a node is very fast. Hence, we do allreduce across GPUs in a node, '
'and gossip across different nodes')
# Pipeline Parallel Arguments
group.add_argument('--pipeline-model-parallel', default=False, action='store_true',
help='if set, use pipeline model parallelism across GPUs')
group.add_argument('--pipeline-balance', metavar='N1,N2,...,N_K',
type=lambda x: eval_str_list(x, type=int),
help='partition the model into N_K pieces, where each piece '
'contains N_i layers. The sum(args.pipeline_balance) '
'should equal the total number of layers in the model')
group.add_argument('--pipeline-devices', metavar='N1,N2,...,N_K',
type=lambda x: eval_str_list(x, type=int),
help='a list of device indices indicating which device to place '
'each of the N_K partitions. The length of this list should '
'equal the length of the --pipeline-balance argument')
group.add_argument('--pipeline-chunks', type=int, metavar='N',
help='microbatch count for pipeline model parallelism')
group.add_argument('--pipeline-checkpoint', type=str, metavar='STR',
choices=['always', 'never', 'except_last'],
default='never',
help='checkpointing mode for pipeline model parallelism')
# Add argument for ZeRO sharding of OptimizerState(os), gradients(g) and parameters(p)
group.add_argument('--zero-sharding', default='none', type=str,
choices=['none', 'os'],
help='ZeRO sharding')
# fmt: on
return group
def add_optimization_args(parser):
group = parser.add_argument_group("Optimization")
# fmt: off
group.add_argument('--max-epoch', '--me', default=0, type=int, metavar='N',
help='force stop training at specified epoch')
group.add_argument('--max-update', '--mu', default=0, type=int, metavar='N',
help='force stop training at specified update')
group.add_argument('--stop-time-hours', default=0, type=float, metavar='N',
help='force stop training after specified cumulative time (if >0)')
group.add_argument('--clip-norm', default=0.0, type=float, metavar='NORM',
help='clip threshold of gradients')
group.add_argument('--sentence-avg', action='store_true',
help='normalize gradients by the number of sentences in a batch'
' (default is to normalize by number of tokens)')
group.add_argument('--update-freq', default='1', metavar='N1,N2,...,N_K',
type=lambda uf: eval_str_list(uf, type=int),
help='update parameters every N_i batches, when in epoch i')
group.add_argument('--lr', '--learning-rate', default='0.25', type=eval_str_list,
metavar='LR_1,LR_2,...,LR_N',
help='learning rate for the first N epochs; all epochs >N using LR_N'
' (note: this may be interpreted differently depending on --lr-scheduler)')
group.add_argument('--min-lr', default=-1, type=float, metavar='LR',
help='stop training when the learning rate reaches this minimum')
group.add_argument('--use-bmuf', default=False, action='store_true',
help='specify global optimizer for syncing models on different GPUs/shards')
# fmt: on
return group
def add_checkpoint_args(parser):
group = parser.add_argument_group("Checkpointing")
# fmt: off
group.add_argument('--save-dir', metavar='DIR', default='checkpoints',
help='path to save checkpoints')
group.add_argument('--restore-file', default='checkpoint_last.pt',
help='filename from which to load checkpoint '
'(default: <save-dir>/checkpoint_last.pt')
group.add_argument('--finetune-from-model', default=None, type=str,
help='finetune from a pretrained model; '
'note that meters and lr scheduler will be reset')
group.add_argument('--reset-dataloader', action='store_true',
help='if set, does not reload dataloader state from the checkpoint')
group.add_argument('--reset-lr-scheduler', action='store_true',
help='if set, does not load lr scheduler state from the checkpoint')
group.add_argument('--reset-meters', action='store_true',
help='if set, does not load meters from the checkpoint')
group.add_argument('--reset-optimizer', action='store_true',
help='if set, does not load optimizer state from the checkpoint')
group.add_argument('--optimizer-overrides', default="{}", type=str, metavar='DICT',
help='a dictionary used to override optimizer args when loading a checkpoint')
group.add_argument('--save-interval', type=int, default=1, metavar='N',
help='save a checkpoint every N epochs')
group.add_argument('--save-interval-updates', type=int, default=0, metavar='N',
help='save a checkpoint (and validate) every N updates')
group.add_argument('--keep-interval-updates', type=int, default=-1, metavar='N',
help='keep the last N checkpoints saved with --save-interval-updates')
group.add_argument('--keep-last-epochs', type=int, default=-1, metavar='N',
help='keep last N epoch checkpoints')
group.add_argument('--keep-best-checkpoints', type=int, default=-1, metavar='N',
help='keep best N checkpoints based on scores')
group.add_argument('--no-save', action='store_true',
help='don\'t save models or checkpoints')
group.add_argument('--no-epoch-checkpoints', action='store_true',
help='only store last and best checkpoints')
group.add_argument('--no-last-checkpoints', action='store_true',
help='don\'t store last checkpoints')
group.add_argument('--no-save-optimizer-state', action='store_true',
help='don\'t save optimizer-state as part of checkpoint')
group.add_argument('--best-checkpoint-metric', type=str, default='loss',
help='metric to use for saving "best" checkpoints')
group.add_argument('--maximize-best-checkpoint-metric', action='store_true',
help='select the largest metric value for saving "best" checkpoints')
group.add_argument('--patience', type=int, default=-1, metavar='N',
help=('early stop training if valid performance doesn\'t '
'improve for N consecutive validation runs; note '
'that this is influenced by --validate-interval'))
# fmt: on
return group
def add_common_eval_args(group):
# fmt: off
group.add_argument('--path', metavar='FILE',
help='path(s) to model file(s), colon separated')
group.add_argument('--remove-bpe', '--post-process', nargs='?', const='@@ ', default=None,
help='remove BPE tokens before scoring (can be set to sentencepiece)')
group.add_argument('--quiet', action='store_true',
help='only print final scores')
group.add_argument('--model-overrides', default="{}", type=str, metavar='DICT',
help='a dictionary used to override model args at generation '
'that were used during model training')
group.add_argument('--results-path', metavar='RESDIR', type=str, default=None,
help='path to save eval results (optional)"')
# fmt: on
def add_eval_lm_args(parser):
group = parser.add_argument_group("LM Evaluation")
add_common_eval_args(group)
# fmt: off
group.add_argument('--output-word-probs', action='store_true',
help='if set, outputs words and their predicted log probabilities to standard output')
group.add_argument('--output-word-stats', action='store_true',
help='if set, outputs word statistics such as word count, average probability, etc')
group.add_argument('--context-window', default=0, type=int, metavar='N',
help='ensures that every evaluated token has access to a context of at least this size,'
' if possible')
group.add_argument('--softmax-batch', default=sys.maxsize, type=int, metavar='N',
help='if BxT is more than this, will batch the softmax over vocab to this amount of tokens'
' in order to fit into GPU memory')
# fmt: on
def add_generation_args(parser):
group = parser.add_argument_group("Generation")
add_common_eval_args(group)
# fmt: off
group.add_argument('--beam', default=5, type=int, metavar='N',
help='beam size')
group.add_argument('--nbest', default=1, type=int, metavar='N',
help='number of hypotheses to output')
group.add_argument('--max-len-a', default=0, type=float, metavar='N',
help=('generate sequences of maximum length ax + b, '
'where x is the source length'))
group.add_argument('--max-len-b', default=200, type=int, metavar='N',
help=('generate sequences of maximum length ax + b, '
'where x is the source length'))
group.add_argument('--min-len', default=1, type=float, metavar='N',
help=('minimum generation length'))
group.add_argument('--match-source-len', default=False, action='store_true',
help=('generations should match the source length'))
group.add_argument('--no-early-stop', action='store_true',
help='deprecated')
group.add_argument('--unnormalized', action='store_true',
help='compare unnormalized hypothesis scores')
group.add_argument('--no-beamable-mm', action='store_true',
help='don\'t use BeamableMM in attention layers')
group.add_argument('--lenpen', default=1, type=float,
help='length penalty: <1.0 favors shorter, >1.0 favors longer sentences')
group.add_argument('--unkpen', default=0, type=float,
help='unknown word penalty: <0 produces more unks, >0 produces fewer')
group.add_argument('--replace-unk', nargs='?', const=True, default=None,
help='perform unknown replacement (optionally with alignment dictionary)')
group.add_argument('--sacrebleu', action='store_true',
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | true |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/tasks/semisupervised_translation.py | fairseq/tasks/semisupervised_translation.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import OrderedDict
import logging
import os
from fairseq.data import (
BacktranslationDataset,
data_utils,
indexed_dataset,
IndexedCachedDataset,
IndexedDataset,
IndexedRawTextDataset,
LanguagePairDataset,
NoisingDataset,
RoundRobinZipDatasets,
)
from fairseq.models import FairseqMultiModel
from fairseq.sequence_generator import SequenceGenerator
from .multilingual_translation import MultilingualTranslationTask
from . import register_task
from fairseq import utils
logger = logging.getLogger(__name__)
def _get_bt_dataset_key(lang_pair):
return "bt:" + lang_pair
def _get_denoising_dataset_key(lang_pair):
return "denoising:" + lang_pair
# ported from UnsupervisedMT
def parse_lambda_config(x):
"""
Parse the configuration of lambda coefficient (for scheduling).
x = "3" # lambda will be a constant equal to x
x = "0:1,1000:0" # lambda will start from 1 and linearly decrease
# to 0 during the first 1000 iterations
x = "0:0,1000:0,2000:1" # lambda will be equal to 0 for the first 1000
# iterations, then will linearly increase to 1 until iteration 2000
"""
split = x.split(',')
if len(split) == 1:
return float(x), None
else:
split = [s.split(os.pathsep) for s in split]
assert all(len(s) == 2 for s in split)
assert all(k.isdigit() for k, _ in split)
assert all(int(split[i][0]) < int(split[i + 1][0]) for i in range(len(split) - 1))
return float(split[0][1]), [(int(k), float(v)) for k, v in split]
@register_task('semisupervised_translation')
class SemisupervisedTranslationTask(MultilingualTranslationTask):
"""A task for training multiple translation models simultaneously.
We iterate round-robin over batches from multiple language pairs, ordered
according to the `--lang-pairs` argument.
The training loop is roughly:
for i in range(len(epoch)):
for lang_pair in args.lang_pairs:
batch = next_batch_for_lang_pair(lang_pair)
loss = criterion(model_for_lang_pair(lang_pair), batch)
loss.backward()
optimizer.step()
In practice, `next_batch_for_lang_pair` is abstracted in a FairseqDataset
(e.g., `RoundRobinZipDatasets`) and `model_for_lang_pair` is a model that
implements the `FairseqMultiModel` interface.
During inference it is required to specify a single `--source-lang` and
`--target-lang`, instead of `--lang-pairs`.
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
# fmt: off
MultilingualTranslationTask.add_args(parser)
parser.add_argument('--lambda-parallel-config', default="1.0", type=str, metavar='CONFIG',
help='cross-entropy reconstruction coefficient (parallel data). '
'use fixed weight during training if set to floating point number. '
'use piecewise linear function over number of updates to schedule the '
'weight with the format: w0:step0,w1:step1,...')
parser.add_argument('--lambda-denoising-config', default="0.0", type=str, metavar='CONFIG',
help='Cross-entropy reconstruction coefficient (denoising autoencoding)'
'use fixed weight during training if set to floating point number. '
'use piecewise linear function over number of updates to schedule the '
'weight with the format: w0:step0,w1:step1,...')
parser.add_argument('--lambda-otf-bt-config', default="0.0", type=str, metavar='CONFIG',
help='cross-entropy reconstruction coefficient (on-the-fly back-translation parallel data)'
'use fixed weight during training if set to floating point number. '
'use piecewise linear function over number of updates to schedule the '
'weight with the format: w0:step0,w1:step1,...')
parser.add_argument('--bt-max-len-a', default=1.1, type=float, metavar='N',
help='generate back-translated sequences of maximum length ax + b, where x is the '
'source length')
parser.add_argument('--bt-max-len-b', default=10.0, type=float, metavar='N',
help='generate back-translated sequences of maximum length ax + b, where x is the '
'source length')
parser.add_argument('--bt-beam-size', default=1, type=int, metavar='N',
help='beam size used in beam search of online back-translation')
parser.add_argument('--max-word-shuffle-distance', default=3.0, type=float, metavar='N',
help='maximum word shuffle distance for denoising autoencoding data generation')
parser.add_argument('--word-dropout-prob', default=0.1, type=float, metavar='N',
help='word dropout probability for denoising autoencoding data generation')
parser.add_argument('--word-blanking-prob', default=0.2, type=float, metavar='N',
help='word blanking probability for denoising autoencoding data generation')
# fmt: on
def __init__(self, args, dicts, training):
super().__init__(args, dicts, training)
self.lambda_parallel, self.lambda_parallel_steps = parse_lambda_config(args.lambda_parallel_config)
self.lambda_otf_bt, self.lambda_otf_bt_steps = parse_lambda_config(args.lambda_otf_bt_config)
self.lambda_denoising, self.lambda_denoising_steps = parse_lambda_config(args.lambda_denoising_config)
if (self.lambda_denoising > 0.0 or self.lambda_denoising_steps is not None):
denoising_lang_pairs = [
"%s-%s" % (tgt, tgt)
for tgt in {lang_pair.split('-')[1] for lang_pair in args.lang_pairs}
]
self.model_lang_pairs = self.model_lang_pairs + denoising_lang_pairs
self.backtranslate_datasets = {}
self.backtranslators = {}
@classmethod
def setup_task(cls, args, **kwargs):
dicts, training = MultilingualTranslationTask.prepare(args, **kwargs)
return cls(args, dicts, training)
def load_dataset(self, split, epoch=1, **kwargs):
"""Load a dataset split."""
paths = utils.split_paths(self.args.data)
assert len(paths) > 0
data_path = paths[(epoch - 1) % len(paths)]
def split_exists(split, src, tgt, lang):
if src is not None:
filename = os.path.join(data_path, '{}.{}-{}.{}'.format(split, src, tgt, lang))
else:
filename = os.path.join(data_path, '{}.{}-None.{}'.format(split, src, tgt))
return indexed_dataset.dataset_exists(filename, impl=self.args.dataset_impl)
def load_indexed_dataset(path, dictionary):
return data_utils.load_indexed_dataset(path, dictionary, self.args.dataset_impl)
# load parallel datasets
src_datasets, tgt_datasets = {}, {}
if (self.lambda_parallel > 0.0 or self.lambda_parallel_steps is not None or not split.startswith("train")):
for lang_pair in self.lang_pairs:
src, tgt = lang_pair.split('-')
if split_exists(split, src, tgt, src):
prefix = os.path.join(data_path, '{}.{}-{}.'.format(split, src, tgt))
elif split_exists(split, tgt, src, src):
prefix = os.path.join(data_path, '{}.{}-{}.'.format(split, tgt, src))
else:
continue
src_datasets[lang_pair] = load_indexed_dataset(prefix + src, self.dicts[src])
tgt_datasets[lang_pair] = load_indexed_dataset(prefix + tgt, self.dicts[tgt])
logger.info('parallel-{} {} {} examples'.format(data_path, split, len(src_datasets[lang_pair])))
if len(src_datasets) == 0:
raise FileNotFoundError('Dataset not found: {} ({})'.format(split, data_path))
# back translation datasets
backtranslate_datasets = {}
if (self.lambda_otf_bt > 0.0 or self.lambda_otf_bt_steps is not None) and split.startswith("train"):
for lang_pair in self.lang_pairs:
src, tgt = lang_pair.split('-')
if not split_exists(split, tgt, None, tgt):
raise FileNotFoundError('Dataset not found: backtranslation {} ({})'.format(split, data_path))
filename = os.path.join(data_path, '{}.{}-None.{}'.format(split, tgt, tgt))
dataset = load_indexed_dataset(filename, self.dicts[tgt])
lang_pair_dataset_tgt = LanguagePairDataset(
dataset,
dataset.sizes,
self.dicts[tgt],
left_pad_source=self.args.left_pad_source,
left_pad_target=self.args.left_pad_target,
)
lang_pair_dataset = LanguagePairDataset(
dataset,
dataset.sizes,
src_dict=self.dicts[src],
tgt=dataset,
tgt_sizes=dataset.sizes,
tgt_dict=self.dicts[tgt],
left_pad_source=self.args.left_pad_source,
left_pad_target=self.args.left_pad_target,
)
backtranslate_datasets[lang_pair] = BacktranslationDataset(
tgt_dataset=self.alter_dataset_langtok(
lang_pair_dataset_tgt,
src_eos=self.dicts[tgt].eos(),
src_lang=tgt,
tgt_lang=src,
),
backtranslation_fn=self.backtranslators[lang_pair],
src_dict=self.dicts[src], tgt_dict=self.dicts[tgt],
output_collater=self.alter_dataset_langtok(
lang_pair_dataset=lang_pair_dataset,
src_eos=self.dicts[src].eos(),
src_lang=src,
tgt_eos=self.dicts[tgt].eos(),
tgt_lang=tgt,
).collater,
)
logger.info('backtranslate-{}: {} {} {} examples'.format(
tgt, data_path, split, len(backtranslate_datasets[lang_pair]),
))
self.backtranslate_datasets[lang_pair] = backtranslate_datasets[lang_pair]
# denoising autoencoder
noising_datasets = {}
if (self.lambda_denoising > 0.0 or self.lambda_denoising_steps is not None) and split.startswith("train"):
for lang_pair in self.lang_pairs:
_, tgt = lang_pair.split('-')
if not split_exists(split, tgt, None, tgt):
continue
filename = os.path.join(data_path, '{}.{}-None.{}'.format(split, tgt, tgt))
tgt_dataset1 = load_indexed_dataset(filename, self.dicts[tgt])
tgt_dataset2 = load_indexed_dataset(filename, self.dicts[tgt])
noising_dataset = NoisingDataset(
tgt_dataset1,
self.dicts[tgt],
seed=1,
max_word_shuffle_distance=self.args.max_word_shuffle_distance,
word_dropout_prob=self.args.word_dropout_prob,
word_blanking_prob=self.args.word_blanking_prob,
)
noising_datasets[lang_pair] = self.alter_dataset_langtok(
LanguagePairDataset(
noising_dataset,
tgt_dataset1.sizes,
self.dicts[tgt],
tgt_dataset2,
tgt_dataset2.sizes,
self.dicts[tgt],
left_pad_source=self.args.left_pad_source,
left_pad_target=self.args.left_pad_target,
),
src_eos=self.dicts[tgt].eos(),
src_lang=tgt,
tgt_eos=self.dicts[tgt].eos(),
tgt_lang=tgt,
)
logger.info('denoising-{}: {} {} {} examples'.format(
tgt, data_path, split, len(noising_datasets[lang_pair]),
))
def language_pair_dataset(lang_pair):
src, tgt = lang_pair.split('-')
src_dataset, tgt_dataset = src_datasets[lang_pair], tgt_datasets[lang_pair]
return self.alter_dataset_langtok(
LanguagePairDataset(
src_dataset, src_dataset.sizes, self.dicts[src],
tgt_dataset, tgt_dataset.sizes, self.dicts[tgt],
left_pad_source=self.args.left_pad_source,
left_pad_target=self.args.left_pad_target,
),
self.dicts[src].eos(),
src,
self.dicts[tgt].eos(),
tgt,
)
self.datasets[split] = RoundRobinZipDatasets(
OrderedDict([
(lang_pair, language_pair_dataset(lang_pair))
for lang_pair in src_datasets.keys()
] + [
(_get_bt_dataset_key(lang_pair), dataset)
for lang_pair, dataset in backtranslate_datasets.items()
] + [
(_get_denoising_dataset_key(lang_pair), dataset)
for lang_pair, dataset in noising_datasets.items()
]),
eval_key=None if self.training else "%s-%s" % (self.args.source_lang, self.args.target_lang),
)
def build_model(self, args):
from fairseq import models
model = models.build_model(args, self)
if not isinstance(model, FairseqMultiModel):
raise ValueError('SemisupervisedTranslationTask requires a FairseqMultiModel architecture')
# create SequenceGenerator for each model that has backtranslation dependency on it
self.sequence_generators = {}
if (self.lambda_otf_bt > 0.0 or self.lambda_otf_bt_steps is not None) and self.training:
for lang_pair in self.lang_pairs:
src, tgt = lang_pair.split('-')
key = '{}-{}'.format(tgt, src)
self.sequence_generators[key] = SequenceGenerator(
[model.models[key]],
tgt_dict=self.dicts[src],
beam_size=args.bt_beam_size,
max_len_a=args.bt_max_len_a,
max_len_b=args.bt_max_len_b,
)
decoder_lang_tok_idx = self.get_decoder_langtok(src)
def backtranslate_fn(
sample, model=model.models[key],
bos_token=decoder_lang_tok_idx,
sequence_generator=self.sequence_generators[key],
):
return sequence_generator.generate(
[model],
sample,
bos_token=bos_token,
)
self.backtranslators[lang_pair] = backtranslate_fn
return model
def train_step(self, sample, model, criterion, optimizer, update_num, ignore_grad=False):
model.train()
if update_num > 0:
self.update_step(update_num)
agg_loss, agg_sample_size, agg_logging_output = 0., 0., {}
def forward_backward(model, samples, logging_output_key, weight):
nonlocal agg_loss, agg_sample_size, agg_logging_output
if samples is None or len(samples) == 0:
return
loss, sample_size, logging_output = criterion(model, samples)
if ignore_grad:
loss *= 0
else:
loss *= weight
optimizer.backward(loss)
agg_loss += loss.detach().item()
# TODO make summing of the sample sizes configurable
agg_sample_size += sample_size
for k in logging_output:
agg_logging_output[k] += logging_output[k]
agg_logging_output[logging_output_key] += logging_output[k]
if self.lambda_parallel > 0.0:
for lang_pair in self.lang_pairs:
forward_backward(model.models[lang_pair], sample[lang_pair], lang_pair, self.lambda_parallel)
if self.lambda_otf_bt > 0.0:
for lang_pair in self.lang_pairs:
sample_key = _get_bt_dataset_key(lang_pair)
forward_backward(model.models[lang_pair], sample[sample_key], sample_key, self.lambda_otf_bt)
if self.lambda_denoising > 0.0:
for lang_pair in self.lang_pairs:
_, tgt = lang_pair.split('-')
sample_key = _get_denoising_dataset_key(lang_pair)
forward_backward(model.models['{0}-{0}'.format(tgt)], sample[sample_key], sample_key, self.lambda_denoising)
return agg_loss, agg_sample_size, agg_logging_output
def update_step(self, num_updates):
def lambda_step_func(config, n_iter):
"""
Update a lambda value according to its schedule configuration.
"""
ranges = [i for i in range(len(config) - 1) if config[i][0] <= n_iter < config[i + 1][0]]
if len(ranges) == 0:
assert n_iter >= config[-1][0]
return config[-1][1]
assert len(ranges) == 1
i = ranges[0]
x_a, y_a = config[i]
x_b, y_b = config[i + 1]
return y_a + (n_iter - x_a) * float(y_b - y_a) / float(x_b - x_a)
if self.lambda_parallel_steps is not None:
self.lambda_parallel = lambda_step_func(self.lambda_parallel_steps, num_updates)
if self.lambda_denoising_steps is not None:
self.lambda_denoising = lambda_step_func(self.lambda_denoising_steps, num_updates)
if self.lambda_otf_bt_steps is not None:
self.lambda_otf_bt = lambda_step_func(self.lambda_otf_bt_steps, num_updates)
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/tasks/translation_from_pretrained_xlm.py | fairseq/tasks/translation_from_pretrained_xlm.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairseq.data.legacy.masked_lm_dictionary import MaskedLMDictionary
from fairseq.tasks.translation import TranslationTask
from . import register_task
@register_task("translation_from_pretrained_xlm")
class TranslationFromPretrainedXLMTask(TranslationTask):
"""
Same as TranslationTask except use the MaskedLMDictionary class so that
we can load data that was binarized with the MaskedLMDictionary class.
This task should be used for the entire training pipeline when we want to
train an NMT model from a pretrained XLM checkpoint: binarizing NMT data,
training NMT with the pretrained XLM checkpoint, and subsequent evaluation
of that trained model.
"""
@classmethod
def load_dictionary(cls, filename):
"""Load the masked LM dictionary from the filename
Args:
filename (str): the filename
"""
return MaskedLMDictionary.load(filename)
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/tasks/multilingual_masked_lm.py | fairseq/tasks/multilingual_masked_lm.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import numpy as np
import torch
from fairseq.data import (
data_utils,
Dictionary,
encoders,
ConcatDataset,
IdDataset,
MaskTokensDataset,
NestedDictionaryDataset,
NumelDataset,
NumSamplesDataset,
PadDataset,
PrependTokenDataset,
RawLabelDataset,
ResamplingDataset,
SortDataset,
TokenBlockDataset,
)
from fairseq.tasks import register_task, LegacyFairseqTask
from fairseq import utils
logger = logging.getLogger(__name__)
@register_task('multilingual_masked_lm')
class MultiLingualMaskedLMTask(LegacyFairseqTask):
"""Task for training masked language models (e.g., BERT, RoBERTa)."""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
parser.add_argument('data', help='colon separated path to data directories list, \
will be iterated upon during epochs in round-robin manner')
parser.add_argument('--sample-break-mode', default='complete',
choices=['none', 'complete', 'complete_doc', 'eos'],
help='If omitted or "none", fills each sample with tokens-per-sample '
'tokens. If set to "complete", splits samples only at the end '
'of sentence, but may include multiple sentences per sample. '
'"complete_doc" is similar but respects doc boundaries. '
'If set to "eos", includes only one sentence per sample.')
parser.add_argument('--tokens-per-sample', default=512, type=int,
help='max number of total tokens over all segments '
'per sample for BERT dataset')
parser.add_argument('--mask-prob', default=0.15, type=float,
help='probability of replacing a token with mask')
parser.add_argument('--leave-unmasked-prob', default=0.1, type=float,
help='probability that a masked token is unmasked')
parser.add_argument('--random-token-prob', default=0.1, type=float,
help='probability of replacing a token with a random token')
parser.add_argument('--freq-weighted-replacement', action='store_true',
help='sample random replacement words based on word frequencies')
parser.add_argument('--mask-whole-words', default=False, action='store_true',
help='mask whole words; you may also want to set --bpe')
parser.add_argument('--multilang-sampling-alpha', type=float, default=1.0,
help='smoothing alpha for sample rations across multiple datasets')
def __init__(self, args, dictionary):
super().__init__(args)
self.dictionary = dictionary
self.seed = args.seed
# add mask token
self.mask_idx = dictionary.add_symbol('<mask>')
@classmethod
def setup_task(cls, args, **kwargs):
paths = utils.split_paths(args.data)
assert len(paths) > 0
dictionary = Dictionary.load(os.path.join(paths[0], 'dict.txt'))
logger.info('dictionary: {} types'.format(len(dictionary)))
return cls(args, dictionary)
def _get_whole_word_mask(self):
# create masked input and targets
if self.args.mask_whole_words:
bpe = encoders.build_bpe(self.args)
if bpe is not None:
def is_beginning_of_word(i):
if i < self.source_dictionary.nspecial:
# special elements are always considered beginnings
return True
tok = self.source_dictionary[i]
if tok.startswith('madeupword'):
return True
try:
return bpe.is_beginning_of_word(tok)
except ValueError:
return True
mask_whole_words = torch.ByteTensor(list(
map(is_beginning_of_word, range(len(self.source_dictionary)))
))
else:
mask_whole_words = None
return mask_whole_words
def _get_sample_prob(self, dataset_lens):
"""
Get smoothed sampling porbability by languages. This helps low resource
languages by upsampling them.
"""
prob = dataset_lens / dataset_lens.sum()
smoothed_prob = prob ** self.args.multilang_sampling_alpha
smoothed_prob = smoothed_prob / smoothed_prob.sum()
return smoothed_prob
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
paths = utils.split_paths(self.args.data)
assert len(paths) > 0
data_path = paths[(epoch - 1) % len(paths)]
languages = sorted(
name for name in os.listdir(data_path)
if os.path.isdir(os.path.join(data_path, name))
)
logger.info("Training on {0} languages: {1}".format(len(languages), languages))
logger.info("Language to id mapping: ", {
lang: id for id, lang in enumerate(languages)
}
)
mask_whole_words = self._get_whole_word_mask()
lang_datasets = []
for lang_id, language in enumerate(languages):
split_path = os.path.join(data_path, language, split)
dataset = data_utils.load_indexed_dataset(
split_path,
self.source_dictionary,
self.args.dataset_impl,
combine=combine,
)
if dataset is None:
raise FileNotFoundError('Dataset not found: {} ({})'.format(split, split_path))
# create continuous blocks of tokens
dataset = TokenBlockDataset(
dataset,
dataset.sizes,
self.args.tokens_per_sample - 1, # one less for <s>
pad=self.source_dictionary.pad(),
eos=self.source_dictionary.eos(),
break_mode=self.args.sample_break_mode,
)
logger.info('loaded {} blocks from: {}'.format(len(dataset), split_path))
# prepend beginning-of-sentence token (<s>, equiv. to [CLS] in BERT)
dataset = PrependTokenDataset(dataset, self.source_dictionary.bos())
src_dataset, tgt_dataset = MaskTokensDataset.apply_mask(
dataset,
self.source_dictionary,
pad_idx=self.source_dictionary.pad(),
mask_idx=self.mask_idx,
seed=self.args.seed,
mask_prob=self.args.mask_prob,
leave_unmasked_prob=self.args.leave_unmasked_prob,
random_token_prob=self.args.random_token_prob,
freq_weighted_replacement=self.args.freq_weighted_replacement,
mask_whole_words=mask_whole_words,
)
lang_dataset = NestedDictionaryDataset(
{
'net_input': {
'src_tokens': PadDataset(
src_dataset,
pad_idx=self.source_dictionary.pad(),
left_pad=False,
),
'src_lengths': NumelDataset(src_dataset, reduce=False),
},
'target': PadDataset(
tgt_dataset,
pad_idx=self.source_dictionary.pad(),
left_pad=False,
),
'nsentences': NumSamplesDataset(),
'ntokens': NumelDataset(src_dataset, reduce=True),
'lang_id': RawLabelDataset([lang_id] * src_dataset.sizes.shape[0]),
},
sizes=[src_dataset.sizes],
)
lang_datasets.append(lang_dataset)
dataset_lengths = np.array(
[len(d) for d in lang_datasets],
dtype=float,
)
logger.info(
'loaded total {} blocks for all languages'.format(
dataset_lengths.sum(),
)
)
if split == self.args.train_subset:
# For train subset, additionally up or down sample languages.
sample_probs = self._get_sample_prob(dataset_lengths)
logger.info("Sample probability by language: ", {
lang: "{0:.4f}".format(sample_probs[id])
for id, lang in enumerate(languages)
}
)
size_ratio = (sample_probs * dataset_lengths.sum()) / dataset_lengths
logger.info("Up/Down Sampling ratio by language: ", {
lang: "{0:.2f}".format(size_ratio[id])
for id, lang in enumerate(languages)
}
)
resampled_lang_datasets = [
ResamplingDataset(
lang_datasets[i],
size_ratio=size_ratio[i],
seed=self.args.seed,
epoch=epoch,
replace=size_ratio[i] >= 1.0,
)
for i, d in enumerate(lang_datasets)
]
dataset = ConcatDataset(resampled_lang_datasets)
else:
dataset = ConcatDataset(lang_datasets)
lang_splits = [split]
for lang_id, lang_dataset in enumerate(lang_datasets):
split_name = split + '_' + languages[lang_id]
lang_splits.append(split_name)
self.datasets[split_name] = lang_dataset
# [TODO]: This is hacky for now to print validation ppl for each
# language individually. Maybe need task API changes to allow it
# in more generic ways.
if split in self.args.valid_subset:
self.args.valid_subset = self.args.valid_subset.replace(
split, ','.join(lang_splits)
)
with data_utils.numpy_seed(self.args.seed + epoch):
shuffle = np.random.permutation(len(dataset))
self.datasets[split] = SortDataset(
dataset,
sort_order=[
shuffle,
dataset.sizes,
],
)
def build_dataset_for_inference(self, src_tokens, src_lengths, sort=True):
src_dataset = PadDataset(
TokenBlockDataset(
src_tokens,
src_lengths,
self.args.tokens_per_sample - 1, # one less for <s>
pad=self.source_dictionary.pad(),
eos=self.source_dictionary.eos(),
break_mode='eos',
),
pad_idx=self.source_dictionary.pad(),
left_pad=False,
)
src_dataset = PrependTokenDataset(src_dataset, self.source_dictionary.bos())
src_dataset = NestedDictionaryDataset(
{
'id': IdDataset(),
'net_input': {
'src_tokens': src_dataset,
'src_lengths': NumelDataset(src_dataset, reduce=False),
},
},
sizes=src_lengths,
)
if sort:
src_dataset = SortDataset(src_dataset, sort_order=[src_lengths])
return src_dataset
@property
def source_dictionary(self):
return self.dictionary
@property
def target_dictionary(self):
return self.dictionary
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/tasks/multilingual_denoising.py | fairseq/tasks/multilingual_denoising.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import numpy as np
from fairseq.data import (
data_utils,
Dictionary,
AppendTokenDataset,
ConcatDataset,
DenoisingDataset,
PrependTokenDataset,
ResamplingDataset,
SortDataset,
TokenBlockDataset,
)
from .denoising import DenoisingTask
from fairseq.data.encoders.utils import get_whole_word_mask
from fairseq.tasks import register_task
logger = logging.getLogger(__name__)
@register_task('multilingual_denoising')
class MultilingualDenoisingTask(DenoisingTask):
@staticmethod
def add_args(parser):
DenoisingTask.add_args(parser)
parser.add_argument('--multilang-sampling-alpha', type=float, default=1.0,
help='smoothing alpha for sample ratios across multiple datasets')
parser.add_argument('--add-lang-token', default=False, action='store_true')
parser.add_argument('--langs', type=str, help="language ids we are considering", default=None)
parser.add_argument('--no-whole-word-mask-langs', type=str, default='', metavar='N',
help='languages without spacing between words dont support whole word masking')
@classmethod
def setup_task(cls, args, **kwargs):
"""Setup the task.
"""
paths = args.data.split(':')
assert len(paths) > 0
dictionary = Dictionary.load(os.path.join(paths[0], 'dict.txt'))
data_path = paths[0]
if args.langs is None:
languages = sorted([
name for name in os.listdir(data_path)
if os.path.isdir(os.path.join(data_path, name))
])
else:
languages = args.langs.split(',')
if args.add_lang_token:
for lang in languages:
dictionary.add_symbol('[{}]'.format(lang))
logger.info("dictionary: {} types".format(len(dictionary)))
if not hasattr(args, 'shuffle_instance'):
args.shuffle_instance = False
return cls(args, dictionary)
def __init__(self, args, dictionary):
super().__init__(args, dictionary)
self.dictionary = dictionary
self.seed = args.seed
# add mask token
self.mask_idx = self.dictionary.add_symbol('<mask>')
self.langs = args.langs
self.args = args
def _get_sample_prob(self, dataset_lens):
"""
Get smoothed sampling porbability by languages. This helps low resource
languages by upsampling them.
"""
prob = dataset_lens / dataset_lens.sum()
smoothed_prob = prob ** self.args.multilang_sampling_alpha
smoothed_prob = smoothed_prob / smoothed_prob.sum()
return smoothed_prob
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
paths = self.args.data.split(':')
assert len(paths) > 0
data_path = paths[(epoch - 1) % len(paths)]
split_path = os.path.join(data_path, split)
if self.langs is None:
languages = sorted([
name for name in os.listdir(data_path)
if os.path.isdir(os.path.join(data_path, name))
])
else:
languages = self.langs.split(',')
for name in languages:
p = os.path.join(data_path, name)
assert os.path.exists(p), "data not found: {}".format(p)
logger.info("Training on {0} languages: {1}".format(len(languages), languages))
logger.info("Language to id mapping: ", {
lang: id for id, lang in enumerate(languages)
}
)
mask_whole_words = get_whole_word_mask(self.args, self.dictionary)
language_without_segmentations = self.args.no_whole_word_mask_langs.split(',')
lang_datasets = []
for language in languages:
split_path = os.path.join(data_path, language, split)
dataset = data_utils.load_indexed_dataset(
split_path,
self.source_dictionary,
self.args.dataset_impl,
combine=combine,
)
if dataset is None:
raise FileNotFoundError('Dataset not found: {} ({})'.format(split, split_path))
end_token = self.source_dictionary.index('[{}]'.format(language)) \
if self.args.add_lang_token else self.source_dictionary.eos()
# create continuous blocks of tokens
dataset = TokenBlockDataset(
dataset,
dataset.sizes,
self.args.tokens_per_sample - 2, # one less for <s>
pad=self.source_dictionary.pad(),
eos=end_token,
break_mode=self.args.sample_break_mode,
)
logger.info('loaded {} blocks from: {}'.format(len(dataset), split_path))
# prepend beginning-of-sentence token (<s>, equiv. to [CLS] in BERT)
dataset = PrependTokenDataset(dataset, self.source_dictionary.bos())
dataset = AppendTokenDataset(dataset, end_token)
lang_mask_whole_words = mask_whole_words if language not in language_without_segmentations else None
lang_dataset = DenoisingDataset(
dataset,
dataset.sizes,
self.dictionary,
self.mask_idx,
lang_mask_whole_words,
shuffle=self.args.shuffle_instance,
seed=self.seed,
args=self.args,
eos=None if not self.args.add_lang_token else self.source_dictionary.index('[{}]'.format(language)),
)
lang_datasets.append(lang_dataset)
dataset_lengths = np.array(
[len(d) for d in lang_datasets],
dtype=float,
)
logger.info(
'loaded total {} blocks for all languages'.format(
int(dataset_lengths.sum()),
)
)
if split == self.args.train_subset:
# For train subset, additionally up or down sample languages.
sample_probs = self._get_sample_prob(dataset_lengths)
logger.info(
"Sample probability by language: {}".format({
lang: "{0:.4f}".format(sample_probs[id])
for id, lang in enumerate(languages)
})
)
size_ratio = (sample_probs * dataset_lengths.sum()) / dataset_lengths
logger.info(
"Up/Down Sampling ratio by language: {}".format({
lang: "{0:.2f}".format(size_ratio[id])
for id, lang in enumerate(languages)
})
)
resampled_lang_datasets = [
ResamplingDataset(
lang_datasets[i],
size_ratio=size_ratio[i],
seed=self.args.seed,
epoch=epoch,
replace=size_ratio[i] >= 1.0,
)
for i, d in enumerate(lang_datasets)
]
dataset = ConcatDataset(
resampled_lang_datasets,
)
else:
dataset = ConcatDataset(lang_datasets)
lang_splits = [split]
for lang_id, lang_dataset in enumerate(lang_datasets):
split_name = split + '_' + languages[lang_id]
lang_splits.append(split_name)
self.datasets[split_name] = lang_dataset
if split in self.args.valid_subset:
self.args.valid_subset = self.args.valid_subset.replace(
split, ','.join(lang_splits)
)
with data_utils.numpy_seed(self.args.seed + epoch):
shuffle = np.random.permutation(len(dataset))
self.datasets[split] = SortDataset(
dataset,
sort_order=[
shuffle,
dataset.sizes,
],
)
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/tasks/fairseq_task.py | fairseq/tasks/fairseq_task.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import warnings
import torch
from fairseq import metrics, search, tokenizer, utils
from fairseq.data import data_utils, FairseqDataset, iterators, Dictionary
from argparse import Namespace
logger = logging.getLogger(__name__)
class FairseqTask(object):
"""
Tasks store dictionaries and provide helpers for loading/iterating over
Datasets, initializing the Model/Criterion and calculating the loss.
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
pass
@staticmethod
def logging_outputs_can_be_summed(criterion) -> bool:
"""
Whether the logging outputs returned by `train_step` and `valid_step` can
be summed across workers prior to calling `aggregate_logging_outputs`.
Setting this to True will improves distributed training speed.
"""
return criterion.logging_outputs_can_be_summed()
def __init__(self, args):
self.args = args
self.datasets = {}
self.dataset_to_epoch_iter = {}
@classmethod
def load_dictionary(cls, filename):
"""Load the dictionary from the filename
Args:
filename (str): the filename
"""
return Dictionary.load(filename)
@classmethod
def build_dictionary(
cls, filenames, workers=1, threshold=-1, nwords=-1, padding_factor=8
):
"""Build the dictionary
Args:
filenames (list): list of filenames
workers (int): number of concurrent workers
threshold (int): defines the minimum word count
nwords (int): defines the total number of words in the final dictionary,
including special symbols
padding_factor (int): can be used to pad the dictionary size to be a
multiple of 8, which is important on some hardware (e.g., Nvidia
Tensor Cores).
"""
d = Dictionary()
for filename in filenames:
Dictionary.add_file_to_dictionary(
filename, d, tokenizer.tokenize_line, workers
)
d.finalize(threshold=threshold, nwords=nwords, padding_factor=padding_factor)
return d
@classmethod
def setup_task(cls, args, **kwargs):
"""Setup the task (e.g., load dictionaries).
Args:
args (argparse.Namespace): parsed command-line arguments
"""
return cls(args, **kwargs)
def has_sharded_data(self, split):
return (os.pathsep in getattr(self.args, 'data', ''))
def load_dataset(self, split, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
raise NotImplementedError
def dataset(self, split):
"""
Return a loaded dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
Returns:
a :class:`~fairseq.data.FairseqDataset` corresponding to *split*
"""
from fairseq.data import FairseqDataset
if split not in self.datasets:
raise KeyError("Dataset not loaded: " + split)
if not isinstance(self.datasets[split], FairseqDataset):
raise TypeError("Datasets are expected to be of type FairseqDataset")
return self.datasets[split]
def filter_indices_by_size(
self,
indices,
dataset,
max_positions=None,
ignore_invalid_inputs=False,
):
"""
Filter examples that are too large
Args:
indices (np.array): original array of sample indices
dataset (~fairseq.data.FairseqDataset): dataset to batch
max_positions (optional): max sentence length supported by the
model (default: None).
ignore_invalid_inputs (bool, optional): don't raise Exception for
sentences that are too long (default: False).
Returns:
np.array: array of filtered sample indices
"""
indices, ignored = dataset.filter_indices_by_size(indices, max_positions)
if len(ignored) > 0:
if not ignore_invalid_inputs:
raise Exception((
'Size of sample #{} is invalid (={}) since max_positions={}, '
'skip this example with --skip-invalid-size-inputs-valid-test'
).format(ignored[0], dataset.size(ignored[0]), max_positions))
logger.warning((
'{} samples have invalid sizes and will be skipped, '
'max_positions={}, first few sample ids={}'
).format(len(ignored), max_positions, ignored[:10]))
return indices
def can_reuse_epoch_itr(self, dataset):
# We can reuse the epoch iterator across epochs as long as the dataset
# hasn't disabled it. We default to ``False`` here, although in practice
# this will be ``True`` for most datasets that inherit from
# ``FairseqDataset`` due to the base implementation there.
return getattr(dataset, 'can_reuse_epoch_itr_across_epochs', False)
def get_batch_iterator(
self,
dataset,
max_tokens=None,
max_sentences=None,
max_positions=None,
ignore_invalid_inputs=False,
required_batch_size_multiple=1,
seed=1,
num_shards=1,
shard_id=0,
num_workers=0,
epoch=1,
data_buffer_size=0,
disable_iterator_cache=False,
):
"""
Get an iterator that yields batches of data from the given dataset.
Args:
dataset (~fairseq.data.FairseqDataset): dataset to batch
max_tokens (int, optional): max number of tokens in each batch
(default: None).
max_sentences (int, optional): max number of sentences in each
batch (default: None).
max_positions (optional): max sentence length supported by the
model (default: None).
ignore_invalid_inputs (bool, optional): don't raise Exception for
sentences that are too long (default: False).
required_batch_size_multiple (int, optional): require batch size to
be a multiple of N (default: 1).
seed (int, optional): seed for random number generator for
reproducibility (default: 1).
num_shards (int, optional): shard the data iterator into N
shards (default: 1).
shard_id (int, optional): which shard of the data iterator to
return (default: 0).
num_workers (int, optional): how many subprocesses to use for data
loading. 0 means the data will be loaded in the main process
(default: 0).
epoch (int, optional): the epoch to start the iterator from
(default: 1).
data_buffer_size (int, optional): number of batches to
preload (default: 0).
disable_iterator_cache (bool, optional): don't cache the
EpochBatchIterator (ignores `FairseqTask::can_reuse_epoch_itr`)
(default: False).
Returns:
~fairseq.iterators.EpochBatchIterator: a batched iterator over the
given dataset split
"""
can_reuse_epoch_itr = (
not disable_iterator_cache
and self.can_reuse_epoch_itr(dataset)
)
if can_reuse_epoch_itr and dataset in self.dataset_to_epoch_iter:
logger.debug('reusing EpochBatchIterator for epoch {}'.format(epoch))
return self.dataset_to_epoch_iter[dataset]
assert isinstance(dataset, FairseqDataset)
# initialize the dataset with the correct starting epoch
dataset.set_epoch(epoch)
# get indices ordered by example size
with data_utils.numpy_seed(seed):
indices = dataset.ordered_indices()
# filter examples that are too large
if max_positions is not None:
indices = self.filter_indices_by_size(
indices, dataset, max_positions, ignore_invalid_inputs
)
# create mini-batches with given size constraints
batch_sampler = dataset.batch_by_size(
indices,
max_tokens=max_tokens,
max_sentences=max_sentences,
required_batch_size_multiple=required_batch_size_multiple,
)
# return a reusable, sharded iterator
epoch_iter = iterators.EpochBatchIterator(
dataset=dataset,
collate_fn=dataset.collater,
batch_sampler=batch_sampler,
seed=seed,
num_shards=num_shards,
shard_id=shard_id,
num_workers=num_workers,
epoch=epoch,
buffer_size=data_buffer_size,
)
if can_reuse_epoch_itr:
self.dataset_to_epoch_iter[dataset] = epoch_iter
return epoch_iter
def build_model(self, args):
"""
Build the :class:`~fairseq.models.BaseFairseqModel` instance for this
task.
Args:
args (argparse.Namespace): parsed command-line arguments
Returns:
a :class:`~fairseq.models.BaseFairseqModel` instance
"""
from fairseq import models, quantization_utils
model = models.build_model(args, self)
if getattr(args, 'tpu', False):
model.prepare_for_tpu_()
model = quantization_utils.quantize_model_scalar(model, args)
return model
def build_criterion(self, args):
"""
Build the :class:`~fairseq.criterions.FairseqCriterion` instance for
this task.
Args:
args (argparse.Namespace): parsed command-line arguments
Returns:
a :class:`~fairseq.criterions.FairseqCriterion` instance
"""
from fairseq import criterions
return criterions.build_criterion(args, self)
def build_generator(
self, models, args,
seq_gen_cls=None, extra_gen_cls_kwargs=None
):
if getattr(args, "score_reference", False):
from fairseq.sequence_scorer import SequenceScorer
return SequenceScorer(
self.target_dictionary,
compute_alignment=getattr(args, "print_alignment", False),
)
from fairseq.sequence_generator import (
SequenceGenerator,
SequenceGeneratorWithAlignment,
)
# Choose search strategy. Defaults to Beam Search.
sampling = getattr(args, "sampling", False)
sampling_topk = getattr(args, "sampling_topk", -1)
sampling_topp = getattr(args, "sampling_topp", -1.0)
diverse_beam_groups = getattr(args, "diverse_beam_groups", -1)
diverse_beam_strength = getattr(args, "diverse_beam_strength", 0.5)
match_source_len = getattr(args, "match_source_len", False)
diversity_rate = getattr(args, "diversity_rate", -1)
constrained = getattr(args, "constraints", False)
if (
sum(
int(cond)
for cond in [
sampling,
diverse_beam_groups > 0,
match_source_len,
diversity_rate > 0,
]
)
> 1
):
raise ValueError("Provided Search parameters are mutually exclusive.")
assert sampling_topk < 0 or sampling, "--sampling-topk requires --sampling"
assert sampling_topp < 0 or sampling, "--sampling-topp requires --sampling"
if sampling:
search_strategy = search.Sampling(
self.target_dictionary, sampling_topk, sampling_topp
)
elif diverse_beam_groups > 0:
search_strategy = search.DiverseBeamSearch(
self.target_dictionary, diverse_beam_groups, diverse_beam_strength
)
elif match_source_len:
# this is useful for tagging applications where the output
# length should match the input length, so we hardcode the
# length constraints for simplicity
search_strategy = search.LengthConstrainedBeamSearch(
self.target_dictionary,
min_len_a=1,
min_len_b=0,
max_len_a=1,
max_len_b=0,
)
elif diversity_rate > -1:
search_strategy = search.DiverseSiblingsSearch(
self.target_dictionary, diversity_rate
)
elif constrained:
search_strategy = search.LexicallyConstrainedBeamSearch(self.target_dictionary, args.constraints)
else:
search_strategy = search.BeamSearch(self.target_dictionary)
if seq_gen_cls is None:
if getattr(args, "print_alignment", False):
seq_gen_cls = SequenceGeneratorWithAlignment
else:
seq_gen_cls = SequenceGenerator
extra_gen_cls_kwargs = extra_gen_cls_kwargs or {}
return seq_gen_cls(
models,
self.target_dictionary,
beam_size=getattr(args, "beam", 5),
max_len_a=getattr(args, "max_len_a", 0),
max_len_b=getattr(args, "max_len_b", 200),
min_len=getattr(args, "min_len", 1),
normalize_scores=(not getattr(args, "unnormalized", False)),
len_penalty=getattr(args, "lenpen", 1),
unk_penalty=getattr(args, "unkpen", 0),
temperature=getattr(args, "temperature", 1.0),
match_source_len=getattr(args, "match_source_len", False),
no_repeat_ngram_size=getattr(args, "no_repeat_ngram_size", 0),
search_strategy=search_strategy,
**extra_gen_cls_kwargs,
)
def train_step(
self, sample, model, criterion, optimizer, update_num, ignore_grad=False
):
"""
Do forward and backward, and return the loss as computed by *criterion*
for the given *model* and *sample*.
Args:
sample (dict): the mini-batch. The format is defined by the
:class:`~fairseq.data.FairseqDataset`.
model (~fairseq.models.BaseFairseqModel): the model
criterion (~fairseq.criterions.FairseqCriterion): the criterion
optimizer (~fairseq.optim.FairseqOptimizer): the optimizer
update_num (int): the current update
ignore_grad (bool): multiply loss by 0 if this is set to True
Returns:
tuple:
- the loss
- the sample size, which is used as the denominator for the
gradient
- logging outputs to display while training
"""
model.train()
model.set_num_updates(update_num)
with torch.autograd.profiler.record_function("forward"):
loss, sample_size, logging_output = criterion(model, sample)
if ignore_grad:
loss *= 0
with torch.autograd.profiler.record_function("backward"):
optimizer.backward(loss)
return loss, sample_size, logging_output
def valid_step(self, sample, model, criterion):
model.eval()
with torch.no_grad():
loss, sample_size, logging_output = criterion(model, sample)
return loss, sample_size, logging_output
def inference_step(self, generator, models, sample, prefix_tokens=None, constraints=None):
with torch.no_grad():
return generator.generate(models, sample, prefix_tokens=prefix_tokens, constraints=constraints)
def begin_epoch(self, epoch, model):
"""Hook function called before the start of each epoch."""
pass
def aggregate_logging_outputs(self, logging_outputs, criterion):
"""[deprecated] Aggregate logging outputs from data parallel training."""
utils.deprecation_warning(
"The aggregate_logging_outputs API is deprecated. "
"Please use the reduce_metrics API instead."
)
with metrics.aggregate() as agg:
self.reduce_metrics(logging_outputs, criterion)
return agg.get_smoothed_values()
def reduce_metrics(self, logging_outputs, criterion):
"""Aggregate logging outputs from data parallel training."""
# backward compatibility for tasks that override aggregate_logging_outputs
base_func = FairseqTask.aggregate_logging_outputs
self_func = getattr(self, "aggregate_logging_outputs").__func__
if self_func is not base_func:
utils.deprecation_warning(
"Tasks should implement the reduce_metrics API. "
"Falling back to deprecated aggregate_logging_outputs API."
)
agg_logging_outputs = self.aggregate_logging_outputs(
logging_outputs, criterion
)
for k, v in agg_logging_outputs.items():
metrics.log_scalar(k, v)
return
if not any("ntokens" in log for log in logging_outputs):
warnings.warn(
"ntokens not found in Criterion logging outputs, cannot log wpb or wps"
)
else:
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
metrics.log_scalar("wpb", ntokens, priority=180, round=1)
metrics.log_speed("wps", ntokens, priority=90, round=1)
if not any("nsentences" in log for log in logging_outputs):
warnings.warn(
"nsentences not found in Criterion logging outputs, cannot log bsz"
)
else:
nsentences = sum(log.get("nsentences", 0) for log in logging_outputs)
metrics.log_scalar("bsz", nsentences, priority=190, round=1)
criterion.__class__.reduce_metrics(logging_outputs)
def max_positions(self):
"""Return the max input length allowed by the task."""
return None
@property
def source_dictionary(self):
"""Return the source :class:`~fairseq.data.Dictionary` (if applicable
for this task)."""
raise NotImplementedError
@property
def target_dictionary(self):
"""Return the target :class:`~fairseq.data.Dictionary` (if applicable
for this task)."""
raise NotImplementedError
class LegacyFairseqTask(FairseqTask):
def __init__(self, args: Namespace):
self.args = args
self.datasets = {}
self.dataset_to_epoch_iter = {}
@classmethod
def setup_task(cls, args: Namespace, **kwargs):
"""Setup the task (e.g., load dictionaries).
Args:
args (argparse.Namespace): parsed command-line arguments
"""
return cls(args, **kwargs)
def has_sharded_data(self, split):
return (os.pathsep in getattr(self.args, 'data', ''))
def build_model(self, args: Namespace):
"""
Build the :class:`~fairseq.models.BaseFairseqModel` instance for this
task.
Args:
args (argparse.Namespace): parsed command-line arguments
Returns:
a :class:`~fairseq.models.BaseFairseqModel` instance
"""
from fairseq import models, quantization_utils
model = models.build_model(args, self)
if getattr(args, 'tpu', False):
model.prepare_for_tpu_()
model = quantization_utils.quantize_model_scalar(model, args)
return model
def build_criterion(self, args: Namespace):
"""
Build the :class:`~fairseq.criterions.FairseqCriterion` instance for
this task.
Args:
args (argparse.Namespace): parsed command-line arguments
Returns:
a :class:`~fairseq.criterions.FairseqCriterion` instance
"""
from fairseq import criterions
return criterions.build_criterion(args, self)
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/tasks/audio_pretraining.py | fairseq/tasks/audio_pretraining.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import os
import sys
from fairseq.data import FileAudioDataset, Dictionary, AddTargetDataset
from . import LegacyFairseqTask, register_task
class LabelEncoder(object):
def __init__(self, dictionary):
self.dictionary = dictionary
def __call__(self, label):
return self.dictionary.encode_line(
label, append_eos=False, add_if_not_exist=False
)
@register_task("audio_pretraining")
class AudioPretrainingTask(LegacyFairseqTask):
"""
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
parser.add_argument("data", help="path to data directory")
parser.add_argument(
"--sample-rate",
default=16000,
type=int,
help="target sample rate. audio files will be up/down sampled to this rate",
)
parser.add_argument(
"--normalize",
action="store_true",
help="if set, normalizes input to have 0 mean and unit variance",
)
parser.add_argument(
"--max-sample-size",
default=None,
type=int,
help="max sample size to crop to for batching. default = min sample length",
)
parser.add_argument(
"--min-sample-size",
default=None,
type=int,
help="min sample size to crop to for batching. default = same as --max-sample-size",
)
parser.add_argument(
"--enable-padding",
action="store_true",
help="pad shorter samples instead of cropping",
)
parser.add_argument(
"--labels",
type=str,
default=None,
help="extension of the label file to load, if any",
)
def __init__(self, args, source_dictionary=None):
super().__init__(args)
self._target_dictionary = None
self._source_dictionary = source_dictionary
self.is_ctc = args.criterion == "ctc"
@classmethod
def setup_task(cls, args, **kwargs):
"""Setup the task (e.g., load dictionaries).
Args:
args (argparse.Namespace): parsed command-line arguments
"""
return cls(args)
def load_dataset(self, split, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
manifest = os.path.join(self.args.data, "{}.tsv".format(split))
self.datasets[split] = FileAudioDataset(
manifest,
sample_rate=self.args.sample_rate,
max_sample_size=self.args.max_sample_size,
min_sample_size=self.args.max_sample_size,
min_length=self.args.min_sample_size,
pad=self.args.labels is not None or self.args.enable_padding,
normalize=self.args.normalize,
)
if self.args.labels:
dict_path = os.path.join(self.args.data, f"dict.{self.args.labels}.txt")
self._target_dictionary = Dictionary.load(dict_path)
label_path = os.path.join(self.args.data, f"{split}.{self.args.labels}")
labels = []
with open(label_path, "r") as f:
for line in f:
labels.append(line)
process_label = LabelEncoder(self.target_dictionary)
self.datasets[split] = AddTargetDataset(
self.datasets[split],
labels,
pad=self.target_dictionary.pad(),
eos=self.target_dictionary.eos(),
batch_targets=True,
process_label=process_label,
add_to_input=not self.is_ctc,
)
@property
def source_dictionary(self):
return self._source_dictionary
@property
def target_dictionary(self):
"""Return the :class:`~fairseq.data.Dictionary` for the language
model."""
return self._target_dictionary
def max_positions(self):
"""Maximum input length supported by the encoder."""
return (sys.maxsize, sys.maxsize)
def filter_indices_by_size(
self,
indices,
dataset,
max_positions=None,
ignore_invalid_inputs=False,
):
# we do not need to filter by size in this task as dataloaders take care of this
return indices
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/tasks/denoising.py | fairseq/tasks/denoising.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
from fairseq.data import (
data_utils,
Dictionary,
AppendTokenDataset,
DenoisingDataset,
PrependTokenDataset,
StripTokenDataset,
TokenBlockDataset,
)
from fairseq.data.encoders.utils import get_whole_word_mask
from fairseq.tasks import register_task, LegacyFairseqTask
from fairseq import utils
logger = logging.getLogger(__name__)
@register_task('denoising')
class DenoisingTask(LegacyFairseqTask):
"""
Denoising task for applying sequence to sequence denoising. (ie. BART)
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
parser.add_argument('data', help='path to data directory')
parser.add_argument('--tokens-per-sample', default=512, type=int,
help='max number of total tokens over all segments'
' per sample for dataset')
parser.add_argument(
'--sample-break-mode', default="complete_doc", type=str,
help='mode for breaking sentence',
)
parser.add_argument(
'--mask', default=0.0, type=float,
help='fraction of words/subwords that will be masked',
)
parser.add_argument(
'--mask-random', default=0.0, type=float,
help='instead of using [MASK], use random token this often'
)
parser.add_argument(
'--insert', default=0.0, type=float,
help='insert this percentage of additional random tokens',
)
parser.add_argument(
'--permute', default=0.0, type=float,
help='take this proportion of subwords and permute them',
)
parser.add_argument(
'--rotate', default=0.5, type=float,
help='rotate this proportion of inputs',
)
parser.add_argument(
'--poisson-lambda', default=3.0, type=float,
help='randomly shuffle sentences for this proportion of inputs'
)
parser.add_argument(
'--permute-sentences', default=0.0, type=float,
help='shuffle this proportion of sentences in all inputs'
)
parser.add_argument(
'--mask-length', default="subword", type=str,
choices=['subword', 'word', 'span-poisson'],
help='mask length to choose'
)
parser.add_argument(
'--replace-length', default=-1, type=int,
help='when masking N tokens, replace with 0, 1, or N tokens (use -1 for N)'
)
parser.add_argument(
'--max-source-positions', default=1024, type=int, metavar='N',
help='max number of tokens in the source sequence'
)
parser.add_argument(
'--max-target-positions', default=1024, type=int, metavar='N',
help='max number of tokens in the target sequence'
)
def __init__(self, args, dictionary):
super().__init__(args)
self.dictionary = dictionary
self.seed = args.seed
# add mask token
self.mask_idx = self.dictionary.add_symbol('<mask>')
@classmethod
def setup_task(cls, args, **kwargs):
"""Setup the task.
"""
dictionary = Dictionary.load(os.path.join(args.data, 'dict.txt'))
logger.info('dictionary: {} types'.format(len(dictionary)))
if not hasattr(args, 'shuffle_instance'):
args.shuffle_instance = False
return cls(args, dictionary)
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
paths = utils.split_paths(self.args.data)
assert len(paths) > 0
data_path = paths[(epoch - 1) % len(paths)]
split_path = os.path.join(data_path, split)
dataset = data_utils.load_indexed_dataset(
split_path,
self.dictionary,
self.args.dataset_impl,
combine=combine,
)
if dataset is None:
raise FileNotFoundError('Dataset not found: {} ({})'.format(split, split_path))
dataset = StripTokenDataset(dataset, self.dictionary.eos())
# create continuous blocks of tokens
dataset = TokenBlockDataset(
dataset,
dataset.sizes,
self.args.tokens_per_sample - 2, # one less for <s> and one for </s>
pad=self.dictionary.pad(),
eos=self.dictionary.eos(),
break_mode=self.args.sample_break_mode,
document_sep_len=0
)
# prepend beginning-of-sentence token (<s>, equiv. to [CLS] in BERT)
dataset = PrependTokenDataset(dataset, self.source_dictionary.bos())
dataset = AppendTokenDataset(dataset, self.source_dictionary.eos())
mask_whole_words = get_whole_word_mask(self.args, self.source_dictionary) \
if self.args.mask_length != 'subword' else None
self.datasets[split] = DenoisingDataset(
dataset, dataset.sizes, self.dictionary, self.mask_idx,
mask_whole_words, shuffle=self.args.shuffle_instance,
seed=self.seed, args=self.args
)
logger.info(
"Split: {0}, Loaded {1} samples of denoising_dataset".format(
split,
len(self.datasets[split]),
)
)
def max_positions(self):
"""Return the max sentence length allowed by the task."""
return (self.args.max_source_positions, self.args.max_target_positions)
@property
def source_dictionary(self):
"""Return the source :class:`~fairseq.data.Dictionary`."""
return self.dictionary
@property
def target_dictionary(self):
"""Return the target :class:`~fairseq.data.Dictionary`."""
return self.dictionary
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/tasks/multilingual_translation.py | fairseq/tasks/multilingual_translation.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import OrderedDict
import logging
import os
from fairseq import options
import contextlib
import torch
from fairseq import metrics, utils
from fairseq.data import (
Dictionary,
LanguagePairDataset,
RoundRobinZipDatasets,
TransformEosLangPairDataset,
)
from fairseq.models import FairseqMultiModel
from fairseq.tasks.translation import load_langpair_dataset
from . import register_task, LegacyFairseqTask
logger = logging.getLogger(__name__)
def _lang_token(lang: str):
return '__{}__'.format(lang)
def _lang_token_index(dic: Dictionary, lang: str):
"""Return language token index."""
idx = dic.index(_lang_token(lang))
assert idx != dic.unk_index, \
'cannot find language token for lang {}'.format(lang)
return idx
@register_task('multilingual_translation')
class MultilingualTranslationTask(LegacyFairseqTask):
"""A task for training multiple translation models simultaneously.
We iterate round-robin over batches from multiple language pairs, ordered
according to the `--lang-pairs` argument.
The training loop is roughly:
for i in range(len(epoch)):
for lang_pair in args.lang_pairs:
batch = next_batch_for_lang_pair(lang_pair)
loss = criterion(model_for_lang_pair(lang_pair), batch)
loss.backward()
optimizer.step()
In practice, `next_batch_for_lang_pair` is abstracted in a FairseqDataset
(e.g., `RoundRobinZipDatasets`) and `model_for_lang_pair` is a model that
implements the `FairseqMultiModel` interface.
During inference it is required to specify a single `--source-lang` and
`--target-lang`, which indicates the inference langauge direction.
`--lang-pairs`, `--encoder-langtok`, `--decoder-langtok` have to be set to
the same value as training.
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
# fmt: off
parser.add_argument('data', metavar='DIR', help='path to data directory')
parser.add_argument('--lang-pairs', default=None, metavar='PAIRS',
help='comma-separated list of language pairs (in training order): en-de,en-fr,de-fr')
parser.add_argument('-s', '--source-lang', default=None, metavar='SRC',
help='source language (only needed for inference)')
parser.add_argument('-t', '--target-lang', default=None, metavar='TARGET',
help='target language (only needed for inference)')
parser.add_argument('--left-pad-source', default='True', type=str, metavar='BOOL',
help='pad the source on the left (default: True)')
parser.add_argument('--left-pad-target', default='False', type=str, metavar='BOOL',
help='pad the target on the left (default: False)')
parser.add_argument('--max-source-positions', default=1024, type=int, metavar='N',
help='max number of tokens in the source sequence')
parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N',
help='max number of tokens in the target sequence')
parser.add_argument('--upsample-primary', default=1, type=int,
help='amount to upsample primary dataset')
parser.add_argument('--encoder-langtok', default=None, type=str, choices=['src', 'tgt'],
metavar='SRCTGT',
help='replace beginning-of-sentence in source sentence with source or target '
'language token. (src/tgt)')
parser.add_argument('--decoder-langtok', action='store_true',
help='replace beginning-of-sentence in target sentence with target language token')
# fmt: on
def __init__(self, args, dicts, training):
super().__init__(args)
self.dicts = dicts
self.training = training
if training:
self.lang_pairs = args.lang_pairs
else:
self.lang_pairs = ['{}-{}'.format(args.source_lang, args.target_lang)]
# eval_lang_pairs for multilingual translation is usually all of the
# lang_pairs. However for other multitask settings or when we want to
# optimize for certain languages we want to use a different subset. Thus
# the eval_lang_pairs class variable is provided for classes that extend
# this class.
self.eval_lang_pairs = self.lang_pairs
# model_lang_pairs will be used to build encoder-decoder model pairs in
# models.build_model(). This allows multitask type of sub-class can
# build models other than the input lang_pairs
self.model_lang_pairs = self.lang_pairs
self.langs = list(dicts.keys())
@classmethod
def setup_task(cls, args, **kwargs):
dicts, training = cls.prepare(args, **kwargs)
return cls(args, dicts, training)
@classmethod
def prepare(cls, args, **kargs):
args.left_pad_source = utils.eval_bool(args.left_pad_source)
args.left_pad_target = utils.eval_bool(args.left_pad_target)
if args.lang_pairs is None:
raise ValueError('--lang-pairs is required. List all the language pairs in the training objective.')
if isinstance(args.lang_pairs, str):
args.lang_pairs = args.lang_pairs.split(',')
sorted_langs = sorted(list({x for lang_pair in args.lang_pairs for x in lang_pair.split('-')}))
if args.source_lang is not None or args.target_lang is not None:
training = False
else:
training = True
# load dictionaries
dicts = OrderedDict()
for lang in sorted_langs:
paths = utils.split_paths(args.data)
assert len(paths) > 0
dicts[lang] = cls.load_dictionary(os.path.join(paths[0], 'dict.{}.txt'.format(lang)))
if len(dicts) > 0:
assert dicts[lang].pad() == dicts[sorted_langs[0]].pad()
assert dicts[lang].eos() == dicts[sorted_langs[0]].eos()
assert dicts[lang].unk() == dicts[sorted_langs[0]].unk()
if args.encoder_langtok is not None or args.decoder_langtok:
for lang_to_add in sorted_langs:
dicts[lang].add_symbol(_lang_token(lang_to_add))
logger.info('[{}] dictionary: {} types'.format(lang, len(dicts[lang])))
return dicts, training
def get_encoder_langtok(self, src_lang, tgt_lang):
if self.args.encoder_langtok is None:
return self.dicts[src_lang].eos()
if self.args.encoder_langtok == 'src':
return _lang_token_index(self.dicts[src_lang], src_lang)
else:
return _lang_token_index(self.dicts[src_lang], tgt_lang)
def get_decoder_langtok(self, tgt_lang):
if not self.args.decoder_langtok:
return self.dicts[tgt_lang].eos()
return _lang_token_index(self.dicts[tgt_lang], tgt_lang)
def alter_dataset_langtok(self, lang_pair_dataset,
src_eos=None, src_lang=None, tgt_eos=None, tgt_lang=None):
if self.args.encoder_langtok is None and not self.args.decoder_langtok:
return lang_pair_dataset
new_src_eos = None
if self.args.encoder_langtok is not None and src_eos is not None \
and src_lang is not None and tgt_lang is not None:
new_src_eos = self.get_encoder_langtok(src_lang, tgt_lang)
else:
src_eos = None
new_tgt_bos = None
if self.args.decoder_langtok and tgt_eos is not None and tgt_lang is not None:
new_tgt_bos = self.get_decoder_langtok(tgt_lang)
else:
tgt_eos = None
return TransformEosLangPairDataset(
lang_pair_dataset,
src_eos=src_eos,
new_src_eos=new_src_eos,
tgt_bos=tgt_eos,
new_tgt_bos=new_tgt_bos,
)
def load_dataset(self, split, epoch=1, **kwargs):
"""Load a dataset split."""
paths = utils.split_paths(self.args.data)
assert len(paths) > 0
data_path = paths[(epoch - 1) % len(paths)]
def language_pair_dataset(lang_pair):
src, tgt = lang_pair.split('-')
langpair_dataset = load_langpair_dataset(
data_path, split, src, self.dicts[src], tgt, self.dicts[tgt],
combine=True, dataset_impl=self.args.dataset_impl,
upsample_primary=self.args.upsample_primary,
left_pad_source=self.args.left_pad_source,
left_pad_target=self.args.left_pad_target,
max_source_positions=self.args.max_source_positions,
max_target_positions=self.args.max_target_positions,
)
return self.alter_dataset_langtok(
langpair_dataset,
src_eos=self.dicts[src].eos(),
src_lang=src,
tgt_eos=self.dicts[tgt].eos(),
tgt_lang=tgt,
)
self.datasets[split] = RoundRobinZipDatasets(
OrderedDict([
(lang_pair, language_pair_dataset(lang_pair))
for lang_pair in self.lang_pairs
]),
eval_key=None if self.training else "%s-%s" % (self.args.source_lang, self.args.target_lang),
)
def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None):
if constraints is not None:
raise NotImplementedError("Constrained decoding with the multilingual_translation task is not supported")
lang_pair = "%s-%s" % (self.args.source_lang, self.args.target_lang)
return RoundRobinZipDatasets(
OrderedDict([(
lang_pair,
self.alter_dataset_langtok(
LanguagePairDataset(
src_tokens, src_lengths,
self.source_dictionary
),
src_eos=self.source_dictionary.eos(),
src_lang=self.args.source_lang,
tgt_eos=self.target_dictionary.eos(),
tgt_lang=self.args.target_lang,
),
)]),
eval_key=lang_pair,
)
def build_model(self, args):
def check_args():
messages = []
if len(set(self.args.lang_pairs).symmetric_difference(args.lang_pairs)) != 0:
messages.append('--lang-pairs should include all the language pairs {}.'.format(args.lang_pairs))
if self.args.encoder_langtok != args.encoder_langtok:
messages.append('--encoder-langtok should be {}.'.format(args.encoder_langtok))
if self.args.decoder_langtok != args.decoder_langtok:
messages.append('--decoder-langtok should {} be set.'.format("" if args.decoder_langtok else "not"))
if len(messages) > 0:
raise ValueError(' '.join(messages))
# Check if task args are consistant with model args
check_args()
from fairseq import models
model = models.build_model(args, self)
if not isinstance(model, FairseqMultiModel):
raise ValueError('MultilingualTranslationTask requires a FairseqMultiModel architecture')
return model
def train_step(self, sample, model, criterion, optimizer, update_num, ignore_grad=False):
model.train()
from collections import defaultdict
agg_loss, agg_sample_size, agg_logging_output = 0., 0., defaultdict(float)
curr_lang_pairs = [
lang_pair
for lang_pair in self.model_lang_pairs
if sample[lang_pair] is not None and len(sample[lang_pair]) != 0
]
for idx, lang_pair in enumerate(curr_lang_pairs):
def maybe_no_sync():
if (
self.args.distributed_world_size > 1
and hasattr(model, 'no_sync')
and idx < len(curr_lang_pairs) - 1
):
return model.no_sync()
else:
return contextlib.ExitStack() # dummy contextmanager
with maybe_no_sync():
loss, sample_size, logging_output = criterion(model.models[lang_pair], sample[lang_pair])
if ignore_grad:
loss *= 0
optimizer.backward(loss)
agg_loss += loss.detach().item()
# TODO make summing of the sample sizes configurable
agg_sample_size += sample_size
for k in logging_output:
agg_logging_output[k] += logging_output[k]
agg_logging_output[f"{lang_pair}:{k}"] += logging_output[k]
return agg_loss, agg_sample_size, agg_logging_output
def valid_step(self, sample, model, criterion):
model.eval()
with torch.no_grad():
from collections import defaultdict
agg_loss, agg_sample_size, agg_logging_output = 0., 0., defaultdict(float)
for lang_pair in self.eval_lang_pairs:
if lang_pair not in sample or sample[lang_pair] is None or len(sample[lang_pair]) == 0:
continue
loss, sample_size, logging_output = criterion(model.models[lang_pair], sample[lang_pair])
agg_loss += loss.data.item()
# TODO make summing of the sample sizes configurable
agg_sample_size += sample_size
for k in logging_output:
agg_logging_output[k] += logging_output[k]
agg_logging_output[f"{lang_pair}:{k}"] += logging_output[k]
return agg_loss, agg_sample_size, agg_logging_output
def inference_step(self, generator, models, sample, prefix_tokens=None, constraints=None):
with torch.no_grad():
if self.args.decoder_langtok:
bos_token = _lang_token_index(self.target_dictionary, self.args.target_lang)
else:
bos_token = self.target_dictionary.eos()
return generator.generate(
models,
sample,
prefix_tokens=prefix_tokens,
constraints=constraints,
bos_token=bos_token,
)
def reduce_metrics(self, logging_outputs, criterion):
with metrics.aggregate():
# pass 'sample_size', 'nsentences', 'ntokens' stats to fairseq_task
super().reduce_metrics(logging_outputs, criterion)
for k in ['sample_size', 'nsentences', 'ntokens']:
metrics.log_scalar(k, sum(l[k] for l in logging_outputs))
@property
def source_dictionary(self):
if self.training:
return next(iter(self.dicts.values()))
else:
return self.dicts[self.args.source_lang]
@property
def target_dictionary(self):
if self.training:
return next(iter(self.dicts.values()))
else:
return self.dicts[self.args.target_lang]
def max_positions(self):
"""Return the max sentence length allowed by the task."""
if len(self.datasets.values()) == 0:
return {'%s-%s' % (self.args.source_lang, self.args.target_lang):
(self.args.max_source_positions, self.args.max_target_positions)}
return OrderedDict([
(key, (self.args.max_source_positions, self.args.max_target_positions))
for split in self.datasets.keys()
for key in self.datasets[split].datasets.keys()
])
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/tasks/sentence_prediction.py | fairseq/tasks/sentence_prediction.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import numpy as np
from fairseq import utils
from fairseq.data import (
ConcatSentencesDataset,
data_utils,
Dictionary,
IdDataset,
NestedDictionaryDataset,
NumSamplesDataset,
NumelDataset,
OffsetTokensDataset,
PrependTokenDataset,
RawLabelDataset,
RightPadDataset,
RollDataset,
SortDataset,
StripTokenDataset,
)
from fairseq.tasks import register_task, LegacyFairseqTask
from fairseq.data.shorten_dataset import maybe_shorten_dataset
logger = logging.getLogger(__name__)
@register_task('sentence_prediction')
class SentencePredictionTask(LegacyFairseqTask):
"""
Sentence (or sentence pair) prediction (classification or regression) task.
Args:
dictionary (Dictionary): the dictionary for the input of the task
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
parser.add_argument('data', metavar='FILE',
help='file prefix for data')
parser.add_argument('--num-classes', type=int, default=-1,
help='number of classes or regression targets')
parser.add_argument('--init-token', type=int, default=None,
help='add token at the beginning of each batch item')
parser.add_argument('--separator-token', type=int, default=None,
help='add separator token between inputs')
parser.add_argument('--regression-target', action='store_true', default=False)
parser.add_argument('--no-shuffle', action='store_true', default=False)
parser.add_argument('--shorten-method', default='none',
choices=['none', 'truncate', 'random_crop'],
help='if not none, shorten sequences that exceed --tokens-per-sample')
parser.add_argument('--shorten-data-split-list', default='',
help='comma-separated list of dataset splits to apply shortening to, '
'e.g., "train,valid" (default: all dataset splits)')
parser.add_argument('--add-prev-output-tokens', action='store_true', default=False,
help='add prev_output_tokens to sample, used for encoder-decoder arch')
def __init__(self, args, data_dictionary, label_dictionary):
super().__init__(args)
self.dictionary = data_dictionary
self._label_dictionary = label_dictionary
if not hasattr(args, 'max_positions'):
self._max_positions = (
args.max_source_positions,
args.max_target_positions,
)
else:
self._max_positions = args.max_positions
args.tokens_per_sample = self._max_positions
@classmethod
def load_dictionary(cls, args, filename, source=True):
"""Load the dictionary from the filename
Args:
filename (str): the filename
"""
dictionary = Dictionary.load(filename)
dictionary.add_symbol('<mask>')
return dictionary
@classmethod
def setup_task(cls, args, **kwargs):
assert args.num_classes > 0, 'Must set --num-classes'
# load data dictionary
data_dict = cls.load_dictionary(
args,
os.path.join(args.data, 'input0', 'dict.txt'),
source=True,
)
logger.info('[input] dictionary: {} types'.format(len(data_dict)))
label_dict = None
if not args.regression_target:
# load label dictionary
label_dict = cls.load_dictionary(
args,
os.path.join(args.data, 'label', 'dict.txt'),
source=False,
)
logger.info('[label] dictionary: {} types'.format(len(label_dict)))
else:
label_dict = data_dict
return SentencePredictionTask(args, data_dict, label_dict)
def load_dataset(self, split, combine=False, **kwargs):
"""Load a given dataset split (e.g., train, valid, test)."""
def get_path(type, split):
return os.path.join(self.args.data, type, split)
def make_dataset(type, dictionary):
split_path = get_path(type, split)
dataset = data_utils.load_indexed_dataset(
split_path,
dictionary,
self.args.dataset_impl,
combine=combine,
)
return dataset
input0 = make_dataset('input0', self.source_dictionary)
assert input0 is not None, 'could not find dataset: {}'.format(get_path(type, split))
input1 = make_dataset('input1', self.source_dictionary)
if self.args.init_token is not None:
input0 = PrependTokenDataset(input0, self.args.init_token)
if input1 is None:
src_tokens = input0
else:
if self.args.separator_token is not None:
input1 = PrependTokenDataset(input1, self.args.separator_token)
src_tokens = ConcatSentencesDataset(input0, input1)
with data_utils.numpy_seed(self.args.seed):
shuffle = np.random.permutation(len(src_tokens))
src_tokens = maybe_shorten_dataset(
src_tokens,
split,
self.args.shorten_data_split_list,
self.args.shorten_method,
self.args.max_positions,
self.args.seed,
)
dataset = {
'id': IdDataset(),
'net_input': {
'src_tokens': RightPadDataset(
src_tokens,
pad_idx=self.source_dictionary.pad(),
),
'src_lengths': NumelDataset(src_tokens, reduce=False),
},
'nsentences': NumSamplesDataset(),
'ntokens': NumelDataset(src_tokens, reduce=True),
}
if self.args.add_prev_output_tokens:
prev_tokens_dataset = RightPadDataset(
RollDataset(src_tokens, 1),
pad_idx=self.dictionary.pad(),
)
dataset['net_input'].update(
prev_output_tokens=prev_tokens_dataset,
)
if not self.args.regression_target:
label_dataset = make_dataset('label', self.label_dictionary)
if label_dataset is not None:
dataset.update(
target=OffsetTokensDataset(
StripTokenDataset(
label_dataset,
id_to_strip=self.label_dictionary.eos(),
),
offset=-self.label_dictionary.nspecial,
)
)
else:
label_path = "{0}.label".format(get_path('label', split))
if os.path.exists(label_path):
def parse_regression_target(i, line):
values = line.split()
assert len(values) == self.args.num_classes, \
f'expected num_classes={self.args.num_classes} regression target values on line {i}, found: "{line}"'
return [float(x) for x in values]
with open(label_path) as h:
dataset.update(
target=RawLabelDataset([
parse_regression_target(i, line.strip())
for i, line in enumerate(h.readlines())
])
)
nested_dataset = NestedDictionaryDataset(
dataset,
sizes=[src_tokens.sizes],
)
if self.args.no_shuffle:
dataset = nested_dataset
else:
dataset = SortDataset(
nested_dataset,
# shuffle
sort_order=[shuffle],
)
logger.info("Loaded {0} with #samples: {1}".format(split, len(dataset)))
self.datasets[split] = dataset
return self.datasets[split]
def build_model(self, args):
from fairseq import models
model = models.build_model(args, self)
model.register_classification_head(
getattr(args, 'classification_head_name', 'sentence_classification_head'),
num_classes=self.args.num_classes,
)
return model
def max_positions(self):
return self._max_positions
@property
def source_dictionary(self):
return self.dictionary
@property
def target_dictionary(self):
return self.dictionary
@property
def label_dictionary(self):
return self._label_dictionary
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/tasks/translation.py | fairseq/tasks/translation.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from argparse import Namespace
import json
import itertools
import logging
import os
from fairseq import options
import numpy as np
from fairseq import metrics, utils
from fairseq.data import (
AppendTokenDataset,
ConcatDataset,
data_utils,
encoders,
indexed_dataset,
LanguagePairDataset,
PrependTokenDataset,
StripTokenDataset,
TruncateDataset,
)
from fairseq.tasks import register_task, LegacyFairseqTask
EVAL_BLEU_ORDER = 4
logger = logging.getLogger(__name__)
def load_langpair_dataset(
data_path, split,
src, src_dict,
tgt, tgt_dict,
combine, dataset_impl, upsample_primary,
left_pad_source, left_pad_target, max_source_positions,
max_target_positions, prepend_bos=False, load_alignments=False,
truncate_source=False, append_source_id=False,
num_buckets=0,
shuffle=True,
):
def split_exists(split, src, tgt, lang, data_path):
filename = os.path.join(data_path, '{}.{}-{}.{}'.format(split, src, tgt, lang))
return indexed_dataset.dataset_exists(filename, impl=dataset_impl)
src_datasets = []
tgt_datasets = []
for k in itertools.count():
split_k = split + (str(k) if k > 0 else '')
# infer langcode
if split_exists(split_k, src, tgt, src, data_path):
prefix = os.path.join(data_path, '{}.{}-{}.'.format(split_k, src, tgt))
elif split_exists(split_k, tgt, src, src, data_path):
prefix = os.path.join(data_path, '{}.{}-{}.'.format(split_k, tgt, src))
else:
if k > 0:
break
else:
raise FileNotFoundError('Dataset not found: {} ({})'.format(split, data_path))
src_dataset = data_utils.load_indexed_dataset(prefix + src, src_dict, dataset_impl)
if truncate_source:
src_dataset = AppendTokenDataset(
TruncateDataset(
StripTokenDataset(src_dataset, src_dict.eos()),
max_source_positions - 1,
),
src_dict.eos(),
)
src_datasets.append(src_dataset)
tgt_dataset = data_utils.load_indexed_dataset(prefix + tgt, tgt_dict, dataset_impl)
if tgt_dataset is not None:
tgt_datasets.append(tgt_dataset)
logger.info('{} {} {}-{} {} examples'.format(
data_path, split_k, src, tgt, len(src_datasets[-1])
))
if not combine:
break
assert len(src_datasets) == len(tgt_datasets) or len(tgt_datasets) == 0
if len(src_datasets) == 1:
src_dataset = src_datasets[0]
tgt_dataset = tgt_datasets[0] if len(tgt_datasets) > 0 else None
else:
sample_ratios = [1] * len(src_datasets)
sample_ratios[0] = upsample_primary
src_dataset = ConcatDataset(src_datasets, sample_ratios)
if len(tgt_datasets) > 0:
tgt_dataset = ConcatDataset(tgt_datasets, sample_ratios)
else:
tgt_dataset = None
if prepend_bos:
assert hasattr(src_dict, "bos_index") and hasattr(tgt_dict, "bos_index")
src_dataset = PrependTokenDataset(src_dataset, src_dict.bos())
if tgt_dataset is not None:
tgt_dataset = PrependTokenDataset(tgt_dataset, tgt_dict.bos())
eos = None
if append_source_id:
src_dataset = AppendTokenDataset(src_dataset, src_dict.index('[{}]'.format(src)))
if tgt_dataset is not None:
tgt_dataset = AppendTokenDataset(tgt_dataset, tgt_dict.index('[{}]'.format(tgt)))
eos = tgt_dict.index('[{}]'.format(tgt))
align_dataset = None
if load_alignments:
align_path = os.path.join(data_path, '{}.align.{}-{}'.format(split, src, tgt))
if indexed_dataset.dataset_exists(align_path, impl=dataset_impl):
align_dataset = data_utils.load_indexed_dataset(align_path, None, dataset_impl)
tgt_dataset_sizes = tgt_dataset.sizes if tgt_dataset is not None else None
return LanguagePairDataset(
src_dataset, src_dataset.sizes, src_dict,
tgt_dataset, tgt_dataset_sizes, tgt_dict,
left_pad_source=left_pad_source,
left_pad_target=left_pad_target,
align_dataset=align_dataset, eos=eos,
num_buckets=num_buckets,
shuffle=shuffle,
)
@register_task('translation')
class TranslationTask(LegacyFairseqTask):
"""
Translate from one (source) language to another (target) language.
Args:
src_dict (~fairseq.data.Dictionary): dictionary for the source language
tgt_dict (~fairseq.data.Dictionary): dictionary for the target language
.. note::
The translation task is compatible with :mod:`fairseq-train`,
:mod:`fairseq-generate` and :mod:`fairseq-interactive`.
The translation task provides the following additional command-line
arguments:
.. argparse::
:ref: fairseq.tasks.translation_parser
:prog:
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
# fmt: off
parser.add_argument('data', help='colon separated path to data directories list, \
will be iterated upon during epochs in round-robin manner; \
however, valid and test data are always in the first directory to \
avoid the need for repeating them in all directories')
parser.add_argument('-s', '--source-lang', default=None, metavar='SRC',
help='source language')
parser.add_argument('-t', '--target-lang', default=None, metavar='TARGET',
help='target language')
parser.add_argument('--load-alignments', action='store_true',
help='load the binarized alignments')
parser.add_argument('--left-pad-source', default='True', type=str, metavar='BOOL',
help='pad the source on the left')
parser.add_argument('--left-pad-target', default='False', type=str, metavar='BOOL',
help='pad the target on the left')
parser.add_argument('--max-source-positions', default=1024, type=int, metavar='N',
help='max number of tokens in the source sequence')
parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N',
help='max number of tokens in the target sequence')
parser.add_argument('--upsample-primary', default=1, type=int,
help='amount to upsample primary dataset')
parser.add_argument('--truncate-source', action='store_true', default=False,
help='truncate source to max-source-positions')
parser.add_argument('--num-batch-buckets', default=0, type=int, metavar='N',
help='if >0, then bucket source and target lengths into N '
'buckets and pad accordingly; this is useful on TPUs '
'to minimize the number of compilations')
# options for reporting BLEU during validation
parser.add_argument('--eval-bleu', action='store_true',
help='evaluation with BLEU scores')
parser.add_argument('--eval-bleu-detok', type=str, default="space",
help='detokenize before computing BLEU (e.g., "moses"); '
'required if using --eval-bleu; use "space" to '
'disable detokenization; see fairseq.data.encoders '
'for other options')
parser.add_argument('--eval-bleu-detok-args', type=str, metavar='JSON',
help='args for building the tokenizer, if needed')
parser.add_argument('--eval-tokenized-bleu', action='store_true', default=False,
help='compute tokenized BLEU instead of sacrebleu')
parser.add_argument('--eval-bleu-remove-bpe', nargs='?', const='@@ ', default=None,
help='remove BPE before computing BLEU')
parser.add_argument('--eval-bleu-args', type=str, metavar='JSON',
help='generation args for BLUE scoring, '
'e.g., \'{"beam": 4, "lenpen": 0.6}\'')
parser.add_argument('--eval-bleu-print-samples', action='store_true',
help='print sample generations during validation')
# fmt: on
def __init__(self, args, src_dict, tgt_dict):
super().__init__(args)
self.src_dict = src_dict
self.tgt_dict = tgt_dict
@classmethod
def setup_task(cls, args, **kwargs):
"""Setup the task (e.g., load dictionaries).
Args:
args (argparse.Namespace): parsed command-line arguments
"""
args.left_pad_source = utils.eval_bool(args.left_pad_source)
args.left_pad_target = utils.eval_bool(args.left_pad_target)
paths = utils.split_paths(args.data)
assert len(paths) > 0
# find language pair automatically
if args.source_lang is None or args.target_lang is None:
args.source_lang, args.target_lang = data_utils.infer_language_pair(paths[0])
if args.source_lang is None or args.target_lang is None:
raise Exception('Could not infer language pair, please provide it explicitly')
# load dictionaries
src_dict = cls.load_dictionary(os.path.join(paths[0], 'dict.{}.txt'.format(args.source_lang)))
tgt_dict = cls.load_dictionary(os.path.join(paths[0], 'dict.{}.txt'.format(args.target_lang)))
assert src_dict.pad() == tgt_dict.pad()
assert src_dict.eos() == tgt_dict.eos()
assert src_dict.unk() == tgt_dict.unk()
logger.info('[{}] dictionary: {} types'.format(args.source_lang, len(src_dict)))
logger.info('[{}] dictionary: {} types'.format(args.target_lang, len(tgt_dict)))
return cls(args, src_dict, tgt_dict)
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
paths = utils.split_paths(self.args.data)
assert len(paths) > 0
if split != getattr(self.args, "train_subset", None):
# if not training data set, use the first shard for valid and test
paths = paths[:1]
data_path = paths[(epoch - 1) % len(paths)]
# infer langcode
src, tgt = self.args.source_lang, self.args.target_lang
self.datasets[split] = load_langpair_dataset(
data_path, split, src, self.src_dict, tgt, self.tgt_dict,
combine=combine, dataset_impl=self.args.dataset_impl,
upsample_primary=self.args.upsample_primary,
left_pad_source=self.args.left_pad_source,
left_pad_target=self.args.left_pad_target,
max_source_positions=self.args.max_source_positions,
max_target_positions=self.args.max_target_positions,
load_alignments=self.args.load_alignments,
truncate_source=self.args.truncate_source,
num_buckets=self.args.num_batch_buckets,
shuffle=(split != 'test'),
)
def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None):
return LanguagePairDataset(src_tokens, src_lengths, self.source_dictionary,
tgt_dict=self.target_dictionary,
constraints=constraints)
def build_model(self, args):
model = super().build_model(args)
if getattr(args, 'eval_bleu', False):
assert getattr(args, 'eval_bleu_detok', None) is not None, (
'--eval-bleu-detok is required if using --eval-bleu; '
'try --eval-bleu-detok=moses (or --eval-bleu-detok=space '
'to disable detokenization, e.g., when using sentencepiece)'
)
detok_args = json.loads(getattr(args, 'eval_bleu_detok_args', '{}') or '{}')
self.tokenizer = encoders.build_tokenizer(Namespace(
tokenizer=getattr(args, 'eval_bleu_detok', None),
**detok_args
))
gen_args = json.loads(getattr(args, 'eval_bleu_args', '{}') or '{}')
self.sequence_generator = self.build_generator([model], Namespace(**gen_args))
return model
def valid_step(self, sample, model, criterion):
loss, sample_size, logging_output = super().valid_step(sample, model, criterion)
if self.args.eval_bleu:
bleu = self._inference_with_bleu(self.sequence_generator, sample, model)
logging_output['_bleu_sys_len'] = bleu.sys_len
logging_output['_bleu_ref_len'] = bleu.ref_len
# we split counts into separate entries so that they can be
# summed efficiently across workers using fast-stat-sync
assert len(bleu.counts) == EVAL_BLEU_ORDER
for i in range(EVAL_BLEU_ORDER):
logging_output['_bleu_counts_' + str(i)] = bleu.counts[i]
logging_output['_bleu_totals_' + str(i)] = bleu.totals[i]
return loss, sample_size, logging_output
def reduce_metrics(self, logging_outputs, criterion):
super().reduce_metrics(logging_outputs, criterion)
if self.args.eval_bleu:
def sum_logs(key):
return sum(log.get(key, 0) for log in logging_outputs)
counts, totals = [], []
for i in range(EVAL_BLEU_ORDER):
counts.append(sum_logs('_bleu_counts_' + str(i)))
totals.append(sum_logs('_bleu_totals_' + str(i)))
if max(totals) > 0:
# log counts as numpy arrays -- log_scalar will sum them correctly
metrics.log_scalar('_bleu_counts', np.array(counts))
metrics.log_scalar('_bleu_totals', np.array(totals))
metrics.log_scalar('_bleu_sys_len', sum_logs('_bleu_sys_len'))
metrics.log_scalar('_bleu_ref_len', sum_logs('_bleu_ref_len'))
def compute_bleu(meters):
import inspect
import sacrebleu
fn_sig = inspect.getfullargspec(sacrebleu.compute_bleu)[0]
if 'smooth_method' in fn_sig:
smooth = {'smooth_method': 'exp'}
else:
smooth = {'smooth': 'exp'}
bleu = sacrebleu.compute_bleu(
correct=meters['_bleu_counts'].sum,
total=meters['_bleu_totals'].sum,
sys_len=meters['_bleu_sys_len'].sum,
ref_len=meters['_bleu_ref_len'].sum,
**smooth
)
return round(bleu.score, 2)
metrics.log_derived('bleu', compute_bleu)
def max_positions(self):
"""Return the max sentence length allowed by the task."""
return (self.args.max_source_positions, self.args.max_target_positions)
@property
def source_dictionary(self):
"""Return the source :class:`~fairseq.data.Dictionary`."""
return self.src_dict
@property
def target_dictionary(self):
"""Return the target :class:`~fairseq.data.Dictionary`."""
return self.tgt_dict
def _inference_with_bleu(self, generator, sample, model):
import sacrebleu
def decode(toks, escape_unk=False):
s = self.tgt_dict.string(
toks.int().cpu(),
self.args.eval_bleu_remove_bpe,
# The default unknown string in fairseq is `<unk>`, but
# this is tokenized by sacrebleu as `< unk >`, inflating
# BLEU scores. Instead, we use a somewhat more verbose
# alternative that is unlikely to appear in the real
# reference, but doesn't get split into multiple tokens.
unk_string=(
"UNKNOWNTOKENINREF" if escape_unk else "UNKNOWNTOKENINHYP"
),
)
if self.tokenizer:
s = self.tokenizer.decode(s)
return s
gen_out = self.inference_step(generator, [model], sample, prefix_tokens=None)
hyps, refs = [], []
for i in range(len(gen_out)):
hyps.append(decode(gen_out[i][0]['tokens']))
refs.append(decode(
utils.strip_pad(sample['target'][i], self.tgt_dict.pad()),
escape_unk=True, # don't count <unk> as matches to the hypo
))
if self.args.eval_bleu_print_samples:
logger.info('example hypothesis: ' + hyps[0])
logger.info('example reference: ' + refs[0])
if self.args.eval_tokenized_bleu:
return sacrebleu.corpus_bleu(hyps, [refs], tokenize='none')
else:
return sacrebleu.corpus_bleu(hyps, [refs])
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/tasks/translation_lev.py | fairseq/tasks/translation_lev.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import torch
from fairseq.data import LanguagePairDataset
from fairseq.utils import new_arange
from fairseq.tasks import register_task
from fairseq.tasks.translation import TranslationTask, load_langpair_dataset
from fairseq import utils
@register_task('translation_lev')
class TranslationLevenshteinTask(TranslationTask):
"""
Translation (Sequence Generation) task for Levenshtein Transformer
See `"Levenshtein Transformer" <https://arxiv.org/abs/1905.11006>`_.
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
# fmt: off
TranslationTask.add_args(parser)
parser.add_argument(
'--noise',
default='random_delete',
choices=['random_delete', 'random_mask', 'no_noise', 'full_mask'])
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
paths = utils.split_paths(self.args.data)
assert len(paths) > 0
data_path = paths[(epoch - 1) % len(paths)]
# infer langcode
src, tgt = self.args.source_lang, self.args.target_lang
self.datasets[split] = load_langpair_dataset(
data_path, split, src, self.src_dict, tgt, self.tgt_dict,
combine=combine, dataset_impl=self.args.dataset_impl,
upsample_primary=self.args.upsample_primary,
left_pad_source=self.args.left_pad_source,
left_pad_target=self.args.left_pad_target,
max_source_positions=self.args.max_source_positions,
max_target_positions=self.args.max_target_positions,
prepend_bos=True,
)
def inject_noise(self, target_tokens):
def _random_delete(target_tokens):
pad = self.tgt_dict.pad()
bos = self.tgt_dict.bos()
eos = self.tgt_dict.eos()
max_len = target_tokens.size(1)
target_mask = target_tokens.eq(pad)
target_score = target_tokens.clone().float().uniform_()
target_score.masked_fill_(
target_tokens.eq(bos) | target_tokens.eq(eos), 0.0)
target_score.masked_fill_(target_mask, 1)
target_score, target_rank = target_score.sort(1)
target_length = target_mask.size(1) - target_mask.float().sum(
1, keepdim=True)
# do not delete <bos> and <eos> (we assign 0 score for them)
target_cutoff = 2 + ((target_length - 2) * target_score.new_zeros(
target_score.size(0), 1).uniform_()).long()
target_cutoff = target_score.sort(1)[1] >= target_cutoff
prev_target_tokens = target_tokens.gather(
1, target_rank).masked_fill_(target_cutoff, pad).gather(
1,
target_rank.masked_fill_(target_cutoff,
max_len).sort(1)[1])
prev_target_tokens = prev_target_tokens[:, :prev_target_tokens.
ne(pad).sum(1).max()]
return prev_target_tokens
def _random_mask(target_tokens):
pad = self.tgt_dict.pad()
bos = self.tgt_dict.bos()
eos = self.tgt_dict.eos()
unk = self.tgt_dict.unk()
target_masks = target_tokens.ne(pad) & \
target_tokens.ne(bos) & \
target_tokens.ne(eos)
target_score = target_tokens.clone().float().uniform_()
target_score.masked_fill_(~target_masks, 2.0)
target_length = target_masks.sum(1).float()
target_length = target_length * target_length.clone().uniform_()
target_length = target_length + 1 # make sure to mask at least one token.
_, target_rank = target_score.sort(1)
target_cutoff = new_arange(target_rank) < target_length[:, None].long()
prev_target_tokens = target_tokens.masked_fill(
target_cutoff.scatter(1, target_rank, target_cutoff), unk)
return prev_target_tokens
def _full_mask(target_tokens):
pad = self.tgt_dict.pad()
bos = self.tgt_dict.bos()
eos = self.tgt_dict.eos()
unk = self.tgt_dict.unk()
target_mask = target_tokens.eq(bos) | target_tokens.eq(
eos) | target_tokens.eq(pad)
return target_tokens.masked_fill(~target_mask, unk)
if self.args.noise == 'random_delete':
return _random_delete(target_tokens)
elif self.args.noise == 'random_mask':
return _random_mask(target_tokens)
elif self.args.noise == 'full_mask':
return _full_mask(target_tokens)
elif self.args.noise == 'no_noise':
return target_tokens
else:
raise NotImplementedError
def build_generator(self, models, args):
# add models input to match the API for SequenceGenerator
from fairseq.iterative_refinement_generator import IterativeRefinementGenerator
return IterativeRefinementGenerator(
self.target_dictionary,
eos_penalty=getattr(args, 'iter_decode_eos_penalty', 0.0),
max_iter=getattr(args, 'iter_decode_max_iter', 10),
beam_size=getattr(args, 'iter_decode_with_beam', 1),
reranking=getattr(args, 'iter_decode_with_external_reranker', False),
decoding_format=getattr(args, 'decoding_format', None),
adaptive=not getattr(args, 'iter_decode_force_max_iter', False),
retain_history=getattr(args, 'retain_iter_history', False))
def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None):
if constraints is not None:
# Though see Susanto et al. (ACL 2020): https://www.aclweb.org/anthology/2020.acl-main.325/
raise NotImplementedError("Constrained decoding with the translation_lev task is not supported")
return LanguagePairDataset(
src_tokens, src_lengths, self.source_dictionary, append_bos=True
)
def train_step(self,
sample,
model,
criterion,
optimizer,
update_num,
ignore_grad=False):
model.train()
sample['prev_target'] = self.inject_noise(sample['target'])
loss, sample_size, logging_output = criterion(model, sample)
if ignore_grad:
loss *= 0
optimizer.backward(loss)
return loss, sample_size, logging_output
def valid_step(self, sample, model, criterion):
model.eval()
with torch.no_grad():
sample['prev_target'] = self.inject_noise(sample['target'])
loss, sample_size, logging_output = criterion(model, sample)
return loss, sample_size, logging_output
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/tasks/translation_multi_simple_epoch.py | fairseq/tasks/translation_multi_simple_epoch.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import datetime
import time
import torch
from fairseq.data import (
data_utils,
FairseqDataset,
iterators,
LanguagePairDataset,
ListDataset,
)
from fairseq.tasks import register_task, LegacyFairseqTask
from fairseq.data.multilingual.sampling_method import SamplingMethod
from fairseq.data.multilingual.multilingual_data_manager import MultilingualDatasetManager
###
def get_time_gap(s, e):
return (datetime.datetime.fromtimestamp(e) - datetime.datetime.fromtimestamp(s)).__str__()
###
logger = logging.getLogger(__name__)
@register_task('translation_multi_simple_epoch')
class TranslationMultiSimpleEpochTask(LegacyFairseqTask):
"""
Translate from one (source) language to another (target) language.
Args:
langs (List[str]): a list of languages that are being supported
dicts (Dict[str, fairseq.data.Dictionary]): mapping from supported languages to their dictionaries
training (bool): whether the task should be configured for training or not
.. note::
The translation task is compatible with :mod:`fairseq-train`,
:mod:`fairseq-generate` and :mod:`fairseq-interactive`.
The translation task provides the following additional command-line
arguments:
.. argparse::
:ref: fairseq.tasks.translation_parser
:prog:
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
# fmt: off
parser.add_argument('-s', '--source-lang', default=None, metavar='SRC',
help='inference source language')
parser.add_argument('-t', '--target-lang', default=None, metavar='TARGET',
help='inference target language')
parser.add_argument('--lang-pairs', default=None, metavar='PAIRS',
help='comma-separated list of language pairs (in training order): en-de,en-fr,de-fr')
parser.add_argument('--keep-inference-langtok', action='store_true',
help='keep language tokens in inference output (e.g. for analysis or debugging)')
SamplingMethod.add_arguments(parser)
MultilingualDatasetManager.add_args(parser)
# fmt: on
def __init__(self, args, langs, dicts, training):
super().__init__(args)
self.langs = langs
self.dicts = dicts
self.training = training
if training:
self.lang_pairs = args.lang_pairs
else:
self.lang_pairs = ['{}-{}'.format(args.source_lang, args.target_lang)]
# eval_lang_pairs for multilingual translation is usually all of the
# lang_pairs. However for other multitask settings or when we want to
# optimize for certain languages we want to use a different subset. Thus
# the eval_lang_pairs class variable is provided for classes that extend
# this class.
self.eval_lang_pairs = self.lang_pairs
# model_lang_pairs will be used to build encoder-decoder model pairs in
# models.build_model(). This allows multitask type of sub-class can
# build models other than the input lang_pairs
self.model_lang_pairs = self.lang_pairs
self.sampling_method = SamplingMethod.build_sampler(args, self)
self.data_manager = MultilingualDatasetManager.setup_data_manager(
args, self.lang_pairs, langs, dicts, self.sampling_method)
@classmethod
def setup_task(cls, args, **kwargs):
langs, dicts, training = MultilingualDatasetManager.prepare(
cls.load_dictionary, args, **kwargs
)
dict0 = None
for _, lang_dict in dicts.items():
if dict0 is None:
dict0 = lang_dict
else:
assert (
dict0 == lang_dict
), "Diffrent dictionary are specified for different languages; "
"TranslationMultiSimpleEpochTask only supports one shared dictionary across all languages"
return cls(args, langs, dicts, training)
def has_sharded_data(self, split):
return self.data_manager.has_sharded_data(split)
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
if split in self.datasets:
dataset = self.datasets[split]
if self.has_sharded_data(split) and dataset.load_next_shard:
shard_epoch = dataset.shard_epoch
else:
# no need to load next shard so skip loading
# also this avoid always loading from beginning of the data
return
else:
shard_epoch = None
logger.info(f'loading data for {split} epoch={epoch}/{shard_epoch}')
self.datasets[split] = self.data_manager.load_sampled_multi_epoch_dataset(
split,
self.training,
epoch=epoch, combine=combine, shard_epoch=shard_epoch, **kwargs
)
def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None):
if constraints is not None:
raise NotImplementedError("Constrained decoding with the multilingual_translation task is not supported")
src_data = ListDataset(src_tokens, src_lengths)
dataset = LanguagePairDataset(src_data, src_lengths, self.source_dictionary)
src_langtok_spec, tgt_langtok_spec = self.args.langtoks['main']
if self.args.lang_tok_replacing_bos_eos:
dataset = self.data_manager.alter_dataset_langtok(
dataset,
src_eos=self.source_dictionary.eos(),
src_lang=self.args.source_lang,
tgt_eos=self.target_dictionary.eos(),
tgt_lang=self.args.target_lang,
src_langtok_spec=src_langtok_spec,
tgt_langtok_spec=tgt_langtok_spec,
)
else:
dataset.src = self.data_manager.src_dataset_tranform_func(
self.args.source_lang,
self.args.target_lang,
dataset=dataset.src,
spec=src_langtok_spec,
)
return dataset
def build_generator(
self, models, args,
seq_gen_cls=None, extra_gen_cls_kwargs=None,
):
if not getattr(args, 'keep_inference_langtok', False):
_, tgt_langtok_spec = self.args.langtoks['main']
if tgt_langtok_spec:
tgt_lang_tok = self.data_manager.get_decoder_langtok(self.args.target_lang, tgt_langtok_spec)
extra_gen_cls_kwargs = extra_gen_cls_kwargs or {}
extra_gen_cls_kwargs['symbols_to_strip_from_output'] = {tgt_lang_tok}
return super().build_generator(
models, args,
seq_gen_cls=None,
extra_gen_cls_kwargs=extra_gen_cls_kwargs
)
def build_model(self, args):
return super().build_model(args)
def valid_step(self, sample, model, criterion):
loss, sample_size, logging_output = super().valid_step(sample, model, criterion)
return loss, sample_size, logging_output
def inference_step(self, generator, models, sample, prefix_tokens=None, constraints=None):
with torch.no_grad():
_, tgt_langtok_spec = self.args.langtoks['main']
if not self.args.lang_tok_replacing_bos_eos:
if prefix_tokens is None and tgt_langtok_spec:
tgt_lang_tok = self.data_manager.get_decoder_langtok(self.args.target_lang, tgt_langtok_spec)
src_tokens = sample['net_input']['src_tokens']
bsz = src_tokens.size(0)
prefix_tokens = torch.LongTensor(
[[tgt_lang_tok]]
).expand(bsz, 1).to(src_tokens)
return generator.generate(
models,
sample,
prefix_tokens=prefix_tokens,
constraints=constraints,
)
else:
return generator.generate(
models,
sample,
prefix_tokens=prefix_tokens,
bos_token=self.data_manager.get_decoder_langtok(self.args.target_lang, tgt_langtok_spec)
if tgt_langtok_spec else self.target_dictionary.eos(),
)
def reduce_metrics(self, logging_outputs, criterion):
super().reduce_metrics(logging_outputs, criterion)
def max_positions(self):
"""Return the max sentence length allowed by the task."""
return (self.args.max_source_positions, self.args.max_target_positions)
@property
def source_dictionary(self):
return next(iter(self.dicts.values()))
@property
def target_dictionary(self):
return next(iter(self.dicts.values()))
def create_batch_sampler_func(
self, max_positions, ignore_invalid_inputs,
max_tokens, max_sentences
):
def construct_batch_sampler(
dataset, epoch
):
splits = [s for s, _ in self.datasets.items() if self.datasets[s] == dataset]
split = splits[0] if len(splits) > 0 else None
if epoch is not None:
dataset.set_epoch(epoch)
start_time = time.time()
# get indices ordered by example size
indices = dataset.ordered_indices()
logger.debug(f'[{split}] @batch_sampler order indices time: {get_time_gap(start_time, time.time())}')
# filter examples that are too large
if max_positions is not None:
my_time = time.time()
indices = self.filter_indices_by_size(
indices,
dataset,
max_positions,
ignore_invalid_inputs=ignore_invalid_inputs,
)
logger.debug(f'[{split}] @batch_sampler filter_by_size time: {get_time_gap(my_time, time.time())}')
# create mini-batches with given size constraints
my_time = time.time()
batch_sampler = data_utils.batch_by_size(
indices, dataset.num_tokens, max_tokens=max_tokens, max_sentences=max_sentences,
)
logger.debug(f'[{split}] @batch_sampler batch_by_size time: {get_time_gap(my_time, time.time())}')
logger.debug(f'[{split}] per epoch batch_sampler set-up time: {get_time_gap(start_time, time.time())}')
return batch_sampler
return construct_batch_sampler
# we need to override get_batch_iterator because we want to reset the epoch iterator each time
def get_batch_iterator(
self, dataset, max_tokens=None, max_sentences=None, max_positions=None,
ignore_invalid_inputs=False, required_batch_size_multiple=1,
seed=1, num_shards=1, shard_id=0, num_workers=0, epoch=1,
data_buffer_size=0, disable_iterator_cache=False,
):
"""
Get an iterator that yields batches of data from the given dataset.
Args:
dataset (~fairseq.data.FairseqDataset): dataset to batch
max_tokens (int, optional): max number of tokens in each batch
(default: None).
max_sentences (int, optional): max number of sentences in each
batch (default: None).
max_positions (optional): max sentence length supported by the
model (default: None).
ignore_invalid_inputs (bool, optional): don't raise Exception for
sentences that are too long (default: False).
required_batch_size_multiple (int, optional): require batch size to
be a multiple of N (default: 1).
seed (int, optional): seed for random number generator for
reproducibility (default: 1).
num_shards (int, optional): shard the data iterator into N
shards (default: 1).
shard_id (int, optional): which shard of the data iterator to
return (default: 0).
num_workers (int, optional): how many subprocesses to use for data
loading. 0 means the data will be loaded in the main process
(default: 0).
epoch (int, optional): the epoch to start the iterator from
(default: 0).
data_buffer_size (int, optional): number of batches to
preload (default: 0).
disable_iterator_cache (bool, optional): don't cache the
EpochBatchIterator (ignores `FairseqTask::can_reuse_epoch_itr`)
(default: False).
Returns:
~fairseq.iterators.EpochBatchIterator: a batched iterator over the
given dataset split
"""
# initialize the dataset with the correct starting epoch
assert isinstance(dataset, FairseqDataset)
if dataset in self.dataset_to_epoch_iter:
return self.dataset_to_epoch_iter[dataset]
if (
self.args.sampling_method == 'RoundRobin'
):
batch_iter = super().get_batch_iterator(
dataset,
max_tokens=max_tokens,
max_sentences=max_sentences,
max_positions=max_positions,
ignore_invalid_inputs=ignore_invalid_inputs,
required_batch_size_multiple=required_batch_size_multiple,
seed=seed,
num_shards=num_shards,
shard_id=shard_id,
num_workers=num_workers,
epoch=epoch,
data_buffer_size=data_buffer_size,
disable_iterator_cache=disable_iterator_cache,
)
self.dataset_to_epoch_iter[dataset] = batch_iter
return batch_iter
construct_batch_sampler = self.create_batch_sampler_func(
max_positions, ignore_invalid_inputs,
max_tokens, max_sentences)
epoch_iter = iterators.EpochBatchIterator(
dataset=dataset,
collate_fn=dataset.collater,
batch_sampler=construct_batch_sampler,
seed=seed,
num_shards=num_shards,
shard_id=shard_id,
num_workers=num_workers,
epoch=epoch,
)
return epoch_iter
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/tasks/__init__.py | fairseq/tasks/__init__.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import importlib
import os
from .fairseq_task import FairseqTask, LegacyFairseqTask # noqa
TASK_REGISTRY = {}
TASK_CLASS_NAMES = set()
def setup_task(args, **kwargs):
return TASK_REGISTRY[args.task].setup_task(args, **kwargs)
def register_task(name):
"""
New tasks can be added to fairseq with the
:func:`~fairseq.tasks.register_task` function decorator.
For example::
@register_task('classification')
class ClassificationTask(FairseqTask):
(...)
.. note::
All Tasks must implement the :class:`~fairseq.tasks.FairseqTask`
interface.
Please see the
Args:
name (str): the name of the task
"""
def register_task_cls(cls):
if name in TASK_REGISTRY:
raise ValueError('Cannot register duplicate task ({})'.format(name))
if not issubclass(cls, FairseqTask):
raise ValueError('Task ({}: {}) must extend FairseqTask'.format(name, cls.__name__))
if cls.__name__ in TASK_CLASS_NAMES:
raise ValueError('Cannot register task with duplicate class name ({})'.format(cls.__name__))
TASK_REGISTRY[name] = cls
TASK_CLASS_NAMES.add(cls.__name__)
return cls
return register_task_cls
def get_task(name):
return TASK_REGISTRY[name]
# automatically import any Python files in the tasks/ directory
tasks_dir = os.path.dirname(__file__)
for file in os.listdir(tasks_dir):
path = os.path.join(tasks_dir, file)
if (
not file.startswith('_')
and not file.startswith('.')
and (file.endswith('.py') or os.path.isdir(path))
):
task_name = file[:file.find('.py')] if file.endswith('.py') else file
importlib.import_module('fairseq.tasks.' + task_name)
# expose `task_parser` for sphinx
if task_name in TASK_REGISTRY:
parser = argparse.ArgumentParser(add_help=False)
group_task = parser.add_argument_group('Task name')
# fmt: off
group_task.add_argument('--task', metavar=task_name,
help='Enable this task with: ``--task=' + task_name + '``')
# fmt: on
group_args = parser.add_argument_group('Additional command-line arguments')
TASK_REGISTRY[task_name].add_args(group_args)
globals()[task_name + '_parser'] = parser
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/tasks/sentence_ranking.py | fairseq/tasks/sentence_ranking.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import numpy as np
from fairseq import utils
from fairseq.data import (
ConcatSentencesDataset,
data_utils,
Dictionary,
IdDataset,
NestedDictionaryDataset,
NumSamplesDataset,
NumelDataset,
PrependTokenDataset,
RawLabelDataset,
RightPadDataset,
SortDataset,
TruncateDataset
)
from fairseq.tasks import register_task, LegacyFairseqTask
from fairseq.data.shorten_dataset import maybe_shorten_dataset
logger = logging.getLogger(__name__)
@register_task('sentence_ranking')
class SentenceRankingTask(LegacyFairseqTask):
"""
Ranking task on multiple sentences.
Args:
dictionary (Dictionary): the dictionary for the input of the task
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
parser.add_argument('data', metavar='FILE',
help='file prefix for data')
parser.add_argument('--num-classes', type=int,
help='number of sentences to be ranked')
parser.add_argument('--init-token', type=int,
help='add token at the beginning of each batch item')
parser.add_argument('--separator-token', type=int,
help='add separator token between inputs')
parser.add_argument('--no-shuffle', action='store_true')
parser.add_argument('--shorten-method', default='none',
choices=['none', 'truncate', 'random_crop'],
help='if not none, shorten sequences that exceed --tokens-per-sample')
parser.add_argument('--shorten-data-split-list', default='',
help='comma-separated list of dataset splits to apply shortening to, '
'e.g., "train,valid" (default: all dataset splits)')
parser.add_argument('--max-option-length', type=int,
help='max length for each option')
def __init__(self, args, dictionary):
super().__init__(args)
self.dictionary = dictionary
@classmethod
def load_dictionary(cls, args, filename, source=True):
"""Load the dictionary from the filename
Args:
filename (str): the filename
"""
dictionary = Dictionary.load(filename)
dictionary.add_symbol('<mask>')
return dictionary
@classmethod
def setup_task(cls, args, **kwargs):
assert args.criterion == 'sentence_ranking', \
'Must set --criterion=sentence_ranking'
# load data dictionary
data_dict = cls.load_dictionary(
args,
os.path.join(args.data, 'input0', 'dict.txt'),
source=True,
)
logger.info('[input] dictionary: {} types'.format(len(data_dict)))
return SentenceRankingTask(args, data_dict)
def load_dataset(self, split, combine=False, **kwargs):
"""Load a given dataset split (e.g., train, valid, test)."""
def get_path(type, split):
return os.path.join(self.args.data, type, split)
def make_dataset(type, dictionary):
split_path = get_path(type, split)
dataset = data_utils.load_indexed_dataset(
split_path,
self.source_dictionary,
self.args.dataset_impl,
combine=combine,
)
return dataset
input0 = make_dataset('input0', self.source_dictionary)
input_options = [
make_dataset(
'input{idx}'.format(idx=idx + 1),
self.source_dictionary
)
for idx in range(self.args.num_classes)
]
if self.args.separator_token is not None:
input0 = PrependTokenDataset(input0, self.args.separator_token)
src_tokens = []
for input_option in input_options:
if self.args.init_token is not None:
input_option = PrependTokenDataset(input_option, self.args.init_token)
if self.args.max_option_length is not None:
input_option = TruncateDataset(input_option, self.args.max_option_length)
src_token = ConcatSentencesDataset(input_option, input0)
src_token = maybe_shorten_dataset(
src_token,
split,
self.args.shorten_data_split_list,
self.args.shorten_method,
self.args.max_positions,
self.args.seed,
)
src_tokens.append(src_token)
with data_utils.numpy_seed(self.args.seed):
shuffle = np.random.permutation(len(src_tokens[0]))
dataset = {
'id': IdDataset(),
'nsentences': NumSamplesDataset(),
'ntokens': NumelDataset(src_tokens[0], reduce=True),
}
for src_token_idx in range(len(src_tokens)):
dataset.update(
{
'net_input{idx}'.format(idx=src_token_idx+1): {
'src_tokens': RightPadDataset(
src_tokens[src_token_idx],
pad_idx=self.source_dictionary.pad(),
),
'src_lengths': NumelDataset(src_tokens[src_token_idx], reduce=False),
}
}
)
label_path = '{}.label'.format(get_path('label', split))
if os.path.exists(label_path):
with open(label_path) as h:
dataset.update(
target=RawLabelDataset([
int(x.strip()) for x in h.readlines()
])
)
nested_dataset = NestedDictionaryDataset(
dataset,
sizes=[np.maximum.reduce([src_token.sizes for src_token in src_tokens])],
)
if self.args.no_shuffle:
dataset = nested_dataset
else:
dataset = SortDataset(
nested_dataset,
# shuffle
sort_order=[shuffle],
)
logger.info("Loaded {0} with #samples: {1}".format(split, len(dataset)))
self.datasets[split] = dataset
return self.datasets[split]
def build_model(self, args):
from fairseq import models
model = models.build_model(args, self)
model.register_classification_head(
getattr(args, 'ranking_head_name', 'sentence_classification_head'),
num_classes=1,
)
return model
def max_positions(self):
return self.args.max_positions
@property
def source_dictionary(self):
return self.dictionary
@property
def target_dictionary(self):
return self.dictionary
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/tasks/masked_lm.py | fairseq/tasks/masked_lm.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import numpy as np
from fairseq.data import (
data_utils,
Dictionary,
IdDataset,
MaskTokensDataset,
NestedDictionaryDataset,
NumelDataset,
NumSamplesDataset,
RightPadDataset,
PrependTokenDataset,
SortDataset,
TokenBlockDataset,
)
from fairseq.tasks import register_task, LegacyFairseqTask
from fairseq.data.shorten_dataset import maybe_shorten_dataset
from fairseq.data.encoders.utils import get_whole_word_mask
from fairseq import utils
logger = logging.getLogger(__name__)
@register_task('masked_lm')
class MaskedLMTask(LegacyFairseqTask):
"""Task for training masked language models (e.g., BERT, RoBERTa)."""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
parser.add_argument('data', help='colon separated path to data directories list, \
will be iterated upon during epochs in round-robin manner')
parser.add_argument('--sample-break-mode', default='complete',
choices=['none', 'complete', 'complete_doc', 'eos'],
help='If omitted or "none", fills each sample with tokens-per-sample '
'tokens. If set to "complete", splits samples only at the end '
'of sentence, but may include multiple sentences per sample. '
'"complete_doc" is similar but respects doc boundaries. '
'If set to "eos", includes only one sentence per sample.')
parser.add_argument('--tokens-per-sample', default=512, type=int,
help='max number of total tokens over all segments '
'per sample for BERT dataset')
parser.add_argument('--mask-prob', default=0.15, type=float,
help='probability of replacing a token with mask')
parser.add_argument('--leave-unmasked-prob', default=0.1, type=float,
help='probability that a masked token is unmasked')
parser.add_argument('--random-token-prob', default=0.1, type=float,
help='probability of replacing a token with a random token')
parser.add_argument('--freq-weighted-replacement', default=False, action='store_true',
help='sample random replacement words based on word frequencies')
parser.add_argument('--mask-whole-words', default=False, action='store_true',
help='mask whole words; you may also want to set --bpe')
parser.add_argument('--shorten-method', default='none',
choices=['none', 'truncate', 'random_crop'],
help='if not none, shorten sequences that exceed --tokens-per-sample')
parser.add_argument('--shorten-data-split-list', default='',
help='comma-separated list of dataset splits to apply shortening to, '
'e.g., "train,valid" (default: all dataset splits)')
def __init__(self, args, dictionary):
super().__init__(args)
self.dictionary = dictionary
self.seed = args.seed
# add mask token
self.mask_idx = dictionary.add_symbol('<mask>')
@classmethod
def setup_task(cls, args, **kwargs):
paths = utils.split_paths(args.data)
assert len(paths) > 0
dictionary = Dictionary.load(os.path.join(paths[0], 'dict.txt'))
logger.info('dictionary: {} types'.format(len(dictionary)))
return cls(args, dictionary)
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
paths = utils.split_paths(self.args.data)
assert len(paths) > 0
data_path = paths[(epoch - 1) % len(paths)]
split_path = os.path.join(data_path, split)
dataset = data_utils.load_indexed_dataset(
split_path,
self.source_dictionary,
self.args.dataset_impl,
combine=combine,
)
if dataset is None:
raise FileNotFoundError('Dataset not found: {} ({})'.format(split, split_path))
dataset = maybe_shorten_dataset(
dataset,
split,
self.args.shorten_data_split_list,
self.args.shorten_method,
self.args.tokens_per_sample,
self.args.seed,
)
# create continuous blocks of tokens
dataset = TokenBlockDataset(
dataset,
dataset.sizes,
self.args.tokens_per_sample - 1, # one less for <s>
pad=self.source_dictionary.pad(),
eos=self.source_dictionary.eos(),
break_mode=self.args.sample_break_mode,
)
logger.info('loaded {} blocks from: {}'.format(len(dataset), split_path))
# prepend beginning-of-sentence token (<s>, equiv. to [CLS] in BERT)
dataset = PrependTokenDataset(dataset, self.source_dictionary.bos())
# create masked input and targets
mask_whole_words = get_whole_word_mask(self.args, self.source_dictionary) \
if self.args.mask_whole_words else None
src_dataset, tgt_dataset = MaskTokensDataset.apply_mask(
dataset,
self.source_dictionary,
pad_idx=self.source_dictionary.pad(),
mask_idx=self.mask_idx,
seed=self.args.seed,
mask_prob=self.args.mask_prob,
leave_unmasked_prob=self.args.leave_unmasked_prob,
random_token_prob=self.args.random_token_prob,
freq_weighted_replacement=self.args.freq_weighted_replacement,
mask_whole_words=mask_whole_words,
)
with data_utils.numpy_seed(self.args.seed + epoch):
shuffle = np.random.permutation(len(src_dataset))
self.datasets[split] = SortDataset(
NestedDictionaryDataset(
{
'id': IdDataset(),
'net_input': {
'src_tokens': RightPadDataset(
src_dataset,
pad_idx=self.source_dictionary.pad(),
),
'src_lengths': NumelDataset(src_dataset, reduce=False),
},
'target': RightPadDataset(
tgt_dataset,
pad_idx=self.source_dictionary.pad(),
),
'nsentences': NumSamplesDataset(),
'ntokens': NumelDataset(src_dataset, reduce=True),
},
sizes=[src_dataset.sizes],
),
sort_order=[
shuffle,
src_dataset.sizes,
],
)
def build_dataset_for_inference(self, src_tokens, src_lengths, sort=True):
src_dataset = RightPadDataset(
TokenBlockDataset(
src_tokens,
src_lengths,
self.args.tokens_per_sample - 1, # one less for <s>
pad=self.source_dictionary.pad(),
eos=self.source_dictionary.eos(),
break_mode='eos',
),
pad_idx=self.source_dictionary.pad(),
)
src_dataset = PrependTokenDataset(src_dataset, self.source_dictionary.bos())
src_dataset = NestedDictionaryDataset(
{
'id': IdDataset(),
'net_input': {
'src_tokens': src_dataset,
'src_lengths': NumelDataset(src_dataset, reduce=False),
},
},
sizes=src_lengths,
)
if sort:
src_dataset = SortDataset(src_dataset, sort_order=[src_lengths])
return src_dataset
@property
def source_dictionary(self):
return self.dictionary
@property
def target_dictionary(self):
return self.dictionary
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/tasks/language_modeling.py | fairseq/tasks/language_modeling.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import numpy as np
import torch
from fairseq import utils
from fairseq.data import (
AppendTokenDataset,
data_utils,
Dictionary,
IdDataset,
MonolingualDataset,
NestedDictionaryDataset,
NumelDataset,
PadDataset,
PrependTokenDataset,
StripTokenDataset,
TokenBlockDataset,
TransformEosDataset,
TruncatedDictionary,
)
from fairseq.data.shorten_dataset import maybe_shorten_dataset
from fairseq.tasks import FairseqTask, register_task
logger = logging.getLogger(__name__)
@register_task("language_modeling")
class LanguageModelingTask(FairseqTask):
"""
Train a language model.
Args:
dictionary (~fairseq.data.Dictionary): the dictionary for the input of
the language model
output_dictionary (~fairseq.data.Dictionary): the dictionary for the
output of the language model. In most cases it will be the same as
*dictionary*, but could possibly be a more limited version of the
dictionary (if ``--output-dictionary-size`` is used).
targets (List[str]): list of the target types that the language model
should predict. Can be one of "self", "future", and "past".
Defaults to "future".
.. note::
The language modeling task is compatible with :mod:`fairseq-train`,
:mod:`fairseq-generate`, :mod:`fairseq-interactive` and
:mod:`fairseq-eval-lm`.
The language modeling task provides the following additional command-line
arguments:
.. argparse::
:ref: fairseq.tasks.language_modeling_parser
:prog:
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
# fmt: off
parser.add_argument('data', help='path to data directory')
parser.add_argument('--sample-break-mode', default='none',
choices=['none', 'complete', 'complete_doc', 'eos'],
help='If omitted or "none", fills each sample with tokens-per-sample '
'tokens. If set to "complete", splits samples only at the end '
'of sentence, but may include multiple sentences per sample. '
'"complete_doc" is similar but respects doc boundaries. '
'If set to "eos", includes only one sentence per sample.')
parser.add_argument('--tokens-per-sample', default=1024, type=int,
help='max number of tokens per sample for LM dataset')
parser.add_argument('--output-dictionary-size', default=-1, type=int,
help='limit the size of output dictionary')
parser.add_argument('--self-target', action='store_true',
help='include self target')
parser.add_argument('--future-target', action='store_true',
help='include future target')
parser.add_argument('--past-target', action='store_true',
help='include past target')
parser.add_argument('--add-bos-token', action='store_true',
help='prepend beginning of sentence token (<s>)')
parser.add_argument('--max-target-positions', type=int, metavar='N',
help='max number of tokens in the target sequence')
parser.add_argument('--shorten-method', default='none',
choices=['none', 'truncate', 'random_crop'],
help='if not none, shorten sequences that exceed --tokens-per-sample')
parser.add_argument('--shorten-data-split-list', default='',
help='comma-separated list of dataset splits to apply shortening to, '
'e.g., "train,valid" (default: all dataset splits)')
parser.add_argument('--tokens-from-prev', default=0, type=int,
help="number of tokens from the previous input subsequence that we will attend to")
parser.add_argument('--sliding-inf', type= int, default = -1,
help="set this to 1 for token-by-token inference")
# fmt: on
def __init__(self, args, dictionary, output_dictionary=None, targets=None):
super().__init__(args)
self.dictionary = dictionary
self.output_dictionary = output_dictionary or dictionary
if targets is None:
targets = ["future"]
self.targets = targets
@classmethod
def setup_dictionary(cls, args, **kwargs):
dictionary = None
output_dictionary = None
if args.data:
paths = utils.split_paths(args.data)
assert len(paths) > 0
dictionary = Dictionary.load(os.path.join(paths[0], "dict.txt"))
logger.info("dictionary: {} types".format(len(dictionary)))
output_dictionary = dictionary
if args.output_dictionary_size >= 0:
output_dictionary = TruncatedDictionary(
dictionary, args.output_dictionary_size
)
return (dictionary, output_dictionary)
@classmethod
def setup_task(cls, args, **kwargs):
"""Setup the task (e.g., load dictionaries).
Args:
args (argparse.Namespace): parsed command-line arguments
"""
dictionary, output_dictionary = cls.setup_dictionary(args, **kwargs)
# upgrade old checkpoints
if hasattr(args, "exclude_self_target"):
args.self_target = not args.exclude_self_target
targets = []
if getattr(args, "self_target", False):
targets.append("self")
if getattr(args, "future_target", False):
targets.append("future")
if getattr(args, "past_target", False):
targets.append("past")
if len(targets) == 0:
# standard language modeling
targets = ["future"]
return cls(args, dictionary, output_dictionary, targets=targets)
def build_model(self, args):
model = super().build_model(args)
for target in self.targets:
if target not in model.supported_targets:
raise ValueError(
"Unsupported language modeling target: {}".format(target)
)
return model
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
paths = utils.split_paths(self.args.data)
assert len(paths) > 0
data_path = paths[(epoch - 1) % len(paths)]
split_path = os.path.join(data_path, split)
dataset = data_utils.load_indexed_dataset(
split_path, self.dictionary, self.args.dataset_impl, combine=combine
)
if dataset is None:
raise FileNotFoundError(
"Dataset not found: {} ({})".format(split, split_path)
)
dataset = maybe_shorten_dataset(
dataset,
split,
self.args.shorten_data_split_list,
self.args.shorten_method,
self.args.tokens_per_sample,
self.args.seed,
)
dataset = TokenBlockDataset(
dataset,
dataset.sizes,
self.args.tokens_per_sample,
pad=self.dictionary.pad(),
eos=self.dictionary.eos(),
break_mode=self.args.sample_break_mode,
include_targets=True,
)
add_eos_for_other_targets = (
self.args.sample_break_mode is not None
and self.args.sample_break_mode != "none"
)
self.datasets[split] = self._initialize_dataset(
dataset=dataset,
sizes=dataset.sizes,
src_vocab=self.dictionary,
tgt_vocab=self.output_dictionary,
add_eos_for_other_targets=add_eos_for_other_targets,
shuffle=False,
targets=self.targets,
add_bos_token=self.args.add_bos_token,
)
def _initialize_dataset(self, **kwargs):
return MonolingualDataset(**kwargs)
def build_dataset_for_inference(self, src_tokens, src_lengths, **kwargs):
"""
Generate batches for inference. We prepend an eos token to src_tokens
(or bos if `--add-bos-token` is set) and we append a <pad> to target.
This is convenient both for generation with a prefix and LM scoring.
"""
dataset = StripTokenDataset(
TokenBlockDataset(
src_tokens,
src_lengths,
block_size=None, # ignored for "eos" break mode
pad=self.source_dictionary.pad(),
eos=self.source_dictionary.eos(),
break_mode="eos",
),
# remove eos from (end of) target sequence
self.source_dictionary.eos(),
)
src_dataset = PrependTokenDataset(
dataset,
token=(
self.source_dictionary.bos()
if getattr(self.args, "add_bos_token", False)
else self.source_dictionary.eos()
),
)
tgt_dataset = AppendTokenDataset(
dataset,
token=self.source_dictionary.pad()
)
return NestedDictionaryDataset(
{
"id": IdDataset(),
"net_input": {
"src_tokens": PadDataset(src_dataset, pad_idx=self.source_dictionary.pad(), left_pad=False),
"src_lengths": NumelDataset(src_dataset, reduce=False),
},
"target": PadDataset(tgt_dataset, pad_idx=self.source_dictionary.pad(), left_pad=False),
},
sizes=[np.array(src_lengths)],
)
def inference_step(self, generator, models, sample, prefix_tokens=None, constraints=None):
with torch.no_grad():
# Generation will always be conditioned on bos_token
if getattr(self.args, "add_bos_token", False):
bos_token = self.source_dictionary.bos()
else:
bos_token = self.source_dictionary.eos()
if constraints is not None:
raise NotImplementedError("Constrained decoding with the language_modeling task is not supported")
# SequenceGenerator doesn't use src_tokens directly, we need to
# pass the `prefix_tokens` argument instead
if prefix_tokens is None and sample["net_input"]["src_tokens"].nelement():
prefix_tokens = sample["net_input"]["src_tokens"]
if prefix_tokens[:, 0].eq(bos_token).all():
prefix_tokens = prefix_tokens[:, 1:]
return generator.generate(
models, sample, prefix_tokens=prefix_tokens, bos_token=bos_token,
)
@property
def source_dictionary(self):
"""Return the :class:`~fairseq.data.Dictionary` for the language
model."""
return self.dictionary
@property
def target_dictionary(self):
"""Return the :class:`~fairseq.data.Dictionary` for the language
model."""
return self.output_dictionary
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/tasks/translation_from_pretrained_bart.py | fairseq/tasks/translation_from_pretrained_bart.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from fairseq.data import LanguagePairDataset
from fairseq import utils
from .translation import load_langpair_dataset, TranslationTask
from . import register_task
@register_task('translation_from_pretrained_bart')
class TranslationFromPretrainedBARTTask(TranslationTask):
"""
Translate from source language to target language with a model initialized with a multilingual pretrain.
Args:
src_dict (~fairseq.data.Dictionary): dictionary for the source language
tgt_dict (~fairseq.data.Dictionary): dictionary for the target language
.. note::
The translation task is compatible with :mod:`fairseq-train`,
:mod:`fairseq-generate` and :mod:`fairseq-interactive`.
The translation task provides the following additional command-line
arguments:
.. argparse::
:ref: fairseq.tasks.translation_parser
:prog:
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
# fmt: off
TranslationTask.add_args(parser)
parser.add_argument('--langs', required=True, metavar='LANG',
help='comma-separated list of monolingual language, '
'for example, "en,de,fr". These should match the '
'langs from pretraining (and be in the same order). '
'You should always add all pretraining language idx '
'during finetuning.')
parser.add_argument('--prepend-bos', action='store_true',
help='prepend bos token to each sentence, which matches '
'mBART pretraining')
# fmt: on
def __init__(self, args, src_dict, tgt_dict):
super().__init__(args, src_dict, tgt_dict)
self.langs = args.langs.split(',')
for d in [src_dict, tgt_dict]:
for l in self.langs:
d.add_symbol('[{}]'.format(l))
d.add_symbol('<mask>')
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
paths = utils.split_paths(self.args.data)
assert len(paths) > 0
data_path = paths[(epoch - 1) % len(paths)]
# infer langcode
src, tgt = self.args.source_lang, self.args.target_lang
self.datasets[split] = load_langpair_dataset(
data_path, split, src, self.src_dict, tgt, self.tgt_dict,
combine=combine, dataset_impl=self.args.dataset_impl,
upsample_primary=self.args.upsample_primary,
left_pad_source=self.args.left_pad_source,
left_pad_target=self.args.left_pad_target,
max_source_positions=getattr(self.args, 'max_source_positions', 1024),
max_target_positions=getattr(self.args, 'max_target_positions', 1024),
load_alignments=self.args.load_alignments,
prepend_bos=getattr(self.args, 'prepend_bos', False),
append_source_id=True
)
def build_generator(self, models, args):
if getattr(args, 'score_reference', False):
from fairseq.sequence_scorer import SequenceScorer
return SequenceScorer(
self.target_dictionary,
eos=self.tgt_dict.index('[{}]'.format(self.args.target_lang))
)
else:
from fairseq.sequence_generator import SequenceGenerator
return SequenceGenerator(
models,
self.target_dictionary,
beam_size=getattr(args, 'beam', 5),
max_len_a=getattr(args, 'max_len_a', 0),
max_len_b=getattr(args, 'max_len_b', 200),
min_len=getattr(args, 'min_len', 1),
normalize_scores=(not getattr(args, 'unnormalized', False)),
len_penalty=getattr(args, 'lenpen', 1),
unk_penalty=getattr(args, 'unkpen', 0),
temperature=getattr(args, 'temperature', 1.),
match_source_len=getattr(args, 'match_source_len', False),
no_repeat_ngram_size=getattr(args, 'no_repeat_ngram_size', 0),
eos=self.tgt_dict.index('[{}]'.format(self.args.target_lang))
)
def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None):
src_lang_id = self.source_dictionary.index('[{}]'.format(self.args.source_lang))
source_tokens = []
for s_t in src_tokens:
s_t = torch.cat([s_t, s_t.new(1).fill_(src_lang_id)])
source_tokens.append(s_t)
dataset = LanguagePairDataset(source_tokens, src_lengths, self.source_dictionary,
tgt_dict=self.target_dictionary,
constraints=constraints)
return dataset
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/tasks/legacy_masked_lm.py | fairseq/tasks/legacy_masked_lm.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import logging
import os
import numpy as np
from fairseq import tokenizer
from fairseq.data import (
ConcatDataset,
indexed_dataset,
data_utils,
)
from fairseq.data import Dictionary
from fairseq.data.legacy.block_pair_dataset import BlockPairDataset
from fairseq.data.legacy.masked_lm_dataset import MaskedLMDataset
from fairseq.data.legacy.masked_lm_dictionary import BertDictionary
from fairseq.tasks import register_task, LegacyFairseqTask
from fairseq import utils
logger = logging.getLogger(__name__)
@register_task('legacy_masked_lm')
class LegacyMaskedLMTask(LegacyFairseqTask):
"""
Task for training Masked LM (BERT) model.
Args:
dictionary (Dictionary): the dictionary for the input of the task
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
parser.add_argument('data', help='colon separated path to data directories list, \
will be iterated upon during epochs in round-robin manner')
parser.add_argument('--tokens-per-sample', default=512, type=int,
help='max number of total tokens over all segments'
' per sample for BERT dataset')
parser.add_argument('--break-mode', default="doc", type=str, help='mode for breaking sentence')
parser.add_argument('--shuffle-dataset', action='store_true', default=False)
def __init__(self, args, dictionary):
super().__init__(args)
self.dictionary = dictionary
self.seed = args.seed
@classmethod
def load_dictionary(cls, filename):
return BertDictionary.load(filename)
@classmethod
def build_dictionary(cls, filenames, workers=1, threshold=-1, nwords=-1, padding_factor=8):
d = BertDictionary()
for filename in filenames:
Dictionary.add_file_to_dictionary(filename, d, tokenizer.tokenize_line, workers)
d.finalize(threshold=threshold, nwords=nwords, padding_factor=padding_factor)
return d
@property
def target_dictionary(self):
return self.dictionary
@classmethod
def setup_task(cls, args, **kwargs):
"""Setup the task.
"""
paths = utils.split_paths(args.data)
assert len(paths) > 0
dictionary = BertDictionary.load(os.path.join(paths[0], 'dict.txt'))
logger.info('dictionary: {} types'.format(len(dictionary)))
return cls(args, dictionary)
def load_dataset(self, split, epoch=1, combine=False):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
loaded_datasets = []
paths = utils.split_paths(self.args.data)
assert len(paths) > 0
data_path = paths[(epoch - 1) % len(paths)]
logger.info("data_path", data_path)
for k in itertools.count():
split_k = split + (str(k) if k > 0 else '')
path = os.path.join(data_path, split_k)
ds = indexed_dataset.make_dataset(
path,
impl=self.args.dataset_impl,
fix_lua_indexing=True,
dictionary=self.dictionary,
)
if ds is None:
if k > 0:
break
else:
raise FileNotFoundError('Dataset not found: {} ({})'.format(split, data_path))
with data_utils.numpy_seed(self.seed + k):
loaded_datasets.append(
BlockPairDataset(
ds,
self.dictionary,
ds.sizes,
self.args.tokens_per_sample,
break_mode=self.args.break_mode,
doc_break_size=1,
)
)
logger.info('{} {} {} examples'.format(data_path, split_k, len(loaded_datasets[-1])))
if not combine:
break
if len(loaded_datasets) == 1:
dataset = loaded_datasets[0]
sizes = dataset.sizes
else:
dataset = ConcatDataset(loaded_datasets)
sizes = np.concatenate([ds.sizes for ds in loaded_datasets])
self.datasets[split] = MaskedLMDataset(
dataset=dataset,
sizes=sizes,
vocab=self.dictionary,
pad_idx=self.dictionary.pad(),
mask_idx=self.dictionary.mask(),
classif_token_idx=self.dictionary.cls(),
sep_token_idx=self.dictionary.sep(),
shuffle=self.args.shuffle_dataset,
seed=self.seed,
)
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/tasks/cross_lingual_lm.py | fairseq/tasks/cross_lingual_lm.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import OrderedDict
import itertools
import logging
import os
import numpy as np
from fairseq import tokenizer
from fairseq.data.legacy.masked_lm_dictionary import MaskedLMDictionary
from fairseq.data import (
Dictionary,
ConcatDataset,
data_utils,
TokenBlockDataset,
)
from fairseq.data.legacy.masked_lm_dataset import MaskedLMDataset
from fairseq.data.multi_corpus_sampled_dataset import MultiCorpusSampledDataset
from fairseq.tasks import register_task, LegacyFairseqTask
from fairseq import utils
logger = logging.getLogger(__name__)
@register_task('cross_lingual_lm')
class CrossLingualLMTask(LegacyFairseqTask):
"""
Task for training cross-lingual language models.
For more details look at: https://arxiv.org/pdf/1901.07291.pdf
Args:
dictionary (Dictionary): the dictionary for the input of the task
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
parser.add_argument('data', help='colon separated path to data directories list, \
will be iterated upon during epochs in round-robin manner')
parser.add_argument('--tokens-per-sample', default=512, type=int,
help='max number of total tokens over all segments'
' per sample')
parser.add_argument('--monolingual-langs', default='en', type=str,
help='comma separated list of languages for which we'
' want to train XLM on')
parser.add_argument('--shuffle', action='store_true',
help='shuffle each monolingual dataset while'
' training')
def __init__(self, args, dictionary):
super().__init__(args)
self.dictionary = dictionary
self.seed = args.seed
self.distributed_world_size = args.distributed_world_size
self.langs2id = self._lang_to_id(args.monolingual_langs)
def _lang_to_id(
self,
languages: str
):
"""
Build a map from languages to ids. These ids are used as segment labels
for cross-lingual LM training.
"""
lang2id = {}
langs = [l.strip() for l in languages.split(',')]
for id, lang in enumerate(langs):
lang2id[lang] = id
return lang2id
@classmethod
def load_dictionary(cls, filename):
return MaskedLMDictionary.load(filename)
@classmethod
def build_dictionary(cls, filenames, workers=1, threshold=-1, nwords=-1, padding_factor=8):
d = MaskedLMDictionary()
for filename in filenames:
Dictionary.add_file_to_dictionary(filename, d, tokenizer.tokenize_line, workers)
d.finalize(threshold=threshold, nwords=nwords, padding_factor=padding_factor)
return d
@property
def target_dictionary(self):
return self.dictionary
@classmethod
def setup_task(cls, args, **kwargs):
"""Setup the task."""
dictionary = MaskedLMDictionary.load(os.path.join(args.data, 'dict.txt'))
logger.info('dictionary: {} types'.format(len(dictionary)))
return cls(args, dictionary)
def _load_single_lang_dataset(self, split, epoch):
loaded_datasets = []
paths = utils.split_paths(self.args.data)
assert len(paths) > 0
data_path = paths[(epoch - 1) % len(paths)]
for k in itertools.count():
split_k = split + (str(k) if k > 0 else '')
path = os.path.join(data_path, split_k)
ds = data_utils.load_indexed_dataset(path, self.dictionary, self.args.dataset_impl)
if ds is None:
if k > 0:
break
else:
raise FileNotFoundError('Dataset not found: {} ({})'.format(split, data_path))
# Since we append each block with the classification_token,
# we need to effectively create blocks of length
# tokens_per_sample-1
loaded_datasets.append(
TokenBlockDataset(
ds, ds.sizes, self.args.tokens_per_sample - 1,
pad=self.dictionary.pad(), eos=self.dictionary.eos(),
)
)
logger.info('{} {} {} examples'.format(data_path, split_k, len(loaded_datasets[-1])))
if len(loaded_datasets) == 1:
dataset = loaded_datasets[0]
sizes = dataset.sizes
else:
dataset = ConcatDataset(loaded_datasets)
sizes = np.concatenate([ds.sizes for ds in loaded_datasets])
return dataset, sizes
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
dataset_map = OrderedDict()
for lang in self.langs2id.keys():
# Datasets are expected to be in "split.lang" format (Eg: train.en)
language_split = '{}.{}'.format(split, lang)
block_dataset, sizes = self._load_single_lang_dataset(split=language_split, epoch=epoch)
dataset_map[lang] = MaskedLMDataset(
dataset=block_dataset,
sizes=sizes,
vocab=self.dictionary,
pad_idx=self.dictionary.pad(),
mask_idx=self.dictionary.mask(),
classif_token_idx=self.dictionary.eos(),
sep_token_idx=self.dictionary.eos(),
shuffle=getattr(self.args, 'shuffle', False),
has_pairs=False,
segment_id=self.langs2id[lang],
seed=self.seed,
)
self.datasets[split] = MultiCorpusSampledDataset(dataset_map)
logger.info('{} {} {} examples'.format(
utils.split_paths(self.args.data)[epoch - 1], split, len(self.datasets[split]))
)
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/model_parallel/__init__.py | fairseq/model_parallel/__init__.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import criterions, modules, models # noqa
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/model_parallel/megatron_trainer.py | fairseq/model_parallel/megatron_trainer.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Train a network across multiple GPUs.
"""
from fairseq import distributed_utils
from fairseq.trainer import Trainer
try:
from fairseq.model_parallel.megatron.mpu import (
get_data_parallel_group,
get_data_parallel_rank,
get_data_parallel_world_size,
get_model_parallel_group,
get_model_parallel_src_rank,
)
has_megatron_submodule = True
except (ImportError, ModuleNotFoundError):
has_megatron_submodule = False
class MegatronTrainer(Trainer):
"""Main class for model parallel with data parallel training.
"""
def __init__(self, args, task, model, criterion):
if not has_megatron_submodule:
raise ImportError(
'\n\nPlease install the megatron submodule:'
'\n\n git submodule update --init '
'fairseq/model_parallel/megatron'
)
super().__init__(args, task, model, criterion)
@property
def data_parallel_world_size(self):
return get_data_parallel_world_size()
@property
def data_parallel_process_group(self):
return get_data_parallel_group()
@property
def data_parallel_rank(self):
return get_data_parallel_rank()
@property
def is_data_parallel_master(self):
return get_model_parallel_src_rank() == 0
def clip_grad_norm(self, clip_norm):
def _aggregate_model_parallel_grad_norm(total_norm):
total_norm = total_norm ** 2
distributed_utils.all_reduce(total_norm, group=get_model_parallel_group())
total_norm = total_norm ** 0.5
return total_norm
return self.optimizer.clip_grad_norm(
clip_norm,
aggregate_norm_fn=_aggregate_model_parallel_grad_norm,
)
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/model_parallel/models/transformer_lm.py | fairseq/model_parallel/models/transformer_lm.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn as nn
from fairseq.models import register_model, register_model_architecture
from fairseq.models.transformer_lm import (
base_lm_architecture,
TransformerLanguageModel,
)
from fairseq.model_parallel.models.transformer import ModelParallelTransformerDecoder
try:
from fairseq.model_parallel.megatron.mpu import VocabParallelEmbedding
has_megatron_submodule = True
except (ImportError, ModuleNotFoundError):
has_megatron_submodule = False
DEFAULT_MAX_TARGET_POSITIONS = 1024
@register_model('model_parallel_transformer_lm')
class ModelParallelTransformerLanguageModel(TransformerLanguageModel):
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
if not has_megatron_submodule:
raise ImportError(
'\n\nPlease install the megatron submodule:'
'\n\n git submodule update --init '
'fairseq/model_parallel/megatron'
)
# make sure all arguments are present in older models
base_lm_architecture(args)
if args.decoder_layers_to_keep:
args.decoder_layers = len(args.decoder_layers_to_keep.split(","))
if getattr(args, 'max_target_positions', None) is None:
args.max_target_positions = getattr(args, 'tokens_per_sample', DEFAULT_MAX_TARGET_POSITIONS)
if args.character_embeddings:
raise NotImplementedError("Character embeddings is not supported for model parallel")
elif args.adaptive_input:
raise NotImplementedError("Adaptive input is not supported for model parallel")
else:
embed_tokens = cls.build_embedding(args, task.source_dictionary, args.decoder_input_dim)
decoder = ModelParallelTransformerDecoder(
args, task.target_dictionary, embed_tokens, no_encoder_attn=True,
)
return cls(decoder)
@classmethod
def build_embedding(cls, args, dictionary, embed_dim, path=None):
def _vocab_init(tensor, **kwargs):
nn.init.normal_(tensor, mean=0, std=embed_dim ** -0.5)
nn.init.constant_(tensor[1], 0)
embed_tokens = VocabParallelEmbedding(len(dictionary), embed_dim, dictionary.pad(), init_method=_vocab_init)
return embed_tokens
@register_model_architecture('model_parallel_transformer_lm', 'transformer_lm_megatron')
def transformer_lm_megatron(args):
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 3072)
args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 3072 * 4)
args.decoder_layers = getattr(args, 'decoder_layers', 72)
args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 32)
args.dropout = getattr(args, 'dropout', 0.1)
args.attention_dropout = getattr(args, 'attention_dropout', 0.1)
args.activation_fn = getattr(args, 'activation_fn', 'gelu')
base_lm_architecture(args)
@register_model_architecture('model_parallel_transformer_lm', 'transformer_lm_megatron_11b')
def transformer_lm_megatron_11b(args):
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 3072)
args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 3072 * 6)
args.decoder_layers = getattr(args, 'decoder_layers', 72)
args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 32)
args.dropout = getattr(args, 'dropout', 0.1)
args.attention_dropout = getattr(args, 'attention_dropout', 0.1)
args.activation_fn = getattr(args, 'activation_fn', 'gelu')
base_lm_architecture(args)
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/model_parallel/models/__init__.py | fairseq/model_parallel/models/__init__.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import importlib
import os
# automatically import any Python files in the models/ directory
models_dir = os.path.dirname(__file__)
for file in os.listdir(models_dir):
path = os.path.join(models_dir, file)
if not file.startswith('_') and not file.startswith('.') and (file.endswith('.py') or os.path.isdir(path)):
model_name = file[:file.find('.py')] if file.endswith('.py') else file
module = importlib.import_module('fairseq.model_parallel.models.' + model_name)
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/model_parallel/models/transformer.py | fairseq/model_parallel/models/transformer.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import torch.nn as nn
import torch.nn.functional as F
from fairseq.models import (
register_model,
)
from fairseq.models.transformer import (
TransformerDecoder,
TransformerEncoder,
TransformerModel,
)
from fairseq.model_parallel.modules import (
ModelParallelTransformerDecoderLayer,
ModelParallelTransformerEncoderLayer,
)
try:
from fairseq.model_parallel.megatron.mpu import (
copy_to_model_parallel_region,
gather_from_model_parallel_region,
VocabParallelEmbedding,
)
has_megatron_submodule = True
except (ImportError, ModuleNotFoundError):
has_megatron_submodule = False
logger = logging.getLogger(__name__)
@register_model('model_parallel_transformer')
class ModelParallelTransformerModel(TransformerModel):
"""
Model parallel Transformer model.
"""
@classmethod
def build_embedding(cls, args, dictionary, embed_dim, path=None):
if not has_megatron_submodule:
raise ImportError(
'\n\nPlease install the megatron submodule:'
'\n\n git submodule update --init '
'fairseq/model_parallel/megatron'
)
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
def _vocab_init(tensor, **kwargs):
nn.init.normal_(tensor, mean=0, std=num_embeddings ** -0.5)
nn.init.constant_(tensor[1], 0)
emb = VocabParallelEmbedding(num_embeddings, embed_dim, padding_idx, init_method=_vocab_init)
# if provided, load from preloaded dictionaries
if path:
raise NotImplementedError("Loading of embedding from path is not supported for model parallel")
return emb
@classmethod
def build_encoder(cls, args, src_dict, embed_tokens):
return ModelParallelTransformerEncoder(args, src_dict, embed_tokens)
@classmethod
def build_decoder(cls, args, tgt_dict, embed_tokens):
return ModelParallelTransformerDecoder(
args,
tgt_dict,
embed_tokens,
no_encoder_attn=getattr(args, 'no_cross_attention', False),
)
class ModelParallelTransformerEncoder(TransformerEncoder):
"""
Model parallel Transformer encoder consisting of *args.encoder_layers* layers. Each layer
is a :class:`ModelParallelTransformerEncoderLayer`.
"""
def build_encoder_layer(self, args):
return ModelParallelTransformerEncoderLayer(args)
class ModelParallelTransformerDecoder(TransformerDecoder):
"""
Model Parallel Transformer decoder consisting of *args.decoder_layers* layers. Each layer
is a :class:`ModelParallelTransformerDecoderLayer`.
"""
def build_decoder_layer(self, args, no_encoder_attn=False):
return ModelParallelTransformerDecoderLayer(args, no_encoder_attn)
def output_layer(self, features, **kwargs):
"""Project features to the vocabulary size."""
if not self.share_input_output_embed:
raise NotImplementedError(
'Model parallel training currently requires --share-decoder-input-output-embed'
)
features = copy_to_model_parallel_region(features)
# project back to size of vocabulary
x = self.output_projection(features)
if getattr(self.args, 'criterion') != 'vocab_parallel_cross_entropy':
x = gather_from_model_parallel_region(x).contiguous()
return x
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/model_parallel/models/roberta/model.py | fairseq/model_parallel/models/roberta/model.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
RoBERTa: A Robustly Optimized BERT Pretraining Approach.
"""
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.models import (
FairseqEncoder,
register_model,
register_model_architecture,
)
from fairseq.models.roberta import (
RobertaModel,
RobertaEncoder,
RobertaLMHead,
RobertaClassificationHead,
)
from fairseq.modules import (
LayerNorm,
TransformerSentenceEncoder,
)
from fairseq.model_parallel.modules import (
ModelParallelTransformerSentenceEncoder,
)
from fairseq.modules.transformer_sentence_encoder import init_bert_params
try:
from fairseq.model_parallel.megatron.mpu import (
copy_to_model_parallel_region,
gather_from_model_parallel_region,
ColumnParallelLinear,
RowParallelLinear,
)
has_megatron_submodule = True
except (ImportError, ModuleNotFoundError):
has_megatron_submodule = False
logger = logging.getLogger(__name__)
@register_model('model_parallel_roberta')
class ModelParallelRobertaModel(RobertaModel):
def __init__(self, args, encoder):
super().__init__(args, encoder)
self.classification_heads = nn.ModuleDict()
@staticmethod
def add_args(parser):
super(ModelParallelRobertaModel, ModelParallelRobertaModel).add_args(parser)
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure all arguments are present
base_architecture(args)
if not hasattr(args, 'max_positions'):
args.max_positions = args.tokens_per_sample
if getattr(args, 'untie_weights_roberta', False):
raise NotImplementedError(
'--untie-weights-roberta is not supported in model parallel mode'
)
encoder = ModelParallelRobertaEncoder(args, task.source_dictionary)
return cls(args, encoder)
def forward(self, src_tokens, features_only=False, return_all_hiddens=False, classification_head_name=None, **kwargs):
if classification_head_name is not None:
features_only = True
x, extra = self.encoder(src_tokens, features_only, return_all_hiddens, **kwargs)
if classification_head_name is not None:
x = self.classification_heads[classification_head_name](x)
return x, extra
def register_classification_head(self, name, num_classes=None, inner_dim=None, **kwargs):
"""Register a classification head."""
if name in self.classification_heads:
prev_num_classes = self.classification_heads[name].out_proj.out_features
prev_inner_dim = self.classification_heads[name].dense.out_features
if num_classes != prev_num_classes or inner_dim != prev_inner_dim:
logger.warning(
're-registering head "{}" with num_classes {} (prev: {}) '
'and inner_dim {} (prev: {})'.format(
name, num_classes, prev_num_classes, inner_dim, prev_inner_dim
)
)
self.classification_heads[name] = ModelParallelRobertaClassificationHead(
self.args.encoder_embed_dim,
inner_dim or self.args.encoder_embed_dim,
num_classes,
self.args.pooler_activation_fn,
self.args.pooler_dropout,
)
class ModelParallelRobertaLMHead(nn.Module):
"""Head for masked language modeling."""
def __init__(self, embed_dim, output_dim, activation_fn, weight=None):
super().__init__()
self.dense = ColumnParallelLinear(embed_dim, embed_dim, gather_output=True)
self.activation_fn = utils.get_activation_fn(activation_fn)
self.layer_norm = LayerNorm(embed_dim)
if weight is None:
weight = nn.Linear(embed_dim, output_dim, bias=False).weight
self.weight = weight
self.bias = nn.Parameter(torch.zeros(output_dim))
def forward(self, features, masked_tokens=None, **kwargs):
# Only project the unmasked tokens while training,
# saves both memory and computation
if masked_tokens is not None:
features = features[masked_tokens, :]
x = self.dense(features)
x = self.activation_fn(x)
x = self.layer_norm(x)
x = copy_to_model_parallel_region(x)
# project back to size of vocabulary with bias
x = F.linear(x, self.weight)
x = gather_from_model_parallel_region(x).contiguous()
x = x + self.bias
return x
class ModelParallelRobertaClassificationHead(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(self, input_dim, inner_dim, num_classes, activation_fn, pooler_dropout):
super().__init__()
self.dense = ColumnParallelLinear(input_dim, inner_dim, gather_output=True)
self.activation_fn = utils.get_activation_fn(activation_fn)
self.dropout = nn.Dropout(p=pooler_dropout)
self.out_proj = nn.Linear(inner_dim, num_classes)
def forward(self, features, **kwargs):
x = features[:, 0, :] # take <s> token (equiv. to [CLS])
x = self.dropout(x)
x = self.dense(x)
x = self.activation_fn(x)
x = self.dropout(x)
x = self.out_proj(x)
return x
class ModelParallelRobertaEncoder(FairseqEncoder):
"""RoBERTa encoder.
Implements the :class:`~fairseq.models.FairseqDecoder` interface required
by :class:`~fairseq.models.FairseqLanguageModel`.
"""
def __init__(self, args, dictionary):
super().__init__(dictionary)
self.args = args
# RoBERTa is a sentence encoder model, so users will intuitively trim
# encoder layers. However, the implementation uses the fairseq decoder,
# so we fix here.
if args.encoder_layers_to_keep:
args.encoder_layers = len(args.encoder_layers_to_keep.split(","))
args.decoder_layers_to_keep = args.encoder_layers_to_keep
args.encoder_layers_to_keep = None
self.sentence_encoder = ModelParallelTransformerSentenceEncoder(
padding_idx=dictionary.pad(),
vocab_size=len(dictionary),
num_encoder_layers=args.encoder_layers,
embedding_dim=args.encoder_embed_dim,
ffn_embedding_dim=args.encoder_ffn_embed_dim,
num_attention_heads=args.encoder_attention_heads,
dropout=args.dropout,
attention_dropout=args.attention_dropout,
activation_dropout=args.activation_dropout,
layerdrop=args.encoder_layerdrop,
max_seq_len=args.max_positions,
num_segments=0,
encoder_normalize_before=False,
apply_bert_init=False,
activation_fn=args.activation_fn,
)
self.lm_head = ModelParallelRobertaLMHead(
embed_dim=args.encoder_embed_dim,
output_dim=len(dictionary),
activation_fn=args.activation_fn,
weight=self.sentence_encoder.embed_tokens.weight,
)
def forward(self, src_tokens, features_only=False, return_all_hiddens=False, masked_tokens=None, **unused):
"""
Args:
src_tokens (LongTensor): input tokens of shape `(batch, src_len)`
features_only (bool, optional): skip LM head and just return
features. If True, the output will be of shape
`(batch, src_len, embed_dim)`.
return_all_hiddens (bool, optional): also return all of the
intermediate hidden states (default: False).
Returns:
tuple:
- the LM output of shape `(batch, src_len, vocab)`
- a dictionary of additional data, where 'inner_states'
is a list of hidden states. Note that the hidden
states have shape `(src_len, batch, vocab)`.
"""
x, extra = self.extract_features(src_tokens, return_all_hiddens=return_all_hiddens)
if not features_only:
x = self.output_layer(x, masked_tokens=masked_tokens)
return x, extra
def extract_features(self, src_tokens, return_all_hiddens=False, **unused):
inner_states, _ = self.sentence_encoder(
src_tokens,
last_state_only=not return_all_hiddens,
)
features = inner_states[-1].transpose(0, 1) # T x B x C -> B x T x C
return features, {'inner_states': inner_states if return_all_hiddens else None}
def output_layer(self, features, masked_tokens=None, **unused):
return self.lm_head(features, masked_tokens)
def max_positions(self):
"""Maximum output length supported by the encoder."""
return self.args.max_positions
@register_model_architecture('model_parallel_roberta', 'model_parallel_roberta')
def base_architecture(args):
args.encoder_layers = getattr(args, 'encoder_layers', 12)
args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 768)
args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 3072)
args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 12)
args.activation_fn = getattr(args, 'activation_fn', 'gelu')
args.pooler_activation_fn = getattr(args, 'pooler_activation_fn', 'tanh')
args.dropout = getattr(args, 'dropout', 0.1)
args.attention_dropout = getattr(args, 'attention_dropout', 0.1)
args.activation_dropout = getattr(args, 'activation_dropout', 0.0)
args.pooler_dropout = getattr(args, 'pooler_dropout', 0.0)
args.encoder_layers_to_keep = getattr(args, 'encoder_layers_to_keep', None)
args.encoder_layerdrop = getattr(args, 'encoder_layerdrop', 0.0)
@register_model_architecture('model_parallel_roberta', 'model_parallel_roberta_base')
def roberta_base_architecture(args):
base_architecture(args)
@register_model_architecture('model_parallel_roberta', 'model_parallel_roberta_large')
def roberta_large_architecture(args):
args.encoder_layers = getattr(args, 'encoder_layers', 24)
args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024)
args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4096)
args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 16)
base_architecture(args)
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/model_parallel/models/roberta/__init__.py | fairseq/model_parallel/models/roberta/__init__.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .model import * # noqa
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/model_parallel/models/pipeline_parallel_transformer/model.py | fairseq/model_parallel/models/pipeline_parallel_transformer/model.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairseq import utils
from fairseq.models import (
BaseFairseqModel,
FairseqDecoder,
FairseqEncoder,
register_model,
register_model_architecture,
)
from fairseq.models.transformer import (
base_architecture,
transformer_iwslt_de_en,
transformer_wmt_en_de_big,
)
from fairseq.modules import SinusoidalPositionalEmbedding
from fairseq.model_parallel.models.pipeline_parallel_transformer.layers import (
Embedding,
TransformerEncoderLayer,
TransformerDecoderLayer,
TransformerEncoderEmbedding,
TransformerEncoderLayerNorm,
TransformerDecoderEmbedding,
TransformerDecoderOutputLayer,
)
import torch
import torch.nn as nn
import torch.nn.functional as F
DEFAULT_MAX_SOURCE_POSITIONS = 1024
DEFAULT_MAX_TARGET_POSITIONS = 1024
@register_model('pipeline_parallel_transformer')
class PipelineParallelTransformerModel(BaseFairseqModel):
def __init__(self, encoder, decoder, balance, devices, chunks, checkpoint):
try:
from fairscale.nn import Pipe
except ImportError:
raise ImportError('Please install fairscale with: pip install fairscale')
super().__init__()
assert isinstance(encoder, FairseqEncoder)
assert isinstance(decoder, FairseqDecoder)
module_list = nn.Sequential(
encoder.embedding_layer,
*list(encoder.encoder_layers),
encoder.final_layer_norm,
decoder.embedding_layer,
*list(decoder.decoder_layers),
decoder.decoder_output_layer,
)
self.devices = devices
self.model = Pipe(
module_list,
balance=balance,
devices=devices,
chunks=chunks,
checkpoint=checkpoint,
)
self.encoder_max_positions = self.max_positions_helper(
encoder.embedding_layer,
'max_source_positions'
)
self.decoder_max_positions = self.max_positions_helper(
decoder.embedding_layer,
'max_target_positions'
)
self.adaptive_softmax = getattr(decoder, 'adaptive_softmax', None)
def forward(self, src_tokens, src_lengths, prev_output_tokens):
input_lst = [src_tokens, src_lengths, prev_output_tokens]
input = tuple(i.to(self.devices[0], non_blocking=True) for i in input_lst)
return self.model(input)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
# fmt: off
parser.add_argument('--activation-fn',
choices=utils.get_available_activation_fns(),
help='activation function to use')
parser.add_argument('--dropout', type=float, metavar='D',
help='dropout probability')
parser.add_argument('--attention-dropout', type=float, metavar='D',
help='dropout probability for attention weights')
parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D',
help='dropout probability after activation in FFN.')
parser.add_argument('--encoder-embed-path', type=str, metavar='STR',
help='path to pre-trained encoder embedding')
parser.add_argument('--encoder-embed-dim', type=int, metavar='N',
help='encoder embedding dimension')
parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N',
help='encoder embedding dimension for FFN')
parser.add_argument('--encoder-layers', type=int, metavar='N',
help='num encoder layers')
parser.add_argument('--encoder-attention-heads', type=int, metavar='N',
help='num encoder attention heads')
parser.add_argument('--encoder-normalize-before', action='store_true',
help='apply layernorm before each encoder block')
parser.add_argument('--encoder-learned-pos', action='store_true',
help='use learned positional embeddings in the encoder')
parser.add_argument('--decoder-embed-path', type=str, metavar='STR',
help='path to pre-trained decoder embedding')
parser.add_argument('--decoder-embed-dim', type=int, metavar='N',
help='decoder embedding dimension')
parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N',
help='decoder embedding dimension for FFN')
parser.add_argument('--decoder-layers', type=int, metavar='N',
help='num decoder layers')
parser.add_argument('--decoder-attention-heads', type=int, metavar='N',
help='num decoder attention heads')
parser.add_argument('--decoder-learned-pos', action='store_true',
help='use learned positional embeddings in the decoder')
parser.add_argument('--decoder-normalize-before', action='store_true',
help='apply layernorm before each decoder block')
parser.add_argument('--share-decoder-input-output-embed', action='store_true',
help='share decoder input and output embeddings')
parser.add_argument('--share-all-embeddings', action='store_true',
help='share encoder, decoder and output embeddings'
' (requires shared dictionary and embed dim)')
parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true',
help='if set, disables positional embeddings (outside self attention)')
parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR',
help='comma separated list of adaptive softmax cutoff points. '
'Must be used with adaptive_loss criterion'),
parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D',
help='sets adaptive softmax dropout for the tail projections')
# fmt: on
@classmethod
def build_model_base(cls, args, task):
"""Build a new model instance."""
# make sure all arguments are present in older models
base_architecture(args)
if not hasattr(args, 'max_source_positions'):
args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS
if not hasattr(args, 'max_target_positions'):
args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS
src_dict, tgt_dict = task.source_dictionary, task.target_dictionary
def build_embedding(dictionary, embed_dim, path=None):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
emb = Embedding(num_embeddings, embed_dim, padding_idx)
# if provided, load from preloaded dictionaries
if path:
embed_dict = utils.parse_embedding(path)
utils.load_embedding(embed_dict, dictionary, emb)
return emb
if args.share_all_embeddings:
if src_dict != tgt_dict:
raise ValueError('--share-all-embeddings requires a joined dictionary')
if args.encoder_embed_dim != args.decoder_embed_dim:
raise ValueError(
'--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim')
if args.decoder_embed_path and (
args.decoder_embed_path != args.encoder_embed_path):
raise ValueError('--share-all-embeddings not compatible with --decoder-embed-path')
encoder_embed_tokens = build_embedding(
src_dict, args.encoder_embed_dim, args.encoder_embed_path,
)
decoder_embed_tokens = encoder_embed_tokens
args.share_decoder_input_output_embed = True
else:
encoder_embed_tokens = build_embedding(
src_dict, args.encoder_embed_dim, args.encoder_embed_path
)
decoder_embed_tokens = build_embedding(
tgt_dict, args.decoder_embed_dim, args.decoder_embed_path
)
encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens)
decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens)
return (encoder, decoder)
@classmethod
def build_encoder(cls, args, src_dict, embed_tokens):
return TransformerEncoder(args, src_dict, embed_tokens)
@classmethod
def build_decoder(cls, args, tgt_dict, embed_tokens):
return TransformerDecoder(args, tgt_dict, embed_tokens)
@classmethod
def build_model(cls, args, task):
encoder, decoder = cls.build_model_base(args, task)
return PipelineParallelTransformerModel(
encoder=encoder,
decoder=decoder,
balance=args.pipeline_balance,
devices=args.pipeline_devices,
chunks=args.pipeline_chunks,
checkpoint=args.pipeline_checkpoint,
)
def output_layer(self, features, **kwargs):
"""Project features to the default output size (typically vocabulary size)."""
return self.decoder.output_layer(features, **kwargs)
def max_positions(self):
"""Maximum length supported by the model."""
return (self.encoder_max_positions, self.decoder_max_positions)
def max_positions_helper(self, embedding_layer,
max_positions_field='max_source_positions'):
"""Maximum input length supported by the encoder or decoder."""
if embedding_layer.embed_positions is None:
return getattr(embedding_layer, max_positions_field)
return min(getattr(embedding_layer, max_positions_field),
embedding_layer.embed_positions.max_positions)
def get_normalized_probs(self, net_output, log_probs, sample=None):
"""Get normalized probabilities (or log probs) from a net's output."""
if hasattr(self, 'adaptive_softmax') and self.adaptive_softmax is not None:
if sample is not None:
assert 'target' in sample
target = sample['target']
else:
target = None
out = self.adaptive_softmax.get_log_prob(net_output, target=target)
return out.exp_() if not log_probs else out
logits = net_output
if log_probs:
return utils.log_softmax(logits, dim=-1, onnx_trace=False)
else:
return utils.softmax(logits, dim=-1, onnx_trace=False)
def max_decoder_positions(self):
"""Maximum length supported by the decoder."""
return self.decoder.max_positions()
def load_state_dict(self, state_dict, strict=True, args=None):
"""Copies parameters and buffers from *state_dict* into this module and
its descendants.
Overrides the method in :class:`nn.Module`. Compared with that method
this additionally "upgrades" *state_dicts* from old checkpoints.
"""
self.upgrade_state_dict(state_dict)
is_regular_transformer = not any('model.partitions' in k for k in state_dict)
if is_regular_transformer:
state_dict = self.convert_to_pipeline_parallel_state_dict(state_dict)
return super().load_state_dict(state_dict, strict)
def convert_to_pipeline_parallel_state_dict(self, state_dict):
new_state_dict = self.state_dict()
encoder_layer_idx = 0
decoder_layer_idx = 0
encoder_key_suffixes = [
'self_attn.k_proj.weight', 'self_attn.k_proj.bias',
'self_attn.v_proj.weight', 'self_attn.v_proj.bias',
'self_attn.q_proj.weight', 'self_attn.q_proj.bias',
'self_attn.out_proj.weight', 'self_attn.out_proj.bias',
'self_attn_layer_norm.weight', 'self_attn_layer_norm.bias', 'fc1.weight',
'fc1.bias', 'fc2.weight', 'fc2.bias', 'final_layer_norm.weight',
'final_layer_norm.bias',
]
decoder_key_suffixes = [
'self_attn.k_proj.weight', 'self_attn.k_proj.bias',
'self_attn.v_proj.weight', 'self_attn.v_proj.bias',
'self_attn.q_proj.weight', 'self_attn.q_proj.bias',
'self_attn.out_proj.weight', 'self_attn.out_proj.bias',
'self_attn_layer_norm.weight', 'self_attn_layer_norm.bias',
'encoder_attn.k_proj.weight', 'encoder_attn.k_proj.bias',
'encoder_attn.v_proj.weight', 'encoder_attn.v_proj.bias',
'encoder_attn.q_proj.weight', 'encoder_attn.q_proj.bias',
'encoder_attn.out_proj.weight', 'encoder_attn.out_proj.bias',
'encoder_attn_layer_norm.weight', 'encoder_attn_layer_norm.bias',
'fc1.weight', 'fc1.bias', 'fc2.weight', 'fc2.bias',
'final_layer_norm.weight', 'final_layer_norm.bias'
]
for pid, partition in enumerate(self.model.partitions):
print(f"Begin Partition {pid}")
for mid, module in enumerate(partition):
# fmt: off
if isinstance(module, TransformerEncoderEmbedding):
new_state_dict[f'model.partitions.{pid}.{mid}.embed_tokens.weight'] = state_dict['encoder.embed_tokens.weight']
new_state_dict[f'model.partitions.{pid}.{mid}.embed_positions._float_tensor'] = state_dict['encoder.embed_positions._float_tensor']
if isinstance(module, TransformerEncoderLayer):
for suffix in encoder_key_suffixes:
new_state_dict[f'model.partitions.{pid}.{mid}.{suffix}'] = state_dict[f'encoder.layers.{encoder_layer_idx}.{suffix}']
encoder_layer_idx += 1
if isinstance(module, TransformerDecoderLayer):
for suffix in decoder_key_suffixes:
new_state_dict[f'model.partitions.{pid}.{mid}.{suffix}'] = state_dict[f'decoder.layers.{decoder_layer_idx}.{suffix}']
decoder_layer_idx += 1
if isinstance(module, TransformerEncoderLayerNorm):
if 'encoder.layer_norm.weight' in state_dict:
new_state_dict[f'model.partitions.{pid}.{mid}.layer_norm.weight'] = state_dict['encoder.layer_norm.weight']
new_state_dict[f'model.partitions.{pid}.{mid}.layer_norm.bias'] = state_dict['encoder.layer_norm.bias']
if isinstance(module, TransformerDecoderEmbedding):
new_state_dict[f'model.partitions.{pid}.{mid}.embed_tokens.weight'] = state_dict['decoder.embed_tokens.weight']
new_state_dict[f'model.partitions.{pid}.{mid}.embed_positions._float_tensor'] = state_dict['decoder.embed_positions._float_tensor']
if isinstance(module, TransformerDecoderOutputLayer):
new_state_dict[f'model.partitions.{pid}.{mid}.output_projection.weight'] = state_dict['decoder.output_projection.weight']
# fmt: on
return new_state_dict
class TransformerEncoder(FairseqEncoder):
"""
Transformer encoder consisting of *args.encoder_layers* layers. Each layer
is a :class:`TransformerEncoderLayer`.
Args:
args (argparse.Namespace): parsed command-line arguments
dictionary (~fairseq.data.Dictionary): encoding dictionary
embed_tokens (torch.nn.Embedding): input embedding
"""
def __init__(self, args, dictionary, embed_tokens):
super().__init__(dictionary)
self.register_buffer('version', torch.Tensor([3]))
self.embedding_layer = TransformerEncoderEmbedding(args, embed_tokens)
layers = [
TransformerEncoderLayer(args) for i in range(args.encoder_layers)
]
# Note: layer drop not supported yet
# Note: layer wise attention not supported yet
self.encoder_layers = nn.Sequential(*layers)
if isinstance(embed_tokens, nn.ModuleList):
emb_dim = sum(e.embedding_dim for e in embed_tokens)
else:
emb_dim = embed_tokens.embedding_dim
self.final_layer_norm = \
TransformerEncoderLayerNorm(args, emb_dim)
def forward(self, src_tokens, src_lengths, prev_output_tokens):
"""
Args:
input_tuple(
src_tokens (LongTensor): tokens in the source language of shape
`(batch, src_len)`
src_lengths (torch.LongTensor): lengths of each source sentence of
shape `(batch)`
prev_output_tokens
)
Returns:
output_tuple(
- **encoder_out** (Tensor): the last encoder layer's output of
shape `(src_len, batch, embed_dim)`
- **encoder_padding_mask** (ByteTensor): the positions of
padding elements of shape `(batch, src_len)`
- prev_output_tokens
- **encoder_states** (List[Tensor]): all intermediate
hidden states of shape `(src_len, batch, embed_dim)`.
Only populated if *return_all_hiddens* is True.
)
"""
input_tuple = (src_tokens, src_lengths, prev_output_tokens)
encoder_embed_output_tuple = self.embedding_layer(input_tuple)
encoder_layers_output = self.encoder_layers(encoder_embed_output_tuple)
return self.final_layer_norm(encoder_layers_output)
def reorder_encoder_out(self, encoder_out, new_order):
"""
Reorder encoder output according to *new_order*.
Args:
encoder_out: output from the ``forward()`` method
new_order (LongTensor): desired order
Returns:
*encoder_out* rearranged according to *new_order*
"""
if encoder_out.encoder_out is not None:
encoder_out = encoder_out._replace(
encoder_out=encoder_out.encoder_out.index_select(1, new_order)
)
if encoder_out.encoder_padding_mask is not None:
encoder_out = encoder_out._replace(
encoder_padding_mask=encoder_out.encoder_padding_mask.index_select(0, new_order)
)
if encoder_out.encoder_embedding is not None:
encoder_out = encoder_out._replace(
encoder_embedding=encoder_out.encoder_embedding.index_select(0, new_order)
)
if encoder_out.encoder_states is not None:
for idx, state in enumerate(encoder_out.encoder_states):
encoder_out.encoder_states[idx] = state.index_select(1, new_order)
return encoder_out
def max_positions(self):
"""Maximum input length supported by the encoder."""
if self.embedding_layer.embed_positions is None:
return self.embedding_layer.max_source_positions
return min(self.embedding_layer.max_source_positions,
self.embedding_layer.embed_positions.max_positions)
def buffered_future_mask(self, tensor):
dim = tensor.size(0)
if not hasattr(self, '_future_mask') or self._future_mask is None or self._future_mask.device != tensor.device:
self._future_mask = torch.triu(utils.fill_with_neg_inf(tensor.new(dim, dim)), 1)
if self._future_mask.size(0) < dim:
self._future_mask = torch.triu(utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1)
return self._future_mask[:dim, :dim]
def upgrade_state_dict_named(self, state_dict, name):
"""Upgrade a (possibly old) state dict for new versions of fairseq."""
if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
weights_key = '{}.embed_positions.weights'.format(name)
if weights_key in state_dict:
print('deleting {0}'.format(weights_key))
del state_dict[weights_key]
state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1)
for i in range(len(self.layers)):
# update layer norms
self.layers[i].upgrade_state_dict_named(state_dict, "{}.layers.{}".format(name, i))
version_key = '{}.version'.format(name)
if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2:
# earlier checkpoints did not normalize after the stack of layers
self.layer_norm = None
self.normalize = False
state_dict[version_key] = torch.Tensor([1])
return state_dict
class TransformerDecoder(FairseqDecoder):
"""
Transformer decoder consisting of *args.decoder_layers* layers. Each layer
is a :class:`TransformerDecoderLayer`.
Args:
args (argparse.Namespace): parsed command-line arguments
dictionary (~fairseq.data.Dictionary): decoding dictionary
embed_tokens (torch.nn.Embedding): output embedding
no_encoder_attn (bool, optional): whether to attend to encoder outputs
(default: False).
"""
def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False):
super().__init__(dictionary)
self.register_buffer('version', torch.Tensor([3]))
self.embedding_layer = TransformerDecoderEmbedding(args, embed_tokens)
layers = [
TransformerDecoderLayer(args, no_encoder_attn)
for _ in range(args.decoder_layers)
]
self.decoder_layers = nn.Sequential(*layers)
self.decoder_output_layer = TransformerDecoderOutputLayer(args, embed_tokens, dictionary)
def forward(self, prev_output_tokens, encoder_out=None,):
"""
Args:
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for teacher forcing
encoder_out (optional): output from the encoder, used for
encoder-side attention
incremental_state (dict): dictionary used for storing state during
:ref:`Incremental decoding`
features_only (bool, optional): only return features without
applying output layer (default: False).
Returns:
tuple:
- the decoder's output of shape `(batch, tgt_len, vocab)`
- a dictionary with any model-specific outputs
"""
input = (prev_output_tokens, encoder_out)
embed_layer_output = self.embedding_layer(input)
state = self.decoder_layers(embed_layer_output)
return self.decoder_output_layer(state)
def extract_features(self, prev_output_tokens, encoder_out=None,):
"""
Similar to *forward* but only return features.
Includes several features from "Jointly Learning to Align and
Translate with Transformer Models" (Garg et al., EMNLP 2019).
Args:
full_context_alignment (bool, optional): don't apply
auto-regressive mask to self-attention (default: False).
alignment_layer (int, optional): return mean alignment over
heads at this layer (default: last layer).
alignment_heads (int, optional): only average alignment over
this many heads (default: all heads).
Returns:
tuple:
- the decoder's features of shape `(batch, tgt_len, embed_dim)`
- a dictionary with any model-specific outputs
"""
input = (prev_output_tokens, encoder_out)
embed_layer_output = self.embedding_layer(input)
state = self.decoder_layers(embed_layer_output)
return self.decoder_output_layer(state, apply_final_proj=False)
def output_layer(self, features, **kwargs):
"""Project features to the vocabulary size."""
if self.adaptive_softmax is None:
# project back to size of vocabulary
if self.share_input_output_embed:
return F.linear(features, self.embed_tokens.weight)
else:
return F.linear(features, self.embed_out)
else:
return features
def max_positions(self):
"""Maximum output length supported by the decoder."""
if self.embedding_layer.embed_positions is None:
return self.embedding_layer.max_target_positions
return min(self.embedding_layer.max_target_positions,
self.embedding_layer.embed_positions.max_positions)
def buffered_future_mask(self, tensor):
dim = tensor.size(0)
if (
not hasattr(self, '_future_mask')
or self._future_mask is None
or self._future_mask.device != tensor.device
or self._future_mask.size(0) < dim
):
self._future_mask = torch.triu(utils.fill_with_neg_inf(tensor.new(dim, dim)), 1)
return self._future_mask[:dim, :dim]
def upgrade_state_dict_named(self, state_dict, name):
"""Upgrade a (possibly old) state dict for new versions of fairseq."""
if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
weights_key = '{}.embed_positions.weights'.format(name)
if weights_key in state_dict:
del state_dict[weights_key]
state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1)
for i in range(len(self.layers)):
# update layer norms
layer_norm_map = {
'0': 'self_attn_layer_norm',
'1': 'encoder_attn_layer_norm',
'2': 'final_layer_norm'
}
for old, new in layer_norm_map.items():
for m in ('weight', 'bias'):
k = '{}.layers.{}.layer_norms.{}.{}'.format(name, i, old, m)
if k in state_dict:
state_dict['{}.layers.{}.{}.{}'.format(name, i, new, m)] = state_dict[k]
del state_dict[k]
version_key = '{}.version'.format(name)
if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) <= 2:
# earlier checkpoints did not normalize after the stack of layers
self.layer_norm = None
self.normalize = False
state_dict[version_key] = torch.Tensor([1])
return state_dict
@register_model_architecture('pipeline_parallel_transformer',
'transformer_iwslt_de_en_pipeline_parallel')
def transformer_iwslt_de_en_dist(args):
transformer_iwslt_de_en(args)
@register_model_architecture('pipeline_parallel_transformer',
'transformer_wmt_en_de_big_pipeline_parallel')
def transformer_wmt_en_de_big_dist(args):
transformer_wmt_en_de_big(args)
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/model_parallel/models/pipeline_parallel_transformer/__init__.py | fairseq/model_parallel/models/pipeline_parallel_transformer/__init__.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .model import * # noqa
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/model_parallel/models/pipeline_parallel_transformer/layers.py | fairseq/model_parallel/models/pipeline_parallel_transformer/layers.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import namedtuple
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import options, utils
from fairseq.modules import (
AdaptiveSoftmax,
LayerNorm,
PositionalEmbedding,
MultiheadAttention,
)
EncoderOut = namedtuple('TransformerEncoderOut', [
'encoder_out', # T x B x C
'encoder_padding_mask', # B x T
'encoder_embedding', # B x T x C
'encoder_states', # List[T x B x C]
])
class TransformerEncoderEmbedding(nn.Module):
""" Encoder Embedding + Positional Embedding """
def __init__(self, args, embed_tokens):
super().__init__()
self.dropout = args.dropout
self.max_source_positions = args.max_source_positions
self.embed_tokens = embed_tokens
if isinstance(embed_tokens, nn.ModuleList):
self.padding_idx = embed_tokens[0].padding_idx
embed_dim = sum(e.embedding_dim for e in embed_tokens)
else:
self.padding_idx = embed_tokens.padding_idx
embed_dim = embed_tokens.embedding_dim
self.embed_scale = math.sqrt(embed_dim)
self.embed_positions = PositionalEmbedding(
args.max_source_positions, embed_dim, self.padding_idx,
learned=args.encoder_learned_pos,
) if not args.no_token_positional_embeddings else None
if getattr(args, 'layernorm_embedding', False):
self.layernorm_embedding = LayerNorm(embed_dim)
else:
self.layernorm_embedding = None
def forward(self, input):
# embed tokens and positions
src_tokens = input[0]
prev_output_tokens = input[2]
if isinstance(self.embed_tokens, nn.ModuleList):
x_embed_list = []
for embed_tokens_part in self.embed_tokens:
x_embed_list.append(embed_tokens_part(src_tokens))
embedded = torch.cat(x_embed_list, dim=-1)
else:
embedded = self.embed_tokens(src_tokens)
x = embed = self.embed_scale * embedded
if self.embed_positions is not None:
x = embed + self.embed_positions(src_tokens)
if self.layernorm_embedding:
x = self.layernorm_embedding(x)
x = F.dropout(x, p=self.dropout, training=self.training)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
# compute padding mask
encoder_padding_mask = src_tokens.eq(self.padding_idx)
return (x, encoder_padding_mask, prev_output_tokens)
class TransformerEncoderLayerNorm(nn.Module):
"""
Layer norm at the the end of all encoder layers if
args.encoder_enormalize_before = True
"""
def __init__(self, args, embed_dim):
super().__init__()
if args.encoder_normalize_before:
self.layer_norm = LayerNorm(embed_dim)
else:
self.layer_norm = None
def forward(self, input):
x = input[0]
encoder_padding_mask = input[1]
prev_output_tokens = input[2]
if self.layer_norm:
x = self.layer_norm(x)
# keeping track of the incremental_state is not supported yet
return (x, encoder_padding_mask, prev_output_tokens)
class TransformerDecoderEmbedding(nn.Module):
""" Decoder Embedding + Positional Embedding """
def __init__(self, args, embed_tokens):
super().__init__()
self.dropout = args.dropout
self.share_input_output_embed = args.share_decoder_input_output_embed
input_embed_dim = sum(e.embedding_dim for e in embed_tokens) \
if isinstance(embed_tokens, nn.ModuleList) \
else embed_tokens.embedding_dim
embed_dim = args.decoder_embed_dim
self.output_embed_dim = args.decoder_output_dim
padding_idx = embed_tokens[0].padding_idx \
if isinstance(embed_tokens, nn.ModuleList) \
else embed_tokens.padding_idx
self.max_target_positions = args.max_target_positions
self.embed_tokens = embed_tokens
self.embed_scale = math.sqrt(embed_dim) # todo: try with input_embed_dim
self.project_in_dim = Linear(input_embed_dim, embed_dim, bias=False) if embed_dim != input_embed_dim else None
self.embed_positions = PositionalEmbedding(
args.max_target_positions, embed_dim, padding_idx,
learned=args.decoder_learned_pos,
) if not args.no_token_positional_embeddings else None
def forward(self, input):
mt_task = False
if isinstance(input, tuple):
if len(input) == 3:
encoder_out = input[0]
encoder_padding_mask = input[1]
prev_output_tokens = input[2]
incremental_state = None # Hardcoding to avoid passing of None objects
mt_task = True
else:
# HACK for now, need to fix (TODO sidgoyal)
prev_output_tokens = input[0]
# discard "src_lengths"
encoder_out = None
encoder_padding_mask = None
incremental_state = None
else:
prev_output_tokens = input
encoder_out = None
encoder_padding_mask = None
incremental_state = None
positions = self.embed_positions(
prev_output_tokens,
incremental_state=incremental_state,
) if self.embed_positions is not None else None
if incremental_state is not None:
prev_output_tokens = prev_output_tokens[:, -1:]
if positions is not None:
positions = positions[:, -1:]
# embed tokens and positions
if isinstance(self.embed_tokens, nn.ModuleList):
x_embed_list = []
for embed_tokens_part in self.embed_tokens:
x_embed_list.append(embed_tokens_part(prev_output_tokens))
x = self.embed_scale * torch.cat(x_embed_list, dim=-1)
else:
x = self.embed_scale * self.embed_tokens(prev_output_tokens)
if self.project_in_dim is not None:
x = self.project_in_dim(x)
if positions is not None:
x += positions
x = F.dropout(x, p=self.dropout, training=self.training)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
if mt_task:
return (x, encoder_out, encoder_padding_mask)
return x
class TransformerDecoderOutputLayer(nn.Module):
def __init__(self, args, embed_tokens, dictionary):
super().__init__()
self.share_input_output_embed = args.share_decoder_input_output_embed
self.output_embed_dim = args.decoder_output_dim
embed_dim = args.decoder_embed_dim
self.project_out_dim = Linear(embed_dim, self.output_embed_dim, bias=False) \
if embed_dim != self.output_embed_dim and not args.tie_adaptive_weights else None
self.adaptive_softmax = None
if args.adaptive_softmax_cutoff is not None:
assert not isinstance(embed_tokens, nn.ModuleList)
self.adaptive_softmax = AdaptiveSoftmax(
len(dictionary),
self.output_embed_dim,
options.eval_str_list(args.adaptive_softmax_cutoff, type=int),
dropout=args.adaptive_softmax_dropout,
adaptive_inputs=embed_tokens if args.tie_adaptive_weights else None,
factor=args.adaptive_softmax_factor,
tie_proj=args.tie_adaptive_proj,
)
elif self.share_input_output_embed:
self.output_projection = nn.Linear(
embed_tokens.weight.shape[1],
embed_tokens.weight.shape[0],
bias=False,
)
self.output_projection.weight = embed_tokens.weight
elif not self.share_input_output_embed:
self.output_projection = nn.Linear(
self.output_embed_dim, len(dictionary), bias=False
)
nn.init.normal_(
self.output_projection.weight, mean=0, std=self.output_embed_dim ** -0.5
)
if args.decoder_normalize_before and not getattr(args, 'no_decoder_final_norm', False):
self.layer_norm = LayerNorm(embed_dim)
else:
self.layer_norm = None
def forward(self, input, apply_final_proj=True):
if isinstance(input, tuple):
x = input[0]
else:
x = input
if self.layer_norm:
x = self.layer_norm(x)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
if self.project_out_dim is not None:
x = self.project_out_dim(x)
if apply_final_proj:
x = self.output_layer(x)
return x
def output_layer(self, features, **kwargs):
"""Project features to the vocabulary size."""
if self.adaptive_softmax is None:
# project back to size of vocabulary
return self.output_projection(features)
else:
return features
class TransformerEncoderLayer(nn.Module):
"""Encoder layer block.
In the original paper each operation (multi-head attention or FFN) is
postprocessed with: `dropout -> add residual -> layernorm`. In the
tensor2tensor code they suggest that learning is more robust when
preprocessing each layer with layernorm and postprocessing with:
`dropout -> add residual`. We default to the approach in the paper, but the
tensor2tensor approach can be enabled by setting
*args.encoder_normalize_before* to ``True``.
Args:
args (argparse.Namespace): parsed command-line arguments
"""
def __init__(self, args):
super().__init__()
self.embed_dim = args.encoder_embed_dim
self.self_attn = MultiheadAttention(
self.embed_dim, args.encoder_attention_heads,
dropout=args.attention_dropout, self_attention=True
)
self.self_attn_layer_norm = LayerNorm(self.embed_dim)
self.dropout = args.dropout
self.activation_fn = utils.get_activation_fn(
activation=getattr(args, 'activation_fn', 'relu')
)
self.activation_dropout = getattr(args, 'activation_dropout', 0)
if self.activation_dropout == 0:
# for backwards compatibility with models that use args.relu_dropout
self.activation_dropout = getattr(args, 'relu_dropout', 0)
self.normalize_before = args.encoder_normalize_before
self.fc1 = Linear(self.embed_dim, args.encoder_ffn_embed_dim)
self.fc2 = Linear(args.encoder_ffn_embed_dim, self.embed_dim)
self.final_layer_norm = LayerNorm(self.embed_dim)
def upgrade_state_dict_named(self, state_dict, name):
"""
Rename layer norm states from `...layer_norms.0.weight` to
`...self_attn_layer_norm.weight` and `...layer_norms.1.weight` to
`...final_layer_norm.weight`
"""
layer_norm_map = {
'0': 'self_attn_layer_norm',
'1': 'final_layer_norm'
}
for old, new in layer_norm_map.items():
for m in ('weight', 'bias'):
k = '{}.layer_norms.{}.{}'.format(name, old, m)
if k in state_dict:
state_dict[
'{}.{}.{}'.format(name, new, m)
] = state_dict[k]
del state_dict[k]
def forward(self, input):
"""
Args:
input (Tuple):
input[0] (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`
input[1] (ByteTensor/FloatTensor): encoder padding mask -
binary ByteTensor of shape `(batch, src_len)` where padding elements
are indicated by ``1``.
input[2] (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for teacher forcing)
Returns:
output (Tuple):
output[0] (Tensor): encoded output of shape `(batch, src_len, embed_dim)`
output[1] (ByteTensor/FloatTensor): encoder padding mask
output[2] (LongTensor): previous decoder outputs
"""
x = input[0]
encoder_padding_mask = input[1]
prev_output_tokens = input[2]
residual = x
x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True)
x, _ = self.self_attn(query=x, key=x, value=x, key_padding_mask=encoder_padding_mask)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True)
residual = x
x = self.maybe_layer_norm(self.final_layer_norm, x, before=True)
x = self.activation_fn(self.fc1(x))
x = F.dropout(x, p=self.activation_dropout, training=self.training)
x = self.fc2(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
x = self.maybe_layer_norm(self.final_layer_norm, x, after=True)
return (x, encoder_padding_mask, prev_output_tokens)
def maybe_layer_norm(self, layer_norm, x, before=False, after=False):
assert before ^ after
if after ^ self.normalize_before:
return layer_norm(x)
else:
return x
class TransformerDecoderLayer(nn.Module):
"""Decoder layer block.
In the original paper each operation (multi-head attention, encoder
attention or FFN) is postprocessed with: `dropout -> add residual ->
layernorm`. In the tensor2tensor code they suggest that learning is more
robust when preprocessing each layer with layernorm and postprocessing with:
`dropout -> add residual`. We default to the approach in the paper, but the
tensor2tensor approach can be enabled by setting
*args.decoder_normalize_before* to ``True``.
Args:
args (argparse.Namespace): parsed command-line arguments
no_encoder_attn (bool, optional): whether to attend to encoder outputs
(default: False).
"""
def __init__(self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False):
super().__init__()
self.embed_dim = args.decoder_embed_dim
self.self_attn = MultiheadAttention(
embed_dim=self.embed_dim,
num_heads=args.decoder_attention_heads,
dropout=args.attention_dropout,
add_bias_kv=add_bias_kv,
add_zero_attn=add_zero_attn,
self_attention=True
)
self.dropout = args.dropout
self.activation_fn = utils.get_activation_fn(
activation=getattr(args, 'activation_fn', 'relu')
)
self.activation_dropout = getattr(args, 'activation_dropout', 0)
if self.activation_dropout == 0:
# for backwards compatibility with models that use args.relu_dropout
self.activation_dropout = getattr(args, 'relu_dropout', 0)
self.normalize_before = args.decoder_normalize_before
# use layerNorm rather than FusedLayerNorm for exporting.
# char_inputs can be used to determint this.
# TODO remove this once we update apex with the fix
export = getattr(args, 'char_inputs', False)
self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export)
if no_encoder_attn:
self.encoder_attn = None
self.encoder_attn_layer_norm = None
else:
self.encoder_attn = MultiheadAttention(
self.embed_dim,
args.decoder_attention_heads,
kdim=getattr(args, 'encoder_embed_dim', None),
vdim=getattr(args, 'encoder_embed_dim', None),
dropout=args.attention_dropout,
encoder_decoder_attention=True,
)
self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=export)
self.fc1 = Linear(self.embed_dim, args.decoder_ffn_embed_dim)
self.fc2 = Linear(args.decoder_ffn_embed_dim, self.embed_dim)
self.final_layer_norm = LayerNorm(self.embed_dim, export=export)
self.need_attn = True
self.onnx_trace = False
def prepare_for_onnx_export_(self):
self.onnx_trace = True
def forward(self, input):
"""
Args:
input (Tuple):
input[0] (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`
input[1] (Tensor): encoder output of shape `(batch, src_len, embed_dim)`
input[2] (ByteTensor/FloatTensor): encoder padding mask -
binary ByteTensor of shape `(batch, src_len)` where padding elements
are indicated by ``1``.
Returns:
output (Tuple):
output[0] (Tensor): encoded output of shape `(batch, src_len, embed_dim)`
output[1] (ByteTensor/FloatTensor): encoder padding mask
output[2] (LongTensor): previous decoder outputs
"""
# Note: incremental state is not yet supported
mt_task = False
if isinstance(input, tuple):
x = input[0]
encoder_out = input[1]
encoder_padding_mask = input[2]
incremental_state = None
mt_task = True
else:
x = input
encoder_out = None
encoder_padding_mask = None
incremental_state = None
if incremental_state is None:
self_attn_mask = self.buffered_future_mask(x)
else:
self_attn_mask = None
# TODO: add back prev_self_attn_state, prev_attn_state,
# self_attn_padding_mask
prev_self_attn_state = None
prev_attn_state = None
self_attn_padding_mask = None
residual = x
x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True)
if prev_self_attn_state is not None:
if incremental_state is None:
incremental_state = {}
prev_key, prev_value = prev_self_attn_state
saved_state = {"prev_key": prev_key, "prev_value": prev_value}
self.self_attn._set_input_buffer(incremental_state, saved_state)
x, attn = self.self_attn(
query=x,
key=x,
value=x,
key_padding_mask=self_attn_padding_mask,
incremental_state=incremental_state,
need_weights=False,
attn_mask=self_attn_mask,
)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True)
if self.encoder_attn is not None:
residual = x
x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, before=True)
if prev_attn_state is not None:
if incremental_state is None:
incremental_state = {}
prev_key, prev_value = prev_attn_state
saved_state = {"prev_key": prev_key, "prev_value": prev_value}
self.encoder_attn._set_input_buffer(incremental_state, saved_state)
x, attn = self.encoder_attn(
query=x,
key=encoder_out,
value=encoder_out,
key_padding_mask=encoder_padding_mask,
incremental_state=incremental_state,
static_kv=True,
need_weights=(not self.training and self.need_attn),
)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, after=True)
residual = x
x = self.maybe_layer_norm(self.final_layer_norm, x, before=True)
x = self.activation_fn(self.fc1(x))
x = F.dropout(x, p=self.activation_dropout, training=self.training)
x = self.fc2(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
x = self.maybe_layer_norm(self.final_layer_norm, x, after=True)
if mt_task:
return (x, encoder_out, encoder_padding_mask)
return x
def buffered_future_mask(self, tensor):
dim = tensor.size(0)
if not hasattr(self, '_future_mask') or self._future_mask is None or self._future_mask.device != tensor.device:
self._future_mask = torch.triu(utils.fill_with_neg_inf(tensor.new(dim, dim)), 1)
if self._future_mask.size(0) < dim:
self._future_mask = torch.triu(utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1)
return self._future_mask[:dim, :dim]
def maybe_layer_norm(self, layer_norm, x, before=False, after=False):
assert before ^ after
if after ^ self.normalize_before:
return layer_norm(x)
else:
return x
def make_generation_fast_(self, need_attn=False, **kwargs):
self.need_attn = need_attn
def Embedding(num_embeddings, embedding_dim, padding_idx):
m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)
nn.init.constant_(m.weight[padding_idx], 0)
return m
def Linear(in_features, out_features, bias=True):
m = nn.Linear(in_features, out_features, bias)
nn.init.xavier_uniform_(m.weight)
if bias:
nn.init.constant_(m.bias, 0.)
return m
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/model_parallel/criterions/vocab_parallel_cross_entropy.py | fairseq/model_parallel/criterions/vocab_parallel_cross_entropy.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
try:
from fairseq.model_parallel.megatron.mpu.cross_entropy import vocab_parallel_cross_entropy
has_megatron_submodule = True
except (ImportError, ModuleNotFoundError):
has_megatron_submodule = False
@register_criterion('vocab_parallel_cross_entropy')
class VocabParallelCrossEntropyCriterion(FairseqCriterion):
def __init__(self, task, sentence_avg):
super().__init__(task)
self.sentence_avg = sentence_avg
if not has_megatron_submodule:
raise ImportError(
'\n\nPlease install the megatron submodule:'
'\n\n git submodule update --init '
'fairseq/model_parallel/megatron'
)
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
net_output = model(**sample['net_input'])
target = sample['target']
loss = vocab_parallel_cross_entropy(net_output[0].float(), target)
loss = (loss * (target != self.padding_idx)).sum()
sample_size = sample['target'].size(0) if self.sentence_avg else sample['ntokens']
logging_output = {
'loss': utils.item(loss.data) if reduce else loss.data,
'ntokens': sample['ntokens'],
'nsentences': sample['target'].size(0),
'sample_size': sample_size,
}
return loss, sample_size, logging_output
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
loss_sum = sum(log.get('loss', 0) for log in logging_outputs)
ntokens = sum(log.get('ntokens', 0) for log in logging_outputs)
sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)
metrics.log_scalar('loss', loss_sum / sample_size / math.log(2), sample_size, round=3)
if sample_size != ntokens:
metrics.log_scalar('nll_loss', loss_sum / ntokens / math.log(2), ntokens, round=3)
metrics.log_derived('ppl', lambda meters: utils.get_perplexity(meters['nll_loss'].avg))
else:
metrics.log_derived('ppl', lambda meters: utils.get_perplexity(meters['loss'].avg))
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
to True will improves distributed training speed.
"""
return True
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/model_parallel/criterions/__init__.py | fairseq/model_parallel/criterions/__init__.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import importlib
import os
# automatically import any Python files in the criterions/ directory
for file in os.listdir(os.path.dirname(__file__)):
if file.endswith('.py') and not file.startswith('_'):
module = file[:file.find('.py')]
importlib.import_module('fairseq.model_parallel.criterions.' + module)
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/model_parallel/modules/transformer_layer.py | fairseq/model_parallel/modules/transformer_layer.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairseq.modules import (
TransformerEncoderLayer,
TransformerDecoderLayer,
)
from fairseq.model_parallel.modules import ModelParallelMultiheadAttention
try:
from fairseq.model_parallel.megatron.mpu import (
ColumnParallelLinear,
RowParallelLinear,
)
has_megatron_submodule = True
except (ImportError, ModuleNotFoundError):
has_megatron_submodule = False
class ModelParallelTransformerEncoderLayer(TransformerEncoderLayer):
"""Encoder layer block over multiple gpus.
See "Megatron-LM: https://arxiv.org/pdf/1909.08053.pdf" for more details.
"""
def build_fc1(self, input_dim, output_dim, q_noise, qn_block_size):
if q_noise > 0:
raise NotImplementedError
return ColumnParallelLinear(input_dim, output_dim, gather_output=False)
def build_fc2(self, input_dim, output_dim, q_noise, qn_block_size):
if q_noise > 0:
raise NotImplementedError
return RowParallelLinear(input_dim, output_dim, input_is_parallel=True)
def build_self_attention(self, embed_dim, args, **unused_kwargs):
return ModelParallelMultiheadAttention(
embed_dim,
args.encoder_attention_heads,
dropout=args.attention_dropout,
self_attention=True,
)
class ModelParallelTransformerDecoderLayer(TransformerDecoderLayer):
"""Decoder layer block.
See "Megatron-LM: https://arxiv.org/pdf/1909.08053.pdf" for more details.
"""
def build_fc1(self, input_dim, output_dim, q_noise, qn_block_size):
if q_noise > 0:
raise NotImplementedError
return ColumnParallelLinear(input_dim, output_dim, gather_output=False)
def build_fc2(self, input_dim, output_dim, q_noise, qn_block_size):
if q_noise > 0:
raise NotImplementedError
return RowParallelLinear(input_dim, output_dim, input_is_parallel=True)
def build_self_attention(self, embed_dim, args, **unused_kwargs):
return ModelParallelMultiheadAttention(
embed_dim=embed_dim,
num_heads=args.decoder_attention_heads,
dropout=args.attention_dropout,
self_attention=not getattr(args, "cross_self_attention", False),
)
def build_encoder_attention(self, embed_dim, args, **unused_kwargs):
return ModelParallelMultiheadAttention(
embed_dim=embed_dim,
num_heads=args.decoder_attention_heads,
kdim=getattr(args, "encoder_embed_dim", None),
vdim=getattr(args, "encoder_embed_dim", None),
dropout=args.attention_dropout,
encoder_decoder_attention=True,
)
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/model_parallel/modules/transformer_sentence_encoder.py | fairseq/model_parallel/modules/transformer_sentence_encoder.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq.modules import (
LayerNorm,
MultiheadAttention,
PositionalEmbedding,
TransformerSentenceEncoder,
)
from fairseq.model_parallel.modules import (
ModelParallelTransformerSentenceEncoderLayer,
)
try:
from fairseq.model_parallel.megatron.mpu import (
copy_to_model_parallel_region,
gather_from_model_parallel_region,
VocabParallelEmbedding,
)
has_megatron_submodule = True
except (ImportError, ModuleNotFoundError):
has_megatron_submodule = False
import random
class ModelParallelTransformerSentenceEncoder(TransformerSentenceEncoder):
"""
Implementation for a Model Parallel Bi-directional Transformer based
Sentence Encoder used in BERT/XLM style pre-trained models.
"""
def build_embedding(self, vocab_size, embedding_dim, padding_idx):
return VocabParallelEmbedding(vocab_size, embedding_dim, padding_idx)
def build_transformer_sentence_encoder_layer(
self,
embedding_dim,
ffn_embedding_dim,
num_attention_heads,
dropout,
attention_dropout,
activation_dropout,
activation_fn,
export,
**unused,
):
return ModelParallelTransformerSentenceEncoderLayer(
embedding_dim=embedding_dim,
ffn_embedding_dim=ffn_embedding_dim,
num_attention_heads=num_attention_heads,
dropout=dropout,
attention_dropout=attention_dropout,
activation_dropout=activation_dropout,
activation_fn=activation_fn,
export=export,
)
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/model_parallel/modules/multihead_attention.py | fairseq/model_parallel/modules/multihead_attention.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, Optional, Tuple
import torch
import torch.nn.functional as F
from fairseq import utils
from torch import Tensor, nn
from fairseq.incremental_decoding_utils import with_incremental_state
from fairseq.modules.fairseq_dropout import FairseqDropout
try:
from fairseq.model_parallel.megatron.mpu import (
get_cuda_rng_tracker,
get_model_parallel_world_size,
ColumnParallelLinear,
RowParallelLinear,
)
has_megatron_submodule = True
except (ImportError, ModuleNotFoundError):
has_megatron_submodule = False
@with_incremental_state
class ModelParallelMultiheadAttention(nn.Module):
"""Model parallel Multi-headed attention.
This performs the Multi-headed attention over multiple gpus.
See "Megatron-LM: https://arxiv.org/pdf/1909.08053.pdf" for more details.
"""
def __init__(
self,
embed_dim,
num_heads,
kdim=None,
vdim=None,
dropout=0.0,
bias=True,
self_attention=False,
encoder_decoder_attention=False,
):
super().__init__()
if not has_megatron_submodule:
raise ImportError(
'\n\nPlease install the megatron submodule:'
'\n\n git submodule update --init '
'fairseq/model_parallel/megatron'
)
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim
self.model_parallel_size = get_model_parallel_world_size()
self.num_heads_partition = num_heads // self.model_parallel_size
assert (
self.num_heads_partition * self.model_parallel_size == num_heads
), "Number of heads must be divisble by model parallel size"
self.dropout_module = FairseqDropout(
dropout, module_name=self.__class__.__name__
)
self.head_dim = embed_dim // num_heads
assert (
self.head_dim * num_heads == self.embed_dim
), "embed_dim must be divisible by num_heads"
self.scaling = self.head_dim ** -0.5
self.self_attention = self_attention
self.encoder_decoder_attention = encoder_decoder_attention
assert not self.self_attention or self.qkv_same_dim, (
"Self-attention requires query, key and value to be of the same size"
)
self.k_proj = ColumnParallelLinear(self.kdim, embed_dim, bias=bias, gather_output=False)
self.v_proj = ColumnParallelLinear(self.vdim, embed_dim, bias=bias, gather_output=False)
self.q_proj = ColumnParallelLinear(embed_dim, embed_dim, bias=bias, gather_output=False)
self.out_proj = RowParallelLinear(embed_dim, embed_dim, bias=bias, input_is_parallel=True)
def forward(
self,
query,
key: Optional[Tensor],
value: Optional[Tensor],
key_padding_mask: Optional[Tensor] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
static_kv: bool = False,
attn_mask: Optional[Tensor] = None,
**unused_kwargs,
) -> Tuple[Tensor, Optional[Tensor]]:
"""Input shape: Time x Batch x Channel
Args:
key_padding_mask (ByteTensor, optional): mask to exclude
keys that are pads, of shape `(batch, src_len)`, where
padding elements are indicated by 1s.
attn_mask (ByteTensor, optional): typically used to
implement causal attention, where the mask prevents the
attention from looking forward in time (default: None).
"""
tgt_len, bsz, embed_dim = query.size()
assert embed_dim == self.embed_dim
assert list(query.size()) == [tgt_len, bsz, embed_dim]
if incremental_state is not None:
saved_state = self._get_input_buffer(incremental_state)
if saved_state is not None and "prev_key" in saved_state:
# previous time steps are cached - no need to recompute
# key and value if they are static
if static_kv:
assert self.encoder_decoder_attention and not self.self_attention
key = value = None
else:
saved_state = None
if self.self_attention:
q = self.q_proj(query)
k = self.k_proj(query)
v = self.v_proj(query)
elif self.encoder_decoder_attention:
# encoder-decoder attention
q = self.q_proj(query)
if key is None:
assert value is None
k = v = None
else:
k = self.k_proj(key)
v = self.v_proj(key)
else:
assert key is not None and value is not None
q = self.q_proj(query)
k = self.k_proj(key)
v = self.v_proj(value)
q *= self.scaling
q = (
q.contiguous()
.view(tgt_len, bsz * self.num_heads_partition, self.head_dim)
.transpose(0, 1)
)
if k is not None:
k = (
k.contiguous()
.view(-1, bsz * self.num_heads_partition, self.head_dim)
.transpose(0, 1)
)
if v is not None:
v = (
v.contiguous()
.view(-1, bsz * self.num_heads_partition, self.head_dim)
.transpose(0, 1)
)
if saved_state is not None:
# saved states are stored with shape (bsz, num_heads_partition, seq_len, head_dim)
if "prev_key" in saved_state:
_prev_key = saved_state["prev_key"]
assert _prev_key is not None
prev_key = _prev_key.view(bsz * self.num_heads_partition, -1, self.head_dim)
if static_kv:
k = prev_key
else:
assert k is not None
k = torch.cat([prev_key, k], dim=1)
if "prev_value" in saved_state:
_prev_value = saved_state["prev_value"]
assert _prev_value is not None
prev_value = _prev_value.view(bsz * self.num_heads_partition, -1, self.head_dim)
if static_kv:
v = prev_value
else:
assert v is not None
v = torch.cat([prev_value, v], dim=1)
prev_key_padding_mask: Optional[Tensor] = None
if "prev_key_padding_mask" in saved_state:
prev_key_padding_mask = saved_state["prev_key_padding_mask"]
assert k is not None and v is not None
key_padding_mask = ModelParallelMultiheadAttention._append_prev_key_padding_mask(
key_padding_mask=key_padding_mask,
prev_key_padding_mask=prev_key_padding_mask,
batch_size=bsz,
src_len=k.size(1),
static_kv=static_kv,
)
saved_state["prev_key"] = k.view(bsz, self.num_heads_partition, -1, self.head_dim)
saved_state["prev_value"] = v.view(bsz, self.num_heads_partition, -1, self.head_dim)
saved_state["prev_key_padding_mask"] = key_padding_mask
# In this branch incremental_state is never None
assert incremental_state is not None
incremental_state = self._set_input_buffer(incremental_state, saved_state)
assert k is not None
src_len = k.size(1)
# This is part of a workaround to get around fork/join parallelism
# not supporting Optional types.
if key_padding_mask is not None and key_padding_mask.dim() == 0:
key_padding_mask = None
if key_padding_mask is not None:
assert key_padding_mask.size(0) == bsz
assert key_padding_mask.size(1) == src_len
attn_weights = torch.bmm(q, k.transpose(1, 2))
assert list(attn_weights.size()) == [bsz * self.num_heads_partition, tgt_len, src_len]
if attn_mask is not None:
attn_mask = attn_mask.unsqueeze(0)
attn_weights += attn_mask
if key_padding_mask is not None:
# don't attend to padding symbols
attn_weights = attn_weights.view(bsz, self.num_heads_partition, tgt_len, src_len)
attn_weights = attn_weights.masked_fill(
key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool), float("-inf")
)
attn_weights = attn_weights.view(bsz * self.num_heads_partition, tgt_len, src_len)
attn_weights_float = utils.softmax(
attn_weights, dim=-1
)
attn_weights = attn_weights_float.type_as(attn_weights)
with get_cuda_rng_tracker().fork():
attn_probs = self.dropout_module(attn_weights)
assert v is not None
attn = torch.bmm(attn_probs, v)
assert list(attn.size()) == [bsz * self.num_heads_partition, tgt_len, self.head_dim]
embed_dim_partition = embed_dim // self.model_parallel_size
attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim_partition)
attn = self.out_proj(attn)
# return attn_weights None to keep the return type same as single gpu multihead attention
# This will be deprecated.
attn_weights: Optional[Tensor] = None
return attn, attn_weights
@staticmethod
def _append_prev_key_padding_mask(
key_padding_mask: Optional[Tensor],
prev_key_padding_mask: Optional[Tensor],
batch_size: int,
src_len: int,
static_kv: bool,
) -> Optional[Tensor]:
# saved key padding masks have shape (bsz, seq_len)
if prev_key_padding_mask is not None and static_kv:
new_key_padding_mask = prev_key_padding_mask
elif prev_key_padding_mask is not None and key_padding_mask is not None:
new_key_padding_mask = torch.cat(
[prev_key_padding_mask.float(), key_padding_mask.float()], dim=1
)
# During incremental decoding, as the padding token enters and
# leaves the frame, there will be a time when prev or current
# is None
elif prev_key_padding_mask is not None:
filler = torch.zeros(batch_size, src_len - prev_key_padding_mask.size(1))
if prev_key_padding_mask.is_cuda:
filler = filler.cuda()
new_key_padding_mask = torch.cat(
[prev_key_padding_mask.float(), filler.float()], dim=1
)
elif key_padding_mask is not None:
filler = torch.zeros(batch_size, src_len - key_padding_mask.size(1))
if key_padding_mask.is_cuda:
filler = filler.cuda()
new_key_padding_mask = torch.cat(
[filler.float(), key_padding_mask.float()], dim=1
)
else:
new_key_padding_mask = prev_key_padding_mask
return new_key_padding_mask
def reorder_incremental_state(
self, incremental_state: Dict[str, Dict[str, Optional[Tensor]]], new_order
):
"""Reorder buffered internal state (for incremental generation)."""
input_buffer = self._get_input_buffer(incremental_state)
if input_buffer is not None:
for k in input_buffer.keys():
if input_buffer[k] is not None:
input_buffer[k] = input_buffer[k].index_select(0, new_order)
incremental_state = self._set_input_buffer(incremental_state, input_buffer)
return incremental_state
def _get_input_buffer(
self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]]
) -> Dict[str, Optional[Tensor]]:
result = self.get_incremental_state(incremental_state, "attn_state")
if result is not None:
return result
else:
empty_result: Dict[str, Optional[Tensor]] = {}
return empty_result
def _set_input_buffer(
self,
incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
buffer: Dict[str, Optional[Tensor]],
):
return self.set_incremental_state(incremental_state, "attn_state", buffer)
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/model_parallel/modules/__init__.py | fairseq/model_parallel/modules/__init__.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .multihead_attention import ModelParallelMultiheadAttention
from .transformer_layer import ModelParallelTransformerEncoderLayer, ModelParallelTransformerDecoderLayer
from .transformer_sentence_encoder_layer import ModelParallelTransformerSentenceEncoderLayer
from .transformer_sentence_encoder import ModelParallelTransformerSentenceEncoder
__all__ = [
'ModelParallelMultiheadAttention',
'ModelParallelTransformerEncoderLayer',
'ModelParallelTransformerDecoderLayer',
'ModelParallelTransformerSentenceEncoder',
'ModelParallelTransformerSentenceEncoderLayer',
]
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/model_parallel/modules/transformer_sentence_encoder_layer.py | fairseq/model_parallel/modules/transformer_sentence_encoder_layer.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.modules import (
TransformerSentenceEncoderLayer
)
from fairseq.model_parallel.modules import ModelParallelMultiheadAttention
try:
from fairseq.model_parallel.megatron.mpu import (
ColumnParallelLinear,
RowParallelLinear,
)
has_megatron_submodule = True
except (ImportError, ModuleNotFoundError):
has_megatron_submodule = False
class ModelParallelTransformerSentenceEncoderLayer(TransformerSentenceEncoderLayer):
"""
Implements a Model Parallel Transformer Encoder Layer used in
BERT/XLM style pre-trained models.
"""
def build_fc1(self, input_dim, output_dim, **unused):
return ColumnParallelLinear(input_dim, output_dim, gather_output=False)
def build_fc2(self, input_dim, output_dim, **unused):
return RowParallelLinear(input_dim, output_dim, input_is_parallel=True)
def build_self_attention(
self,
embed_dim,
num_attention_heads,
dropout,
**kwargs,
):
return ModelParallelMultiheadAttention(
embed_dim,
num_attention_heads,
dropout=dropout,
self_attention=True
)
def forward(
self,
x: torch.Tensor,
self_attn_mask: torch.Tensor = None,
self_attn_padding_mask: torch.Tensor = None,
):
"""
LayerNorm is applied either before or after the self-attention/ffn
modules similar to the original Transformer imlementation.
"""
residual = x
x = self.self_attn_layer_norm(x)
x, attn = self.self_attn(
query=x,
key=x,
value=x,
key_padding_mask=self_attn_padding_mask,
need_weights=False,
attn_mask=self_attn_mask,
)
x = self.dropout_module(x)
x = residual + x
residual = x
x = self.final_layer_norm(x)
x = self.activation_fn(self.fc1(x))
x = self.activation_dropout_module(x)
x = self.fc2(x)
x = self.dropout_module(x)
x = residual + x
return x, None
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/dataclass/data_class.py | fairseq/dataclass/data_class.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass, field
from typing import Any, List, Optional, Dict, Type, Tuple
import torch
from fairseq.data.indexed_dataset import get_available_dataset_impl
from fairseq.dataclass.utils import FairseqDataclass, ChoiceEnum
import sys
from fairseq.tasks import TASK_DATACLASS_REGISTRY
from fairseq.models import ARCH_DATACLASS_REGISTRY
from fairseq.criterions import CRITERION_DATACLASS_REGISTRY
from fairseq.optim import OPTIMIZER_DATACLASS_REGISTRY
from fairseq.optim.bmuf import FairseqBMUFConfig
from fairseq.optim.lr_scheduler import LR_SCHEDULER_DATACLASS_REGISTRY
from hydra.core.config_store import ConfigStore
from argparse import Namespace
LOG_FORMAT_CHOICES = ChoiceEnum(["json", "none", "simple", "tqdm"])
DDP_BACKEND_CHOICES = ChoiceEnum(["c10d", "no_c10d"])
DISTRIBUTED_WRAPPER_CHOICES = ChoiceEnum(["DDP", "SlowMo"])
@dataclass
class CommonParams(FairseqDataclass):
# This is the core dataclass including common parameters shared by all different jobs. Please append your params to other dataclasses if they were
# used for a particular purpose or task, such as those dedicated for `distributed training`, `optimization`, etc.
no_progress_bar: bool = field(
default=False, metadata={"help": "disable progress bar"}
)
log_interval: int = field(
default=100,
metadata={
"help": "log progress every N batches (when progress bar is disabled)"
},
)
log_format: Optional[LOG_FORMAT_CHOICES] = field(
default=None, metadata={"help": "log format to use"}
)
tensorboard_logdir: Optional[str] = field(
default=None,
metadata={
"help": "path to save logs for tensorboard, should match --logdir "
"of running tensorboard (default: no tensorboard logging)"
},
)
seed: int = field(
default=1, metadata={"help": "pseudo random number generator seed"}
)
cpu: bool = field(default=False, metadata={"help": "use CPU instead of CUDA"})
tpu: bool = field(default=False, metadata={"help": "use TPU instead of CUDA"})
bf16: bool = field(default=False, metadata={"help": "use bfloat16; implies --tpu"})
memory_efficient_bf16: bool = field(
default=False,
metadata={
"help": "use a memory-efficient version of BF16 training; implies --bf16"
},
)
fp16: bool = field(default=False, metadata={"help": "use FP16"})
memory_efficient_fp16: bool = field(
default=False,
metadata={
"help": "use a memory-efficient version of FP16 training; implies --fp16"
},
)
fp16_no_flatten_grads: bool = field(
default=False, metadata={"help": "don't flatten FP16 grads tensor"}
)
fp16_init_scale: int = field(
default=2 ** 7, metadata={"help": "default FP16 loss scale"}
)
fp16_scale_window: Optional[int] = field(
default=None,
metadata={"help": "number of updates before increasing loss scale"},
)
fp16_scale_tolerance: float = field(
default=0.0,
metadata={
"help": "pct of updates that can overflow before decreasing the loss scale"
},
)
min_loss_scale: float = field(
default=1e-4,
metadata={"help": "minimum FP16 loss scale, after which training is stopped"},
)
threshold_loss_scale: Optional[float] = field(
default=None, metadata={"help": "threshold FP16 loss scale from below"}
)
user_dir: Optional[str] = field(
default=None,
metadata={
"help": "path to a python module containing custom extensions (tasks and/or architectures)"
},
)
empty_cache_freq: int = field(
default=0,
metadata={"help": "how often to clear the PyTorch CUDA cache (0 to disable)"},
)
all_gather_list_size: int = field(
default=16384,
metadata={"help": "number of bytes reserved for gathering stats from workers"},
)
model_parallel_size: int = field(
default=1, metadata={"help": "total number of GPUs to parallelize model over"}
)
checkpoint_suffix: str = field(
default="", metadata={"help": "suffix to add to the checkpoint file name"}
)
quantization_config_path: Optional[str] = field(
default=None, metadata={"help": "path to quantization config file"}
)
profile: bool = field(
default=False, metadata={"help": "enable autograd profiler emit_nvtx"}
)
@dataclass
class DistributedTrainingParams(FairseqDataclass):
distributed_world_size: int = field(
default=max(1, torch.cuda.device_count()),
metadata={
"help": "total number of GPUs across all nodes (default: all visible GPUs)"
},
)
distributed_rank: Optional[int] = field(
default=0, metadata={"help": "rank of the current worker"}
)
distributed_backend: str = field(
default="nccl", metadata={"help": "distributed backend"}
)
distributed_init_method: Optional[str] = field(
default=None,
metadata={
"help": "typically tcp://hostname:port that will be used to "
"establish initial connetion"
},
)
distributed_port: int = field(
default=-1,
metadata={
"help": "port number (not required if using --distributed-init-method)"
},
)
device_id: int = field(
default=0,
metadata={"help": "which GPU to use (usually configured automatically)"},
)
local_rank: int = field(
default=0,
metadata={"help": "which GPU to use (usually configured automatically)"},
)
distributed_no_spawn: bool = field(
default=False,
metadata={
"help": "do not spawn multiple processes even if multiple GPUs are visible"
},
)
ddp_backend: DDP_BACKEND_CHOICES = field(
default="c10d", metadata={"help": "DistributedDataParallel backend"}
)
bucket_cap_mb: int = field(
default=25, metadata={"help": "bucket size for reduction"}
)
fix_batches_to_gpus: bool = field(
default=False,
metadata={
"help": "don't shuffle batches between GPUs; this reduces overall "
"randomness and may affect precision but avoids the cost of re-reading the data"
},
)
find_unused_parameters: bool = field(
default=False,
metadata={
"help": "disable unused parameter detection (not applicable to "
"no_c10d ddp-backend"
},
)
fast_stat_sync: bool = field(
default=False,
metadata={"help": "[deprecated] this is now defined per Criterion"},
)
broadcast_buffers: bool = field(
default=False,
metadata={
"help": "Copy non-trainable parameters between GPUs, such as "
"batchnorm population statistics"
},
)
distributed_wrapper: DISTRIBUTED_WRAPPER_CHOICES = field(
default="DDP", metadata={"help": "DistributedDataParallel backend"}
)
slowmo_momentum: Optional[float] = field(
default=None,
metadata={
"help": "SlowMo momentum term; by default use 0.0 for 16 GPUs, "
"0.2 for 32 GPUs; 0.5 for 64 GPUs, 0.6 for > 64 GPUs"
},
)
slowmo_algorithm: str = field(
default="LocalSGD", metadata={"help": "whether to use LocalSGD or SGP"}
)
localsgd_frequency: int = field(
default=3, metadata={"help": "Local SGD allreduce frequency"}
)
nprocs_per_node: int = field(
default=max(1, torch.cuda.device_count()),
metadata={
"help": "number of GPUs in each node. An allreduce operation across GPUs in "
"a node is very fast. Hence, we do allreduce across GPUs in a node, "
"and gossip across different nodes"
},
)
@dataclass
class DatasetParams(FairseqDataclass):
num_workers: int = field(
default=1, metadata={"help": "how many subprocesses to use for data loading"}
)
skip_invalid_size_inputs_valid_test: bool = field(
default=False,
metadata={"help": "ignore too long or too short lines in valid and test set"},
)
max_tokens: Optional[int] = field(
default=None, metadata={"help": "maximum number of tokens in a batch"}
)
max_sentences: Optional[int] = field(
default=None, metadata={"help": "maximum number of sentences in a batch"}
)
batch_size: Optional[int] = field(
default=None, metadata={"help": "maximum number of sentences in a batch"}
)
required_batch_size_multiple: int = field(
default=8, metadata={"help": "batch size will be a multiplier of this value"}
)
dataset_impl: Optional[ChoiceEnum(get_available_dataset_impl())] = field(
default=None, metadata={"help": "output dataset implementation"}
)
data_buffer_size: int = field(
default=10, metadata={"help": "Number of batches to preload"}
)
train_subset: str = field(
default="train",
metadata={"help": "data subset to use for training (e.g. train, valid, test)"},
)
valid_subset: str = field(
default="valid",
metadata={
"help": "comma separated list of data subsets to use for validation"
" (e.g. train, valid, test)"
},
)
validate_interval: int = field(
default=1, metadata={"help": "validate every N epochs"}
)
validate_interval_updates: int = field(
default=0, metadata={"help": "validate every N updates"}
)
validate_after_updates: int = field(
default=0, metadata={"help": "dont validate until reaching this many updates"}
)
fixed_validation_seed: Optional[int] = field(
default=None, metadata={"help": "specified random seed for validation"}
)
disable_validation: bool = field(
default=False, metadata={"help": "disable validation"}
)
max_tokens_valid: Optional[int] = field(
default=None,
metadata={
"help": "maximum number of tokens in a validation batch"
" (defaults to --max-tokens)"
},
)
max_sentences_valid: Optional[int] = field(
default=None,
metadata={
"help": "maximum number of sentences in a validation batch"
" (defaults to --max-sentences)"
},
)
curriculum: int = field(
default=0, metadata={"help": "don't shuffle batches for first N epochs"}
)
gen_subset: str = field(
default="test",
metadata={"help": "data subset to generate (train, valid, test)"},
)
num_shards: int = field(
default=1, metadata={"help": "shard generation over N shards"}
)
shard_id: int = field(
default=0, metadata={"help": "id of the shard to generate (id < num_shards)"}
)
@dataclass
class OptimizationParams(FairseqDataclass):
max_epoch: int = field(
default=0, metadata={"help": "force stop training at specified epoch"}
)
max_update: int = field(
default=0, metadata={"help": "force stop training at specified update"}
)
stop_time_hours: float = field(
default=0,
metadata={
"help": "force stop training after specified cumulative time (if >0)"
},
)
clip_norm: float = field(
default=25.0, metadata={"help": "clip threshold of gradients"}
)
sentence_avg: bool = field(
default=False,
metadata={
"help": "normalize gradients by the number of sentences in a batch"
" (default is to normalize by number of tokens)"
},
)
update_freq: List[int] = field(
default_factory=lambda: [1],
metadata={"help": "update parameters every N_i batches, when in epoch i"},
)
lr: List[float] = field(
default_factory=lambda: [0.25],
metadata={
"help": "learning rate for the first N epochs; all epochs >N using LR_N"
" (note: this may be interpreted differently depending on --lr-scheduler)"
},
)
min_lr: float = field(
default=-1.0,
metadata={"help": "stop training when the learning rate reaches this minimum"},
)
use_bmuf: bool = field(
default=False,
metadata={
"help": "specify global optimizer for syncing models on different GPUs/shards"
},
)
@dataclass
class CheckpointParams(FairseqDataclass):
save_dir: str = field(
default="checkpoints", metadata={"help": "path to save checkpoints"}
)
restore_file: str = field(
default="checkpoint_last.pt",
metadata={
"help": "filename from which to load checkpoint "
"(default: <save-dir>/checkpoint_last.pt"
},
)
finetune_from_model: Optional[str] = field(
default=None,
metadata={
"help": "finetune from a pretrained model; note that meters and lr scheduler will be reset"
},
)
reset_dataloader: bool = field(
default=False,
metadata={
"help": "if set, does not reload dataloader state from the checkpoint"
},
)
reset_lr_scheduler: bool = field(
default=False,
metadata={
"help": "if set, does not load lr scheduler state from the checkpoint"
},
)
reset_meters: bool = field(
default=False,
metadata={"help": "if set, does not load meters from the checkpoint"},
)
reset_optimizer: bool = field(
default=False,
metadata={"help": "if set, does not load optimizer state from the checkpoint"},
)
optimizer_overrides: str = field(
default="{}",
metadata={
"help": "a dictionary used to override optimizer args when loading a checkpoint"
},
)
save_interval: int = field(
default=1, metadata={"help": "save a checkpoint every N epochs"}
)
save_interval_updates: int = field(
default=0, metadata={"help": "save a checkpoint (and validate) every N updates"}
)
keep_interval_updates: int = field(
default=-1,
metadata={
"help": "keep the last N checkpoints saved with --save-interval-updates"
},
)
keep_last_epochs: int = field(
default=-1, metadata={"help": "keep last N epoch checkpoints"}
)
keep_best_checkpoints: int = field(
default=-1, metadata={"help": "keep best N checkpoints based on scores"}
)
no_save: bool = field(
default=False, metadata={"help": "don't save models or checkpoints"}
)
no_epoch_checkpoints: bool = field(
default=False, metadata={"help": "only store last and best checkpoints"}
)
no_last_checkpoints: bool = field(
default=False, metadata={"help": "don't store last checkpoints"}
)
no_save_optimizer_state: bool = field(
default=False,
metadata={"help": "don't save optimizer-state as part of checkpoint"},
)
best_checkpoint_metric: str = field(
default="loss", metadata={"help": 'metric to use for saving "best" checkpoints'}
)
maximize_best_checkpoint_metric: bool = field(
default=False,
metadata={
"help": 'select the largest metric value for saving "best" checkpoints'
},
)
patience: int = field(
default=-1,
metadata={
"help": (
"early stop training if valid performance doesn't "
"improve for N consecutive validation runs; note "
"that this is influenced by --validate-interval"
)
},
)
@dataclass
class CommonEvalParams(FairseqDataclass):
path: Optional[str] = field(
default=None, metadata={"help": "path(s) to model file(s), colon separated"}
)
remove_bpe: Optional[str] = field(
default=None,
metadata={
"help": "remove BPE tokens before scoring (can be set to sentencepiece)"
},
)
quiet: bool = field(default=False, metadata={"help": "only print final scores"})
model_overrides: str = field(
default="{}",
metadata={
"help": "a dictionary used to override model args at generation that were used during model training"
},
)
results_path: Optional[str] = field(
default=None, metadata={"help": "path to save eval results (optional)"}
)
@dataclass
class EvalLMParams(FairseqDataclass):
output_word_probs: bool = field(
default=False,
metadata={
"help": "if set, outputs words and their predicted log probabilities to standard output"
},
)
output_word_stats: bool = field(
default=False,
metadata={
"help": "if set, outputs word statistics such as word count, average probability, etc"
},
)
context_window: int = field(
default=0,
metadata={
"help": "ensures that every evaluated token has access to a context of at least this size, if possible"
},
)
softmax_batch: int = field(
default=sys.maxsize,
metadata={
"help": "if BxT is more than this, will batch the softmax over vocab to this amount of tokens, in order to fit into GPU memory"
},
)
@dataclass
class TrainingConfig(FairseqDataclass):
"""Config for training, a composition of training params"""
common: CommonParams = CommonParams()
distributed_training: DistributedTrainingParams = DistributedTrainingParams()
dataset: DatasetParams = DatasetParams()
optimization: OptimizationParams = OptimizationParams()
checkpoint: CheckpointParams = CheckpointParams()
bmuf: FairseqBMUFConfig = FairseqBMUFConfig()
@dataclass
class EvalLMConfig(FairseqDataclass):
"""Config for eval lm, a composition of eval_lm params"""
common: CommonParams = CommonParams()
distributed_training: DistributedTrainingParams = DistributedTrainingParams()
dataset: DatasetParams = DatasetParams()
optimization: OptimizationParams = OptimizationParams()
checkpoint: CheckpointParams = CheckpointParams()
bmuf: FairseqBMUFConfig = FairseqBMUFConfig()
common_eval: CommonEvalParams = CommonEvalParams()
eval_lm: EvalLMParams = EvalLMParams()
def register_params_dataclass(
cs: ConfigStore, name: str, group: str, data_class: Type[FairseqDataclass]
) -> None:
"""register params dataclass in config store"""
node_ = data_class(_name=data_class.name())
cs.store(name=name, group=group, node=node_)
def register_module_dataclass(
cs: ConfigStore, registry: Dict[str, Any], group: str
) -> None:
"""register dataclasses defined in modules in config store, for example, in migrated tasks, models, etc."""
# note that if `group == model`, we register all model archs, not the model name.
for k, v in registry.items():
if v is not None:
node_ = v(_name=k)
cs.store(name=k, group=group, node=node_)
def register_training_hydra_cfg(cs: ConfigStore, name: str = "default") -> None:
"""cs: config store instance, register common training configs"""
register_params_dataclass(
cs, name="training_params", group="params", data_class=TrainingConfig
)
register_module_dataclass(cs, TASK_DATACLASS_REGISTRY, "task")
register_module_dataclass(cs, ARCH_DATACLASS_REGISTRY, "model")
register_module_dataclass(cs, CRITERION_DATACLASS_REGISTRY, "criterion")
register_module_dataclass(cs, OPTIMIZER_DATACLASS_REGISTRY, "optimizer")
register_module_dataclass(cs, LR_SCHEDULER_DATACLASS_REGISTRY, "lr_scheduler")
def register_eval_lm_hydra_cfg(cs: ConfigStore, name: str = "default") -> None:
"""cs: config store instance, register common training configs"""
register_params_dataclass(
cs, name="eval_lm_params", group="params", data_class=EvalLMConfig
)
register_module_dataclass(cs, TASK_DATACLASS_REGISTRY, "task")
register_module_dataclass(cs, CRITERION_DATACLASS_REGISTRY, "criterion")
register_module_dataclass(cs, OPTIMIZER_DATACLASS_REGISTRY, "optimizer")
register_module_dataclass(cs, LR_SCHEDULER_DATACLASS_REGISTRY, "lr_scheduler")
def _override_attr(
sub_node: str, data_class: Type[FairseqDataclass], args: Namespace
) -> List[str]:
overrides = []
for k in data_class.__dataclass_fields__.keys():
if k == "_name":
# private member, skip
continue
if not hasattr(args, k):
# print(f"cannot override {sub_node}.{k} since args does not have attribute {k}")
continue
if getattr(args, k) is None:
overrides.append("{}.{}=null".format(sub_node, k))
elif getattr(args, k) == "":
overrides.append("{}.{}=''".format(sub_node, k))
elif isinstance(getattr(args, k), str):
if (
getattr(args, k).startswith("[")
or getattr(args, k).startswith("(")
or getattr(args, k).startswith("{")
or ("," in getattr(args, k))
):
overrides.append("{}.{}='{}'".format(sub_node, k, getattr(args, k)))
else:
overrides.append("{}.{}={}".format(sub_node, k, getattr(args, k)))
else:
overrides.append("{}.{}={}".format(sub_node, k, getattr(args, k)))
return overrides
def override_training_args(args: Namespace) -> Tuple[List[str], List[str]]:
overrides = []
overrides.extend(_override_attr("params.common", CommonParams, args))
overrides.extend(_override_attr("params.dataset", DatasetParams, args))
overrides.extend(
_override_attr("params.distributed_training", DistributedTrainingParams, args)
)
overrides.extend(_override_attr("params.optimization", OptimizationParams, args))
overrides.extend(_override_attr("params.checkpoint", CheckpointParams, args))
overrides.extend(_override_attr("params.bmuf", FairseqBMUFConfig, args))
module_overrides, module_deletes = override_module_args(args)
overrides.extend(module_overrides)
return overrides, module_deletes
def override_eval_lm_args(args: Namespace) -> Tuple[List[str], List[str]]:
overrides = []
overrides.extend(_override_attr("params.common", CommonParams, args))
overrides.extend(_override_attr("params.dataset", DatasetParams, args))
overrides.extend(
_override_attr("params.distributed_training", DistributedTrainingParams, args)
)
overrides.extend(_override_attr("params.common_eval", CommonEvalParams, args))
overrides.extend(_override_attr("params.eval_lm", EvalLMParams, args))
overrides.extend(_override_attr("params.bmuf", FairseqBMUFConfig, args))
module_overrides, module_deletes = override_module_args(args)
overrides.extend(module_overrides)
return overrides, module_deletes
def override_module_args(args: Namespace) -> Tuple[List[str], List[str]]:
"""use the field in args to overrides those in cfg"""
overrides = []
deletes = []
if args is not None:
assert (
hasattr(args, "task")
and hasattr(args, "criterion")
and hasattr(args, "optimizer")
and hasattr(args, "lr_scheduler")
)
if args.task in TASK_DATACLASS_REGISTRY:
overrides.append("task={}".format(args.task))
overrides.append("task._name={}".format(args.task))
overrides.extend(
_override_attr("task", TASK_DATACLASS_REGISTRY[args.task], args)
)
else:
deletes.append("task")
if args.criterion in CRITERION_DATACLASS_REGISTRY:
overrides.append("criterion={}".format(args.criterion))
overrides.append("criterion._name={}".format(args.criterion))
overrides.extend(
_override_attr(
"criterion", CRITERION_DATACLASS_REGISTRY[args.criterion], args
)
)
else:
deletes.append("criterion")
if args.optimizer in OPTIMIZER_DATACLASS_REGISTRY:
overrides.append("optimizer={}".format(args.optimizer))
overrides.append("optimizer._name={}".format(args.optimizer))
overrides.extend(
_override_attr(
"optimizer", OPTIMIZER_DATACLASS_REGISTRY[args.optimizer], args
)
)
else:
deletes.append("optimizer")
if args.lr_scheduler in LR_SCHEDULER_DATACLASS_REGISTRY:
overrides.append("lr_scheduler={}".format(args.lr_scheduler))
overrides.append("lr_scheduler._name={}".format(args.lr_scheduler))
overrides.extend(
_override_attr(
"lr_scheduler",
LR_SCHEDULER_DATACLASS_REGISTRY[args.lr_scheduler],
args,
)
)
else:
deletes.append("lr_scheduler")
if hasattr(args, "arch"):
if args.arch in ARCH_DATACLASS_REGISTRY:
overrides.append("model={}".format(args.arch))
overrides.append("model._name={}".format(args.arch))
# override model params with those exist in args
overrides.extend(
_override_attr("model", ARCH_DATACLASS_REGISTRY[args.arch], args)
)
else:
deletes.append("model")
return overrides, deletes
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/dataclass/utils.py | fairseq/dataclass/utils.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass, MISSING
from typing import Any, List, Optional, Dict
from enum import Enum
from argparse import ArgumentParser
def eval_str_list(x, x_type=float):
if x is None:
return None
if isinstance(x, str):
x = eval(x)
try:
return list(map(x_type, x))
except TypeError:
return [x_type(x)]
class StrEnum(Enum):
def __str__(self):
return self.value
def __eq__(self, other: str):
return self.value == other
def __repr__(self):
return self.value
def ChoiceEnum(choices: List[str]):
"""return the Enum class used to enforce list of choices"""
return StrEnum("Choices", {k: k for k in choices})
@dataclass
class FairseqDataclass:
"""fairseq base dataclass that supported fetching attributes and metas"""
_name: Optional[str] = None
@staticmethod
def name():
return None
def _get_all_attributes(self) -> List[str]:
return [k for k in self.__dataclass_fields__.keys()]
def _get_meta(
self, attribute_name: str, meta: str, default: Optional[Any] = None
) -> Any:
return self.__dataclass_fields__[attribute_name].metadata.get(meta, default)
def _get_name(self, attribute_name: str) -> str:
return self.__dataclass_fields__[attribute_name].name
def _get_default(self, attribute_name: str) -> Any:
if hasattr(self, attribute_name):
if str(getattr(self, attribute_name)).startswith("${"):
return str(getattr(self, attribute_name))
elif str(self.__dataclass_fields__[attribute_name].default).startswith(
"${"
):
return str(self.__dataclass_fields__[attribute_name].default)
elif (
getattr(self, attribute_name)
!= self.__dataclass_fields__[attribute_name].default
):
return getattr(self, attribute_name)
return self.__dataclass_fields__[attribute_name].default
def _get_default_factory(self, attribute_name: str) -> Any:
if hasattr(self, attribute_name):
if str(getattr(self, attribute_name)).startswith("${"):
return str(getattr(self, attribute_name))
elif str(self.__dataclass_fields__[attribute_name].default).startswith(
"${"
):
return str(self.__dataclass_fields__[attribute_name].default)
elif (
getattr(self, attribute_name)
!= self.__dataclass_fields__[attribute_name].default_factory()
):
return getattr(self, attribute_name)
return self.__dataclass_fields__[attribute_name].default_factory()
def _get_type(self, attribute_name: str) -> Any:
return self.__dataclass_fields__[attribute_name].type
def _get_help(self, attribute_name: str) -> Any:
return self._get_meta(attribute_name, "help")
def _get_choices(self, attribute_name: str) -> Any:
return self._get_meta(attribute_name, "choices")
def gen_parser_from_dataclass(
parser: ArgumentParser,
dataclass_instance: FairseqDataclass,
delete_default: bool = False,
) -> None:
"""convert a dataclass instance to tailing parser arguments"""
import re
def argparse_name(name: str):
if name == "data":
# normally data is positional args
return name
if name == "_name":
# private member, skip
return None
return "--" + name.replace("_", "-")
def interpret_dc_type(field_type):
if isinstance(field_type, str):
raise RuntimeError()
typestring = str(field_type)
if re.match(r"(typing.|^)Union\[(.*), NoneType\]$", typestring):
return field_type.__args__[0]
return field_type
def get_kwargs_from_dc(
dataclass_instance: FairseqDataclass, k: str
) -> Dict[str, Any]:
"""k: dataclass attributes"""
field_type = dataclass_instance._get_type(k)
inter_type = interpret_dc_type(field_type)
if isinstance(inter_type, type) and issubclass(inter_type, List):
field_default = dataclass_instance._get_default_factory(k)
else:
field_default = dataclass_instance._get_default(k)
if isinstance(inter_type, type) and issubclass(inter_type, Enum):
field_choices = [t.value for t in list(inter_type)]
else:
field_choices = None
field_help = dataclass_instance._get_help(k)
kwargs = {}
if isinstance(field_default, str) and field_default.startswith("${"):
kwargs["default"] = field_default
else:
if field_default is MISSING:
kwargs["required"] = True
if field_choices is not None:
kwargs["choices"] = field_choices
if (isinstance(inter_type, type) and issubclass(inter_type, List)) or (
"List" in str(inter_type)
):
if "int" in str(inter_type):
kwargs["type"] = lambda x: eval_str_list(x, int)
elif "float" in str(inter_type):
kwargs["type"] = lambda x: eval_str_list(x, float)
elif "str" in str(inter_type):
kwargs["type"] = lambda x: eval_str_list(x, str)
else:
raise NotImplementedError()
if field_default is not MISSING:
kwargs["default"] = ",".join(map(str, field_default))
elif (isinstance(inter_type, type) and issubclass(inter_type, Enum)) or (
"Enum" in str(inter_type)
):
kwargs["type"] = str
if field_default is not MISSING:
if isinstance(field_default, Enum):
kwargs["default"] = field_default.value
else:
kwargs["default"] = field_default
elif inter_type is bool:
kwargs["action"] = (
"store_false" if field_default is True else "store_true"
)
kwargs["default"] = field_default
else:
kwargs["type"] = inter_type
if field_default is not MISSING:
kwargs["default"] = field_default
kwargs["help"] = field_help
return kwargs
for k in dataclass_instance._get_all_attributes():
field_name = argparse_name(dataclass_instance._get_name(k))
if field_name is None:
continue
kwargs = get_kwargs_from_dc(dataclass_instance, k)
if isinstance(kwargs["default"], str) and kwargs["default"].startswith("${"):
continue
if delete_default:
del kwargs["default"]
parser.add_argument(field_name, **kwargs)
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/dataclass/__init__.py | fairseq/dataclass/__init__.py | python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false | |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/logging/metrics.py | fairseq/logging/metrics.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
A standalone module for aggregating metrics.
Metrics can be logged from anywhere using the `log_*` functions defined
in this module. The logged values will be aggregated dynamically based
on the aggregation context in which the logging occurs. See the
:func:`aggregate` context manager for more details.
"""
from collections import defaultdict, OrderedDict
import contextlib
import time
from typing import Callable, Dict, List, Optional
import uuid
from .meters import *
# Aggregation contexts are considered "active" when inside the scope
# created by the :func:`aggregate` context manager.
_aggregators = OrderedDict()
_active_aggregators = OrderedDict()
_active_aggregators_cnt = defaultdict(lambda: 0)
def reset() -> None:
"""Reset all metrics aggregators."""
_aggregators.clear()
_active_aggregators.clear()
_active_aggregators_cnt.clear()
# The "default" aggregator observes all logged values.
_aggregators["default"] = MetersDict()
_active_aggregators["default"] = _aggregators["default"]
_active_aggregators_cnt["default"] = 1
reset()
@contextlib.contextmanager
def aggregate(name: Optional[str] = None, new_root: bool = False):
"""Context manager to aggregate metrics under a given name.
Aggregations can be nested. If *new_root* is ``False``, then logged
metrics will be recorded along the entire stack of nested
aggregators, including a global "default" aggregator. If *new_root*
is ``True``, then this aggregator will be the root of a new
aggregation stack, thus bypassing any parent aggregators.
Note that aggregation contexts are uniquely identified by their
*name* (e.g., train, valid). Creating a context with an existing
name will reuse the corresponding :class:`MetersDict` instance.
If no name is given, then a temporary aggregator will be created.
Usage::
with metrics.aggregate("train"):
for step, batch in enumerate(epoch):
with metrics.aggregate("train_inner") as agg:
metrics.log_scalar("loss", get_loss(batch))
if step % log_interval == 0:
print(agg.get_smoothed_value("loss"))
agg.reset()
print(metrics.get_smoothed_values("train")["loss"])
Args:
name (str): name of the aggregation. Defaults to a
random/temporary name if not given explicitly.
new_root (bool): make this aggregation the root of a new
aggregation stack.
"""
if name is None:
# generate a temporary name
name = str(uuid.uuid4())
assert name not in _aggregators
agg = MetersDict()
else:
assert name != "default"
agg = _aggregators.setdefault(name, MetersDict())
if new_root:
backup_aggregators = _active_aggregators.copy()
_active_aggregators.clear()
backup_aggregators_cnt = _active_aggregators_cnt.copy()
_active_aggregators_cnt.clear()
_active_aggregators[name] = agg
_active_aggregators_cnt[name] += 1
yield agg
_active_aggregators_cnt[name] -= 1
if _active_aggregators_cnt[name] == 0 and name in _active_aggregators:
del _active_aggregators[name]
if new_root:
_active_aggregators.clear()
_active_aggregators.update(backup_aggregators)
_active_aggregators_cnt.clear()
_active_aggregators_cnt.update(backup_aggregators_cnt)
def get_active_aggregators() -> List[MetersDict]:
return list(_active_aggregators.values())
def log_scalar(
key: str,
value: float,
weight: float = 1,
priority: int = 10,
round: Optional[int] = None,
):
"""Log a scalar value.
Args:
key (str): name of the field to log
value (float): value to log
weight (float): weight that this value contributes to the average.
A weight of 0 will always log the latest value.
priority (int): smaller values are logged earlier in the output
round (Optional[int]): number of digits to round to when displaying
"""
for agg in get_active_aggregators():
if key not in agg:
agg.add_meter(key, AverageMeter(round=round), priority)
agg[key].update(value, weight)
def log_derived(key: str, fn: Callable[[MetersDict], float], priority: int = 20):
"""Log a scalar value derived from other meters.
Args:
key (str): name of the field to log
fn (Callable[[MetersDict], float]): function that takes a single
argument *meters* and returns the derived value
priority (int): smaller values are logged earlier in the output
"""
for agg in get_active_aggregators():
if key not in agg:
agg.add_meter(key, MetersDict._DerivedMeter(fn), priority)
def log_speed(
key: str,
value: float,
priority: int = 30,
round: Optional[int] = None,
):
"""Log the rate of some quantity per second.
Args:
key (str): name of the field to log
value (float): value to log
priority (int): smaller values are logged earlier in the output
round (Optional[int]): number of digits to round to when displaying
"""
for agg in get_active_aggregators():
if key not in agg:
agg.add_meter(key, TimeMeter(round=round), priority)
agg[key].reset() # reset meter on the first call
else:
agg[key].update(value)
def log_start_time(key: str, priority: int = 40, round: Optional[int] = None):
"""Log the duration of some event in seconds.
The duration will be computed once :func:`log_stop_time` is called.
Args:
key (str): name of the field to log
priority (int): smaller values are logged earlier in the output
round (Optional[int]): number of digits to round to when displaying
"""
for agg in get_active_aggregators():
if key not in agg:
agg.add_meter(key, StopwatchMeter(round=round), priority)
agg[key].start()
def log_stop_time(key: str, weight: float = 0., prehook=None):
"""Log the duration of some event in seconds.
The duration will be computed since :func:`log_start_time` was called.
Set weight > 0 to report the average time instead of the sum.
Args:
key (str): name of the field to log
weight (float): weight that this time contributes to the average
prehook (function, no arguments): will be called before the timer
is stopped. For example, use prehook=torch.cuda.synchronize to
make sure all gpu operations are done before timer is stopped.
"""
for agg in get_active_aggregators():
if key in agg:
agg[key].stop(weight, prehook)
def log_custom(
new_meter_fn: Callable[[], Meter],
key: str,
*args,
priority: int = 50,
**kwargs,
):
"""Log using a custom Meter.
Any extra *args* or *kwargs* will be passed through to the Meter's
*update* method.
Args:
new_meter_fn (Callable[[], Meter]): function that returns a new
Meter instance
key (str): name of the field to log
priority (int): smaller values are logged earlier in the output
"""
for agg in get_active_aggregators():
if key not in agg:
agg.add_meter(key, new_meter_fn(), priority)
agg[key].update(*args, **kwargs)
def reset_meter(name: str, key: str) -> None:
"""Reset Meter instance aggregated under a given *name* and *key*."""
meter = get_meter(name, key)
if meter is not None:
meter.reset()
def reset_meters(name: str) -> None:
"""Reset Meter instances aggregated under a given *name*."""
meters = get_meters(name)
if meters is not None:
meters.reset()
def get_meter(name: str, key: str) -> Meter:
"""Get a single Meter instance aggregated under *name* and *key*.
Returns:
Meter or None if no metrics have been logged under *name* and *key*.
"""
if name not in _aggregators:
return None
return _aggregators[name].get(key, None)
def get_meters(name: str) -> MetersDict:
"""Get Meter instances aggregated under a given *name*.
Returns:
MetersDict or None if no metrics have been logged under *name*.
"""
return _aggregators.get(name, None)
def get_smoothed_value(name: str, key: str) -> float:
"""Get a single smoothed value.
Raises:
KeyError: if no metrics have been logged under *name* and *key*.
"""
return _aggregators[name].get_smoothed_value(key)
def get_smoothed_values(name: str) -> Dict[str, float]:
"""Get smoothed values aggregated under a given *name*.
Raises:
KeyError: if no metrics have been logged under *name*.
"""
return _aggregators[name].get_smoothed_values()
def state_dict():
return OrderedDict([
(name, agg.state_dict())
for name, agg in _aggregators.items()
])
def load_state_dict(state_dict):
for name, agg_state in state_dict.items():
_aggregators[name] = MetersDict()
_aggregators[name].load_state_dict(agg_state)
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/logging/__init__.py | fairseq/logging/__init__.py | python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false | |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/logging/meters.py | fairseq/logging/meters.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import bisect
from collections import OrderedDict
import time
from typing import Dict, Optional
try:
import torch
def type_as(a, b):
if torch.is_tensor(a) and torch.is_tensor(b):
return a.to(b)
else:
return a
except ImportError:
torch = None
def type_as(a, b):
return a
try:
import numpy as np
except ImportError:
np = None
class Meter(object):
"""Base class for Meters."""
def __init__(self):
pass
def state_dict(self):
return {}
def load_state_dict(self, state_dict):
pass
def reset(self):
raise NotImplementedError
@property
def smoothed_value(self) -> float:
"""Smoothed value used for logging."""
raise NotImplementedError
def safe_round(number, ndigits):
if hasattr(number, '__round__'):
return round(number, ndigits)
elif torch is not None and torch.is_tensor(number) and number.numel() == 1:
return safe_round(number.item(), ndigits)
elif np is not None and np.ndim(number) == 0 and hasattr(number, 'item'):
return safe_round(number.item(), ndigits)
else:
return number
class AverageMeter(Meter):
"""Computes and stores the average and current value"""
def __init__(self, round: Optional[int] = None):
self.round = round
self.reset()
def reset(self):
self.val = None # most recent update
self.sum = 0 # sum from all updates
self.count = 0 # total n from all updates
def update(self, val, n=1):
if val is not None:
self.val = val
if n > 0:
self.sum = type_as(self.sum, val) + (val * n)
self.count = type_as(self.count, n) + n
def state_dict(self):
return {
'val': self.val,
'sum': self.sum,
'count': self.count,
'round': self.round,
}
def load_state_dict(self, state_dict):
self.val = state_dict['val']
self.sum = state_dict['sum']
self.count = state_dict['count']
self.round = state_dict.get('round', None)
@property
def avg(self):
return self.sum / self.count if self.count > 0 else self.val
@property
def smoothed_value(self) -> float:
val = self.avg
if self.round is not None and val is not None:
val = safe_round(val, self.round)
return val
class TimeMeter(Meter):
"""Computes the average occurrence of some event per second"""
def __init__(
self,
init: int = 0,
n: int = 0,
round: Optional[int] = None,
):
self.round = round
self.reset(init, n)
def reset(self, init=0, n=0):
self.init = init
self.start = time.perf_counter()
self.n = n
self.i = 0
def update(self, val=1):
self.n = type_as(self.n, val) + val
self.i += 1
def state_dict(self):
return {
'init': self.elapsed_time,
'n': self.n,
'round': self.round,
}
def load_state_dict(self, state_dict):
if 'start' in state_dict:
# backwards compatibility for old state_dicts
self.reset(init=state_dict['init'])
else:
self.reset(init=state_dict['init'], n=state_dict['n'])
self.round = state_dict.get('round', None)
@property
def avg(self):
return self.n / self.elapsed_time
@property
def elapsed_time(self):
return self.init + (time.perf_counter() - self.start)
@property
def smoothed_value(self) -> float:
val = self.avg
if self.round is not None and val is not None:
val = safe_round(val, self.round)
return val
class StopwatchMeter(Meter):
"""Computes the sum/avg duration of some event in seconds"""
def __init__(self, round: Optional[int] = None):
self.round = round
self.sum = 0
self.n = 0
self.start_time = None
def start(self):
self.start_time = time.perf_counter()
def stop(self, n=1, prehook=None):
if self.start_time is not None:
if prehook is not None:
prehook()
delta = time.perf_counter() - self.start_time
self.sum = self.sum + delta
self.n = type_as(self.n, n) + n
def reset(self):
self.sum = 0 # cumulative time during which stopwatch was active
self.n = 0 # total n across all start/stop
self.start()
def state_dict(self):
return {
'sum': self.sum,
'n': self.n,
'round': self.round,
}
def load_state_dict(self, state_dict):
self.sum = state_dict['sum']
self.n = state_dict['n']
self.start_time = None
self.round = state_dict.get('round', None)
@property
def avg(self):
return self.sum / self.n if self.n > 0 else self.sum
@property
def elapsed_time(self):
if self.start_time is None:
return 0.
return time.perf_counter() - self.start_time
@property
def smoothed_value(self) -> float:
val = self.avg if self.sum > 0 else self.elapsed_time
if self.round is not None and val is not None:
val = safe_round(val, self.round)
return val
class MetersDict(OrderedDict):
"""A sorted dictionary of :class:`Meters`.
Meters are sorted according to a priority that is given when the
meter is first added to the dictionary.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.priorities = []
def __setitem__(self, key, value):
assert key not in self, "MetersDict doesn't support reassignment"
priority, value = value
bisect.insort(self.priorities, (priority, len(self.priorities), key))
super().__setitem__(key, value)
for _, _, key in self.priorities: # reorder dict to match priorities
self.move_to_end(key)
def add_meter(self, key, meter, priority):
self.__setitem__(key, (priority, meter))
def state_dict(self):
return [
(pri, key, self[key].__class__.__name__, self[key].state_dict())
for pri, _, key in self.priorities
# can't serialize DerivedMeter instances
if not isinstance(self[key], MetersDict._DerivedMeter)
]
def load_state_dict(self, state_dict):
self.clear()
self.priorities.clear()
for pri, key, meter_cls, meter_state in state_dict:
meter = globals()[meter_cls]()
meter.load_state_dict(meter_state)
self.add_meter(key, meter, pri)
def get_smoothed_value(self, key: str) -> float:
"""Get a single smoothed value."""
meter = self[key]
if isinstance(meter, MetersDict._DerivedMeter):
return meter.fn(self)
else:
return meter.smoothed_value
def get_smoothed_values(self) -> Dict[str, float]:
"""Get all smoothed values."""
return OrderedDict([
(key, self.get_smoothed_value(key))
for key in self.keys()
if not key.startswith("_")
])
def reset(self):
"""Reset Meter instances."""
for meter in self.values():
if isinstance(meter, MetersDict._DerivedMeter):
continue
meter.reset()
class _DerivedMeter(Meter):
"""A Meter whose values are derived from other Meters."""
def __init__(self, fn):
self.fn = fn
def reset(self):
pass
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/logging/progress_bar.py | fairseq/logging/progress_bar.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Wrapper around various loggers and progress bars (e.g., tqdm).
"""
import atexit
import json
import logging
import os
import sys
from collections import OrderedDict
from contextlib import contextmanager
from numbers import Number
from typing import Optional
import torch
from .meters import AverageMeter, StopwatchMeter, TimeMeter
logger = logging.getLogger(__name__)
def progress_bar(
iterator,
log_format: Optional[str] = None,
log_interval: int = 100,
epoch: Optional[int] = None,
prefix: Optional[str] = None,
tensorboard_logdir: Optional[str] = None,
default_log_format: str = 'tqdm',
):
if log_format is None:
log_format = default_log_format
if log_format == 'tqdm' and not sys.stderr.isatty():
log_format = 'simple'
if log_format == 'json':
bar = JsonProgressBar(iterator, epoch, prefix, log_interval)
elif log_format == 'none':
bar = NoopProgressBar(iterator, epoch, prefix)
elif log_format == 'simple':
bar = SimpleProgressBar(iterator, epoch, prefix, log_interval)
elif log_format == 'tqdm':
bar = TqdmProgressBar(iterator, epoch, prefix)
else:
raise ValueError('Unknown log format: {}'.format(log_format))
if tensorboard_logdir:
try:
# [FB only] custom wrapper for TensorBoard
import palaas # noqa
from .fb_tbmf_wrapper import FbTbmfWrapper
bar = FbTbmfWrapper(bar, log_interval)
except ImportError:
bar = TensorboardProgressBarWrapper(bar, tensorboard_logdir)
return bar
def build_progress_bar(
args,
iterator,
epoch: Optional[int] = None,
prefix: Optional[str] = None,
default: str = 'tqdm',
no_progress_bar: str = 'none',
):
"""Legacy wrapper that takes an argparse.Namespace."""
if getattr(args, 'no_progress_bar', False):
default = no_progress_bar
if getattr(args, 'distributed_rank', 0) == 0:
tensorboard_logdir = getattr(args, 'tensorboard_logdir', None)
else:
tensorboard_logdir = None
return progress_bar(
iterator,
log_format=args.log_format,
log_interval=args.log_interval,
epoch=epoch,
prefix=prefix,
tensorboard_logdir=tensorboard_logdir,
default_log_format=default,
)
def format_stat(stat):
if isinstance(stat, Number):
stat = '{:g}'.format(stat)
elif isinstance(stat, AverageMeter):
stat = '{:.3f}'.format(stat.avg)
elif isinstance(stat, TimeMeter):
stat = '{:g}'.format(round(stat.avg))
elif isinstance(stat, StopwatchMeter):
stat = '{:g}'.format(round(stat.sum))
elif torch.is_tensor(stat):
stat = stat.tolist()
return stat
class BaseProgressBar(object):
"""Abstract class for progress bars."""
def __init__(self, iterable, epoch=None, prefix=None):
self.iterable = iterable
self.n = getattr(iterable, 'n', 0)
self.epoch = epoch
self.prefix = ''
if epoch is not None:
self.prefix += 'epoch {:03d}'.format(epoch)
if prefix is not None:
self.prefix += ' | {}'.format(prefix)
def __len__(self):
return len(self.iterable)
def __enter__(self):
return self
def __exit__(self, *exc):
return False
def __iter__(self):
raise NotImplementedError
def log(self, stats, tag=None, step=None):
"""Log intermediate stats according to log_interval."""
raise NotImplementedError
def print(self, stats, tag=None, step=None):
"""Print end-of-epoch stats."""
raise NotImplementedError
def _str_commas(self, stats):
return ', '.join(key + '=' + stats[key].strip()
for key in stats.keys())
def _str_pipes(self, stats):
return ' | '.join(key + ' ' + stats[key].strip()
for key in stats.keys())
def _format_stats(self, stats):
postfix = OrderedDict(stats)
# Preprocess stats according to datatype
for key in postfix.keys():
postfix[key] = str(format_stat(postfix[key]))
return postfix
@contextmanager
def rename_logger(logger, new_name):
old_name = logger.name
if new_name is not None:
logger.name = new_name
yield logger
logger.name = old_name
class JsonProgressBar(BaseProgressBar):
"""Log output in JSON format."""
def __init__(self, iterable, epoch=None, prefix=None, log_interval=1000):
super().__init__(iterable, epoch, prefix)
self.log_interval = log_interval
self.i = None
self.size = None
def __iter__(self):
self.size = len(self.iterable)
for i, obj in enumerate(self.iterable, start=self.n):
self.i = i
yield obj
def log(self, stats, tag=None, step=None):
"""Log intermediate stats according to log_interval."""
step = step or self.i or 0
if (
step > 0
and self.log_interval is not None
and step % self.log_interval == 0
):
update = (
self.epoch - 1 + (self.i + 1) / float(self.size)
if self.epoch is not None
else None
)
stats = self._format_stats(stats, epoch=self.epoch, update=update)
with rename_logger(logger, tag):
logger.info(json.dumps(stats))
def print(self, stats, tag=None, step=None):
"""Print end-of-epoch stats."""
self.stats = stats
if tag is not None:
self.stats = OrderedDict([(tag + '_' + k, v) for k, v in self.stats.items()])
stats = self._format_stats(self.stats, epoch=self.epoch)
with rename_logger(logger, tag):
logger.info(json.dumps(stats))
def _format_stats(self, stats, epoch=None, update=None):
postfix = OrderedDict()
if epoch is not None:
postfix['epoch'] = epoch
if update is not None:
postfix['update'] = round(update, 3)
# Preprocess stats according to datatype
for key in stats.keys():
postfix[key] = format_stat(stats[key])
return postfix
class NoopProgressBar(BaseProgressBar):
"""No logging."""
def __init__(self, iterable, epoch=None, prefix=None):
super().__init__(iterable, epoch, prefix)
def __iter__(self):
for obj in self.iterable:
yield obj
def log(self, stats, tag=None, step=None):
"""Log intermediate stats according to log_interval."""
pass
def print(self, stats, tag=None, step=None):
"""Print end-of-epoch stats."""
pass
class SimpleProgressBar(BaseProgressBar):
"""A minimal logger for non-TTY environments."""
def __init__(self, iterable, epoch=None, prefix=None, log_interval=1000):
super().__init__(iterable, epoch, prefix)
self.log_interval = log_interval
self.i = None
self.size = None
def __iter__(self):
self.size = len(self.iterable)
for i, obj in enumerate(self.iterable, start=self.n):
self.i = i
yield obj
def log(self, stats, tag=None, step=None):
"""Log intermediate stats according to log_interval."""
step = step or self.i or 0
if (
step > 0
and self.log_interval is not None
and step % self.log_interval == 0
):
stats = self._format_stats(stats)
postfix = self._str_commas(stats)
with rename_logger(logger, tag):
logger.info(
'{}: {:5d} / {:d} {}'
.format(self.prefix, self.i + 1, self.size, postfix)
)
def print(self, stats, tag=None, step=None):
"""Print end-of-epoch stats."""
postfix = self._str_pipes(self._format_stats(stats))
with rename_logger(logger, tag):
logger.info('{} | {}'.format(self.prefix, postfix))
class TqdmProgressBar(BaseProgressBar):
"""Log to tqdm."""
def __init__(self, iterable, epoch=None, prefix=None):
super().__init__(iterable, epoch, prefix)
from tqdm import tqdm
self.tqdm = tqdm(
iterable,
self.prefix,
leave=False,
disable=(logger.getEffectiveLevel() > logging.INFO),
)
def __iter__(self):
return iter(self.tqdm)
def log(self, stats, tag=None, step=None):
"""Log intermediate stats according to log_interval."""
self.tqdm.set_postfix(self._format_stats(stats), refresh=False)
def print(self, stats, tag=None, step=None):
"""Print end-of-epoch stats."""
postfix = self._str_pipes(self._format_stats(stats))
with rename_logger(logger, tag):
logger.info('{} | {}'.format(self.prefix, postfix))
try:
_tensorboard_writers = {}
from tensorboardX import SummaryWriter
except ImportError:
SummaryWriter = None
def _close_writers():
for w in _tensorboard_writers.values():
w.close()
atexit.register(_close_writers)
class TensorboardProgressBarWrapper(BaseProgressBar):
"""Log to tensorboard."""
def __init__(self, wrapped_bar, tensorboard_logdir):
self.wrapped_bar = wrapped_bar
self.tensorboard_logdir = tensorboard_logdir
if SummaryWriter is None:
logger.warning(
"tensorboard not found, please install with: pip install tensorboardX"
)
def _writer(self, key):
if SummaryWriter is None:
return None
_writers = _tensorboard_writers
if key not in _writers:
_writers[key] = SummaryWriter(os.path.join(self.tensorboard_logdir, key))
_writers[key].add_text('sys.argv', " ".join(sys.argv))
return _writers[key]
def __iter__(self):
return iter(self.wrapped_bar)
def log(self, stats, tag=None, step=None):
"""Log intermediate stats to tensorboard."""
self._log_to_tensorboard(stats, tag, step)
self.wrapped_bar.log(stats, tag=tag, step=step)
def print(self, stats, tag=None, step=None):
"""Print end-of-epoch stats."""
self._log_to_tensorboard(stats, tag, step)
self.wrapped_bar.print(stats, tag=tag, step=step)
def _log_to_tensorboard(self, stats, tag=None, step=None):
writer = self._writer(tag or '')
if writer is None:
return
if step is None:
step = stats['num_updates']
for key in stats.keys() - {'num_updates'}:
if isinstance(stats[key], AverageMeter):
writer.add_scalar(key, stats[key].val, step)
elif isinstance(stats[key], Number):
writer.add_scalar(key, stats[key], step)
writer.flush()
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/benchmark/dummy_mt.py | fairseq/benchmark/dummy_mt.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import numpy as np
import torch
from fairseq.data import Dictionary, FairseqDataset
from fairseq.tasks import register_task, LegacyFairseqTask
logger = logging.getLogger(__name__)
@register_task('dummy_mt')
class DummyMTTask(LegacyFairseqTask):
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
parser.add_argument('--dict-size', default=49996, type=int)
parser.add_argument('--dataset-size', default=100000, type=int)
parser.add_argument('--src-len', default=30, type=int)
parser.add_argument('--tgt-len', default=30, type=int)
def __init__(self, args, dictionary):
super().__init__(args)
self.dictionary = dictionary
self.seed = args.seed
dictionary.pad_to_multiple_(8) # often faster if divisible by 8
self.dummy_src = torch.arange(args.src_len + 1) + dictionary.pad() + 1
self.dummy_tgt = torch.arange(args.tgt_len + 1) + dictionary.pad() + 1
@classmethod
def setup_task(cls, args, **kwargs):
"""Setup the task. """
dictionary = Dictionary()
for i in range(args.dict_size):
dictionary.add_symbol('word{}'.format(i))
logger.info('dictionary: {} types'.format(len(dictionary)))
args.max_source_positions = args.src_len + dictionary.pad() + 2
args.max_target_positions = args.tgt_len + dictionary.pad() + 2
return cls(args, dictionary)
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
item_size = max(self.args.src_len, self.args.tgt_len)
if self.args.max_sentences is not None:
bsz = self.args.max_sentences
else:
bsz = max(1, self.args.max_tokens // item_size)
tgt = torch.stack([self.dummy_tgt for _ in range(bsz)])
self.datasets[split] = DummyDataset(
{
'id': 1,
'net_input': {
'src_tokens': torch.stack([self.dummy_src for _ in range(bsz)]),
'src_lengths': torch.full(
(bsz, ), self.args.src_len, dtype=torch.long
),
'prev_output_tokens': tgt.clone(),
},
'target': tgt,
'nsentences': bsz,
'ntokens': bsz * self.args.tgt_len,
},
num_items=self.args.dataset_size,
item_size=item_size,
)
@property
def source_dictionary(self):
return self.dictionary
@property
def target_dictionary(self):
return self.dictionary
class DummyDataset(FairseqDataset):
def __init__(self, batch, num_items, item_size):
super().__init__()
self.batch = batch
self.num_items = num_items
self.item_size = item_size
def __getitem__(self, index):
return index
def __len__(self):
return self.num_items
def collater(self, samples):
return self.batch
@property
def sizes(self):
return np.array([self.item_size] * self.num_items)
def num_tokens(self, index):
return self.item_size
def size(self, index):
return self.item_size
def ordered_indices(self):
return np.arange(self.num_items)
@property
def supports_prefetch(self):
return False
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/benchmark/dummy_model.py | fairseq/benchmark/dummy_model.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn as nn
import torch.nn.functional as F
from fairseq.data import Dictionary
from fairseq.models import (
FairseqDecoder,
FairseqLanguageModel,
register_model,
register_model_architecture,
)
@register_model('dummy_model')
class DummyModel(FairseqLanguageModel):
def __init__(self, args, encoder):
super().__init__(encoder)
self.args = args
@staticmethod
def add_args(parser):
parser.add_argument('--num-layers', type=int, default=24)
parser.add_argument('--embed-dim', type=int, default=1024)
@classmethod
def build_model(cls, args, task):
encoder = DummyEncoder(
num_embed=len(task.target_dictionary),
embed_dim=args.embed_dim,
num_layers=args.num_layers,
)
return cls(args, encoder)
def forward(self, src_tokens, masked_tokens=None, **kwargs):
return self.decoder(src_tokens, masked_tokens=masked_tokens)
class DummyEncoder(FairseqDecoder):
def __init__(self, num_embed=50000, embed_dim=1024, num_layers=24):
super().__init__(Dictionary())
self.embed = nn.Embedding(
num_embeddings=num_embed, embedding_dim=embed_dim, padding_idx=0
)
self.layers_a = nn.ModuleList([
nn.Sequential(
nn.LayerNorm(embed_dim),
nn.Linear(embed_dim, 3*embed_dim), # q, k, v input projection
nn.Linear(3*embed_dim, embed_dim), # skip self-attention
nn.Linear(embed_dim, embed_dim), # output projection
nn.Dropout(),
)
for i in range(num_layers)
])
self.layers_b = nn.ModuleList([
nn.Sequential(
nn.LayerNorm(embed_dim),
nn.Linear(embed_dim, 4*embed_dim), # FFN
nn.ReLU(),
nn.Linear(4*embed_dim, embed_dim), # FFN
nn.Dropout(0.1),
)
for i in range(num_layers)
])
self.out_proj = nn.Linear(embed_dim, num_embed)
def forward(self, tokens, masked_tokens=None):
x = self.embed(tokens)
for layer_a, layer_b in zip(self.layers_a, self.layers_b):
x = x + layer_a(x)
x = x + layer_b(x)
x = self.out_proj(x)
if masked_tokens is not None:
x = x[masked_tokens]
return (x,)
def max_positions(self):
return 1024
def get_normalized_probs(self, net_output, log_probs, sample=None):
logits = net_output[0].float()
if log_probs:
return F.log_softmax(logits, dim=-1)
else:
return F.softmax(logits, dim=-1)
@register_model_architecture('dummy_model', 'dummy_model')
def base_architecture(args):
pass
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/benchmark/dummy_masked_lm.py | fairseq/benchmark/dummy_masked_lm.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import numpy as np
import torch
from fairseq.data import Dictionary, FairseqDataset
from fairseq.tasks import register_task, LegacyFairseqTask
logger = logging.getLogger(__name__)
@register_task('dummy_masked_lm')
class DummyMaskedLMTask(LegacyFairseqTask):
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
parser.add_argument('--dict-size', default=49995, type=int)
parser.add_argument('--dataset-size', default=100000, type=int)
parser.add_argument('--tokens-per-sample', default=512, type=int,
help='max number of total tokens over all segments '
'per sample for BERT dataset')
def __init__(self, args, dictionary):
super().__init__(args)
self.dictionary = dictionary
self.seed = args.seed
# add mask token
self.mask_idx = dictionary.add_symbol('<mask>')
dictionary.pad_to_multiple_(8) # often faster if divisible by 8
mask_idx = 0
pad_idx = 1
seq = torch.arange(args.tokens_per_sample) + pad_idx + 1
mask = torch.arange(2, args.tokens_per_sample, 7) # ~15%
src = seq.clone()
src[mask] = mask_idx
tgt = torch.full_like(seq, pad_idx)
tgt[mask] = seq[mask]
self.dummy_src = src
self.dummy_tgt = tgt
@classmethod
def setup_task(cls, args, **kwargs):
"""Setup the task. """
dictionary = Dictionary()
for i in range(args.dict_size):
dictionary.add_symbol('word{}'.format(i))
logger.info('dictionary: {} types'.format(len(dictionary)))
return cls(args, dictionary)
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
if self.args.max_sentences is not None:
bsz = self.args.max_sentences
else:
bsz = max(1, self.args.max_tokens // self.args.tokens_per_sample)
self.datasets[split] = DummyDataset(
{
'id': 1,
'net_input': {
'src_tokens': torch.stack([self.dummy_src for _ in range(bsz)]),
'src_lengths': torch.full(
(bsz, ), self.args.tokens_per_sample, dtype=torch.long
),
},
'target': torch.stack([self.dummy_tgt for _ in range(bsz)]),
'nsentences': bsz,
'ntokens': bsz * self.args.tokens_per_sample,
},
num_items=self.args.dataset_size,
item_size=self.args.tokens_per_sample,
)
@property
def source_dictionary(self):
return self.dictionary
@property
def target_dictionary(self):
return self.dictionary
class DummyDataset(FairseqDataset):
def __init__(self, batch, num_items, item_size):
super().__init__()
self.batch = batch
self.num_items = num_items
self.item_size = item_size
def __getitem__(self, index):
return index
def __len__(self):
return self.num_items
def collater(self, samples):
return self.batch
@property
def sizes(self):
return np.array([self.item_size] * self.num_items)
def num_tokens(self, index):
return self.item_size
def size(self, index):
return self.item_size
def ordered_indices(self):
return np.arange(self.num_items)
@property
def supports_prefetch(self):
return False
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/benchmark/dummy_lm.py | fairseq/benchmark/dummy_lm.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import numpy as np
import torch
from fairseq.data import Dictionary, FairseqDataset
from fairseq.tasks import register_task, LegacyFairseqTask
logger = logging.getLogger(__name__)
@register_task('dummy_lm')
class DummyLMTask(LegacyFairseqTask):
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
parser.add_argument('--dict-size', default=49996, type=int)
parser.add_argument('--dataset-size', default=100000, type=int)
parser.add_argument('--tokens-per-sample', default=512, type=int,
help='max number of total tokens over all segments '
'per sample for BERT dataset')
def __init__(self, args, dictionary):
super().__init__(args)
self.dictionary = dictionary
self.seed = args.seed
dictionary.pad_to_multiple_(8) # often faster if divisible by 8
seq = torch.arange(args.tokens_per_sample + 1) + dictionary.pad() + 1
self.dummy_src = seq[:-1]
self.dummy_tgt = seq[1:]
@classmethod
def setup_task(cls, args, **kwargs):
"""Setup the task. """
dictionary = Dictionary()
for i in range(args.dict_size):
dictionary.add_symbol('word{}'.format(i))
logger.info('dictionary: {} types'.format(len(dictionary)))
return cls(args, dictionary)
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
if self.args.max_sentences is not None:
bsz = self.args.max_sentences
else:
bsz = max(1, self.args.max_tokens // self.args.tokens_per_sample)
self.datasets[split] = DummyDataset(
{
'id': 1,
'net_input': {
'src_tokens': torch.stack([self.dummy_src for _ in range(bsz)]),
'src_lengths': torch.full(
(bsz, ), self.args.tokens_per_sample, dtype=torch.long
),
},
'target': torch.stack([self.dummy_tgt for _ in range(bsz)]),
'nsentences': bsz,
'ntokens': bsz * self.args.tokens_per_sample,
},
num_items=self.args.dataset_size,
item_size=self.args.tokens_per_sample,
)
@property
def source_dictionary(self):
return self.dictionary
@property
def target_dictionary(self):
return self.dictionary
class DummyDataset(FairseqDataset):
def __init__(self, batch, num_items, item_size):
super().__init__()
self.batch = batch
self.num_items = num_items
self.item_size = item_size
def __getitem__(self, index):
return index
def __len__(self):
return self.num_items
def collater(self, samples):
return self.batch
@property
def sizes(self):
return np.array([self.item_size] * self.num_items)
def num_tokens(self, index):
return self.item_size
def size(self, index):
return self.item_size
def ordered_indices(self):
return np.arange(self.num_items)
@property
def supports_prefetch(self):
return False
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/benchmark/__init__.py | fairseq/benchmark/__init__.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# import models/tasks to register them
from . import ( # noqa
dummy_lm,
dummy_masked_lm,
dummy_model,
dummy_mt,
)
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/models/distributed_fairseq_model.py | fairseq/models/distributed_fairseq_model.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import inspect
import torch.nn as nn
from fairseq.legacy_distributed_data_parallel import LegacyDistributedDataParallel
from fairseq.models import BaseFairseqModel
_GOSSIP_DISABLED = False
try:
import gossip
except ImportError:
_GOSSIP_DISABLED = True
def DistributedFairseqModel(args, model, process_group=None):
"""
Wrap a *model* to support distributed data parallel training.
This is similar to the built-in DistributedDataParallel, but allows
additional configuration of the DistributedDataParallel class to
use, and also provides easier access to the wrapped model by
forwarding requests for missing attributes to the wrapped model.
Args:
args (argparse.Namespace): fairseq args
model (BaseFairseqModel): model to wrap
"""
# determine which DDP class to extend
assert isinstance(model, nn.Module)
if args.distributed_wrapper == 'DDP' and args.ddp_backend == 'c10d':
ddp_class = nn.parallel.DistributedDataParallel
init_kwargs = dict(
module=model,
device_ids=[args.device_id],
output_device=args.device_id,
broadcast_buffers=args.broadcast_buffers,
bucket_cap_mb=args.bucket_cap_mb,
process_group=process_group,
)
# Maintain backward compatibility
if 'check_reduction' in inspect.getargspec(ddp_class)[0]:
init_kwargs['check_reduction'] = True
if 'find_unused_parameters' in inspect.getargspec(ddp_class)[0]:
init_kwargs['find_unused_parameters'] = args.find_unused_parameters
elif args.distributed_wrapper == 'DDP' and args.ddp_backend == 'no_c10d':
ddp_class = LegacyDistributedDataParallel
init_kwargs = dict(
module=model,
world_size=args.distributed_world_size,
buffer_size=2**28,
process_group=process_group,
)
elif args.distributed_wrapper == 'SlowMo':
if _GOSSIP_DISABLED:
raise ImportError(
'Cannot find gossip library. Please install from: '
'github.com/facebookresearch/stochastic_gradient_push'
)
ddp_class = gossip.GossipDataParallel
# The values of slowmo_momentum below were obtained by tuning on the
# En-De 16 dataset by training the transformer_wmt_en_de_large model
if args.slowmo_momentum is None:
if args.distributed_world_size <= 16:
args.slowmo_momentum = 0.0
elif args.distributed_world_size <= 32:
args.slowmo_momentum = 0.2
elif args.distributed_world_size <= 64:
args.slowmo_momentum = 0.5
else:
args.slowmo_momentum = 0.6
init_kwargs = dict(
module=model,
device_ids=[args.device_id],
output_device=args.device_id,
broadcast_buffers=args.broadcast_buffers,
nprocs_per_node=args.nprocs_per_node,
slowmo_momentum=args.slowmo_momentum,
localsgd=(args.slowmo_algorithm == 'LocalSGD'),
localsgd_frequency=args.localsgd_frequency
)
else:
raise ValueError('Unknown --ddp-backend: ' + args.ddp_backend)
class _DistributedFairseqModel(ddp_class):
"""Extend DistributedDataParallel to check for missing
attributes in the wrapped module."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def __getattr__(self, name):
wrapped_module = super().__getattr__('module')
if hasattr(wrapped_module, name):
return getattr(wrapped_module, name)
return super().__getattr__(name)
return _DistributedFairseqModel(**init_kwargs)
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
ofirpress/shortformer | https://github.com/ofirpress/shortformer/blob/edc411ff896ae042c01d939a32c1e4a33e238083/fairseq/models/multilingual_transformer.py | fairseq/models/multilingual_transformer.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import OrderedDict
from fairseq import utils
from fairseq.models import (
FairseqMultiModel,
register_model,
register_model_architecture,
)
from fairseq.models.transformer import (
base_architecture,
Embedding,
TransformerModel,
TransformerEncoder,
TransformerDecoder,
)
@register_model('multilingual_transformer')
class MultilingualTransformerModel(FairseqMultiModel):
"""Train Transformer models for multiple language pairs simultaneously.
Requires `--task multilingual_translation`.
We inherit all arguments from TransformerModel and assume that all language
pairs use a single Transformer architecture. In addition, we provide several
options that are specific to the multilingual setting.
Args:
--share-encoder-embeddings: share encoder embeddings across all source languages
--share-decoder-embeddings: share decoder embeddings across all target languages
--share-encoders: share all encoder params (incl. embeddings) across all source languages
--share-decoders: share all decoder params (incl. embeddings) across all target languages
"""
def __init__(self, encoders, decoders):
super().__init__(encoders, decoders)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
TransformerModel.add_args(parser)
parser.add_argument('--share-encoder-embeddings', action='store_true',
help='share encoder embeddings across languages')
parser.add_argument('--share-decoder-embeddings', action='store_true',
help='share decoder embeddings across languages')
parser.add_argument('--share-encoders', action='store_true',
help='share encoders across languages')
parser.add_argument('--share-decoders', action='store_true',
help='share decoders across languages')
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
from fairseq.tasks.multilingual_translation import MultilingualTranslationTask
assert isinstance(task, MultilingualTranslationTask)
# make sure all arguments are present in older models
base_multilingual_architecture(args)
if not hasattr(args, 'max_source_positions'):
args.max_source_positions = 1024
if not hasattr(args, 'max_target_positions'):
args.max_target_positions = 1024
src_langs = [lang_pair.split('-')[0] for lang_pair in task.model_lang_pairs]
tgt_langs = [lang_pair.split('-')[1] for lang_pair in task.model_lang_pairs]
if args.share_encoders:
args.share_encoder_embeddings = True
if args.share_decoders:
args.share_decoder_embeddings = True
def build_embedding(dictionary, embed_dim, path=None):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
emb = Embedding(num_embeddings, embed_dim, padding_idx)
# if provided, load from preloaded dictionaries
if path:
embed_dict = utils.parse_embedding(path)
utils.load_embedding(embed_dict, dictionary, emb)
return emb
# build shared embeddings (if applicable)
shared_encoder_embed_tokens, shared_decoder_embed_tokens = None, None
if args.share_all_embeddings:
if args.encoder_embed_dim != args.decoder_embed_dim:
raise ValueError(
'--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim')
if args.decoder_embed_path and (
args.decoder_embed_path != args.encoder_embed_path):
raise ValueError('--share-all-embeddings not compatible with --decoder-embed-path')
shared_encoder_embed_tokens = FairseqMultiModel.build_shared_embeddings(
dicts=task.dicts,
langs=task.langs,
embed_dim=args.encoder_embed_dim,
build_embedding=build_embedding,
pretrained_embed_path=args.encoder_embed_path,
)
shared_decoder_embed_tokens = shared_encoder_embed_tokens
args.share_decoder_input_output_embed = True
else:
if args.share_encoder_embeddings:
shared_encoder_embed_tokens = (
FairseqMultiModel.build_shared_embeddings(
dicts=task.dicts,
langs=src_langs,
embed_dim=args.encoder_embed_dim,
build_embedding=build_embedding,
pretrained_embed_path=args.encoder_embed_path,
)
)
if args.share_decoder_embeddings:
shared_decoder_embed_tokens = (
FairseqMultiModel.build_shared_embeddings(
dicts=task.dicts,
langs=tgt_langs,
embed_dim=args.decoder_embed_dim,
build_embedding=build_embedding,
pretrained_embed_path=args.decoder_embed_path,
)
)
# encoders/decoders for each language
lang_encoders, lang_decoders = {}, {}
def get_encoder(lang):
if lang not in lang_encoders:
if shared_encoder_embed_tokens is not None:
encoder_embed_tokens = shared_encoder_embed_tokens
else:
encoder_embed_tokens = build_embedding(
task.dicts[lang], args.encoder_embed_dim, args.encoder_embed_path
)
lang_encoders[lang] = TransformerEncoder(args, task.dicts[lang], encoder_embed_tokens)
return lang_encoders[lang]
def get_decoder(lang):
if lang not in lang_decoders:
if shared_decoder_embed_tokens is not None:
decoder_embed_tokens = shared_decoder_embed_tokens
else:
decoder_embed_tokens = build_embedding(
task.dicts[lang], args.decoder_embed_dim, args.decoder_embed_path
)
lang_decoders[lang] = TransformerDecoder(args, task.dicts[lang], decoder_embed_tokens)
return lang_decoders[lang]
# shared encoders/decoders (if applicable)
shared_encoder, shared_decoder = None, None
if args.share_encoders:
shared_encoder = get_encoder(src_langs[0])
if args.share_decoders:
shared_decoder = get_decoder(tgt_langs[0])
encoders, decoders = OrderedDict(), OrderedDict()
for lang_pair, src, tgt in zip(task.model_lang_pairs, src_langs, tgt_langs):
encoders[lang_pair] = shared_encoder if shared_encoder is not None else get_encoder(src)
decoders[lang_pair] = shared_decoder if shared_decoder is not None else get_decoder(tgt)
return MultilingualTransformerModel(encoders, decoders)
def load_state_dict(self, state_dict, strict=True, args=None):
state_dict_subset = state_dict.copy()
for k, _ in state_dict.items():
assert k.startswith('models.')
lang_pair = k.split('.')[1]
if lang_pair not in self.models:
del state_dict_subset[k]
super().load_state_dict(state_dict_subset, strict=strict, args=args)
@register_model_architecture('multilingual_transformer', 'multilingual_transformer')
def base_multilingual_architecture(args):
base_architecture(args)
args.share_encoder_embeddings = getattr(args, 'share_encoder_embeddings', False)
args.share_decoder_embeddings = getattr(args, 'share_decoder_embeddings', False)
args.share_encoders = getattr(args, 'share_encoders', False)
args.share_decoders = getattr(args, 'share_decoders', False)
@register_model_architecture('multilingual_transformer', 'multilingual_transformer_iwslt_de_en')
def multilingual_transformer_iwslt_de_en(args):
args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512)
args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 1024)
args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 4)
args.encoder_layers = getattr(args, 'encoder_layers', 6)
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512)
args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 1024)
args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 4)
args.decoder_layers = getattr(args, 'decoder_layers', 6)
base_multilingual_architecture(args)
| python | MIT | edc411ff896ae042c01d939a32c1e4a33e238083 | 2026-01-05T07:14:08.244122Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.