code stringlengths 17 6.64M |
|---|
def PrintUsage(message):
    """Print the usage string to stderr and terminate the process.

    Args:
      message: Optional error message. When given, it is emitted as a
        FATAL ERROR line and the process exits with a non-zero status.
    """
    sys.stderr.write(_USAGE)
    if not message:
        sys.exit(1)
    # sys.exit with a string prints it to stderr and exits with status 1.
    sys.exit('\nFATAL ERROR: ' + message)
|
def PrintCategories():
    """Print every error category usable with --filter, then exit 0.

    These are the categories used to filter messages via --filter.
    """
    for cat in _ERROR_CATEGORIES:
        sys.stderr.write('  %s\n' % cat)
    sys.exit(0)
|
def ParseArguments(args):
    """Parse the command line arguments.

    This may set the output format and verbosity level as side-effects, and
    exits (via PrintUsage/PrintCategories) on bad flags.

    Args:
      args: The command line arguments.

    Returns:
      The list of filenames to lint.
    """
    long_flags = ['help', 'output=', 'verbose=', 'counting=', 'filter=',
                  'root=', 'linelength=', 'extensions=']
    try:
        opts, filenames = getopt.getopt(args, '', long_flags)
    except getopt.GetoptError:
        PrintUsage('Invalid arguments.')

    verbosity = _VerboseLevel()
    output_format = _OutputFormat()
    filters = ''
    counting_style = ''

    for opt, val in opts:
        if opt == '--help':
            PrintUsage(None)
        elif opt == '--output':
            if val not in ('emacs', 'vs7', 'eclipse'):
                PrintUsage('The only allowed output formats are emacs, vs7 and eclipse.')
            output_format = val
        elif opt == '--verbose':
            verbosity = int(val)
        elif opt == '--filter':
            filters = val
            # An empty --filter value means "list the categories and exit".
            if not filters:
                PrintCategories()
        elif opt == '--counting':
            if val not in ('total', 'toplevel', 'detailed'):
                PrintUsage('Valid counting options are total, toplevel, and detailed')
            counting_style = val
        elif opt == '--root':
            global _root
            _root = val
        elif opt == '--linelength':
            global _line_length
            try:
                _line_length = int(val)
            except ValueError:
                PrintUsage('Line length must be digits.')
        elif opt == '--extensions':
            global _valid_extensions
            try:
                _valid_extensions = set(val.split(','))
            except ValueError:
                PrintUsage('Extensions must be comma seperated list.')

    if not filenames:
        PrintUsage('No files were specified.')

    _SetOutputFormat(output_format)
    _SetVerboseLevel(verbosity)
    _SetFilters(filters)
    _SetCountingStyle(counting_style)
    return filenames
|
def main():
    """Lint each file named on the command line; exit non-zero if any errors."""
    filenames = ParseArguments(sys.argv[1:])
    # Re-wrap stderr so lint messages survive terminals that choke on UTF-8;
    # undecodable characters are replaced rather than raising.
    sys.stderr = codecs.StreamReaderWriter(
        sys.stderr, codecs.getreader('utf8'), codecs.getwriter('utf8'), 'replace')
    _cpplint_state.ResetErrorCounts()
    for filename in filenames:
        ProcessFile(filename, _cpplint_state.verbose_level)
    _cpplint_state.PrintErrorCounts()
    sys.exit(_cpplint_state.error_count > 0)
|
def reporthook(count, block_size, total_size):
    """Progress callback for urllib's urlretrieve.

    From http://blog.moleculea.com/2012/10/04/urlretrieve-progres-indicator/

    Args:
      count: blocks transferred so far (0 on the first call).
      block_size: size of each block in bytes.
      total_size: total download size in bytes; may be <= 0 when the server
        does not send Content-Length.
    """
    global start_time
    if count == 0:
        start_time = time.time()
        return
    # Guard against a zero elapsed time on a very fast second callback,
    # which previously raised ZeroDivisionError.
    duration = max(time.time() - start_time, 1e-6)
    progress_size = int(count * block_size)
    speed = int(progress_size / (1024 * duration))
    if total_size > 0:
        percent = int(count * block_size * 100 / total_size)
    else:
        # Unknown total size (no Content-Length): report 0% instead of
        # dividing by zero / printing a negative percentage.
        percent = 0
    sys.stdout.write('\r...%d%%, %d MB, %d KB/s, %d seconds passed' %
                     (percent, progress_size / (1024 * 1024), speed, duration))
    sys.stdout.flush()
|
def parse_readme_frontmatter(dirname):
    """Parse the YAML front matter out of <dirname>/readme.md.

    The front matter is the block between the first two '---' lines.

    Args:
      dirname: model directory containing a readme.md.

    Returns:
      A (dirname, frontmatter) tuple where frontmatter is the parsed YAML.

    Raises:
      ValueError: if any required front-matter key is missing.
    """
    readme_filename = os.path.join(dirname, 'readme.md')
    with open(readme_filename) as f:
        lines = [line.strip() for line in f.readlines()]
    top = lines.index('---')
    bottom = lines.index('---', top + 1)
    # safe_load: plain yaml.load without a Loader is deprecated and can
    # execute arbitrary constructors from an untrusted readme.
    frontmatter = yaml.safe_load('\n'.join(lines[top + 1:bottom]))
    # Explicit check instead of assert: asserts vanish under `python -O`.
    missing = [key for key in required_keys if key not in frontmatter]
    if missing:
        raise ValueError('readme.md front matter missing keys: {}'.format(missing))
    return (dirname, frontmatter)
|
def valid_dirname(dirname):
    """argparse type-checker: accept only a dir whose readme.md parses."""
    try:
        parsed = parse_readme_frontmatter(dirname)
    except Exception as e:
        # Surface the underlying failure, then reject the argument.
        print('ERROR: {}'.format(e))
        raise argparse.ArgumentTypeError(
            'Must be valid Caffe model directory with a correct readme.md')
    return parsed
|
def extract_datetime_from_line(line, year):
    """Parse a glog-style line ('IMMDD HH:MM:SS.ffffff ...') into a datetime.

    The log line carries no year, so the caller supplies one.
    """
    fields = line.strip().split()
    # fields[0] looks like 'I0210': a severity letter followed by MMDD.
    month = int(fields[0][1:3])
    day = int(fields[0][3:])
    timestamp = fields[1]
    dot = timestamp.rfind('.')
    hour, minute, second = (int(part) for part in timestamp[:dot].split(':'))
    microsecond = int(timestamp[dot + 1:])
    return datetime.datetime(year, month, day, hour, minute, second, microsecond)
|
def get_log_created_year(input_file):
    """Return the year of the log file's filesystem ctime timestamp."""
    created = os.path.getctime(input_file)
    return datetime.datetime.fromtimestamp(created).year
|
def get_start_time(line_iterable, year):
    """Return the datetime of the first line containing 'Solving', else None."""
    for raw in line_iterable:
        stripped = raw.strip()
        if 'Solving' in stripped:
            return extract_datetime_from_line(stripped, year)
    return None
|
def extract_seconds(input_file, output_file):
    """Write elapsed seconds since solver start for each 'Iteration' log line.

    Args:
      input_file: path of a Caffe training log.
      output_file: path that receives one elapsed-seconds float per line.

    Raises:
      AssertionError: if no 'Solving' start line is found in the log.
    """
    with open(input_file, 'r') as f:
        lines = f.readlines()
    log_created_year = get_log_created_year(input_file)
    start_datetime = get_start_time(lines, log_created_year)
    assert start_datetime, 'Start time not found'
    # Context manager: the original leaked the output handle if a log line
    # failed to parse before out.close() was reached.
    with open(output_file, 'w') as out:
        for line in lines:
            line = line.strip()
            if line.find('Iteration') != -1:
                dt = extract_datetime_from_line(line, log_created_year)
                elapsed_seconds = (dt - start_datetime).total_seconds()
                out.write('%f\n' % elapsed_seconds)
|
def main(args):
    """Set up MPI/torch distributed env vars from node_to_rank.json and run.

    Reads the node->rank mapping, designates the rank-0 node as master,
    exports MASTER_ADDR/MASTER_PORT, logs the Open MPI environment, and
    launches single-process training.
    """
    # Close the mapping file deterministically; the original passed a bare
    # open() to json.load and leaked the handle.
    with open('node_to_rank.json', 'r') as f:
        node_to_rank = json.load(f)
    # Invert the mapping: the node holding rank 0 acts as the master.
    args.master_addr = {v: k for (k, v) in node_to_rank.items()}[0]
    os.environ['MASTER_ADDR'] = args.master_addr
    os.environ['MASTER_PORT'] = '10000'
    host = socket.gethostbyname(socket.gethostname())
    args.distributed_port = '10000'
    print('master', args.master_addr, 'host', host, os.environ.get('SLURM_JOB_NODELIST'))
    print('OMPI_COMM_WORLD_SIZE', os.environ['OMPI_COMM_WORLD_SIZE'])
    print('OMPI_COMM_WORLD_RANK', os.environ['OMPI_COMM_WORLD_RANK'])
    print('OMPI_COMM_WORLD_LOCAL_RANK', os.environ['OMPI_COMM_WORLD_LOCAL_RANK'], args.device_id)
    exp_id = args.master_addr
    # File-based rendezvous shared across nodes.
    args.distributed_init_method = 'file:///shared/share/' + exp_id
    print('| initialized host {} as rank {}'.format(socket.gethostbyname(socket.gethostname()), args.distributed_rank))
    single_process_main(0, args)
|
class WordStat(object):
    """Accumulates per-word log-probability statistics during LM evaluation."""

    def __init__(self, word, is_bpe):
        self.word = word
        self.is_bpe = is_bpe          # word was assembled from BPE pieces
        self.log_prob = 0             # running sum of this word's log probs
        self.next_word_prob = 0       # running sum of next-word probs
        self.count = 0                # occurrences seen
        self.missing_next_words = 0   # occurrences with no countable next word

    def add(self, log_prob, next_word_prob):
        """Accumulate one occurrence of the word.

        Adds the word's log prob and, when available, the next word's prob
        (given context ending at this word). The next word can be missing at
        the end of an example, or uncounted when it is a non-final subword
        unit; those cases are tallied in missing_next_words.
        """
        if next_word_prob is None:
            self.missing_next_words += 1
        else:
            self.next_word_prob += next_word_prob
        self.log_prob += log_prob
        self.count += 1

    def __str__(self):
        fields = (self.word, self.count, self.log_prob, self.is_bpe,
                  self.next_word_prob, self.count - self.missing_next_words)
        return '{}\t{}\t{}\t{}\t{}\t{}'.format(*fields)
|
def main(parsed_args):
    """Score the --gen-subset split with an ensemble of language models.

    Loads the checkpoint(s) from --path, scores every sample, and prints
    average loss and perplexity; optionally emits per-word probabilities
    and per-word statistics.

    NOTE(review): relies on many module-level fairseq names (tasks, utils,
    checkpoint_utils, progress_bar, SequenceScorer, meters, np, ...).
    """
    assert (parsed_args.path is not None), '--path required for evaluation!'
    utils.import_user_module(parsed_args)
    print(parsed_args)
    use_cuda = (torch.cuda.is_available() and (not parsed_args.cpu))
    task = tasks.setup_task(parsed_args)
    print('| loading model(s) from {}'.format(parsed_args.path))
    # NOTE(review): eval() on --model-overrides executes arbitrary code from
    # the command line; acceptable for a trusted research CLI only.
    (models, args) = checkpoint_utils.load_model_ensemble(parsed_args.path.split(':'), arg_overrides=eval(parsed_args.model_overrides), task=task)
    # Overwrite checkpoint args with current CLI args, except the ones that
    # must keep their training-time values.
    for arg in vars(parsed_args).keys():
        if (arg not in {'self_target', 'future_target', 'past_target', 'tokens_per_sample', 'output_size_dictionary', 'add_bos_token'}):
            setattr(args, arg, getattr(parsed_args, arg))
    # Shrink scored tokens per sample so the added context window still fits.
    args.tokens_per_sample -= args.context_window
    task = tasks.setup_task(args)
    task.load_dataset(args.gen_subset)
    dataset = task.dataset(args.gen_subset)
    if (args.context_window > 0):
        # Wrap the dataset so each sample carries extra (unscored) context.
        dataset = LMContextWindowDataset(dataset=dataset, tokens_per_sample=args.tokens_per_sample, context_window=args.context_window, pad_idx=task.source_dictionary.pad())
    print('| {} {} {} examples'.format(args.data, args.gen_subset, len(dataset)))
    # Prepare each model for inference (optional fp16 / CUDA placement).
    for model in models:
        model.make_generation_fast_()
        if args.fp16:
            model.half()
        if use_cuda:
            model.cuda()
    assert (len(models) > 0)
    print('num. model params: {}'.format(sum((p.numel() for p in models[0].parameters()))))
    itr = task.get_batch_iterator(dataset=dataset, max_tokens=(args.max_tokens or 36000), max_sentences=args.max_sentences, max_positions=utils.resolve_max_positions(*[model.max_positions() for model in models]), ignore_invalid_inputs=True, num_shards=args.num_shards, shard_id=args.shard_id, num_workers=args.num_workers).next_epoch_itr(shuffle=False)
    gen_timer = StopwatchMeter()
    scorer = SequenceScorer(task.target_dictionary, args.softmax_batch)
    score_sum = 0.0
    count = 0
    # Precompute the set of dictionary indices that are non-final BPE
    # subwords so their scores can be folded into the following token.
    if (args.remove_bpe is not None):
        if (args.remove_bpe == 'sentencepiece'):
            raise NotImplementedError
        else:
            bpe_cont = args.remove_bpe.rstrip()
            bpe_toks = set((i for i in range(len(task.source_dictionary)) if task.source_dictionary[i].endswith(bpe_cont)))
            bpe_len = len(bpe_cont)
    else:
        bpe_toks = None
        bpe_len = 0
    word_stats = dict()
    with progress_bar.build_progress_bar(args, itr) as t:
        wps_meter = TimeMeter()
        for sample in t:
            if ('net_input' not in sample):
                continue
            sample = (utils.move_to_cuda(sample) if use_cuda else sample)
            gen_timer.start()
            hypos = scorer.generate(models, sample)
            gen_timer.stop(sample['ntokens'])
            for (i, hypos_i) in enumerate(hypos):
                hypo = hypos_i[0]
                sample_id = sample['id'][i]
                tokens = hypo['tokens']
                tgt_len = tokens.numel()
                pos_scores = hypo['positional_scores'].float()
                if args.add_bos_token:
                    # Drop the leading BOS token; it is not scored.
                    assert (hypo['tokens'][0].item() == task.target_dictionary.bos())
                    tokens = tokens[1:]
                    pos_scores = pos_scores[1:]
                skipped_toks = 0
                if (bpe_toks is not None):
                    # Fold each BPE-continuation score into the next position.
                    # NOTE(review): this loop rebinds `i`, shadowing the
                    # enumerate index above; harmless here because sample_id
                    # was captured earlier, but fragile.
                    for i in range((tgt_len - 1)):
                        if (tokens[i].item() in bpe_toks):
                            skipped_toks += 1
                            pos_scores[(i + 1)] += pos_scores[i]
                            pos_scores[i] = 0
                # Exclude +/-inf scores from the aggregate: they would make
                # the average NLL meaningless.
                inf_scores = (pos_scores.eq(float('inf')) | pos_scores.eq(float('-inf')))
                if inf_scores.any():
                    print('| Skipping tokens with inf scores:', task.target_dictionary.string(tokens[inf_scores.nonzero()]))
                    pos_scores = pos_scores[(~ inf_scores).nonzero()]
                score_sum += pos_scores.sum().cpu()
                count += (pos_scores.numel() - skipped_toks)
                if (args.output_word_probs or args.output_word_stats):
                    # Re-assemble whole words from subword pieces and record
                    # per-word probabilities.
                    w = ''
                    word_prob = []
                    is_bpe = False
                    for i in range(len(tokens)):
                        w_ind = tokens[i].item()
                        w += task.source_dictionary[w_ind]
                        if ((bpe_toks is not None) and (w_ind in bpe_toks)):
                            # Strip the continuation marker and keep building.
                            w = w[:(- bpe_len)]
                            is_bpe = True
                        else:
                            word_prob.append((w, pos_scores[i].item()))
                            # Next word's prob is the next non-zero score
                            # (zeros are folded-away BPE continuations).
                            next_prob = None
                            ind = (i + 1)
                            while (ind < len(tokens)):
                                if (pos_scores[ind].item() != 0):
                                    next_prob = pos_scores[ind]
                                    break
                                ind += 1
                            word_stats.setdefault(w, WordStat(w, is_bpe)).add(pos_scores[i].item(), next_prob)
                            is_bpe = False
                            w = ''
                    if args.output_word_probs:
                        print(((str(int(sample_id)) + ' ') + '\t'.join(('{} [{:2f}]'.format(x[0], x[1]) for x in word_prob))))
            wps_meter.update(sample['ntokens'])
            t.log({'wps': round(wps_meter.avg)})
    avg_nll_loss = ((- score_sum) / count)
    print('| Evaluated {} tokens in {:.1f}s ({:.2f} tokens/s)'.format(gen_timer.n, gen_timer.sum, (1.0 / gen_timer.avg)))
    print('| Loss: {:.4f}, Perplexity: {:.2f}'.format(avg_nll_loss, np.exp(avg_nll_loss)))
    if args.output_word_stats:
        # Most frequent words first.
        for ws in sorted(word_stats.values(), key=(lambda x: x.count), reverse=True):
            print(ws)
|
def cli_main():
    """Entry point: parse eval-lm CLI arguments and run main()."""
    parser = options.get_eval_lm_parser()
    main(options.parse_args_and_arch(parser))
|
def safe_readline(f):
    """Read one line from f, tolerating a seek into a multi-byte character.

    If decoding fails at the current position, back up one byte at a time
    and retry until a full line decodes cleanly.
    """
    position = f.tell()
    while True:
        try:
            return f.readline()
        except UnicodeDecodeError:
            position -= 1
            f.seek(position)
|
class Binarizer():
    """Static helpers that encode a text file into index sequences.

    The offset/end parameters let several workers binarize disjoint byte
    ranges of the same file in parallel (see find_offsets).
    """
    @staticmethod
    def binarize(filename, dict, consumer, tokenize=tokenize_line, append_eos=True, reverse_order=False, offset=0, end=(- 1)):
        # NOTE(review): parameter `dict` shadows the builtin; kept as-is
        # because callers may pass it by keyword.
        # nseq/ntok count processed sequences and tokens; `replaced` counts
        # real OOV words that were mapped to <unk>.
        (nseq, ntok) = (0, 0)
        replaced = Counter()
        def replaced_consumer(word, idx):
            # Only count genuine OOVs: idx is unk, but the word itself is
            # not the unk symbol.
            if ((idx == dict.unk_index) and (word != dict.unk_word)):
                replaced.update([word])
        with open(filename, 'r', encoding='utf-8') as f:
            f.seek(offset)
            # safe_readline backs up if the seek landed mid multi-byte char.
            line = safe_readline(f)
            while line:
                # Stop once this worker has read past its byte range;
                # end <= 0 means "read to EOF".
                if ((end > 0) and (f.tell() > end)):
                    break
                ids = dict.encode_line(line=line, line_tokenizer=tokenize, add_if_not_exist=False, consumer=replaced_consumer, append_eos=append_eos, reverse_order=reverse_order)
                nseq += 1
                ntok += len(ids)
                consumer(ids)
                line = f.readline()
        return {'nseq': nseq, 'nunk': sum(replaced.values()), 'ntok': ntok, 'replaced': replaced}
    @staticmethod
    def binarize_alignments(filename, alignment_parser, consumer, offset=0, end=(- 1)):
        # Same byte-range protocol as binarize, but each line is parsed by
        # the caller-supplied alignment_parser instead of a dictionary.
        nseq = 0
        with open(filename, 'r') as f:
            f.seek(offset)
            line = safe_readline(f)
            while line:
                if ((end > 0) and (f.tell() > end)):
                    break
                ids = alignment_parser(line)
                nseq += 1
                consumer(ids)
                line = f.readline()
        return {'nseq': nseq}
    @staticmethod
    def find_offsets(filename, num_chunks):
        # Split the file into num_chunks byte ranges, snapping each boundary
        # forward to the next line start so no line straddles two chunks.
        # The final entry stays 0, which binarize treats as "read to EOF".
        with open(filename, 'r', encoding='utf-8') as f:
            size = os.fstat(f.fileno()).st_size
            chunk_size = (size // num_chunks)
            offsets = [0 for _ in range((num_chunks + 1))]
            for i in range(1, num_chunks):
                f.seek((chunk_size * i))
                safe_readline(f)
                offsets[i] = f.tell()
            return offsets
class BleuStat(ctypes.Structure):
    """ctypes mirror of the C BLEU statistics struct: reference/hypothesis
    lengths plus n-gram (match, count) pairs for orders 1-4."""
    _fields_ = [
        ('reflen', ctypes.c_size_t),
        ('predlen', ctypes.c_size_t),
        ('match1', ctypes.c_size_t),
        ('count1', ctypes.c_size_t),
        ('match2', ctypes.c_size_t),
        ('count2', ctypes.c_size_t),
        ('match3', ctypes.c_size_t),
        ('count3', ctypes.c_size_t),
        ('match4', ctypes.c_size_t),
        ('count4', ctypes.c_size_t),
    ]
|
class SacrebleuScorer(object):
    """String-based corpus-BLEU scorer backed by the sacrebleu package."""

    def __init__(self):
        # Deferred import: sacrebleu is an optional dependency.
        import sacrebleu
        self.sacrebleu = sacrebleu
        self.reset()

    def reset(self, one_init=False):
        """Clear accumulated references and hypotheses."""
        if one_init:
            raise NotImplementedError
        self.ref = []
        self.sys = []

    def add_string(self, ref, pred):
        """Record one (reference, hypothesis) sentence pair."""
        self.ref.append(ref)
        self.sys.append(pred)

    def score(self, order=4):
        """Return the corpus BLEU score as a float."""
        return self.result_string(order).score

    def result_string(self, order=4):
        """Return sacrebleu's BLEU result object (order 4 only)."""
        if order != 4:
            raise NotImplementedError
        return self.sacrebleu.corpus_bleu(self.sys, [self.ref])
|
class Scorer(object):
    """BLEU scorer over token-id tensors, backed by the module-level compiled
    C library `C`. pad/eos/unk ids configure how tokens are counted."""
    def __init__(self, pad, eos, unk):
        self.stat = BleuStat()  # counters live in a ctypes struct shared with C
        self.pad = pad
        self.eos = eos
        self.unk = unk
        self.reset()
    def reset(self, one_init=False):
        # Zero (or, with one_init, one-smooth) the counters on the C side.
        if one_init:
            C.bleu_one_init(ctypes.byref(self.stat))
        else:
            C.bleu_zero_init(ctypes.byref(self.stat))
    def add(self, ref, pred):
        """Accumulate BLEU statistics for one (reference, hypothesis) pair.

        Args:
          ref: torch.IntTensor of reference token ids.
          pred: torch.IntTensor of hypothesis token ids.

        Raises:
          TypeError: if either argument is not a torch.IntTensor.
        """
        if (not isinstance(ref, torch.IntTensor)):
            raise TypeError('ref must be a torch.IntTensor (got {})'.format(type(ref)))
        if (not isinstance(pred, torch.IntTensor)):
            raise TypeError('pred must be a torch.IntTensor(got {})'.format(type(pred)))
        # Work on a copy: unk tokens in the reference are remapped to -999
        # so they can never match hypothesis tokens.
        rref = ref.clone()
        assert (not rref.lt(0).any())
        rref[rref.eq(self.unk)] = (- 999)
        # Flatten to contiguous 1-D buffers; the C function reads raw pointers.
        rref = rref.contiguous().view((- 1))
        pred = pred.contiguous().view((- 1))
        C.bleu_add(ctypes.byref(self.stat), ctypes.c_size_t(rref.size(0)), ctypes.c_void_p(rref.data_ptr()), ctypes.c_size_t(pred.size(0)), ctypes.c_void_p(pred.data_ptr()), ctypes.c_int(self.pad), ctypes.c_int(self.eos))
    def score(self, order=4):
        """Corpus BLEU in [0, 100]: brevity penalty times the geometric mean
        of the n-gram precisions up to `order`."""
        psum = sum(((math.log(p) if (p > 0) else float('-Inf')) for p in self.precision()[:order]))
        return ((self.brevity() * math.exp((psum / order))) * 100)
    def precision(self):
        # n-gram precisions for n=1..4; 0 when no n-grams were counted.
        def ratio(a, b):
            return ((a / b) if (b > 0) else 0)
        return [ratio(self.stat.match1, self.stat.count1), ratio(self.stat.match2, self.stat.count2), ratio(self.stat.match3, self.stat.count3), ratio(self.stat.match4, self.stat.count4)]
    def brevity(self):
        # Brevity penalty: < 1 when hypotheses are shorter than references.
        # NOTE(review): divides by predlen with no guard — raises
        # ZeroDivisionError if add() was never called.
        r = (self.stat.reflen / self.stat.predlen)
        return min(1, math.exp((1 - r)))
    def result_string(self, order=4):
        """Human-readable summary, e.g. 'BLEU4 = ..., p1/p2/p3/p4 (BP=...)'."""
        assert (order <= 4), "BLEU scores for order > 4 aren't supported"
        fmt = 'BLEU{} = {:2.2f}, {:2.1f}'
        for _ in range(1, order):
            fmt += '/{:2.1f}'
        fmt += ' (BP={:.3f}, ratio={:.3f}, syslen={}, reflen={})'
        bleup = [(p * 100) for p in self.precision()[:order]]
        return fmt.format(order, self.score(order=order), *bleup, self.brevity(), (self.stat.predlen / self.stat.reflen), self.stat.predlen, self.stat.reflen)
|
def save_checkpoint(args, trainer, epoch_itr, val_loss):
    """Save checkpoint files for the current epoch/update as configured.

    Decides which checkpoint names (epoch, update, best, last) apply, writes
    the first one and copies it to the rest, then prunes old update/epoch
    checkpoints per --keep-interval-updates / --keep-last-epochs.
    """
    from fairseq import distributed_utils, meters
    # The best validation loss is stashed on the function object itself so
    # it persists across calls (and can be restored by load_checkpoint).
    prev_best = getattr(save_checkpoint, 'best', val_loss)
    if (val_loss is not None):
        best_function = (max if args.maximize_best_checkpoint_metric else min)
        save_checkpoint.best = best_function(val_loss, prev_best)
    # Only the master process writes checkpoints.
    if (args.no_save or (not distributed_utils.is_master(args))):
        return
    def is_better(a, b):
        return ((a >= b) if args.maximize_best_checkpoint_metric else (a <= b))
    write_timer = meters.StopwatchMeter()
    write_timer.start()
    epoch = epoch_itr.epoch
    end_of_epoch = epoch_itr.end_of_epoch()
    updates = trainer.get_num_updates()
    # Map checkpoint filename -> whether it should be written this call.
    checkpoint_conds = collections.OrderedDict()
    checkpoint_conds['checkpoint{}.pt'.format(epoch)] = (end_of_epoch and (not args.no_epoch_checkpoints) and ((epoch % args.save_interval) == 0))
    checkpoint_conds['checkpoint_{}_{}.pt'.format(epoch, updates)] = ((not end_of_epoch) and (args.save_interval_updates > 0) and ((updates % args.save_interval_updates) == 0))
    checkpoint_conds['checkpoint_best.pt'] = ((val_loss is not None) and ((not hasattr(save_checkpoint, 'best')) or is_better(val_loss, save_checkpoint.best)))
    checkpoint_conds['checkpoint_last.pt'] = (not args.no_last_checkpoints)
    extra_state = {'train_iterator': epoch_itr.state_dict(), 'val_loss': val_loss, 'not_best': getattr(save_checkpoint, 'not_best', 0)}
    if hasattr(save_checkpoint, 'best'):
        extra_state.update({'best': save_checkpoint.best})
    checkpoints = [os.path.join(args.save_dir, fn) for (fn, cond) in checkpoint_conds.items() if cond]
    if (len(checkpoints) > 0):
        # Write once, then copy to the remaining names.
        trainer.save_checkpoint(checkpoints[0], extra_state)
        for cp in checkpoints[1:]:
            try:
                # Prefer the FB path manager when available (supports
                # non-POSIX filesystems); fall back to a plain copy.
                from fairseq.fb_pathmgr import fb_pathmgr
                fb_pathmgr.copy(checkpoints[0], cp, True)
            except (ModuleNotFoundError, ImportError):
                shutil.copyfile(checkpoints[0], cp)
        write_timer.stop()
        print('| saved checkpoint {} (epoch {} @ {} updates) (writing took {} seconds)'.format(checkpoints[0], epoch, updates, write_timer.sum))
    # Prune stale update-based checkpoints beyond the configured keep count.
    if ((not end_of_epoch) and (args.keep_interval_updates > 0)):
        checkpoints = checkpoint_paths(args.save_dir, pattern='checkpoint_\\d+_(\\d+)\\.pt')
        for old_chk in checkpoints[args.keep_interval_updates:]:
            if os.path.lexists(old_chk):
                os.remove(old_chk)
    # Prune stale epoch checkpoints likewise.
    if (args.keep_last_epochs > 0):
        checkpoints = checkpoint_paths(args.save_dir, pattern='checkpoint(\\d+)\\.pt')
        for old_chk in checkpoints[args.keep_last_epochs:]:
            if os.path.lexists(old_chk):
                os.remove(old_chk)
|
def load_checkpoint(args, trainer, data_selector=None):
    """Load a checkpoint and restore the training iterator.

    Returns:
      (extra_state, epoch_itr): the checkpoint's extra state (or None when
      nothing was loaded) and a training iterator positioned where the
      checkpoint left off (or at epoch 0).
    """
    if (args.distributed_rank == 0):
        os.makedirs(args.save_dir, exist_ok=True)
    if (args.restore_file == 'checkpoint_last.pt'):
        checkpoint_path = os.path.join(args.save_dir, 'checkpoint_last.pt')
    else:
        checkpoint_path = args.restore_file
    # NOTE(review): eval() on --optimizer-overrides executes arbitrary
    # command-line code; acceptable for a trusted research CLI only.
    extra_state = trainer.load_checkpoint(checkpoint_path, args.reset_optimizer, args.reset_lr_scheduler, eval(args.optimizer_overrides), reset_meters=args.reset_meters)
    # Restore the best/not_best markers consumed by save_checkpoint, unless
    # the user asked to reset optimizer state or meters.
    if ((extra_state is not None) and ('best' in extra_state) and (not args.reset_optimizer) and (not args.reset_meters)):
        save_checkpoint.best = extra_state['best']
    if ((extra_state is not None) and ('not_best' in extra_state) and (not args.reset_optimizer) and (not args.reset_meters)):
        save_checkpoint.not_best = extra_state['not_best']
    if ((extra_state is not None) and (not args.reset_dataloader)):
        # Resume iterating the dataset where the checkpoint stopped.
        itr_state = extra_state['train_iterator']
        epoch_itr = trainer.get_train_iterator(epoch=itr_state['epoch'], load_dataset=True, data_selector=data_selector)
        epoch_itr.load_state_dict(itr_state)
    else:
        epoch_itr = trainer.get_train_iterator(epoch=0, load_dataset=True, data_selector=data_selector)
    trainer.lr_step(epoch_itr.epoch)
    return (extra_state, epoch_itr)
|
def load_checkpoint_to_cpu(path, arg_overrides=None):
    """Loads a checkpoint to CPU (with upgrading for backward compatibility)."""
    def _to_cpu(storage, location):
        # Remap every storage to CPU regardless of where it was saved.
        return default_restore_location(storage, 'cpu')
    try:
        with open(path, 'rb') as f:
            state = torch.load(f, map_location=_to_cpu)
    except (ModuleNotFoundError, ImportError):
        # Fall back to letting torch handle the path directly.
        state = torch.load(path, map_location=_to_cpu)
    args = state['args']
    if arg_overrides is not None:
        for arg_name, arg_val in arg_overrides.items():
            setattr(args, arg_name, arg_val)
    return _upgrade_state_dict(state)
|
def load_model_ensemble(filenames, arg_overrides=None, task=None):
    """Loads an ensemble of models.

    Args:
        filenames (List[str]): checkpoint files to load
        arg_overrides (Dict[str,Any], optional): override model args that
            were used during model training
        task (fairseq.tasks.FairseqTask, optional): task to use for loading
    """
    ensemble, args, _ = load_model_ensemble_and_task(filenames, arg_overrides, task)
    return ensemble, args
|
def load_model_ensemble_and_task(filenames, arg_overrides=None, task=None):
    """Load each checkpoint in `filenames`, building the task from the first
    checkpoint's args when one is not supplied.

    Returns:
      (ensemble, args, task): the loaded models, the last checkpoint's args,
      and the task used for building them.
    """
    from fairseq import tasks
    ensemble = []
    for filename in filenames:
        if not os.path.exists(filename):
            raise IOError('Model file not found: {}'.format(filename))
        state = load_checkpoint_to_cpu(filename, arg_overrides)
        args = state['args']
        if task is None:
            task = tasks.setup_task(args)
        model = task.build_model(args)
        model.load_state_dict(state['model'], strict=True, args=args)
        ensemble.append(model)
    return ensemble, args, task
|
def checkpoint_paths(path, pattern='checkpoint(\\d+)\\.pt'):
    """Retrieves all checkpoints found in `path` directory.

    Checkpoints are identified by matching filename to the specified pattern.
    If the pattern contains groups, the result is sorted by the first group,
    in descending order.
    """
    matcher = re.compile(pattern)
    entries = []
    for position, fname in enumerate(os.listdir(path)):
        match = matcher.fullmatch(fname)
        if match is None:
            continue
        # Sort key: the first captured group if any, else directory order.
        key = int(match.group(1)) if match.groups() else position
        entries.append((key, match.group(0)))
    entries.sort(reverse=True)
    return [os.path.join(path, name) for (_, name) in entries]
|
def torch_persistent_save(*args, **kwargs):
    """torch.save with up to 3 attempts to ride out transient I/O failures.

    Retries silently on the first two failures; on the third, logs the full
    traceback and re-raises. (Previously the final failure was swallowed,
    leaving a missing/truncated checkpoint with no error reported.)
    """
    for attempt in range(3):
        try:
            return torch.save(*args, **kwargs)
        except Exception:
            if attempt == 2:
                logging.error(traceback.format_exc())
                raise
|
def convert_state_dict_type(state_dict, ttype=torch.FloatTensor):
    """Recursively convert every tensor in a (possibly nested) state dict.

    Args:
      state_dict: a tensor, or an arbitrary dict/list nesting of tensors.
      ttype: target tensor type (default: torch.FloatTensor, i.e. CPU fp32).

    Returns:
      The same structure with every tensor converted to `ttype`; non-tensor
      leaves are returned unchanged.
    """
    if isinstance(state_dict, dict):
        converted = OrderedDict()
        for k, v in state_dict.items():
            # Bug fix: propagate ttype into the recursion — previously nested
            # values were always converted to the default FloatTensor,
            # ignoring the requested type.
            converted[k] = convert_state_dict_type(v, ttype)
        return converted
    elif isinstance(state_dict, list):
        return [convert_state_dict_type(v, ttype) for v in state_dict]
    elif torch.is_tensor(state_dict):
        return state_dict.type(ttype)
    else:
        return state_dict
|
def save_state(filename, args, model_state_dict, criterion, optimizer, lr_scheduler, num_updates, optim_history=None, extra_state=None):
    """Assemble a full checkpoint dict and persist it to `filename`.

    The checkpoint bundles the args, model weights, the optimizer history
    (with the current criterion/optimizer/lr-scheduler entry appended),
    criterion parameters when it has any, the optimizer state (unless
    disabled via --no-save-optimizer-state), and caller-supplied extra_state.
    """
    from fairseq import utils
    if (optim_history is None):
        optim_history = []
    if (extra_state is None):
        extra_state = {}
    state_dict = {'args': args, 'model': (model_state_dict if model_state_dict else {}), 'optimizer_history': (optim_history + [{'criterion_name': criterion.__class__.__name__, 'optimizer_name': optimizer.__class__.__name__, 'lr_scheduler_state': lr_scheduler.state_dict(), 'num_updates': num_updates}]), 'extra_state': extra_state}
    if utils.has_parameters(criterion):
        state_dict['criterion'] = criterion.state_dict()
    if (not args.no_save_optimizer_state):
        # Convert optimizer tensors to the default (CPU float) type for
        # portability across devices.
        state_dict['last_optimizer_state'] = convert_state_dict_type(optimizer.state_dict())
    try:
        # Prefer the FB path manager when available (supports non-POSIX
        # filesystems); fall back to a plain filesystem save.
        from fairseq.fb_pathmgr import fb_pathmgr
        with fb_pathmgr.open(filename, 'wb') as f:
            torch_persistent_save(state_dict, f)
    except (ModuleNotFoundError, ImportError):
        torch_persistent_save(state_dict, filename)
|
def _upgrade_state_dict(state):
    """Helper for upgrading old model checkpoints.

    Each block below migrates one historical checkpoint-format change,
    in order, mutating `state` in place.
    """
    from fairseq import models, registry, tasks
    # Oldest format: hoist the optimizer into optimizer_history.
    if ('optimizer_history' not in state):
        state['optimizer_history'] = [{'criterion_name': 'CrossEntropyCriterion', 'best_loss': state['best_loss']}]
        state['last_optimizer_state'] = state['optimizer']
        del state['optimizer']
        del state['best_loss']
    # Move loose epoch/batch_offset/val_loss fields into extra_state.
    if (('epoch' in state) and ('extra_state' not in state)):
        state['extra_state'] = {'epoch': state['epoch'], 'batch_offset': state['batch_offset'], 'val_loss': state['val_loss']}
        del state['epoch']
        del state['batch_offset']
        del state['val_loss']
    # Keep only the most recent optimizer state; drop per-entry copies.
    if ('optimizer' in state['optimizer_history'][(- 1)]):
        state['last_optimizer_state'] = state['optimizer_history'][(- 1)]['optimizer']
        for optim_hist in state['optimizer_history']:
            del optim_hist['optimizer']
    # Backfill the optimizer name for checkpoints that predate it.
    if ('optimizer_name' not in state['optimizer_history'][(- 1)]):
        state['optimizer_history'][(- 1)]['optimizer_name'] = 'FairseqNAG'
    # Move best_loss into the lr scheduler state.
    if ('lr_scheduler_state' not in state['optimizer_history'][(- 1)]):
        state['optimizer_history'][(- 1)]['lr_scheduler_state'] = {'best': state['optimizer_history'][(- 1)]['best_loss']}
        del state['optimizer_history'][(- 1)]['best_loss']
    # Backfill num_updates for checkpoints that predate it.
    if ('num_updates' not in state['optimizer_history'][(- 1)]):
        state['optimizer_history'][(- 1)]['num_updates'] = 0
    # Split a single max_positions into source/target limits.
    if (hasattr(state['args'], 'max_positions') and (not hasattr(state['args'], 'max_source_positions'))):
        state['args'].max_source_positions = state['args'].max_positions
        state['args'].max_target_positions = state['args'].max_positions
    # Synthesize the stateful train-iterator record from older fields.
    if ('train_iterator' not in state['extra_state']):
        state['extra_state']['train_iterator'] = {'epoch': state['extra_state']['epoch'], 'iterations_in_epoch': state['extra_state'].get('batch_offset', 0)}
    # Checkpoints from before tasks existed default to translation.
    if (not hasattr(state['args'], 'task')):
        state['args'].task = 'translation'
    # Fill in any arg defaults added since the checkpoint was written, for
    # the task, the architecture, and every other registry choice in use.
    registry.set_defaults(state['args'], tasks.TASK_REGISTRY[state['args'].task])
    registry.set_defaults(state['args'], models.ARCH_MODEL_REGISTRY[state['args'].arch])
    for (registry_name, REGISTRY) in registry.REGISTRIES.items():
        choice = getattr(state['args'], registry_name, None)
        if (choice is not None):
            cls = REGISTRY['registry'][choice]
            registry.set_defaults(state['args'], cls)
    return state
|
def prune_state_dict(state_dict, args):
    """Prune the given state_dict if desired for LayerDrop
    (https://arxiv.org/abs/1909.11556).

    Training with LayerDrop allows models to be robust to pruning at inference
    time. This function prunes state_dict to allow smaller models to be loaded
    from a larger model and re-maps the existing state_dict for this to occur.

    It's called by functions that load models from checkpoints and does not
    need to be called directly.
    """
    if not args or args.arch == 'ptt_transformer':
        return state_dict

    encoder_layers_to_keep = args.encoder_layers_to_keep if 'encoder_layers_to_keep' in vars(args) else None
    decoder_layers_to_keep = args.decoder_layers_to_keep if 'decoder_layers_to_keep' in vars(args) else None
    if not encoder_layers_to_keep and not decoder_layers_to_keep:
        return state_dict
    print('| Pruning model to specified layer configuration - this works best if the model was trained with LayerDrop')

    def create_pruning_pass(layers_to_keep, layer_name):
        # Map each kept original layer index to its new, compacted index.
        keep_layers = sorted(int(s) for s in layers_to_keep.split(','))
        mapping_dict = {str(orig): str(new) for new, orig in enumerate(keep_layers)}
        regex = re.compile('^{layer}.*\\.layers\\.(\\d+)'.format(layer=layer_name))
        return {'substitution_regex': regex, 'mapping_dict': mapping_dict}

    pruning_passes = []
    if encoder_layers_to_keep:
        pruning_passes.append(create_pruning_pass(encoder_layers_to_keep, 'encoder'))
    if decoder_layers_to_keep:
        pruning_passes.append(create_pruning_pass(decoder_layers_to_keep, 'decoder'))

    new_state_dict = {}
    for layer_name in state_dict.keys():
        match = re.search('\\.layers\\.(\\d+)\\.', layer_name)
        if not match:
            # Not a per-layer parameter: keep unchanged.
            new_state_dict[layer_name] = state_dict[layer_name]
            continue
        original_layer_number = match.group(1)
        for pruning_pass in pruning_passes:
            if (original_layer_number in pruning_pass['mapping_dict']
                    and pruning_pass['substitution_regex'].search(layer_name)):
                # Rewrite the layer index to its compacted value; parameters
                # of dropped layers simply never make it into the new dict.
                new_layer_number = pruning_pass['mapping_dict'][original_layer_number]
                substitution_match = pruning_pass['substitution_regex'].search(layer_name)
                new_state_key = (layer_name[:substitution_match.start(1)]
                                 + new_layer_number
                                 + layer_name[substitution_match.end(1):])
                new_state_dict[new_state_key] = state_dict[layer_name]
    return new_state_dict
|
def load_pretrained_component_from_model(component: Union[(FairseqEncoder, FairseqDecoder)], checkpoint: str):
    """
    Load a pretrained FairseqEncoder or FairseqDecoder from checkpoint into the
    provided `component` object. If state_dict fails to load, there may be a
    mismatch in the architecture of the corresponding `component` found in the
    `checkpoint` file.
    """
    if not os.path.exists(checkpoint):
        raise IOError('Model file not found: {}'.format(checkpoint))
    state = load_checkpoint_to_cpu(checkpoint)
    if isinstance(component, FairseqEncoder):
        component_type = 'encoder'
    elif isinstance(component, FairseqDecoder):
        component_type = 'decoder'
    else:
        raise ValueError('component to load must be either a FairseqEncoder or FairseqDecoder. Loading other component types are not supported.')
    # Keep only keys belonging to this component, stripping the
    # 'encoder.' / 'decoder.' prefix so they match the component's own names.
    prefix_len = len(component_type) + 1
    component_state_dict = OrderedDict(
        (key[prefix_len:], value)
        for key, value in state['model'].items()
        if key.startswith(component_type)
    )
    component.load_state_dict(component_state_dict, strict=True)
    return component
|
def verify_checkpoint_directory(save_dir: str) -> None:
    """Ensure `save_dir` exists and is writable by round-tripping a probe file.

    Raises:
      OSError: if the directory cannot be written to.
    """
    if not os.path.exists(save_dir):
        os.makedirs(save_dir, exist_ok=True)
    probe = os.path.join(save_dir, 'dummy')
    try:
        with open(probe, 'w'):
            pass
    except OSError as e:
        print('| Unable to access checkpoint save directory: {}'.format(save_dir))
        raise e
    else:
        # Writable: clean up the probe file.
        os.remove(probe)
|
@register_criterion('adaptive_loss')
class AdaptiveLoss(FairseqCriterion):
    """This is an implementation of the loss function accompanying the adaptive softmax approximation for
    graphical processing units (GPU), described in the paper "Efficient softmax approximation for GPUs"
    (http://arxiv.org/abs/1609.04309).
    """
    def __init__(self, args, task):
        super().__init__(args, task)
        # Fail fast on an unsupported DDP backend; the message names the
        # supported alternative.
        if (args.ddp_backend == 'c10d'):
            raise Exception('AdaptiveLoss is not compatible with the c10d version of DistributedDataParallel. Please use `--ddp-backend=no_c10d` instead.')
    def forward(self, model, sample, reduce=True):
        """Compute the loss for the given sample.

        Returns a tuple with three elements:
        1) the loss
        2) the sample size, which is used as the denominator for the gradient
        3) logging outputs to display while training
        """
        assert (hasattr(model.decoder, 'adaptive_softmax') and (model.decoder.adaptive_softmax is not None))
        adaptive_softmax = model.decoder.adaptive_softmax
        net_output = model(**sample['net_input'])
        orig_target = model.get_targets(sample, net_output)
        nsentences = orig_target.size(0)
        orig_target = orig_target.view((- 1))
        bsz = orig_target.size(0)
        # adaptive_softmax yields parallel lists of (logits, target) pairs,
        # one per cluster; entries may be None when a cluster has no targets.
        (logits, target) = adaptive_softmax(net_output[0], orig_target)
        assert (len(target) == len(logits))
        # Scalar accumulator when reducing, else one loss per target element.
        loss = net_output[0].new((1 if reduce else bsz)).zero_()
        for i in range(len(target)):
            if (target[i] is not None):
                assert ((target[i].min() >= 0) and (target[i].max() <= logits[i].size(1)))
                loss += F.cross_entropy(logits[i], target[i], ignore_index=self.padding_idx, reduction=('sum' if reduce else 'none'))
        orig = utils.strip_pad(orig_target, self.padding_idx)
        ntokens = orig.numel()
        # Gradient denominator: sentences when --sentence-avg, else tokens.
        sample_size = (sample['target'].size(0) if self.args.sentence_avg else ntokens)
        logging_output = {'loss': (utils.item(loss.data) if reduce else loss.data), 'ntokens': ntokens, 'nsentences': nsentences, 'sample_size': sample_size}
        return (loss, sample_size, logging_output)
    @staticmethod
    def aggregate_logging_outputs(logging_outputs):
        """Aggregate logging outputs from data parallel training."""
        loss_sum = sum((log.get('loss', 0) for log in logging_outputs))
        ntokens = sum((log.get('ntokens', 0) for log in logging_outputs))
        nsentences = sum((log.get('nsentences', 0) for log in logging_outputs))
        sample_size = sum((log.get('sample_size', 0) for log in logging_outputs))
        # Losses are reported in base 2 (bits); guarded against empty input.
        agg_output = {'loss': (((loss_sum / sample_size) / math.log(2)) if (sample_size > 0) else 0.0), 'nll_loss': (((loss_sum / sample_size) / math.log(2)) if (sample_size > 0) else 0.0), 'ntokens': ntokens, 'nsentences': nsentences, 'sample_size': sample_size}
        if (sample_size != ntokens):
            # Sentence-averaged training: also report per-token NLL.
            agg_output['nll_loss'] = (((loss_sum / ntokens) / math.log(2)) if (ntokens > 0) else 0.0)
        return agg_output
|
@register_criterion('binary_cross_entropy')
class BinaryCrossEntropyCriterion(FairseqCriterion):
    """Binary cross-entropy (with logits) over model outputs, optionally
    weighted per-target via the model's `get_target_weights` hook."""

    def __init__(self, args, task):
        super().__init__(args, task)

    def forward(self, model, sample, reduce=True):
        """Compute the loss for the given sample.

        Returns a tuple with three elements:
        1) the loss
        2) the sample size, which is used as the denominator for the gradient
        3) logging outputs to display while training
        """
        net_output = model(**sample['net_input'])
        logits = model.get_logits(net_output).float()
        target = model.get_targets(sample, net_output, expand_steps=False).float()
        # Optional per-target weighting supplied by the model.
        if hasattr(model, 'get_target_weights'):
            weights = model.get_target_weights(target, net_output)
            if torch.is_tensor(weights):
                weights = weights.float()
        else:
            weights = 1.0
        loss = F.binary_cross_entropy_with_logits(logits, target, reduce=False)
        loss = loss * weights
        if reduce:
            loss = loss.sum()
        sample_size = target.numel()
        logging_output = {
            'loss': utils.item(loss.data) if reduce else loss.data,
            'ntokens': sample_size,
            'nsentences': logits.size(0),
            'sample_size': sample_size,
        }
        return (loss, sample_size, logging_output)

    @staticmethod
    def aggregate_logging_outputs(logging_outputs):
        """Aggregate logging outputs from data parallel training."""
        loss_sum = sum(log.get('loss', 0) for log in logging_outputs)
        ntokens = sum(log.get('ntokens', 0) for log in logging_outputs)
        nsentences = sum(log.get('nsentences', 0) for log in logging_outputs)
        sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)
        # Guard the divisions against empty logging_outputs (sample_size or
        # ntokens == 0) — previously a ZeroDivisionError; this matches the
        # zero-guards used by AdaptiveLoss.aggregate_logging_outputs.
        agg_output = {
            'loss': ((loss_sum / sample_size) / math.log(2)) if sample_size > 0 else 0.0,
            'ntokens': ntokens,
            'nsentences': nsentences,
            'sample_size': sample_size,
        }
        if sample_size != ntokens:
            agg_output['nll_loss'] = ((loss_sum / ntokens) / math.log(2)) if ntokens > 0 else 0.0
        return agg_output
|
@register_criterion('composite_loss')
class CompositeLoss(FairseqCriterion):
    """This is a composite loss that, given a list of model outputs and a list of targets,
    computes an average of losses for each output-target pair"""

    @staticmethod
    def add_args(parser):
        """Add criterion-specific arguments to the parser."""
        parser.add_argument('--underlying-criterion', type=str, metavar='VAL', required=True, help='underlying criterion to use for the composite loss')

    @staticmethod
    def build_underlying_criterion(args, task):
        # Temporarily swap args.criterion so the task builds the *underlying*
        # criterion, then restore the original value before returning.
        saved_criterion = args.criterion
        args.criterion = args.underlying_criterion
        # Guard against infinite recursion: the composite loss cannot wrap itself.
        assert (saved_criterion != args.underlying_criterion)
        underlying_criterion = task.build_criterion(args)
        args.criterion = saved_criterion
        return underlying_criterion

    @classmethod
    def build_criterion(cls, args, task):
        underlying_criterion = CompositeLoss.build_underlying_criterion(args, task)

        class FakeModel(nn.Module):
            """Wraps one cached (output, extra) pair so the underlying criterion
            can treat it like a complete model: forward() replays the cached
            output and get_targets() returns the cached target."""

            def __init__(self, model, net_out, target):
                super().__init__()
                self.model = model
                self.net_out = net_out
                self.target = target

            def forward(self, **unused):
                # Replay the cached network output; inputs are ignored.
                return self.net_out

            def get_normalized_probs(self, net_output, log_probs, sample=None):
                # Delegate probability normalization to the real model.
                return self.model.get_normalized_probs(net_output, log_probs, sample=sample)

            def get_targets(self, *unused):
                return self.target

            @property
            def decoder(self):
                return self.model.decoder

        class _CompositeLoss(FairseqCriterion):

            def __init__(self, args, task, underlying_criterion):
                super().__init__(args, task)
                self.underlying_criterion = underlying_criterion

            def forward(self, model, sample, reduce=True):
                # One forward pass produces a list of outputs; apply the
                # underlying criterion to each (output, target) pair.
                net_outputs = model(**sample['net_input'])
                targets = sample['target']
                bsz = targets[0].size(0)
                loss = net_outputs[0][0].new((1 if reduce else bsz)).float().zero_()
                sample_size = 0
                logging_output = {}
                for (o, t) in zip(net_outputs[0], targets):
                    m = FakeModel(model, (o, net_outputs[1]), t)
                    # NOTE(review): this mutates the caller's sample dict and
                    # leaves sample['target'] set to the last target after the
                    # loop — confirm callers do not rely on the original value.
                    sample['target'] = t
                    (l, ss, logging_output) = self.underlying_criterion(m, sample, reduce)
                    loss += l
                    sample_size += ss
                # Average the accumulated losses and sample sizes over pairs.
                loss.div_(len(targets))
                sample_size /= len(targets)
                logging_output['loss'] = (utils.item(loss.data) if reduce else loss.data)
                return (loss, sample_size, logging_output)

            @staticmethod
            def aggregate_logging_outputs(logging_outputs):
                # Delegate aggregation to the underlying criterion's class.
                return underlying_criterion.__class__.aggregate_logging_outputs(logging_outputs)

        return _CompositeLoss(args, task, underlying_criterion)
|
@register_criterion('cross_entropy')
class CrossEntropyCriterion(FairseqCriterion):
    """Standard token-level cross-entropy (negative log-likelihood) criterion."""

    def __init__(self, args, task):
        super().__init__(args, task)

    def forward(self, model, sample, reduce=True):
        """Compute the loss for the given sample.

        Returns a tuple with three elements:
        1) the loss
        2) the sample size, which is used as the denominator for the gradient
        3) logging outputs to display while training
        """
        net_output = model(**sample['net_input'])
        loss, _ = self.compute_loss(model, net_output, sample, reduce=reduce)
        # Gradient denominator: sentences or tokens depending on --sentence-avg.
        sample_size = sample['target'].size(0) if self.args.sentence_avg else sample['ntokens']
        loss_data = utils.item(loss.data) if reduce else loss.data
        logging_output = {
            'loss': loss_data,
            'nll_loss': loss_data,
            'ntokens': sample['ntokens'],
            'nsentences': sample['target'].size(0),
            'sample_size': sample_size,
        }
        return loss, sample_size, logging_output

    def compute_loss(self, model, net_output, sample, reduce=True):
        """Return (loss, loss): NLL over non-pad targets, summed if `reduce`."""
        lprobs = model.get_normalized_probs(net_output, log_probs=True)
        lprobs = lprobs.view(-1, lprobs.size(-1))
        target = model.get_targets(sample, net_output).view(-1)
        loss = F.nll_loss(
            lprobs,
            target,
            ignore_index=self.padding_idx,
            reduction='sum' if reduce else 'none',
        )
        return loss, loss

    @staticmethod
    def aggregate_logging_outputs(logging_outputs):
        """Aggregate logging outputs from data parallel training."""
        loss_sum = sum(log.get('loss', 0) for log in logging_outputs)
        ntokens = sum(log.get('ntokens', 0) for log in logging_outputs)
        nsentences = sum(log.get('nsentences', 0) for log in logging_outputs)
        sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)
        agg_output = {
            'loss': (loss_sum / sample_size / math.log(2)) if sample_size > 0 else 0.0,
            'ntokens': ntokens,
            'nsentences': nsentences,
            'sample_size': sample_size,
        }
        if sample_size != ntokens:
            # Fix: guard against ntokens == 0, matching the zero-guards used by
            # the other criteria in this file (the 'loss' entry is guarded above).
            agg_output['nll_loss'] = (loss_sum / ntokens / math.log(2)) if ntokens > 0 else 0.0
        return agg_output
|
class FairseqCriterion(_Loss):
    """Base class for training criterions.

    Subclasses implement forward() and aggregate_logging_outputs().
    """

    def __init__(self, args, task):
        super().__init__()
        self.args = args
        self.task = task
        # Fall back to F.nll_loss's default ignore_index (-100) when the task
        # has no target dictionary to take a pad index from.
        if task.target_dictionary is not None:
            self.padding_idx = task.target_dictionary.pad()
        else:
            self.padding_idx = -100

    @staticmethod
    def add_args(parser):
        """Add criterion-specific arguments to the parser."""
        pass

    @classmethod
    def build_criterion(cls, args, task):
        return cls(args, task)

    def forward(self, model, sample, reduce=True):
        """Compute the loss for the given sample.

        Returns a tuple with three elements:
        1) the loss
        2) the sample size, which is used as the denominator for the gradient
        3) logging outputs to display while training
        """
        raise NotImplementedError

    @staticmethod
    def aggregate_logging_outputs(logging_outputs):
        """Aggregate logging outputs from data parallel training."""
        raise NotImplementedError

    @staticmethod
    def grad_denom(sample_sizes):
        """Compute the gradient denominator for a set of sample sizes."""
        return sum(sample_sizes)
|
def label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index=None, reduce=True):
    """Label-smoothed negative log-likelihood.

    Args:
        lprobs: log-probabilities, last dim is the vocabulary.
        target: target indices; may have one dim fewer than `lprobs`.
        epsilon: smoothing weight; the uniform term gets epsilon / vocab_size.
        ignore_index: positions whose target equals this are dropped.
        reduce: if True, sum over all kept positions.

    Returns:
        (loss, nll_loss) — smoothed loss and the raw NLL component.
    """
    if target.dim() == lprobs.dim() - 1:
        target = target.unsqueeze(-1)
    nll_loss = -lprobs.gather(dim=-1, index=target)
    # Uniform-smoothing term: negative sum of log-probs over the vocabulary.
    smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
    if ignore_index is None:
        nll_loss = nll_loss.squeeze(-1)
        smooth_loss = smooth_loss.squeeze(-1)
    else:
        keep = target.ne(ignore_index)
        nll_loss = nll_loss[keep]
        smooth_loss = smooth_loss[keep]
    if reduce:
        nll_loss = nll_loss.sum()
        smooth_loss = smooth_loss.sum()
    eps_i = epsilon / lprobs.size(-1)
    loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
    return loss, nll_loss
|
@register_criterion('label_smoothed_cross_entropy')
class LabelSmoothedCrossEntropyCriterion(FairseqCriterion):
    """Cross-entropy criterion with label smoothing (see label_smoothed_nll_loss)."""

    def __init__(self, args, task):
        super().__init__(args, task)
        self.eps = args.label_smoothing

    @staticmethod
    def add_args(parser):
        """Add criterion-specific arguments to the parser."""
        parser.add_argument(
            '--label-smoothing',
            default=0.0,
            type=float,
            metavar='D',
            help='epsilon for label smoothing, 0 means no label smoothing',
        )

    def forward(self, model, sample, reduce=True):
        """Compute the loss for the given sample.

        Returns a tuple with three elements:
        1) the loss
        2) the sample size, which is used as the denominator for the gradient
        3) logging outputs to display while training
        """
        net_output = model(**sample['net_input'])
        loss, nll_loss = self.compute_loss(model, net_output, sample, reduce=reduce)
        # Gradient denominator: sentences or tokens depending on --sentence-avg.
        if self.args.sentence_avg:
            sample_size = sample['target'].size(0)
        else:
            sample_size = sample['ntokens']
        logging_output = {
            'loss': utils.item(loss.data) if reduce else loss.data,
            'nll_loss': utils.item(nll_loss.data) if reduce else nll_loss.data,
            'ntokens': sample['ntokens'],
            'nsentences': sample['target'].size(0),
            'sample_size': sample_size,
        }
        return loss, sample_size, logging_output

    def compute_loss(self, model, net_output, sample, reduce=True):
        lprobs = model.get_normalized_probs(net_output, log_probs=True)
        lprobs = lprobs.view(-1, lprobs.size(-1))
        target = model.get_targets(sample, net_output).view(-1, 1)
        return label_smoothed_nll_loss(
            lprobs, target, self.eps,
            ignore_index=self.padding_idx, reduce=reduce,
        )

    @staticmethod
    def aggregate_logging_outputs(logging_outputs):
        """Aggregate logging outputs from data parallel training."""
        loss_sum = sum(log.get('loss', 0) for log in logging_outputs)
        nll_sum = sum(log.get('nll_loss', 0) for log in logging_outputs)
        ntokens = sum(log.get('ntokens', 0) for log in logging_outputs)
        nsentences = sum(log.get('nsentences', 0) for log in logging_outputs)
        sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)
        # Report both losses in bits; guard against empty aggregation.
        return {
            'loss': (loss_sum / sample_size / math.log(2)) if sample_size > 0 else 0.0,
            'nll_loss': (nll_sum / ntokens / math.log(2)) if ntokens > 0 else 0.0,
            'ntokens': ntokens,
            'nsentences': nsentences,
            'sample_size': sample_size,
        }
|
@register_criterion('label_smoothed_cross_entropy_with_reg')
class LabelSmoothedCrossEntropyCriterionWithReg(LabelSmoothedCrossEntropyCriterion):
    """Label-smoothed cross-entropy plus several optional regularization terms
    (hidden-state norm, expert diversity/consistency, decorrelation, pre-norm
    activation magnitude), each controlled by its own --reg-lambda-* weight.
    All regularizers are zeroed out once self.step exceeds --warmup-updates.
    """

    def __init__(self, args, task):
        super().__init__(args, task)
        # Per-term regularization weights; 0.0 disables the term entirely.
        self.reg_lambda_hidden = args.reg_lambda_hidden
        self.reg_lambda_div = args.reg_lambda_div
        self.reg_lambda_consis = args.reg_lambda_consis
        self.reg_lambda_decov = args.reg_lambda_decov
        self.reg_lambda_pre = args.reg_lambda_pre
        self.reg_lambda_fract = args.reg_lambda_fract
        # NOTE(review): self.step counts forward() calls on this criterion
        # instance, not optimizer updates — with gradient accumulation or
        # data-parallel replicas these differ; confirm the warmup semantics.
        self.step = 1
        self.max_step = args.warmup_updates
        # Ensures the forward hooks below are registered only once.
        self.hook_flag = False

    @staticmethod
    def add_args(parser):
        """Add criterion-specific arguments to the parser."""
        super(LabelSmoothedCrossEntropyCriterionWithReg, LabelSmoothedCrossEntropyCriterionWithReg).add_args(parser)
        parser.add_argument('--reg-lambda-hidden', default=0.0, type=float, metavar='D', help='weight for the regularization loss')
        parser.add_argument('--reg-lambda-div', default=0.0, type=float, metavar='D', help='weight for the regularization loss')
        parser.add_argument('--reg-lambda-consis', default=0.0, type=float, metavar='D', help='weight for the regularization loss')
        parser.add_argument('--reg-lambda-fract', default=0.0, type=float, metavar='D', help='weight for the regularization loss')
        parser.add_argument('--reg-lambda-decov', default=0.0, type=float, metavar='D', help='weight for the regularization loss')
        parser.add_argument('--reg-lambda-pre', default=0.0, type=float, metavar='D', help='weight for the regularization loss')

    def forward(self, model, sample, reduce=True):
        """Compute the loss for the given sample.

        Returns a tuple with three elements:
        1) the loss
        2) the sample size, which is used as the denominator for the gradient
        3) logging outputs to display while training
        """
        self.step += 1
        # Past warmup: permanently zero out every regularization weight.
        if (self.step > self.max_step):
            self.reg_lambda_hidden = 0.0
            self.reg_lambda_pre = 0.0
            self.reg_lambda_div = 0.0
            self.reg_lambda_consis = 0.0
            self.reg_lambda_decov = 0.0
            self.reg_lambda_fract = 0.0
        # Buffers the forward hooks append into; recreated on every call and
        # deleted after the regularizers are read below.
        self.decov_hooks = []
        self.pre_hooks = []

        def hook_fn(module, input, output):
            # Decorrelation term: penalize off-diagonal covariance of the
            # (mean-centered, batch-averaged) layer-norm inputs.
            if (self.reg_lambda_decov != 0.0):
                decov_tmp = input[0]
                decov_tmp = (decov_tmp - decov_tmp.mean(dim=0, keepdim=True).mean(dim=1, keepdim=True))
                decov_tmp = decov_tmp.mean(dim=0)
                decov_tmp = (torch.mm(decov_tmp, decov_tmp.transpose(1, 0)) / decov_tmp.shape[0])
                decov_tmp = (0.5 * (decov_tmp.norm() - (torch.diag(decov_tmp).unsqueeze(0) * decov_tmp).norm()))
                decov_tmp = torch.abs(decov_tmp).mean()
                self.decov_hooks.append(torch.abs(decov_tmp))
                del decov_tmp
            # Pre-norm activation-magnitude term.
            if (self.reg_lambda_pre != 0.0):
                pre_tmp = torch.abs(input[0]).sum(dim=(- 1)).mean()
                self.pre_hooks.append(pre_tmp)
                del pre_tmp
        if (not self.hook_flag):
            # Hooks are registered once, on the *encoder* layer norms only.
            # NOTE(review): pre_loss below normalizes by encoder+decoder layer
            # counts even though no decoder hooks exist — confirm intended.
            for layer in model.encoder.layers:
                layer.self_attn_layer_norm.register_forward_hook(hook_fn)
                layer.final_layer_norm.register_forward_hook(hook_fn)
            self.hook_flag = True
        net_output = model(**sample['net_input'], return_all_hiddens=True)
        (loss, nll_loss) = self.compute_loss(model, net_output, sample, reduce=reduce)
        # Average the per-hook decorrelation values, scaled by token count.
        if (self.reg_lambda_decov != 0.0):
            decov_loss = 0.0
            for (idx, decov_inp) in enumerate(self.decov_hooks):
                decov_loss += (decov_inp / len(self.decov_hooks))
            decov_loss = (decov_loss * sample['ntokens'])
        # Average the per-hook pre-norm magnitudes, scaled by batch size.
        if (self.reg_lambda_pre != 0.0):
            pre_loss = 0.0
            for (idx, pre_inp) in enumerate(self.pre_hooks):
                pre_loss += ((pre_inp / len(self.pre_hooks)) / (len(model.encoder.layers) + len(model.decoder.layers)))
            pre_loss = (pre_loss * sample['target'].size(0))
        del self.decov_hooks, self.pre_hooks
        torch.cuda.empty_cache()

        def check_mask_expert(norm):
            # Return the norm module's 'mask_expert' attribute, if any.
            if (norm is None):
                return None
            return norm.__dict__.get('mask_expert', None)
        # Collect expert masks from all encoder / decoder layer norms.
        mask_experts_enc = [check_mask_expert(model.encoder.layer_norm)]
        for layer in model.encoder.layers:
            mask_experts_enc.append(check_mask_expert(layer.final_layer_norm))
            mask_experts_enc.append(check_mask_expert(layer.self_attn_layer_norm))
        mask_experts_dec = [check_mask_expert(model.decoder.layer_norm)]
        for layer in model.decoder.layers:
            mask_experts_dec.append(check_mask_expert(layer.final_layer_norm))
            mask_experts_dec.append(check_mask_expert(layer.self_attn_layer_norm))
            mask_experts_dec.append(check_mask_expert(layer.encoder_attn_layer_norm))
        mask_experts_enc = list(filter((lambda x: (x is not None)), mask_experts_enc))
        mask_experts_dec = list(filter((lambda x: (x is not None)), mask_experts_dec))
        (mask_experts_enc_flag, mask_experts_dec_flag) = (len(mask_experts_enc), len(mask_experts_dec))
        # assumes all collected mask_expert tensors share a common shape — TODO confirm
        if mask_experts_enc:
            mask_experts_enc = torch.stack(mask_experts_enc)
        if mask_experts_dec:
            mask_experts_dec = torch.stack(mask_experts_dec)
        # div: coefficient of variation across dim 1; consis: across dim 0.
        (div_loss, consis_loss) = (None, None)
        if mask_experts_enc_flag:
            div_loss = (mask_experts_enc.std(dim=1) / mask_experts_enc.mean(dim=1)).norm(dim=(- 1)).mean(dim=(- 1))
            consis_loss = (mask_experts_enc.std(dim=0) / mask_experts_enc.mean(dim=0)).norm(dim=(- 1)).mean(dim=(- 1))
        if mask_experts_dec_flag:
            if (div_loss is None):
                # NOTE(review): when only decoder experts exist, consis_loss is
                # still None here, so the `+=` below would raise — confirm
                # encoder experts are always present whenever decoder ones are.
                div_loss = (mask_experts_dec.std(dim=1) / mask_experts_dec.mean(dim=1)).norm(dim=(- 1)).mean(dim=(- 1))
                consis_loss += (mask_experts_dec.std(dim=0) / mask_experts_dec.mean(dim=0)).norm(dim=(- 1)).mean(dim=(- 1))
            else:
                div_loss += (mask_experts_dec.std(dim=1) / mask_experts_dec.mean(dim=1)).norm(dim=(- 1))
                consis_loss += (mask_experts_dec.std(dim=0) / mask_experts_dec.mean(dim=0)).norm(dim=(- 1)).mean(dim=(- 1))
        sample_size = (sample['target'].size(0) if self.args.sentence_avg else sample['ntokens'])
        reg_loss = 0.0
        logging_output = dict()
        # Hidden-state norm regularizer: penalize deviation of each layer's
        # hidden-state norms from those of the first (index-1) layer state.
        (dec_len, enc_len) = (len(net_output[1]['inner_states']), len(net_output[1]['encoder_states']))
        for inner_enc in net_output[1]['encoder_states'][2:]:
            reg_loss += ((inner_enc.norm(dim=(- 1)) - net_output[1]['encoder_states'][1].norm(dim=(- 1))).abs().sum(dim=1).mean(dim=0) / (enc_len - 1))
        for inner_dec in net_output[1]['inner_states'][2:]:
            reg_loss += ((inner_dec.norm(dim=(- 1)) - net_output[1]['inner_states'][1].norm(dim=(- 1))).abs().sum(dim=1).mean(dim=0) / (dec_len - 1))
        # Add each enabled regularizer (weighted) to the loss and log it.
        if (self.reg_lambda_decov != 0.0):
            logging_output['reg_loss_decov'] = utils.item(decov_loss.data)
            loss += (self.reg_lambda_decov * decov_loss)
        if (self.reg_lambda_pre != 0.0):
            logging_output['reg_loss_pre'] = utils.item(pre_loss.data)
            loss += (self.reg_lambda_pre * pre_loss)
        if ((reg_loss != 0.0) and (self.reg_lambda_hidden != 0.0)):
            logging_output['reg_loss_hidden'] = utils.item(reg_loss.data)
            loss += (self.reg_lambda_hidden * reg_loss)
        if ((div_loss is not None) and (self.reg_lambda_div != 0.0)):
            div_loss = (div_loss * sample_size)
            logging_output['reg_loss_div'] = utils.item(div_loss.data)
            loss += (self.reg_lambda_div * div_loss)
        if ((consis_loss is not None) and (self.reg_lambda_consis != 0.0)):
            consis_loss = (consis_loss * sample_size)
            logging_output['reg_loss_consis'] = utils.item(consis_loss.data)
            loss += (self.reg_lambda_consis * consis_loss)
        logging_output.update({'loss': (utils.item(loss.data) if reduce else loss.data), 'nll_loss': (utils.item(nll_loss.data) if reduce else nll_loss.data), 'ntokens': sample['ntokens'], 'nsentences': sample['target'].size(0), 'sample_size': sample_size})
        return (loss, sample_size, logging_output)

    @staticmethod
    def aggregate_logging_outputs(logging_outputs):
        """Aggregate logging outputs from data parallel training."""
        ntokens = sum((log.get('ntokens', 0) for log in logging_outputs))
        nsentences = sum((log.get('nsentences', 0) for log in logging_outputs))
        sample_size = sum((log.get('sample_size', 0) for log in logging_outputs))
        # All entries are normalized by sample_size and reported in bits.
        return {'loss': (((sum((log.get('loss', 0) for log in logging_outputs)) / sample_size) / math.log(2)) if (sample_size > 0) else 0.0), 'nll_loss': (((sum((log.get('nll_loss', 0) for log in logging_outputs)) / ntokens) / math.log(2)) if (ntokens > 0) else 0.0), 'reg_loss_hidden': (((sum((log.get('reg_loss_hidden', 0) for log in logging_outputs)) / sample_size) / math.log(2)) if (sample_size > 0) else 0.0), 'reg_loss_div': (((sum((log.get('reg_loss_div', 0) for log in logging_outputs)) / sample_size) / math.log(2)) if (sample_size > 0) else 0.0), 'reg_loss_consis': (((sum((log.get('reg_loss_consis', 0) for log in logging_outputs)) / sample_size) / math.log(2)) if (sample_size > 0) else 0.0), 'reg_loss_decov': (((sum((log.get('reg_loss_decov', 0) for log in logging_outputs)) / sample_size) / math.log(2)) if (sample_size > 0) else 0.0), 'reg_loss_pre': (((sum((log.get('reg_loss_pre', 0) for log in logging_outputs)) / sample_size) / math.log(2)) if (sample_size > 0) else 0.0), 'reg_loss_fract': (((sum((log.get('reg_loss_fract', 0) for log in logging_outputs)) / sample_size) / math.log(2)) if (sample_size > 0) else 0.0), 'ntokens': ntokens, 'nsentences': nsentences, 'sample_size': sample_size}
|
def compute_cross_entropy_loss(logits, targets, ignore_index=(- 100)):
    """
    Function to compute the cross entropy loss. The default value of
    ignore_index is the same as the default value for F.cross_entropy in
    pytorch.
    """
    assert logits.size(0) == targets.size((- 1)), "Logits and Targets tensor shapes don't match up"
    # Compute log-probs in float32 for numerical stability, then sum the
    # negative log-likelihood over all non-ignored targets.
    log_probs = F.log_softmax(logits, (- 1), dtype=torch.float32)
    return F.nll_loss(log_probs, targets, reduction='sum', ignore_index=ignore_index)
|
@register_criterion('legacy_masked_lm_loss')
class LegacyMaskedLmLoss(FairseqCriterion):
    """
    Implementation for the loss used in masked language model (MLM) training.
    This optionally also computes the next sentence prediction (NSP) loss and
    adds it to the overall loss based on the specified args. There are three
    cases to consider:
    1) Generic MLM training without NSP loss. In this case sentence_targets
       and sentence_logits are both None.
    2) BERT training without NSP loss. In this case sentence_targets is
       not None but sentence_logits is None and we should not be computing
       a sentence level loss.
    3) BERT training with NSP loss. In this case both sentence_targets and
       sentence_logits are not None and we should be computing a sentence
       level loss. The weight of the sentence level loss is specified as
       an argument.
    """

    def __init__(self, args, task):
        super().__init__(args, task)

    @staticmethod
    def add_args(parser):
        """Args for MaskedLM Loss"""
        parser.add_argument(
            '--masked-lm-only',
            default=False,
            action='store_true',
            help='compute MLM loss only',
        )
        parser.add_argument(
            '--nsp-loss-weight',
            default=1.0,
            type=float,
            help='weight for next sentence prediction loss (default 1)',
        )

    def forward(self, model, sample, reduce=True):
        """Compute the loss for the given sample.
        Returns a tuple with three elements:
        1) the loss
        2) the sample size, which is used as the denominator for the gradient
        3) logging outputs to display while training
        """
        lm_logits, output_metadata = model(**sample['net_input'])
        # Token-level MLM loss, normalized by the number of non-pad targets.
        lm_logits = lm_logits.view(-1, lm_logits.size(-1))
        lm_targets = sample['lm_target'].view(-1)
        lm_loss = compute_cross_entropy_loss(lm_logits, lm_targets, self.padding_idx)
        ntokens = utils.strip_pad(lm_targets, self.padding_idx).numel()
        loss = lm_loss / ntokens
        nsentences = sample['nsentences']
        sentence_loss = None
        if not self.args.masked_lm_only:
            sentence_logits = output_metadata['sentence_logits']
            sentence_targets = sample['sentence_target'].view(-1)
            nsentences = sentence_targets.size(0)
            # Case 2 above: targets without logits means no NSP loss.
            if sentence_logits is not None:
                sentence_loss = compute_cross_entropy_loss(sentence_logits, sentence_targets)
                loss += self.args.nsp_loss_weight * (sentence_loss / nsentences)
        # Loss is already normalized above, so the gradient denominator is 1.
        sample_size = 1
        logging_output = {
            'loss': utils.item(loss.data) if reduce else loss.data,
            'lm_loss': utils.item(lm_loss.data) if reduce else lm_loss.data,
            'sentence_loss': (
                (utils.item(sentence_loss.data) if reduce else sentence_loss.data)
                if sentence_loss is not None
                else 0.0
            ),
            'ntokens': ntokens,
            'nsentences': nsentences,
            'sample_size': sample_size,
        }
        return loss, sample_size, logging_output

    @staticmethod
    def aggregate_logging_outputs(logging_outputs):
        """Aggregate logging outputs from data parallel training."""
        def total(key):
            return sum(log.get(key, 0) for log in logging_outputs)

        lm_loss_sum = total('lm_loss')
        sentence_loss_sum = total('sentence_loss')
        ntokens = total('ntokens')
        nsentences = total('nsentences')
        sample_size = total('sample_size')
        agg_loss = total('loss')
        # Per-token LM bits double as the reported nll_loss.
        lm_bits = (lm_loss_sum / ntokens / math.log(2)) if ntokens > 0 else 0.0
        agg_output = {
            'loss': (agg_loss / sample_size / math.log(2)) if sample_size > 0 else 0.0,
            'lm_loss': lm_bits,
            'sentence_loss': (sentence_loss_sum / nsentences / math.log(2)) if nsentences > 0 else 0.0,
            'nll_loss': lm_bits,
            'ntokens': ntokens,
            'nsentences': nsentences,
            'sample_size': sample_size,
        }
        return agg_output
|
@register_criterion('masked_lm')
class MaskedLmLoss(FairseqCriterion):
    """
    Implementation for the loss used in masked language model (MLM) training.
    """

    def __init__(self, args, task):
        super().__init__(args, task)

    def forward(self, model, sample, reduce=True):
        """Compute the loss for the given sample.
        Returns a tuple with three elements:
        1) the loss
        2) the sample size, which is used as the denominator for the gradient
        3) logging outputs to display while training
        """
        # Only masked (non-pad) positions contribute to the loss.
        masked_tokens = sample['target'].ne(self.padding_idx)
        sample_size = masked_tokens.int().sum().item()
        if sample_size == 0:
            # No masked tokens in this batch: pass None so the model computes
            # logits for all positions — presumably the model treats None as
            # "no masking"; TODO confirm against the model's contract.
            masked_tokens = None
        logits = model(**sample['net_input'], masked_tokens=masked_tokens)[0]
        targets = model.get_targets(sample, [logits])
        if sample_size != 0:
            targets = targets[masked_tokens]
        loss = F.nll_loss(
            F.log_softmax(logits.view(-1, logits.size(-1)), dim=-1, dtype=torch.float32),
            targets.view(-1),
            reduction='sum',
            ignore_index=self.padding_idx,
        )
        logging_output = {
            'loss': utils.item(loss.data) if reduce else loss.data,
            'nll_loss': utils.item(loss.data) if reduce else loss.data,
            'ntokens': sample['ntokens'],
            'nsentences': sample['nsentences'],
            'sample_size': sample_size,
        }
        return loss, sample_size, logging_output

    @staticmethod
    def aggregate_logging_outputs(logging_outputs):
        """Aggregate logging outputs from data parallel training."""
        loss = sum(log.get('loss', 0) for log in logging_outputs)
        nll_loss = sum(log.get('nll_loss', 0) for log in logging_outputs)
        ntokens = sum(log.get('ntokens', 0) for log in logging_outputs)
        nsentences = sum(log.get('nsentences', 0) for log in logging_outputs)
        sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)
        # Fix: guard both divisions on sample_size (the actual divisor).
        # sample_size can legitimately be 0 when no batch contains masked
        # tokens — forward() explicitly handles that case — and the old
        # nll_loss guard tested ntokens instead of sample_size.
        agg_output = {
            'loss': (loss / sample_size / math.log(2)) if sample_size > 0 else 0.0,
            'nll_loss': (nll_loss / sample_size / math.log(2)) if sample_size > 0 else 0.0,
            'ntokens': ntokens,
            'nsentences': nsentences,
            'sample_size': sample_size,
        }
        return agg_output
|
@register_criterion('nat_loss')
class LabelSmoothedDualImitationCriterion(FairseqCriterion):
    """Criterion for non-autoregressive translation models: sums a set of
    named sub-losses (mask/word insertion, word deletion, length prediction)
    plus any precomputed '*-loss' entries the model emits directly."""

    @staticmethod
    def add_args(parser):
        """Add criterion-specific arguments to the parser."""
        parser.add_argument(
            '--label-smoothing',
            default=0.0,
            type=float,
            metavar='D',
            help='epsilon for label smoothing, 0 means no label smoothing',
        )

    def _compute_loss(self, outputs, targets, masks=None, label_smoothing=0.0, name='loss', factor=1.0):
        """
        outputs: batch x len x d_model
        targets: batch x len
        masks: batch x len

        policy_logprob: if there is some policy
        depends on the likelihood score as rewards.
        """
        def mean_ds(x: Tensor, dim=None) -> Tensor:
            return (x.float().mean().type_as(x) if (dim is None) else x.float().mean(dim).type_as(x))

        if masks is not None:
            outputs, targets = outputs[masks], targets[masks]
        # Fix: guard masks.any() with a None check — _compute_loss is also
        # called without masks (e.g. for the length loss), and `None.any()`
        # would raise AttributeError. With masks=None we take the else branch
        # and compute the loss over all positions.
        if masks is not None and not masks.any():
            # Nothing to predict for this sub-loss in this batch.
            nll_loss = torch.tensor(0)
            loss = nll_loss
        else:
            logits = F.log_softmax(outputs, dim=-1)
            if targets.dim() == 1:
                losses = F.nll_loss(logits, targets.to(logits.device), reduction='none')
            else:
                # Soft targets: KL divergence, summed over the vocabulary dim.
                losses = F.kl_div(logits, targets.to(logits.device), reduction='none')
                losses = losses.sum(-1)
            nll_loss = mean_ds(losses)
            if label_smoothing > 0:
                loss = nll_loss * (1 - label_smoothing) - mean_ds(logits) * label_smoothing
            else:
                loss = nll_loss
        loss = loss * factor
        return {'name': name, 'loss': loss, 'nll_loss': nll_loss, 'factor': factor}

    def _custom_loss(self, loss, name='loss'):
        """Wrap a precomputed loss tensor in the standard sub-loss dict."""
        return {'name': name, 'loss': loss, 'factor': 1}

    def forward(self, model, sample, reduce=True):
        """Compute the loss for the given sample.
        Returns a tuple with three elements:
        1) the loss
        2) the sample size, which is used as the denominator for the gradient
        3) logging outputs to display while training
        """
        nsentences, ntokens = sample['nsentences'], sample['ntokens']
        src_tokens, src_lengths = sample['net_input']['src_tokens'], sample['net_input']['src_lengths']
        tgt_tokens, prev_output_tokens = sample['target'], sample['prev_target']
        outputs = model(src_tokens, src_lengths, prev_output_tokens, tgt_tokens)
        losses = []
        # Mask-insertion loss.
        if 'mask_ins_out' in outputs:
            mask_ins_losses = self._compute_loss(
                outputs['mask_ins_out'], outputs['mask_ins_tgt'], outputs['mask_ins_mask'],
                name='m_ins-loss',
                factor=1 if 'mask_ins_w' not in outputs else outputs['mask_ins_w'],
            )
            losses += [mask_ins_losses]
        # Word-insertion loss (label-smoothed); its nll is the one reported.
        # NOTE(review): nll_loss is unbound if 'word_ins_out' is absent —
        # confirm models always produce it.
        if 'word_ins_out' in outputs:
            word_ins_losses = self._compute_loss(
                outputs['word_ins_out'], outputs['word_ins_tgt'], outputs['word_ins_mask'],
                self.args.label_smoothing,
                name='w_ins-loss',
                factor=1 if 'word_ins_w' not in outputs else outputs['word_ins_w'],
            )
            losses += [word_ins_losses]
            nll_loss = word_ins_losses['nll_loss']
        # Word-deletion loss (small fixed smoothing).
        if 'word_del_out' in outputs:
            word_del_losses = self._compute_loss(
                outputs['word_del_out'], outputs['word_del_tgt'], outputs['word_del_mask'],
                0.01,
                name='w_del-loss',
                factor=1 if 'word_del_w' not in outputs else outputs['word_del_w'],
            )
            losses += [word_del_losses]
        # Length-prediction loss (no mask is passed here).
        if 'length_out' in outputs:
            length_losses = self._compute_loss(
                outputs['length_out'], outputs['length_tgt'],
                name='len-loss',
                factor=1 if 'length_w' not in outputs else outputs['length_w'],
            )
            losses += [length_losses]
        # Any extra precomputed '*-loss' entries from the model.
        for w in outputs:
            if '-loss' in w:
                losses += [self._custom_loss(outputs[w], w)]
        loss = sum(l['loss'] for l in losses)
        sample_size = 1
        logging_output = {
            'loss': utils.item(loss.data) if reduce else loss.data,
            'nll_loss': utils.item(nll_loss.data) if reduce else nll_loss.data,
            'ntokens': ntokens,
            'nsentences': nsentences,
            'sample_size': sample_size,
        }
        for l in losses:
            # Fix: the unreduced branch previously indexed l[['loss']] (a list
            # key), which raises "TypeError: unhashable type: 'list'" whenever
            # reduce=False — it must be l['loss'].
            logging_output[l['name']] = (
                utils.item(l['loss'].data / l['factor'])
                if reduce
                else (l['loss'].data / l['factor'])
            )
        return loss, sample_size, logging_output

    @staticmethod
    def aggregate_logging_outputs(logging_outputs):
        """Aggregate logging outputs from data parallel training."""
        ntokens = sum(log.get('ntokens', 0) for log in logging_outputs)
        nsentences = sum(log.get('nsentences', 0) for log in logging_outputs)
        sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)
        loss = sum(log.get('loss', 0) for log in logging_outputs)
        nll_loss = sum(log.get('nll_loss', 0) for log in logging_outputs)
        results = {
            'loss': (loss / sample_size / math.log(2)) if sample_size > 0 else 0.0,
            'nll_loss': (nll_loss / sample_size / math.log(2)) if sample_size > 0 else 0.0,
            'ntokens': ntokens,
            'nsentences': nsentences,
            'sample_size': sample_size,
        }
        # Also aggregate every named '*-loss' entry under its base name.
        for key in logging_outputs[0]:
            if key[-5:] == '-loss':
                results[key[:-5]] = (
                    (sum(log.get(key, 0) for log in logging_outputs) / sample_size / math.log(2))
                    if sample_size > 0
                    else 0.0
                )
        return results
|
@register_criterion('sentence_prediction')
class SentencePredictionCriterion(FairseqCriterion):
    """Sentence-level prediction criterion: classification (summed NLL) by
    default, or regression (summed MSE) when --regression-target is set."""

    @staticmethod
    def add_args(parser):
        parser.add_argument(
            '--save-predictions',
            metavar='FILE',
            help='file to save predictions to',
        )

    def forward(self, model, sample, reduce=True):
        """Compute the loss for the given sample.

        Returns a tuple with three elements:
        1) the loss
        2) the sample size, which is used as the denominator for the gradient
        3) logging outputs to display while training
        """
        assert (
            hasattr(model, 'classification_heads')
            and ('sentence_classification_head' in model.classification_heads)
        ), 'model must provide sentence classification head for --criterion=sentence_prediction'
        logits, _ = model(
            **sample['net_input'],
            features_only=True,
            classification_head_name='sentence_classification_head',
        )
        targets = model.get_targets(sample, [logits]).view(-1)
        sample_size = targets.numel()
        if self.args.regression_target:
            logits = logits.squeeze().float()
            targets = targets.float()
            loss = F.mse_loss(logits, targets, reduction='sum')
        else:
            lprobs = F.log_softmax(logits, dim=-1, dtype=torch.float32)
            loss = F.nll_loss(lprobs, targets, reduction='sum')
        logging_output = {
            'loss': utils.item(loss.data) if reduce else loss.data,
            'ntokens': sample['ntokens'],
            'nsentences': sample_size,
            'sample_size': sample_size,
        }
        # Track classification accuracy (not meaningful for regression).
        if not self.args.regression_target:
            preds = logits.max(dim=1)[1]
            logging_output.update(ncorrect=(preds == targets).sum().item())
        return loss, sample_size, logging_output

    @staticmethod
    def aggregate_logging_outputs(logging_outputs):
        """Aggregate logging outputs from data parallel training."""
        loss_sum = sum(log.get('loss', 0) for log in logging_outputs)
        ntokens = sum(log.get('ntokens', 0) for log in logging_outputs)
        nsentences = sum(log.get('nsentences', 0) for log in logging_outputs)
        sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)
        agg_output = {
            'loss': loss_sum / sample_size / math.log(2),
            'ntokens': ntokens,
            'nsentences': nsentences,
            'sample_size': sample_size,
        }
        if logging_outputs and ('ncorrect' in logging_outputs[0]):
            ncorrect = sum(log.get('ncorrect', 0) for log in logging_outputs)
            agg_output.update(accuracy=ncorrect / nsentences)
        if sample_size != ntokens:
            agg_output['nll_loss'] = loss_sum / ntokens / math.log(2)
        return agg_output
|
@register_criterion('sentence_ranking')
class SentenceRankingCriterion(FairseqCriterion):
def __init__(self, args, task):
    super().__init__(args, task)
    # Optionally stream predictions to a file for later inspection.
    self.prediction_h = None
    if self.args.save_predictions is not None:
        self.prediction_h = open(self.args.save_predictions, 'w')
def __del__(self):
    # Close the predictions file, if one was opened in __init__.
    if self.prediction_h is None:
        return
    self.prediction_h.close()
@staticmethod
def add_args(parser):
parser.add_argument('--save-predictions', metavar='FILE', help='file to save predictions to')
def forward(self, model, sample, reduce=True):
'Compute ranking loss for the given sample.\n\n Returns a tuple with three elements:\n 1) the loss\n 2) the sample size, which is used as the denominator for the gradient\n 3) logging outputs to display while training\n '
scores = []
for idx in range(self.args.num_classes):
(score, _) = model(**sample['net_input{idx}'.format(idx=(idx + 1))], classification_head_name='sentence_classification_head')
scores.append(score)
logits = torch.cat(scores, dim=1)
sample_size = logits.size(0)
if ('target' in sample):
targets = model.get_targets(sample, [logits]).view((- 1))
loss = F.nll_loss(F.log_softmax(logits, dim=(- 1), dtype=torch.float32), targets, reduction='sum')
else:
targets = None
loss = torch.tensor(0.0, requires_grad=True)
if (self.prediction_h is not None):
preds = logits.argmax(dim=1)
for (i, (id, pred)) in enumerate(zip(sample['id'].tolist(), preds.tolist())):
if (targets is not None):
label = targets[i].item()
print('{}\t{}\t{}'.format(id, pred, label), file=self.prediction_h)
else:
print('{}\t{}'.format(id, pred), file=self.prediction_h)
logging_output = {'loss': (utils.item(loss.data) if reduce else loss.data), 'ntokens': sample['ntokens'], 'nsentences': sample_size, 'sample_size': sample_size}
if (targets is not None):
logging_output.update(ncorrect=(logits.max(dim=1)[1] == targets).sum().item())
return (loss, sample_size, logging_output)
@staticmethod
def aggregate_logging_outputs(logging_outputs):
'Aggregate logging outputs from data parallel training.'
loss_sum = sum((log.get('loss', 0) for log in logging_outputs))
ntokens = sum((log.get('ntokens', 0) for log in logging_outputs))
nsentences = sum((log.get('nsentences', 0) for log in logging_outputs))
sample_size = sum((log.get('sample_size', 0) for log in logging_outputs))
agg_output = {'loss': ((loss_sum / sample_size) / math.log(2)), 'ntokens': ntokens, 'nsentences': nsentences, 'sample_size': sample_size}
if ((len(logging_outputs) > 0) and ('ncorrect' in logging_outputs[0])):
ncorrect = sum((log.get('ncorrect', 0) for log in logging_outputs))
agg_output.update(accuracy=(ncorrect / nsentences))
if (sample_size != ntokens):
agg_output['nll_loss'] = ((loss_sum / ntokens) / math.log(2))
return agg_output
|
class RawAudioDataset(FairseqDataset):
    """Base dataset for raw (waveform) audio examples.

    Subclasses populate ``self.sizes`` and implement ``__getitem__`` to return
    ``{'id': ..., 'source': 1-D float tensor}`` samples.
    """

    def __init__(self, sample_rate, max_sample_size=None, min_sample_size=None, shuffle=True, min_length=0):
        super().__init__()
        self.sample_rate = sample_rate  # target sample rate for all examples
        self.sizes = []  # per-example lengths; filled in by subclasses
        self.max_sample_size = (max_sample_size if (max_sample_size is not None) else sys.maxsize)
        # If no explicit minimum is given, min == max, which disables the
        # random crop-size sampling in collater().
        self.min_sample_size = (min_sample_size if (min_sample_size is not None) else self.max_sample_size)
        self.min_length = min_length
        self.shuffle = shuffle

    def __getitem__(self, index):
        raise NotImplementedError()

    def __len__(self):
        return len(self.sizes)

    def postprocess(self, feats, curr_sample_rate):
        """Convert *feats* to a mono 1-D tensor at ``self.sample_rate``."""
        def resample(x, factor):
            return F.interpolate(x.view(1, 1, (-1)), scale_factor=factor).squeeze()
        if (feats.dim() == 2):
            # Average the trailing axis down to mono.
            # NOTE(review): assumes channels are the last dimension — confirm
            # against the audio reader used by subclasses.
            feats = feats.mean((-1))
        if (curr_sample_rate != self.sample_rate):
            factor = (self.sample_rate / curr_sample_rate)
            feats = resample(feats, factor)
        assert (feats.dim() == 1), feats.dim()
        return feats

    def crop_to_max_size(self, wav, target_size):
        """Randomly crop *wav* down to *target_size* samples (no-op if it is
        already short enough)."""
        size = len(wav)
        diff = (size - target_size)
        if (diff <= 0):
            return wav
        start = np.random.randint(0, (diff + 1))
        end = ((size - diff) + start)
        return wav[start:end]

    def collater(self, samples):
        """Collate samples into a batch, cropping all sources to one length.

        Returns an empty dict for empty batches or when the common length is
        below ``min_length``.
        """
        samples = [s for s in samples if ((s['source'] is not None) and (len(s['source']) > 0))]
        if (len(samples) == 0):
            return {}
        sources = [s['source'] for s in samples]
        sizes = [len(s) for s in sources]
        # Common length: the shortest source, capped at max_sample_size.
        target_size = min(min(sizes), self.max_sample_size)
        if (target_size < self.min_length):
            return {}
        if (self.min_sample_size < target_size):
            # Randomly shrink the crop size (data augmentation).
            target_size = np.random.randint(self.min_sample_size, (target_size + 1))
        collated_sources = sources[0].new(len(sources), target_size)
        for (i, (source, size)) in enumerate(zip(sources, sizes)):
            diff = (size - target_size)
            assert (diff >= 0)
            if (diff == 0):
                collated_sources[i] = source
            else:
                collated_sources[i] = self.crop_to_max_size(source, target_size)
        return {'id': torch.LongTensor([s['id'] for s in samples]), 'net_input': {'source': collated_sources}}

    def num_tokens(self, index):
        return self.size(index)

    def size(self, index):
        """Return an example's size as a float or tuple. This value is used when
        filtering a dataset with ``--max-positions``."""
        return min(self.sizes[index], self.max_sample_size)

    def ordered_indices(self):
        """Return an ordered list of indices. Batches will be constructed based
        on this order."""
        if self.shuffle:
            order = [np.random.permutation(len(self))]
        else:
            order = [np.arange(len(self))]
        # lexsort uses the LAST key as primary: sort by size, with the random
        # permutation (or identity) breaking ties.
        order.append(self.sizes)
        return np.lexsort(order)
|
class FileAudioDataset(RawAudioDataset):
    """Raw-audio dataset backed by a TSV manifest.

    The first manifest line is the root directory; each following line holds a
    file path relative to that root and the example's length in samples,
    separated by a tab.
    """

    def __init__(self, manifest_path, sample_rate, max_sample_size=None, min_sample_size=None, shuffle=True, min_length=0):
        super().__init__(
            sample_rate=sample_rate,
            max_sample_size=max_sample_size,
            min_sample_size=min_sample_size,
            shuffle=shuffle,
            min_length=min_length,
        )
        self.fnames = []
        with open(manifest_path, 'r') as manifest:
            self.root_dir = manifest.readline().strip()
            for row in manifest:
                fields = row.strip().split('\t')
                assert (len(fields) == 2), row
                self.fnames.append(fields[0])
                self.sizes.append(int(fields[1]))

    def __getitem__(self, index):
        # Imported lazily so the module loads without soundfile installed.
        import soundfile as sf
        path = os.path.join(self.root_dir, self.fnames[index])
        (wav, curr_sample_rate) = sf.read(path)
        feats = torch.from_numpy(wav).float()
        return {'id': index, 'source': self.postprocess(feats, curr_sample_rate)}
|
def backtranslate_samples(samples, collate_fn, generate_fn, cuda=True):
    """Backtranslate a list of samples.

    Each input sample's 'source' becomes the 'target' of the returned sample,
    and the best backtranslation hypothesis becomes the new 'source'. For
    example, ``[{'id': 1, 'source': 'hallo welt'}]`` turns into
    ``[{'id': 1, 'source': 'hello world', 'target': 'hallo welt'}]``.

    Args:
        samples (List[dict]): samples to backtranslate; each must carry a
            'source' key.
        collate_fn (callable): collates samples into a mini-batch.
        generate_fn (callable): generates backtranslation hypotheses for a
            collated batch.
        cuda (bool): move the batch to GPU before generation (default: True).

    Returns:
        List[dict]: updated samples with backtranslated 'source' values.
    """
    batch = collate_fn(samples)
    net_input = utils.move_to_cuda(batch) if cuda else batch
    generated = generate_fn(net_input)
    orig_source_by_id = {s['id']: s['source'] for s in samples}
    backtranslated = []
    for (sample_id, hypos) in zip(batch['id'], generated):
        sample_id = sample_id.item()
        backtranslated.append({
            'id': sample_id,
            # The original source becomes the target...
            'target': orig_source_by_id[sample_id],
            # ...and the top hypothesis becomes the new source.
            'source': hypos[0]['tokens'].cpu(),
        })
    return backtranslated
|
class BacktranslationDataset(FairseqDataset):
    """Wraps a target-side dataset and produces ``{generated src, input tgt}``
    batches by running a tgt->src backtranslation model at collate time.

    Args:
        tgt_dataset (~fairseq.data.FairseqDataset): dataset to backtranslate;
            only its source side is used, and those sentences are returned as
            targets after backtranslation.
        src_dict (~fairseq.data.Dictionary): dictionary of backtranslated
            sentences.
        tgt_dict (~fairseq.data.Dictionary, optional): dictionary of the
            sentences being backtranslated.
        backtranslation_fn (callable, optional): generates backtranslations;
            typically the `generate` method of a SequenceGenerator. May be
            None at construction and supplied later via
            set_backtranslation_fn().
        output_collater (callable, optional): collates the backtranslated
            samples into the final batch (default: ``tgt_dataset.collater``).
        cuda: use GPU for generation (ignored when CUDA is unavailable).
    """

    def __init__(self, tgt_dataset, src_dict, tgt_dict=None, backtranslation_fn=None, output_collater=None, cuda=True, **kwargs):
        self.tgt_dataset = tgt_dataset
        self.backtranslation_fn = backtranslation_fn
        if output_collater is not None:
            self.output_collater = output_collater
        else:
            self.output_collater = tgt_dataset.collater
        # Never request CUDA on machines without it.
        self.cuda = cuda if torch.cuda.is_available() else False
        self.src_dict = src_dict
        self.tgt_dict = tgt_dict

    def __getitem__(self, index):
        """Return one raw sample from *tgt_dataset*; backtranslation happens
        per-batch in :func:`collater`, not here."""
        return self.tgt_dataset[index]

    def __len__(self):
        return len(self.tgt_dataset)

    def set_backtranslation_fn(self, backtranslation_fn):
        # Allows deferred injection of the generator once it exists.
        self.backtranslation_fn = backtranslation_fn

    def collater(self, samples):
        """Merge and backtranslate a list of samples into a mini-batch.

        The samples are collated with ``tgt_dataset.collater``, fed to
        *backtranslation_fn*, and the best hypothesis for each example becomes
        the new source (the original input becomes the target). The resulting
        pairs are then collated with *output_collater*.
        """
        # Dummy batches pass through untouched.
        if samples[0].get('is_dummy', False):
            return samples
        backtranslated = backtranslate_samples(
            samples=samples,
            collate_fn=self.tgt_dataset.collater,
            generate_fn=(lambda net_input: self.backtranslation_fn(net_input)),
            cuda=self.cuda,
        )
        return self.output_collater(backtranslated)

    def num_tokens(self, index):
        """Delegate to the wrapped target dataset."""
        return self.tgt_dataset.num_tokens(index)

    def ordered_indices(self):
        """Delegate to the wrapped target dataset."""
        return self.tgt_dataset.ordered_indices()

    def size(self, index):
        """Approximate the (src, tgt) size pair using the target length, since
        the true source length is unknown until after backtranslation."""
        tgt_size = self.tgt_dataset.size(index)[0]
        return (tgt_size, tgt_size)

    @property
    def supports_prefetch(self):
        return getattr(self.tgt_dataset, 'supports_prefetch', False)

    def prefetch(self, indices):
        return self.tgt_dataset.prefetch(indices)
|
class BaseWrapperDataset(FairseqDataset):
    """Dataset wrapper that forwards every operation to an underlying dataset.

    Subclasses override only the methods whose behavior they change.
    """

    def __init__(self, dataset):
        super().__init__()
        self.dataset = dataset

    def __getitem__(self, index):
        return self.dataset[index]

    def __len__(self):
        return len(self.dataset)

    @property
    def sizes(self):
        return self.dataset.sizes

    @property
    def supports_prefetch(self):
        return getattr(self.dataset, 'supports_prefetch', False)

    def collater(self, samples):
        # Prefer the wrapped dataset's own collater; otherwise fall back to
        # the default one.
        if hasattr(self.dataset, 'collater'):
            return self.dataset.collater(samples)
        return default_collate(samples)

    def num_tokens(self, index):
        return self.dataset.num_tokens(index)

    def size(self, index):
        return self.dataset.size(index)

    def ordered_indices(self):
        return self.dataset.ordered_indices()

    def prefetch(self, indices):
        self.dataset.prefetch(indices)

    def set_epoch(self, epoch):
        super().set_epoch(epoch)
        # Propagate epoch changes to wrapped datasets that track them.
        if hasattr(self.dataset, 'set_epoch'):
            self.dataset.set_epoch(epoch)
|
class ColorizeDataset(BaseWrapperDataset):
    """Attaches a per-sample 'colors' tensor to ``net_input`` for models that
    consume color features; values come from the provided *color_getter*.
    """

    def __init__(self, dataset, color_getter):
        super().__init__(dataset)
        self.color_getter = color_getter

    def collater(self, samples):
        batch = super().collater(samples)
        if len(batch) > 0:
            # One color per sample, looked up by sample id.
            colors = [self.color_getter(self.dataset, s['id']) for s in samples]
            batch['net_input']['colors'] = torch.tensor(colors, dtype=torch.long)
        return batch
|
class ConcatDataset(FairseqDataset):
    """Concatenation of several datasets, with optional integer upsampling of
    each constituent via *sample_ratios*."""

    @staticmethod
    def cumsum(sequence, sample_ratios):
        # Cumulative (upsampled) lengths: element k is the total number of
        # examples contributed by datasets 0..k.
        (r, s) = ([], 0)
        for (e, ratio) in zip(sequence, sample_ratios):
            curr_len = int((ratio * len(e)))
            r.append((curr_len + s))
            s += curr_len
        return r

    def __init__(self, datasets, sample_ratios=1):
        super(ConcatDataset, self).__init__()
        assert (len(datasets) > 0), 'datasets should not be an empty iterable'
        self.datasets = list(datasets)
        if isinstance(sample_ratios, int):
            # A single integer ratio applies uniformly to every dataset.
            sample_ratios = ([sample_ratios] * len(self.datasets))
        self.sample_ratios = sample_ratios
        self.cumulative_sizes = self.cumsum(self.datasets, sample_ratios)
        self.real_sizes = [len(d) for d in self.datasets]

    def __len__(self):
        return self.cumulative_sizes[(-1)]

    def __getitem__(self, idx):
        (dataset_idx, sample_idx) = self._get_dataset_and_sample_index(idx)
        return self.datasets[dataset_idx][sample_idx]

    def _get_dataset_and_sample_index(self, idx: int):
        # Locate the owning dataset by binary search over cumulative sizes.
        dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
        if (dataset_idx == 0):
            sample_idx = idx
        else:
            sample_idx = (idx - self.cumulative_sizes[(dataset_idx - 1)])
        # Upsampled indices wrap around the real dataset length.
        sample_idx = (sample_idx % self.real_sizes[dataset_idx])
        return (dataset_idx, sample_idx)

    def collater(self, samples):
        # Delegate batching to the first dataset's collater when it has one.
        if hasattr(self.datasets[0], 'collater'):
            return self.datasets[0].collater(samples)
        else:
            return default_collate(samples)

    def size(self, idx: int):
        """
        Return an example's size as a float or tuple.
        """
        (dataset_idx, sample_idx) = self._get_dataset_and_sample_index(idx)
        return self.datasets[dataset_idx].size(sample_idx)

    def num_tokens(self, index: int):
        return np.max(self.size(index))

    def attr(self, attr: str, index: int):
        # Look up an attribute on whichever dataset owns *index*.
        dataset_idx = bisect.bisect_right(self.cumulative_sizes, index)
        return getattr(self.datasets[dataset_idx], attr, None)

    @property
    def sizes(self):
        _dataset_sizes = []
        for (ds, sr) in zip(self.datasets, self.sample_ratios):
            if isinstance(ds.sizes, np.ndarray):
                _dataset_sizes.append(np.tile(ds.sizes, sr))
            else:
                # NOTE(review): for list-valued sizes only the first component
                # is tiled — confirm this matches how callers use datasets
                # with multiple size components.
                assert isinstance(ds.sizes, list)
                _dataset_sizes.append(np.tile(ds.sizes[0], sr))
        return np.concatenate(_dataset_sizes)

    @property
    def supports_prefetch(self):
        return all((d.supports_prefetch for d in self.datasets))

    def ordered_indices(self):
        """
        Returns indices sorted by length. So less padding is needed.
        """
        return np.argsort(self.sizes)

    def prefetch(self, indices):
        frm = 0
        for (to, ds) in zip(self.cumulative_sizes, self.datasets):
            real_size = len(ds)
            if getattr(ds, 'supports_prefetch', False):
                # Map concatenated (possibly upsampled) indices back into this
                # dataset's local index range.
                ds.prefetch([((i - frm) % real_size) for i in indices if (frm <= i < to)])
            frm = to
|
class ConcatSentencesDataset(FairseqDataset):
    """Fuses several index-aligned datasets by concatenating the i-th item of
    each into one sequence. All constituents must have the same length.
    """

    def __init__(self, *datasets):
        super().__init__()
        self.datasets = datasets
        assert all(len(ds) == len(datasets[0]) for ds in datasets), 'datasets must have the same length'

    def __getitem__(self, index):
        parts = [ds[index] for ds in self.datasets]
        return torch.cat(parts)

    def __len__(self):
        return len(self.datasets[0])

    def collater(self, samples):
        # Batching is delegated to the first constituent dataset.
        return self.datasets[0].collater(samples)

    @property
    def sizes(self):
        # Element-wise sum of the constituents' size arrays.
        return sum(ds.sizes for ds in self.datasets)

    def num_tokens(self, index):
        return sum(ds.num_tokens(index) for ds in self.datasets)

    def size(self, index):
        return sum(ds.size(index) for ds in self.datasets)

    def ordered_indices(self):
        return self.datasets[0].ordered_indices()

    @property
    def supports_prefetch(self):
        return any(getattr(ds, 'supports_prefetch', False) for ds in self.datasets)

    def prefetch(self, indices):
        for ds in self.datasets:
            if getattr(ds, 'supports_prefetch', False):
                ds.prefetch(indices)
|
def infer_language_pair(path):
    """Infer language pair from filename: <split>.<lang1>-<lang2>.(...).idx

    Args:
        path (str): directory whose filenames are scanned.

    Returns:
        tuple: ``(src, dst)`` language codes, or ``(None, None)`` when no
        filename matches the expected pattern.
    """
    for filename in os.listdir(path):
        parts = filename.split('.')
        if (len(parts) >= 3) and (len(parts[1].split('-')) == 2):
            # Fix: return a tuple here as well, matching the not-found branch
            # below (previously this returned a list).
            (src, dst) = parts[1].split('-')
            return (src, dst)
    return (None, None)
|
def collate_tokens(values, pad_idx, eos_idx=None, left_pad=False, move_eos_to_beginning=False):
    """Convert a list of 1d tensors into a padded 2d tensor.

    Args:
        values: list of 1-D tensors of (possibly) different lengths.
        pad_idx: value used to fill the padded positions.
        eos_idx: required when *move_eos_to_beginning* is set.
        left_pad: pad on the left instead of the right.
        move_eos_to_beginning: rotate the trailing EOS token to the front of
            each row (used to build shifted decoder inputs).
    """
    max_len = max(v.size(0) for v in values)
    out = values[0].new(len(values), max_len).fill_(pad_idx)

    def fill_row(src, dst):
        assert dst.numel() == src.numel()
        if not move_eos_to_beginning:
            dst.copy_(src)
        else:
            # Each source must end with EOS; move it to the front and shift
            # the rest right by one.
            assert src[-1] == eos_idx
            dst[0] = eos_idx
            dst[1:] = src[:-1]

    for (row, v) in zip(out, values):
        n = len(v)
        fill_row(v, row[max_len - n:] if left_pad else row[:n])
    return out
|
def load_indexed_dataset(path, dictionary, dataset_impl=None, combine=False, default='cached'):
    """A helper function for loading indexed datasets.

    Args:
        path (str): path to indexed dataset (e.g., 'data-bin/train')
        dictionary (~fairseq.data.Dictionary): data dictionary
        dataset_impl (str, optional): which dataset implementation to use; if
            not provided it is inferred automatically, falling back to
            *default* ('cached') for legacy indexed data.
        combine (bool, optional): automatically load and combine shard
            datasets: for path 'data-bin/train' this also loads
            'data-bin/train1', 'data-bin/train2', ... and returns a single
            ConcatDataset over all of them.
    """
    from fairseq.data.concat_dataset import ConcatDataset
    import fairseq.data.indexed_dataset as indexed_dataset

    datasets = []
    for shard_idx in itertools.count():
        # Shard 0 has no numeric suffix; later shards append 1, 2, ...
        shard_path = path + (str(shard_idx) if shard_idx > 0 else '')
        impl = dataset_impl
        if impl is None:
            # Infer the on-disk format when not given explicitly.
            impl = indexed_dataset.infer_dataset_impl(shard_path)
        dataset = indexed_dataset.make_dataset(shard_path, impl=(impl or default), fix_lua_indexing=True, dictionary=dictionary)
        if dataset is None:
            break
        print('| loaded {} examples from: {}'.format(len(dataset), shard_path))
        datasets.append(dataset)
        if not combine:
            break
    if not datasets:
        return None
    if len(datasets) == 1:
        return datasets[0]
    return ConcatDataset(datasets)
|
@contextlib.contextmanager
def numpy_seed(seed, *addl_seeds):
    """Context manager which seeds the NumPy PRNG with the specified seed and
    restores the previous RNG state afterward. A ``None`` seed makes the
    context a no-op."""
    if seed is None:
        yield
        return
    if addl_seeds:
        # Fold the extra seeds in deterministically.
        seed = int(hash((seed, *addl_seeds)) % 1000000.0)
    saved_state = np.random.get_state()
    np.random.seed(seed)
    try:
        yield
    finally:
        np.random.set_state(saved_state)
|
def collect_filtered(function, iterable, filtered):
    """Like :func:`filter`, but appends rejected elements to *filtered*.

    Args:
        function (callable): predicate; elements for which it returns a falsy
            value are diverted into *filtered*.
        iterable (iterable): elements to examine.
        filtered (list): receives the rejected elements.

    Yields:
        the elements of *iterable* accepted by *function*.
    """
    for item in iterable:
        if not function(item):
            filtered.append(item)
        else:
            yield item
|
def _filter_by_size_dynamic(indices, size_fn, max_positions, raise_exception=False):
    """Split *indices* into (kept, ignored) according to *max_positions*.

    Args:
        indices: candidate example indices (iterable of ints).
        size_fn (callable): maps an index to its size — a scalar, tuple, or
            dict, depending on the dataset.
        max_positions (int/float, tuple, or dict): size limit(s); comparisons
            are component-wise and ``None`` components are unconstrained.
        raise_exception (bool): unused here; kept for interface compatibility
            (callers raise based on the returned ``ignored`` list).

    Returns:
        tuple: ``(kept, ignored)`` — an ``np.int64`` array of surviving
        indices and a plain list of rejected ones.
    """
    def check_size(idx):
        if isinstance(max_positions, (float, int)):
            return (size_fn(idx) <= max_positions)
        elif isinstance(max_positions, dict):
            idx_size = size_fn(idx)
            assert isinstance(idx_size, dict)
            # Only the keys present in both dicts constrain the example.
            intersect_keys = set(max_positions.keys()) & set(idx_size.keys())
            return all(
                all((a is None) or (b is None) or (a <= b)
                    for (a, b) in zip(idx_size[key], max_positions[key]))
                for key in intersect_keys
            )
        else:
            # Fix: evaluate size_fn once per index (previously it was called
            # up to three times in this branch).
            idx_size = size_fn(idx)
            if isinstance(idx_size, dict) and isinstance(max_positions, tuple):
                return all(
                    (a is None) or (b is None) or (a <= b)
                    for (a, b) in zip(idx_size.values(), max_positions)
                )
            if not isinstance(idx_size, Iterable):
                # Scalar size compared against every limit component.
                return all(idx_size <= b for b in max_positions)
            return all(
                (a is None) or (b is None) or (a <= b)
                for (a, b) in zip(idx_size, max_positions)
            )

    ignored = []
    itr = collect_filtered(check_size, indices, ignored)
    indices = np.fromiter(itr, dtype=np.int64, count=(-1))
    return (indices, ignored)
|
def filter_by_size(indices, dataset, max_positions, raise_exception=False):
    """
    Filter indices based on their size.

    Args:
        indices (List[int]): ordered list of dataset indices
        dataset (FairseqDataset): fairseq dataset instance
        max_positions (tuple): filter elements larger than this size.
            Comparisons are done component-wise.
        raise_exception (bool, optional): if ``True``, raise an exception if
            any elements are filtered (default: False).
    """
    if isinstance(max_positions, (float, int)):
        sizes = getattr(dataset, 'sizes', None)
        if isinstance(sizes, np.ndarray):
            # Fast vectorized path over the dataset's size array.
            keep_mask = sizes[indices] <= max_positions
            ignored = indices[~keep_mask].tolist()
            indices = indices[keep_mask]
        elif isinstance(sizes, list) and (len(sizes) == 1):
            keep_mask = sizes[0][indices] <= max_positions
            ignored = indices[~keep_mask].tolist()
            indices = indices[keep_mask]
        else:
            (indices, ignored) = _filter_by_size_dynamic(indices, dataset.size, max_positions)
    else:
        (indices, ignored) = _filter_by_size_dynamic(indices, dataset.size, max_positions)
    if ignored and raise_exception:
        raise Exception('Size of sample #{} is invalid (={}) since max_positions={}, skip this example with --skip-invalid-size-inputs-valid-test'.format(ignored[0], dataset.size(ignored[0]), max_positions))
    if ignored:
        print('| WARNING: {} samples have invalid sizes and will be skipped, max_positions={}, first few sample ids={}'.format(len(ignored), max_positions, ignored[:10]))
    return indices
|
def batch_by_size(indices, num_tokens_fn, max_tokens=None, max_sentences=None, required_batch_size_multiple=1):
    """
    Yield mini-batches of indices bucketed by size. Batches may contain
    sequences of different lengths.

    Args:
        indices (List[int]): ordered list of dataset indices
        num_tokens_fn (callable): function that returns the number of tokens
            at a given index
        max_tokens (int, optional): max number of tokens in each batch
            (default: None).
        max_sentences (int, optional): max number of sentences in each batch
            (default: None).
        required_batch_size_multiple (int, optional): require batch size to
            be a multiple of N (default: 1).
    """
    try:
        from fairseq.data.data_utils_fast import batch_by_size_fast
    except ImportError:
        raise ImportError('Please build Cython components with: `pip install --editable .` or `python setup.py build_ext --inplace`')
    if max_tokens is None:
        max_tokens = sys.maxsize
    if max_sentences is None:
        max_sentences = sys.maxsize
    # The Cython kernel needs a concrete int64 array, not a generator.
    if isinstance(indices, types.GeneratorType):
        indices = np.fromiter(indices, dtype=np.int64, count=(-1))
    return batch_by_size_fast(indices, num_tokens_fn, max_tokens, max_sentences, required_batch_size_multiple)
|
def process_bpe_symbol(sentence: str, bpe_symbol: str):
    """Undo subword segmentation in *sentence*.

    *bpe_symbol* selects the scheme: 'sentencepiece', '_EOW', a literal
    continuation marker (e.g. '@@ '), or ``None`` for a no-op.
    """
    if bpe_symbol == 'sentencepiece':
        # Pieces are joined directly; the '▁' marker encodes word boundaries.
        return sentence.replace(' ', '').replace('▁', ' ').strip()
    if bpe_symbol == '_EOW':
        return sentence.replace(' ', '').replace('_EOW', ' ').strip()
    if bpe_symbol is not None:
        # Drop the continuation markers to rejoin split words.
        return (sentence + ' ').replace(bpe_symbol, '').rstrip()
    return sentence
|
class Dictionary(object):
    """A mapping from symbols to consecutive integers."""

    def __init__(self, pad='<pad>', eos='</s>', unk='<unk>', bos='<s>', extra_special_symbols=None):
        (self.unk_word, self.pad_word, self.eos_word) = (unk, pad, eos)
        self.symbols = []  # index -> symbol
        self.count = []  # index -> occurrence count
        self.indices = {}  # symbol -> index
        # Special symbols occupy the first indices, in this fixed order.
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        # Entries below this index are special and are never re-sorted.
        self.nspecial = len(self.symbols)

    def __eq__(self, other):
        # Two dictionaries are equal iff they assign the same indices.
        return (self.indices == other.indices)

    def __getitem__(self, idx):
        if (idx < len(self.symbols)):
            return self.symbols[idx]
        # Out-of-range indices render as the unknown word.
        return self.unk_word

    def __len__(self):
        """Returns the number of symbols in the dictionary"""
        return len(self.symbols)

    def __contains__(self, sym):
        return (sym in self.indices)

    def index(self, sym):
        """Returns the index of the specified symbol"""
        assert isinstance(sym, str)
        if (sym in self.indices):
            return self.indices[sym]
        return self.unk_index

    def string(self, tensor, bpe_symbol=None, escape_unk=False):
        """Helper for converting a tensor of token indices to a string.

        Can optionally remove BPE symbols or escape <unk> words.
        """
        if (torch.is_tensor(tensor) and (tensor.dim() == 2)):
            # One output line per row for 2-D input.
            return '\n'.join((self.string(t, bpe_symbol, escape_unk) for t in tensor))

        def token_string(i):
            if (i == self.unk()):
                return self.unk_string(escape_unk)
            else:
                return self[i]

        # Skip EOS (and BOS, if this instance defines one) in the output.
        if hasattr(self, 'bos_index'):
            sent = ' '.join((token_string(i) for i in tensor if ((i != self.eos()) and (i != self.bos()))))
        else:
            sent = ' '.join((token_string(i) for i in tensor if (i != self.eos())))
        return data_utils.process_bpe_symbol(sent, bpe_symbol)

    def unk_string(self, escape=False):
        """Return unknown string, optionally escaped as: <<unk>>"""
        if escape:
            return '<{}>'.format(self.unk_word)
        else:
            return self.unk_word

    def add_symbol(self, word, n=1):
        """Adds a word to the dictionary"""
        if (word in self.indices):
            # Existing symbol: just bump its count.
            idx = self.indices[word]
            self.count[idx] = (self.count[idx] + n)
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def update(self, new_dict):
        """Updates counts from new dictionary."""
        for word in new_dict.symbols:
            idx2 = new_dict.indices[word]
            if (word in self.indices):
                idx = self.indices[word]
                self.count[idx] = (self.count[idx] + new_dict.count[idx2])
            else:
                idx = len(self.symbols)
                self.indices[word] = idx
                self.symbols.append(word)
                self.count.append(new_dict.count[idx2])

    def finalize(self, threshold=(-1), nwords=(-1), padding_factor=8):
        """Sort symbols by frequency in descending order, ignoring special ones.

        Args:
            - threshold defines the minimum word count
            - nwords defines the total number of words in the final dictionary,
              including special symbols
            - padding_factor can be used to pad the dictionary size to be a
              multiple of 8, which is important on some hardware (e.g., Nvidia
              Tensor Cores).
        """
        if (nwords <= 0):
            nwords = len(self)
        # Special symbols keep their original positions.
        new_indices = dict(zip(self.symbols[:self.nspecial], range(self.nspecial)))
        new_symbols = self.symbols[:self.nspecial]
        new_count = self.count[:self.nspecial]
        # Building the Counter from alphabetically sorted pairs makes ties in
        # most_common() break deterministically (by symbol).
        c = Counter(dict(sorted(zip(self.symbols[self.nspecial:], self.count[self.nspecial:]))))
        for (symbol, count) in c.most_common((nwords - self.nspecial)):
            if (count >= threshold):
                new_indices[symbol] = len(new_symbols)
                new_symbols.append(symbol)
                new_count.append(count)
            else:
                # most_common() yields in descending count order, so the first
                # count below threshold ends the scan.
                break
        threshold_nwords = len(new_symbols)
        if (padding_factor > 1):
            # Pad with unused filler symbols until the size is a multiple of
            # padding_factor.
            i = 0
            while ((threshold_nwords % padding_factor) != 0):
                symbol = 'madeupword{:04d}'.format(i)
                new_indices[symbol] = len(new_symbols)
                new_symbols.append(symbol)
                new_count.append(0)
                i += 1
                threshold_nwords += 1
        assert ((len(new_symbols) % padding_factor) == 0)
        assert (len(new_symbols) == len(new_indices))
        self.count = list(new_count)
        self.symbols = list(new_symbols)
        self.indices = new_indices

    def bos(self):
        """Helper to get index of beginning-of-sentence symbol"""
        return self.bos_index

    def pad(self):
        """Helper to get index of pad symbol"""
        return self.pad_index

    def eos(self):
        """Helper to get index of end-of-sentence symbol"""
        return self.eos_index

    def unk(self):
        """Helper to get index of unk symbol"""
        return self.unk_index

    @classmethod
    def load(cls, f, ignore_utf_errors=False):
        """Loads the dictionary from a text file with the format:

        ```
        <symbol0> <count0>
        <symbol1> <count1>
        ...
        ```
        """
        d = cls()
        d.add_from_file(f, ignore_utf_errors)
        return d

    def add_from_file(self, f, ignore_utf_errors=False):
        """
        Loads a pre-existing dictionary from a text file and adds its symbols
        to this instance.
        """
        if isinstance(f, str):
            # Given a path: open it and recurse with the file object.
            try:
                if (not ignore_utf_errors):
                    with open(f, 'r', encoding='utf-8') as fd:
                        self.add_from_file(fd)
                else:
                    with open(f, 'r', encoding='utf-8', errors='ignore') as fd:
                        self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception('Incorrect encoding detected in {}, please rebuild the dataset'.format(f))
            return
        lines = f.readlines()
        indices_start_line = self._load_meta(lines)
        for line in lines[indices_start_line:]:
            # Split on the LAST space: the count follows it; the symbol
            # (which may itself contain spaces) precedes it.
            idx = line.rfind(' ')
            if (idx == (-1)):
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
            word = line[:idx]
            count = int(line[(idx + 1):])
            self.indices[word] = len(self.symbols)
            self.symbols.append(word)
            self.count.append(count)

    def _save(self, f, kv_iterator):
        # Accept either a path (open it and recurse via save()) or a writable
        # file object.
        if isinstance(f, str):
            os.makedirs(os.path.dirname(f), exist_ok=True)
            with open(f, 'w', encoding='utf-8') as fd:
                return self.save(fd)
        for (k, v) in kv_iterator:
            print('{} {}'.format(k, v), file=f)

    def _get_meta(self):
        # Hook: subclasses may prepend extra (key, value) metadata rows.
        return ([], [])

    def _load_meta(self, lines):
        # Hook: subclasses may consume leading metadata lines; returns the
        # index of the first real dictionary entry.
        return 0

    def save(self, f):
        """Stores dictionary into a text file"""
        (ex_keys, ex_vals) = self._get_meta()
        # Special symbols are not persisted; load() re-creates them.
        self._save(f, zip((ex_keys + self.symbols[self.nspecial:]), (ex_vals + self.count[self.nspecial:])))

    def dummy_sentence(self, length):
        # Random non-special token ids, terminated with EOS.
        t = torch.Tensor(length).uniform_((self.nspecial + 1), len(self)).long()
        t[(-1)] = self.eos()
        return t

    def encode_line(self, line, line_tokenizer=tokenize_line, add_if_not_exist=True, consumer=None, append_eos=True, reverse_order=False):
        """Tokenize *line* and return its token ids as a torch.IntTensor."""
        words = line_tokenizer(line)
        if reverse_order:
            words = list(reversed(words))
        nwords = len(words)
        ids = torch.IntTensor(((nwords + 1) if append_eos else nwords))
        for (i, word) in enumerate(words):
            if add_if_not_exist:
                idx = self.add_symbol(word)
            else:
                idx = self.index(word)
            if (consumer is not None):
                # Callback hook, e.g. for collecting binarization statistics.
                consumer(word, idx)
            ids[i] = idx
        if append_eos:
            ids[nwords] = self.eos_index
        return ids

    @staticmethod
    def _add_file_to_dictionary_single_worker(filename, tokenize, eos_word, worker_id=0, num_workers=1):
        # Count token frequencies within this worker's byte-range of the file.
        counter = Counter()
        with open(filename, 'r', encoding='utf-8') as f:
            size = os.fstat(f.fileno()).st_size
            chunk_size = (size // num_workers)
            offset = (worker_id * chunk_size)
            end = (offset + chunk_size)
            f.seek(offset)
            if (offset > 0):
                # We may have seeked into the middle of a line; skip to the
                # next full line (the previous worker counts the partial one).
                safe_readline(f)
            line = f.readline()
            while line:
                for word in tokenize(line):
                    counter.update([word])
                counter.update([eos_word])
                if (f.tell() > end):
                    break
                line = f.readline()
        return counter

    @staticmethod
    def add_file_to_dictionary(filename, dict, tokenize, num_workers):
        """Populate *dict* with token counts from *filename*, optionally in
        parallel over *num_workers* processes."""
        def merge_result(counter):
            # Sorted iteration keeps symbol insertion order deterministic.
            for (w, c) in sorted(counter.items()):
                dict.add_symbol(w, c)

        if (num_workers > 1):
            pool = Pool(processes=num_workers)
            results = []
            for worker_id in range(num_workers):
                results.append(pool.apply_async(Dictionary._add_file_to_dictionary_single_worker, (filename, tokenize, dict.eos_word, worker_id, num_workers)))
            pool.close()
            pool.join()
            for r in results:
                merge_result(r.get())
        else:
            merge_result(Dictionary._add_file_to_dictionary_single_worker(filename, tokenize, dict.eos_word))
|
class TruncatedDictionary(object):
    """Presents only the first *length* entries of *wrapped_dict*; every
    out-of-range lookup resolves to the unknown symbol.
    """

    def __init__(self, wrapped_dict, length):
        # Dynamically subclass the wrapped dictionary's class so isinstance
        # checks against it still succeed, and share its attribute dict so
        # all other attributes/methods pass through untouched.
        self.__class__ = type(
            wrapped_dict.__class__.__name__,
            (self.__class__, wrapped_dict.__class__),
            {},
        )
        self.__dict__ = wrapped_dict.__dict__
        self.wrapped_dict = wrapped_dict
        # Never expose more entries than the wrapped dictionary has.
        self.length = min(len(self.wrapped_dict), length)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        if i < self.length:
            return self.wrapped_dict[i]
        return self.wrapped_dict.unk()
|
@register_bpe('fastbpe')
class fastBPE(object):
    """BPE encoder/decoder backed by the fastBPE library, using '@@ ' as the
    subword continuation marker."""

    @staticmethod
    def add_args(parser):
        parser.add_argument('--bpe-codes', type=str, help='path to fastBPE BPE')

    def __init__(self, args):
        if args.bpe_codes is None:
            # Fix: the message previously referenced --bpe=subword_nmt, but
            # this class is registered as 'fastbpe'.
            raise ValueError('--bpe-codes is required for --bpe=fastbpe')
        codes = file_utils.cached_path(args.bpe_codes)
        try:
            import fastBPE
            self.bpe = fastBPE.fastBPE(codes)
            self.bpe_symbol = '@@ '
        except ImportError:
            raise ImportError('Please install fastBPE with: pip install fastBPE')

    def encode(self, x: str) -> str:
        return self.bpe.apply([x])[0]

    def decode(self, x: str) -> str:
        # Strip the '@@ ' continuation markers to restore the original text.
        return (x + ' ').replace(self.bpe_symbol, '').rstrip()
|
@register_bpe('gpt2')
class GPT2BPE(object):
    """GPT-2 byte-level BPE wrapper; encoded text is rendered as
    space-separated integer token ids."""

    @staticmethod
    def add_args(parser):
        parser.add_argument('--gpt2-encoder-json', type=str, default=DEFAULT_ENCODER_JSON, help='path to encoder.json')
        parser.add_argument('--gpt2-vocab-bpe', type=str, default=DEFAULT_VOCAB_BPE, help='path to vocab.bpe')

    def __init__(self, args):
        # Fall back to the bundled defaults when args omit the paths.
        encoder_json = file_utils.cached_path(getattr(args, 'gpt2_encoder_json', DEFAULT_ENCODER_JSON))
        vocab_bpe = file_utils.cached_path(getattr(args, 'gpt2_vocab_bpe', DEFAULT_VOCAB_BPE))
        self.bpe = get_encoder(encoder_json, vocab_bpe)

    def encode(self, x: str) -> str:
        token_ids = self.bpe.encode(x)
        return ' '.join(str(tok) for tok in token_ids)

    def decode(self, x: str) -> str:
        return self.bpe.decode([int(tok) for tok in x.split()])

    def is_beginning_of_word(self, x: str) -> bool:
        # In GPT-2 BPE, tokens that start a new word decode with a leading
        # space.
        return self.decode(x).startswith(' ')
|
@lru_cache()
def bytes_to_unicode():
    """Build the reversible byte <-> unicode-character table for byte-level BPE.

    Every byte value 0-255 is mapped to a printable unicode character so the
    BPE code never has to deal with whitespace/control characters. Printable
    latin bytes map to themselves; the remaining bytes are assigned code
    points starting at 256, keeping the mapping fully reversible. Without
    this trick a vocab would need thousands of extra symbols to avoid UNKs
    on large datasets.
    """
    printable = (
        list(range(ord('!'), ord('~') + 1))
        + list(range(ord('¡'), ord('¬') + 1))
        + list(range(ord('®'), ord('ÿ') + 1))
    )
    byte_values = list(printable)
    char_codes = list(printable)
    shift = 0
    for byte in range(2 ** 8):
        if byte not in byte_values:
            byte_values.append(byte)
            char_codes.append((2 ** 8) + shift)
            shift += 1
    return {b: chr(c) for b, c in zip(byte_values, char_codes)}
|
def get_pairs(word):
    """Return the set of adjacent symbol pairs in *word*.

    *word* is a tuple of symbols (symbols being variable-length strings).
    """
    previous = word[0]
    pairs = set()
    for symbol in word[1:]:
        pairs.add((previous, symbol))
        previous = symbol
    return pairs
|
class Encoder():
def __init__(self, encoder, bpe_merges, errors='replace'):
self.encoder = encoder
self.decoder = {v: k for (k, v) in self.encoder.items()}
self.errors = errors
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v: k for (k, v) in self.byte_encoder.items()}
self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
self.cache = {}
try:
import regex as re
self.re = re
except ImportError:
raise ImportError('Please install regex with: pip install regex')
self.pat = self.re.compile("'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)|\\s+")
def bpe(self, token):
if (token in self.cache):
return self.cache[token]
word = tuple(token)
pairs = get_pairs(word)
if (not pairs):
return token
while True:
bigram = min(pairs, key=(lambda pair: self.bpe_ranks.get(pair, float('inf'))))
if (bigram not in self.bpe_ranks):
break
(first, second) = bigram
new_word = []
i = 0
while (i < len(word)):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
except:
new_word.extend(word[i:])
break
if ((word[i] == first) and (i < (len(word) - 1)) and (word[(i + 1)] == second)):
new_word.append((first + second))
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if (len(word) == 1):
break
else:
pairs = get_pairs(word)
word = ' '.join(word)
self.cache[token] = word
return word
def encode(self, text):
bpe_tokens = []
for token in self.re.findall(self.pat, text):
token = ''.join((self.byte_encoder[b] for b in token.encode('utf-8')))
bpe_tokens.extend((self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' ')))
return bpe_tokens
def decode(self, tokens):
text = ''.join([self.decoder[token] for token in tokens])
text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)
return text
|
def get_encoder(encoder_json_path, vocab_bpe_path):
    """Construct an Encoder from GPT-2 vocabulary files on disk."""
    with open(encoder_json_path, 'r') as f:
        token_to_id = json.load(f)
    with open(vocab_bpe_path, 'r', encoding='utf-8') as f:
        merges_text = f.read()
    # The first line of vocab.bpe is a version header and the last is empty;
    # skip both, then split each remaining line into a symbol pair.
    merge_lines = merges_text.split('\n')[1:-1]
    merges = [tuple(line.split()) for line in merge_lines]
    return Encoder(encoder=token_to_id, bpe_merges=merges)
|
@register_bpe('bert')
class BertBPE(object):
    """WordPiece tokenization via pytorch_transformers' BertTokenizer."""
    @staticmethod
    def add_args(parser):
        parser.add_argument('--bpe-cased', action='store_true', help='set for cased BPE', default=False)
        parser.add_argument('--bpe-vocab-file', type=str, help='bpe vocab file.')
    def __init__(self, args):
        try:
            from pytorch_transformers import BertTokenizer
            from pytorch_transformers.tokenization_utils import clean_up_tokenization
        except ImportError:
            # Fixed: the original message lacked the space before 'with:'.
            raise ImportError('Please install 1.0.0 version of pytorch_transformers with: pip install pytorch-transformers')
        if getattr(args, 'bpe_vocab_file', None):
            # Only use the explicit vocab file when one was actually supplied.
            # The attribute always exists (as None) after add_args() has run,
            # so the previous membership test ('bpe_vocab_file' in args) took
            # this branch unconditionally and passed None to BertTokenizer.
            self.bert_tokenizer = BertTokenizer(args.bpe_vocab_file, do_lower_case=(not args.bpe_cased))
        else:
            vocab_file_name = ('bert-base-cased' if args.bpe_cased else 'bert-base-uncased')
            self.bert_tokenizer = BertTokenizer.from_pretrained(vocab_file_name)
        self.clean_up_tokenization = clean_up_tokenization
    def encode(self, x: str) -> str:
        return ' '.join(self.bert_tokenizer.tokenize(x))
    def decode(self, x: str) -> str:
        return self.clean_up_tokenization(self.bert_tokenizer.convert_tokens_to_string(x.split(' ')))
    def is_beginning_of_word(self, x: str) -> bool:
        # WordPiece marks word-internal pieces with a '##' prefix.
        return (not x.startswith('##'))
|
@register_tokenizer('moses')
class MosesTokenizer(object):
    """Tokenizer/detokenizer backed by the sacremoses package."""
    @staticmethod
    def add_args(parser):
        parser.add_argument('--moses-source-lang', metavar='SRC', help='source language')
        parser.add_argument('--moses-target-lang', metavar='TARGET', help='target language')
        parser.add_argument('--moses-no-dash-splits', action='store_true', default=False, help="don't apply dash split rules")
        parser.add_argument('--moses-no-escape', action='store_true', default=False, help="don't perform HTML escaping on apostrophy, quotes, etc.")
    def __init__(self, args):
        self.args = args
        # Fall back to the task-level language pair, then to English.
        if getattr(args, 'moses_source_lang', None) is None:
            args.moses_source_lang = getattr(args, 'source_lang', 'en')
        if getattr(args, 'moses_target_lang', None) is None:
            args.moses_target_lang = getattr(args, 'target_lang', 'en')
        try:
            from sacremoses import MosesTokenizer, MosesDetokenizer
            self.tok = MosesTokenizer(args.moses_source_lang)
            self.detok = MosesDetokenizer(args.moses_target_lang)
        except ImportError:
            raise ImportError('Please install Moses tokenizer with: pip install sacremoses')
    def encode(self, x: str) -> str:
        opts = self.args
        return self.tok.tokenize(
            x,
            aggressive_dash_splits=(not opts.moses_no_dash_splits),
            return_str=True,
            escape=(not opts.moses_no_escape),
        )
    def decode(self, x: str) -> str:
        return self.detok.detokenize(x.split())
|
@register_tokenizer('nltk')
class NLTKTokenizer(object):
    """Word tokenizer backed by NLTK; decoding is the identity."""
    def __init__(self, source_lang=None, target_lang=None):
        try:
            from nltk.tokenize import word_tokenize
            self.word_tokenize = word_tokenize
        except ImportError:
            raise ImportError('Please install nltk with: pip install nltk')
    def encode(self, x: str) -> str:
        return ' '.join(self.word_tokenize(x))
    def decode(self, x: str) -> str:
        # No detokenizer available; return the input unchanged.
        return x
|
@register_bpe('sentencepiece')
class SentencepieceBPE(object):
    """Subword segmentation via Google's sentencepiece library."""
    @staticmethod
    def add_args(parser):
        parser.add_argument('--sentencepiece-vocab', type=str, help='path to sentencepiece vocab')
    def __init__(self, args):
        model_path = file_utils.cached_path(args.sentencepiece_vocab)
        try:
            import sentencepiece as spm
            self.sp = spm.SentencePieceProcessor()
            self.sp.Load(model_path)
        except ImportError:
            raise ImportError('Please install sentencepiece with: pip install sentencepiece')
    def encode(self, x: str) -> str:
        return ' '.join(self.sp.EncodeAsPieces(x))
    def decode(self, x: str) -> str:
        # Remove the piece separators, then turn sentencepiece's '▁'
        # word-boundary markers back into ordinary spaces.
        return x.replace(' ', '').replace('▁', ' ').strip()
|
@register_tokenizer('space')
class SpaceTokenizer(object):
    """Collapses whitespace runs to single spaces; decoding is the identity."""
    def __init__(self, source_lang=None, target_lang=None):
        self.space_tok = re.compile(r'\s+')
    def encode(self, x: str) -> str:
        return self.space_tok.sub(' ', x)
    def decode(self, x: str) -> str:
        return x
|
@register_bpe('subword_nmt')
class SubwordNMTBPE(object):
    """BPE encoder/decoder backed by the subword_nmt package."""
    @staticmethod
    def add_args(parser):
        parser.add_argument('--bpe-codes', type=str, help='path to subword NMT BPE')
        parser.add_argument('--bpe-separator', default='@@', help='BPE separator')
    def __init__(self, args):
        if args.bpe_codes is None:
            raise ValueError('--bpe-codes is required for --bpe=subword_nmt')
        codes = file_utils.cached_path(args.bpe_codes)
        try:
            from subword_nmt import apply_bpe
            # Reuse apply_bpe's own CLI parser so the codes file is opened
            # and validated exactly as the standalone tool would do it.
            parsed = apply_bpe.create_parser().parse_args(['--codes', codes, '--separator', args.bpe_separator])
            self.bpe = apply_bpe.BPE(parsed.codes, parsed.merges, parsed.separator, None, parsed.glossaries)
            self.bpe_symbol = parsed.separator + ' '
        except ImportError:
            raise ImportError('Please install subword_nmt with: pip install subword-nmt')
    def encode(self, x: str) -> str:
        return self.bpe.process_line(x)
    def decode(self, x: str) -> str:
        # Appending a space makes every separator occurrence uniform before
        # removal, including one at the end of the string.
        return (x + ' ').replace(self.bpe_symbol, '').rstrip()
|
class FairseqDataset(torch.utils.data.Dataset):
    """A dataset that provides helpers for batching."""
    def __getitem__(self, index):
        raise NotImplementedError
    def __len__(self):
        raise NotImplementedError
    def collater(self, samples):
        """Merge a list of samples to form a mini-batch.

        Args:
            samples (List[dict]): samples to collate

        Returns:
            dict: a mini-batch suitable for forwarding with a Model
        """
        raise NotImplementedError
    def num_tokens(self, index):
        """Return the number of tokens in a sample. This value is used to
        enforce ``--max-tokens`` during batching."""
        raise NotImplementedError
    def size(self, index):
        """Return an example's size as a float or tuple. This value is used when
        filtering a dataset with ``--max-positions``."""
        raise NotImplementedError
    def ordered_indices(self):
        """Return an ordered list of indices. Batches will be constructed based
        on this order."""
        return np.arange(len(self))
    @property
    def supports_prefetch(self):
        """Whether this dataset supports prefetching."""
        return False
    def attr(self, attr: str, index: int):
        # Per-index attribute lookup; the base class ignores the index.
        return getattr(self, attr, None)
    def prefetch(self, indices):
        """Prefetch the data required for this epoch."""
        raise NotImplementedError
    def set_epoch(self, epoch):
        # Hook for datasets whose contents vary by epoch; no-op by default.
        pass
|
class IdDataset(FairseqDataset):
    """Dataset whose items are simply their own indices."""
    def __getitem__(self, index):
        return index
    def __len__(self):
        # NOTE(review): reports length 0 — presumably this dataset is always
        # wrapped alongside datasets that provide the real length; confirm
        # against callers.
        return 0
    def collater(self, samples):
        return torch.tensor(samples)
|
def __best_fitting_dtype(vocab_size=None):
    """Pick the narrowest integer dtype that can hold every token id."""
    # uint16 suffices when ids stay below 65500; otherwise fall back to int32.
    if (vocab_size is not None) and (vocab_size < 65500):
        return np.uint16
    return np.int32
|
def get_available_dataset_impl():
    """List the supported values for --dataset-impl."""
    return ['raw', 'lazy', 'cached', 'mmap']
|
def infer_dataset_impl(path):
    """Guess which dataset implementation wrote *path*; None when unknown."""
    if IndexedRawTextDataset.exists(path):
        return 'raw'
    if not IndexedDataset.exists(path):
        return None
    # Binary formats are distinguished by the magic bytes in the .idx file.
    with open(index_file_path(path), 'rb') as f:
        magic = f.read(8)
    if magic == IndexedDataset._HDR_MAGIC:
        return 'cached'
    if magic == MMapIndexedDataset.Index._HDR_MAGIC[:8]:
        return 'mmap'
    return None
|
def make_builder(out_file, impl, vocab_size=None):
    """Create a dataset builder for the requested --dataset-impl backend."""
    if impl != 'mmap':
        return IndexedDatasetBuilder(out_file)
    return MMapIndexedDatasetBuilder(out_file, dtype=__best_fitting_dtype(vocab_size))
|
def make_dataset(path, impl, fix_lua_indexing=False, dictionary=None):
    """Load the dataset at *path* with the requested implementation.

    Returns None when the on-disk files for the chosen implementation are
    missing.
    """
    if impl == 'raw' and IndexedRawTextDataset.exists(path):
        # Raw text needs a dictionary to binarize lines on the fly.
        assert dictionary is not None
        return IndexedRawTextDataset(path, dictionary)
    if impl == 'lazy' and IndexedDataset.exists(path):
        return IndexedDataset(path, fix_lua_indexing=fix_lua_indexing)
    if impl == 'cached' and IndexedDataset.exists(path):
        return IndexedCachedDataset(path, fix_lua_indexing=fix_lua_indexing)
    if impl == 'mmap' and MMapIndexedDataset.exists(path):
        return MMapIndexedDataset(path)
    return None
|
def dataset_exists(path, impl):
    """Check whether the on-disk files for *impl* are present at *path*."""
    if impl == 'raw':
        return IndexedRawTextDataset.exists(path)
    if impl == 'mmap':
        return MMapIndexedDataset.exists(path)
    # 'lazy' and 'cached' share the same file layout.
    return IndexedDataset.exists(path)
|
def read_longs(f, n):
    """Read *n* int64 values from binary file-like *f* into a numpy array."""
    out = np.empty(n, dtype=np.int64)
    f.readinto(out)
    return out
|
def write_longs(f, a):
    """Write the values of *a* to binary file-like *f* as int64."""
    f.write(np.asarray(a, dtype=np.int64))
|
def code(dtype):
    """Return the integer code registered for *dtype* in the dtypes table."""
    for key, registered in dtypes.items():
        if registered == dtype:
            return key
    raise ValueError(dtype)
|
def index_file_path(prefix_path):
    """Path of the index (.idx) file for a dataset prefix."""
    return '{}.idx'.format(prefix_path)
|
def data_file_path(prefix_path):
    """Path of the data (.bin) file for a dataset prefix."""
    return '{}.bin'.format(prefix_path)
|
class IndexedDataset(FairseqDataset):
    'Loader for TorchNet IndexedDataset'
    # 8-byte magic written at the start of every .idx file in this format.
    _HDR_MAGIC = b'TNTIDX\x00\x00'
    def __init__(self, path, fix_lua_indexing=False):
        # fix_lua_indexing: subtract 1 from every value on read, converting
        # 1-based (Lua/TorchNet) token ids to 0-based.
        super().__init__()
        self.path = path
        self.fix_lua_indexing = fix_lua_indexing
        # The .bin file is opened lazily on first __getitem__.
        self.data_file = None
        self.read_index(path)
    def read_index(self, path):
        # Parse the .idx header: magic, version, dtype code + element size,
        # counts, then the offset/size tables.
        with open(index_file_path(path), 'rb') as f:
            magic = f.read(8)
            assert (magic == self._HDR_MAGIC), "Index file doesn't match expected format. Make sure that --dataset-impl is configured properly."
            version = f.read(8)
            assert (struct.unpack('<Q', version) == (1,))
            (code, self.element_size) = struct.unpack('<QQ', f.read(16))
            self.dtype = dtypes[code]
            # _len: number of examples; s: total number of size entries
            # (sum of each example's rank).
            (self._len, self.s) = struct.unpack('<QQ', f.read(16))
            self.dim_offsets = read_longs(f, (self._len + 1))
            self.data_offsets = read_longs(f, (self._len + 1))
            self.sizes = read_longs(f, self.s)
    def read_data(self, path):
        # buffering=0: unbuffered handle for direct seek + readinto reads.
        self.data_file = open(data_file_path(path), 'rb', buffering=0)
    def check_index(self, i):
        if ((i < 0) or (i >= self._len)):
            raise IndexError('index out of range')
    def __del__(self):
        if self.data_file:
            self.data_file.close()
    @lru_cache(maxsize=8)
    def __getitem__(self, i):
        if (not self.data_file):
            self.read_data(self.path)
        self.check_index(i)
        # Example i's shape is the slice of `sizes` between its dim offsets;
        # its data starts at data_offsets[i] elements into the .bin file.
        tensor_size = self.sizes[self.dim_offsets[i]:self.dim_offsets[(i + 1)]]
        a = np.empty(tensor_size, dtype=self.dtype)
        self.data_file.seek((self.data_offsets[i] * self.element_size))
        self.data_file.readinto(a)
        item = torch.from_numpy(a).long()
        if self.fix_lua_indexing:
            item -= 1
        return item
    def __len__(self):
        return self._len
    def num_tokens(self, index):
        return self.sizes[index]
    def size(self, index):
        return self.sizes[index]
    @staticmethod
    def exists(path):
        # Both the .idx and .bin files must be present.
        return (os.path.exists(index_file_path(path)) and os.path.exists(data_file_path(path)))
    @property
    def supports_prefetch(self):
        return False
|
class IndexedCachedDataset(IndexedDataset):
    # IndexedDataset variant that prefetches requested examples into one
    # contiguous in-memory buffer and serves subsequent reads from it.
    def __init__(self, path, fix_lua_indexing=False):
        super().__init__(path, fix_lua_indexing=fix_lua_indexing)
        self.cache = None
        # Maps example index -> start offset of its data within self.cache.
        self.cache_index = {}
    @property
    def supports_prefetch(self):
        return True
    def prefetch(self, indices):
        # Fast path: everything requested is already cached.
        if all(((i in self.cache_index) for i in indices)):
            return
        if (not self.data_file):
            self.read_data(self.path)
        indices = sorted(set(indices))
        # First pass: compute the total element count to cache.
        total_size = 0
        for i in indices:
            total_size += (self.data_offsets[(i + 1)] - self.data_offsets[i])
        self.cache = np.empty(total_size, dtype=self.dtype)
        ptx = 0
        self.cache_index.clear()
        # Second pass: read each example directly into its cache slice.
        for i in indices:
            self.cache_index[i] = ptx
            size = (self.data_offsets[(i + 1)] - self.data_offsets[i])
            a = self.cache[ptx:(ptx + size)]
            self.data_file.seek((self.data_offsets[i] * self.element_size))
            self.data_file.readinto(a)
            ptx += size
        # The file is no longer needed once the cache is populated.
        if self.data_file:
            self.data_file.close()
            self.data_file = None
    @lru_cache(maxsize=8)
    def __getitem__(self, i):
        # NOTE: assumes prefetch() was called for this index — a cache miss
        # surfaces as a KeyError on self.cache_index below.
        self.check_index(i)
        tensor_size = self.sizes[self.dim_offsets[i]:self.dim_offsets[(i + 1)]]
        a = np.empty(tensor_size, dtype=self.dtype)
        ptx = self.cache_index[i]
        # Copy out of the shared cache so the returned tensor owns its data.
        np.copyto(a, self.cache[ptx:(ptx + a.size)])
        item = torch.from_numpy(a).long()
        if self.fix_lua_indexing:
            item -= 1
        return item
|
class IndexedRawTextDataset(FairseqDataset):
    """Takes a text file as input and binarizes it in memory at instantiation.

    Original lines are also kept in memory.
    """
    def __init__(self, path, dictionary, append_eos=True, reverse_order=False):
        self.tokens_list = []  # per-line binarized token tensors
        self.lines = []  # original text lines (trailing newline stripped)
        self.sizes = []  # token count per line
        self.append_eos = append_eos
        self.reverse_order = reverse_order
        self.read_data(path, dictionary)
        # NOTE(review): this instance attribute shadows the size() method
        # defined below, making size(index) uncallable on instances —
        # confirm callers only rely on num_tokens()/len().
        self.size = len(self.tokens_list)
    def read_data(self, path, dictionary):
        # Binarize every line with the dictionary (no new symbols are added).
        with open(path, 'r', encoding='utf-8') as f:
            for line in f:
                self.lines.append(line.strip('\n'))
                tokens = dictionary.encode_line(line, add_if_not_exist=False, append_eos=self.append_eos, reverse_order=self.reverse_order).long()
                self.tokens_list.append(tokens)
                self.sizes.append(len(tokens))
        self.sizes = np.array(self.sizes)
    def check_index(self, i):
        if ((i < 0) or (i >= self.size)):
            raise IndexError('index out of range')
    @lru_cache(maxsize=8)
    def __getitem__(self, i):
        self.check_index(i)
        return self.tokens_list[i]
    def get_original_text(self, i):
        # Return the raw (untokenized) line for example i.
        self.check_index(i)
        return self.lines[i]
    def __del__(self):
        pass
    def __len__(self):
        return self.size
    def num_tokens(self, index):
        return self.sizes[index]
    def size(self, index):
        return self.sizes[index]
    @staticmethod
    def exists(path):
        return os.path.exists(path)
|
class IndexedDatasetBuilder(object):
    """Writes tensors to a .bin data file and emits a TNTIDX .idx index.

    Values are stored shifted by +1 (the TorchNet/Lua convention); readers
    undo the shift via fix_lua_indexing.
    """
    # Bytes per element for each supported dtype. The previous table used
    # np.float (an alias of Python float that numpy >= 1.24 removed, which
    # made this class raise AttributeError at definition time) and mapped it
    # to 4 bytes; the explicit fixed-width types are correct and stable.
    element_sizes = {np.uint8: 1, np.int8: 1, np.int16: 2, np.int32: 4, np.int64: 8, np.float32: 4, np.float64: 8}
    def __init__(self, out_file, dtype=np.int32):
        self.out_file = open(out_file, 'wb')
        self.dtype = dtype
        self.data_offsets = [0]  # cumulative element counts into the .bin file
        self.dim_offsets = [0]  # cumulative dimension counts into `sizes`
        self.sizes = []  # flattened per-example shapes
        self.element_size = self.element_sizes[self.dtype]
    def add_item(self, tensor):
        """Append one tensor (shifted by +1) and record its offsets."""
        # Renamed from `bytes`, which shadowed the builtin.
        byte_count = self.out_file.write(np.array((tensor.numpy() + 1), dtype=self.dtype))
        # Integer division keeps offsets ints (writes are always a whole
        # number of elements); '/' silently turned them into floats.
        self.data_offsets.append(self.data_offsets[-1] + (byte_count // self.element_size))
        for s in tensor.size():
            self.sizes.append(s)
        self.dim_offsets.append(self.dim_offsets[-1] + len(tensor.size()))
    def merge_file_(self, another_file):
        """Append another on-disk dataset (same dtype) to this builder."""
        index = IndexedDataset(another_file)
        assert (index.dtype == self.dtype)
        # Shift the incoming offset tables past what we have written so far.
        begin = self.data_offsets[-1]
        for offset in index.data_offsets[1:]:
            self.data_offsets.append(begin + offset)
        self.sizes.extend(index.sizes)
        begin = self.dim_offsets[-1]
        for dim_offset in index.dim_offsets[1:]:
            self.dim_offsets.append(begin + dim_offset)
        # Stream the raw data across (replaces a manual 1 KiB read loop;
        # shutil.copyfileobj is what MMapIndexedDatasetBuilder already uses).
        with open(data_file_path(another_file), 'rb') as f:
            shutil.copyfileobj(f, self.out_file)
    def finalize(self, index_file):
        """Close the data file and write the companion .idx index file."""
        self.out_file.close()
        index = open(index_file, 'wb')
        index.write(b'TNTIDX\x00\x00')
        index.write(struct.pack('<Q', 1))  # format version
        index.write(struct.pack('<QQ', code(self.dtype), self.element_size))
        index.write(struct.pack('<QQ', (len(self.data_offsets) - 1), len(self.sizes)))
        write_longs(index, self.dim_offsets)
        write_longs(index, self.data_offsets)
        write_longs(index, self.sizes)
        index.close()
|
def _warmup_mmap_file(path):
with open(path, 'rb') as stream:
while stream.read(((100 * 1024) * 1024)):
pass
|
class MMapIndexedDataset(torch.utils.data.Dataset):
    # Memory-mapped dataset: a .idx file (parsed by Index below) records
    # per-example sizes and byte pointers into a flat .bin data file.
    class Index(object):
        # 9-byte magic at the start of every mmap-format .idx file.
        _HDR_MAGIC = b'MMIDIDX\x00\x00'
        @classmethod
        def writer(cls, path, dtype):
            # Returns a context manager that writes an index file for `dtype`.
            class _Writer(object):
                def __enter__(self):
                    self._file = open(path, 'wb')
                    self._file.write(cls._HDR_MAGIC)
                    self._file.write(struct.pack('<Q', 1))
                    self._file.write(struct.pack('<B', code(dtype)))
                    return self
                @staticmethod
                def _get_pointers(sizes):
                    # Byte offset of each example = running sum of
                    # (element count * element size) of all prior examples.
                    dtype_size = dtype().itemsize
                    address = 0
                    pointers = []
                    for size in sizes:
                        pointers.append(address)
                        address += (size * dtype_size)
                    return pointers
                def write(self, sizes):
                    pointers = self._get_pointers(sizes)
                    self._file.write(struct.pack('<Q', len(sizes)))
                    sizes = np.array(sizes, dtype=np.int32)
                    self._file.write(sizes.tobytes(order='C'))
                    del sizes
                    pointers = np.array(pointers, dtype=np.int64)
                    self._file.write(pointers.tobytes(order='C'))
                    del pointers
                def __exit__(self, exc_type, exc_val, exc_tb):
                    self._file.close()
            return _Writer()
        def __init__(self, path):
            with open(path, 'rb') as stream:
                magic_test = stream.read(9)
                assert (self._HDR_MAGIC == magic_test), "Index file doesn't match expected format. Make sure that --dataset-impl is configured properly."
                version = struct.unpack('<Q', stream.read(8))
                assert ((1,) == version)
                (dtype_code,) = struct.unpack('<B', stream.read(1))
                self._dtype = dtypes[dtype_code]
                self._dtype_size = self._dtype().itemsize
                self._len = struct.unpack('<Q', stream.read(8))[0]
                # Byte offset where the sizes table begins (end of header).
                offset = stream.tell()
            _warmup_mmap_file(path)
            # Map the whole index file and view the sizes/pointers tables in
            # place (frombuffer makes no copy).
            self._bin_buffer_mmap = np.memmap(path, mode='r', order='C')
            self._bin_buffer = memoryview(self._bin_buffer_mmap)
            self._sizes = np.frombuffer(self._bin_buffer, dtype=np.int32, count=self._len, offset=offset)
            self._pointers = np.frombuffer(self._bin_buffer, dtype=np.int64, count=self._len, offset=(offset + self._sizes.nbytes))
        def __del__(self):
            self._bin_buffer_mmap._mmap.close()
            del self._bin_buffer_mmap
        @property
        def dtype(self):
            return self._dtype
        @property
        def sizes(self):
            return self._sizes
        @lru_cache(maxsize=8)
        def __getitem__(self, i):
            # (byte offset into the .bin file, element count) for example i.
            return (self._pointers[i], self._sizes[i])
        def __len__(self):
            return self._len
    def __init__(self, path):
        super().__init__()
        self._path = None
        self._index = None
        self._bin_buffer = None
        self._do_init(path)
    def __getstate__(self):
        # Only the path is pickled; mmaps are re-created on unpickling.
        return self._path
    def __setstate__(self, state):
        self._do_init(state)
    def _do_init(self, path):
        self._path = path
        self._index = self.Index(index_file_path(self._path))
        _warmup_mmap_file(data_file_path(self._path))
        self._bin_buffer_mmap = np.memmap(data_file_path(self._path), mode='r', order='C')
        self._bin_buffer = memoryview(self._bin_buffer_mmap)
    def __del__(self):
        self._bin_buffer_mmap._mmap.close()
        del self._bin_buffer_mmap
        del self._index
    def __len__(self):
        return len(self._index)
    @lru_cache(maxsize=8)
    def __getitem__(self, i):
        (ptr, size) = self._index[i]
        # Zero-copy view into the mmapped .bin file...
        np_array = np.frombuffer(self._bin_buffer, dtype=self._index.dtype, count=size, offset=ptr)
        if (self._index.dtype != np.int64):
            # ...unless a widening copy to int64 is required.
            np_array = np_array.astype(np.int64)
        return torch.from_numpy(np_array)
    @property
    def sizes(self):
        return self._index.sizes
    @property
    def supports_prefetch(self):
        # Data is mmapped; the OS page cache handles read-ahead.
        return False
    @staticmethod
    def exists(path):
        return (os.path.exists(index_file_path(path)) and os.path.exists(data_file_path(path)))
|
class MMapIndexedDatasetBuilder(object):
    """Streams tensors into a .bin file; finalize() writes the mmap index."""
    def __init__(self, out_file, dtype=np.int64):
        self._data_file = open(out_file, 'wb')
        self._dtype = dtype
        self._sizes = []
    def add_item(self, tensor):
        """Append one tensor's raw values to the data file."""
        values = np.array(tensor.numpy(), dtype=self._dtype)
        self._data_file.write(values.tobytes(order='C'))
        self._sizes.append(values.size)
    def merge_file_(self, another_file):
        """Append another mmap dataset with the same dtype to this builder."""
        other_index = MMapIndexedDataset.Index(index_file_path(another_file))
        assert (other_index.dtype == self._dtype)
        self._sizes.extend(other_index.sizes)
        with open(data_file_path(another_file), 'rb') as f:
            shutil.copyfileobj(f, self._data_file)
    def finalize(self, index_file):
        """Close the data file and emit the companion index file."""
        self._data_file.close()
        with MMapIndexedDataset.Index.writer(index_file, self._dtype) as index:
            index.write(self._sizes)
|
class CountingIterator(object):
    """Wrapper around an iterable that maintains the iteration count.

    Args:
        iterable (iterable): iterable to wrap

    Attributes:
        count (int): number of elements consumed from this iterator
    """
    def __init__(self, iterable, start=0):
        self.iterable = iterable
        self.count = start
        # The generator produced by __iter__ is created once so that
        # __next__, skip() and take() all share the same position.
        self.itr = iter(self)
        self.len = start + len(iterable)
    def __len__(self):
        return self.len
    def __iter__(self):
        for elem in self.iterable:
            # Stop early if take() shortened the iterator.
            if self.count >= self.len:
                return
            self.count += 1
            yield elem
    def __next__(self):
        return next(self.itr)
    def has_next(self):
        """Whether the iterator has been exhausted."""
        return self.count < len(self)
    def skip(self, num_to_skip):
        """Fast-forward the iterator by skipping *num_to_skip* elements."""
        next(itertools.islice(self.itr, num_to_skip, num_to_skip), None)
        return self
    def take(self, n):
        """Truncate the iterator to at most *n* elements."""
        self.len = min(self.len, n)
|
class EpochBatchIterating(object):
    """Abstract interface for iterators that yield batches epoch by epoch."""
    def __len__(self) -> int:
        raise NotImplementedError
    def next_epoch_itr(self, shuffle=True, fix_batches_to_gpus=False):
        # Return a fresh iterator over the next epoch's batches.
        raise NotImplementedError
    def end_of_epoch(self) -> bool:
        """Returns whether the most recent epoch iterator has been exhausted"""
        raise NotImplementedError
    @property
    def iterations_in_epoch(self) -> int:
        # Number of iterations consumed so far in the current epoch.
        raise NotImplementedError
    def state_dict(self):
        # Serialize iteration state for checkpointing.
        raise NotImplementedError
    def load_state_dict(self, state_dict):
        # Restore iteration state from a checkpoint.
        raise NotImplementedError
|
# NOTE: the following non-code text ("Subsets and Splits / No community
# queries yet / The top public SQL queries from the community will appear
# here once available.") is stray web-page residue accidentally appended to
# this file; it is not valid Python and has been commented out.