code stringlengths 17 6.64M |
|---|
def save_checkpoint(args, trainer, epoch_itr, val_loss):
    """Save checkpoints according to the conditions configured on *args*.

    Tracks the best validation score seen so far on the function attribute
    ``save_checkpoint.best``, writes epoch/update/best/last checkpoints on the
    master rank only, then prunes old checkpoints per the ``keep_*`` options.
    """
    from fairseq import distributed_utils, meters
    # Best metric persists across calls as a function attribute; first call
    # seeds it with the current val_loss.
    prev_best = getattr(save_checkpoint, 'best', val_loss)
    if (val_loss is not None):
        best_function = (max if args.maximize_best_checkpoint_metric else min)
        save_checkpoint.best = best_function(val_loss, prev_best)
    # Only the master rank writes checkpoints.
    if (args.no_save or (not distributed_utils.is_master(args))):
        return

    def is_better(a, b):
        # Ties count as "better", so an equal score refreshes checkpoint_best.pt.
        return ((a >= b) if args.maximize_best_checkpoint_metric else (a <= b))

    write_timer = meters.StopwatchMeter()
    write_timer.start()
    epoch = epoch_itr.epoch
    end_of_epoch = epoch_itr.end_of_epoch()
    updates = trainer.get_num_updates()
    # Ordered mapping: checkpoint filename -> should it be written this call.
    # The first true entry is serialized; the rest are copies of it.
    checkpoint_conds = collections.OrderedDict()
    checkpoint_conds['checkpoint{}.pt'.format(epoch)] = (end_of_epoch and (not args.no_epoch_checkpoints) and ((epoch % args.save_interval) == 0))
    checkpoint_conds['checkpoint_{}_{}.pt'.format(epoch, updates)] = ((not end_of_epoch) and (args.save_interval_updates > 0) and ((updates % args.save_interval_updates) == 0))
    checkpoint_conds['checkpoint_best.pt'] = ((val_loss is not None) and ((not hasattr(save_checkpoint, 'best')) or is_better(val_loss, save_checkpoint.best)))
    if ((val_loss is not None) and (args.keep_best_checkpoints > 0)):
        # Metric-tagged best checkpoint, e.g. checkpoint.best_loss_1.2345.pt
        checkpoint_conds['checkpoint.best_{}_{:.4f}.pt'.format(args.best_checkpoint_metric, val_loss)] = ((not hasattr(save_checkpoint, 'best')) or is_better(val_loss, save_checkpoint.best))
    checkpoint_conds['checkpoint_last.pt'] = (not args.no_last_checkpoints)
    extra_state = {'train_iterator': epoch_itr.state_dict(), 'val_loss': val_loss}
    if hasattr(save_checkpoint, 'best'):
        extra_state.update({'best': save_checkpoint.best})
    checkpoints = [os.path.join(args.save_dir, fn) for (fn, cond) in checkpoint_conds.items() if cond]
    if (len(checkpoints) > 0):
        # Serialize once, then copy for the remaining target filenames.
        trainer.save_checkpoint(checkpoints[0], extra_state)
        for cp in checkpoints[1:]:
            PathManager.copy(checkpoints[0], cp, overwrite=True)
        write_timer.stop()
        logger.info('saved checkpoint {} (epoch {} @ {} updates, score {}) (writing took {:.3f} seconds)'.format(checkpoints[0], epoch, updates, val_loss, write_timer.sum))
    # Prune mid-epoch (update-based) checkpoints beyond keep_interval_updates.
    if ((not end_of_epoch) and (args.keep_interval_updates > 0)):
        checkpoint_conds = None  # (unused from here on)
        checkpoints = checkpoint_paths(args.save_dir, pattern='checkpoint_\\d+_(\\d+)\\.pt')
        for old_chk in checkpoints[args.keep_interval_updates:]:
            if os.path.lexists(old_chk):
                os.remove(old_chk)
    # Prune epoch checkpoints beyond keep_last_epochs.
    if (args.keep_last_epochs > 0):
        checkpoints = checkpoint_paths(args.save_dir, pattern='checkpoint(\\d+)\\.pt')
        for old_chk in checkpoints[args.keep_last_epochs:]:
            if os.path.lexists(old_chk):
                os.remove(old_chk)
    # Prune metric-tagged best checkpoints beyond keep_best_checkpoints,
    # keeping the best-scoring ones (reverse sort order when minimizing).
    if (args.keep_best_checkpoints > 0):
        checkpoints = checkpoint_paths(args.save_dir, pattern='checkpoint\\.best_{}_(\\d+\\.?\\d*)\\.pt'.format(args.best_checkpoint_metric))
        if (not args.maximize_best_checkpoint_metric):
            checkpoints = checkpoints[::(- 1)]
        for old_chk in checkpoints[args.keep_best_checkpoints:]:
            if os.path.lexists(old_chk):
                os.remove(old_chk)
|
def load_checkpoint(args, trainer, **passthrough_args):
    """Load a checkpoint and restore the training iterator.

    *passthrough_args* will be passed through to
    ``trainer.get_train_iterator``.
    """
    if (args.distributed_rank == 0):
        os.makedirs(args.save_dir, exist_ok=True)
    # The default restore file resolves relative to the save directory;
    # any other value is treated as an explicit path.
    if (args.restore_file == 'checkpoint_last.pt'):
        checkpoint_path = os.path.join(args.save_dir, 'checkpoint_last.pt')
    else:
        checkpoint_path = args.restore_file
    # NOTE(review): eval() of --optimizer-overrides executes arbitrary code;
    # presumably this is a trusted CLI dict literal — consider ast.literal_eval.
    extra_state = trainer.load_checkpoint(checkpoint_path, args.reset_optimizer, args.reset_lr_scheduler, eval(args.optimizer_overrides), reset_meters=args.reset_meters)
    # Restore the tracked best metric unless the user asked for a reset.
    if ((extra_state is not None) and ('best' in extra_state) and (not args.reset_optimizer) and (not args.reset_meters)):
        save_checkpoint.best = extra_state['best']
    if ((extra_state is not None) and (not args.reset_dataloader)):
        # Resume the train iterator exactly where the checkpoint left off.
        itr_state = extra_state['train_iterator']
        epoch_itr = trainer.get_train_iterator(epoch=itr_state['epoch'], load_dataset=True, **passthrough_args)
        epoch_itr.load_state_dict(itr_state)
    else:
        epoch_itr = trainer.get_train_iterator(epoch=0, load_dataset=True, **passthrough_args)
    trainer.lr_step(epoch_itr.epoch)
    return (extra_state, epoch_itr)
|
def load_checkpoint_to_cpu(path, arg_overrides=None):
    """Load a checkpoint onto the CPU, upgrading legacy formats for
    backward compatibility. Optional *arg_overrides* entries are written
    onto the checkpoint's stored args before upgrading."""
    with PathManager.open(path, 'rb') as f:
        state = torch.load(f, map_location=(lambda s, l: default_restore_location(s, 'cpu')))
    if arg_overrides is not None:
        stored_args = state['args']
        for name, value in arg_overrides.items():
            setattr(stored_args, name, value)
    return _upgrade_state_dict(state)
|
def load_model_ensemble(filenames, arg_overrides=None, task=None):
    """Loads an ensemble of models.

    Args:
        filenames (List[str]): checkpoint files to load
        arg_overrides (Dict[str,Any], optional): override model args that
            were used during model training
        task (fairseq.tasks.FairseqTask, optional): task to use for loading
    """
    # Delegate to the full loader and drop the task from the result.
    models, model_args, _unused_task = load_model_ensemble_and_task(filenames, arg_overrides, task)
    return (models, model_args)
|
def load_model_ensemble_and_task(filenames, arg_overrides=None, task=None):
    """Load every checkpoint in *filenames* into a model and return the
    tuple ``(models, args, task)``; *args* is taken from the last checkpoint
    processed, and the task is built lazily from the first one if not given."""
    from fairseq import tasks
    ensemble = []
    for filename in filenames:
        if not os.path.exists(filename):
            raise IOError('Model file not found: {}'.format(filename))
        state = load_checkpoint_to_cpu(filename, arg_overrides)
        args = state['args']
        if task is None:
            task = tasks.setup_task(args)
        model = task.build_model(args)
        model.load_state_dict(state['model'], strict=True, args=args)
        ensemble.append(model)
    return (ensemble, args, task)
|
def checkpoint_paths(path, pattern='checkpoint(\\d+)\\.pt'):
    """Retrieves all checkpoints found in `path` directory.

    Checkpoints are identified by matching filename to the specified pattern. If
    the pattern contains groups, the result will be sorted by the first group in
    descending order.
    """
    matcher = re.compile(pattern)
    scored = []
    for position, fname in enumerate(os.listdir(path)):
        match = matcher.fullmatch(fname)
        if match is None:
            continue
        # Sort by the first captured group when present, else by listing order.
        sort_key = float(match.group(1)) if match.groups() else position
        scored.append((sort_key, fname))
    scored.sort(reverse=True)
    return [os.path.join(path, name) for (_, name) in scored]
|
def torch_persistent_save(*args, **kwargs):
    """Best-effort ``torch.save``: retry up to three times, logging the
    traceback if the final attempt also fails (no exception propagates)."""
    last_attempt = 2
    for attempt in range(3):
        try:
            return torch.save(*args, **kwargs)
        except Exception:
            if attempt == last_attempt:
                logger.error(traceback.format_exc())
|
def convert_state_dict_type(state_dict, ttype=torch.FloatTensor):
    """Recursively convert every tensor in *state_dict* to tensor type *ttype*.

    Dicts are rebuilt as ``OrderedDict`` and lists as new lists with converted
    values; non-tensor leaves are returned unchanged.

    Bug fix: the recursive calls previously dropped the *ttype* argument, so
    tensors nested inside dicts or lists were always converted to the default
    ``torch.FloatTensor`` regardless of the requested type.
    """
    if isinstance(state_dict, dict):
        converted = OrderedDict()
        for k, v in state_dict.items():
            converted[k] = convert_state_dict_type(v, ttype)
        return converted
    elif isinstance(state_dict, list):
        return [convert_state_dict_type(v, ttype) for v in state_dict]
    elif torch.is_tensor(state_dict):
        return state_dict.type(ttype)
    else:
        return state_dict
|
def save_state(filename, args, model_state_dict, criterion, optimizer, lr_scheduler, num_updates, optim_history=None, extra_state=None):
    """Assemble a full training-state dict and persist it to *filename*."""
    from fairseq import utils
    if optim_history is None:
        optim_history = []
    if extra_state is None:
        extra_state = {}
    # Append the current optimizer/scheduler snapshot to the history.
    latest_optim = {
        'criterion_name': criterion.__class__.__name__,
        'optimizer_name': optimizer.__class__.__name__,
        'lr_scheduler_state': lr_scheduler.state_dict(),
        'num_updates': num_updates,
    }
    state_dict = {
        'args': args,
        'model': model_state_dict if model_state_dict else {},
        'optimizer_history': optim_history + [latest_optim],
        'extra_state': extra_state,
    }
    # Criterions with learnable parameters are checkpointed too.
    if utils.has_parameters(criterion):
        state_dict['criterion'] = criterion.state_dict()
    if not args.no_save_optimizer_state:
        state_dict['last_optimizer_state'] = convert_state_dict_type(optimizer.state_dict())
    with PathManager.open(filename, 'wb') as f:
        torch_persistent_save(state_dict, f)
|
def _upgrade_state_dict(state):
    """Helper for upgrading old model checkpoints.

    Each block below migrates one legacy checkpoint layout to the current
    schema in place, so arbitrarily old checkpoints still load.
    """
    from fairseq import models, registry, tasks
    # Very old checkpoints stored optimizer state and best_loss at top level.
    if ('optimizer_history' not in state):
        state['optimizer_history'] = [{'criterion_name': 'CrossEntropyCriterion', 'best_loss': state['best_loss']}]
        state['last_optimizer_state'] = state['optimizer']
        del state['optimizer']
        del state['best_loss']
    # Move epoch/batch/val_loss bookkeeping under 'extra_state'.
    if (('epoch' in state) and ('extra_state' not in state)):
        state['extra_state'] = {'epoch': state['epoch'], 'batch_offset': state['batch_offset'], 'val_loss': state['val_loss']}
        del state['epoch']
        del state['batch_offset']
        del state['val_loss']
    # Keep only the latest optimizer state, outside the per-entry history.
    if ('optimizer' in state['optimizer_history'][(- 1)]):
        state['last_optimizer_state'] = state['optimizer_history'][(- 1)]['optimizer']
        for optim_hist in state['optimizer_history']:
            del optim_hist['optimizer']
    # Checkpoints predating configurable optimizers default to FairseqNAG.
    if ('optimizer_name' not in state['optimizer_history'][(- 1)]):
        state['optimizer_history'][(- 1)]['optimizer_name'] = 'FairseqNAG'
    # Fold the old 'best_loss' field into the LR scheduler state.
    if ('lr_scheduler_state' not in state['optimizer_history'][(- 1)]):
        state['optimizer_history'][(- 1)]['lr_scheduler_state'] = {'best': state['optimizer_history'][(- 1)]['best_loss']}
        del state['optimizer_history'][(- 1)]['best_loss']
    if ('num_updates' not in state['optimizer_history'][(- 1)]):
        state['optimizer_history'][(- 1)]['num_updates'] = 0
    # 'max_positions' was later split into source/target variants.
    if (hasattr(state['args'], 'max_positions') and (not hasattr(state['args'], 'max_source_positions'))):
        state['args'].max_source_positions = state['args'].max_positions
        state['args'].max_target_positions = state['args'].max_positions
    # Synthesize iterator state for checkpoints saved before it existed.
    if ('train_iterator' not in state['extra_state']):
        state['extra_state']['train_iterator'] = {'epoch': state['extra_state']['epoch'], 'iterations_in_epoch': state['extra_state'].get('batch_offset', 0)}
    # Default task and dataset implementation for pre-task-registry args.
    if (not hasattr(state['args'], 'task')):
        state['args'].task = 'translation'
    if getattr(state['args'], 'raw_text', False):
        state['args'].dataset_impl = 'raw'
    elif getattr(state['args'], 'lazy_load', False):
        state['args'].dataset_impl = 'lazy'
    # Fill in any registry-provided defaults the stored args are missing.
    registry.set_defaults(state['args'], tasks.TASK_REGISTRY[state['args'].task])
    registry.set_defaults(state['args'], models.ARCH_MODEL_REGISTRY[state['args'].arch])
    for (registry_name, REGISTRY) in registry.REGISTRIES.items():
        choice = getattr(state['args'], registry_name, None)
        if (choice is not None):
            cls = REGISTRY['registry'][choice]
            registry.set_defaults(state['args'], cls)
    return state
|
def prune_state_dict(state_dict, args):
    """Prune the given state_dict if desired for LayerDrop
    (https://arxiv.org/abs/1909.11556).

    Training with LayerDrop allows models to be robust to pruning at inference
    time. This function prunes state_dict to allow smaller models to be loaded
    from a larger model and re-maps the existing state_dict for this to occur.

    It's called by functions that load models from checkpoints and does not
    need to be called directly.
    """
    if not args or args.arch == 'ptt_transformer':
        # Missing args or a model that pre-registers its params: nothing to do.
        return state_dict
    encoder_layers_to_keep = args.encoder_layers_to_keep if 'encoder_layers_to_keep' in vars(args) else None
    decoder_layers_to_keep = args.decoder_layers_to_keep if 'decoder_layers_to_keep' in vars(args) else None
    if not encoder_layers_to_keep and not decoder_layers_to_keep:
        return state_dict
    logger.info('Pruning model to specified layer configuration - this works best if the model was trained with LayerDrop')

    def create_pruning_pass(layers_to_keep, layer_name):
        # Map each retained original layer index to its compacted new index.
        keep_layers = sorted(int(s) for s in layers_to_keep.split(','))
        mapping_dict = {str(orig): str(new) for new, orig in enumerate(keep_layers)}
        regex = re.compile('^{layer}.*\\.layers\\.(\\d+)'.format(layer=layer_name))
        return {'substitution_regex': regex, 'mapping_dict': mapping_dict}

    pruning_passes = []
    if encoder_layers_to_keep:
        pruning_passes.append(create_pruning_pass(encoder_layers_to_keep, 'encoder'))
    if decoder_layers_to_keep:
        pruning_passes.append(create_pruning_pass(decoder_layers_to_keep, 'decoder'))

    layer_re = re.compile('\\.layers\\.(\\d+)\\.')
    new_state_dict = {}
    for layer_name in state_dict.keys():
        match = layer_re.search(layer_name)
        if not match:
            # Parameters outside any layer stack are kept verbatim.
            new_state_dict[layer_name] = state_dict[layer_name]
            continue
        original_layer_number = match.group(1)
        for pruning_pass in pruning_passes:
            mapping = pruning_pass['mapping_dict']
            regex = pruning_pass['substitution_regex']
            if original_layer_number in mapping and regex.search(layer_name):
                # Rewrite the layer index in place within the key.
                substitution_match = regex.search(layer_name)
                new_layer_number = mapping[original_layer_number]
                prefix = layer_name[:substitution_match.start(1)]
                suffix = layer_name[substitution_match.end(1):]
                new_state_dict[prefix + new_layer_number + suffix] = state_dict[layer_name]

    # Clear the keep-lists so a restored checkpoint is not pruned twice.
    if 'encoder_layers_to_keep' in vars(args):
        args.encoder_layers_to_keep = None
    if 'decoder_layers_to_keep' in vars(args):
        args.decoder_layers_to_keep = None
    return new_state_dict
|
def load_pretrained_component_from_model(component: Union[(FairseqEncoder, FairseqDecoder)], checkpoint: str):
    """
    Load a pretrained FairseqEncoder or FairseqDecoder from checkpoint into the
    provided `component` object. If state_dict fails to load, there may be a
    mismatch in the architecture of the corresponding `component` found in the
    `checkpoint` file.
    """
    if not PathManager.exists(checkpoint):
        raise IOError('Model file not found: {}'.format(checkpoint))
    state = load_checkpoint_to_cpu(checkpoint)
    if isinstance(component, FairseqEncoder):
        component_type = 'encoder'
    elif isinstance(component, FairseqDecoder):
        component_type = 'decoder'
    else:
        raise ValueError('component to load must be either a FairseqEncoder or FairseqDecoder. Loading other component types are not supported.')
    # Keep only this component's parameters, stripping the 'encoder.'/'decoder.'
    # prefix so keys line up with the standalone module's state dict.
    prefix_len = len(component_type) + 1
    component_state_dict = OrderedDict(
        (key[prefix_len:], value)
        for key, value in state['model'].items()
        if key.startswith(component_type)
    )
    component.load_state_dict(component_state_dict, strict=True)
    return component
|
def verify_checkpoint_directory(save_dir: str) -> None:
    """Ensure *save_dir* exists and is writable.

    Creates the directory when missing, then writes and removes a probe file;
    re-raises the OSError (after logging a warning) if the probe fails.
    """
    if not os.path.exists(save_dir):
        os.makedirs(save_dir, exist_ok=True)
    probe_path = os.path.join(save_dir, 'dummy')
    try:
        with open(probe_path, 'w'):
            pass
    except OSError as e:
        logger.warning('Unable to access checkpoint save directory: {}'.format(save_dir))
        raise e
    else:
        os.remove(probe_path)
|
@register_criterion('adaptive_cross_entropy')
class AdaptiveCrossEntropyCriterion(FairseqCriterion):
    """NLL loss with per-batch adaptive class weights during training.

    Each class is weighted by ``(1 + smooth) - freq``, where ``freq`` is the
    class's relative frequency in the current batch, so rarer classes get
    larger weights; classes absent from the batch get weight 0. Plain
    (unweighted) NLL is used at evaluation time.
    """

    def __init__(self, args, task):
        super().__init__(args, task)
        # Small constant guarding against division by zero when normalizing.
        self.eps = 1e-07
        # Additive offset used when converting frequencies to weights.
        self.smooth = args.label_smoothing

    @staticmethod
    def add_args(parser):
        """Add criterion-specific arguments to the parser."""
        parser.add_argument('--label-smoothing', default=0.2, type=float, metavar='D', help='epsilon for label smoothing, 0 means no label smoothing')

    def forward(self, model, sample, reduce=True):
        """Compute the loss for the given sample.

        Returns a tuple with three elements:
        1) the loss
        2) the sample size, which is used as the denominator for the gradient
        3) logging outputs to display while training
        """
        net_output = model(**sample['net_input'])
        (loss, nll_loss) = self.compute_loss(model, net_output, sample, reduce=reduce)
        sample_size = (sample['target'].size(0) if self.args.sentence_avg else sample['ntokens'])
        logging_output = {'loss': (utils.item(loss.data) if reduce else loss.data), 'nll_loss': (utils.item(nll_loss.data) if reduce else nll_loss.data), 'ntokens': sample['ntokens'], 'nsentences': sample['target'].size(0), 'sample_size': sample_size}
        return (loss, sample_size, logging_output)

    def get_class_weight(self, target, n_classes):
        """Return per-class weights derived from the batch's class histogram."""
        # Weights are data statistics, not learned: no gradient tracking.
        with torch.no_grad():
            dev = target.device
            # Count occurrences of each class index in [0, n_classes - 1].
            class_wise_dist = torch.histc(target.float(), bins=n_classes, min=0, max=(n_classes - 1))
            class_wise_dist = class_wise_dist.float().to(device=dev)
            # Normalize counts to relative frequencies.
            class_wise_dist = (class_wise_dist / (torch.sum(class_wise_dist) + self.eps))
            # Remember classes absent from the batch to zero them out below.
            ids_to_discard = (class_wise_dist == 0)
            # Frequent classes -> smaller weight; rare -> close to (1 + smooth).
            class_wise_dist = ((1.0 + self.smooth) - class_wise_dist)
            class_wise_dist[ids_to_discard] = 0.0
            return class_wise_dist.to(dev)

    def compute_loss(self, model, net_output, sample, reduce=True):
        # Flatten to (batch*time, vocab) log-probabilities and matching targets.
        lprobs = model.get_normalized_probs(net_output, log_probs=True)
        lprobs = lprobs.view((- 1), lprobs.size((- 1)))
        target = model.get_targets(sample, net_output).view((- 1))
        n_classes = lprobs.size((- 1))
        weights = self.get_class_weight(target=target, n_classes=n_classes)
        if self.training:
            loss = F.nll_loss(lprobs, target, weight=weights, ignore_index=self.padding_idx, reduction=('sum' if reduce else 'none'))
        else:
            loss = F.nll_loss(lprobs, target, ignore_index=self.padding_idx, reduction=('sum' if reduce else 'none'))
        # The same (possibly weighted) value is reported as both loss and
        # nll_loss in the logging outputs.
        return (loss, loss)

    @staticmethod
    def reduce_metrics(logging_outputs) -> None:
        """Aggregate logging outputs from data parallel training."""
        loss_sum = sum((log.get('loss', 0) for log in logging_outputs))
        nll_loss_sum = sum((log.get('nll_loss', 0) for log in logging_outputs))
        ntokens = sum((log.get('ntokens', 0) for log in logging_outputs))
        sample_size = sum((log.get('sample_size', 0) for log in logging_outputs))
        # Losses are reported in base 2 (bits); ppl derives from nll_loss.
        metrics.log_scalar('loss', ((loss_sum / sample_size) / math.log(2)), sample_size, round=3)
        metrics.log_scalar('nll_loss', ((nll_loss_sum / ntokens) / math.log(2)), ntokens, round=3)
        metrics.log_derived('ppl', (lambda meters: round((2 ** meters['nll_loss'].avg), 3)))

    @staticmethod
    def logging_outputs_can_be_summed() -> bool:
        """
        Whether the logging outputs returned by `forward` can be summed
        across workers prior to calling `reduce_metrics`. Setting this
        to True will improves distributed training speed.
        """
        return True
|
@register_criterion('adaptive_loss')
class AdaptiveLoss(FairseqCriterion):
    """This is an implementation of the loss function accompanying the adaptive softmax approximation for
    graphical processing units (GPU), described in the paper "Efficient softmax approximation for GPUs"
    (http://arxiv.org/abs/1609.04309)."""

    def __init__(self, args, task):
        super().__init__(args, task)
        # Adaptive softmax is incompatible with c10d's bucketed gradient
        # reduction; require the no_c10d backend instead.
        if (args.ddp_backend == 'c10d'):
            raise Exception('AdaptiveLoss is not compatible with the c10d version of DistributedDataParallel. Please use `--ddp-backend=no_c10d` instead.')

    def forward(self, model, sample, reduce=True):
        """Compute the loss for the given sample.

        Returns a tuple with three elements:
        1) the loss
        2) the sample size, which is used as the denominator for the gradient
        3) logging outputs to display while training
        """
        assert (hasattr(model.decoder, 'adaptive_softmax') and (model.decoder.adaptive_softmax is not None))
        adaptive_softmax = model.decoder.adaptive_softmax
        net_output = model(**sample['net_input'])
        orig_target = model.get_targets(sample, net_output)
        nsentences = orig_target.size(0)
        orig_target = orig_target.view((- 1))
        bsz = orig_target.size(0)
        # Adaptive softmax returns one (logits, target) pair per vocabulary
        # cluster; an entry is None when the batch has no targets there.
        (logits, target) = adaptive_softmax(net_output[0], orig_target)
        assert (len(target) == len(logits))
        # Scalar accumulator when reducing, per-position vector otherwise.
        loss = net_output[0].new((1 if reduce else bsz)).zero_()
        for i in range(len(target)):
            if (target[i] is not None):
                # NOTE(review): the upper bound uses <= logits[i].size(1);
                # valid class indices are < size(1) — confirm intended slack.
                assert ((target[i].min() >= 0) and (target[i].max() <= logits[i].size(1)))
                loss += F.cross_entropy(logits[i], target[i], ignore_index=self.padding_idx, reduction=('sum' if reduce else 'none'))
        orig = utils.strip_pad(orig_target, self.padding_idx)
        ntokens = orig.numel()
        sample_size = (sample['target'].size(0) if self.args.sentence_avg else ntokens)
        logging_output = {'loss': (utils.item(loss.data) if reduce else loss.data), 'ntokens': ntokens, 'nsentences': nsentences, 'sample_size': sample_size}
        return (loss, sample_size, logging_output)

    @staticmethod
    def reduce_metrics(logging_outputs) -> None:
        """Aggregate logging outputs from data parallel training."""
        loss_sum = sum((log.get('loss', 0) for log in logging_outputs))
        ntokens = sum((log.get('ntokens', 0) for log in logging_outputs))
        sample_size = sum((log.get('sample_size', 0) for log in logging_outputs))
        # Report in base 2 (bits). When sample_size == ntokens, loss is
        # already a per-token NLL and ppl derives from it directly.
        metrics.log_scalar('loss', ((loss_sum / sample_size) / math.log(2)), sample_size, round=3)
        if (sample_size != ntokens):
            metrics.log_scalar('nll_loss', ((loss_sum / ntokens) / math.log(2)), ntokens, round=3)
            metrics.log_derived('ppl', (lambda meters: round((2 ** meters['nll_loss'].avg), 3)))
        else:
            metrics.log_derived('ppl', (lambda meters: round((2 ** meters['loss'].avg), 3)))

    @staticmethod
    def logging_outputs_can_be_summed() -> bool:
        """
        Whether the logging outputs returned by `forward` can be summed
        across workers prior to calling `reduce_metrics`. Setting this
        to True will improves distributed training speed.
        """
        return True
|
@register_criterion('binary_cross_entropy')
class BinaryCrossEntropyCriterion(FairseqCriterion):
    """Element-wise binary cross-entropy on model logits.

    Supports optional per-element weights via the model's
    ``get_target_weights`` hook when the model defines it.
    """

    def __init__(self, args, task):
        super().__init__(args, task)

    def forward(self, model, sample, reduce=True, log_pred=False):
        """Compute the loss for the given sample.

        Returns a tuple with three elements:
        1) the loss
        2) the sample size, which is used as the denominator for the gradient
        3) logging outputs to display while training
        """
        net_output = model(**sample['net_input'])
        logits = model.get_logits(net_output).float()
        target = model.get_targets(sample, net_output).float()
        # Optional per-element weighting supplied by the model.
        if hasattr(model, 'get_target_weights'):
            weights = model.get_target_weights(target, net_output)
            if torch.is_tensor(weights):
                weights = weights.float()
        else:
            weights = 1.0
        # reduction='none' replaces the deprecated reduce=False keyword; it
        # keeps per-element losses so the weights can be applied first.
        loss = F.binary_cross_entropy_with_logits(logits, target, reduction='none')
        loss = (loss * weights)
        if reduce:
            loss = loss.sum()
        sample_size = target.numel()
        logging_output = {
            'loss': (utils.item(loss.data) if reduce else loss.data),
            'ntokens': sample_size,
            'nsentences': logits.size(0),
            'sample_size': sample_size,
        }
        if log_pred:
            # Stash raw predictions/targets for downstream analysis.
            logging_output['logits'] = logits.cpu().numpy()
            logging_output['target'] = target.cpu().numpy()
        return (loss, sample_size, logging_output)

    @staticmethod
    def aggregate_logging_outputs(logging_outputs):
        """Aggregate logging outputs from data parallel training."""
        loss_sum = sum(log.get('loss', 0) for log in logging_outputs)
        ntokens = sum(log.get('ntokens', 0) for log in logging_outputs)
        nsentences = sum(log.get('nsentences', 0) for log in logging_outputs)
        sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)
        # Report in base 2 (bits).
        agg_output = {
            'loss': ((loss_sum / sample_size) / math.log(2)),
            'ntokens': ntokens,
            'nsentences': nsentences,
            'sample_size': sample_size,
        }
        if sample_size != ntokens:
            agg_output['nll_loss'] = ((loss_sum / ntokens) / math.log(2))
        return agg_output
|
@register_criterion('composite_loss')
class CompositeLoss(FairseqCriterion):
    """This is a composite loss that, given a list of model outputs and a list of targets,
    computes an average of losses for each output-target pair"""

    @staticmethod
    def add_args(parser):
        """Add criterion-specific arguments to the parser."""
        parser.add_argument('--underlying-criterion', type=str, metavar='VAL', required=True, help='underlying criterion to use for the composite loss')

    @staticmethod
    def build_underlying_criterion(args, task):
        # Temporarily swap args.criterion so the task factory builds the
        # underlying criterion, then restore the original value.
        saved_criterion = args.criterion
        args.criterion = args.underlying_criterion
        assert (saved_criterion != args.underlying_criterion)
        underlying_criterion = task.build_criterion(args)
        args.criterion = saved_criterion
        return underlying_criterion

    @classmethod
    def build_criterion(cls, args, task):
        underlying_criterion = CompositeLoss.build_underlying_criterion(args, task)

        class FakeModel(nn.Module):
            # Adapter that presents a single (output, target) pair as a model,
            # so the underlying criterion can be reused unchanged per pair.

            def __init__(self, model, net_out, target):
                super().__init__()
                self.model = model
                self.net_out = net_out
                self.target = target

            def forward(self, **unused):
                # Return the pre-computed output instead of re-running the model.
                return self.net_out

            def get_normalized_probs(self, net_output, log_probs, sample=None):
                return self.model.get_normalized_probs(net_output, log_probs, sample=sample)

            def get_targets(self, *unused):
                return self.target

            @property
            def decoder(self):
                return self.model.decoder

        class _CompositeLoss(FairseqCriterion):

            def __init__(self, args, task, underlying_criterion):
                super().__init__(args, task)
                self.underlying_criterion = underlying_criterion

            def forward(self, model, sample, reduce=True):
                net_outputs = model(**sample['net_input'])
                targets = sample['target']
                bsz = targets[0].size(0)
                loss = net_outputs[0][0].new((1 if reduce else bsz)).float().zero_()
                sample_size = 0
                logging_output = {}
                # Accumulate the underlying loss over each output/target pair,
                # then average. Note sample['target'] is rebound per pair.
                for (o, t) in zip(net_outputs[0], targets):
                    m = FakeModel(model, (o, net_outputs[1]), t)
                    sample['target'] = t
                    (l, ss, logging_output) = self.underlying_criterion(m, sample, reduce)
                    loss += l
                    sample_size += ss
                loss.div_(len(targets))
                sample_size /= len(targets)
                # Only the last pair's logging_output survives; its loss entry
                # is overwritten with the averaged composite loss.
                logging_output['loss'] = (utils.item(loss.data) if reduce else loss.data)
                return (loss, sample_size, logging_output)

            @staticmethod
            def aggregate_logging_outputs(logging_outputs):
                # Delegate aggregation to the underlying criterion's class.
                return underlying_criterion.__class__.aggregate_logging_outputs(logging_outputs)

            @staticmethod
            def reduce_metrics(logging_outputs) -> None:
                underlying_criterion.__class__.reduce_metrics(logging_outputs)

        return _CompositeLoss(args, task, underlying_criterion)
|
@register_criterion('cross_entropy')
class CrossEntropyCriterion(FairseqCriterion):
    """Plain token-level negative log-likelihood (cross-entropy) criterion."""

    def __init__(self, args, task):
        super().__init__(args, task)

    def forward(self, model, sample, reduce=True):
        """Compute the loss for the given sample.

        Returns a tuple with three elements:
        1) the loss
        2) the sample size, which is used as the denominator for the gradient
        3) logging outputs to display while training
        """
        net_output = model(**sample['net_input'])
        loss, _ = self.compute_loss(model, net_output, sample, reduce=reduce)
        # Gradient denominator: sentences or tokens, per --sentence-avg.
        if self.args.sentence_avg:
            sample_size = sample['target'].size(0)
        else:
            sample_size = sample['ntokens']
        logging_output = {
            'loss': utils.item(loss.data) if reduce else loss.data,
            'ntokens': sample['ntokens'],
            'nsentences': sample['target'].size(0),
            'sample_size': sample_size,
        }
        return loss, sample_size, logging_output

    def compute_loss(self, model, net_output, sample, reduce=True):
        # Flatten to (batch*time, vocab) log-probabilities and matching targets.
        lprobs = model.get_normalized_probs(net_output, log_probs=True)
        lprobs = lprobs.view(-1, lprobs.size(-1))
        target = model.get_targets(sample, net_output).view(-1)
        reduction = 'sum' if reduce else 'none'
        loss = F.nll_loss(lprobs, target, ignore_index=self.padding_idx, reduction=reduction)
        return loss, loss

    @staticmethod
    def reduce_metrics(logging_outputs) -> None:
        """Aggregate logging outputs from data parallel training."""
        loss_sum = sum(log.get('loss', 0) for log in logging_outputs)
        ntokens = sum(log.get('ntokens', 0) for log in logging_outputs)
        sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)
        # Report in base 2 (bits).
        metrics.log_scalar('loss', loss_sum / sample_size / math.log(2), sample_size, round=3)
        if sample_size == ntokens:
            # Loss is already per-token NLL; derive ppl directly from it.
            metrics.log_derived('ppl', lambda meters: round(2 ** meters['loss'].avg, 3))
        else:
            metrics.log_scalar('nll_loss', loss_sum / ntokens / math.log(2), ntokens, round=3)
            metrics.log_derived('ppl', lambda meters: round(2 ** meters['nll_loss'].avg, 3))

    @staticmethod
    def logging_outputs_can_be_summed() -> bool:
        """
        Whether the logging outputs returned by `forward` can be summed
        across workers prior to calling `reduce_metrics`. Setting this
        to True will improves distributed training speed.
        """
        return True
|
class FairseqCriterion(_Loss):
    """Base class for fairseq training criterions (loss functions)."""

    def __init__(self, args, task):
        super().__init__()
        self.args = args
        self.task = task
        # -100 matches PyTorch's default ignore_index for loss functions when
        # the task has no target dictionary to supply a padding symbol.
        self.padding_idx = (task.target_dictionary.pad() if (task.target_dictionary is not None) else (- 100))

    @staticmethod
    def add_args(parser):
        """Add criterion-specific arguments to the parser."""
        pass

    @classmethod
    def build_criterion(cls, args, task):
        # Default factory; subclasses may override for custom construction.
        return cls(args, task)

    def forward(self, model, sample, reduce=True):
        """Compute the loss for the given sample.

        Returns a tuple with three elements:
        1) the loss
        2) the sample size, which is used as the denominator for the gradient
        3) logging outputs to display while training
        """
        raise NotImplementedError

    @staticmethod
    def aggregate_logging_outputs(logging_outputs: List[Dict[(str, Any)]]) -> Dict[(str, Any)]:
        """Aggregate logging outputs from data parallel training."""
        utils.deprecation_warning('The aggregate_logging_outputs API is deprecated. Please use the reduce_metrics API instead.')
        raise NotImplementedError

    @classmethod
    def reduce_metrics(cls, logging_outputs: List[Dict[(str, Any)]]) -> None:
        """Aggregate logging outputs from data parallel training."""
        # Fallback path for criterions that only implement the deprecated
        # aggregate_logging_outputs API.
        utils.deprecation_warning('Criterions should implement the reduce_metrics API. Falling back to deprecated aggregate_logging_outputs API.')
        agg_logging_outputs = cls.aggregate_logging_outputs(logging_outputs)
        for (k, v) in agg_logging_outputs.items():
            if (k in {'nsentences', 'ntokens', 'sample_size'}):
                # Counters are bookkeeping, not metrics to log.
                continue
            metrics.log_scalar(k, v)

    @staticmethod
    def logging_outputs_can_be_summed() -> bool:
        """
        Whether the logging outputs returned by `forward` can be summed
        across workers prior to calling `reduce_metrics`. Setting this
        to True will improves distributed training speed.
        """
        return False
|
def label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index=None, reduce=True):
    """Negative log-likelihood with uniform label smoothing.

    Returns ``(loss, nll_loss)`` where *nll_loss* is the unsmoothed NLL of
    the gold labels and *loss* mixes it with the uniform distribution over
    all classes, weighted by *epsilon*.
    """
    if target.dim() == lprobs.dim() - 1:
        target = target.unsqueeze(-1)
    # Per-position NLL of the gold label and sum of all-class log-probs.
    nll_loss = -lprobs.gather(dim=-1, index=target)
    smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
    if ignore_index is None:
        nll_loss = nll_loss.squeeze(-1)
        smooth_loss = smooth_loss.squeeze(-1)
    else:
        # Zero out contributions from padding positions (keeps trailing dim).
        pad_mask = target.eq(ignore_index)
        if pad_mask.any():
            nll_loss.masked_fill_(pad_mask, 0.0)
            smooth_loss.masked_fill_(pad_mask, 0.0)
    if reduce:
        nll_loss = nll_loss.sum()
        smooth_loss = smooth_loss.sum()
    # Spread epsilon's probability mass uniformly over the vocabulary.
    eps_i = epsilon / lprobs.size(-1)
    loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
    return loss, nll_loss
|
@register_criterion('label_smoothed_cross_entropy')
class LabelSmoothedCrossEntropyCriterion(FairseqCriterion):
    """Cross-entropy with uniform label smoothing.

    The smoothing computation itself lives in ``label_smoothed_nll_loss``.
    """

    def __init__(self, args, task):
        super().__init__(args, task)
        # Probability mass redistributed uniformly over the vocabulary.
        self.eps = args.label_smoothing

    @staticmethod
    def add_args(parser):
        """Add criterion-specific arguments to the parser."""
        parser.add_argument('--label-smoothing', default=0.0, type=float, metavar='D', help='epsilon for label smoothing, 0 means no label smoothing')

    def forward(self, model, sample, reduce=True):
        """Compute the loss for the given sample.

        Returns a tuple with three elements:
        1) the loss
        2) the sample size, which is used as the denominator for the gradient
        3) logging outputs to display while training
        """
        net_output = model(**sample['net_input'])
        (loss, nll_loss) = self.compute_loss(model, net_output, sample, reduce=reduce)
        sample_size = (sample['target'].size(0) if self.args.sentence_avg else sample['ntokens'])
        logging_output = {'loss': (utils.item(loss.data) if reduce else loss.data), 'nll_loss': (utils.item(nll_loss.data) if reduce else nll_loss.data), 'ntokens': sample['ntokens'], 'nsentences': sample['target'].size(0), 'sample_size': sample_size}
        return (loss, sample_size, logging_output)

    def compute_loss(self, model, net_output, sample, reduce=True):
        # Flatten to (batch*time, vocab); target keeps a trailing dim of 1
        # for the gather inside label_smoothed_nll_loss.
        lprobs = model.get_normalized_probs(net_output, log_probs=True)
        lprobs = lprobs.view((- 1), lprobs.size((- 1)))
        target = model.get_targets(sample, net_output).view((- 1), 1)
        (loss, nll_loss) = label_smoothed_nll_loss(lprobs, target, self.eps, ignore_index=self.padding_idx, reduce=reduce)
        return (loss, nll_loss)

    @staticmethod
    def reduce_metrics(logging_outputs) -> None:
        """Aggregate logging outputs from data parallel training."""
        loss_sum = sum((log.get('loss', 0) for log in logging_outputs))
        nll_loss_sum = sum((log.get('nll_loss', 0) for log in logging_outputs))
        ntokens = sum((log.get('ntokens', 0) for log in logging_outputs))
        sample_size = sum((log.get('sample_size', 0) for log in logging_outputs))
        # Report in base 2 (bits); perplexity derives from the unsmoothed NLL.
        metrics.log_scalar('loss', ((loss_sum / sample_size) / math.log(2)), sample_size, round=3)
        metrics.log_scalar('nll_loss', ((nll_loss_sum / ntokens) / math.log(2)), ntokens, round=3)
        metrics.log_derived('ppl', (lambda meters: round((2 ** meters['nll_loss'].avg), 3)))

    @staticmethod
    def logging_outputs_can_be_summed() -> bool:
        """
        Whether the logging outputs returned by `forward` can be summed
        across workers prior to calling `reduce_metrics`. Setting this
        to True will improves distributed training speed.
        """
        return True
|
def compute_cross_entropy_loss(logits, targets, ignore_index=-100):
    """
    Function to compute the cross entropy loss. The default value of
    ignore_index is the same as the default value for F.cross_entropy in
    pytorch.
    """
    assert logits.size(0) == targets.size(-1), "Logits and Targets tensor shapes don't match up"
    # Softmax in float32 for numerical stability, then summed NLL.
    log_probs = F.log_softmax(logits, (- 1), dtype=torch.float32)
    return F.nll_loss(log_probs, targets, reduction='sum', ignore_index=ignore_index)
|
@register_criterion('legacy_masked_lm_loss')
class LegacyMaskedLmLoss(FairseqCriterion):
    """
    Implementation for the loss used in masked language model (MLM) training.
    This optionally also computes the next sentence prediction (NSP) loss and
    adds it to the overall loss based on the specified args. There are three
    cases to consider:
        1) Generic MLM training without NSP loss. In this case sentence_targets
           and sentence_logits are both None.
        2) BERT training without NSP loss. In this case sentence_targets is
           not None but sentence_logits is None and we should not be computing
           a sentence level loss.
        3) BERT training with NSP loss. In this case both sentence_targets and
           sentence_logits are not None and we should be computing a sentence
           level loss. The weight of the sentence level loss is specified as
           an argument (``--nsp-loss-weight``).
    """

    def __init__(self, args, task):
        super().__init__(args, task)

    @staticmethod
    def add_args(parser):
        """Args for MaskedLM Loss"""
        # restrict training to the MLM objective (skip the NSP loss entirely)
        parser.add_argument('--masked-lm-only', default=False, action='store_true', help='compute MLM loss only')
        # relative weight of the NSP loss when it is computed (case 3 above)
        parser.add_argument('--nsp-loss-weight', default=1.0, type=float, help='weight for next sentence prediction loss (default 1)')

    def forward(self, model, sample, reduce=True):
        """Compute the loss for the given sample.

        Returns a tuple with three elements:
        1) the loss
        2) the sample size, which is used as the denominator for the gradient
        3) logging outputs to display while training
        """
        (lm_logits, output_metadata) = model(**sample['net_input'])
        # flatten logits/targets so each token is a row: (N*T, C) vs (N*T,)
        lm_logits = lm_logits.view((- 1), lm_logits.size((- 1)))
        lm_targets = sample['lm_target'].view((- 1))
        lm_loss = compute_cross_entropy_loss(lm_logits, lm_targets, self.padding_idx)
        # number of non-pad target tokens, used to average the MLM loss
        ntokens = utils.strip_pad(lm_targets, self.padding_idx).numel()
        loss = (lm_loss / ntokens)
        nsentences = sample['nsentences']
        sentence_loss = None
        if (not self.args.masked_lm_only):
            sentence_logits = output_metadata['sentence_logits']
            sentence_targets = sample['sentence_target'].view((- 1))
            nsentences = sentence_targets.size(0)
            # in case 2 (BERT without NSP) sentence_logits is None, so no
            # sentence-level loss is added
            if (sentence_logits is not None):
                sentence_loss = compute_cross_entropy_loss(sentence_logits, sentence_targets)
                loss += (self.args.nsp_loss_weight * (sentence_loss / nsentences))
        # losses above are already averaged, so the gradient denominator is 1
        sample_size = 1
        logging_output = {
            'loss': (utils.item(loss.data) if reduce else loss.data),
            'lm_loss': (utils.item(lm_loss.data) if reduce else lm_loss.data),
            'sentence_loss': ((utils.item(sentence_loss.data) if reduce else sentence_loss.data) if (sentence_loss is not None) else 0.0),
            'ntokens': ntokens,
            'nsentences': nsentences,
            'sample_size': sample_size,
        }
        return (loss, sample_size, logging_output)

    @staticmethod
    def aggregate_logging_outputs(logging_outputs):
        """Aggregate logging outputs from data parallel training."""
        lm_loss_sum = sum((log.get('lm_loss', 0) for log in logging_outputs))
        sentence_loss_sum = sum((log.get('sentence_loss', 0) for log in logging_outputs))
        ntokens = sum((log.get('ntokens', 0) for log in logging_outputs))
        nsentences = sum((log.get('nsentences', 0) for log in logging_outputs))
        sample_size = sum((log.get('sample_size', 0) for log in logging_outputs))
        agg_loss = sum((log.get('loss', 0) for log in logging_outputs))
        # losses are converted from nats to bits (divide by ln 2); each
        # average guards against a zero denominator
        agg_output = {
            'loss': (((agg_loss / sample_size) / math.log(2)) if (sample_size > 0) else 0.0),
            'lm_loss': (((lm_loss_sum / ntokens) / math.log(2)) if (ntokens > 0) else 0.0),
            'sentence_loss': (((sentence_loss_sum / nsentences) / math.log(2)) if (nsentences > 0) else 0.0),
            'nll_loss': (((lm_loss_sum / ntokens) / math.log(2)) if (ntokens > 0) else 0.0),
            'ntokens': ntokens,
            'nsentences': nsentences,
            'sample_size': sample_size,
        }
        return agg_output

    @staticmethod
    def logging_outputs_can_be_summed() -> bool:
        """
        Whether the logging outputs returned by `forward` can be summed
        across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
        """
        return True
|
@register_criterion('masked_lm')
class MaskedLmLoss(FairseqCriterion):
    """Implementation for the loss used in masked language model (MLM)
    training.
    """

    def forward(self, model, sample, reduce=True):
        """Compute the loss for the given sample.

        Returns a tuple with three elements:
        1) the loss
        2) the sample size, which is used as the denominator for the gradient
        3) logging outputs to display while training
        """
        pad = self.padding_idx
        masked_tokens = sample['target'].ne(pad)
        sample_size = masked_tokens.int().sum().item()

        # when nothing is masked, pass None so the model scores all positions
        if sample_size == 0:
            masked_tokens = None

        logits = model(**sample['net_input'], masked_tokens=masked_tokens)[0]
        targets = model.get_targets(sample, [logits])
        if sample_size != 0:
            # keep only the targets at masked positions
            targets = targets[masked_tokens]

        # softmax in float32 for numerical stability
        log_probs = F.log_softmax(
            logits.view(-1, logits.size(-1)), dim=-1, dtype=torch.float32
        )
        loss = F.nll_loss(log_probs, targets.view(-1), reduction='sum', ignore_index=pad)

        logging_output = {
            'loss': utils.item(loss.data) if reduce else loss.data,
            'ntokens': sample['ntokens'],
            'nsentences': sample['nsentences'],
            'sample_size': sample_size,
        }
        return loss, sample_size, logging_output

    @staticmethod
    def reduce_metrics(logging_outputs) -> None:
        """Aggregate logging outputs from data parallel training."""
        total_loss = sum(log.get('loss', 0) for log in logging_outputs)
        denom = sum(log.get('sample_size', 0) for log in logging_outputs)
        # report in bits per masked token
        metrics.log_scalar('loss', total_loss / denom / math.log(2), denom, round=3)
        metrics.log_derived('ppl', lambda meters: round(2 ** meters['loss'].avg, 3))

    @staticmethod
    def logging_outputs_can_be_summed() -> bool:
        """Whether the logging outputs returned by `forward` can be summed
        across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
        """
        return True
|
@register_criterion('nat_loss')
class LabelSmoothedDualImitationCriterion(FairseqCriterion):
    """Label-smoothed loss for non-autoregressive translation (NAT) models.

    The model's forward returns a dict of named objectives. Each entry either
    carries a precomputed 'loss', or an 'out'/'tgt'/'mask'/'ls'/'factor'
    group from which an (optionally label-smoothed) NLL or KL loss is
    computed. All per-objective losses are summed into the final loss.
    """

    @staticmethod
    def add_args(parser):
        """Add criterion-specific arguments to the parser."""
        parser.add_argument('--label-smoothing', default=0.0, type=float, metavar='D', help='epsilon for label smoothing, 0 means no label smoothing')

    def _compute_loss(self, outputs, targets, masks=None, label_smoothing=0.0, name='loss', factor=1.0):
        """
        outputs: batch x len x d_model
        targets: batch x len
        masks: batch x len

        policy_logprob: if there is some policy
            depends on the likelihood score as rewards.
        """

        def mean_ds(x: Tensor, dim=None) -> Tensor:
            # mean in float32 for numerical stability, cast back to input dtype
            return (x.float().mean().type_as(x) if dim is None else x.float().mean(dim).type_as(x))

        if masks is not None:
            outputs, targets = outputs[masks], targets[masks]

        if masks is not None and not masks.any():
            # nothing selected by the mask: contribute a zero loss
            nll_loss = torch.tensor(0)
            loss = nll_loss
        else:
            logits = F.log_softmax(outputs, dim=(- 1))
            if targets.dim() == 1:
                # hard (index) targets -> NLL loss per element
                losses = F.nll_loss(logits, targets.to(logits.device), reduction='none')
            else:
                # soft (distribution) targets -> KL divergence, summed over vocab
                losses = F.kl_div(logits, targets.to(logits.device), reduction='none')
                losses = losses.sum((- 1))
            nll_loss = mean_ds(losses)
            if label_smoothing > 0:
                # interpolate with the mean log-probability (uniform prior term)
                loss = (nll_loss * (1 - label_smoothing)) - (mean_ds(logits) * label_smoothing)
            else:
                loss = nll_loss

        loss = loss * factor
        return {'name': name, 'loss': loss, 'nll_loss': nll_loss, 'factor': factor}

    def _custom_loss(self, loss, name='loss', factor=1.0):
        # wrap a model-precomputed loss in the same record format
        return {'name': name, 'loss': loss, 'factor': factor}

    def forward(self, model, sample, reduce=True):
        """Compute the loss for the given sample.

        Returns a tuple with three elements:
        1) the loss
        2) the sample size, which is used as the denominator for the gradient
        3) logging outputs to display while training
        """
        nsentences, ntokens = sample['nsentences'], sample['ntokens']
        src_tokens, src_lengths = sample['net_input']['src_tokens'], sample['net_input']['src_lengths']
        tgt_tokens, prev_output_tokens = sample['target'], sample['prev_target']
        outputs = model(src_tokens, src_lengths, prev_output_tokens, tgt_tokens)

        losses, nll_loss = [], []
        for obj in outputs:
            if outputs[obj].get('loss', None) is None:
                _losses = self._compute_loss(
                    outputs[obj].get('out'),
                    outputs[obj].get('tgt'),
                    outputs[obj].get('mask', None),
                    outputs[obj].get('ls', 0.0),
                    name=(obj + '-loss'),
                    factor=outputs[obj].get('factor', 1.0),
                )
            else:
                _losses = self._custom_loss(outputs[obj].get('loss'), name=(obj + '-loss'), factor=outputs[obj].get('factor', 1.0))
            losses += [_losses]
            if outputs[obj].get('nll_loss', False):
                nll_loss += [_losses.get('nll_loss', 0.0)]

        loss = sum(l['loss'] for l in losses)
        nll_loss = sum(l for l in nll_loss) if len(nll_loss) > 0 else loss.new_tensor(0)

        # NOTE: per-objective losses are already averaged internally, so the
        # gradient denominator is 1
        sample_size = 1
        logging_output = {
            'loss': utils.item(loss.data) if reduce else loss.data,
            'nll_loss': utils.item(nll_loss.data) if reduce else nll_loss.data,
            'ntokens': ntokens,
            'nsentences': nsentences,
            'sample_size': sample_size,
        }
        for l in losses:
            # BUG FIX: the unreduced branch previously used `l[['loss']]`,
            # indexing the dict with a list, which raises
            # "TypeError: unhashable type: 'list'" whenever reduce=False.
            # It must index with the string key, matching the reduce branch.
            logging_output[l['name']] = (
                utils.item(l['loss'].data / l['factor']) if reduce else (l['loss'].data / l['factor'])
            )
        return loss, sample_size, logging_output

    @staticmethod
    def reduce_metrics(logging_outputs) -> None:
        """Aggregate logging outputs from data parallel training."""
        sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)
        loss = sum(log.get('loss', 0) for log in logging_outputs)
        nll_loss = sum(log.get('nll_loss', 0) for log in logging_outputs)
        # report losses in bits (divide by ln 2)
        metrics.log_scalar('loss', (loss / sample_size) / math.log(2), sample_size, round=3)
        metrics.log_scalar('nll_loss', (nll_loss / sample_size) / math.log(2), sample_size, round=3)
        metrics.log_derived('ppl', lambda meters: round(2 ** meters['nll_loss'].avg, 3))
        # also log each named per-objective loss (keys ending in '-loss')
        for key in logging_outputs[0]:
            if key[-5:] == '-loss':
                val = sum(log.get(key, 0) for log in logging_outputs)
                metrics.log_scalar(key[:-5], ((val / sample_size) / math.log(2)) if sample_size > 0 else 0.0, sample_size, round=3)

    @staticmethod
    def logging_outputs_can_be_summed() -> bool:
        """Whether the logging outputs returned by `forward` can be summed
        across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
        """
        return True
|
@register_criterion('sentence_prediction')
class SentencePredictionCriterion(FairseqCriterion):
    """Criterion for sentence-level prediction tasks, supporting both
    classification (cross entropy) and regression (MSE) against a
    model-provided classification head.
    """

    @staticmethod
    def add_args(parser):
        # which classification head the criterion reads logits from
        parser.add_argument('--classification-head-name', default='sentence_classification_head', help='name of the classification head to use')

    def forward(self, model, sample, reduce=True):
        """Compute the loss for the given sample.

        Returns a tuple with three elements:
        1) the loss
        2) the sample size, which is used as the denominator for the gradient
        3) logging outputs to display while training
        """
        assert (hasattr(model, 'classification_heads') and (self.args.classification_head_name in model.classification_heads)), 'model must provide sentence classification head for --criterion=sentence_prediction'
        (logits, _) = model(**sample['net_input'], features_only=True, classification_head_name=self.args.classification_head_name)
        targets = model.get_targets(sample, [logits]).view((- 1))
        # one "sample" per sentence: the gradient is normalized per sentence
        sample_size = targets.numel()
        if (not self.args.regression_target):
            # classification: summed cross entropy (softmax in float32)
            loss = F.nll_loss(F.log_softmax(logits, dim=(- 1), dtype=torch.float32), targets, reduction='sum')
        else:
            # regression: summed squared error against float targets
            logits = logits.squeeze().float()
            targets = targets.float()
            loss = F.mse_loss(logits, targets, reduction='sum')
        logging_output = {'loss': (utils.item(loss.data) if reduce else loss.data), 'ntokens': sample['ntokens'], 'nsentences': sample_size, 'sample_size': sample_size}
        if (not self.args.regression_target):
            # track classification accuracy for logging
            preds = logits.argmax(dim=1)
            logging_output['ncorrect'] = utils.item((preds == targets).sum())
        return (loss, sample_size, logging_output)

    @staticmethod
    def reduce_metrics(logging_outputs) -> None:
        """Aggregate logging outputs from data parallel training."""
        loss_sum = sum((log.get('loss', 0) for log in logging_outputs))
        ntokens = sum((log.get('ntokens', 0) for log in logging_outputs))
        nsentences = sum((log.get('nsentences', 0) for log in logging_outputs))
        sample_size = sum((log.get('sample_size', 0) for log in logging_outputs))
        # report loss in bits (divide by ln 2)
        metrics.log_scalar('loss', ((loss_sum / sample_size) / math.log(2)), sample_size, round=3)
        if (sample_size != ntokens):
            # also report a per-token loss when the denominators differ
            metrics.log_scalar('nll_loss', ((loss_sum / ntokens) / math.log(2)), ntokens, round=3)
        if ((len(logging_outputs) > 0) and ('ncorrect' in logging_outputs[0])):
            ncorrect = sum((log.get('ncorrect', 0) for log in logging_outputs))
            metrics.log_scalar('accuracy', ((100.0 * ncorrect) / nsentences), nsentences, round=1)

    @staticmethod
    def logging_outputs_can_be_summed() -> bool:
        """
        Whether the logging outputs returned by `forward` can be summed
        across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
        """
        return True
|
@register_criterion('sentence_ranking')
class SentenceRankingCriterion(FairseqCriterion):
    """Ranking criterion: scores ``args.num_classes`` candidate inputs with a
    shared ranking head and trains with cross entropy over the candidates.
    Optionally writes per-example predictions to ``--save-predictions``.
    """

    def __init__(self, args, task):
        super().__init__(args, task)
        # open the predictions file once; it stays open for the criterion's
        # lifetime and is closed in __del__
        if (self.args.save_predictions is not None):
            self.prediction_h = open(self.args.save_predictions, 'w')
        else:
            self.prediction_h = None

    def __del__(self):
        # close the predictions file, if one was opened in __init__
        if (self.prediction_h is not None):
            self.prediction_h.close()

    @staticmethod
    def add_args(parser):
        parser.add_argument('--save-predictions', metavar='FILE', help='file to save predictions to')
        parser.add_argument('--ranking-head-name', default='sentence_classification_head', help='name of the ranking head to use')

    def forward(self, model, sample, reduce=True):
        """Compute ranking loss for the given sample.

        Returns a tuple with three elements:
        1) the loss
        2) the sample size, which is used as the denominator for the gradient
        3) logging outputs to display while training
        """
        assert (hasattr(model, 'classification_heads') and (self.args.ranking_head_name in model.classification_heads)), 'model must provide sentence ranking head for --criterion=sentence_ranking'
        scores = []
        # score each candidate separately; candidates live under
        # sample['net_input1'] ... sample['net_input<num_classes>']
        for idx in range(self.args.num_classes):
            (score, _) = model(**sample['net_input{idx}'.format(idx=(idx + 1))], classification_head_name=self.args.ranking_head_name)
            scores.append(score)
        # (batch, num_classes) matrix of candidate scores
        logits = torch.cat(scores, dim=1)
        sample_size = logits.size(0)
        if ('target' in sample):
            targets = model.get_targets(sample, [logits]).view((- 1))
            loss = F.nll_loss(F.log_softmax(logits, dim=(- 1), dtype=torch.float32), targets, reduction='sum')
        else:
            # no labels available (e.g. inference): emit a zero loss
            targets = None
            loss = torch.tensor(0.0, requires_grad=True)
        if (self.prediction_h is not None):
            # write "<id>\t<pred>[\t<label>]" per example
            preds = logits.argmax(dim=1)
            for (i, (id, pred)) in enumerate(zip(sample['id'].tolist(), preds.tolist())):
                if (targets is not None):
                    label = targets[i].item()
                    print('{}\t{}\t{}'.format(id, pred, label), file=self.prediction_h)
                else:
                    print('{}\t{}'.format(id, pred), file=self.prediction_h)
        logging_output = {'loss': (utils.item(loss.data) if reduce else loss.data), 'ntokens': sample['ntokens'], 'nsentences': sample_size, 'sample_size': sample_size}
        if (targets is not None):
            logging_output['ncorrect'] = utils.item((logits.argmax(dim=1) == targets).sum())
        return (loss, sample_size, logging_output)

    @staticmethod
    def reduce_metrics(logging_outputs) -> None:
        """Aggregate logging outputs from data parallel training."""
        loss_sum = sum((log.get('loss', 0) for log in logging_outputs))
        ntokens = sum((log.get('ntokens', 0) for log in logging_outputs))
        nsentences = sum((log.get('nsentences', 0) for log in logging_outputs))
        sample_size = sum((log.get('sample_size', 0) for log in logging_outputs))
        # report loss in bits (divide by ln 2)
        metrics.log_scalar('loss', ((loss_sum / sample_size) / math.log(2)), sample_size, round=3)
        if (sample_size != ntokens):
            # also report a per-token loss when the denominators differ
            metrics.log_scalar('nll_loss', ((loss_sum / ntokens) / math.log(2)), ntokens, round=3)
        if ((len(logging_outputs) > 0) and ('ncorrect' in logging_outputs[0])):
            ncorrect = sum((log.get('ncorrect', 0) for log in logging_outputs))
            metrics.log_scalar('accuracy', ((100.0 * ncorrect) / nsentences), nsentences, round=1)

    @staticmethod
    def logging_outputs_can_be_summed() -> bool:
        """
        Whether the logging outputs returned by `forward` can be summed
        across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
        """
        return True
|
class AppendTokenDataset(BaseWrapperDataset):
    """Wrapper that appends a single *token* to every example of *dataset*.

    With ``token=None`` the wrapper is a no-op; otherwise all reported sizes
    grow by one to account for the appended token.
    """

    def __init__(self, dataset, token=None):
        super().__init__(dataset)
        self.token = token
        if token is None:
            self._sizes = dataset.sizes
        else:
            # every example gains exactly one token
            self._sizes = np.array(dataset.sizes) + 1

    def __getitem__(self, idx):
        item = self.dataset[idx]
        if self.token is not None:
            item = torch.cat([item, item.new([self.token])])
        return item

    @property
    def sizes(self):
        return self._sizes

    def num_tokens(self, index):
        extra = 0 if self.token is None else 1
        return self.dataset.num_tokens(index) + extra

    def size(self, index):
        extra = 0 if self.token is None else 1
        return self.dataset.size(index) + extra
|
class RawAudioDataset(FairseqDataset):
    """Base dataset for raw audio waveforms.

    Subclasses populate ``self.sizes`` and implement ``__getitem__`` to
    return ``{'id': ..., 'source': 1-d float tensor}``. The collater crops
    every waveform in a batch to a common target size.
    """

    def __init__(self, sample_rate, max_sample_size=None, min_sample_size=None, shuffle=True, min_length=0):
        super().__init__()
        self.sample_rate = sample_rate
        self.sizes = []
        # effectively unbounded unless a maximum is given
        self.max_sample_size = (max_sample_size if (max_sample_size is not None) else sys.maxsize)
        # defaults to max_sample_size, which disables the random target-size
        # draw in collater (the `min_sample_size < target_size` check fails)
        self.min_sample_size = (min_sample_size if (min_sample_size is not None) else self.max_sample_size)
        self.min_length = min_length
        self.shuffle = shuffle

    def __getitem__(self, index):
        # subclasses must return {'id': index, 'source': 1-d float tensor}
        raise NotImplementedError()

    def __len__(self):
        return len(self.sizes)

    def postprocess(self, feats, curr_sample_rate):
        # Normalize a loaded waveform: collapse to mono and resample to
        # self.sample_rate if needed.
        def resample(x, factor):
            return F.interpolate(x.view(1, 1, (- 1)), scale_factor=factor).squeeze()
        if (feats.dim() == 2):
            # average over the last dim to get mono
            # NOTE(review): assumes channels are the last dimension — confirm
            # against the loader (soundfile returns (frames, channels))
            feats = feats.mean((- 1))
        if (curr_sample_rate != self.sample_rate):
            factor = (self.sample_rate / curr_sample_rate)
            feats = resample(feats, factor)
        assert (feats.dim() == 1), feats.dim()
        return feats

    def crop_to_max_size(self, wav, target_size):
        # randomly crop `wav` down to `target_size` samples (no-op if shorter)
        size = len(wav)
        diff = (size - target_size)
        if (diff <= 0):
            return wav
        start = np.random.randint(0, (diff + 1))
        end = ((size - diff) + start)
        return wav[start:end]

    def collater(self, samples):
        """Crop all waveforms in the batch to a common size and stack them.

        Returns an empty dict when the batch is empty or the common size
        falls below ``min_length``.
        """
        samples = [s for s in samples if ((s['source'] is not None) and (len(s['source']) > 0))]
        if (len(samples) == 0):
            return {}
        sources = [s['source'] for s in samples]
        sizes = [len(s) for s in sources]
        # everything is cropped to the shortest waveform, capped at max
        target_size = min(min(sizes), self.max_sample_size)
        if (target_size < self.min_length):
            return {}
        if (self.min_sample_size < target_size):
            # randomly pick a smaller common size (data augmentation)
            target_size = np.random.randint(self.min_sample_size, (target_size + 1))
        collated_sources = sources[0].new(len(sources), target_size)
        for (i, (source, size)) in enumerate(zip(sources, sizes)):
            diff = (size - target_size)
            assert (diff >= 0)
            if (diff == 0):
                collated_sources[i] = source
            else:
                collated_sources[i] = self.crop_to_max_size(source, target_size)
        return {'id': torch.LongTensor([s['id'] for s in samples]), 'net_input': {'source': collated_sources}}

    def num_tokens(self, index):
        return self.size(index)

    def size(self, index):
        """Return an example's size as a float or tuple. This value is used when
        filtering a dataset with ``--max-positions``."""
        return min(self.sizes[index], self.max_sample_size)

    def ordered_indices(self):
        """Return an ordered list of indices. Batches will be constructed based
        on this order."""
        if self.shuffle:
            order = [np.random.permutation(len(self))]
        else:
            order = [np.arange(len(self))]
        # np.lexsort sorts by the LAST key first: primary key is size,
        # ties broken by the (possibly shuffled) order above
        order.append(self.sizes)
        return np.lexsort(order)
|
class FileAudioDataset(RawAudioDataset):
    """Raw audio dataset backed by a TSV manifest file.

    The first manifest line is the root directory; each following line is
    ``<relative path>\t<num samples>``.
    """

    def __init__(self, manifest_path, sample_rate, max_sample_size=None, min_sample_size=None, shuffle=True, min_length=0):
        super().__init__(sample_rate=sample_rate, max_sample_size=max_sample_size, min_sample_size=min_sample_size, shuffle=shuffle, min_length=min_length)
        self.fnames = []
        with open(manifest_path, 'r') as f:
            # header line holds the root directory for all entries
            self.root_dir = f.readline().strip()
            for line in f:
                cols = line.strip().split('\t')
                assert len(cols) == 2, line
                self.fnames.append(cols[0])
                self.sizes.append(int(cols[1]))

    def __getitem__(self, index):
        import soundfile as sf

        path = os.path.join(self.root_dir, self.fnames[index])
        wav, curr_sample_rate = sf.read(path)
        feats = self.postprocess(torch.from_numpy(wav).float(), curr_sample_rate)
        return {'id': index, 'source': feats}
|
def backtranslate_samples(samples, collate_fn, generate_fn, cuda=True):
    """Backtranslate a list of samples.

    Given an input (*samples*) of the form:

        [{'id': 1, 'source': 'hallo welt'}]

    this will return:

        [{'id': 1, 'source': 'hello world', 'target': 'hallo welt'}]

    Args:
        samples (List[dict]): samples to backtranslate. Individual samples are
            expected to have a 'source' key, which will become the 'target'
            after backtranslation.
        collate_fn (callable): function to collate samples into a mini-batch
        generate_fn (callable): function to generate backtranslations
        cuda (bool): use GPU for generation (default: ``True``)

    Returns:
        List[dict]: an updated list of samples with a backtranslated source
    """
    collated_samples = collate_fn(samples)
    batch = utils.move_to_cuda(collated_samples) if cuda else collated_samples
    generated_sources = generate_fn(batch)

    # original source becomes the new target, keyed by sample id
    id_to_src = {sample['id']: sample['source'] for sample in samples}

    # pair each best hypothesis (new source) with its original source
    return [
        {
            'id': id.item(),
            'target': id_to_src[id.item()],
            'source': hypos[0]['tokens'].cpu(),
        }
        for id, hypos in zip(collated_samples['id'], generated_sources)
    ]
|
class BacktranslationDataset(FairseqDataset):
    """Backtranslation dataset: takes a tgt batch, generates a src via a
    tgt->src backtranslation function, and returns the corresponding
    `{generated src, input tgt}` batch.

    Args:
        tgt_dataset (~fairseq.data.FairseqDataset): the dataset to be
            backtranslated. Only the source side of this dataset will be
            used. After backtranslation, the source sentences in this
            dataset will be returned as the targets.
        src_dict (~fairseq.data.Dictionary): the dictionary of backtranslated
            sentences.
        tgt_dict (~fairseq.data.Dictionary, optional): the dictionary of
            sentences to be backtranslated.
        backtranslation_fn (callable, optional): function to call to generate
            backtranslations (typically a SequenceGenerator's `generate`).
            May be None at construction time and set later via
            :func:`set_backtranslation_fn`.
        output_collater (callable, optional): function applied to the
            backtranslated samples to create the final batch
            (default: ``tgt_dataset.collater``).
        cuda: use GPU for generation
    """

    def __init__(self, tgt_dataset, src_dict, tgt_dict=None, backtranslation_fn=None, output_collater=None, cuda=True, **kwargs):
        self.tgt_dataset = tgt_dataset
        self.backtranslation_fn = backtranslation_fn
        if output_collater is None:
            output_collater = tgt_dataset.collater
        self.output_collater = output_collater
        # never request the GPU when none is available
        self.cuda = cuda if torch.cuda.is_available() else False
        self.src_dict = src_dict
        self.tgt_dict = tgt_dict

    def __getitem__(self, index):
        """Return a single sample from *tgt_dataset*. Backtranslation is NOT
        applied here; it happens batch-wise in :func:`collater`.
        """
        return self.tgt_dataset[index]

    def __len__(self):
        return len(self.tgt_dataset)

    def set_backtranslation_fn(self, backtranslation_fn):
        self.backtranslation_fn = backtranslation_fn

    def collater(self, samples):
        """Merge and backtranslate a list of samples to form a mini-batch.

        The samples are collated with ``tgt_dataset.collater`` and fed to the
        backtranslation model; the best hypothesis becomes the new source and
        the original input becomes the target. The resulting sample list is
        then collated with *output_collater*.
        """
        if samples[0].get('is_dummy', False):
            return samples
        backtranslated = backtranslate_samples(
            samples=samples,
            collate_fn=self.tgt_dataset.collater,
            generate_fn=(lambda net_input: self.backtranslation_fn(net_input)),
            cuda=self.cuda,
        )
        return self.output_collater(backtranslated)

    def num_tokens(self, index):
        """Just use the tgt dataset num_tokens"""
        return self.tgt_dataset.num_tokens(index)

    def ordered_indices(self):
        """Just use the tgt dataset ordered_indices"""
        return self.tgt_dataset.ordered_indices()

    def size(self, index):
        """Return an example's size as a float or tuple. This value is used
        when filtering a dataset with ``--max-positions``.

        Note: we use *tgt_dataset* to approximate the length of the source
        sentence, since we do not know the actual length until after
        backtranslation.
        """
        tgt_size = self.tgt_dataset.size(index)[0]
        return (tgt_size, tgt_size)

    @property
    def supports_prefetch(self):
        return getattr(self.tgt_dataset, 'supports_prefetch', False)

    def prefetch(self, indices):
        return self.tgt_dataset.prefetch(indices)
|
class BaseWrapperDataset(FairseqDataset):
    """Dataset wrapper that forwards every operation to ``self.dataset``.

    Subclasses override only the behavior they want to change.
    """

    def __init__(self, dataset):
        super().__init__()
        self.dataset = dataset

    def __getitem__(self, index):
        return self.dataset[index]

    def __len__(self):
        return len(self.dataset)

    def collater(self, samples):
        # fall back to the torch default collater when the wrapped dataset
        # does not define one
        collate = getattr(self.dataset, 'collater', default_collate)
        return collate(samples)

    @property
    def sizes(self):
        return self.dataset.sizes

    def num_tokens(self, index):
        return self.dataset.num_tokens(index)

    def size(self, index):
        return self.dataset.size(index)

    def ordered_indices(self):
        return self.dataset.ordered_indices()

    @property
    def supports_prefetch(self):
        return getattr(self.dataset, 'supports_prefetch', False)

    def prefetch(self, indices):
        self.dataset.prefetch(indices)

    def set_epoch(self, epoch):
        super().set_epoch(epoch)
        if hasattr(self.dataset, 'set_epoch'):
            self.dataset.set_epoch(epoch)
|
class ColorizeDataset(BaseWrapperDataset):
    """Adds a 'colors' property to the collated net input, obtained from the
    provided *color_getter* for use by models.
    """

    def __init__(self, dataset, color_getter):
        super().__init__(dataset)
        self.color_getter = color_getter

    def collater(self, samples):
        base_collate = super().collater(samples)
        if len(base_collate) > 0:
            # one color per sample, looked up by sample id
            colors = [self.color_getter(self.dataset, s['id']) for s in samples]
            base_collate['net_input']['colors'] = torch.tensor(colors, dtype=torch.long)
        return base_collate
|
class ConcatDataset(FairseqDataset):
    """Concatenation of multiple datasets, with optional integer per-dataset
    upsampling via ``sample_ratios``.
    """

    @staticmethod
    def cumsum(sequence, sample_ratios):
        # cumulative (upsampled) lengths; entry i is the total number of
        # examples contributed by datasets[0..i]
        (r, s) = ([], 0)
        for (e, ratio) in zip(sequence, sample_ratios):
            curr_len = int((ratio * len(e)))
            r.append((curr_len + s))
            s += curr_len
        return r

    def __init__(self, datasets, sample_ratios=1):
        super(ConcatDataset, self).__init__()
        assert (len(datasets) > 0), 'datasets should not be an empty iterable'
        self.datasets = list(datasets)
        # a scalar ratio applies uniformly to every dataset
        if isinstance(sample_ratios, int):
            sample_ratios = ([sample_ratios] * len(self.datasets))
        self.sample_ratios = sample_ratios
        self.cumulative_sizes = self.cumsum(self.datasets, sample_ratios)
        # un-upsampled lengths, used to wrap indices when ratio > 1
        self.real_sizes = [len(d) for d in self.datasets]

    def __len__(self):
        return self.cumulative_sizes[(- 1)]

    def __getitem__(self, idx):
        (dataset_idx, sample_idx) = self._get_dataset_and_sample_index(idx)
        return self.datasets[dataset_idx][sample_idx]

    def _get_dataset_and_sample_index(self, idx: int):
        # locate the dataset containing the flat index, then convert to a
        # local index; the modulo handles upsampled (repeated) ranges
        dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
        if (dataset_idx == 0):
            sample_idx = idx
        else:
            sample_idx = (idx - self.cumulative_sizes[(dataset_idx - 1)])
        sample_idx = (sample_idx % self.real_sizes[dataset_idx])
        return (dataset_idx, sample_idx)

    def collater(self, samples):
        # delegate to the first dataset's collater when available
        if hasattr(self.datasets[0], 'collater'):
            return self.datasets[0].collater(samples)
        else:
            return default_collate(samples)

    def size(self, idx: int):
        """
        Return an example's size as a float or tuple.
        """
        (dataset_idx, sample_idx) = self._get_dataset_and_sample_index(idx)
        return self.datasets[dataset_idx].size(sample_idx)

    def num_tokens(self, index: int):
        # `size` may be a tuple (e.g. src/tgt lengths); take the largest
        return np.max(self.size(index))

    def attr(self, attr: str, index: int):
        # fetch an attribute from whichever dataset owns this flat index
        dataset_idx = bisect.bisect_right(self.cumulative_sizes, index)
        return getattr(self.datasets[dataset_idx], attr, None)

    @property
    def sizes(self):
        _dataset_sizes = []
        for (ds, sr) in zip(self.datasets, self.sample_ratios):
            if isinstance(ds.sizes, np.ndarray):
                _dataset_sizes.append(np.tile(ds.sizes, sr))
            else:
                assert isinstance(ds.sizes, list)
                # NOTE(review): this tiles only the FIRST entry of a
                # list-typed `sizes`, unlike the ndarray branch which tiles
                # the whole array — looks suspicious; confirm intent.
                _dataset_sizes.append(np.tile(ds.sizes[0], sr))
        return np.concatenate(_dataset_sizes)

    @property
    def supports_prefetch(self):
        return all((d.supports_prefetch for d in self.datasets))

    def ordered_indices(self):
        """
        Returns indices sorted by length. So less padding is needed.
        """
        return np.argsort(self.sizes)

    def prefetch(self, indices):
        frm = 0
        for (to, ds) in zip(self.cumulative_sizes, self.datasets):
            real_size = len(ds)
            if getattr(ds, 'supports_prefetch', False):
                # map flat indices in [frm, to) back to local dataset indices
                ds.prefetch([((i - frm) % real_size) for i in indices if (frm <= i < to)])
            frm = to

    def set_epoch(self, epoch):
        super().set_epoch(epoch)
        for ds in self.datasets:
            if hasattr(ds, 'set_epoch'):
                ds.set_epoch(epoch)
|
class ConcatSentencesDataset(FairseqDataset):
    """Concatenates the index-aligned examples of several datasets into a
    single tensor per index. All wrapped datasets must have equal length.
    """

    def __init__(self, *datasets):
        super().__init__()
        self.datasets = datasets
        assert all(len(ds) == len(datasets[0]) for ds in datasets), 'datasets must have the same length'

    def __getitem__(self, index):
        parts = [ds[index] for ds in self.datasets]
        return torch.cat(parts)

    def __len__(self):
        return len(self.datasets[0])

    def collater(self, samples):
        return self.datasets[0].collater(samples)

    @property
    def sizes(self):
        # element-wise sum of the per-dataset sizes
        return sum(ds.sizes for ds in self.datasets)

    def num_tokens(self, index):
        return sum(ds.num_tokens(index) for ds in self.datasets)

    def size(self, index):
        return sum(ds.size(index) for ds in self.datasets)

    def ordered_indices(self):
        return self.datasets[0].ordered_indices()

    @property
    def supports_prefetch(self):
        return any(getattr(ds, 'supports_prefetch', False) for ds in self.datasets)

    def prefetch(self, indices):
        for ds in self.datasets:
            if getattr(ds, 'supports_prefetch', False):
                ds.prefetch(indices)

    def set_epoch(self, epoch):
        super().set_epoch(epoch)
        for ds in self.datasets:
            if hasattr(ds, 'set_epoch'):
                ds.set_epoch(epoch)
|
def infer_language_pair(path):
    """Infer language pair from filename: <split>.<lang1>-<lang2>.(...).idx"""
    src, dst = None, None
    for filename in os.listdir(path):
        parts = filename.split('.')
        # need at least "<split>.<l1>-<l2>.<ext>" with a 2-part middle field
        if len(parts) >= 3 and len(parts[1].split('-')) == 2:
            return parts[1].split('-')
    # nothing matched: signal failure with a (None, None) pair
    return src, dst
|
def collate_tokens(values, pad_idx, eos_idx=None, left_pad=False, move_eos_to_beginning=False):
    """Convert a list of 1d tensors into a padded 2d tensor.

    Rows are padded with *pad_idx* up to the longest input; *left_pad*
    places the padding before the tokens. With *move_eos_to_beginning*,
    each row is rotated so its trailing *eos_idx* becomes the first token.
    """
    size = max(v.size(0) for v in values)
    res = values[0].new(len(values), size).fill_(pad_idx)

    def copy_tensor(src, dst):
        assert dst.numel() == src.numel()
        if move_eos_to_beginning:
            # rotate: EOS moves from the end to position 0
            assert src[-1] == eos_idx
            dst[0] = eos_idx
            dst[1:] = src[:-1]
        else:
            dst.copy_(src)

    for i, v in enumerate(values):
        dest = res[i][size - len(v):] if left_pad else res[i][:len(v)]
        copy_tensor(v, dest)
    return res
|
def load_indexed_dataset(path, dictionary, dataset_impl=None, combine=False, default='cached'):
    """A helper function for loading indexed datasets.

    Args:
        path (str): path to indexed dataset (e.g., 'data-bin/train')
        dictionary (~fairseq.data.Dictionary): data dictionary
        dataset_impl (str, optional): which dataset implementation to use. If
            not provided, it will be inferred automatically. For legacy indexed
            data we use the 'cached' implementation by default.
        combine (bool, optional): automatically load and combine multiple
            datasets. For example, if *path* is 'data-bin/train', then we will
            combine 'data-bin/train', 'data-bin/train1', ... and return a
            single ConcatDataset instance.
    """
    from fairseq.data.concat_dataset import ConcatDataset
    import fairseq.data.indexed_dataset as indexed_dataset
    datasets = []
    for k in itertools.count():
        # shard paths are 'path', 'path1', 'path2', ...
        path_k = (path + (str(k) if (k > 0) else ''))
        # infer the implementation per shard unless one was given explicitly
        dataset_impl_k = dataset_impl
        if (dataset_impl_k is None):
            dataset_impl_k = indexed_dataset.infer_dataset_impl(path_k)
        dataset = indexed_dataset.make_dataset(path_k, impl=(dataset_impl_k or default), fix_lua_indexing=True, dictionary=dictionary)
        if (dataset is None):
            # no shard at this suffix: stop scanning
            break
        logger.info('loaded {} examples from: {}'.format(len(dataset), path_k))
        datasets.append(dataset)
        if (not combine):
            break
    if (len(datasets) == 0):
        return None
    elif (len(datasets) == 1):
        # avoid a needless ConcatDataset wrapper for a single shard
        return datasets[0]
    else:
        return ConcatDataset(datasets)
|
@contextlib.contextmanager
def numpy_seed(seed, *addl_seeds):
    """Context manager which seeds the NumPy PRNG with *seed* (mixed with any
    *addl_seeds*) and restores the previous PRNG state afterward.

    A ``None`` seed leaves the PRNG untouched.
    """
    if seed is None:
        yield
        return
    if addl_seeds:
        # fold the extra seeds into a single deterministic value
        seed = int(hash((seed, *addl_seeds)) % 1000000.0)
    saved_state = np.random.get_state()
    np.random.seed(seed)
    try:
        yield
    finally:
        np.random.set_state(saved_state)
|
def collect_filtered(function, iterable, filtered):
    """Like :func:`filter`, but collects the rejected elements.

    Args:
        function (callable): predicate; elements for which it returns a
            falsy value are diverted into *filtered* instead of yielded
        iterable (iterable): elements to examine
        filtered (list): receives the rejected elements, in order
    """
    for item in iterable:
        if not function(item):
            filtered.append(item)
            continue
        yield item
|
def _filter_by_size_dynamic(indices, size_fn, max_positions, raise_exception=False):
    """Filter *indices* by comparing per-example sizes against *max_positions*.

    Args:
        indices (iterable of int): candidate dataset indices
        size_fn (callable): maps an index to its size (scalar, tuple, or dict)
        max_positions (int | float | dict | tuple): size limits; the shape
            chosen by the caller decides which comparison branch is used
        raise_exception (bool, optional): unused here; kept for interface
            compatibility with callers

    Returns:
        tuple: (kept indices as an int64 ndarray, list of filtered-out indices)
    """
    def check_size(idx):
        if isinstance(max_positions, (float, int)):
            # Scalar limit: compare the (scalar) size directly.
            return size_fn(idx) <= max_positions
        elif isinstance(max_positions, dict):
            idx_size = size_fn(idx)
            assert isinstance(idx_size, dict)
            # Only keys present in both dicts are checked; None disables a limit.
            intersect_keys = set(max_positions.keys()) & set(idx_size.keys())
            return all(
                all(
                    (a is None) or (b is None) or (a <= b)
                    for a, b in zip(idx_size[key], max_positions[key])
                )
                for key in intersect_keys
            )
        else:
            # Hoisted: the original called size_fn(idx) up to three times here.
            idx_size = size_fn(idx)
            if isinstance(idx_size, dict) and isinstance(max_positions, tuple):
                return all(
                    (a is None) or (b is None) or (a <= b)
                    for a, b in zip(idx_size.values(), max_positions)
                )
            if not isinstance(idx_size, Iterable):
                # Scalar size vs. tuple of limits: must fit under every limit.
                return all(idx_size <= b for b in max_positions)
            return all(
                (a is None) or (b is None) or (a <= b)
                for a, b in zip(idx_size, max_positions)
            )

    ignored = []
    itr = collect_filtered(check_size, indices, ignored)
    indices = np.fromiter(itr, dtype=np.int64, count=-1)
    return (indices, ignored)
|
def filter_by_size(indices, dataset, max_positions, raise_exception=False):
    """
    Filter indices based on their size.

    Args:
        indices (List[int]): ordered list of dataset indices
        dataset (FairseqDataset): fairseq dataset instance
        max_positions (tuple): filter elements larger than this size.
            Comparisons are done component-wise.
        raise_exception (bool, optional): if ``True``, raise an exception if
            any elements are filtered (default: False).
    """
    if isinstance(max_positions, (float, int)):
        if hasattr(dataset, 'sizes') and isinstance(dataset.sizes, np.ndarray):
            # Hoisted: compute the fancy-indexed size array once instead of twice.
            sizes = dataset.sizes[indices]
            ignored = indices[sizes > max_positions].tolist()
            indices = indices[sizes <= max_positions]
        elif (hasattr(dataset, 'sizes') and isinstance(dataset.sizes, list)
                and (len(dataset.sizes) == 1)):
            sizes = dataset.sizes[0][indices]
            ignored = indices[sizes > max_positions].tolist()
            indices = indices[sizes <= max_positions]
        else:
            # No usable precomputed sizes; fall back to per-index checks.
            (indices, ignored) = _filter_by_size_dynamic(indices, dataset.size, max_positions)
    else:
        (indices, ignored) = _filter_by_size_dynamic(indices, dataset.size, max_positions)

    if (len(ignored) > 0) and raise_exception:
        raise Exception('Size of sample #{} is invalid (={}) since max_positions={}, skip this example with --skip-invalid-size-inputs-valid-test'.format(ignored[0], dataset.size(ignored[0]), max_positions))
    if len(ignored) > 0:
        # BUG FIX: Logger.warn is a deprecated alias; use Logger.warning.
        logger.warning('{} samples have invalid sizes and will be skipped, max_positions={}, first few sample ids={}'.format(len(ignored), max_positions, ignored[:10]))
    return indices
|
def batch_by_size(indices, num_tokens_fn, max_tokens=None, max_sentences=None, required_batch_size_multiple=1):
    """Yield mini-batches of indices bucketed by size.

    Batches may contain sequences of different lengths.

    Args:
        indices (List[int]): ordered list of dataset indices
        num_tokens_fn (callable): returns the number of tokens at a given index
        max_tokens (int, optional): max number of tokens per batch
            (default: None, i.e. unlimited).
        max_sentences (int, optional): max number of sentences per batch
            (default: None, i.e. unlimited).
        required_batch_size_multiple (int, optional): require batch size to
            be a multiple of N (default: 1).
    """
    try:
        from fairseq.data.data_utils_fast import batch_by_size_fast
    except ImportError:
        raise ImportError('Please build Cython components with: `pip install --editable .` or `python setup.py build_ext --inplace`')

    # The Cython kernel encodes "no limit" as -1.
    if max_tokens is None:
        max_tokens = -1
    if max_sentences is None:
        max_sentences = -1
    if isinstance(indices, types.GeneratorType):
        indices = np.fromiter(indices, dtype=np.int64, count=-1)
    return batch_by_size_fast(
        indices, num_tokens_fn, max_tokens, max_sentences,
        required_batch_size_multiple,
    )
|
def process_bpe_symbol(sentence: str, bpe_symbol: str):
    """Undo BPE segmentation of *sentence* for the given marker scheme.

    ``'sentencepiece'`` and ``'_EOW'`` are handled as special schemes;
    any other non-None *bpe_symbol* is treated as a literal continuation
    marker to strip. ``None`` leaves the sentence untouched.
    """
    if bpe_symbol == 'sentencepiece':
        return sentence.replace(' ', '').replace('▁', ' ').strip()
    if bpe_symbol == '_EOW':
        return sentence.replace(' ', '').replace('_EOW', ' ').strip()
    if bpe_symbol is not None:
        # Trailing space guarantees a final marker is also removed.
        return (sentence + ' ').replace(bpe_symbol, '').rstrip()
    return sentence
|
def collate(samples, pad_idx, eos_idx, vocab, left_pad_source=False, left_pad_target=False, input_feeding=True):
    """Collate a list of denoising samples into a mini-batch.

    Args:
        samples (List[dict]): each with 'id' (int), 'source' and optionally
            'target' 1-D token tensors
        pad_idx (int): padding symbol index
        eos_idx (int): end-of-sentence symbol index
        vocab: vocabulary (unused here; kept for interface compatibility)
        left_pad_source (bool): pad sources on the left instead of the right
        left_pad_target (bool): pad targets on the left instead of the right
        input_feeding (bool): also build 'prev_output_tokens' (target shifted
            right, EOS moved to the front) for teacher forcing; must be True

    Returns:
        dict: batch with 'id', 'ntokens', 'net_input', 'target', 'nsentences'
    """
    assert input_feeding
    if len(samples) == 0:
        return {}

    def merge(key, left_pad, move_eos_to_beginning=False):
        return data_utils.collate_tokens([s[key] for s in samples], pad_idx, eos_idx, left_pad, move_eos_to_beginning)

    id = torch.LongTensor([s['id'] for s in samples])
    src_tokens = merge('source', left_pad=left_pad_source)
    # Sort by descending source length (required by length-sorted encoders).
    src_lengths = torch.LongTensor([s['source'].numel() for s in samples])
    (src_lengths, sort_order) = src_lengths.sort(descending=True)
    id = id.index_select(0, sort_order)
    src_tokens = src_tokens.index_select(0, sort_order)

    prev_output_tokens = None
    target = None
    if samples[0].get('target', None) is not None:
        target = merge('target', left_pad=left_pad_target)
        target = target.index_select(0, sort_order)
        ntokens = sum(len(s['target']) for s in samples)
        if input_feeding:
            # Decoder input: target shifted right with EOS at the front.
            prev_output_tokens = merge('target', left_pad=left_pad_target, move_eos_to_beginning=True)
            prev_output_tokens = prev_output_tokens.index_select(0, sort_order)
    else:
        ntokens = sum(len(s['source']) for s in samples)

    batch = {
        'id': id,
        'ntokens': ntokens,
        'net_input': {
            'src_tokens': src_tokens,
            'src_lengths': src_lengths,
        },
        'target': target,
        # BUG FIX: was samples[0]['source'].size(0), which is the token count
        # of the first (1-D) source tensor, not the number of sentences.
        'nsentences': len(samples),
    }
    if prev_output_tokens is not None:
        batch['net_input']['prev_output_tokens'] = prev_output_tokens
    return batch
|
class DenoisingDataset(FairseqDataset):
    """
    A wrapper around TokenBlockDataset for BART dataset.

    Applies BART-style noising (sentence permutation, whole-word masking,
    random insertion, rotation) to produce (noisy source, clean target) pairs.

    Args:
        dataset (TokenBlockDataset): dataset to wrap
        sizes (List[int]): sentence lengths
        vocab (~fairseq.data.Dictionary): vocabulary
        mask_idx (int): dictionary index used for masked token
        mask_whole_words: only mask whole words. This should be a byte mask
            over vocab indices, indicating whether it is the beginning of a
            word. We will extend any mask to encompass the whole word.
        shuffle (bool, optional): shuffle the elements before batching.
            Default: ``True``
        seed: Seed for random number generator for reproducibility.
        args: argparse arguments (mask, mask_random, insert, rotate,
            permute_sentences, bpe, replace_length, mask_length,
            poisson_lambda).
    """

    def __init__(self, dataset, sizes, vocab, mask_idx, mask_whole_words, shuffle, seed, args):
        self.dataset = dataset
        self.sizes = sizes
        self.vocab = vocab
        self.shuffle = shuffle
        self.seed = seed
        self.mask_idx = mask_idx
        self.mask_whole_word = mask_whole_words
        self.mask_ratio = args.mask
        self.random_ratio = args.mask_random
        self.insert_ratio = args.insert
        self.rotate_ratio = args.rotate
        self.permute_sentence_ratio = args.permute_sentences

        # Sentence separator: with GPT-2 BPE the full stop '.' is token '13'.
        if args.bpe != 'gpt2':
            self.full_stop_index = self.vocab.index('.')
        else:
            assert args.bpe == 'gpt2'
            self.full_stop_index = self.vocab.index('13')

        self.replace_length = args.replace_length
        # BUG FIX: these checks previously executed `raise f'...'`, i.e. raised
        # bare strings, which is a TypeError in Python 3 ("exceptions must
        # derive from BaseException"); raise ValueError with the same message.
        if self.replace_length not in [-1, 0, 1]:
            raise ValueError(f'invalid arg: replace_length={self.replace_length}')
        if args.mask_length not in ['subword', 'word', 'span-poisson']:
            raise ValueError(f'invalid arg: mask-length={args.mask_length}')
        if (args.mask_length == 'subword') and (args.replace_length not in [0, 1]):
            raise ValueError(f'if using subwords, use replace-length=1 or 0')

        self.mask_span_distribution = None
        if args.mask_length == 'span-poisson':
            # Build a truncated Poisson(lambda) distribution over span lengths
            # by accumulating terms of the pmf until they become negligible.
            _lambda = args.poisson_lambda
            lambda_to_the_k = 1
            e_to_the_minus_lambda = math.exp(-_lambda)
            k_factorial = 1
            ps = []
            for k in range(0, 128):
                ps.append((e_to_the_minus_lambda * lambda_to_the_k) / k_factorial)
                lambda_to_the_k *= _lambda
                k_factorial *= (k + 1)
                if ps[-1] < 1e-07:
                    break
            ps = torch.FloatTensor(ps)
            self.mask_span_distribution = torch.distributions.Categorical(ps)

        self.epoch = 0

    def set_epoch(self, epoch, **unused):
        self.epoch = epoch

    def __getitem__(self, index):
        # Seed per (seed, epoch, index) so noising is reproducible per epoch.
        with data_utils.numpy_seed(self.seed, self.epoch, index):
            tokens = self.dataset[index]
            assert tokens[-1] == self.vocab.eos()
            (source, target) = (tokens, tokens.clone())

            if self.permute_sentence_ratio > 0.0:
                source = self.permute_sentences(source, self.permute_sentence_ratio)
            if self.mask_ratio > 0:
                source = self.add_whole_word_mask(source, self.mask_ratio)
            if self.insert_ratio > 0:
                source = self.add_insertion_noise(source, self.insert_ratio)
            if (self.rotate_ratio > 0.0) and (np.random.random() < self.rotate_ratio):
                source = self.add_rolling_noise(source)

        # Sanity checks: valid token ids, BOS/EOS preserved at the edges.
        assert (source >= 0).all()
        assert (source[1:-1] >= 1).all()
        assert (source <= len(self.vocab)).all()
        assert source[0] == self.vocab.bos()
        assert source[-1] == self.vocab.eos()
        return {'id': index, 'source': source, 'target': target}

    def __len__(self):
        return len(self.dataset)

    def permute_sentences(self, source, p=1.0):
        """Shuffle a fraction *p* of the sentences within *source*."""
        full_stops = (source == self.full_stop_index)
        # Treat the position before EOS as a sentence end so the last
        # sentence is always closed.
        full_stops[-2] = 1
        # Sentence ends: a full stop not followed by another full stop.
        sentence_ends = ((full_stops[1:] * (~full_stops[:-1])).nonzero() + 2)
        result = source.clone()
        num_sentences = sentence_ends.size(0)
        num_to_permute = math.ceil(((num_sentences * 2) * p) / 2.0)
        substitutions = torch.randperm(num_sentences)[:num_to_permute]
        ordering = torch.arange(0, num_sentences)
        ordering[substitutions] = substitutions[torch.randperm(num_to_permute)]
        index = 1  # position 0 is BOS and stays in place
        for i in ordering:
            sentence = source[(sentence_ends[i - 1] if i > 0 else 1):sentence_ends[i]]
            result[index:index + sentence.size(0)] = sentence
            index += sentence.size(0)
        return result

    def word_starts(self, source):
        """Return a per-token mask of word-start positions (BOS/EOS excluded)."""
        if self.mask_whole_word is not None:
            is_word_start = self.mask_whole_word.gather(0, source)
        else:
            # Without whole-word info, every token is its own word.
            is_word_start = torch.ones(source.size())
        is_word_start[0] = 0
        is_word_start[-1] = 0
        return is_word_start

    def add_whole_word_mask(self, source, p):
        """Mask a fraction *p* of the words in *source* (optionally as spans)."""
        is_word_start = self.word_starts(source)
        num_to_mask = int(math.ceil(is_word_start.float().sum() * p))
        num_inserts = 0
        if num_to_mask == 0:
            return source

        if self.mask_span_distribution is not None:
            lengths = self.mask_span_distribution.sample(sample_shape=(num_to_mask,))
            # Keep sampling until the cumulative span length covers the budget.
            cum_length = torch.cumsum(lengths, 0)
            while cum_length[-1] < num_to_mask:
                lengths = torch.cat([lengths, self.mask_span_distribution.sample(sample_shape=(num_to_mask,))], dim=0)
                cum_length = torch.cumsum(lengths, 0)

            # Trim to exactly num_to_mask tokens of masking budget.
            i = 0
            while cum_length[i] < num_to_mask:
                i += 1
            lengths[i] = num_to_mask - (0 if i == 0 else cum_length[i - 1])
            num_to_mask = i + 1
            lengths = lengths[:num_to_mask]

            # Zero-length spans become pure insertions of mask tokens.
            lengths = lengths[lengths > 0]
            num_inserts = num_to_mask - lengths.size(0)
            num_to_mask -= num_inserts
            if num_to_mask == 0:
                return self.add_insertion_noise(source, num_inserts / source.size(0))
            assert (lengths > 0).all()
        else:
            lengths = torch.ones((num_to_mask,)).long()

        assert is_word_start[-1] == 0
        word_starts = is_word_start.nonzero()
        indices = word_starts[torch.randperm(word_starts.size(0))[:num_to_mask]].squeeze(1)
        # Which masked positions get a random token instead of <mask>.
        mask_random = torch.FloatTensor(num_to_mask).uniform_() < self.random_ratio

        source_length = source.size(0)
        assert (source_length - 1) not in indices
        to_keep = torch.ones(source_length, dtype=torch.bool)
        # Sentinel so span extension always stops at the final position.
        is_word_start[-1] = 255
        if self.replace_length == 0:
            to_keep[indices] = 0
        else:
            source[indices] = self.mask_idx
            source[indices[mask_random]] = torch.randint(1, len(self.vocab), size=(mask_random.sum(),))

        if self.mask_span_distribution is not None:
            assert len(lengths.size()) == 1
            assert lengths.size() == indices.size()
            lengths -= 1
            # Walk each span to the right until its sampled length is used up.
            while indices.size(0) > 0:
                assert lengths.size() == indices.size()
                lengths -= is_word_start[indices + 1].long()
                uncompleted = lengths >= 0
                indices = indices[uncompleted] + 1
                mask_random = mask_random[uncompleted]
                lengths = lengths[uncompleted]
                if self.replace_length != -1:
                    # Delete the continuation tokens.
                    to_keep[indices] = 0
                else:
                    # Keep length: overwrite them with <mask>/random tokens.
                    source[indices] = self.mask_idx
                    source[indices[mask_random]] = torch.randint(1, len(self.vocab), size=(mask_random.sum(),))
        else:
            # Extend each mask to the end of its word.
            while indices.size(0) > 0:
                uncompleted = is_word_start[indices + 1] == 0
                indices = indices[uncompleted] + 1
                mask_random = mask_random[uncompleted]
                if self.replace_length != -1:
                    to_keep[indices] = 0
                else:
                    source[indices] = self.mask_idx
                    source[indices[mask_random]] = torch.randint(1, len(self.vocab), size=(mask_random.sum(),))
                assert (source_length - 1) not in indices

        source = source[to_keep]

        if num_inserts > 0:
            source = self.add_insertion_noise(source, num_inserts / source.size(0))

        return source

    def add_permuted_noise(self, tokens, p):
        """Randomly permute a fraction *p* of the tokens (excluding BOS/EOS)."""
        num_words = len(tokens)
        num_to_permute = math.ceil(((num_words * 2) * p) / 2.0)
        substitutions = torch.randperm(num_words - 2)[:num_to_permute] + 1
        tokens[substitutions] = tokens[substitutions[torch.randperm(num_to_permute)]]
        return tokens

    def add_rolling_noise(self, tokens):
        """Rotate the interior of *tokens* by a random offset (BOS/EOS fixed)."""
        offset = np.random.randint(1, max(1, tokens.size(-1) - 1) + 1)
        tokens = torch.cat(
            (tokens[0:1], tokens[offset:-1], tokens[1:offset], tokens[-1:]),
            dim=0,
        )
        return tokens

    def add_insertion_noise(self, tokens, p):
        """Insert ceil(len*p) mask/random tokens at random interior positions."""
        if p == 0.0:
            return tokens

        num_tokens = len(tokens)
        n = int(math.ceil(num_tokens * p))

        # Choose insertion slots strictly inside the sequence.
        noise_indices = torch.randperm(num_tokens + n - 2)[:n] + 1
        noise_mask = torch.zeros(size=(num_tokens + n,), dtype=torch.bool)
        noise_mask[noise_indices] = 1
        result = torch.LongTensor(n + len(tokens)).fill_(-1)

        num_random = int(math.ceil(n * self.random_ratio))
        result[noise_indices[num_random:]] = self.mask_idx
        result[noise_indices[:num_random]] = torch.randint(low=1, high=len(self.vocab), size=(num_random,))

        # Remaining slots receive the original tokens, in order.
        result[~noise_mask] = tokens

        assert (result >= 0).all()
        return result

    def collater(self, samples):
        """Merge a list of samples to form a mini-batch.

        Args:
            samples (List[dict]): samples to collate

        Returns:
            dict: a mini-batch of data
        """
        return collate(samples, self.vocab.pad(), self.vocab.eos(), self.vocab)

    def num_tokens(self, index):
        """Return the number of tokens in a sample. This value is used to
        enforce ``--max-tokens`` during batching."""
        return self.sizes[index]

    def size(self, index):
        """Return an example's size as a float or tuple. This value is used when
        filtering a dataset with ``--max-positions``."""
        return self.sizes[index]

    def ordered_indices(self):
        """Return an ordered list of indices. Batches will be constructed based
        on this order."""
        if self.shuffle:
            indices = np.random.permutation(len(self))
        else:
            indices = np.arange(len(self))
        return indices[np.argsort(self.sizes[indices], kind='mergesort')]

    def prefetch(self, indices):
        # NOTE(review): self.src / self.tgt are never assigned by this class;
        # this method looks copied from a paired-dataset class — verify before
        # relying on prefetch here.
        self.src.prefetch(indices)
        self.tgt.prefetch(indices)

    @property
    def supports_prefetch(self):
        # BUG FIX: the original accessed self.src/self.tgt directly, which are
        # never set on this class, so this property raised AttributeError.
        # Report False safely instead so the framework skips prefetch().
        src = getattr(self, 'src', None)
        tgt = getattr(self, 'tgt', None)
        return (
            src is not None and getattr(src, 'supports_prefetch', False)
            and tgt is not None and getattr(tgt, 'supports_prefetch', False)
        )
|
class Dictionary(object):
    """A mapping from symbols to consecutive integers.

    Special symbols (bos/pad/eos/unk, plus any extras) are added first and
    occupy indices ``0..nspecial-1``.
    """
    def __init__(self, pad='<pad>', eos='</s>', unk='<unk>', bos='<s>', extra_special_symbols=None):
        (self.unk_word, self.pad_word, self.eos_word) = (unk, pad, eos)
        self.symbols = []
        self.count = []
        self.indices = {}
        # Special symbols are registered first so their indices are stable.
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        # Everything added so far is "special" and is protected from finalize().
        self.nspecial = len(self.symbols)
    def __eq__(self, other):
        # NOTE(review): defining __eq__ without __hash__ makes instances
        # unhashable — confirm no caller uses a Dictionary as a dict key.
        return (self.indices == other.indices)
    def __getitem__(self, idx):
        # Out-of-range indices map to the unk symbol rather than raising.
        if (idx < len(self.symbols)):
            return self.symbols[idx]
        return self.unk_word
    def __len__(self):
        """Returns the number of symbols in the dictionary"""
        return len(self.symbols)
    def __contains__(self, sym):
        return (sym in self.indices)
    def index(self, sym):
        """Returns the index of the specified symbol (unk index if unknown)."""
        assert isinstance(sym, str)
        if (sym in self.indices):
            return self.indices[sym]
        return self.unk_index
    def string(self, tensor, bpe_symbol=None, escape_unk=False):
        """Helper for converting a tensor of token indices to a string.

        Can optionally remove BPE symbols or escape <unk> words.
        """
        # 2-D tensors are treated as a batch: one output line per row.
        if (torch.is_tensor(tensor) and (tensor.dim() == 2)):
            return '\n'.join((self.string(t, bpe_symbol, escape_unk) for t in tensor))
        def token_string(i):
            if (i == self.unk()):
                return self.unk_string(escape_unk)
            else:
                return self[i]
        # Skip eos (and bos, when this dictionary defines one) in the output.
        if hasattr(self, 'bos_index'):
            sent = ' '.join((token_string(i) for i in tensor if ((i != self.eos()) and (i != self.bos()))))
        else:
            sent = ' '.join((token_string(i) for i in tensor if (i != self.eos())))
        return data_utils.process_bpe_symbol(sent, bpe_symbol)
    def unk_string(self, escape=False):
        """Return unknown string, optionally escaped as: <<unk>>"""
        if escape:
            return '<{}>'.format(self.unk_word)
        else:
            return self.unk_word
    def add_symbol(self, word, n=1):
        """Adds a word to the dictionary (or bumps its count by *n*)."""
        if (word in self.indices):
            idx = self.indices[word]
            self.count[idx] = (self.count[idx] + n)
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx
    def update(self, new_dict):
        """Updates counts from new dictionary."""
        for word in new_dict.symbols:
            idx2 = new_dict.indices[word]
            if (word in self.indices):
                idx = self.indices[word]
                self.count[idx] = (self.count[idx] + new_dict.count[idx2])
            else:
                idx = len(self.symbols)
                self.indices[word] = idx
                self.symbols.append(word)
                self.count.append(new_dict.count[idx2])
    def finalize(self, threshold=(- 1), nwords=(- 1), padding_factor=8):
        """Sort symbols by frequency in descending order, ignoring special ones.

        Args:
            - threshold defines the minimum word count
            - nwords defines the total number of words in the final dictionary,
                including special symbols
            - padding_factor can be used to pad the dictionary size to be a
                multiple of 8, which is important on some hardware (e.g., Nvidia
                Tensor Cores).
        """
        if (nwords <= 0):
            nwords = len(self)
        # Special symbols keep their original indices.
        new_indices = dict(zip(self.symbols[:self.nspecial], range(self.nspecial)))
        new_symbols = self.symbols[:self.nspecial]
        new_count = self.count[:self.nspecial]
        # sorted() before Counter makes ties break alphabetically, so the
        # final ordering is deterministic: by count desc, then symbol.
        c = Counter(dict(sorted(zip(self.symbols[self.nspecial:], self.count[self.nspecial:]))))
        for (symbol, count) in c.most_common((nwords - self.nspecial)):
            if (count >= threshold):
                new_indices[symbol] = len(new_symbols)
                new_symbols.append(symbol)
                new_count.append(count)
            else:
                # most_common is sorted, so everything after is below threshold.
                break
        assert (len(new_symbols) == len(new_indices))
        self.count = list(new_count)
        self.symbols = list(new_symbols)
        self.indices = new_indices
        self.pad_to_multiple_(padding_factor)
    def pad_to_multiple_(self, padding_factor):
        """Pad Dictionary size to be a multiple of *padding_factor*."""
        if (padding_factor > 1):
            i = 0
            while ((len(self) % padding_factor) != 0):
                # Filler symbols with zero count; they never win at decoding.
                symbol = 'madeupword{:04d}'.format(i)
                self.add_symbol(symbol, n=0)
                i += 1
    def bos(self):
        """Helper to get index of beginning-of-sentence symbol"""
        return self.bos_index
    def pad(self):
        """Helper to get index of pad symbol"""
        return self.pad_index
    def eos(self):
        """Helper to get index of end-of-sentence symbol"""
        return self.eos_index
    def unk(self):
        """Helper to get index of unk symbol"""
        return self.unk_index
    @classmethod
    def load(cls, f):
        """Loads the dictionary from a text file with the format:

        ```
        <symbol0> <count0>
        <symbol1> <count1>
        ...
        ```
        """
        d = cls()
        d.add_from_file(f)
        return d
    def add_from_file(self, f):
        """
        Loads a pre-existing dictionary from a text file and adds its symbols
        to this instance.
        """
        if isinstance(f, str):
            try:
                with PathManager.open(f, 'r', encoding='utf-8') as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception('Incorrect encoding detected in {}, please rebuild the dataset'.format(f))
            return
        lines = f.readlines()
        # Subclasses may store metadata lines before the symbol entries.
        indices_start_line = self._load_meta(lines)
        for line in lines[indices_start_line:]:
            # Split on the LAST space: the count follows it, the symbol
            # (which may itself contain spaces) precedes it.
            idx = line.rfind(' ')
            if (idx == (- 1)):
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
            word = line[:idx]
            count = int(line[(idx + 1):])
            # NOTE(review): a word already present is appended again here
            # (unlike add_symbol) — confirm input files have no duplicates.
            self.indices[word] = len(self.symbols)
            self.symbols.append(word)
            self.count.append(count)
    def _save(self, f, kv_iterator):
        # String argument: open the path ourselves and recurse with the handle.
        if isinstance(f, str):
            PathManager.mkdirs(os.path.dirname(f))
            with PathManager.open(f, 'w', encoding='utf-8') as fd:
                return self.save(fd)
        for (k, v) in kv_iterator:
            print('{} {}'.format(k, v), file=f)
    def _get_meta(self):
        # Hook for subclasses to emit extra (key, value) metadata when saving.
        return ([], [])
    def _load_meta(self, lines):
        # Hook for subclasses: number of leading metadata lines to skip.
        return 0
    def save(self, f):
        """Stores dictionary into a text file"""
        (ex_keys, ex_vals) = self._get_meta()
        # Special symbols are not written; load() re-adds them via __init__.
        self._save(f, zip((ex_keys + self.symbols[self.nspecial:]), (ex_vals + self.count[self.nspecial:])))
    def dummy_sentence(self, length):
        # Random non-special tokens, terminated by eos.
        t = torch.Tensor(length).uniform_((self.nspecial + 1), len(self)).long()
        t[(- 1)] = self.eos()
        return t
    def encode_line(self, line, line_tokenizer=tokenize_line, add_if_not_exist=True, consumer=None, append_eos=True, reverse_order=False):
        """Tokenize *line* and map each token to its index (IntTensor)."""
        words = line_tokenizer(line)
        if reverse_order:
            words = list(reversed(words))
        nwords = len(words)
        ids = torch.IntTensor(((nwords + 1) if append_eos else nwords))
        for (i, word) in enumerate(words):
            if add_if_not_exist:
                idx = self.add_symbol(word)
            else:
                idx = self.index(word)
            if (consumer is not None):
                consumer(word, idx)
            ids[i] = idx
        if append_eos:
            ids[nwords] = self.eos_index
        return ids
    @staticmethod
    def _add_file_to_dictionary_single_worker(filename, tokenize, eos_word, worker_id=0, num_workers=1):
        """Count token frequencies in this worker's byte-range of *filename*."""
        counter = Counter()
        with open(PathManager.get_local_path(filename), 'r', encoding='utf-8') as f:
            size = os.fstat(f.fileno()).st_size
            chunk_size = (size // num_workers)
            offset = (worker_id * chunk_size)
            end = (offset + chunk_size)
            f.seek(offset)
            if (offset > 0):
                # The seek may land mid-line; skip to the next full line
                # (the previous worker reads past its end to cover it).
                safe_readline(f)
            line = f.readline()
            while line:
                for word in tokenize(line):
                    counter.update([word])
                counter.update([eos_word])
                if (f.tell() > end):
                    break
                line = f.readline()
        return counter
    @staticmethod
    def add_file_to_dictionary(filename, dict, tokenize, num_workers):
        """Populate *dict* with token counts from *filename*, optionally in parallel."""
        def merge_result(counter):
            # sorted() keeps symbol insertion order deterministic across runs.
            for (w, c) in sorted(counter.items()):
                dict.add_symbol(w, c)
        if (num_workers > 1):
            pool = Pool(processes=num_workers)
            results = []
            for worker_id in range(num_workers):
                results.append(pool.apply_async(Dictionary._add_file_to_dictionary_single_worker, (filename, tokenize, dict.eos_word, worker_id, num_workers)))
            pool.close()
            pool.join()
            for r in results:
                merge_result(r.get())
        else:
            merge_result(Dictionary._add_file_to_dictionary_single_worker(filename, tokenize, dict.eos_word))
|
class TruncatedDictionary(object):
    """Expose only the first *length* entries of a wrapped dictionary.

    Lookups at or beyond the cutoff fall back to the wrapped dictionary's
    unk symbol.
    """

    def __init__(self, wrapped_dict, length):
        # Dynamically subclass the wrapped dictionary's type so that
        # isinstance checks against it still succeed on this wrapper.
        self.__class__ = type(
            wrapped_dict.__class__.__name__,
            (self.__class__, wrapped_dict.__class__),
            {},
        )
        # Share the wrapped instance's attribute namespace.
        self.__dict__ = wrapped_dict.__dict__
        self.wrapped_dict = wrapped_dict
        self.length = min(len(self.wrapped_dict), length)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        if i >= self.length:
            return self.wrapped_dict.unk()
        return self.wrapped_dict[i]
|
@register_bpe('fastbpe')
class fastBPE(object):
    """BPE encoder/decoder backed by the fastBPE library."""

    @staticmethod
    def add_args(parser):
        parser.add_argument('--bpe-codes', type=str, help='path to fastBPE BPE')

    def __init__(self, args):
        if args.bpe_codes is None:
            # BUG FIX: the message previously referred to --bpe=subword_nmt
            # (copied from the subword_nmt wrapper) in the fastbpe class.
            raise ValueError('--bpe-codes is required for --bpe=fastbpe')
        codes = file_utils.cached_path(args.bpe_codes)
        try:
            import fastBPE
            self.bpe = fastBPE.fastBPE(codes)
            self.bpe_symbol = '@@ '
        except ImportError:
            raise ImportError('Please install fastBPE with: pip install fastBPE')

    def encode(self, x: str) -> str:
        return self.bpe.apply([x])[0]

    def decode(self, x: str) -> str:
        # Trailing space guarantees a final continuation marker is removed too.
        return (x + ' ').replace(self.bpe_symbol, '').rstrip()
|
@register_bpe('gpt2')
class GPT2BPE(object):
    """GPT-2 byte-level BPE: text <-> space-joined token-id strings."""

    @staticmethod
    def add_args(parser):
        parser.add_argument('--gpt2-encoder-json', type=str, default=DEFAULT_ENCODER_JSON, help='path to encoder.json')
        parser.add_argument('--gpt2-vocab-bpe', type=str, default=DEFAULT_VOCAB_BPE, help='path to vocab.bpe')

    def __init__(self, args):
        encoder_path = file_utils.cached_path(getattr(args, 'gpt2_encoder_json', DEFAULT_ENCODER_JSON))
        merges_path = file_utils.cached_path(getattr(args, 'gpt2_vocab_bpe', DEFAULT_VOCAB_BPE))
        self.bpe = get_encoder(encoder_path, merges_path)

    def encode(self, x: str) -> str:
        token_ids = self.bpe.encode(x)
        return ' '.join(map(str, token_ids))

    def decode(self, x: str) -> str:
        # Special markers pass through as-is; everything else is an int id.
        tokens = [
            int(tok) if tok not in {'<unk>', '<mask>'} else tok
            for tok in x.split()
        ]
        return self.bpe.decode(tokens)

    def is_beginning_of_word(self, x: str) -> bool:
        # GPT-2 BPE marks word starts with a leading space after decoding.
        return self.decode(x).startswith(' ')
|
@lru_cache()
def bytes_to_unicode():
    """Build the GPT-2 byte -> unicode-character table.

    Every one of the 256 byte values maps to a distinct printable unicode
    character, so byte sequences can be represented as strings without
    whitespace/control characters that would confuse the BPE code.
    Printable latin bytes map to themselves; the rest are shifted up past
    255.
    """
    keep = (
        list(range(ord('!'), ord('~') + 1))
        + list(range(ord('¡'), ord('¬') + 1))
        + list(range(ord('®'), ord('ÿ') + 1))
    )
    codes = keep[:]
    shift = 0
    for byte in range(2 ** 8):
        if byte not in keep:
            keep.append(byte)
            codes.append((2 ** 8) + shift)
            shift += 1
    return dict(zip(keep, (chr(c) for c in codes)))
|
def get_pairs(word):
    """Return the set of adjacent symbol pairs (bigrams) in *word*.

    *word* is a tuple of variable-length string symbols.
    """
    previous = word[0]
    bigrams = set()
    for current in word[1:]:
        bigrams.add((previous, current))
        previous = current
    return bigrams
|
class Encoder():
    """GPT-2 byte-level BPE encoder: text <-> token-id sequences."""

    def __init__(self, encoder, bpe_merges, errors='replace'):
        self.encoder = encoder
        self.decoder = {v: k for (k, v) in self.encoder.items()}
        # How to handle undecodable byte sequences in decode().
        self.errors = errors
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for (k, v) in self.byte_encoder.items()}
        # Lower rank = earlier (higher-priority) merge.
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        try:
            import regex as re
            self.re = re
        except ImportError:
            raise ImportError('Please install regex with: pip install regex')
        # GPT-2 pre-tokenization pattern: contractions, letter runs, digit
        # runs, other symbol runs, and whitespace.
        self.pat = self.re.compile("'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)|\\s+")

    def bpe(self, token):
        """Apply the learned merges to *token*; returns space-joined symbols."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # Merge the highest-priority (lowest-rank) pair present.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            (first, second) = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                    new_word.extend(word[i:j])
                    i = j
                # BUG FIX: was a bare `except:`, which would also swallow
                # KeyboardInterrupt/SystemExit; word.index raises ValueError.
                except ValueError:
                    new_word.extend(word[i:])
                    break

                if (word[i] == first) and (i < (len(word) - 1)) and (word[i + 1] == second):
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word

    def encode(self, text):
        """Encode *text* to a list of BPE token ids."""
        bpe_tokens = []
        for token in self.re.findall(self.pat, text):
            # Bytes -> printable unicode chars, then BPE, then vocab lookup.
            token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
            bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
        return bpe_tokens

    def decode(self, tokens):
        """Decode a list of BPE token ids back to text."""
        text = ''.join([self.decoder.get(token, token) for token in tokens])
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)
        return text
|
def get_encoder(encoder_json_path, vocab_bpe_path):
    """Construct a GPT-2 :class:`Encoder` from its two vocabulary files.

    *encoder_json_path* maps BPE symbols to ids; *vocab_bpe_path* lists the
    merge rules (first line is a header, last line is empty).
    """
    with open(encoder_json_path, 'r') as f:
        vocab = json.load(f)
    with open(vocab_bpe_path, 'r', encoding='utf-8') as f:
        merges_text = f.read()
    merges = [
        tuple(line.split())
        for line in merges_text.split('\n')[1:-1]
    ]
    return Encoder(encoder=vocab, bpe_merges=merges)
|
@register_bpe('bert')
class BertBPE(object):
    """WordPiece BPE backed by pytorch_transformers' BertTokenizer."""

    @staticmethod
    def add_args(parser):
        parser.add_argument('--bpe-cased', action='store_true', help='set for cased BPE', default=False)
        parser.add_argument('--bpe-vocab-file', type=str, help='bpe vocab file.')

    def __init__(self, args):
        try:
            from pytorch_transformers import BertTokenizer
            from pytorch_transformers.tokenization_utils import clean_up_tokenization
        except ImportError:
            # BUG FIX: message read "pytorch_transformerswith" (missing space).
            raise ImportError('Please install 1.0.0 version of pytorch_transformers with: pip install pytorch-transformers')

        if 'bpe_vocab_file' in args:
            self.bert_tokenizer = BertTokenizer(
                args.bpe_vocab_file,
                do_lower_case=(not args.bpe_cased),
            )
        else:
            vocab_file_name = ('bert-base-cased' if args.bpe_cased else 'bert-base-uncased')
            self.bert_tokenizer = BertTokenizer.from_pretrained(vocab_file_name)
        self.clean_up_tokenization = clean_up_tokenization

    def encode(self, x: str) -> str:
        return ' '.join(self.bert_tokenizer.tokenize(x))

    def decode(self, x: str) -> str:
        return self.clean_up_tokenization(self.bert_tokenizer.convert_tokens_to_string(x.split(' ')))

    def is_beginning_of_word(self, x: str) -> bool:
        # WordPiece continuation tokens carry a '##' prefix.
        return not x.startswith('##')
|
@register_tokenizer('moses')
class MosesTokenizer(object):
    """Word tokenizer/detokenizer backed by sacremoses."""

    @staticmethod
    def add_args(parser):
        parser.add_argument('--moses-source-lang', metavar='SRC', help='source language')
        parser.add_argument('--moses-target-lang', metavar='TARGET', help='target language')
        parser.add_argument('--moses-no-dash-splits', action='store_true', default=False, help="don't apply dash split rules")
        parser.add_argument('--moses-no-escape', action='store_true', default=False, help="don't perform HTML escaping on apostrophy, quotes, etc.")

    def __init__(self, args):
        self.args = args

        # Default the Moses languages from the task's language pair.
        if getattr(args, 'moses_source_lang', None) is None:
            args.moses_source_lang = getattr(args, 'source_lang', 'en')
        if getattr(args, 'moses_target_lang', None) is None:
            args.moses_target_lang = getattr(args, 'target_lang', 'en')

        try:
            from sacremoses import MosesTokenizer, MosesDetokenizer
            self.tok = MosesTokenizer(args.moses_source_lang)
            self.detok = MosesDetokenizer(args.moses_target_lang)
        except ImportError:
            raise ImportError('Please install Moses tokenizer with: pip install sacremoses')

    def encode(self, x: str) -> str:
        return self.tok.tokenize(
            x,
            aggressive_dash_splits=(not self.args.moses_no_dash_splits),
            return_str=True,
            escape=(not self.args.moses_no_escape),
        )

    def decode(self, x: str) -> str:
        return self.detok.detokenize(x.split())
|
@register_tokenizer('nltk')
class NLTKTokenizer(object):
    """Word tokenizer backed by NLTK; decode is a no-op."""

    def __init__(self, source_lang=None, target_lang=None):
        try:
            from nltk.tokenize import word_tokenize
            self.word_tokenize = word_tokenize
        except ImportError:
            raise ImportError('Please install nltk with: pip install nltk')

    def encode(self, x: str) -> str:
        tokens = self.word_tokenize(x)
        return ' '.join(tokens)

    def decode(self, x: str) -> str:
        # NLTK provides no detokenizer; return the input unchanged.
        return x
|
@register_bpe('sentencepiece')
class SentencepieceBPE(object):
    """Subword segmentation backed by the sentencepiece library."""

    @staticmethod
    def add_args(parser):
        parser.add_argument('--sentencepiece-vocab', type=str, help='path to sentencepiece vocab')

    def __init__(self, args):
        model_path = file_utils.cached_path(args.sentencepiece_vocab)
        try:
            import sentencepiece as spm
            self.sp = spm.SentencePieceProcessor()
            self.sp.Load(model_path)
        except ImportError:
            raise ImportError('Please install sentencepiece with: pip install sentencepiece')

    def encode(self, x: str) -> str:
        pieces = self.sp.EncodeAsPieces(x)
        return ' '.join(pieces)

    def decode(self, x: str) -> str:
        # Remove piece separators, then turn the '▁' word markers into spaces.
        return x.replace(' ', '').replace('▁', ' ').strip()

    def is_beginning_of_word(self, x: str) -> bool:
        if x in ['<unk>', '<s>', '</s>', '<pad>']:
            return True
        return x.startswith('▁')
|
@register_tokenizer('space')
class SpaceTokenizer(object):
    """Whitespace-normalizing tokenizer; decode is a no-op."""

    def __init__(self, source_lang=None, target_lang=None):
        # Collapses any run of whitespace into a single space.
        self.space_tok = re.compile('\\s+')

    def encode(self, x: str) -> str:
        return self.space_tok.sub(' ', x)

    def decode(self, x: str) -> str:
        return x
|
@register_bpe('subword_nmt')
class SubwordNMTBPE(object):
    """BPE encoder/decoder backed by the subword_nmt package."""

    @staticmethod
    def add_args(parser):
        parser.add_argument('--bpe-codes', type=str, help='path to subword NMT BPE')
        parser.add_argument('--bpe-separator', default='@@', help='BPE separator')

    def __init__(self, args):
        if args.bpe_codes is None:
            raise ValueError('--bpe-codes is required for --bpe=subword_nmt')
        codes = file_utils.cached_path(args.bpe_codes)
        try:
            from subword_nmt import apply_bpe
            # Reuse apply_bpe's own CLI parser to get its defaults right.
            bpe_parser = apply_bpe.create_parser()
            bpe_args = bpe_parser.parse_args(['--codes', codes, '--separator', args.bpe_separator])
            self.bpe = apply_bpe.BPE(
                bpe_args.codes,
                bpe_args.merges,
                bpe_args.separator,
                None,
                bpe_args.glossaries,
            )
            self.bpe_symbol = bpe_args.separator + ' '
        except ImportError:
            raise ImportError('Please install subword_nmt with: pip install subword-nmt')

    def encode(self, x: str) -> str:
        return self.bpe.process_line(x)

    def decode(self, x: str) -> str:
        # Trailing space guarantees a final continuation marker is removed too.
        return (x + ' ').replace(self.bpe_symbol, '').rstrip()
|
def get_whole_word_mask(args, dictionary):
    """Return a ByteTensor marking which dictionary entries begin a word.

    Returns None when no BPE scheme is configured in *args*.
    """
    bpe = encoders.build_bpe(args)
    if bpe is None:
        return None

    def is_beginning_of_word(i):
        # Special symbols and the dictionary's 'madeupword' padding entries
        # always count as word beginnings.
        if i < dictionary.nspecial:
            return True
        tok = dictionary[i]
        if tok.startswith('madeupword'):
            return True
        try:
            return bpe.is_beginning_of_word(tok)
        except ValueError:
            # Tokens the BPE scheme cannot classify default to "beginning".
            return True

    return torch.ByteTensor([is_beginning_of_word(i) for i in range(len(dictionary))])
|
class EpochListening():
    """Mixin for receiving updates whenever the epoch increments."""

    def set_epoch(self, epoch):
        """Will receive the updated epoch number at the beginning of the epoch."""
        # Default implementation ignores the notification.
        pass
|
class FairseqDataset(torch.utils.data.Dataset, EpochListening):
    """A dataset that provides helpers for batching."""

    def __getitem__(self, index):
        raise NotImplementedError

    def __len__(self):
        raise NotImplementedError

    def collater(self, samples):
        """Merge a list of samples to form a mini-batch.

        Args:
            samples (List[dict]): samples to collate

        Returns:
            dict: a mini-batch suitable for forwarding with a Model
        """
        raise NotImplementedError

    def num_tokens(self, index):
        """Return the number of tokens in a sample. This value is used to
        enforce ``--max-tokens`` during batching."""
        raise NotImplementedError

    def size(self, index):
        """Return an example's size as a float or tuple. This value is used
        when filtering a dataset with ``--max-positions``."""
        raise NotImplementedError

    def ordered_indices(self):
        """Return an ordered list of indices. Batches will be constructed
        based on this order."""
        return np.arange(len(self))

    @property
    def supports_prefetch(self):
        """Whether this dataset supports prefetching."""
        return False

    def attr(self, attr: str, index: int):
        # Default lookup ignores the index; subclasses may specialize
        # per-example attributes.
        return getattr(self, attr, None)

    def prefetch(self, indices):
        """Prefetch the data required for this epoch."""
        raise NotImplementedError
|
class FairseqIterableDataset(torch.utils.data.IterableDataset, EpochListening):
    """For datasets that need to be read sequentially, usually because the
    data is being streamed or otherwise can't be manipulated on a single
    machine.
    """

    def __iter__(self):
        raise NotImplementedError
|
class IdDataset(FairseqDataset):
    """Trivial dataset whose examples are the indices themselves."""

    def __getitem__(self, index):
        return index

    def __len__(self):
        # Reports zero length; intended for nesting inside composite
        # datasets that supply their own length.
        return 0

    def collater(self, samples):
        return torch.tensor(samples)
|
def __best_fitting_dtype(vocab_size=None):
    """Pick the narrowest integer dtype able to hold ids for *vocab_size*."""
    # 65500 leaves a little headroom below the uint16 maximum (65535).
    if vocab_size is not None and vocab_size < 65500:
        return np.uint16
    return np.int32
|
def get_available_dataset_impl():
    """List the supported ``--dataset-impl`` backend names."""
    return ['raw', 'lazy', 'cached', 'mmap']
|
def infer_dataset_impl(path):
    """Guess which dataset implementation wrote *path*.

    Returns 'raw', 'cached', 'mmap', or None when the on-disk format is
    not recognized.
    """
    if IndexedRawTextDataset.exists(path):
        return 'raw'
    if not IndexedDataset.exists(path):
        return None
    # Binary formats are distinguished by the magic bytes at the head of
    # the index file.
    with open(index_file_path(path), 'rb') as f:
        magic = f.read(8)
    if magic == IndexedDataset._HDR_MAGIC:
        return 'cached'
    if magic == MMapIndexedDataset.Index._HDR_MAGIC[:8]:
        return 'mmap'
    return None
|
def make_builder(out_file, impl, vocab_size=None):
    """Create a dataset builder for the given ``--dataset-impl``."""
    if impl == 'mmap':
        # mmap storage can use the narrowest dtype that fits the vocab.
        return MMapIndexedDatasetBuilder(out_file, dtype=__best_fitting_dtype(vocab_size))
    return IndexedDatasetBuilder(out_file)
|
def make_dataset(path, impl, fix_lua_indexing=False, dictionary=None):
    """Instantiate the dataset at *path* using implementation *impl*.

    Returns None when the files for the requested implementation are
    missing on disk.
    """
    if impl == 'raw' and IndexedRawTextDataset.exists(path):
        # Raw text is binarized on the fly, so a dictionary is mandatory.
        assert dictionary is not None
        return IndexedRawTextDataset(path, dictionary)
    if impl == 'lazy' and IndexedDataset.exists(path):
        return IndexedDataset(path, fix_lua_indexing=fix_lua_indexing)
    if impl == 'cached' and IndexedDataset.exists(path):
        return IndexedCachedDataset(path, fix_lua_indexing=fix_lua_indexing)
    if impl == 'mmap' and MMapIndexedDataset.exists(path):
        return MMapIndexedDataset(path)
    return None
|
def dataset_exists(path, impl):
    """Check whether the on-disk files for *path* exist under *impl*."""
    if impl == 'raw':
        return IndexedRawTextDataset.exists(path)
    if impl == 'mmap':
        return MMapIndexedDataset.exists(path)
    # 'lazy' and 'cached' share the same on-disk layout.
    return IndexedDataset.exists(path)
|
def read_longs(f, n):
    """Read *n* int64 values from binary file object *f* into a numpy array."""
    out = np.empty(n, dtype=np.int64)
    f.readinto(out)
    return out
|
def write_longs(f, a):
    """Write sequence *a* to binary file object *f* as raw int64 values."""
    f.write(np.asarray(a, dtype=np.int64))
|
def code(dtype):
    """Map a numpy dtype back to its integer code in the ``dtypes`` table."""
    for key, value in dtypes.items():
        if value == dtype:
            return key
    raise ValueError(dtype)
|
def index_file_path(prefix_path):
    """Path of the index ('.idx') file for a dataset prefix."""
    return prefix_path + '.idx'
|
def data_file_path(prefix_path):
    """Path of the data ('.bin') file for a dataset prefix."""
    return prefix_path + '.bin'
|
class IndexedDataset(FairseqDataset):
    """Loader for TorchNet IndexedDataset.

    Reads the paired ``<path>.idx`` / ``<path>.bin`` files. The index is
    parsed eagerly in ``__init__``; the data file is only opened on the
    first ``__getitem__``.
    """
    # Magic bytes identifying the TNT index file format.
    _HDR_MAGIC = b'TNTIDX\x00\x00'
    def __init__(self, path, fix_lua_indexing=False):
        super().__init__()
        self.path = path
        # When True, every token id is shifted down by 1 on read
        # (Lua/TorchNet datasets are 1-indexed).
        self.fix_lua_indexing = fix_lua_indexing
        self.data_file = None  # opened lazily by read_data()
        self.read_index(path)
    def read_index(self, path):
        # Parse the .idx file written by IndexedDatasetBuilder.finalize():
        # magic, version, (dtype code, element size), (item count, sizes
        # count), then the dim/data offset tables and flattened sizes.
        with open(index_file_path(path), 'rb') as f:
            magic = f.read(8)
            assert (magic == self._HDR_MAGIC), "Index file doesn't match expected format. Make sure that --dataset-impl is configured properly."
            version = f.read(8)
            assert (struct.unpack('<Q', version) == (1,))
            (code, self.element_size) = struct.unpack('<QQ', f.read(16))
            self.dtype = dtypes[code]
            (self._len, self.s) = struct.unpack('<QQ', f.read(16))
            self.dim_offsets = read_longs(f, (self._len + 1))
            self.data_offsets = read_longs(f, (self._len + 1))
            self.sizes = read_longs(f, self.s)
    def read_data(self, path):
        # Unbuffered binary mode so seek()/readinto() hit exact offsets.
        self.data_file = open(data_file_path(path), 'rb', buffering=0)
    def check_index(self, i):
        # Validate a sample index before seeking into the data file.
        if ((i < 0) or (i >= self._len)):
            raise IndexError('index out of range')
    def __del__(self):
        if self.data_file:
            self.data_file.close()
    @lru_cache(maxsize=8)
    def __getitem__(self, i):
        # Return item i as a LongTensor whose shape comes from the
        # dim_offsets/sizes tables.
        if (not self.data_file):
            self.read_data(self.path)
        self.check_index(i)
        tensor_size = self.sizes[self.dim_offsets[i]:self.dim_offsets[(i + 1)]]
        a = np.empty(tensor_size, dtype=self.dtype)
        self.data_file.seek((self.data_offsets[i] * self.element_size))
        self.data_file.readinto(a)
        item = torch.from_numpy(a).long()
        if self.fix_lua_indexing:
            item -= 1  # convert 1-based (Lua) ids to 0-based
        return item
    def __len__(self):
        return self._len
    def num_tokens(self, index):
        return self.sizes[index]
    def size(self, index):
        return self.sizes[index]
    @staticmethod
    def exists(path):
        # Both the index and the data file must be present.
        return (os.path.exists(index_file_path(path)) and os.path.exists(data_file_path(path)))
    @property
    def supports_prefetch(self):
        return False
|
class IndexedCachedDataset(IndexedDataset):
    """IndexedDataset variant that preloads requested items into an
    in-memory cache via :meth:`prefetch`, then serves ``__getitem__``
    entirely from that cache."""
    def __init__(self, path, fix_lua_indexing=False):
        super().__init__(path, fix_lua_indexing=fix_lua_indexing)
        self.cache = None       # flat buffer holding all prefetched items
        self.cache_index = {}   # sample index -> start offset into self.cache
    @property
    def supports_prefetch(self):
        return True
    def prefetch(self, indices):
        # Fast path: everything requested is already cached.
        if all(((i in self.cache_index) for i in indices)):
            return
        if (not self.data_file):
            self.read_data(self.path)
        # Sorting gives a single forward pass over the data file.
        indices = sorted(set(indices))
        total_size = 0
        for i in indices:
            total_size += (self.data_offsets[(i + 1)] - self.data_offsets[i])
        self.cache = np.empty(total_size, dtype=self.dtype)
        ptx = 0
        # NOTE: the cache is rebuilt from scratch; previously cached items
        # not in *indices* are dropped.
        self.cache_index.clear()
        for i in indices:
            self.cache_index[i] = ptx
            size = (self.data_offsets[(i + 1)] - self.data_offsets[i])
            a = self.cache[ptx:(ptx + size)]
            self.data_file.seek((self.data_offsets[i] * self.element_size))
            self.data_file.readinto(a)
            ptx += size
        if self.data_file:
            # Everything needed is in memory now; release the file handle.
            self.data_file.close()
            self.data_file = None
    @lru_cache(maxsize=8)
    def __getitem__(self, i):
        # Items must have been prefetched first; a KeyError on
        # self.cache_index[i] means prefetch() never saw this index.
        self.check_index(i)
        tensor_size = self.sizes[self.dim_offsets[i]:self.dim_offsets[(i + 1)]]
        a = np.empty(tensor_size, dtype=self.dtype)
        ptx = self.cache_index[i]
        np.copyto(a, self.cache[ptx:(ptx + a.size)])
        item = torch.from_numpy(a).long()
        if self.fix_lua_indexing:
            item -= 1  # convert 1-based (Lua) ids to 0-based
        return item
|
class IndexedRawTextDataset(FairseqDataset):
    """Takes a text file as input and binarizes it in memory at
    instantiation. Original lines are also kept in memory."""
    def __init__(self, path, dictionary, append_eos=True, reverse_order=False):
        self.tokens_list = []  # binarized lines (LongTensors)
        self.lines = []        # original text lines, newline-stripped
        self.sizes = []        # token count per line (np.ndarray after read_data)
        self.append_eos = append_eos
        self.reverse_order = reverse_order
        self.read_data(path, dictionary)
        # NOTE(review): this int attribute shadows the size() method below
        # on instances; instance.size is the dataset length, not callable.
        # Looks intentional here — confirm before changing.
        self.size = len(self.tokens_list)
    def read_data(self, path, dictionary):
        with open(path, 'r', encoding='utf-8') as f:
            for line in f:
                self.lines.append(line.strip('\n'))
                tokens = dictionary.encode_line(line, add_if_not_exist=False, append_eos=self.append_eos, reverse_order=self.reverse_order).long()
                self.tokens_list.append(tokens)
                self.sizes.append(len(tokens))
        self.sizes = np.array(self.sizes)
    def check_index(self, i):
        if ((i < 0) or (i >= self.size)):
            raise IndexError('index out of range')
    @lru_cache(maxsize=8)
    def __getitem__(self, i):
        self.check_index(i)
        return self.tokens_list[i]
    def get_original_text(self, i):
        # Return the raw (un-binarized) text of line i.
        self.check_index(i)
        return self.lines[i]
    def __del__(self):
        pass
    def __len__(self):
        return self.size
    def num_tokens(self, index):
        return self.sizes[index]
    def size(self, index):
        # NOTE(review): shadowed on instances by the self.size int set in
        # __init__; only reachable via the class object.
        return self.sizes[index]
    @staticmethod
    def exists(path):
        return os.path.exists(path)
|
class IndexedDatasetBuilder(object):
    """Incrementally writes a TorchNet IndexedDataset (.bin data + .idx index).

    Token values are stored shifted by +1 for Lua/TorchNet compatibility;
    ``IndexedDataset(fix_lua_indexing=True)`` undoes the shift on read.
    """

    # Size in bytes of one element for each supported dtype.
    # FIX: np.float / np.double aliases were removed in NumPy 1.24 and
    # would crash at class-definition time; use the explicit sized dtypes
    # (np.double was an alias of np.float64).
    element_sizes = {
        np.uint8: 1,
        np.int8: 1,
        np.int16: 2,
        np.int32: 4,
        np.int64: 8,
        np.float32: 4,
        np.float64: 8,
    }

    def __init__(self, out_file, dtype=np.int32):
        self.out_file = open(out_file, 'wb')
        self.dtype = dtype
        self.data_offsets = [0]  # cumulative element offsets, one entry per item
        self.dim_offsets = [0]   # cumulative number-of-dimensions offsets
        self.sizes = []          # flattened per-dimension sizes of all items
        self.element_size = self.element_sizes[self.dtype]

    def add_item(self, tensor):
        """Append one tensor to the data file and record its offsets."""
        # Avoid shadowing the builtin `bytes`; write() returns bytes written,
        # always an exact multiple of element_size, so use integer division
        # to keep offsets as ints.
        n_bytes = self.out_file.write(np.array(tensor.numpy() + 1, dtype=self.dtype))
        self.data_offsets.append(self.data_offsets[-1] + n_bytes // self.element_size)
        for s in tensor.size():
            self.sizes.append(s)
        self.dim_offsets.append(self.dim_offsets[-1] + len(tensor.size()))

    def merge_file_(self, another_file):
        """Append another on-disk dataset (same dtype) to this builder."""
        index = IndexedDataset(another_file)
        assert index.dtype == self.dtype
        # Rebase the merged dataset's offset tables onto our current end.
        begin = self.data_offsets[-1]
        for offset in index.data_offsets[1:]:
            self.data_offsets.append(begin + offset)
        self.sizes.extend(index.sizes)
        begin = self.dim_offsets[-1]
        for dim_offset in index.dim_offsets[1:]:
            self.dim_offsets.append(begin + dim_offset)
        with open(data_file_path(another_file), 'rb') as f:
            shutil.copyfileobj(f, self.out_file)

    def finalize(self, index_file):
        """Close the data file and write the .idx index."""
        self.out_file.close()
        # Use a with-block so the index file is closed even if a write fails.
        with open(index_file, 'wb') as index:
            index.write(b'TNTIDX\x00\x00')                 # header magic
            index.write(struct.pack('<Q', 1))              # format version
            index.write(struct.pack('<QQ', code(self.dtype), self.element_size))
            index.write(struct.pack('<QQ', len(self.data_offsets) - 1, len(self.sizes)))
            write_longs(index, self.dim_offsets)
            write_longs(index, self.data_offsets)
            write_longs(index, self.sizes)
|
def _warmup_mmap_file(path):
    """Read the whole file once in large chunks to warm the OS page cache."""
    chunk_size = 100 * 1024 * 1024  # 100 MiB per read
    with open(path, 'rb') as stream:
        while stream.read(chunk_size):
            pass
|
class MMapIndexedDataset(torch.utils.data.Dataset):
    """Memory-mapped indexed dataset.

    Items are served zero-copy from an ``np.memmap`` over the ``.bin`` data
    file, using (pointer, size) pairs stored in the ``.idx`` index file.
    """
    class Index(object):
        # Magic bytes identifying the mmap index file format.
        _HDR_MAGIC = b'MMIDIDX\x00\x00'
        @classmethod
        def writer(cls, path, dtype):
            # Returns a context manager that writes an index file for the
            # given storage dtype.
            class _Writer(object):
                def __enter__(self):
                    self._file = open(path, 'wb')
                    self._file.write(cls._HDR_MAGIC)
                    self._file.write(struct.pack('<Q', 1))        # format version
                    self._file.write(struct.pack('<B', code(dtype)))  # dtype code
                    return self
                @staticmethod
                def _get_pointers(sizes):
                    # Byte offset of each item within the data file, derived
                    # from cumulative sizes times the element size.
                    dtype_size = dtype().itemsize
                    address = 0
                    pointers = []
                    for size in sizes:
                        pointers.append(address)
                        address += (size * dtype_size)
                    return pointers
                def write(self, sizes):
                    # Layout after the header: item count, int32 sizes table,
                    # int64 pointers table.
                    pointers = self._get_pointers(sizes)
                    self._file.write(struct.pack('<Q', len(sizes)))
                    sizes = np.array(sizes, dtype=np.int32)
                    self._file.write(sizes.tobytes(order='C'))
                    del sizes
                    pointers = np.array(pointers, dtype=np.int64)
                    self._file.write(pointers.tobytes(order='C'))
                    del pointers
                def __exit__(self, exc_type, exc_val, exc_tb):
                    self._file.close()
            return _Writer()
        def __init__(self, path):
            # Parse the fixed-size header, then map the sizes and pointers
            # tables directly from the file (no copies).
            with open(path, 'rb') as stream:
                magic_test = stream.read(9)
                assert (self._HDR_MAGIC == magic_test), "Index file doesn't match expected format. Make sure that --dataset-impl is configured properly."
                version = struct.unpack('<Q', stream.read(8))
                assert ((1,) == version)
                (dtype_code,) = struct.unpack('<B', stream.read(1))
                self._dtype = dtypes[dtype_code]
                self._dtype_size = self._dtype().itemsize
                self._len = struct.unpack('<Q', stream.read(8))[0]
                offset = stream.tell()
            _warmup_mmap_file(path)
            self._bin_buffer_mmap = np.memmap(path, mode='r', order='C')
            self._bin_buffer = memoryview(self._bin_buffer_mmap)
            self._sizes = np.frombuffer(self._bin_buffer, dtype=np.int32, count=self._len, offset=offset)
            self._pointers = np.frombuffer(self._bin_buffer, dtype=np.int64, count=self._len, offset=(offset + self._sizes.nbytes))
        def __del__(self):
            # Explicitly close the underlying mmap before dropping the ref.
            self._bin_buffer_mmap._mmap.close()
            del self._bin_buffer_mmap
        @property
        def dtype(self):
            return self._dtype
        @property
        def sizes(self):
            return self._sizes
        @lru_cache(maxsize=8)
        def __getitem__(self, i):
            # (byte offset into the .bin file, number of elements) for item i.
            return (self._pointers[i], self._sizes[i])
        def __len__(self):
            return self._len
    def __init__(self, path):
        super().__init__()
        self._path = None
        self._index = None
        self._bin_buffer = None
        self._do_init(path)
    def __getstate__(self):
        # Pickle only the path; the mmap is re-created on unpickle.
        return self._path
    def __setstate__(self, state):
        self._do_init(state)
    def _do_init(self, path):
        self._path = path
        self._index = self.Index(index_file_path(self._path))
        _warmup_mmap_file(data_file_path(self._path))
        self._bin_buffer_mmap = np.memmap(data_file_path(self._path), mode='r', order='C')
        self._bin_buffer = memoryview(self._bin_buffer_mmap)
    def __del__(self):
        self._bin_buffer_mmap._mmap.close()
        del self._bin_buffer_mmap
        del self._index
    def __len__(self):
        return len(self._index)
    @lru_cache(maxsize=8)
    def __getitem__(self, i):
        (ptr, size) = self._index[i]
        np_array = np.frombuffer(self._bin_buffer, dtype=self._index.dtype, count=size, offset=ptr)
        if (self._index.dtype != np.int64):
            # Callers expect int64 tensors regardless of the storage dtype.
            np_array = np_array.astype(np.int64)
        return torch.from_numpy(np_array)
    @property
    def sizes(self):
        return self._index.sizes
    @property
    def supports_prefetch(self):
        # The OS page cache already provides locality; nothing to prefetch.
        return False
    @staticmethod
    def exists(path):
        return (os.path.exists(index_file_path(path)) and os.path.exists(data_file_path(path)))
|
class MMapIndexedDatasetBuilder(object):
    """Incrementally writes an MMapIndexedDataset (.bin data + .idx index)."""

    def __init__(self, out_file, dtype=np.int64):
        self._data_file = open(out_file, 'wb')
        self._dtype = dtype
        self._sizes = []  # element count of every item, in insertion order

    def add_item(self, tensor):
        """Append one tensor's raw bytes to the data file."""
        arr = np.array(tensor.numpy(), dtype=self._dtype)
        self._data_file.write(arr.tobytes(order='C'))
        self._sizes.append(arr.size)

    def merge_file_(self, another_file):
        """Append another on-disk dataset (same dtype) to this builder."""
        index = MMapIndexedDataset.Index(index_file_path(another_file))
        assert index.dtype == self._dtype
        for size in index.sizes:
            self._sizes.append(size)
        with open(data_file_path(another_file), 'rb') as f:
            shutil.copyfileobj(f, self._data_file)

    def finalize(self, index_file):
        """Close the data file and write the index for all recorded sizes."""
        self._data_file.close()
        with MMapIndexedDataset.Index.writer(index_file, self._dtype) as index:
            index.write(self._sizes)
|
class CountingIterator(object):
    """Wrapper around an iterable that maintains the iteration count.

    Args:
        iterable (iterable): iterable to wrap
        start (int, optional): initial value of the count (default: 0)

    Attributes:
        count (int): number of elements consumed from this iterator
    """

    def __init__(self, iterable, start=0):
        self.iterable = iterable
        self.count = start
        self.itr = iter(self)
        # Count value once the wrapped iterable is fully consumed; take()
        # may lower it to truncate iteration early.
        self.len = start + len(iterable)

    def __len__(self):
        return self.len

    def __iter__(self):
        for item in self.iterable:
            # Stop early if take() truncated us below the natural length.
            if self.count >= self.len:
                return
            self.count += 1
            yield item

    def __next__(self):
        return next(self.itr)

    def has_next(self):
        """Whether the iterator has been exhausted."""
        return self.count < len(self)

    def skip(self, num_to_skip):
        """Fast-forward the iterator by skipping *num_to_skip* elements."""
        next(itertools.islice(self.itr, num_to_skip, num_to_skip), None)
        return self

    def take(self, n):
        """Truncate the iterator to *n* elements at most."""
        self.len = min(self.len, n)
|
class EpochBatchIterating(object):
    """Abstract interface for iterators that walk a dataset epoch by epoch."""

    def __len__(self) -> int:
        raise NotImplementedError

    def next_epoch_itr(self, shuffle=True, fix_batches_to_gpus=False):
        """Return a new iterator over the dataset.

        Args:
            shuffle (bool, optional): shuffle batches before returning the
                iterator (default: True).
            fix_batches_to_gpus: ensure that batches are always
                allocated to the same shards across epochs. Requires
                that :attr:`dataset` supports prefetching (default: False).
        """
        raise NotImplementedError

    def end_of_epoch(self) -> bool:
        """Returns whether the most recent epoch iterator has been exhausted"""
        raise NotImplementedError

    @property
    def iterations_in_epoch(self) -> int:
        """The number of consumed batches in the current epoch."""
        raise NotImplementedError

    def state_dict(self):
        """Returns a dictionary containing a whole state of the iterator."""
        raise NotImplementedError

    def load_state_dict(self, state_dict):
        """Copies the state of the iterator from the given *state_dict*."""
        raise NotImplementedError
|
class StreamingEpochBatchIterator(EpochBatchIterating):
    """Epoch iterator over a streaming :class:`~torch.utils.data.IterableDataset`."""

    def __init__(self, dataset, epoch=0, num_shards=1, shard_id=0):
        assert isinstance(dataset, torch.utils.data.IterableDataset)
        self.dataset = dataset
        self.epoch = epoch
        self._current_epoch_iterator = None
        self.num_shards = num_shards
        self.shard_id = shard_id

    def next_epoch_itr(self, shuffle=True, fix_batches_to_gpus=False):
        self.epoch += 1
        # Let the dataset reshuffle / re-shard itself for the new epoch.
        self.dataset.set_epoch(self.epoch)
        sharded = ShardedIterator(iterable=self.dataset, num_shards=self.num_shards, shard_id=self.shard_id)
        self._current_epoch_iterator = CountingIterator(iterable=sharded)
        return self._current_epoch_iterator

    def end_of_epoch(self) -> bool:
        return not self._current_epoch_iterator.has_next()

    @property
    def iterations_in_epoch(self) -> int:
        if self._current_epoch_iterator is None:
            return 0
        return self._current_epoch_iterator.count

    def state_dict(self):
        # Only the epoch number is needed to resume a streaming dataset.
        return {'epoch': self.epoch}

    def load_state_dict(self, state_dict):
        self.epoch = state_dict['epoch']
|
class EpochBatchIterator(EpochBatchIterating):
    """A multi-epoch iterator over a :class:`torch.utils.data.Dataset`.

    Compared to :class:`torch.utils.data.DataLoader`, this iterator:

    - can be reused across multiple epochs with the :func:`next_epoch_itr`
      method (optionally shuffled between epochs)
    - can be serialized/deserialized with the :func:`state_dict` and
      :func:`load_state_dict` methods
    - supports sharding with the *num_shards* and *shard_id* arguments

    Args:
        dataset (~torch.utils.data.Dataset): dataset from which to load the data
        collate_fn (callable): merges a list of samples to form a mini-batch
        batch_sampler (~torch.utils.data.Sampler): an iterator over batches of
            indices
        seed (int, optional): seed for random number generator for
            reproducibility (default: 1).
        num_shards (int, optional): shard the data iterator into N
            shards (default: 1).
        shard_id (int, optional): which shard of the data iterator to
            return (default: 0).
        num_workers (int, optional): how many subprocesses to use for data
            loading. 0 means the data will be loaded in the main process
            (default: 0).
        epoch (int, optional): the epoch to start the iterator from
            (default: 0).
    """
    def __init__(self, dataset, collate_fn, batch_sampler, seed=1, num_shards=1, shard_id=0, num_workers=0, epoch=0):
        assert isinstance(dataset, torch.utils.data.Dataset)
        self.dataset = dataset
        self.collate_fn = collate_fn
        # Batches are frozen up front so epoch-to-epoch shuffling is a pure
        # permutation of the same batch list.
        self.frozen_batches = tuple(batch_sampler)
        self.seed = seed
        self.num_shards = num_shards
        self.shard_id = shard_id
        self.num_workers = num_workers
        self.epoch = epoch
        self.shuffle = True
        self._cur_epoch_itr = None
        self._next_epoch_itr = None  # set by load_state_dict() for mid-epoch resume
        self._supports_prefetch = getattr(dataset, 'supports_prefetch', False)
    def __len__(self):
        return len(self.frozen_batches)
    def next_epoch_itr(self, shuffle=True, fix_batches_to_gpus=False):
        """Return a new iterator over the dataset.

        Args:
            shuffle (bool, optional): shuffle batches before returning the
                iterator (default: True).
            fix_batches_to_gpus: ensure that batches are always
                allocated to the same shards across epochs. Requires
                that :attr:`dataset` supports prefetching (default: False).
        """
        if (self._next_epoch_itr is not None):
            # Resume the partially-consumed iterator prepared by
            # load_state_dict() instead of starting a fresh epoch.
            self._cur_epoch_itr = self._next_epoch_itr
            self._next_epoch_itr = None
        else:
            self.epoch += 1
            self._cur_epoch_itr = self._get_iterator_for_epoch(self.epoch, shuffle, fix_batches_to_gpus=fix_batches_to_gpus)
        self.dataset.set_epoch(self.epoch)
        self.shuffle = shuffle
        return self._cur_epoch_itr
    def end_of_epoch(self) -> bool:
        """Returns whether the most recent epoch iterator has been exhausted"""
        return (not self._cur_epoch_itr.has_next())
    @property
    def iterations_in_epoch(self):
        """The number of consumed batches in the current epoch."""
        if (self._cur_epoch_itr is not None):
            return self._cur_epoch_itr.count
        elif (self._next_epoch_itr is not None):
            return self._next_epoch_itr.count
        return 0
    def state_dict(self):
        """Returns a dictionary containing a whole state of the iterator."""
        return {'epoch': self.epoch, 'iterations_in_epoch': self.iterations_in_epoch, 'shuffle': self.shuffle}
    def load_state_dict(self, state_dict):
        """Copies the state of the iterator from the given *state_dict*."""
        self.epoch = state_dict['epoch']
        itr_pos = state_dict.get('iterations_in_epoch', 0)
        if (itr_pos > 0):
            # Fast-forward into the same epoch the checkpoint stopped in.
            self._next_epoch_itr = self._get_iterator_for_epoch(self.epoch, shuffle=state_dict.get('shuffle', True), offset=itr_pos)
    def _get_iterator_for_epoch(self, epoch, shuffle, fix_batches_to_gpus=False, offset=0):
        # Builds the (optionally shuffled, sharded) DataLoader for one epoch,
        # starting at batch *offset*.
        def shuffle_batches(batches, seed):
            # Seeded in-place shuffle so every shard sees the same order.
            with data_utils.numpy_seed(seed):
                np.random.shuffle(batches)
            return batches
        if self._supports_prefetch:
            batches = self.frozen_batches
            if (shuffle and (not fix_batches_to_gpus)):
                batches = shuffle_batches(list(batches), (self.seed + epoch))
            batches = list(ShardedIterator(batches, self.num_shards, self.shard_id, fill_value=[]))
            self.dataset.prefetch([i for s in batches for i in s])
            if (shuffle and fix_batches_to_gpus):
                # Shuffle after sharding (seeded per shard) so each GPU keeps
                # the same batch set across epochs.
                batches = shuffle_batches(batches, ((self.seed + epoch) + self.shard_id))
        else:
            if shuffle:
                batches = shuffle_batches(list(self.frozen_batches), (self.seed + epoch))
            else:
                batches = self.frozen_batches
            batches = list(ShardedIterator(batches, self.num_shards, self.shard_id, fill_value=[]))
        if ((offset > 0) and (offset >= len(batches))):
            # NOTE(review): returns None when resuming exactly at/after the
            # end of an epoch — callers appear to rely on this; confirm
            # before changing.
            return None
        if (self.num_workers > 0):
            # Silence the multiprocessing semaphore_tracker warning from
            # DataLoader worker processes.
            os.environ['PYTHONWARNINGS'] = 'ignore:semaphore_tracker:UserWarning'
        return CountingIterator(torch.utils.data.DataLoader(self.dataset, collate_fn=self.collate_fn, batch_sampler=batches[offset:], num_workers=self.num_workers), start=offset)
|
class GroupedIterator(object):
'Wrapper around an iterable that returns groups (chunks) of items.\n\n Args:\n iterable (iterable): iterable to wrap\n chunk_size (int): size of each chunk\n '
def __init__(self, iterable, chunk_size):
self._len = int(math.ceil((len(iterable) / float(chunk_size))))
self.offset = int(math.ceil((getattr(iterable, 'count', 0) / float(chunk_size))))
self.itr = iterable
self.chunk_size = chunk_size
def __len__(self):
return self._len
def __iter__(self):
return self
def __next__(self):
chunk = []
try:
for _ in range(self.chunk_size):
chunk.append(next(self.itr))
except StopIteration as e:
if (len(chunk) == 0):
raise e
return chunk
|
class ShardedIterator(object):
    """A sharded wrapper around an iterable, padded to length.

    Args:
        iterable (iterable): iterable to wrap
        num_shards (int): number of shards to split the iterable into
        shard_id (int): which shard to iterator over
        fill_value (Any, optional): padding value when the iterable doesn't
            evenly divide *num_shards* (default: None).
    """

    def __init__(self, iterable, num_shards, shard_id, fill_value=None):
        if not (0 <= shard_id < num_shards):
            raise ValueError('shard_id must be between 0 and num_shards')
        # Ceiling division: every shard reports the same (padded) length.
        n = len(iterable)
        self._sharded_len = n // num_shards + (1 if n % num_shards else 0)
        # zip_longest against a range both limits and pads the shard slice.
        self.itr = itertools.zip_longest(
            range(self._sharded_len),
            itertools.islice(iterable, shard_id, n, num_shards),
            fillvalue=fill_value,
        )

    def __len__(self):
        return self._sharded_len

    def __iter__(self):
        return self

    def __next__(self):
        return next(self.itr)[1]
|
class BlockPairDataset(FairseqDataset):
    """Break a Dataset of tokens into sentence pair blocks for next sentence
    prediction as well as masked language model.

    High-level logics are:
    1. break input tensor to tensor blocks
    2. pair the blocks with 50% next sentence and 50% random sentence
    3. return paired blocks as well as related segment labels

    Args:
        dataset (~torch.utils.data.Dataset): dataset to break into blocks
        sizes: array of sentence lengths
        dictionary: dictionary for the task
        block_size: maximum block size
        break_mode: mode for breaking copurs into block pairs. currently we
            support 2 modes
            doc: respect document boundaries and each part of the pair
                should belong to on document
            none: don't respect any boundary and cut tokens evenly
        short_seq_prob: probability for generating shorter block pairs
        doc_break_size: Size for empty line separating documents. Typically 1
            if the sentences have eos, 0 otherwise.
    """
    def __init__(self, dataset, dictionary, sizes, block_size, break_mode='doc', short_seq_prob=0.1, doc_break_size=1):
        super().__init__()
        self.dataset = dataset
        self.pad = dictionary.pad()
        self.eos = dictionary.eos()
        self.cls = dictionary.cls()
        self.mask = dictionary.mask()
        self.sep = dictionary.sep()
        self.break_mode = break_mode
        self.dictionary = dictionary
        self.short_seq_prob = short_seq_prob
        self.block_indices = []
        assert (len(dataset) == len(sizes))
        if (break_mode == 'doc'):
            # Group sentence ids into documents, splitting on the
            # blank-line marker (a sentence of size doc_break_size).
            cur_doc = []
            for (sent_id, sz) in enumerate(sizes):
                # NOTE(review): message below is missing a space
                # ("beseparated") — runtime string left unchanged.
                assert ((doc_break_size == 0) or (sz != 0)), 'when doc_break_size is non-zero, we expect documents to beseparated by a blank line with a single eos.'
                if (sz == doc_break_size):
                    if (len(cur_doc) == 0):
                        continue
                    self.block_indices.append(cur_doc)
                    cur_doc = []
                else:
                    cur_doc.append(sent_id)
            # Reserve 3 slots for the special tokens: [CLS] a [SEP] b [SEP].
            max_num_tokens = (block_size - 3)
            self.sent_pairs = []
            self.sizes = []
            for (doc_id, doc) in enumerate(self.block_indices):
                self._generate_sentence_pair(doc, doc_id, max_num_tokens, sizes)
        elif ((break_mode is None) or (break_mode == 'none')):
            # Ignore boundaries: cut the concatenated token stream into
            # evenly-sized half-blocks.
            sent_length = ((block_size - 3) // 2)
            total_len = sum(dataset.sizes)
            length = math.ceil((total_len / sent_length))
            def block_at(i):
                # (start, end) token offsets of the i-th evenly-cut block.
                start = (i * sent_length)
                end = min((start + sent_length), total_len)
                return (start, end)
            sent_indices = np.array([block_at(i) for i in range(length)])
            sent_sizes = np.array([(e - s) for (s, e) in sent_indices])
            dataset_index = self._sent_to_dataset_index(sent_sizes)
            self._pair_sentences(dataset_index)
        else:
            raise ValueError(('Invalid break_mode: ' + break_mode))
    def _pair_sentences(self, dataset_index):
        """
        Give a list of evenly cut blocks/sentences, pair these sentences
        with 50% consecutive sentences and 50% random sentences.
        This is used for none break mode
        """
        for (sent_id, sent) in enumerate(dataset_index):
            # 1 = real next sentence, 0 = random sentence; the last block
            # can never have a real successor.
            next_sent_label = (1 if ((np.random.rand() > 0.5) and (sent_id != (len(dataset_index) - 1))) else 0)
            if next_sent_label:
                next_sent = dataset_index[(sent_id + 1)]
            else:
                next_sent = dataset_index[self._skip_sampling(len(dataset_index), [sent_id, (sent_id + 1)])]
            self.sent_pairs.append((sent, next_sent, next_sent_label))
            # +3 accounts for the [CLS] and two [SEP] special tokens;
            # index 3 of each entry is the block's token length.
            self.sizes.append(((3 + sent[3]) + next_sent[3]))
    def _sent_to_dataset_index(self, sent_sizes):
        """
        Build index mapping block indices to the underlying dataset indices
        """
        dataset_index = []
        (ds_idx, ds_remaining) = ((- 1), 0)
        for to_consume in sent_sizes:
            sent_size = to_consume
            if (ds_remaining == 0):
                ds_idx += 1
                # NOTE(review): consumes from sent_sizes here (and below);
                # mapping blocks onto the underlying dataset would normally
                # read the dataset's own sizes — confirm intended.
                ds_remaining = sent_sizes[ds_idx]
            start_ds_idx = ds_idx
            start_offset = (sent_sizes[ds_idx] - ds_remaining)
            while (to_consume > ds_remaining):
                to_consume -= ds_remaining
                ds_idx += 1
                ds_remaining = sent_sizes[ds_idx]
            ds_remaining -= to_consume
            # Entry layout: (start index, offset in start, end index, length).
            dataset_index.append((start_ds_idx, start_offset, ds_idx, sent_size))
        assert (ds_remaining == 0)
        assert (ds_idx == (len(self.dataset) - 1))
        return dataset_index
    def _generate_sentence_pair(self, doc, doc_id, max_num_tokens, sizes):
        """
        Go through a single document and genrate sentence paris from it
        """
        current_chunk = []
        current_length = 0
        curr = 0
        # Occasionally target a shorter sequence to reduce train/test
        # length mismatch.
        target_seq_length = max_num_tokens
        if (np.random.random() < self.short_seq_prob):
            target_seq_length = np.random.randint(2, max_num_tokens)
        while (curr < len(doc)):
            sent_id = doc[curr]
            current_chunk.append(sent_id)
            current_length = sum(sizes[current_chunk])
            if ((curr == (len(doc) - 1)) or (current_length >= target_seq_length)):
                # Split the accumulated chunk at a random point into
                # segment A and (candidate) segment B.
                a_end = 1
                if (len(current_chunk) > 2):
                    a_end = np.random.randint(1, (len(current_chunk) - 1))
                sent_a = current_chunk[:a_end]
                len_a = sum(sizes[sent_a])
                # 50% of the time (when possible) B is the true continuation.
                next_sent_label = (1 if ((np.random.rand() > 0.5) and (len(current_chunk) != 1)) else 0)
                if (not next_sent_label):
                    # Random-B case: sample a span from another document.
                    target_b_length = (target_seq_length - len_a)
                    rand_doc_id = self._skip_sampling(len(self.block_indices), [doc_id])
                    random_doc = self.block_indices[rand_doc_id]
                    random_start = np.random.randint(0, len(random_doc))
                    sent_b = []
                    len_b = 0
                    for j in range(random_start, len(random_doc)):
                        sent_b.append(random_doc[j])
                        len_b = sum(sizes[sent_b])
                        if (len_b >= target_b_length):
                            break
                    # Unused tail of the chunk is pushed back for reuse.
                    num_unused_segments = (len(current_chunk) - a_end)
                    curr -= num_unused_segments
                else:
                    sent_b = current_chunk[a_end:]
                    len_b = sum(sizes[sent_b])
                (sent_a, sent_b) = self._truncate_sentences(sent_a, sent_b, max_num_tokens)
                self.sent_pairs.append((sent_a, sent_b, next_sent_label))
                # +3 accounts for [CLS] and two [SEP]; index 3 holds each
                # truncated block's token length.
                self.sizes.append(((3 + sent_a[3]) + sent_b[3]))
                current_chunk = []
            curr += 1
    def _skip_sampling(self, total, skip_ids):
        """
        Generate a random integer which is not in skip_ids. Sample range is
        [0, total)
        TODO: ids in skip_ids should be consecutive, we can extend it to
        more generic version later
        """
        rand_id = np.random.randint((total - len(skip_ids)))
        # Shift ids at/after the skipped run past it.
        return (rand_id if (rand_id < min(skip_ids)) else (rand_id + len(skip_ids)))
    def _truncate_sentences(self, sent_a, sent_b, max_num_tokens):
        """
        Trancate a pair of sentence to limit total length under max_num_tokens
        Logics:
        1. Truncate longer sentence
        2. Tokens to be truncated could be at the beginning or the end of
           the sentnce
        Returns:
            Truncated sentences represented by dataset idx
        """
        (len_a, len_b) = (sum(self.dataset.sizes[sent_a]), sum(self.dataset.sizes[sent_b]))
        front_cut_a = front_cut_b = end_cut_a = end_cut_b = 0
        while True:
            total_length = (((((len_a + len_b) - front_cut_a) - front_cut_b) - end_cut_a) - end_cut_b)
            if (total_length <= max_num_tokens):
                break
            # Trim one token at a time from the longer side, choosing the
            # front or the back at random.
            if (((len_a - front_cut_a) - end_cut_a) > ((len_b - front_cut_b) - end_cut_b)):
                if (np.random.rand() < 0.5):
                    front_cut_a += 1
                else:
                    end_cut_a += 1
            elif (np.random.rand() < 0.5):
                front_cut_b += 1
            else:
                end_cut_b += 1
        truncated_sent_a = self._cut_sentence(sent_a, front_cut_a, end_cut_a)
        truncated_sent_b = self._cut_sentence(sent_b, front_cut_b, end_cut_b)
        return (truncated_sent_a, truncated_sent_b)
    def _cut_sentence(self, sent, front_cut, end_cut):
        """
        Cut a sentence based on the numbers of tokens to be cut from
        beginning and end
        Represent the sentence as dataset idx and return
        """
        (start_ds_idx, end_ds_idx, offset) = (sent[0], sent[(- 1)], 0)
        target_len = ((sum(self.dataset.sizes[sent]) - front_cut) - end_cut)
        # Advance the start index past fully-consumed sentences; the
        # leftover front_cut becomes an offset into the first kept one.
        while (front_cut > 0):
            if (self.dataset.sizes[start_ds_idx] > front_cut):
                offset += front_cut
                break
            else:
                front_cut -= self.dataset.sizes[start_ds_idx]
                start_ds_idx += 1
        # Pull the end index back past fully-consumed sentences.
        while (end_cut > 0):
            if (self.dataset.sizes[end_ds_idx] > end_cut):
                break
            else:
                end_cut -= self.dataset.sizes[end_ds_idx]
                end_ds_idx -= 1
        return (start_ds_idx, offset, end_ds_idx, target_len)
    def _fetch_block(self, start_ds_idx, offset, end_ds_idx, length):
        """
        Fetch a block of tokens based on its dataset idx
        """
        # Concatenate the covering dataset items, then slice out the block.
        buffer = torch.cat([self.dataset[idx] for idx in range(start_ds_idx, (end_ds_idx + 1))])
        (s, e) = (offset, (offset + length))
        return buffer[s:e]
    def __getitem__(self, index):
        (block1, block2, next_sent_label) = self.sent_pairs[index]
        block1 = self._fetch_block(*block1)
        block2 = self._fetch_block(*block2)
        return (block1, block2, next_sent_label)
    def __len__(self):
        return len(self.sizes)
    @property
    def supports_prefetch(self):
        return getattr(self.dataset, 'supports_prefetch', False)
    def prefetch(self, indices):
        # Collect every underlying dataset index touched by the requested
        # sentence pairs, then prefetch them in one call.
        prefetch_idx = set()
        for index in indices:
            for (block1, block2, _) in [self.sent_pairs[index]]:
                for ds_idx in range(block1[0], (block1[2] + 1)):
                    prefetch_idx.add(ds_idx)
                for ds_idx in range(block2[0], (block2[2] + 1)):
                    prefetch_idx.add(ds_idx)
        self.dataset.prefetch(prefetch_idx)
|
class MaskedLMDataset(FairseqDataset):
    """
    A wrapper Dataset for masked language modelling. The dataset wraps
    around TokenBlockDataset or BlockedPairDataset and creates a batch
    where the input blocks are masked according to the specified masking
    probability. Additionally the batch can also contain sentence level
    targets if this is specified.

    Args:
        dataset: Dataset which generates blocks of data. Only BlockPairDataset
            and TokenBlockDataset are supported.
        sizes: Sentence lengths
        vocab: Dictionary with the vocabulary and special tokens.
        pad_idx: Id of padding token in dictionary
        mask_idx: Id of mask token in dictionary
        classif_token_idx: Id of classification token in dictionary. This is
            the token associated with the sentence embedding (Eg: CLS for BERT)
        sep_token_idx: Id of separator token in dictionary (Eg: SEP in BERT)
        seed: Seed for random number generator for reproducibility.
        shuffle: Shuffle the elements before batching.
        has_pairs: Specifies whether the underlying dataset generates a pair
            of blocks along with a sentence_target or not. Setting it to True
            assumes that the underlying dataset generates a label for the pair
            of sentences which is surfaced as sentence_target. The default
            value assumes a single block with no sentence target.
        segment_id: An optional segment id for filling in the segment labels
            when we are in the single block setting (Eg: XLM). Default is 0.
        masking_ratio: specifies what percentage of the blocks should be masked.
        masking_prob: specifies the probability of a given token being
            replaced with the "MASK" token.
        random_token_prob: specifies the probability of a given token being
            replaced by a random token from the vocabulary.
    """

    def __init__(
        self,
        dataset: FairseqDataset,
        sizes: np.ndarray,
        vocab: Dictionary,
        pad_idx: int,
        mask_idx: int,
        classif_token_idx: int,
        sep_token_idx: int,
        seed: int = 1,
        shuffle: bool = True,
        has_pairs: bool = True,
        segment_id: int = 0,
        masking_ratio: float = 0.15,
        masking_prob: float = 0.8,
        random_token_prob: float = 0.1,
    ):
        # Only these block datasets yield items in the layout this wrapper
        # expects (a single block, or a (block, block, label) triple).
        assert (isinstance(dataset, TokenBlockDataset) or isinstance(dataset, BlockPairDataset) or isinstance(dataset, ConcatDataset)), 'MaskedLMDataset only wraps TokenBlockDataset or BlockPairDataset or ConcatDataset'
        self.dataset = dataset
        self.sizes = np.array(sizes)
        self.vocab = vocab
        self.pad_idx = pad_idx
        self.mask_idx = mask_idx
        self.classif_token_idx = classif_token_idx
        self.sep_token_idx = sep_token_idx
        self.shuffle = shuffle
        self.seed = seed
        self.has_pairs = has_pairs
        self.segment_id = segment_id
        self.masking_ratio = masking_ratio
        self.masking_prob = masking_prob
        self.random_token_prob = random_token_prob
        if (not has_pairs):
            # Account for the classification token prepended in _collate().
            self.sizes = (self.sizes + 1)

    def __getitem__(self, index: int):
        # Pair datasets yield (block, block, label); the single-block
        # setting yields just one block, so the extra keys become None.
        if self.has_pairs:
            (block_one, block_two, sentence_target) = self.dataset[index]
        else:
            block_one = self.dataset[index]
        return {
            'id': index,
            'block_one': block_one,
            'block_two': (block_two if self.has_pairs else None),
            'sentence_target': (sentence_target if self.has_pairs else None),
        }

    def __len__(self):
        return len(self.dataset)

    def _mask_block(self, sentence: np.ndarray, mask_idx: int, pad_idx: int, dictionary_token_range: Tuple):
        """
        Mask tokens for Masked Language Model training.
        Samples mask_ratio tokens that will be predicted by LM.

        Note: This function may not be efficient enough since we had multiple
        conversions between np and torch, we can replace them with torch
        operators later.

        Args:
            sentence: 1d tensor to be masked
            mask_idx: index to use for masking the sentence
            pad_idx: index to use for masking the target for tokens we aren't
                predicting
            dictionary_token_range: range of indices in dictionary which can
                be used for random word replacement
                (e.g. without special characters)
        Return:
            masked_sent: masked sentence
            target: target with words which we are not predicting replaced
                by pad_idx
        """
        masked_sent = np.copy(sentence)
        sent_length = len(sentence)
        mask_num = math.ceil((sent_length * self.masking_ratio))
        mask = np.random.choice(sent_length, mask_num, replace=False)
        target = np.copy(sentence)
        for i in range(sent_length):
            if (i in mask):
                rand = np.random.random()
                # BERT-style scheme: with masking_prob replace by <mask>,
                # with random_token_prob replace by a random vocab token,
                # otherwise keep the original token (but still predict it).
                if (rand < self.masking_prob):
                    masked_sent[i] = mask_idx
                elif (rand < (self.masking_prob + self.random_token_prob)):
                    masked_sent[i] = np.random.randint(dictionary_token_range[0], dictionary_token_range[1])
            else:
                # Positions that are not masked are not predicted: pad the
                # target so the loss ignores them.
                target[i] = pad_idx
        return (masked_sent, target)

    def _collate(self, samples: List[Dict], pad_idx: int, eos_idx: int):
        """
        Does the heavy lifting for creating a batch from the input list of
        examples. The logic is as follows:
            1. Mask the input blocks. In case has_pair is True then we have 2
               blocks to mask.
            2. Prepend the first masked block tensor with the special token
               used as sentence embedding. Eg: CLS in BERT. This happens
               irrespective of the value of has_pair.
            3. If has_pair is True, then append the first masked block with
               the special separator token (eg: SEP for BERT) and compute
               segment label accordingly. In this case, also append the second
               masked block with this special separator token and compute its
               segment label.
            4. For the targets tensor, prepend and append with padding index
               accordingly.
            5. Concatenate all tensors.
        """
        if (len(samples) == 0):
            return {}
        # Seed on the first sample id so masking is deterministic per batch.
        with data_utils.numpy_seed((self.seed + samples[0]['id'])):
            for s in samples:
                # Random replacements are drawn outside the special symbols.
                token_range = (self.vocab.nspecial, len(self.vocab))
                (masked_blk_one, masked_tgt_one) = self._mask_block(s['block_one'], self.mask_idx, self.pad_idx, token_range)
                # Prepend the classification token; its target is padding
                # (never predicted).
                tokens = np.concatenate([[self.classif_token_idx], masked_blk_one])
                targets = np.concatenate([[self.pad_idx], masked_tgt_one])
                segments = (np.ones(len(tokens)) * self.segment_id)
                if self.has_pairs:
                    # Sentence A: "... <sep>", segment label 0.
                    tokens_one = np.concatenate([tokens, [self.sep_token_idx]])
                    targets_one = np.concatenate([targets, [self.pad_idx]])
                    (masked_blk_two, masked_tgt_two) = self._mask_block(s['block_two'], self.mask_idx, self.pad_idx, token_range)
                    # Sentence B: "... <sep>", segment label 1.
                    tokens_two = np.concatenate([masked_blk_two, [self.sep_token_idx]])
                    targets_two = np.concatenate([masked_tgt_two, [self.pad_idx]])
                    segments_one = np.zeros(len(tokens_one))
                    segments_two = np.ones(len(tokens_two))
                    tokens = np.concatenate([tokens_one, tokens_two])
                    targets = np.concatenate([targets_one, targets_two])
                    segments = np.concatenate([segments_one, segments_two])
                s['source'] = torch.LongTensor(tokens)
                s['segment_labels'] = torch.LongTensor(segments)
                s['lm_target'] = torch.LongTensor(targets)

        def merge(key):
            # Right-pad each per-sample tensor under `key` to a 2D batch.
            return data_utils.collate_tokens([s[key] for s in samples], pad_idx, eos_idx, left_pad=False)

        return {
            'id': torch.LongTensor([s['id'] for s in samples]),
            'ntokens': sum((len(s['source']) for s in samples)),
            'net_input': {'src_tokens': merge('source'), 'segment_labels': merge('segment_labels')},
            'lm_target': merge('lm_target'),
            'sentence_target': (torch.LongTensor([s['sentence_target'] for s in samples]) if self.has_pairs else None),
            'nsentences': len(samples),
        }

    def collater(self, samples: List[Dict]):
        """Merge a list of samples to form a mini-batch.

        Args:
            samples (List[dict]): samples to collate

        Returns:
            dict: a mini-batch of data
        """
        return self._collate(samples, self.vocab.pad(), self.vocab.eos())

    def num_tokens(self, index: int):
        """
        Return the number of tokens in a sample. This value is used to
        enforce max-tokens during batching.
        """
        return self.sizes[index]

    def size(self, index: int):
        """
        Return an example's size as a float or tuple. This value is used when
        filtering a dataset with max-positions.
        """
        return self.sizes[index]

    def ordered_indices(self):
        """
        Return an ordered list of indices. Batches will be constructed based
        on this order.
        """
        if self.shuffle:
            return np.random.permutation(len(self))
        else:
            # lexsort: primary key is the last array (sizes), ties broken by
            # the original index order.
            order = [np.arange(len(self))]
            order.append(self.sizes)
            return np.lexsort(order)

    @property
    def supports_prefetch(self):
        # Delegated to the wrapped block dataset.
        return getattr(self.dataset, 'supports_prefetch', False)

    def prefetch(self, indices):
        self.dataset.prefetch(indices)
|
class MaskedLMDictionary(Dictionary):
    """
    Dictionary for Masked Language Modelling tasks. Extends Dictionary by
    adding the mask symbol.
    """

    def __init__(self, pad='<pad>', eos='</s>', unk='<unk>', mask='<mask>'):
        super().__init__(pad, eos, unk)
        self.mask_word = mask
        self.mask_index = self.add_symbol(mask)
        # The newly added mask symbol counts as a special token too.
        self.nspecial = len(self.symbols)

    def mask(self):
        """Helper to get index of mask symbol"""
        return self.mask_index
|
class BertDictionary(MaskedLMDictionary):
    """
    Dictionary for BERT task. Extends MaskedLMDictionary by adding support
    for the cls and sep symbols.
    """

    def __init__(self, pad='<pad>', eos='</s>', unk='<unk>', mask='<mask>', cls='<cls>', sep='<sep>'):
        super().__init__(pad, eos, unk, mask)
        self.cls_word, self.sep_word = cls, sep
        self.cls_index = self.add_symbol(cls)
        self.sep_index = self.add_symbol(sep)
        # cls/sep are special tokens as well.
        self.nspecial = len(self.symbols)

    def cls(self):
        """Helper to get index of cls symbol"""
        return self.cls_index

    def sep(self):
        """Helper to get index of sep symbol"""
        return self.sep_index
|
class ListDataset(BaseWrapperDataset):
    """
    Thin wrapper that serves the wrapped dataset's items unchanged, with
    externally supplied sizes and a pass-through collater.
    """

    def __init__(self, dataset, sizes=None):
        super().__init__(dataset)
        self._sizes = sizes

    def __iter__(self):
        # Iterate the wrapped dataset directly.
        return iter(self.dataset)

    def collater(self, samples):
        # No batching logic: a "batch" is simply the raw list of samples.
        return samples

    @property
    def sizes(self):
        return self._sizes

    def num_tokens(self, index):
        return self.sizes[index]

    def size(self, index):
        return self.sizes[index]

    def set_epoch(self, epoch):
        # Epoch changes have no effect on a static list.
        pass
|
class LRUCacheDataset(BaseWrapperDataset):
    """
    Wrapper that memoizes the 8 most recently accessed items of the wrapped
    dataset. Used by MaskTokensDataset.apply_mask so the shared underlying
    dataset is not re-read for both the source and the target view.
    """

    def __init__(self, dataset, token=None):
        # NOTE(review): `token` is accepted but never used — presumably kept
        # for signature compatibility with other wrappers; confirm callers.
        super().__init__(dataset)

    # NOTE(review): lru_cache on instance methods keys on `self` and keeps
    # the instance alive for the cache's lifetime.
    @lru_cache(maxsize=8)
    def __getitem__(self, index):
        return self.dataset[index]

    @lru_cache(maxsize=8)
    def collater(self, samples):
        # NOTE(review): lru_cache requires hashable arguments, so this raises
        # TypeError if `samples` is a list — verify collater is only called
        # with hashable batches (or never called) through this wrapper.
        return self.dataset.collater(samples)
|
class MaskTokensDataset(BaseWrapperDataset):
    """
    A wrapper Dataset for masked language modeling.

    Input items are masked according to the specified masking probability.

    Args:
        dataset: Dataset to wrap.
        sizes: Sentence lengths
        vocab: Dictionary with the vocabulary and special tokens.
        pad_idx: Id of pad token in vocab
        mask_idx: Id of mask token in vocab
        return_masked_tokens: controls whether to return the non-masked tokens
            (the default) or to return a tensor with the original masked token
            IDs (and *pad_idx* elsewhere). The latter is useful as targets for
            masked LM training.
        seed: Seed for random number generator for reproducibility.
        mask_prob: probability of replacing a token with *mask_idx*.
        leave_unmasked_prob: probability that a masked token is unmasked.
        random_token_prob: probability of replacing a masked token with a
            random token from the vocabulary.
        freq_weighted_replacement: sample random replacement words based on
            word frequencies in the vocab.
        mask_whole_words: only mask whole words. This should be a byte mask
            over vocab indices, indicating whether it is the beginning of a
            word. We will extend any mask to encompass the whole word.
        bpe: BPE to use for whole-word masking.
    """

    @classmethod
    def apply_mask(cls, dataset: torch.utils.data.Dataset, *args, **kwargs):
        """Return the source and target datasets for masked LM training."""
        # Share one LRU-cached view of the underlying dataset so the source
        # and target wrappers read each item only once between them.
        dataset = LRUCacheDataset(dataset)
        return (LRUCacheDataset(cls(dataset, *args, **kwargs, return_masked_tokens=False)), LRUCacheDataset(cls(dataset, *args, **kwargs, return_masked_tokens=True)))

    def __init__(
        self,
        dataset: torch.utils.data.Dataset,
        vocab: Dictionary,
        pad_idx: int,
        mask_idx: int,
        return_masked_tokens: bool = False,
        seed: int = 1,
        mask_prob: float = 0.15,
        leave_unmasked_prob: float = 0.1,
        random_token_prob: float = 0.1,
        freq_weighted_replacement: bool = False,
        mask_whole_words: torch.Tensor = None,
    ):
        assert (0.0 < mask_prob < 1.0)
        assert (0.0 <= random_token_prob <= 1.0)
        assert (0.0 <= leave_unmasked_prob <= 1.0)
        assert ((random_token_prob + leave_unmasked_prob) <= 1.0)
        self.dataset = dataset
        self.vocab = vocab
        self.pad_idx = pad_idx
        self.mask_idx = mask_idx
        self.return_masked_tokens = return_masked_tokens
        self.seed = seed
        self.mask_prob = mask_prob
        self.leave_unmasked_prob = leave_unmasked_prob
        self.random_token_prob = random_token_prob
        self.mask_whole_words = mask_whole_words
        # Sampling weights for random replacement; only needed (and only
        # defined) when random replacement can actually happen.
        if (random_token_prob > 0.0):
            if freq_weighted_replacement:
                weights = np.array(self.vocab.count)
            else:
                weights = np.ones(len(self.vocab))
            # Never sample special symbols as replacements.
            weights[:self.vocab.nspecial] = 0
            self.weights = (weights / weights.sum())
        self.epoch = 0

    def set_epoch(self, epoch, **unused):
        self.epoch = epoch

    @lru_cache(maxsize=8)
    def __getitem__(self, index: int):
        # Seed with (seed, epoch, index): the same item is masked the same
        # way within an epoch, but differently across epochs.
        with data_utils.numpy_seed(self.seed, self.epoch, index):
            item = self.dataset[index]
            sz = len(item)
            assert (self.mask_idx not in item), 'Dataset contains mask_idx (={}), this is not expected!'.format(self.mask_idx)
            if (self.mask_whole_words is not None):
                # Whole-word masking: work at word granularity (`sz` becomes
                # the number of words); `mask` is expanded back to token
                # granularity below via np.repeat with `word_lens`.
                word_begins_mask = self.mask_whole_words.gather(0, item)
                word_begins_idx = word_begins_mask.nonzero().view((- 1))
                sz = len(word_begins_idx)
                words = np.split(word_begins_mask, word_begins_idx)[1:]
                assert (len(words) == sz)
                word_lens = list(map(len, words))
            # Decide which elements to mask; the +rand() term implements
            # stochastic rounding of mask_prob * sz.
            mask = np.full(sz, False)
            num_mask = int(((self.mask_prob * sz) + np.random.rand()))
            mask[np.random.choice(sz, num_mask, replace=False)] = True
            if self.return_masked_tokens:
                # Target view: original ids at masked positions, pad elsewhere.
                if (self.mask_whole_words is not None):
                    mask = np.repeat(mask, word_lens)
                new_item = np.full(len(mask), self.pad_idx)
                new_item[mask] = item[(torch.from_numpy(mask.astype(np.uint8)) == 1)]
                return torch.from_numpy(new_item)
            # Source view: of the masked positions, decide which are left
            # unmasked and which get a random replacement (BERT 80/10/10).
            rand_or_unmask_prob = (self.random_token_prob + self.leave_unmasked_prob)
            if (rand_or_unmask_prob > 0.0):
                rand_or_unmask = (mask & (np.random.rand(sz) < rand_or_unmask_prob))
                if (self.random_token_prob == 0.0):
                    unmask = rand_or_unmask
                    rand_mask = None
                elif (self.leave_unmasked_prob == 0.0):
                    unmask = None
                    rand_mask = rand_or_unmask
                else:
                    # Split rand_or_unmask between the two outcomes.
                    unmask_prob = (self.leave_unmasked_prob / rand_or_unmask_prob)
                    decision = (np.random.rand(sz) < unmask_prob)
                    unmask = (rand_or_unmask & decision)
                    rand_mask = (rand_or_unmask & (~ decision))
            else:
                unmask = rand_mask = None
            if (unmask is not None):
                # Remove "leave unmasked" positions from the <mask> set.
                mask = (mask ^ unmask)
            if (self.mask_whole_words is not None):
                mask = np.repeat(mask, word_lens)
            new_item = np.copy(item)
            new_item[mask] = self.mask_idx
            if (rand_mask is not None):
                num_rand = rand_mask.sum()
                if (num_rand > 0):
                    if (self.mask_whole_words is not None):
                        rand_mask = np.repeat(rand_mask, word_lens)
                        num_rand = rand_mask.sum()
                    new_item[rand_mask] = np.random.choice(len(self.vocab), num_rand, p=self.weights)
            return torch.from_numpy(new_item)
|
def collate(samples, pad_idx, eos_idx):
    """Collate a list of monolingual samples into a padded mini-batch dict."""
    if not samples:
        return {}

    def merge(key, is_list=False):
        # Pad either one tensor per sample, or (for multi-target setups)
        # a list of tensors per sample, padded position-wise.
        if is_list:
            return [
                data_utils.collate_tokens(
                    [s[key][i] for s in samples], pad_idx, eos_idx, left_pad=False
                )
                for i in range(len(samples[0][key]))
            ]
        return data_utils.collate_tokens(
            [s[key] for s in samples], pad_idx, eos_idx, left_pad=False
        )

    src_tokens = merge('source')
    if samples[0]['target'] is not None:
        target = merge('target', isinstance(samples[0]['target'], list))
    else:
        # No explicit targets: fall back to the source tokens.
        target = src_tokens

    return {
        'id': torch.LongTensor([s['id'] for s in samples]),
        'nsentences': len(samples),
        'ntokens': sum(len(s['source']) for s in samples),
        'net_input': {
            'src_tokens': src_tokens,
            'src_lengths': torch.LongTensor([s['source'].numel() for s in samples]),
        },
        'target': target,
    }
|
class MonolingualDataset(FairseqDataset):
    """
    A wrapper around torch.utils.data.Dataset for monolingual data.

    Args:
        dataset (torch.utils.data.Dataset): dataset to wrap. When *targets*
            is set, each item must be a (source, future_target, past_target)
            triple; otherwise a single source tensor.
        sizes (List[int]): sentence lengths
        src_vocab (~fairseq.data.Dictionary): source vocabulary
        tgt_vocab (~fairseq.data.Dictionary): target vocabulary; when it is
            smaller than the source one, out-of-range target ids are mapped
            to its unk symbol (see _filter_vocab)
        add_eos_for_other_targets (bool): append EOS to the source when
            'self' or 'past' targets are requested and EOS is missing
        shuffle (bool, optional): shuffle the elements before batching
            (default: True).
        targets (List[str], optional): which targets to return; a subset of
            {'self', 'future', 'past'}
        add_bos_token (bool, optional): prepend BOS to source (and target)
    """

    def __init__(self, dataset, sizes, src_vocab, tgt_vocab, add_eos_for_other_targets, shuffle, targets=None, add_bos_token=False):
        self.dataset = dataset
        self.sizes = np.array(sizes)
        self.vocab = src_vocab
        self.tgt_vocab = tgt_vocab
        self.add_eos_for_other_targets = add_eos_for_other_targets
        self.shuffle = shuffle
        self.add_bos_token = add_bos_token
        assert ((targets is None) or all(((t in {'self', 'future', 'past'}) for t in targets))), "targets must be none or one of 'self', 'future', 'past'"
        # An empty target list is treated the same as "no targets".
        if ((targets is not None) and (len(targets) == 0)):
            targets = None
        self.targets = targets

    def __getitem__(self, index):
        if (self.targets is not None):
            # Underlying dataset yields (source, future_target, past_target).
            (source, future_target, past_target) = self.dataset[index]
            (source, target) = self._make_source_target(source, future_target, past_target)
        else:
            source = self.dataset[index]
            target = None
        (source, target) = self._maybe_add_bos(source, target)
        return {'id': index, 'source': source, 'target': target}

    def __len__(self):
        return len(self.dataset)

    def _make_source_target(self, source, future_target, past_target):
        # Build the requested target(s), keeping them aligned with the
        # (possibly EOS-extended) source.
        if (self.targets is not None):
            target = []
            if (self.add_eos_for_other_targets and (('self' in self.targets) or ('past' in self.targets)) and (source[(- 1)] != self.vocab.eos())):
                # Append EOS to the source; the aligned targets must grow by
                # one position as well.
                source = torch.cat([source, source.new([self.vocab.eos()])])
                if ('future' in self.targets):
                    # Future target for the new EOS position is unknown: pad.
                    future_target = torch.cat([future_target, future_target.new([self.vocab.pad()])])
                if ('past' in self.targets):
                    # Shift the past target right by one; source[-2, None] is
                    # the pre-EOS source token as a 1-element tensor.
                    past_target = torch.cat([past_target.new([self.vocab.pad()]), past_target[1:], source[((- 2), None)]])
            for t in self.targets:
                if (t == 'self'):
                    target.append(source)
                elif (t == 'future'):
                    target.append(future_target)
                elif (t == 'past'):
                    target.append(past_target)
                else:
                    raise Exception(('invalid target ' + t))
            if (len(target) == 1):
                # Single target: unwrap from the list.
                target = target[0]
        else:
            # NOTE(review): only reachable if called with self.targets None,
            # which __getitem__ never does; defensive fallback.
            target = future_target
        return (source, self._filter_vocab(target))

    def _maybe_add_bos(self, source, target):
        # Optionally prepend the BOS symbol of the respective vocabulary.
        if self.add_bos_token:
            source = torch.cat([source.new([self.vocab.bos()]), source])
            if (target is not None):
                target = torch.cat([target.new([self.tgt_vocab.bos()]), target])
        return (source, target)

    def _filter_vocab(self, target):
        # Map target ids that fall outside the (smaller) target vocabulary
        # to its unk symbol; no-op when both vocabularies have equal size.
        if (len(self.tgt_vocab) != len(self.vocab)):
            def _filter(target):
                mask = target.ge(len(self.tgt_vocab))
                if mask.any():
                    target[mask] = self.tgt_vocab.unk()
                return target
            if isinstance(target, list):
                return [_filter(t) for t in target]
            return _filter(target)
        return target

    def collater(self, samples):
        """Merge a list of samples to form a mini-batch.

        Args:
            samples (List[dict]): samples to collate

        Returns:
            dict: a mini-batch with the following keys:

                - `id` (LongTensor): example IDs in the original input order
                - `ntokens` (int): total number of tokens in the batch
                - `net_input` (dict): the input to the Model, containing keys:

                  - `src_tokens` (LongTensor): a padded 2D Tensor of tokens in
                    the source sentence of shape `(bsz, src_len)`. Padding will
                    appear on the right.

                - `target` (LongTensor): a padded 2D Tensor of tokens in the
                  target sentence of shape `(bsz, tgt_len)`. Padding will
                  appear on the right.
        """
        return collate(samples, self.vocab.pad(), self.vocab.eos())

    def num_tokens(self, index):
        """Return the number of tokens in a sample. This value is used to
        enforce ``--max-tokens`` during batching."""
        return self.sizes[index]

    def size(self, index):
        """Return an example's size as a float or tuple. This value is used
        when filtering a dataset with ``--max-positions``."""
        return self.sizes[index]

    def ordered_indices(self):
        """Return an ordered list of indices. Batches will be constructed
        based on this order."""
        if self.shuffle:
            order = [np.random.permutation(len(self))]
        else:
            order = [np.arange(len(self))]
        # lexsort: sizes (last key) is primary, the first array breaks ties.
        order.append(self.sizes)
        return np.lexsort(order)

    @property
    def supports_prefetch(self):
        return getattr(self.dataset, 'supports_prefetch', False)

    def prefetch(self, indices):
        self.dataset.prefetch(indices)
|
def uniform_sampler(x):
    """Draw one element of ``x`` uniformly at random; return a Python scalar."""
    picked = np.random.choice(x, 1)
    return picked.item()
|
# Extraction residue (dataset-viewer boilerplate), commented out so it is not
# mistaken for code:
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.