# fairseq data utilities (extracted chunk): epoch/shard iterators and masked-LM datasets
class StreamingEpochBatchIterator(EpochBatchIterating):
    """Epoch iterator for streaming datasets.

    Each call to :func:`next_epoch_itr` bumps the epoch counter and wraps
    the dataset in a sharded, counting iterator; only the epoch number is
    (de)serialized.
    """

    def __init__(self, dataset, epoch=0, num_shards=1, shard_id=0):
        self.dataset = dataset
        self.epoch = epoch
        self.num_shards = num_shards
        self.shard_id = shard_id
        self._current_epoch_iterator = None

    def next_epoch_itr(self, shuffle=True, fix_batches_to_gpus=False):
        """Advance the epoch counter and return a fresh iterator.

        *shuffle* and *fix_batches_to_gpus* are accepted for interface
        compatibility but have no effect here.
        """
        self.epoch += 1
        sharded = ShardedIterator(
            iterable=self.dataset,
            num_shards=self.num_shards,
            shard_id=self.shard_id,
        )
        self._current_epoch_iterator = CountingIterator(iterable=sharded)
        return self._current_epoch_iterator

    def end_of_epoch(self) -> bool:
        """True once the current epoch iterator is exhausted."""
        return not self._current_epoch_iterator.has_next()

    @property
    def iterations_in_epoch(self) -> int:
        """Number of batches consumed so far in the current epoch."""
        if self._current_epoch_iterator is None:
            return 0
        return self._current_epoch_iterator.count

    def state_dict(self):
        """Serialize iterator state (only the epoch counter)."""
        return {'epoch': self.epoch}

    def load_state_dict(self, state_dict):
        """Restore the epoch counter from *state_dict*."""
        self.epoch = state_dict['epoch']
|
class EpochBatchIterator(EpochBatchIterating):
    """A multi-epoch iterator over a :class:`torch.utils.data.Dataset`.

    Compared to :class:`torch.utils.data.DataLoader`, this iterator:

    - can be reused across multiple epochs with the :func:`next_epoch_itr`
      method (optionally shuffled between epochs)
    - can be serialized/deserialized with the :func:`state_dict` and
      :func:`load_state_dict` methods
    - supports sharding with the *num_shards* and *shard_id* arguments

    Args:
        dataset (~torch.utils.data.Dataset): dataset from which to load the data
        collate_fn (callable): merges a list of samples to form a mini-batch
        batch_sampler (~torch.utils.data.Sampler): an iterator over batches of
            indices
        seed (int, optional): seed for random number generator for
            reproducibility (default: 1).
        num_shards (int, optional): shard the data iterator into N
            shards (default: 1).
        shard_id (int, optional): which shard of the data iterator to
            return (default: 0).
        num_workers (int, optional): how many subprocesses to use for data
            loading. 0 means the data will be loaded in the main process
            (default: 0).
        epoch (int, optional): the epoch to start the iterator from
            (default: 0).
    """

    def __init__(self, dataset, collate_fn, batch_sampler, seed=1, num_shards=1, shard_id=0, num_workers=0, epoch=0):
        assert isinstance(dataset, torch.utils.data.Dataset)
        self.dataset = dataset
        self.collate_fn = collate_fn
        # Freeze the batch sampler up front so every epoch reuses the same
        # batches; only their order changes when shuffling.
        self.frozen_batches = tuple(batch_sampler)
        self.seed = seed
        self.num_shards = num_shards
        self.shard_id = shard_id
        self.num_workers = num_workers
        self.epoch = epoch
        self._cur_epoch_itr = None
        # Prepared by load_state_dict() when resuming mid-epoch.
        self._next_epoch_itr = None
        self._supports_prefetch = getattr(dataset, 'supports_prefetch', False)

    def __len__(self):
        return len(self.frozen_batches)

    def next_epoch_itr(self, shuffle=True, fix_batches_to_gpus=False):
        """Return a new iterator over the dataset.

        Args:
            shuffle (bool, optional): shuffle batches before returning the
                iterator (default: True).
            fix_batches_to_gpus: ensure that batches are always
                allocated to the same shards across epochs. Requires
                that :attr:`dataset` supports prefetching (default: False).
        """
        if (self._next_epoch_itr is not None):
            # Resuming from a checkpoint: use the iterator prepared by
            # load_state_dict() instead of starting a fresh epoch.
            self._cur_epoch_itr = self._next_epoch_itr
            self._next_epoch_itr = None
        else:
            self.epoch += 1
            self._cur_epoch_itr = self._get_iterator_for_epoch(self.epoch, shuffle, fix_batches_to_gpus=fix_batches_to_gpus)
        self.dataset.set_epoch(self.epoch)
        return self._cur_epoch_itr

    def end_of_epoch(self) -> bool:
        """Returns whether the most recent epoch iterator has been exhausted"""
        return (not self._cur_epoch_itr.has_next())

    @property
    def iterations_in_epoch(self):
        """The number of consumed batches in the current epoch."""
        if (self._cur_epoch_itr is not None):
            return self._cur_epoch_itr.count
        elif (self._next_epoch_itr is not None):
            return self._next_epoch_itr.count
        return 0

    def state_dict(self):
        """Returns a dictionary containing a whole state of the iterator."""
        return {'epoch': self.epoch, 'iterations_in_epoch': self.iterations_in_epoch}

    def load_state_dict(self, state_dict):
        """Copies the state of the iterator from the given *state_dict*."""
        self.epoch = state_dict['epoch']
        itr_pos = state_dict.get('iterations_in_epoch', 0)
        if (itr_pos > 0):
            # Fast-forward within the epoch to the checkpointed position.
            # NOTE(review): 'shuffle' is read from state_dict here but
            # state_dict() above never stores it, so the default (True)
            # always applies unless callers inject the key — confirm.
            self._next_epoch_itr = self._get_iterator_for_epoch(self.epoch, shuffle=state_dict.get('shuffle', True), offset=itr_pos)

    def _get_iterator_for_epoch(self, epoch, shuffle, fix_batches_to_gpus=False, offset=0):
        # Deterministic shuffle keyed on (seed + epoch) so all shards agree
        # on the global batch order.
        def shuffle_batches(batches, seed):
            with data_utils.numpy_seed(seed):
                np.random.shuffle(batches)
            return batches
        if self._supports_prefetch:
            batches = self.frozen_batches
            if (shuffle and (not fix_batches_to_gpus)):
                # Shuffle globally, then shard: a shard sees different
                # batches every epoch.
                batches = shuffle_batches(list(batches), (self.seed + epoch))
            batches = list(ShardedIterator(batches, self.num_shards, self.shard_id, fill_value=[]))
            self.dataset.prefetch([i for s in batches for i in s])
            if (shuffle and fix_batches_to_gpus):
                # Shard first, then shuffle within the shard (seed includes
                # shard_id): each shard keeps the same batches every epoch.
                batches = shuffle_batches(batches, ((self.seed + epoch) + self.shard_id))
        else:
            if shuffle:
                batches = shuffle_batches(list(self.frozen_batches), (self.seed + epoch))
            else:
                batches = self.frozen_batches
            batches = list(ShardedIterator(batches, self.num_shards, self.shard_id, fill_value=[]))
        if ((offset > 0) and (offset >= len(batches))):
            # Nothing left in this epoch after the requested offset.
            return None
        if (self.num_workers > 0):
            # Silence noisy multiprocessing semaphore_tracker warnings.
            os.environ['PYTHONWARNINGS'] = 'ignore:semaphore_tracker:UserWarning'
        return CountingIterator(torch.utils.data.DataLoader(self.dataset, collate_fn=self.collate_fn, batch_sampler=batches[offset:], num_workers=self.num_workers), start=offset)
|
class GroupedIterator(object):
    """Wrapper around an iterable that yields fixed-size chunks of items.

    The final chunk may be shorter than *chunk_size*; once the wrapped
    iterator is exhausted, the next call raises :exc:`StopIteration`.

    Args:
        iterable (iterable): iterable to wrap
        chunk_size (int): size of each chunk
    """

    def __init__(self, iterable, chunk_size):
        # Total number of chunks, rounding up for a partial trailing chunk.
        self._len = int(math.ceil(len(iterable) / float(chunk_size)))
        # Chunks already consumed, derived from the wrapped iterator's
        # optional ``count`` attribute (e.g. a CountingIterator).
        self.offset = int(math.ceil(getattr(iterable, 'count', 0) / float(chunk_size)))
        self.itr = iterable
        self.chunk_size = chunk_size

    def __len__(self):
        return self._len

    def __iter__(self):
        return self

    def __next__(self):
        group = []
        while len(group) < self.chunk_size:
            try:
                group.append(next(self.itr))
            except StopIteration:
                if not group:
                    # Nothing buffered: propagate end-of-iteration.
                    raise
                # Partial chunk: return what we have.
                break
        return group
|
class ShardedIterator(object):
    """A sharded wrapper around an iterable, padded to length.

    Every shard yields the same number of items; shards that run short are
    padded with *fill_value*.

    Args:
        iterable (iterable): iterable to wrap
        num_shards (int): number of shards to split the iterable into
        shard_id (int): which shard to iterate over
        fill_value (Any, optional): padding value when the iterable doesn't
            evenly divide *num_shards* (default: None).
    """

    def __init__(self, iterable, num_shards, shard_id, fill_value=None):
        if not (0 <= shard_id < num_shards):
            raise ValueError('shard_id must be between 0 and num_shards')
        total = len(iterable)
        # Ceiling division: pad so all shards have equal length.
        self._sharded_len = total // num_shards
        if total % num_shards:
            self._sharded_len += 1
        # Stride through the iterable starting at shard_id; zip_longest
        # against a range pads short shards with fill_value.
        self.itr = itertools.zip_longest(
            range(self._sharded_len),
            itertools.islice(iterable, shard_id, total, num_shards),
            fillvalue=fill_value,
        )

    def __len__(self):
        return self._sharded_len

    def __iter__(self):
        return self

    def __next__(self):
        _, value = next(self.itr)
        return value
|
class BlockPairDataset(FairseqDataset):
    """Break a Dataset of tokens into sentence pair blocks for next sentence
    prediction as well as masked language model.

    High-level logic:
    1. break input tensor into tensor blocks
    2. pair the blocks with 50% next sentence and 50% random sentence
    3. return paired blocks as well as related segment labels

    Args:
        dataset (~torch.utils.data.Dataset): dataset to break into blocks
        sizes: array of sentence lengths
        dictionary: dictionary for the task
        block_size: maximum block size
        break_mode: mode for breaking corpus into block pairs. Currently we
            support 2 modes:
            doc: respect document boundaries; each half of the pair belongs
                to one document
            none: don't respect any boundary and cut tokens evenly
        short_seq_prob: probability for generating shorter block pairs
        doc_break_size: size of the empty line separating documents.
            Typically 1 if the sentences have eos, 0 otherwise.
    """

    def __init__(self, dataset, dictionary, sizes, block_size, break_mode='doc', short_seq_prob=0.1, doc_break_size=1):
        super().__init__()
        self.dataset = dataset
        # Cache special-token indices from the dictionary.
        self.pad = dictionary.pad()
        self.eos = dictionary.eos()
        self.cls = dictionary.cls()
        self.mask = dictionary.mask()
        self.sep = dictionary.sep()
        self.break_mode = break_mode
        self.dictionary = dictionary
        self.short_seq_prob = short_seq_prob
        # One entry per document: list of the sentence ids it contains
        # (populated in 'doc' mode only).
        self.block_indices = []
        assert (len(dataset) == len(sizes))
        if (break_mode == 'doc'):
            # Group sentence ids into documents, splitting on blank
            # separator lines of length doc_break_size.
            cur_doc = []
            for (sent_id, sz) in enumerate(sizes):
                assert ((doc_break_size == 0) or (sz != 0)), 'when doc_break_size is non-zero, we expect documents to beseparated by a blank line with a single eos.'
                if (sz == doc_break_size):
                    if (len(cur_doc) == 0):
                        continue
                    self.block_indices.append(cur_doc)
                    cur_doc = []
                else:
                    cur_doc.append(sent_id)
            # Reserve 3 slots for special tokens: [CLS] a [SEP] b [SEP].
            max_num_tokens = (block_size - 3)
            self.sent_pairs = []
            self.sizes = []
            for (doc_id, doc) in enumerate(self.block_indices):
                self._generate_sentence_pair(doc, doc_id, max_num_tokens, sizes)
        elif ((break_mode is None) or (break_mode == 'none')):
            # Ignore boundaries: cut the token stream into equal halves.
            # NOTE(review): self.sent_pairs / self.sizes are only
            # initialized in the 'doc' branch above, yet _pair_sentences
            # appends to them — verify 'none' mode is exercised/correct.
            sent_length = ((block_size - 3) // 2)
            total_len = sum(dataset.sizes)
            length = math.ceil((total_len / sent_length))

            def block_at(i):
                # Half-open token span of block i, clipped at the end.
                start = (i * sent_length)
                end = min((start + sent_length), total_len)
                return (start, end)
            sent_indices = np.array([block_at(i) for i in range(length)])
            sent_sizes = np.array([(e - s) for (s, e) in sent_indices])
            dataset_index = self._sent_to_dataset_index(sent_sizes)
            self._pair_sentences(dataset_index)
        else:
            raise ValueError(('Invalid break_mode: ' + break_mode))

    def _pair_sentences(self, dataset_index):
        """Pair evenly-cut blocks with 50% consecutive / 50% random blocks.

        This is used for the 'none' break mode.
        """
        for (sent_id, sent) in enumerate(dataset_index):
            # Positive (consecutive) pair with prob 0.5, except for the last
            # block which has no successor.
            next_sent_label = (1 if ((np.random.rand() > 0.5) and (sent_id != (len(dataset_index) - 1))) else 0)
            if next_sent_label:
                next_sent = dataset_index[(sent_id + 1)]
            else:
                # Negative pair: any block other than this one or its successor.
                next_sent = dataset_index[self._skip_sampling(len(dataset_index), [sent_id, (sent_id + 1)])]
            self.sent_pairs.append((sent, next_sent, next_sent_label))
            # Index 3 of each entry is the block's token length; +3 accounts
            # for the special tokens added at collate time.
            self.sizes.append(((3 + sent[3]) + next_sent[3]))

    def _sent_to_dataset_index(self, sent_sizes):
        """Build index mapping block indices to the underlying dataset indices."""
        dataset_index = []
        (ds_idx, ds_remaining) = ((-1), 0)
        for to_consume in sent_sizes:
            sent_size = to_consume
            if (ds_remaining == 0):
                ds_idx += 1
                # NOTE(review): the remaining budget is read from sent_sizes,
                # not the underlying dataset's sizes — confirm this is the
                # intended bookkeeping for the 'none' break mode.
                ds_remaining = sent_sizes[ds_idx]
            start_ds_idx = ds_idx
            start_offset = (sent_sizes[ds_idx] - ds_remaining)
            # Consume entries until this block's token budget is exhausted.
            while (to_consume > ds_remaining):
                to_consume -= ds_remaining
                ds_idx += 1
                ds_remaining = sent_sizes[ds_idx]
            ds_remaining -= to_consume
            # Entry layout: (start idx, offset within start, end idx, length).
            dataset_index.append((start_ds_idx, start_offset, ds_idx, sent_size))
        assert (ds_remaining == 0)
        assert (ds_idx == (len(self.dataset) - 1))
        return dataset_index

    def _generate_sentence_pair(self, doc, doc_id, max_num_tokens, sizes):
        """Go through a single document and generate sentence pairs from it."""
        current_chunk = []
        current_length = 0
        curr = 0
        # Occasionally target a shorter sequence (as in BERT) to reduce the
        # mismatch between pre-training and fine-tuning lengths.
        target_seq_length = max_num_tokens
        if (np.random.random() < self.short_seq_prob):
            target_seq_length = np.random.randint(2, max_num_tokens)
        while (curr < len(doc)):
            sent_id = doc[curr]
            current_chunk.append(sent_id)
            current_length = sum(sizes[current_chunk])
            if ((curr == (len(doc) - 1)) or (current_length >= target_seq_length)):
                # Split the accumulated chunk into segment A and segment B
                # at a random sentence boundary.
                a_end = 1
                if (len(current_chunk) > 2):
                    a_end = np.random.randint(1, (len(current_chunk) - 1))
                sent_a = current_chunk[:a_end]
                len_a = sum(sizes[sent_a])
                # 50% negative pairs; a single-sentence chunk forces a
                # negative pair since there is no B left in the chunk.
                next_sent_label = (1 if ((np.random.rand() > 0.5) and (len(current_chunk) != 1)) else 0)
                if (not next_sent_label):
                    # Negative pair: sample segment B from another document,
                    # starting at a random sentence.
                    target_b_length = (target_seq_length - len_a)
                    rand_doc_id = self._skip_sampling(len(self.block_indices), [doc_id])
                    random_doc = self.block_indices[rand_doc_id]
                    random_start = np.random.randint(0, len(random_doc))
                    sent_b = []
                    len_b = 0
                    for j in range(random_start, len(random_doc)):
                        sent_b.append(random_doc[j])
                        len_b = sum(sizes[sent_b])
                        if (len_b >= target_b_length):
                            break
                    # Return the unused tail of the chunk to the stream so
                    # those sentences can be reused for later pairs.
                    num_unused_segments = (len(current_chunk) - a_end)
                    curr -= num_unused_segments
                else:
                    # Positive pair: B is the remainder of the chunk.
                    sent_b = current_chunk[a_end:]
                    len_b = sum(sizes[sent_b])
                (sent_a, sent_b) = self._truncate_sentences(sent_a, sent_b, max_num_tokens)
                self.sent_pairs.append((sent_a, sent_b, next_sent_label))
                # _truncate_sentences returns (start, offset, end, length)
                # tuples; index 3 is the length, +3 for special tokens.
                self.sizes.append(((3 + sent_a[3]) + sent_b[3]))
                current_chunk = []
            curr += 1

    def _skip_sampling(self, total, skip_ids):
        """Sample a random integer from [0, total) that is not in *skip_ids*.

        TODO: ids in skip_ids should be consecutive; we can extend it to a
        more generic version later.
        """
        rand_id = np.random.randint((total - len(skip_ids)))
        # Shift any sample that lands at/after the skipped window past it.
        return (rand_id if (rand_id < min(skip_ids)) else (rand_id + len(skip_ids)))

    def _truncate_sentences(self, sent_a, sent_b, max_num_tokens):
        """Truncate a pair of sentences to keep total length <= max_num_tokens.

        Logic:
        1. always truncate the currently-longer sentence
        2. tokens are dropped from the front or the end at random
        Returns:
            truncated sentences represented by dataset index tuples
        """
        (len_a, len_b) = (sum(self.dataset.sizes[sent_a]), sum(self.dataset.sizes[sent_b]))
        front_cut_a = front_cut_b = end_cut_a = end_cut_b = 0
        while True:
            total_length = (((((len_a + len_b) - front_cut_a) - front_cut_b) - end_cut_a) - end_cut_b)
            if (total_length <= max_num_tokens):
                break
            # Trim one token at a time from whichever side is longer.
            if (((len_a - front_cut_a) - end_cut_a) > ((len_b - front_cut_b) - end_cut_b)):
                if (np.random.rand() < 0.5):
                    front_cut_a += 1
                else:
                    end_cut_a += 1
            elif (np.random.rand() < 0.5):
                front_cut_b += 1
            else:
                end_cut_b += 1
        truncated_sent_a = self._cut_sentence(sent_a, front_cut_a, end_cut_a)
        truncated_sent_b = self._cut_sentence(sent_b, front_cut_b, end_cut_b)
        return (truncated_sent_a, truncated_sent_b)

    def _cut_sentence(self, sent, front_cut, end_cut):
        """Cut *front_cut*/*end_cut* tokens off a sentence-id list.

        Returns the trimmed block as a (start_ds_idx, offset, end_ds_idx,
        target_len) tuple into the underlying dataset.
        """
        (start_ds_idx, end_ds_idx, offset) = (sent[0], sent[(-1)], 0)
        target_len = ((sum(self.dataset.sizes[sent]) - front_cut) - end_cut)
        # Drop whole leading sentences until the residual cut fits inside
        # one sentence, then record it as an offset.
        while (front_cut > 0):
            if (self.dataset.sizes[start_ds_idx] > front_cut):
                offset += front_cut
                break
            else:
                front_cut -= self.dataset.sizes[start_ds_idx]
                start_ds_idx += 1
        # Same from the end; any residual end cut is absorbed by target_len.
        while (end_cut > 0):
            if (self.dataset.sizes[end_ds_idx] > end_cut):
                break
            else:
                end_cut -= self.dataset.sizes[end_ds_idx]
                end_ds_idx -= 1
        return (start_ds_idx, offset, end_ds_idx, target_len)

    def _fetch_block(self, start_ds_idx, offset, end_ds_idx, length):
        """Fetch a block of tokens based on its dataset index tuple."""
        # Concatenate the covered sentences, then slice out the block.
        buffer = torch.cat([self.dataset[idx] for idx in range(start_ds_idx, (end_ds_idx + 1))])
        (s, e) = (offset, (offset + length))
        return buffer[s:e]

    def __getitem__(self, index):
        # Returns (tokens_a, tokens_b, next_sentence_label) for the pair.
        (block1, block2, next_sent_label) = self.sent_pairs[index]
        block1 = self._fetch_block(*block1)
        block2 = self._fetch_block(*block2)
        return (block1, block2, next_sent_label)

    def __len__(self):
        return len(self.sizes)

    @property
    def supports_prefetch(self):
        return getattr(self.dataset, 'supports_prefetch', False)

    def prefetch(self, indices):
        # Collect every underlying sentence id touched by the requested
        # pairs (entries are (start, offset, end, length) tuples).
        prefetch_idx = set()
        for index in indices:
            for (block1, block2, _) in [self.sent_pairs[index]]:
                for ds_idx in range(block1[0], (block1[2] + 1)):
                    prefetch_idx.add(ds_idx)
                for ds_idx in range(block2[0], (block2[2] + 1)):
                    prefetch_idx.add(ds_idx)
        self.dataset.prefetch(prefetch_idx)
|
class MaskedLMDataset(FairseqDataset):
    """A wrapper Dataset for masked language modelling.

    The dataset wraps around TokenBlockDataset or BlockedPairDataset and
    creates a batch where the input blocks are masked according to the
    specified masking probability. Additionally the batch can also contain
    sentence level targets if this is specified.

    Args:
        dataset: Dataset which generates blocks of data. Only BlockPairDataset
            and TokenBlockDataset are supported.
        sizes: Sentence lengths
        vocab: Dictionary with the vocabulary and special tokens.
        pad_idx: Id of padding token in dictionary
        mask_idx: Id of mask token in dictionary
        classif_token_idx: Id of classification token in dictionary. This is the
            token associated with the sentence embedding (Eg: CLS for BERT)
        sep_token_idx: Id of separator token in dictionary (Eg: SEP in BERT)
        seed: Seed for random number generator for reproducibility.
        shuffle: Shuffle the elements before batching.
        has_pairs: Specifies whether the underlying dataset generates a pair
            of blocks along with a sentence_target or not. Setting it to True
            assumes that the underlying dataset generates a label for the
            pair of sentences which is surfaced as sentence_target. The
            default value assumes a single block with no sentence target.
        segment_id: An optional segment id for filling in the segment labels
            when we are in the single block setting (Eg: XLM). Default is 0.
        masking_ratio: specifies what percentage of the blocks should be masked.
        masking_prob: specifies the probability of a given token being
            replaced with the "MASK" token.
        random_token_prob: specifies the probability of a given token being
            replaced by a random token from the vocabulary.
    """

    def __init__(self, dataset: FairseqDataset, sizes: np.ndarray, vocab: Dictionary, pad_idx: int, mask_idx: int, classif_token_idx: int, sep_token_idx: int, seed: int=1, shuffle: bool=True, has_pairs: bool=True, segment_id: int=0, masking_ratio: float=0.15, masking_prob: float=0.8, random_token_prob: float=0.1):
        assert (isinstance(dataset, TokenBlockDataset) or isinstance(dataset, BlockPairDataset) or isinstance(dataset, ConcatDataset)), 'MaskedLMDataset only wraps TokenBlockDataset or BlockPairDataset or ConcatDataset'
        self.dataset = dataset
        self.sizes = np.array(sizes)
        self.vocab = vocab
        self.pad_idx = pad_idx
        self.mask_idx = mask_idx
        self.classif_token_idx = classif_token_idx
        self.sep_token_idx = sep_token_idx
        self.shuffle = shuffle
        self.seed = seed
        self.has_pairs = has_pairs
        self.segment_id = segment_id
        self.masking_ratio = masking_ratio
        self.masking_prob = masking_prob
        self.random_token_prob = random_token_prob
        # In the single-block case, account for the classification token
        # that _collate prepends to each block.
        if (not has_pairs):
            self.sizes = (self.sizes + 1)

    def __getitem__(self, index: int):
        # Masking is deferred to _collate so it can be seeded per batch.
        if self.has_pairs:
            (block_one, block_two, sentence_target) = self.dataset[index]
        else:
            block_one = self.dataset[index]
        return {'id': index, 'block_one': block_one, 'block_two': (block_two if self.has_pairs else None), 'sentence_target': (sentence_target if self.has_pairs else None)}

    def __len__(self):
        return len(self.dataset)

    def _mask_block(self, sentence: np.ndarray, mask_idx: int, pad_idx: int, dictionary_token_range: Tuple):
        """Mask tokens for Masked Language Model training.

        Samples mask_ratio tokens that will be predicted by the LM.

        Note: this function may not be efficient enough since we had multiple
        conversions between np and torch; we can replace them with torch
        operators later.

        Args:
            sentence: 1d tensor to be masked
            mask_idx: index to use for masking the sentence
            pad_idx: index to use for masking the target for tokens we aren't
                predicting
            dictionary_token_range: range of indices in dictionary which can
                be used for random word replacement
                (e.g. without special characters)
        Return:
            masked_sent: masked sentence
            target: target with words which we are not predicting replaced
                by pad_idx
        """
        masked_sent = np.copy(sentence)
        sent_length = len(sentence)
        mask_num = math.ceil((sent_length * self.masking_ratio))
        mask = np.random.choice(sent_length, mask_num, replace=False)
        target = np.copy(sentence)
        for i in range(sent_length):
            if (i in mask):
                rand = np.random.random()
                # Replace with <mask> with prob masking_prob (e.g. 0.8),
                # with a random in-range token with prob random_token_prob
                # (e.g. 0.1), otherwise keep the original token.
                if (rand < self.masking_prob):
                    masked_sent[i] = mask_idx
                elif (rand < (self.masking_prob + self.random_token_prob)):
                    masked_sent[i] = np.random.randint(dictionary_token_range[0], dictionary_token_range[1])
            else:
                # Not a prediction position: exclude it from the LM target.
                target[i] = pad_idx
        return (masked_sent, target)

    def _collate(self, samples: List[Dict], pad_idx: int, eos_idx: int):
        """Does the heavy lifting for creating a batch from the input list of
        examples. The logic is as follows:

        1. Mask the input blocks. In case has_pair is True then we have 2
           blocks to mask.
        2. Prepend the first masked block tensor with the special token
           used as sentence embedding. Eg: CLS in BERT. This happens
           irrespective of the value of has_pair.
        3. If has_pair is True, then append the first masked block with the
           special separator token (eg: SEP for BERT) and compute segment
           label accordingly. In this case, also append the second masked
           block with this special separator token and compute its segment
           label.
        4. For the targets tensor, prepend and append with padding index
           accordingly.
        5. Concatenate all tensors.
        """
        if (len(samples) == 0):
            return {}
        # Seed per batch (keyed on the first sample id) so masking is
        # reproducible for a given global seed.
        with data_utils.numpy_seed((self.seed + samples[0]['id'])):
            for s in samples:
                # Random replacements are drawn from the non-special vocab.
                token_range = (self.vocab.nspecial, len(self.vocab))
                (masked_blk_one, masked_tgt_one) = self._mask_block(s['block_one'], self.mask_idx, self.pad_idx, token_range)
                # Prepend the classification token; it is never a target.
                tokens = np.concatenate([[self.classif_token_idx], masked_blk_one])
                targets = np.concatenate([[self.pad_idx], masked_tgt_one])
                segments = (np.ones(len(tokens)) * self.segment_id)
                if self.has_pairs:
                    # Append separators to both blocks and give them
                    # distinct segment labels (0 for A, 1 for B).
                    tokens_one = np.concatenate([tokens, [self.sep_token_idx]])
                    targets_one = np.concatenate([targets, [self.pad_idx]])
                    (masked_blk_two, masked_tgt_two) = self._mask_block(s['block_two'], self.mask_idx, self.pad_idx, token_range)
                    tokens_two = np.concatenate([masked_blk_two, [self.sep_token_idx]])
                    targets_two = np.concatenate([masked_tgt_two, [self.pad_idx]])
                    segments_one = np.zeros(len(tokens_one))
                    segments_two = np.ones(len(tokens_two))
                    tokens = np.concatenate([tokens_one, tokens_two])
                    targets = np.concatenate([targets_one, targets_two])
                    segments = np.concatenate([segments_one, segments_two])
                s['source'] = torch.LongTensor(tokens)
                s['segment_labels'] = torch.LongTensor(segments)
                s['lm_target'] = torch.LongTensor(targets)

        def merge(key):
            # Right-pad each field to the batch max length.
            return data_utils.collate_tokens([s[key] for s in samples], pad_idx, eos_idx, left_pad=False)
        return {'id': torch.LongTensor([s['id'] for s in samples]), 'ntokens': sum((len(s['source']) for s in samples)), 'net_input': {'src_tokens': merge('source'), 'segment_labels': merge('segment_labels')}, 'lm_target': merge('lm_target'), 'sentence_target': (torch.LongTensor([s['sentence_target'] for s in samples]) if self.has_pairs else None), 'nsentences': len(samples)}

    def collater(self, samples: List[Dict]):
        """Merge a list of samples to form a mini-batch.

        Args:
            samples (List[dict]): samples to collate

        Returns:
            dict: a mini-batch of data
        """
        return self._collate(samples, self.vocab.pad(), self.vocab.eos())

    def num_tokens(self, index: int):
        """Return the number of tokens in a sample. This value is used to
        enforce max-tokens during batching.
        """
        return self.sizes[index]

    def size(self, index: int):
        """Return an example's size as a float or tuple. This value is used
        when filtering a dataset with max-positions.
        """
        return self.sizes[index]

    def ordered_indices(self):
        """Return an ordered list of indices. Batches will be constructed
        based on this order.
        """
        if self.shuffle:
            return np.random.permutation(len(self))
        else:
            # Sort by size; lexsort uses the last key as the primary key,
            # with the index array breaking ties stably.
            order = [np.arange(len(self))]
            order.append(self.sizes)
            return np.lexsort(order)

    @property
    def supports_prefetch(self):
        return getattr(self.dataset, 'supports_prefetch', False)

    def prefetch(self, indices):
        self.dataset.prefetch(indices)
|
class MaskedLMDictionary(Dictionary):
    """Dictionary for Masked Language Modelling tasks.

    Extends :class:`Dictionary` by registering a mask symbol as an
    additional special token.
    """

    def __init__(self, pad='<pad>', eos='</s>', unk='<unk>', mask='<mask>'):
        super().__init__(pad, eos, unk)
        self.mask_word = mask
        self.mask_index = self.add_symbol(mask)
        # The mask token is counted among the special symbols.
        self.nspecial = len(self.symbols)

    def mask(self):
        """Return the index of the mask symbol."""
        return self.mask_index
|
class BertDictionary(MaskedLMDictionary):
    """Dictionary for the BERT task.

    Extends :class:`MaskedLMDictionary` by registering cls and sep symbols
    as additional special tokens.
    """

    def __init__(self, pad='<pad>', eos='</s>', unk='<unk>', mask='<mask>', cls='<cls>', sep='<sep>'):
        super().__init__(pad, eos, unk, mask)
        self.cls_word = cls
        self.sep_word = sep
        self.cls_index = self.add_symbol(cls)
        self.sep_index = self.add_symbol(sep)
        # cls/sep are counted among the special symbols.
        self.nspecial = len(self.symbols)

    def cls(self):
        """Return the index of the cls symbol."""
        return self.cls_index

    def sep(self):
        """Return the index of the sep symbol."""
        return self.sep_index
|
class ListDataset(BaseWrapperDataset):
    """Dataset wrapper that returns samples uncollated, with optional
    externally-supplied sizes.
    """

    def __init__(self, dataset, sizes=None):
        super().__init__(dataset)
        self._sizes = sizes

    def collater(self, samples):
        # No batching: hand back the list of samples untouched.
        return samples

    @property
    def sizes(self):
        return self._sizes

    def num_tokens(self, index):
        return self._sizes[index]

    def size(self, index):
        return self._sizes[index]

    def set_epoch(self, epoch):
        # Epoch changes have no effect on a static list.
        pass
|
class LRUCacheDataset(BaseWrapperDataset):
    """Dataset wrapper that memoizes the most recent item and collater calls.

    NOTE(review): the *token* constructor argument is accepted but never
    used — presumably kept for signature compatibility; confirm before
    removing.
    """

    def __init__(self, dataset, token=None):
        super().__init__(dataset)

    @lru_cache(maxsize=8)
    def __getitem__(self, index):
        # Bounded 8-entry cache. NOTE(review): lru_cache on an instance
        # method keys on `self` and keeps instances alive for the cache's
        # lifetime; here it appears intentional so that paired wrappers
        # built in MaskTokensDataset.apply_mask observe the same item for
        # a given index — confirm before changing.
        return self.dataset[index]

    @lru_cache(maxsize=8)
    def collater(self, samples):
        # Caching requires *samples* to be hashable (e.g. a tuple).
        return self.dataset.collater(samples)
|
class MaskTokensDataset(BaseWrapperDataset):
    """A wrapper Dataset for masked language modeling.

    Input items are masked according to the specified masking probability.

    Args:
        dataset: Dataset to wrap.
        sizes: Sentence lengths
        vocab: Dictionary with the vocabulary and special tokens.
        pad_idx: Id of pad token in vocab
        mask_idx: Id of mask token in vocab
        return_masked_tokens: controls whether to return the non-masked tokens
            (the default) or to return a tensor with the original masked token
            IDs (and *pad_idx* elsewhere). The latter is useful as targets for
            masked LM training.
        seed: Seed for random number generator for reproducibility.
        mask_prob: probability of replacing a token with *mask_idx*.
        leave_unmasked_prob: probability that a masked token is unmasked.
        random_token_prob: probability of replacing a masked token with a
            random token from the vocabulary.
        freq_weighted_replacement: sample random replacement words based on
            word frequencies in the vocab.
        mask_whole_words: only mask whole words. This should be a byte mask
            over vocab indices, indicating whether it is the beginning of a
            word. We will extend any mask to encompass the whole word.
        bpe: BPE to use for whole-word masking.
    """

    @classmethod
    def apply_mask(cls, dataset: torch.utils.data.Dataset, *args, **kwargs):
        """Return the source and target datasets for masked LM training."""
        # Share a single LRU-cached view of the dataset so the source and
        # target wrappers fetch the same underlying item per index.
        dataset = LRUCacheDataset(dataset)
        return (LRUCacheDataset(cls(dataset, *args, **kwargs, return_masked_tokens=False)), LRUCacheDataset(cls(dataset, *args, **kwargs, return_masked_tokens=True)))

    def __init__(self, dataset: torch.utils.data.Dataset, vocab: Dictionary, pad_idx: int, mask_idx: int, return_masked_tokens: bool=False, seed: int=1, mask_prob: float=0.15, leave_unmasked_prob: float=0.1, random_token_prob: float=0.1, freq_weighted_replacement: bool=False, mask_whole_words: torch.Tensor=None):
        # Sanity-check the probability configuration up front.
        assert (0.0 < mask_prob < 1.0)
        assert (0.0 <= random_token_prob <= 1.0)
        assert (0.0 <= leave_unmasked_prob <= 1.0)
        assert ((random_token_prob + leave_unmasked_prob) <= 1.0)
        self.dataset = dataset
        self.vocab = vocab
        self.pad_idx = pad_idx
        self.mask_idx = mask_idx
        self.return_masked_tokens = return_masked_tokens
        self.seed = seed
        self.mask_prob = mask_prob
        self.leave_unmasked_prob = leave_unmasked_prob
        self.random_token_prob = random_token_prob
        self.mask_whole_words = mask_whole_words
        if (random_token_prob > 0.0):
            # Sampling distribution for random replacements: frequency
            # weighted or uniform, with special symbols excluded.
            if freq_weighted_replacement:
                weights = np.array(self.vocab.count)
            else:
                weights = np.ones(len(self.vocab))
            weights[:self.vocab.nspecial] = 0
            self.weights = (weights / weights.sum())
        self.epoch = 0

    def set_epoch(self, epoch, **unused):
        # Stored so masking is re-randomized (deterministically) per epoch.
        self.epoch = epoch

    @lru_cache(maxsize=8)
    def __getitem__(self, index: int):
        # Seed on (seed, epoch, index) so the source and target wrappers
        # produced by apply_mask() draw identical masks for the same item.
        with data_utils.numpy_seed(self.seed, self.epoch, index):
            item = self.dataset[index]
            sz = len(item)
            assert (self.mask_idx not in item), 'Dataset contains mask_idx (={}), this is not expected!'.format(self.mask_idx)
            if (self.mask_whole_words is not None):
                # Whole-word masking: make decisions per word, then expand
                # them back to token positions via word_lens.
                word_begins_mask = self.mask_whole_words.gather(0, item)
                word_begins_idx = word_begins_mask.nonzero().view((-1))
                sz = len(word_begins_idx)
                words = np.split(word_begins_mask, word_begins_idx)[1:]
                assert (len(words) == sz)
                word_lens = list(map(len, words))
            # Decide which positions (or words) to mask; adding a uniform
            # random term stochastically rounds mask_prob * sz.
            mask = np.full(sz, False)
            num_mask = int(((self.mask_prob * sz) + np.random.rand()))
            mask[np.random.choice(sz, num_mask, replace=False)] = True
            if self.return_masked_tokens:
                # Target view: original ids at masked positions, pad elsewhere.
                if (self.mask_whole_words is not None):
                    mask = np.repeat(mask, word_lens)
                new_item = np.full(len(mask), self.pad_idx)
                new_item[mask] = item[(torch.from_numpy(mask.astype(np.uint8)) == 1)]
                return torch.from_numpy(new_item)
            # Source view: among masked positions, decide which are left
            # unmasked or replaced by a random token instead of <mask>.
            rand_or_unmask_prob = (self.random_token_prob + self.leave_unmasked_prob)
            if (rand_or_unmask_prob > 0.0):
                rand_or_unmask = (mask & (np.random.rand(sz) < rand_or_unmask_prob))
                if (self.random_token_prob == 0.0):
                    unmask = rand_or_unmask
                    rand_mask = None
                elif (self.leave_unmasked_prob == 0.0):
                    unmask = None
                    rand_mask = rand_or_unmask
                else:
                    # Split rand_or_unmask between the two outcomes in
                    # proportion to their configured probabilities.
                    unmask_prob = (self.leave_unmasked_prob / rand_or_unmask_prob)
                    decision = (np.random.rand(sz) < unmask_prob)
                    unmask = (rand_or_unmask & decision)
                    rand_mask = (rand_or_unmask & (~ decision))
            else:
                unmask = rand_mask = None
            if (unmask is not None):
                # Remove the leave-unmasked positions from the mask.
                mask = (mask ^ unmask)
            if (self.mask_whole_words is not None):
                mask = np.repeat(mask, word_lens)
            new_item = np.copy(item)
            new_item[mask] = self.mask_idx
            if (rand_mask is not None):
                num_rand = rand_mask.sum()
                if (num_rand > 0):
                    if (self.mask_whole_words is not None):
                        rand_mask = np.repeat(rand_mask, word_lens)
                        num_rand = rand_mask.sum()
                    new_item[rand_mask] = np.random.choice(len(self.vocab), num_rand, p=self.weights)
            return torch.from_numpy(new_item)
|
def collate(samples, pad_idx, eos_idx):
    """Collate a list of monolingual samples into a mini-batch dict.

    Each sample is a dict with 'id', 'source' and 'target' entries; targets
    may be a single tensor, a list of tensors, or None (in which case the
    padded source tokens are reused as the target).
    """
    if not samples:
        return {}

    def merge(key, is_list=False):
        # Right-pad each field to the batch max length; a list-valued field
        # is merged position-wise into a list of padded tensors.
        if is_list:
            return [
                data_utils.collate_tokens([s[key][i] for s in samples], pad_idx, eos_idx, left_pad=False)
                for i in range(len(samples[0][key]))
            ]
        return data_utils.collate_tokens([s[key] for s in samples], pad_idx, eos_idx, left_pad=False)

    src_tokens = merge('source')
    first_target = samples[0]['target']
    if first_target is not None:
        target = merge('target', is_list=isinstance(first_target, list))
    else:
        target = src_tokens

    return {
        'id': torch.LongTensor([s['id'] for s in samples]),
        'nsentences': len(samples),
        'ntokens': sum(len(s['source']) for s in samples),
        'net_input': {
            'src_tokens': src_tokens,
            'src_lengths': torch.LongTensor([s['source'].numel() for s in samples]),
        },
        'target': target,
    }
|
class MonolingualDataset(FairseqDataset):
    """A wrapper around torch.utils.data.Dataset for monolingual data.

    Args:
        dataset (torch.utils.data.Dataset): dataset to wrap
        sizes (List[int]): sentence lengths
        src_vocab (~fairseq.data.Dictionary): source vocabulary
        tgt_vocab (~fairseq.data.Dictionary): target vocabulary
        add_eos_for_other_targets (bool): append EOS to the source when the
            'self' or 'past' targets require it
        shuffle (bool): shuffle the elements before batching
        targets (List[str], optional): subset of {'self', 'future', 'past'}
            selecting which target sequences to produce (default: None).
        add_bos_token (bool, optional): prepend BOS to source (and target)
            (default: False).
    """

    def __init__(self, dataset, sizes, src_vocab, tgt_vocab, add_eos_for_other_targets, shuffle, targets=None, add_bos_token=False):
        self.dataset = dataset
        self.sizes = np.array(sizes)
        # note: the *source* vocabulary is stored under the generic name `vocab`
        self.vocab = src_vocab
        self.tgt_vocab = tgt_vocab
        self.add_eos_for_other_targets = add_eos_for_other_targets
        self.shuffle = shuffle
        self.add_bos_token = add_bos_token
        assert ((targets is None) or all(((t in {'self', 'future', 'past'}) for t in targets))), "targets must be none or one of 'self', 'future', 'past'"
        # normalize an empty target list to None so later checks are uniform
        if ((targets is not None) and (len(targets) == 0)):
            targets = None
        self.targets = targets

    def __getitem__(self, index):
        if (self.targets is not None):
            # underlying dataset yields (source, future_target, past_target)
            (source, future_target, past_target) = self.dataset[index]
            (source, target) = self._make_source_target(source, future_target, past_target)
        else:
            source = self.dataset[index]
            target = None
        (source, target) = self._maybe_add_bos(source, target)
        return {'id': index, 'source': source, 'target': target}

    def __len__(self):
        return len(self.dataset)

    def _make_source_target(self, source, future_target, past_target):
        """Assemble the requested target(s); returns (source, target) where
        target is a list when more than one target kind is configured."""
        if (self.targets is not None):
            target = []
            if (self.add_eos_for_other_targets and (('self' in self.targets) or ('past' in self.targets)) and (source[(- 1)] != self.vocab.eos())):
                # lengthen source with EOS so 'self'/'past' targets align
                source = torch.cat([source, source.new([self.vocab.eos()])])
                if ('future' in self.targets):
                    # pad future target to match the lengthened source
                    future_target = torch.cat([future_target, future_target.new([self.vocab.pad()])])
                if ('past' in self.targets):
                    # shift past target; source[-2, None] picks the pre-EOS
                    # token as a 1-element tensor
                    past_target = torch.cat([past_target.new([self.vocab.pad()]), past_target[1:], source[((- 2), None)]])
            for t in self.targets:
                if (t == 'self'):
                    target.append(source)
                elif (t == 'future'):
                    target.append(future_target)
                elif (t == 'past'):
                    target.append(past_target)
                else:
                    raise Exception(('invalid target ' + t))
            if (len(target) == 1):
                target = target[0]
        else:
            # no targets configured: default to next-token (future) target
            target = future_target
        return (source, self._filter_vocab(target))

    def _maybe_add_bos(self, source, target):
        # prepend BOS from the respective vocabularies when configured
        if self.add_bos_token:
            source = torch.cat([source.new([self.vocab.bos()]), source])
            if (target is not None):
                target = torch.cat([target.new([self.tgt_vocab.bos()]), target])
        return (source, target)

    def _filter_vocab(self, target):
        """Replace target tokens that fall outside the target vocabulary with
        its unk symbol (only needed when the two vocabs differ in size)."""
        if (len(self.tgt_vocab) != len(self.vocab)):
            def _filter(target):
                mask = target.ge(len(self.tgt_vocab))
                if mask.any():
                    target[mask] = self.tgt_vocab.unk()
                return target
            if isinstance(target, list):
                return [_filter(t) for t in target]
            return _filter(target)
        return target

    def collater(self, samples):
        """Merge a list of samples to form a mini-batch.

        Args:
            samples (List[dict]): samples to collate

        Returns:
            dict: a mini-batch with keys `id`, `nsentences`, `ntokens`,
            `net_input` (containing right-padded `src_tokens` and
            `src_lengths`) and right-padded `target`.
        """
        return collate(samples, self.vocab.pad(), self.vocab.eos())

    def num_tokens(self, index):
        """Return the number of tokens in a sample. This value is used to
        enforce ``--max-tokens`` during batching."""
        return self.sizes[index]

    def size(self, index):
        """Return an example's size as a float or tuple. This value is used when
        filtering a dataset with ``--max-positions``."""
        return self.sizes[index]

    def ordered_indices(self):
        """Return an ordered list of indices. Batches will be constructed based
        on this order."""
        if self.shuffle:
            order = [np.random.permutation(len(self))]
        else:
            order = [np.arange(len(self))]
        # secondary sort key: sizes, so batches contain similar-length examples
        order.append(self.sizes)
        return np.lexsort(order)

    @property
    def supports_prefetch(self):
        return getattr(self.dataset, 'supports_prefetch', False)

    def prefetch(self, indices):
        self.dataset.prefetch(indices)
|
def uniform_sampler(x):
    """Pick a single element of *x* uniformly at random (numpy RNG)."""
    sampled = np.random.choice(x, 1)
    return sampled.item()
|
class MultiCorpusSampledDataset(FairseqDataset):
    """
    Stores multiple instances of FairseqDataset together and in every iteration
    creates a batch by first sampling a dataset according to a specified
    probability distribution and then getting instances from that dataset.

    Args:
        datasets: an OrderedDict of FairseqDataset instances.
        sampling_func: A function for sampling over list of dataset keys.
            Default strategy is to sample uniformly.
    """

    def __init__(self, datasets: Dict[(str, FairseqDataset)], sampling_func: Callable[([List], int)]=None):
        super().__init__()
        # an OrderedDict is required so iteration order is deterministic
        assert isinstance(datasets, OrderedDict)
        self.datasets = datasets
        if (sampling_func is None):
            sampling_func = uniform_sampler
        self.sampling_func = sampling_func
        self.total_num_instances = 0
        for (_, dataset) in datasets.items():
            assert isinstance(dataset, FairseqDataset)
            self.total_num_instances += dataset.__len__()
        # filled lazily by ordered_indices(); required before index mapping
        self._ordered_indices = None

    def __len__(self):
        """
        Length of this dataset is the sum of individual datasets
        """
        return self.total_num_instances

    def ordered_indices(self):
        """
        Ordered indices for batching. Here we call the underlying
        dataset's ordered_indices() so that we get the same random ordering
        as we would have from using the underlying dataset directly.
        """
        if (self._ordered_indices is None):
            self._ordered_indices = OrderedDict([(key, dataset.ordered_indices()) for (key, dataset) in self.datasets.items()])
        return np.arange(len(self))

    def _map_index_to_dataset(self, key: int, index: int):
        """
        Different underlying datasets have different lengths. In order to ensure
        we are not accessing an index outside the range of the current dataset
        size, we wrap around. This function should be called after we have
        created an ordering for this and all underlying datasets.
        """
        assert (self._ordered_indices is not None), 'Must call MultiCorpusSampledDataset.ordered_indices() first'
        # wrap around: shorter datasets are cycled to cover the full range
        mapped_index = (index % len(self.datasets[key]))
        return self._ordered_indices[key][mapped_index]

    def __getitem__(self, index: int):
        """
        Get the item associated with index from each underlying dataset.
        Since index is in the range of [0, TotalNumInstances], we need to
        map the index to the dataset before retrieving the item.
        """
        return OrderedDict([(key, dataset[self._map_index_to_dataset(key, index)]) for (key, dataset) in self.datasets.items()])

    def collater(self, samples: List[Dict]):
        """
        Generate a mini-batch for this dataset.
        To convert this into a regular mini-batch we use the following
        logic:
        1. Select a dataset using the specified probability distribution.
        2. Call the collater function of the selected dataset.
        """
        if (len(samples) == 0):
            return None
        # one dataset is sampled per batch; the other keys are discarded
        selected_key = self.sampling_func(list(self.datasets.keys()))
        selected_samples = [sample[selected_key] for sample in samples]
        return self.datasets[selected_key].collater(selected_samples)

    def num_tokens(self, index: int):
        """
        Return an example's length (number of tokens), used for batching. Here
        we return the max across all examples at index across all underlying
        datasets.
        """
        return max((dataset.num_tokens(self._map_index_to_dataset(key, index)) for (key, dataset) in self.datasets.items()))

    def size(self, index: int):
        """
        Return an example's size as a float or tuple. Here we return the max
        across all underlying datasets. This value is used when filtering a
        dataset with max-positions.
        """
        return max((dataset.size(self._map_index_to_dataset(key, index)) for (key, dataset) in self.datasets.items()))

    @property
    def supports_prefetch(self):
        # prefetch only if every wrapped dataset supports it
        return all((getattr(dataset, 'supports_prefetch', False) for dataset in self.datasets.values()))

    def prefetch(self, indices):
        for (key, dataset) in self.datasets.items():
            dataset.prefetch([self._map_index_to_dataset(key, index) for index in indices])
|
def _flatten(dico, prefix=None):
'Flatten a nested dictionary.'
new_dico = OrderedDict()
if isinstance(dico, dict):
prefix = ((prefix + '.') if (prefix is not None) else '')
for (k, v) in dico.items():
if (v is None):
continue
new_dico.update(_flatten(v, (prefix + k)))
elif isinstance(dico, list):
for (i, v) in enumerate(dico):
new_dico.update(_flatten(v, (((prefix + '.[') + str(i)) + ']')))
else:
new_dico = OrderedDict({prefix: dico})
return new_dico
|
def _unflatten(dico):
'Unflatten a flattened dictionary into a nested dictionary.'
new_dico = OrderedDict()
for (full_k, v) in dico.items():
full_k = full_k.split('.')
node = new_dico
for k in full_k[:(- 1)]:
if (k.startswith('[') and k.endswith(']')):
k = int(k[1:(- 1)])
if (k not in node):
node[k] = OrderedDict()
node = node[k]
node[full_k[(- 1)]] = v
return new_dico
|
class NestedDictionaryDataset(FairseqDataset):
    """Combines a nested dict/list of datasets into one dataset.

    Items are OrderedDicts keyed by the flattened path of each sub-dataset;
    collating re-nests the structure.

    Args:
        defn: nested dict/list whose leaves are datasets of equal length
        sizes: one size array, or a list/tuple of size arrays
    """

    def __init__(self, defn, sizes=None):
        super().__init__()
        self.defn = _flatten(defn)
        # normalize to a list of size arrays
        self.sizes = ([sizes] if (not isinstance(sizes, (list, tuple))) else sizes)
        first = None
        for v in self.defn.values():
            if (not isinstance(v, (FairseqDataset, torch.utils.data.Dataset))):
                raise ValueError('Expected Dataset but found: {}'.format(v.__class__))
            first = (first or v)
            # all non-empty sub-datasets must agree on length
            if (len(v) > 0):
                assert (len(v) == len(first)), 'dataset lengths must match'
        self._len = len(first)

    def __getitem__(self, index):
        return OrderedDict(((k, ds[index]) for (k, ds) in self.defn.items()))

    def __len__(self):
        return self._len

    def collater(self, samples):
        """Merge a list of samples to form a mini-batch.

        Args:
            samples (List[dict]): samples to collate

        Returns:
            dict: a mini-batch suitable for forwarding with a Model
        """
        if (len(samples) == 0):
            return {}
        sample = OrderedDict()
        for (k, ds) in self.defn.items():
            try:
                sample[k] = ds.collater([s[k] for s in samples])
            except NotImplementedError:
                # fall back to torch's default collation for plain datasets
                sample[k] = default_collate([s[k] for s in samples])
        # re-nest the flattened keys into the original structure
        return _unflatten(sample)

    def num_tokens(self, index):
        """Return the number of tokens in a sample. This value is used to
        enforce ``--max-tokens`` during batching."""
        return max((s[index] for s in self.sizes))

    def size(self, index):
        """Return an example's size as a float or tuple. This value is used when
        filtering a dataset with ``--max-positions``."""
        if (len(self.sizes) == 1):
            return self.sizes[0][index]
        else:
            # NOTE(review): returns a generator, not a tuple — callers
            # presumably iterate it once; confirm this is intended.
            return (s[index] for s in self.sizes)

    @property
    def supports_prefetch(self):
        """Whether this dataset supports prefetching."""
        return any((ds.supports_prefetch for ds in self.defn.values()))

    def prefetch(self, indices):
        """Prefetch the data required for this epoch."""
        for ds in self.defn.values():
            if getattr(ds, 'supports_prefetch', False):
                ds.prefetch(indices)

    def set_epoch(self, epoch):
        super().set_epoch(epoch)
        # propagate the epoch to every sub-dataset
        for ds in self.defn.values():
            ds.set_epoch(epoch)
|
class NumSamplesDataset(FairseqDataset):
    """Dataset whose every item is 1; collating sums them, yielding the
    number of samples in a batch."""

    def __getitem__(self, index):
        return 1

    def __len__(self):
        # NOTE(review): length is reported as 0 — presumably this dataset is
        # only ever nested inside others that define the real length; confirm.
        return 0

    def collater(self, samples):
        return sum(samples)
|
class NumelDataset(BaseWrapperDataset):
    """Maps each item of the wrapped dataset to its number of elements.

    Args:
        dataset (~torch.utils.data.Dataset): dataset to wrap
        reduce (bool): if True, collating sums the counts; otherwise a
            tensor of per-item counts is returned.
    """

    def __init__(self, dataset, reduce=False):
        super().__init__(dataset)
        self.reduce = reduce

    def __getitem__(self, index):
        item = self.dataset[index]
        # tensors and plain arrays/sequences are both supported
        return torch.numel(item) if torch.is_tensor(item) else np.size(item)

    def __len__(self):
        return len(self.dataset)

    def collater(self, samples):
        return sum(samples) if self.reduce else torch.tensor(samples)
|
class OffsetTokensDataset(BaseWrapperDataset):
    """Adds a constant *offset* to every item of the wrapped dataset."""

    def __init__(self, dataset, offset):
        super().__init__(dataset)
        self.offset = offset

    def __getitem__(self, idx):
        item = self.dataset[idx]
        return item + self.offset
|
class PadDataset(BaseWrapperDataset):
    """Collates samples into a padded 2D batch using *pad_idx*.

    Args:
        dataset (~torch.utils.data.Dataset): dataset to wrap
        pad_idx (int): padding symbol index
        left_pad (bool): pad on the left instead of the right
    """

    def __init__(self, dataset, pad_idx, left_pad):
        super().__init__(dataset)
        self.pad_idx = pad_idx
        self.left_pad = left_pad

    def collater(self, samples):
        return data_utils.collate_tokens(samples, self.pad_idx, left_pad=self.left_pad)
|
class LeftPadDataset(PadDataset):
    """PadDataset fixed to left padding."""

    def __init__(self, dataset, pad_idx):
        super().__init__(dataset, pad_idx, left_pad=True)
|
class RightPadDataset(PadDataset):
    """PadDataset fixed to right padding."""

    def __init__(self, dataset, pad_idx):
        super().__init__(dataset, pad_idx, left_pad=False)
|
class PlasmaArray(object):
    """
    Wrapper around numpy arrays that automatically moves the data to shared
    memory upon serialization. This is particularly helpful when passing numpy
    arrays through multiprocessing, so that data is not unnecessarily
    duplicated or pickled.
    """

    def __init__(self, array):
        super().__init__()
        self.array = array
        # skip plasma entirely for arrays under 128 MB (134217728 bytes)
        self.disable = (array.nbytes < 134217728)
        self.object_id = None
        self.path = None
        self._client = None
        self._server = None
        self._server_tmp = None
        self._plasma = None

    @property
    def plasma(self):
        # lazily import pyarrow.plasma; None if unavailable or disabled
        if ((self._plasma is None) and (not self.disable)):
            try:
                import pyarrow.plasma as plasma
                self._plasma = plasma
            except ImportError:
                self._plasma = None
        return self._plasma

    def start_server(self):
        """Launch a plasma_store subprocess backing a unix socket (idempotent)."""
        if ((self.plasma is None) or (self._server is not None)):
            return
        assert (self.object_id is None)
        assert (self.path is None)
        # the temp file's name doubles as the plasma store socket path
        self._server_tmp = tempfile.NamedTemporaryFile()
        self.path = self._server_tmp.name
        # size the store slightly above the array (5% headroom)
        self._server = subprocess.Popen(['plasma_store', '-m', str(int((1.05 * self.array.nbytes))), '-s', self.path])

    @property
    def client(self):
        # lazily connect to the plasma store at self.path
        if (self._client is None):
            assert (self.path is not None)
            self._client = self.plasma.connect(self.path)
        return self._client

    def __getstate__(self):
        """On pickling, push the array into plasma and strip process handles."""
        if (self.plasma is None):
            return self.__dict__
        if (self.object_id is None):
            self.start_server()
            self.object_id = self.client.put(self.array)
        state = self.__dict__.copy()
        # the array lives in shared memory now; don't pickle it
        del state['array']
        # handles cannot cross process boundaries; recreated lazily
        state['_client'] = None
        state['_server'] = None
        state['_server_tmp'] = None
        state['_plasma'] = None
        return state

    def __setstate__(self, state):
        """On unpickling, fetch the array back out of the plasma store."""
        self.__dict__.update(state)
        if (self.plasma is None):
            return
        self.array = self.client.get(self.object_id)

    def __del__(self):
        # kill the plasma_store subprocess and remove its socket file
        if (self._server is not None):
            self._server.kill()
            self._server = None
            self._server_tmp.close()
            self._server_tmp = None
|
class PrependDataset(BaseWrapperDataset):
    """Overwrites the first token of each item with a value produced by
    *prepend_getter*.

    Args:
        dataset (~torch.utils.data.Dataset): dataset to wrap
        prepend_getter (callable): (dataset, idx) -> int token to write
        ensure_first_token_is (int, optional): assert the current first token
            equals this value before overwriting.
    """

    def __init__(self, dataset, prepend_getter, ensure_first_token_is=None):
        super().__init__(dataset)
        self.prepend_getter = prepend_getter
        self.ensure_first_token = ensure_first_token_is

    def __getitem__(self, idx):
        item = self.dataset[idx]
        if isinstance(item, tuple):
            (src, rest) = (item[0], item[1:])
        else:
            (src, rest) = (item, None)
        if (self.ensure_first_token is not None):
            assert (src[0] == self.ensure_first_token)
        new_first = self.prepend_getter(self.dataset, idx)
        assert isinstance(new_first, int)
        # NOTE(review): mutates the item in place rather than copying — confirm
        src[0] = new_first
        return (tuple(((src,) + rest)) if (rest is not None) else src)
|
class PrependTokenDataset(BaseWrapperDataset):
    """Prepends *token* (when given) to every item; all reported sizes grow
    by one accordingly."""

    def __init__(self, dataset, token=None):
        super().__init__(dataset)
        self.token = token
        if (token is None):
            self._sizes = dataset.sizes
        else:
            self._sizes = (np.array(dataset.sizes) + 1)

    def __getitem__(self, idx):
        item = self.dataset[idx]
        if (self.token is None):
            return item
        return torch.cat([item.new([self.token]), item])

    @property
    def sizes(self):
        return self._sizes

    def num_tokens(self, index):
        extra = (0 if (self.token is None) else 1)
        return (self.dataset.num_tokens(index) + extra)

    def size(self, index):
        extra = (0 if (self.token is None) else 1)
        return (self.dataset.size(index) + extra)
|
class RawLabelDataset(FairseqDataset):
    """Exposes a plain sequence of labels as a FairseqDataset; collating
    stacks them into a tensor."""

    def __init__(self, labels):
        super().__init__()
        self.labels = labels

    def __getitem__(self, index):
        return self.labels[index]

    def __len__(self):
        return len(self.labels)

    def collater(self, samples):
        return torch.tensor(samples)
|
class ReplaceDataset(BaseWrapperDataset):
    """Replaces tokens found in the dataset by a specified replacement token.

    Args:
        dataset (~torch.utils.data.Dataset): dataset to replace tokens in
        replace_map (Dict[int, int]): token to replace -> replacement token
        offsets (List[int]): do not replace tokens before (from left if pos,
            right if neg) this offset. Should contain one entry per object
            returned by the underlying dataset's __getitem__.
    """

    def __init__(self, dataset, replace_map, offsets):
        super().__init__(dataset)
        assert (len(replace_map) > 0)
        self.replace_map = replace_map
        self.offsets = offsets

    def __getitem__(self, index):
        item = self.dataset[index]
        was_tuple = isinstance(item, tuple)
        sources = (item if was_tuple else [item])
        for (offset, source) in zip(self.offsets, sources):
            # slicing yields a view, so masked_fill_ edits `source` in place
            protected = (source[offset:] if (offset >= 0) else source[:offset])
            for (old, new) in self.replace_map.items():
                protected.masked_fill_((protected == old), new)
        return (sources if was_tuple else sources[0])
|
class ResamplingDataset(BaseWrapperDataset):
    """Randomly samples from a given dataset at each epoch.

    Sampling is done with or without replacement, depending on the "replace"
    parameter.

    Optionally, the epoch size can be rescaled. This is potentially desirable
    to increase per-epoch coverage of the base dataset (since sampling with
    replacement means that many items in the dataset will be left out). In the
    case of sampling without replacement, size_ratio should be strictly less
    than 1.

    Args:
        dataset (~torch.utils.data.Dataset): dataset on which to sample.
        weights (List[float]): list of probability weights
            (default: None, which corresponds to uniform sampling).
        replace (bool): sampling mode; True for "with replacement", or False
            for "without replacement" (default: True)
        size_ratio (float): the ratio to subsample to; must be positive
            (default: 1.0).
        batch_by_size (bool): whether or not to batch by sequence length
            (default: True).
        seed (int): RNG seed to use (default: 0).
        epoch (int): starting epoch number (default: 0).
    """

    def __init__(self, dataset, weights=None, replace=True, size_ratio=1.0, batch_by_size=True, seed=0, epoch=0):
        super().__init__(dataset)
        if (weights is None):
            self.weights = None
        else:
            assert (len(weights) == len(dataset))
            weights_arr = np.array(weights, dtype=np.float64)
            # normalize to a probability distribution
            weights_arr /= weights_arr.sum()
            # stored in shared memory so workers don't duplicate it
            self.weights = plasma_utils.PlasmaArray(weights_arr)
        self.replace = replace
        assert (size_ratio > 0.0)
        if (not self.replace):
            # without replacement we cannot sample more than the dataset holds
            assert (size_ratio < 1.0)
        self.size_ratio = float(size_ratio)
        self.actual_size = np.ceil((len(dataset) * self.size_ratio)).astype(int)
        self.batch_by_size = batch_by_size
        self.seed = seed
        self._cur_epoch = None
        self._cur_indices = None
        # draws the initial sample of indices
        self.set_epoch(epoch)

    def __getitem__(self, index):
        return self.dataset[self._cur_indices.array[index]]

    def __len__(self):
        return self.actual_size

    @property
    def sizes(self):
        if isinstance(self.dataset.sizes, list):
            return [s[self._cur_indices.array] for s in self.dataset.sizes]
        return self.dataset.sizes[self._cur_indices.array]

    def num_tokens(self, index):
        return self.dataset.num_tokens(self._cur_indices.array[index])

    def size(self, index):
        return self.dataset.size(self._cur_indices.array[index])

    def ordered_indices(self):
        if self.batch_by_size:
            # sort primarily by size so batches group similar lengths
            order = [np.arange(len(self)), self.sizes]
            return np.lexsort(order)
        else:
            return np.arange(len(self))

    def prefetch(self, indices):
        self.dataset.prefetch(self._cur_indices.array[indices])

    def set_epoch(self, epoch):
        """Resample the epoch's indices; deterministic given (seed, epoch)."""
        super().set_epoch(epoch)
        if (epoch == self._cur_epoch):
            return
        self._cur_epoch = epoch
        # seed sequence mixes a constant, the user seed, and the epoch so each
        # epoch draws a different but reproducible sample
        rng = np.random.RandomState([42, (self.seed % (2 ** 32)), self._cur_epoch])
        self._cur_indices = plasma_utils.PlasmaArray(rng.choice(len(self.dataset), self.actual_size, replace=self.replace, p=(None if (self.weights is None) else self.weights.array)))
|
class RoundRobinZipDatasets(FairseqDataset):
    """Zip multiple :class:`~fairseq.data.FairseqDataset` instances together.

    Shorter datasets are repeated in a round-robin fashion to match the length
    of the longest one.

    Args:
        datasets (Dict[~fairseq.data.FairseqDataset]): a dictionary of
            :class:`~fairseq.data.FairseqDataset` instances.
        eval_key (str, optional): a key used at evaluation time that causes
            this instance to pass-through batches from *datasets[eval_key]*.
    """

    def __init__(self, datasets, eval_key=None):
        super().__init__()
        # OrderedDict required for deterministic key iteration
        assert isinstance(datasets, OrderedDict)
        self.datasets = datasets
        self.eval_key = eval_key
        # track the longest dataset; it defines this dataset's length
        self.longest_dataset = None
        self.longest_dataset_key = None
        for (key, dataset) in datasets.items():
            assert isinstance(dataset, FairseqDataset)
            if ((self.longest_dataset is None) or (len(dataset) > len(self.longest_dataset))):
                self.longest_dataset = dataset
                self.longest_dataset_key = key
        # populated lazily by ordered_indices()
        self._ordered_indices = None

    def _map_index(self, key, index):
        # wrap shorter datasets round-robin via modulo
        assert (self._ordered_indices is not None), 'Must call RoundRobinZipDatasets.ordered_indices() first'
        return self._ordered_indices[key][(index % len(self.datasets[key]))]

    def __getitem__(self, index):
        if (self.eval_key is None):
            return OrderedDict([(key, dataset[self._map_index(key, index)]) for (key, dataset) in self.datasets.items()])
        else:
            # at evaluation time, pass through only the eval_key dataset
            return self.datasets[self.eval_key][self._map_index(self.eval_key, index)]

    def __len__(self):
        return len(self.longest_dataset)

    def collater(self, samples):
        """Merge a list of samples to form a mini-batch."""
        if (len(samples) == 0):
            return None
        if (self.eval_key is None):
            return OrderedDict([(key, dataset.collater([sample[key] for sample in samples])) for (key, dataset) in self.datasets.items()])
        else:
            # samples are already from the eval_key dataset (see __getitem__)
            return self.datasets[self.eval_key].collater(samples)

    def num_tokens(self, index):
        """Return an example's length (number of tokens), used for batching."""
        return max((dataset.num_tokens(self._map_index(key, index)) for (key, dataset) in self.datasets.items()))

    def size(self, index):
        """Return an example's size as a float or tuple. This value is used when
        filtering a dataset with ``--max-positions``."""
        return {key: dataset.size(self._map_index(key, index)) for (key, dataset) in self.datasets.items()}

    def ordered_indices(self):
        """Ordered indices for batching."""
        if (self._ordered_indices is None):
            self._ordered_indices = OrderedDict([(key, dataset.ordered_indices()) for (key, dataset) in self.datasets.items()])
        return np.arange(len(self))

    @property
    def supports_prefetch(self):
        return all((getattr(dataset, 'supports_prefetch', False) for dataset in self.datasets.values()))

    def prefetch(self, indices):
        for (key, dataset) in self.datasets.items():
            dataset.prefetch([self._map_index(key, index) for index in indices])
|
class ShardedDataset(BaseWrapperDataset):
    """Loads a dataset which has been sharded into multiple files
    (``shard0``, ``shard1``, ... subdirectories of *path*).

    For the train split, one shard is picked pseudo-randomly per epoch
    (deterministic given seed and epoch); other splits load *path/split*
    directly.
    """

    def __init__(self, dictionary, dataset_impl: str, path: str, split: str, epoch: int, name: str=None, combine: bool=False, seed: int=0):
        self._name = (name if (name is not None) else os.path.basename(path))
        # count consecutive shard0, shard1, ... directories under `path`
        num_shards = 0
        for i in itertools.count():
            if (not os.path.exists(os.path.join(path, ('shard' + str(i))))):
                break
            num_shards += 1
        if ((num_shards > 0) and (split == 'train')):
            # NOTE(review): reseeds the global `random` module as a side
            # effect — confirm callers don't depend on its prior state
            random.seed((seed ^ epoch))
            shard = random.randint(0, (num_shards - 1))
            split_path = os.path.join(path, ('shard' + str(shard)), split)
        else:
            split_path = os.path.join(path, split)
        if os.path.isdir(split_path):
            # datasets may live at <split_dir>/<split>
            split_path = os.path.join(split_path, split)
        dataset = data_utils.load_indexed_dataset(split_path, dictionary, dataset_impl, combine=combine)
        if (dataset is None):
            raise FileNotFoundError('Dataset not found: {} ({})'.format(split, split_path))
        super().__init__(dataset)

    @property
    def name(self):
        return self._name
|
class SortDataset(BaseWrapperDataset):
    """Orders indices with np.lexsort over one or more sort keys (the last
    key in *sort_order* is the primary key)."""

    def __init__(self, dataset, sort_order):
        super().__init__(dataset)
        if isinstance(sort_order, (list, tuple)):
            self.sort_order = sort_order
        else:
            self.sort_order = [sort_order]
        assert all(((len(key) == len(dataset)) for key in self.sort_order))

    def ordered_indices(self):
        return np.lexsort(self.sort_order)
|
class StripTokenDataset(BaseWrapperDataset):
    """Removes every occurrence of *id_to_strip* from each item."""

    def __init__(self, dataset, id_to_strip):
        super().__init__(dataset)
        self.id_to_strip = id_to_strip

    def __getitem__(self, index):
        item = self.dataset[index]
        keep = item.ne(self.id_to_strip)
        return item[keep]
|
class SubsampleDataset(BaseWrapperDataset):
    """Subsamples a given dataset by a specified ratio. Subsampling is done on
    the number of examples.

    Args:
        dataset (~torch.utils.data.Dataset): dataset to subsample
        size_ratio (float): the ratio to subsample to. must be between 0 and 1
            (exclusive)
        shuffle (bool, optional): shuffle the subsampled examples before
            batching by size (default: False).
    """

    def __init__(self, dataset, size_ratio, shuffle=False):
        super().__init__(dataset)
        assert (size_ratio < 1)
        self.actual_size = np.ceil((len(dataset) * size_ratio)).astype(int)
        # the random subset is drawn once, at construction time
        self.indices = np.random.choice(list(range(len(self.dataset))), self.actual_size, replace=False)
        # BUG FIX: ordered_indices() reads self.shuffle, which was never
        # assigned and raised AttributeError; expose it as a constructor
        # argument (default False keeps prior deterministic-order intent).
        self.shuffle = shuffle
        print('subsampled dataset from {} to {} (ratio={})'.format(len(self.dataset), self.actual_size, size_ratio))

    def __getitem__(self, index):
        return self.dataset[self.indices[index]]

    def __len__(self):
        return self.actual_size

    def collater(self, samples):
        return self.dataset.collater(samples)

    @property
    def sizes(self):
        return self.dataset.sizes[self.indices]

    @property
    def name(self):
        return self.dataset.name

    def num_tokens(self, index):
        return self.dataset.num_tokens(self.indices[index])

    def size(self, index):
        return self.dataset.size(self.indices[index])

    def ordered_indices(self):
        """Return an ordered list of indices. Batches will be constructed based
        on this order."""
        if self.shuffle:
            order = [np.random.permutation(len(self))]
        else:
            order = [np.arange(len(self))]
        # secondary key: sizes, so batches contain similar-length examples
        order.append(self.sizes)
        return np.lexsort(order)

    def prefetch(self, indices):
        self.dataset.prefetch(self.indices[indices])
|
class TransformEosDataset(FairseqDataset):
    """A :class:`~fairseq.data.FairseqDataset` wrapper that appends/prepends/strips EOS.

    Note that the transformation is applied in :func:`collater`.

    Args:
        dataset (~fairseq.data.FairseqDataset): dataset to wrap
        eos (int): index of the end-of-sentence symbol
        append_eos_to_src (bool, optional): append EOS to the end of src
        remove_eos_from_src (bool, optional): remove EOS from the end of src
        append_eos_to_tgt (bool, optional): append EOS to the end of tgt
        remove_eos_from_tgt (bool, optional): remove EOS from the end of tgt
    """

    def __init__(self, dataset, eos, append_eos_to_src=False, remove_eos_from_src=False, append_eos_to_tgt=False, remove_eos_from_tgt=False, has_target=True):
        if (not isinstance(dataset, FairseqDataset)):
            raise ValueError('dataset must be an instance of FairseqDataset')
        if (append_eos_to_src and remove_eos_from_src):
            raise ValueError('cannot combine append_eos_to_src and remove_eos_from_src')
        if (append_eos_to_tgt and remove_eos_from_tgt):
            raise ValueError('cannot combine append_eos_to_tgt and remove_eos_from_tgt')
        self.dataset = dataset
        self.eos = torch.LongTensor([eos])
        self.append_eos_to_src = append_eos_to_src
        self.remove_eos_from_src = remove_eos_from_src
        self.append_eos_to_tgt = append_eos_to_tgt
        self.remove_eos_from_tgt = remove_eos_from_tgt
        self.has_target = has_target
        # deltas applied to reported sizes: +1 per append, -1 per removal
        self._src_delta = 0
        self._src_delta += (1 if append_eos_to_src else 0)
        self._src_delta -= (1 if remove_eos_from_src else 0)
        self._tgt_delta = 0
        self._tgt_delta += (1 if append_eos_to_tgt else 0)
        self._tgt_delta -= (1 if remove_eos_from_tgt else 0)
        # the EOS-presence sanity check runs only once per side
        self._checked_src = False
        self._checked_tgt = False

    def _check_src(self, src, expect_eos):
        # one-time assertion that src ends (or doesn't end) with EOS
        if (not self._checked_src):
            assert ((src[(- 1)] == self.eos[0]) == expect_eos)
            self._checked_src = True

    def _check_tgt(self, tgt, expect_eos):
        # one-time assertion on the target side (skipped when has_target=False)
        if (self.has_target and (not self._checked_tgt)):
            assert ((tgt[(- 1)] == self.eos[0]) == expect_eos)
            self._checked_tgt = True

    def __getitem__(self, index):
        # items are returned untouched; the EOS transform happens in collater()
        return self.dataset[index]

    def __len__(self):
        return len(self.dataset)

    def collater(self, samples):
        def transform(item):
            # mutates the sample dict's 'source'/'target' entries in place
            if self.append_eos_to_src:
                self._check_src(item['source'], expect_eos=False)
                item['source'] = torch.cat([item['source'], self.eos])
            if self.remove_eos_from_src:
                self._check_src(item['source'], expect_eos=True)
                item['source'] = item['source'][:(- 1)]
            if self.append_eos_to_tgt:
                self._check_tgt(item['target'], expect_eos=False)
                item['target'] = torch.cat([item['target'], self.eos])
            if self.remove_eos_from_tgt:
                self._check_tgt(item['target'], expect_eos=True)
                item['target'] = item['target'][:(- 1)]
            return item
        samples = list(map(transform, samples))
        return self.dataset.collater(samples)

    def num_tokens(self, index):
        return self.dataset.num_tokens(index)

    def size(self, index):
        if self.has_target:
            # adjust reported sizes by the configured EOS deltas
            (src_len, tgt_len) = self.dataset.size(index)
            return ((src_len + self._src_delta), (tgt_len + self._tgt_delta))
        else:
            return self.dataset.size(index)

    def ordered_indices(self):
        # NOTE(review): ordering uses the wrapped dataset's unmodified sizes
        return self.dataset.ordered_indices()

    @property
    def supports_prefetch(self):
        return getattr(self.dataset, 'supports_prefetch', False)

    def prefetch(self, indices):
        return self.dataset.prefetch(indices)
|
class TruncateDataset(BaseWrapperDataset):
    """Truncates each example of the wrapped dataset to at most
    *truncation_length* elements.

    Args:
        dataset (~torch.utils.data.Dataset): dataset to wrap
        truncation_length (int): maximum length of a returned item
    """

    def __init__(self, dataset, truncation_length):
        super().__init__(dataset)
        assert (truncation_length is not None)
        self.truncation_length = truncation_length
        # super().__init__ already stores `dataset` on self (other
        # BaseWrapperDataset subclasses rely on that), so the previously
        # duplicated `self.dataset = dataset` assignment is dropped.

    def __getitem__(self, index):
        item = self.dataset[index]
        item_len = item.size(0)
        if (item_len > self.truncation_length):
            item = item[:self.truncation_length]
        return item

    @property
    def sizes(self):
        # reported sizes are clipped to the truncation length
        return np.minimum(self.dataset.sizes, self.truncation_length)

    def __len__(self):
        return len(self.dataset)
|
def is_master(args):
    """Return True iff this process is the distributed master (rank 0)."""
    return args.distributed_rank == 0
|
def infer_init_method(args):
    """Infer `args.distributed_init_method` from the environment, in place.

    Prefers the standard torch.distributed env vars (MASTER_ADDR, MASTER_PORT,
    WORLD_SIZE, RANK); otherwise, when `--distributed-port` is set, derives a
    tcp:// init method and ranks from SLURM environment variables. Leaves
    `args` untouched if neither source is available.
    """
    if (args.distributed_init_method is not None):
        return
    if all(((key in os.environ) for key in ['MASTER_ADDR', 'MASTER_PORT', 'WORLD_SIZE', 'RANK'])):
        # launched via torch.distributed.launch or similar: use env:// init
        args.distributed_init_method = 'env://'
        args.distributed_world_size = int(os.environ['WORLD_SIZE'])
        args.distributed_rank = int(os.environ['RANK'])
    elif (args.distributed_port > 0):
        # SLURM job: derive the master host from the step/job node list
        node_list = os.environ.get('SLURM_STEP_NODELIST')
        if (node_list is None):
            node_list = os.environ.get('SLURM_JOB_NODELIST')
        if (node_list is not None):
            try:
                # expand the compressed SLURM hostlist; first host is master
                hostnames = subprocess.check_output(['scontrol', 'show', 'hostnames', node_list])
                args.distributed_init_method = 'tcp://{host}:{port}'.format(host=hostnames.split()[0].decode('utf-8'), port=args.distributed_port)
                nnodes = int(os.environ.get('SLURM_NNODES'))
                ntasks_per_node = os.environ.get('SLURM_NTASKS_PER_NODE')
                if (ntasks_per_node is not None):
                    ntasks_per_node = int(ntasks_per_node)
                else:
                    # derive tasks-per-node from total tasks / node count
                    ntasks = int(os.environ.get('SLURM_NTASKS'))
                    nnodes = int(os.environ.get('SLURM_NNODES'))
                    assert ((ntasks % nnodes) == 0)
                    ntasks_per_node = int((ntasks / nnodes))
                if (ntasks_per_node == 1):
                    # one task per node: this process will spawn one worker
                    # per local GPU, so ranks start at node_id * gpus_per_node
                    assert ((args.distributed_world_size % nnodes) == 0)
                    gpus_per_node = (args.distributed_world_size // nnodes)
                    node_id = int(os.environ.get('SLURM_NODEID'))
                    args.distributed_rank = (node_id * gpus_per_node)
                else:
                    # SLURM already launched one task per GPU: no spawning
                    assert (ntasks_per_node == (args.distributed_world_size // nnodes))
                    args.distributed_no_spawn = True
                    args.distributed_rank = int(os.environ.get('SLURM_PROCID'))
                    args.device_id = int(os.environ.get('SLURM_LOCALID'))
            except subprocess.CalledProcessError as e:
                # scontrol failed: surface the error to the caller
                raise e
            except FileNotFoundError:
                # scontrol not installed: silently fall through (not on SLURM)
                pass
|
def distributed_init(args):
    """Initialize the torch.distributed process group from `args`.

    Performs a warm-up all_reduce so collectives are ready, silences stdout on
    non-master ranks, and rewrites `args.distributed_rank` from the joined
    group. Returns the (possibly updated) distributed rank.

    Raises:
        ValueError: if `args.distributed_world_size` is 1.
    """
    if (args.distributed_world_size == 1):
        raise ValueError('Cannot initialize distributed with distributed_world_size=1')
    if torch.distributed.is_initialized():
        warnings.warn('Distributed is already initialized, cannot initialize twice!')
    else:
        print('| distributed init (rank {}): {}'.format(args.distributed_rank, args.distributed_init_method), flush=True)
        dist.init_process_group(backend=args.distributed_backend, init_method=args.distributed_init_method, world_size=args.distributed_world_size, rank=args.distributed_rank)
        print('| initialized host {} as rank {}'.format(socket.gethostname(), args.distributed_rank), flush=True)
        # warm-up collective to verify the group is functional
        if torch.cuda.is_available():
            dist.all_reduce(torch.zeros(1).cuda())
        else:
            dist.all_reduce(torch.zeros(1))
        # only the master rank keeps printing (others need force=True)
        suppress_output(is_master(args))
    args.distributed_rank = torch.distributed.get_rank()
    return args.distributed_rank
|
def suppress_output(is_master):
    """Suppress printing on the current device. Force printing with `force=True`."""
    import builtins as __builtin__
    original_print = __builtin__.print

    def print(*args, **kwargs):
        # always pop 'force' so it never leaks into the real print
        should_print = kwargs.pop('force', False) or is_master
        if should_print:
            original_print(*args, **kwargs)

    __builtin__.print = print
|
def get_rank():
    """Return this process's rank in the default distributed group."""
    return dist.get_rank()
|
def get_world_size():
    """Return the number of processes in the default distributed group."""
    return dist.get_world_size()
|
def get_default_group():
    """Return the default (WORLD) distributed process group."""
    return dist.group.WORLD
|
def all_reduce(tensor, group=None):
    """All-reduce `tensor` in place across `group` (default: WORLD)."""
    if (group is None):
        group = get_default_group()
    return dist.all_reduce(tensor, group=group)
|
def all_gather_list(data, group=None, max_size=16384):
    """Gathers arbitrary data from all nodes into a list.

    Similar to :func:`~torch.distributed.all_gather` but for arbitrary Python
    data. Note that *data* must be picklable.

    Each worker writes its pickled payload into a per-rank slot of a shared
    CUDA buffer (2-byte length header + payload) and an all_reduce of the
    zero-initialized buffer performs the gather.

    Args:
        data (Any): data from the local worker to be gathered on other workers
        group (optional): group of the collective
        max_size (int, optional): maximum size of the data to be gathered
            across workers
    """
    rank = get_rank()
    world_size = get_world_size()
    buffer_size = (max_size * world_size)
    # (re)allocate the cached buffers if they are missing or too small
    if ((not hasattr(all_gather_list, '_buffer')) or (all_gather_list._buffer.numel() < buffer_size)):
        all_gather_list._buffer = torch.cuda.ByteTensor(buffer_size)
        all_gather_list._cpu_buffer = torch.ByteTensor(max_size).pin_memory()
    buffer = all_gather_list._buffer
    # zeros everywhere except our slot, so all_reduce acts as a gather
    buffer.zero_()
    cpu_buffer = all_gather_list._cpu_buffer
    enc = pickle.dumps(data)
    enc_size = len(enc)
    if ((enc_size + 2) > max_size):
        raise ValueError('encoded data exceeds max_size: {}'.format((enc_size + 2)))
    # 2-byte length header encoded base 255: size = 255*b0 + b1
    assert (max_size < (255 * 256))
    cpu_buffer[0] = (enc_size // 255)
    cpu_buffer[1] = (enc_size % 255)
    cpu_buffer[2:(enc_size + 2)] = torch.ByteTensor(list(enc))
    # copy header+payload into this rank's slot of the shared buffer
    start = (rank * max_size)
    size = (enc_size + 2)
    buffer[start:(start + size)].copy_(cpu_buffer[:size])
    all_reduce(buffer, group=group)
    try:
        result = []
        for i in range(world_size):
            out_buffer = buffer[(i * max_size):((i + 1) * max_size)]
            # decode the 2-byte header; 0 means that rank sent nothing
            size = ((255 * utils.item(out_buffer[0])) + utils.item(out_buffer[1]))
            if (size > 0):
                result.append(pickle.loads(bytes(out_buffer[2:(size + 2)].tolist())))
        return result
    except pickle.UnpicklingError:
        raise Exception('Unable to unpickle data from other workers. all_gather_list requires all workers to enter the function together, so this error usually indicates that the workers have fallen out of sync somehow. Workers can fall out of sync if one of them runs out of memory, or if there are other conditions in your training script that can cause one worker to finish an epoch while other workers are still iterating over their portions of the data.')
|
def load_archive_file(archive_file):
    """Resolve *archive_file* (a name, URL, or local path) to a local
    directory, downloading and extracting the archive on first use.

    Returns the local directory path, or ``None`` if the archive could not
    be resolved.
    """
    try:
        resolved_archive_file = cached_path(archive_file, cache_dir=None)
    except EnvironmentError:
        print("Archive name '{}' was not found in archive name list. We assumed '{}' was a path or URL but couldn't find any file associated to this path or URL.".format(archive_file, archive_file))
        return None
    if (resolved_archive_file == archive_file):
        print('loading archive file {}'.format(archive_file))
    else:
        print('loading archive file {} from cache at {}'.format(archive_file, resolved_archive_file))
    tempdir = None
    if (not os.path.isdir(resolved_archive_file)):
        # First use: extract to a temp dir, then replace the downloaded
        # archive file with its extracted top-level directory, so later calls
        # take the isdir() fast path above.
        tempdir = tempfile.mkdtemp()
        print('extracting archive file {} to temp dir {}'.format(resolved_archive_file, tempdir))
        # The tarfile mode is derived from the original name's extension
        # (e.g. 'tar.gz' -> 'r:gz').
        ext = os.path.splitext(archive_file)[1][1:]
        with tarfile.open(resolved_archive_file, ('r:' + ext)) as archive:
            top_dir = os.path.commonprefix(archive.getnames())
            # NOTE(review): extractall() trusts member paths inside the
            # archive (path-traversal risk) -- only use with trusted archives.
            archive.extractall(tempdir)
        os.remove(resolved_archive_file)
        shutil.move(os.path.join(tempdir, top_dir), resolved_archive_file)
        shutil.rmtree(tempdir)
    return resolved_archive_file
|
def url_to_filename(url, etag=None):
    """
    Convert `url` into a hashed filename in a repeatable way.
    If `etag` is specified, append its hash to the URL's, delimited
    by a period.
    """
    pieces = [sha256(url.encode('utf-8')).hexdigest()]
    if etag:
        pieces.append(sha256(etag.encode('utf-8')).hexdigest())
    return '.'.join(pieces)
|
def filename_to_url(filename, cache_dir=None):
    """
    Return the url and etag (which may be ``None``) stored for `filename`.
    Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist.
    """
    if cache_dir is None:
        cache_dir = PYTORCH_FAIRSEQ_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    cache_path = os.path.join(cache_dir, filename)
    meta_path = cache_path + '.json'
    # Both the cached file and its sidecar metadata must exist.
    for required in (cache_path, meta_path):
        if not os.path.exists(required):
            raise EnvironmentError('file {} not found'.format(required))
    with open(meta_path, encoding='utf-8') as meta_file:
        metadata = json.load(meta_file)
    return (metadata['url'], metadata['etag'])
|
def cached_path(url_or_filename, cache_dir=None):
    """
    Given something that might be a URL (or might be a local path),
    determine which. If it's a URL, download the file and cache it, and
    return the path to the cached file. If it's already a local path,
    make sure the file exists and then return the path.
    """
    if cache_dir is None:
        cache_dir = PYTORCH_FAIRSEQ_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    parsed = urlparse(url_or_filename)
    if parsed.scheme in ('http', 'https', 's3'):
        # Remote resource: fetch (or reuse) the cached copy.
        return get_from_cache(url_or_filename, cache_dir)
    if os.path.exists(url_or_filename):
        # Existing local file: hand it back untouched.
        return url_or_filename
    if parsed.scheme == '':
        # Looks like a local path, but nothing is there.
        raise EnvironmentError('file {} not found'.format(url_or_filename))
    raise ValueError('unable to parse {} as a URL or as a local path'.format(url_or_filename))
|
def split_s3_path(url):
    """Split a full s3 path into the bucket name and path."""
    parsed = urlparse(url)
    if not (parsed.netloc and parsed.path):
        raise ValueError('bad s3 path {}'.format(url))
    # Drop exactly one leading slash from the key, if present.
    s3_path = parsed.path[1:] if parsed.path.startswith('/') else parsed.path
    return (parsed.netloc, s3_path)
|
def s3_request(func):
    """
    Wrapper function for s3 requests in order to create more helpful error
    messages.
    """
    @wraps(func)
    def wrapper(url, *args, **kwargs):
        # Imported lazily so boto/botocore is only required for s3 URLs.
        from botocore.exceptions import ClientError
        try:
            return func(url, *args, **kwargs)
        except ClientError as exc:
            # Translate "missing object" into the same EnvironmentError the
            # other cache helpers raise; propagate everything else untouched.
            if int(exc.response['Error']['Code']) == 404:
                raise EnvironmentError('file {} not found'.format(url))
            raise
    return wrapper
|
@s3_request
def s3_etag(url):
    """Check ETag on S3 object."""
    import boto3
    (bucket_name, s3_path) = split_s3_path(url)
    return boto3.resource('s3').Object(bucket_name, s3_path).e_tag
|
@s3_request
def s3_get(url, temp_file):
    """Pull a file directly from S3 into the open file object *temp_file*."""
    import boto3
    (bucket_name, s3_path) = split_s3_path(url)
    boto3.resource('s3').Bucket(bucket_name).download_fileobj(s3_path, temp_file)
|
def http_get(url, temp_file):
    """Stream `url` into the open binary file object `temp_file`.

    Downloads in 1 KiB chunks and reports progress with tqdm. The
    Content-Length header, when present, sizes the progress bar.

    Raises:
        requests.HTTPError: if the server responds with an error status.
            Without this check an HTML error page (404/500/...) would be
            written to *temp_file* and later cached as if it were the
            real payload.
    """
    import requests
    from tqdm import tqdm
    req = requests.get(url, stream=True)
    # Fail fast on 4xx/5xx instead of silently saving the error body.
    req.raise_for_status()
    content_length = req.headers.get('Content-Length')
    total = int(content_length) if content_length is not None else None
    progress = tqdm(unit='B', total=total)
    for chunk in req.iter_content(chunk_size=1024):
        if chunk:  # skip keep-alive chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
|
def get_from_cache(url, cache_dir=None):
    """
    Given a URL, look for the corresponding dataset in the local cache.
    If it's not there, download it. Then return the path to the cached file.
    """
    if (cache_dir is None):
        cache_dir = PYTORCH_FAIRSEQ_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    if (not os.path.exists(cache_dir)):
        os.makedirs(cache_dir)
    # Fetch the resource's ETag so the cache key reflects the remote version.
    if url.startswith('s3://'):
        etag = s3_etag(url)
    else:
        try:
            import requests
            response = requests.head(url, allow_redirects=True)
            if (response.status_code != 200):
                etag = None
            else:
                etag = response.headers.get('ETag')
        except EnvironmentError:
            # Server unreachable; fall back to etag-less lookup below.
            etag = None
    filename = url_to_filename(url, etag)
    cache_path = os.path.join(cache_dir, filename)
    # Offline fallback: if we could not get an ETag, reuse any previously
    # cached copy of this URL (downloaded under some older ETag), skipping
    # the '.json' metadata sidecar files.
    if ((not os.path.exists(cache_path)) and (etag is None)):
        matching_files = fnmatch.filter(os.listdir(cache_dir), (filename + '.*'))
        matching_files = list(filter((lambda s: (not s.endswith('.json'))), matching_files))
        if matching_files:
            cache_path = os.path.join(cache_dir, matching_files[(- 1)])
    if (not os.path.exists(cache_path)):
        # Download to a temp file first, then copy into the cache only once
        # the download is complete, so readers never observe a partial file.
        with tempfile.NamedTemporaryFile() as temp_file:
            logger.info('%s not found in cache, downloading to %s', url, temp_file.name)
            if url.startswith('s3://'):
                s3_get(url, temp_file)
            else:
                http_get(url, temp_file)
            temp_file.flush()
            temp_file.seek(0)
            logger.info('copying %s to cache at %s', temp_file.name, cache_path)
            with open(cache_path, 'wb') as cache_file:
                shutil.copyfileobj(temp_file, cache_file)
            # Record the source URL and ETag next to the cached file.
            logger.info('creating metadata file for %s', cache_path)
            meta = {'url': url, 'etag': etag}
            meta_path = (cache_path + '.json')
            with open(meta_path, 'w') as meta_file:
                output_string = json.dumps(meta)
                meta_file.write(output_string)
            logger.info('removing temp file %s', temp_file.name)
    return cache_path
|
def read_set_from_file(filename):
    """
    Extract a de-duped collection (set) of text from a file.
    Expected file format is one item per line.
    """
    with open(filename, 'r', encoding='utf-8') as file_:
        return {line.rstrip() for line in file_}
|
def get_file_extension(path, dot=True, lower=True):
    """Return `path`'s extension, optionally without the leading dot and/or
    lower-cased (both enabled by default)."""
    ext = os.path.splitext(path)[1]
    if not dot:
        ext = ext[1:]
    return ext.lower() if lower else ext
|
def from_pretrained(model_name_or_path, checkpoint_file='model.pt', data_name_or_path='.', archive_map=None, **kwargs):
    # Load a pre-trained model (plus its task and args) from a local path or
    # an archive-map shorthand, returning everything needed for inference.
    from fairseq import checkpoint_utils, file_utils
    if (archive_map is not None):
        # Map shorthand names (e.g. a published model id) to their
        # download URLs/paths.
        if (model_name_or_path in archive_map):
            model_name_or_path = archive_map[model_name_or_path]
        if ((data_name_or_path is not None) and (data_name_or_path in archive_map)):
            data_name_or_path = archive_map[data_name_or_path]
    model_path = file_utils.load_archive_file(model_name_or_path)
    # Relative data paths are resolved inside the model archive itself;
    # anything else is treated as a separate archive to fetch.
    if data_name_or_path.startswith('.'):
        kwargs['data'] = os.path.abspath(os.path.join(model_path, data_name_or_path))
    else:
        kwargs['data'] = file_utils.load_archive_file(data_name_or_path)
    # Auto-detect BPE / sentencepiece resources shipped inside the archive
    # and surface them as arg overrides.
    for (file, arg) in {'code': 'bpe_codes', 'bpecodes': 'bpe_codes', 'sentencepiece.bpe.model': 'sentencepiece_vocab'}.items():
        path = os.path.join(model_path, file)
        if os.path.exists(path):
            kwargs[arg] = path
    if ('user_dir' in kwargs):
        utils.import_user_module(argparse.Namespace(user_dir=kwargs['user_dir']))
    # ':'-separated checkpoint names are loaded as an ensemble.
    (models, args, task) = checkpoint_utils.load_model_ensemble_and_task([os.path.join(model_path, cpt) for cpt in checkpoint_file.split(':')], arg_overrides=kwargs)
    return {'args': args, 'task': task, 'models': models}
|
class GeneratorHubInterface(nn.Module):
'\n PyTorch Hub interface for generating sequences from a pre-trained\n translation or language model.\n '
def __init__(self, args, task, models):
super().__init__()
self.args = args
self.task = task
self.models = nn.ModuleList(models)
self.src_dict = task.source_dictionary
self.tgt_dict = task.target_dictionary
for model in self.models:
model.make_generation_fast_(beamable_mm_beam_size=(None if getattr(args, 'no_beamable_mm', False) else getattr(args, 'beam', 5)), need_attn=getattr(args, 'print_alignment', False))
self.align_dict = utils.load_align_dict(getattr(args, 'replace_unk', None))
self.tokenizer = encoders.build_tokenizer(args)
self.bpe = encoders.build_bpe(args)
self.register_buffer('_float_tensor', torch.tensor([0], dtype=torch.float))
@property
def device(self):
return self._float_tensor.device
def translate(self, sentence: str, beam: int=5, verbose: bool=False, **kwargs) -> str:
return self.sample(sentence, beam, verbose, **kwargs)
def sample(self, sentence: str, beam: int=1, verbose: bool=False, **kwargs) -> str:
input = self.encode(sentence)
hypo = self.generate(input, beam, verbose, **kwargs)[0]['tokens']
return self.decode(hypo)
def generate(self, tokens: torch.LongTensor, beam: int=5, verbose: bool=False, **kwargs) -> torch.LongTensor:
sample = self._build_sample(tokens)
gen_args = copy.copy(self.args)
gen_args.beam = beam
for (k, v) in kwargs.items():
setattr(gen_args, k, v)
generator = self.task.build_generator(gen_args)
translations = self.task.inference_step(generator, self.models, sample)
if verbose:
src_str_with_unk = self.string(tokens)
print('S\t{}'.format(src_str_with_unk))
def getarg(name, default):
return getattr(gen_args, name, getattr(self.args, name, default))
hypos = translations[0]
if verbose:
for hypo in hypos:
hypo_str = self.decode(hypo['tokens'])
print('H\t{}\t{}'.format(hypo['score'], hypo_str))
print('P\t{}'.format(' '.join(map((lambda x: '{:.4f}'.format(x)), hypo['positional_scores'].tolist()))))
if ((hypo['alignment'] is not None) and getarg('print_alignment', False)):
print('A\t{}'.format(' '.join(map((lambda x: str(utils.item(x))), hypo['alignment'].int().cpu()))))
return hypos
def encode(self, sentence: str) -> torch.LongTensor:
sentence = self.tokenize(sentence)
sentence = self.apply_bpe(sentence)
return self.binarize(sentence)
def decode(self, tokens: torch.LongTensor) -> str:
sentence = self.string(tokens)
sentence = self.remove_bpe(sentence)
return self.detokenize(sentence)
def tokenize(self, sentence: str) -> str:
if (self.tokenizer is not None):
sentence = self.tokenizer.encode(sentence)
return sentence
def detokenize(self, sentence: str) -> str:
if (self.tokenizer is not None):
sentence = self.tokenizer.decode(sentence)
return sentence
def apply_bpe(self, sentence: str) -> str:
if (self.bpe is not None):
sentence = self.bpe.encode(sentence)
return sentence
def remove_bpe(self, sentence: str) -> str:
if (self.bpe is not None):
sentence = self.bpe.decode(sentence)
return sentence
def binarize(self, sentence: str) -> torch.LongTensor:
return self.src_dict.encode_line(sentence, add_if_not_exist=False).long()
def string(self, tokens: torch.LongTensor) -> str:
return self.tgt_dict.string(tokens)
def _build_sample(self, src_tokens: torch.LongTensor):
assert torch.is_tensor(src_tokens)
dataset = self.task.build_dataset_for_inference([src_tokens], [src_tokens.numel()])
sample = dataset.collater([dataset[0]])
sample = utils.apply_to_sample((lambda tensor: tensor.to(self.device)), sample)
return sample
|
class BPEHubInterface(object):
    """PyTorch Hub interface for Byte-Pair Encoding (BPE)."""

    def __init__(self, bpe, **kwargs):
        super().__init__()
        bpe_args = argparse.Namespace(bpe=bpe, **kwargs)
        self.bpe = encoders.build_bpe(bpe_args)
        assert (self.bpe is not None)

    def encode(self, sentence: str) -> str:
        """Apply BPE to *sentence*."""
        return self.bpe.encode(sentence)

    def decode(self, sentence: str) -> str:
        """Remove BPE from *sentence*."""
        return self.bpe.decode(sentence)
|
class TokenizerHubInterface(object):
    """PyTorch Hub interface for tokenization."""

    def __init__(self, tokenizer, **kwargs):
        super().__init__()
        tok_args = argparse.Namespace(tokenizer=tokenizer, **kwargs)
        self.tokenizer = encoders.build_tokenizer(tok_args)
        assert (self.tokenizer is not None)

    def encode(self, sentence: str) -> str:
        """Tokenize *sentence*."""
        return self.tokenizer.encode(sentence)

    def decode(self, sentence: str) -> str:
        """Detokenize *sentence*."""
        return self.tokenizer.decode(sentence)
|
class LegacyDistributedDataParallel(nn.Module):
    """Implements distributed data parallelism at the module level.

    A simplified version of :class:`torch.nn.parallel.DistributedDataParallel`.
    This version uses a c10d process group for communication and does not
    broadcast buffers.

    Args:
        module (~torch.nn.Module): module to be parallelized
        world_size (int): number of parallel workers
        process_group (optional): the c10d process group to be used for
            distributed data all-reduction. If None, the default process group
            will be used.
        buffer_size (int, optional): number of elements to buffer before
            performing all-reduce (default: 256M).
    """

    def __init__(self, module, world_size, process_group=None, buffer_size=(2 ** 28)):
        super().__init__()
        self.module = module
        self.world_size = world_size
        self.process_group = process_group
        # Never allocate more buffer than the model has parameters.
        self.buffer_size = min(buffer_size, sum((p.numel() for p in module.parameters())))
        # Flattened coalescing buffer, allocated lazily on first reduction.
        self.buffer = None
        # Set by the backward hook; cleared once gradients are reduced.
        self.need_reduction = False
        # When True (see no_sync), gradients accumulate locally un-reduced.
        self.accumulate_grads = False
        self._register_grad_hook()

    def __getstate__(self):
        attrs = copy.copy(self.__dict__)
        return attrs

    def __setstate__(self, state):
        # Hooks are not picklable; re-register them after unpickling.
        super().__setstate__(state)
        self._register_grad_hook()

    @contextmanager
    def no_sync(self):
        """A context manager to disable gradient synchronization."""
        old_accumulate_grads = self.accumulate_grads
        self.accumulate_grads = True
        (yield)
        self.accumulate_grads = old_accumulate_grads

    def forward(self, *inputs, **kwargs):
        return self.module(*inputs, **kwargs)

    def _register_grad_hook(self):
        """
        This function registers the callback all-reduction function for the
        NCCL backend. All gradients will be all reduced in one single step.
        The NCCL reduction will directly be enqueued into the default CUDA
        stream. Therefore, no synchronization is needed.
        """
        def all_reduce(params):
            # All-reduce the grads of `params`, coalesced into self.buffer
            # when more than one param is given.
            buffer = self.buffer
            nonzero_buffer = False
            if (len(params) > 1):
                # Pack each param's grad (zeros if grad is None) into the
                # flat buffer back-to-back.
                offset = 0
                for p in params:
                    sz = p.numel()
                    if (p.grad is not None):
                        buffer[offset:(offset + sz)].copy_(p.grad.data.view((- 1)))
                        nonzero_buffer = True
                    else:
                        buffer[offset:(offset + sz)].zero_()
                    offset += sz
            else:
                # Single oversized param: reduce its grad tensor directly
                # (or a zeroed stand-in when it has no grad yet).
                p = params[0]
                if (p.grad is not None):
                    buffer = p.grad.data
                    nonzero_buffer = True
                elif (p.numel() <= self.buffer.numel()):
                    buffer = buffer[:p.numel()]
                    buffer.zero_()
                else:
                    buffer = torch.zeros_like(p)
            # Pre-divide so the all-reduce sum yields the average; skip the
            # division when the buffer is all zeros anyway.
            if nonzero_buffer:
                buffer.div_(self.world_size)
            distributed_utils.all_reduce(buffer, self.process_group)
            # Unpack the reduced values back into each param's grad.
            offset = 0
            for p in params:
                sz = p.numel()
                if (p.grad is not None):
                    p.grad.data.copy_(buffer[offset:(offset + sz)].view_as(p))
                else:
                    p.grad = buffer[offset:(offset + sz)].view_as(p).clone()
                offset += sz

        def reduction_fn():
            # Runs once at the end of backward (queued by allreduce_hook).
            if ((not self.need_reduction) or self.accumulate_grads):
                return
            self.need_reduction = False
            if (self.buffer is None):
                self.buffer = next(self.module.parameters()).new(self.buffer_size)
            # Greedily bucket params until the buffer would overflow, then
            # flush; params larger than the whole buffer go out alone.
            offset = 0
            buffered_params = []
            for param in self.module.parameters():
                if (not param.requires_grad):
                    continue
                if (param.grad is None):
                    param.grad = torch.zeros_like(param)
                if param.grad.requires_grad:
                    raise RuntimeError("DistributedDataParallel only works with gradients that don't require grad")
                sz = param.numel()
                if (sz > self.buffer.numel()):
                    all_reduce([param])
                else:
                    if ((offset + sz) > self.buffer.numel()):
                        all_reduce(buffered_params)
                        offset = 0
                        buffered_params.clear()
                    buffered_params.append(param)
                    offset += sz
            # Flush the final partially-filled bucket.
            if (len(buffered_params) > 0):
                all_reduce(buffered_params)

        # Hook every trainable param so any backward pass marks reduction as
        # pending and schedules reduction_fn after the autograd engine drains.
        for p in self.module.parameters():
            def allreduce_hook(*unused):
                self.need_reduction = True
                Variable._execution_engine.queue_callback(reduction_fn)
            if p.requires_grad:
                p.register_hook(allreduce_hook)
|
class AverageMeter(object):
    """Tracks the most recent value and a running (count-weighted) average."""

    def __init__(self):
        self.reset()

    def reset(self):
        # val: last observed value; sum and count feed the running average.
        self.val = self.avg = self.sum = self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
|
class TimeMeter(object):
    """Computes the average occurrence of some event per second."""

    def __init__(self, init=0):
        self.reset(init)

    def reset(self, init=0):
        self.init = init            # seconds credited from before this run
        self.start = time.time()    # wall-clock anchor for this run
        self.n = 0                  # number of events observed

    def update(self, val=1):
        self.n += val

    @property
    def avg(self):
        """Events per second since (init-adjusted) start."""
        return self.n / self.elapsed_time

    @property
    def elapsed_time(self):
        """Seconds elapsed, including the carried-over `init` offset."""
        return self.init + (time.time() - self.start)
|
class StopwatchMeter(object):
    """Computes the sum/avg duration of some event in seconds."""

    def __init__(self):
        self.reset()

    def reset(self):
        self.sum = 0
        self.n = 0
        self.start_time = None   # None means the watch is not running

    def start(self):
        self.start_time = time.time()

    def stop(self, n=1):
        # stop() without a preceding start() is a deliberate no-op.
        if self.start_time is None:
            return
        self.sum += time.time() - self.start_time
        self.n += n
        self.start_time = None

    @property
    def avg(self):
        """Average seconds per counted event (requires at least one stop)."""
        return self.sum / self.n
|
def build_model(args, task):
    # Look up the model class registered for ``--arch`` and delegate to its
    # build_model() factory.
    return ARCH_MODEL_REGISTRY[args.arch].build_model(args, task)
|
def register_model(name):
    """
    New model types can be added to fairseq with the :func:`register_model`
    function decorator.

    For example::

        @register_model('lstm')
        class LSTM(FairseqEncoderDecoderModel):
            (...)

    .. note:: All models must implement the :class:`BaseFairseqModel` interface.
        Typically you will extend :class:`FairseqEncoderDecoderModel` for
        sequence-to-sequence tasks or :class:`FairseqLanguageModel` for
        language modeling tasks.

    Args:
        name (str): the name of the model
    """
    def _register(cls):
        if name in MODEL_REGISTRY:
            raise ValueError('Cannot register duplicate model ({})'.format(name))
        if not issubclass(cls, BaseFairseqModel):
            raise ValueError('Model ({}: {}) must extend BaseFairseqModel'.format(name, cls.__name__))
        MODEL_REGISTRY[name] = cls
        return cls
    return _register
|
def register_model_architecture(model_name, arch_name):
    """
    New model architectures can be added to fairseq with the
    :func:`register_model_architecture` function decorator. After registration,
    model architectures can be selected with the ``--arch`` command-line
    argument.

    For example::

        @register_model_architecture('lstm', 'lstm_luong_wmt_en_de')
        def lstm_luong_wmt_en_de(args):
            args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1000)
            (...)

    The decorated function should take a single argument *args*, which is a
    :class:`argparse.Namespace` of arguments parsed from the command-line. The
    decorated function should modify these arguments in-place to match the
    desired architecture.

    Args:
        model_name (str): the name of the Model (Model must already be
            registered)
        arch_name (str): the name of the model architecture (``--arch``)
    """
    def _register(fn):
        if model_name not in MODEL_REGISTRY:
            raise ValueError('Cannot register model architecture for unknown model type ({})'.format(model_name))
        if arch_name in ARCH_MODEL_REGISTRY:
            raise ValueError('Cannot register duplicate model architecture ({})'.format(arch_name))
        if not callable(fn):
            raise ValueError('Model architecture must be callable ({})'.format(arch_name))
        # Point the arch at its model class, record the inverse mapping, and
        # keep the defaults-filling function for later application.
        ARCH_MODEL_REGISTRY[arch_name] = MODEL_REGISTRY[model_name]
        ARCH_MODEL_INV_REGISTRY.setdefault(model_name, []).append(arch_name)
        ARCH_CONFIG_REGISTRY[arch_name] = fn
        return fn
    return _register
|
def _skeptical_unmasking(output_scores, output_masks, p):
    # Build a mask selecting, per row, the fraction ``p`` of lowest-scoring
    # positions (two tokens are subtracted from each row's budget -- presumably
    # the bos/eos specials; confirm against the caller).
    sorted_index = output_scores.sort((- 1))[1]
    boundary_len = ((output_masks.sum(1, keepdim=True).type_as(output_scores) - 2) * p).long()
    # NOTE(review): new_arange appears to produce position indices shaped
    # like output_masks -- confirm against its definition.
    skeptical_mask = (new_arange(output_masks) < boundary_len)
    # Scatter the "first boundary_len ranks" mask back into original
    # positions via the score-sorted index.
    return skeptical_mask.scatter(1, sorted_index, skeptical_mask)
|
@register_model('cmlm_transformer')
class CMLMNATransformerModel(NATransformerModel):
    # Conditional-masked-LM non-autoregressive transformer: trains on
    # <unk>-masked targets and decodes by iteratively re-predicting the
    # least confident tokens ("mask-predict" style refinement).

    @staticmethod
    def add_args(parser):
        # No extra CLI options beyond the NAT base model's.
        NATransformerModel.add_args(parser)

    def forward(self, src_tokens, src_lengths, prev_output_tokens, tgt_tokens, **kwargs):
        assert (not self.decoder.src_embedding_copy), 'do not support embedding copy.'
        encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs)
        # Predict the target length from the encoder output (auxiliary loss).
        (length_out, length_tgt) = self.decoder.forward_length_prediction(encoder_out, tgt_tokens)
        (word_ins_out, word_ins_tgt, _) = self.decoder(prev_output_tokens, encoder_out=encoder_out, tgt_tokens=tgt_tokens)
        # Word-insertion loss is only applied at positions that were masked
        # (i.e. carry the <unk> token) in the decoder input.
        word_ins_mask = prev_output_tokens.eq(self.unk)
        return {'word_ins_out': word_ins_out, 'word_ins_tgt': word_ins_tgt, 'word_ins_mask': word_ins_mask, 'length_out': length_out, 'length_tgt': length_tgt, 'length_w': self.decoder.length_loss_factor}

    def forward_decoder(self, decoder_out, encoder_out, decoding_format=None, **kwargs):
        # One refinement step: fill in the currently masked positions, then
        # (unless this is the last step) re-mask the least confident tokens.
        step = decoder_out['step']
        max_step = decoder_out['max_step']
        output_tokens = decoder_out['output_tokens']
        output_scores = decoder_out['output_scores']
        output_masks = output_tokens.eq(self.unk)
        (_scores, _tokens) = self.decoder(output_tokens, encoder_out=encoder_out, decoding_format=decoding_format)
        # In-place: only masked positions receive the new predictions/scores.
        output_tokens.masked_scatter_(output_masks, _tokens[output_masks])
        output_scores.masked_scatter_(output_masks, _scores[output_masks])
        if ((step + 1) < max_step):
            # Re-masking ratio anneals linearly from 1 towards 0 over steps.
            skeptical_mask = _skeptical_unmasking(output_scores, output_tokens.ne(self.pad), (1 - ((step + 1) / max_step)))
            output_tokens.masked_fill_(skeptical_mask, self.unk)
            output_scores.masked_fill_(skeptical_mask, 0.0)
        return {'output_tokens': output_tokens, 'output_scores': output_scores}
|
@register_model_architecture('cmlm_transformer', 'cmlm_transformer')
def base_architecture(args):
args.encoder_embed_path = getattr(args, 'encoder_embed_path', None)
args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512)
args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 2048)
args.encoder_layers = getattr(args, 'encoder_layers', 6)
args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 8)
args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False)
args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', False)
args.decoder_embed_path = getattr(args, 'decoder_embed_path', None)
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', args.encoder_ffn_embed_dim)
args.decoder_layers = getattr(args, 'decoder_layers', 6)
args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 8)
args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', False)
args.decoder_learned_pos = getattr(args, 'decoder_learned_pos', False)
args.attention_dropout = getattr(args, 'attention_dropout', 0.0)
args.activation_dropout = getattr(args, 'activation_dropout', 0.0)
args.activation_fn = getattr(args, 'activation_fn', 'relu')
args.dropout = getattr(args, 'dropout', 0.1)
args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None)
args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0)
args.share_decoder_input_output_embed = getattr(args, 'share_decoder_input_output_embed', False)
args.share_all_embeddings = getattr(args, 'share_all_embeddings', True)
args.no_token_positional_embeddings = getattr(args, 'no_token_positional_embeddings', False)
args.adaptive_input = getattr(args, 'adaptive_input', False)
args.apply_bert_init = getattr(args, 'apply_bert_init', False)
args.decoder_output_dim = getattr(args, 'decoder_output_dim', args.decoder_embed_dim)
args.decoder_input_dim = getattr(args, 'decoder_input_dim', args.decoder_embed_dim)
args.sg_length_pred = getattr(args, 'sg_length_pred', False)
args.pred_length_offset = getattr(args, 'pred_length_offset', False)
args.length_loss_factor = getattr(args, 'length_loss_factor', 0.1)
args.ngram_predictor = getattr(args, 'ngram_predictor', 1)
args.src_embedding_copy = getattr(args, 'src_embedding_copy', False)
|
@register_model_architecture('cmlm_transformer', 'cmlm_transformer_wmt_en_de')
def iter_nat_wmt_en_de(args):
    # The WMT En-De preset uses the base hyperparameters unchanged.
    base_architecture(args)
|
class CompositeEncoder(FairseqEncoder):
    """
    A wrapper around a dictionary of :class:`FairseqEncoder` objects.

    We run forward on each encoder and return a dictionary of outputs. The first
    encoder's dictionary is used for initialization.

    Args:
        encoders (dict): a dictionary of :class:`FairseqEncoder` objects.
    """

    def __init__(self, encoders):
        super().__init__(next(iter(encoders.values())).dictionary)
        self.encoders = encoders
        # Register each sub-encoder so parameters/state are tracked.
        for key, encoder in self.encoders.items():
            self.add_module(key, encoder)

    def forward(self, src_tokens, src_lengths):
        """
        Args:
            src_tokens (LongTensor): tokens in the source language of shape
                `(batch, src_len)`
            src_lengths (LongTensor): lengths of each source sentence of shape
                `(batch)`

        Returns:
            dict: the outputs from each Encoder, keyed like `self.encoders`
        """
        return {key: encoder(src_tokens, src_lengths) for key, encoder in self.encoders.items()}

    def reorder_encoder_out(self, encoder_out, new_order):
        """Reorder encoder output according to new_order."""
        for key, encoder in self.encoders.items():
            encoder_out[key] = encoder.reorder_encoder_out(encoder_out[key], new_order)
        return encoder_out

    def max_positions(self):
        # The composite supports only lengths every sub-encoder supports.
        return min(encoder.max_positions() for encoder in self.encoders.values())

    def upgrade_state_dict(self, state_dict):
        for encoder in self.encoders.values():
            encoder.upgrade_state_dict(state_dict)
        return state_dict
|
def DistributedFairseqModel(args, model):
    """
    Wrap a *model* to support distributed data parallel training.

    This is similar to the built-in DistributedDataParallel, but allows
    additional configuration of the DistributedDataParallel class to
    use, and also provides easier access to the wrapped model by
    forwarding requests for missing attributes to the wrapped model.

    Args:
        args (argparse.Namespace): fairseq args
        model (BaseFairseqModel): model to wrap

    Raises:
        ValueError: if ``args.ddp_backend`` is not one of the supported
            backends ('c10d' or 'no_c10d').
    """
    assert isinstance(model, nn.Module)
    if args.ddp_backend == 'c10d':
        ddp_class = nn.parallel.DistributedDataParallel
        init_kwargs = dict(
            module=model,
            device_ids=[args.device_id],
            output_device=args.device_id,
            broadcast_buffers=False,
            bucket_cap_mb=args.bucket_cap_mb,
        )
        # Only pass optional flags that this torch version's DDP supports.
        # NOTE: inspect.getargspec was removed in Python 3.11; use
        # getfullargspec (membership checks are unaffected by the extra
        # 'self' entry).
        ddp_params = inspect.getfullargspec(ddp_class.__init__)[0]
        if 'check_reduction' in ddp_params:
            init_kwargs['check_reduction'] = True
        if 'find_unused_parameters' in ddp_params:
            init_kwargs['find_unused_parameters'] = args.find_unused_parameters
    elif args.ddp_backend == 'no_c10d':
        ddp_class = LegacyDistributedDataParallel
        init_kwargs = dict(module=model, world_size=args.distributed_world_size, buffer_size=(2 ** 28))
    else:
        raise ValueError('Unknown --ddp-backend: ' + args.ddp_backend)

    class _DistributedFairseqModel(ddp_class):
        """Extend DistributedDataParallel to check for missing
        attributes in the wrapped module."""

        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)

        def __getattr__(self, name):
            # Fall through to the wrapped module for anything the DDP
            # wrapper itself does not define.
            wrapped_module = super().__getattr__('module')
            if hasattr(wrapped_module, name):
                return getattr(wrapped_module, name)
            return super().__getattr__(name)

    return _DistributedFairseqModel(**init_kwargs)
|
class FairseqDecoder(nn.Module):
    """Base class for decoders."""

    def __init__(self, dictionary):
        super().__init__()
        self.dictionary = dictionary
        self.onnx_trace = False

    def forward(self, prev_output_tokens, encoder_out=None, **kwargs):
        """
        Args:
            prev_output_tokens (LongTensor): shifted output tokens of shape
                `(batch, tgt_len)`, for teacher forcing
            encoder_out (dict, optional): output from the encoder, used for
                encoder-side attention

        Returns:
            tuple:
                - the decoder's output of shape `(batch, tgt_len, vocab)`
                - a dictionary with any model-specific outputs
        """
        features, extra = self.extract_features(prev_output_tokens, encoder_out=encoder_out, **kwargs)
        return (self.output_layer(features), extra)

    def extract_features(self, prev_output_tokens, encoder_out=None, **kwargs):
        """
        Returns:
            tuple:
                - the decoder's features of shape `(batch, tgt_len, embed_dim)`
                - a dictionary with any model-specific outputs
        """
        raise NotImplementedError

    def output_layer(self, features, **kwargs):
        """
        Project features to the default output size, e.g., vocabulary size.

        Args:
            features (Tensor): features returned by *extract_features*.
        """
        raise NotImplementedError

    def get_normalized_probs(self, net_output, log_probs, sample):
        """Get normalized probabilities (or log probs) from a net's output."""
        adaptive_softmax = getattr(self, 'adaptive_softmax', None)
        if adaptive_softmax is not None:
            if sample is not None:
                assert 'target' in sample
                target = sample['target']
            else:
                target = None
            out = adaptive_softmax.get_log_prob(net_output[0], target=target)
            # get_log_prob returns log-probs; exponentiate for plain probs.
            return out if log_probs else out.exp_()
        logits = net_output[0]
        if log_probs:
            return utils.log_softmax(logits, dim=(- 1), onnx_trace=self.onnx_trace)
        return utils.softmax(logits, dim=(- 1), onnx_trace=self.onnx_trace)

    def max_positions(self):
        """Maximum input length supported by the decoder."""
        return 1000000.0

    def upgrade_state_dict(self, state_dict):
        """Upgrade a (possibly old) state dict for new versions of fairseq."""
        return state_dict

    def prepare_for_onnx_export_(self):
        self.onnx_trace = True
|
class FairseqEncoder(nn.Module):
    """Base class for encoders."""

    def __init__(self, dictionary):
        super().__init__()
        self.dictionary = dictionary

    def forward(self, src_tokens, src_lengths=None, **kwargs):
        """
        Args:
            src_tokens (LongTensor): tokens in the source language of shape
                `(batch, src_len)`
            src_lengths (LongTensor): lengths of each source sentence of shape
                `(batch)`
        """
        raise NotImplementedError

    def reorder_encoder_out(self, encoder_out, new_order):
        """
        Reorder encoder output according to `new_order`.

        Args:
            encoder_out: output from the ``forward()`` method
            new_order (LongTensor): desired order

        Returns:
            `encoder_out` rearranged according to `new_order`
        """
        raise NotImplementedError

    def max_positions(self):
        """Maximum input length supported by the encoder."""
        return 1000000.0

    def upgrade_state_dict(self, state_dict):
        """Upgrade a (possibly old) state dict for new versions of fairseq."""
        return state_dict
|
class FairseqIncrementalDecoder(FairseqDecoder):
    """Base class for incremental decoders.

    Incremental decoding is a special mode at inference time where the Model
    only receives a single timestep of input corresponding to the previous
    output token (for teacher forcing) and must produce the next output
    *incrementally*. Thus the model must cache any long-term state that is
    needed about the sequence, e.g., hidden states, convolutional states, etc.

    Compared to the standard :class:`FairseqDecoder` interface, the incremental
    decoder interface allows :func:`forward` functions to take an extra keyword
    argument (*incremental_state*) that can be used to cache state across
    time-steps.

    The :class:`FairseqIncrementalDecoder` interface also defines the
    :func:`reorder_incremental_state` method, which is used during beam search
    to select and reorder the incremental state based on the selection of beams.

    To learn more about how incremental decoding works, refer to `this blog
    <http://www.telesens.co/2019/04/21/understanding-incremental-decoding-in-fairseq/>`_.
    """

    def __init__(self, dictionary):
        super().__init__(dictionary)

    def forward(self, prev_output_tokens, encoder_out=None, incremental_state=None, **kwargs):
        """
        Args:
            prev_output_tokens (LongTensor): shifted output tokens of shape
                `(batch, tgt_len)`, for teacher forcing
            encoder_out (dict, optional): output from the encoder, used for
                encoder-side attention
            incremental_state (dict, optional): dictionary used for storing
                state during :ref:`Incremental decoding`

        Returns:
            tuple:
                - the decoder's output of shape `(batch, tgt_len, vocab)`
                - a dictionary with any model-specific outputs
        """
        raise NotImplementedError

    def extract_features(self, prev_output_tokens, encoder_out=None, incremental_state=None, **kwargs):
        """
        Returns:
            tuple:
                - the decoder's features of shape `(batch, tgt_len, embed_dim)`
                - a dictionary with any model-specific outputs
        """
        raise NotImplementedError

    def reorder_incremental_state(self, incremental_state, new_order):
        """Reorder incremental state.

        This should be called when the order of the input has changed from the
        previous time step. A typical use case is beam search, where the input
        order changes between time steps based on the selection of beams.
        """
        visited = set()

        def _reorder(module):
            # Visit each child at most once; skip self to avoid recursion.
            if module is not self and hasattr(module, 'reorder_incremental_state') and module not in visited:
                visited.add(module)
                module.reorder_incremental_state(incremental_state, new_order)

        self.apply(_reorder)

    def set_beam_size(self, beam_size):
        """Sets the beam size in the decoder and all children."""
        if getattr(self, '_beam_size', (- 1)) == beam_size:
            return
        visited = set()

        def _set(module):
            if module is not self and hasattr(module, 'set_beam_size') and module not in visited:
                visited.add(module)
                module.set_beam_size(beam_size)

        self.apply(_set)
        self._beam_size = beam_size
|
class BaseFairseqModel(nn.Module):
    """Base class for fairseq models."""

    def __init__(self):
        super().__init__()
        # Flipped by make_generation_fast_() so the optimization runs at most
        # once per model instance.
        self._is_generation_fast = False

    @staticmethod
    def add_args(parser):
        """Add model-specific arguments to the parser."""
        pass

    @classmethod
    def build_model(cls, args, task):
        """Build a new model instance."""
        raise NotImplementedError('Model must implement the build_model method')

    def get_targets(self, sample, net_output):
        """Get targets from either the sample or the net's output."""
        return sample['target']

    def get_normalized_probs(self, net_output, log_probs, sample=None):
        """Get normalized probabilities (or log probs) from a net's output."""
        # Delegate to the decoder when the subclass has one; otherwise fall
        # back to a plain (log-)softmax over raw tensor logits.
        if hasattr(self, 'decoder'):
            return self.decoder.get_normalized_probs(net_output, log_probs, sample)
        elif torch.is_tensor(net_output):
            # Cast to float32 so the softmax is computed in full precision.
            logits = net_output.float()
            if log_probs:
                return F.log_softmax(logits, dim=(- 1))
            else:
                return F.softmax(logits, dim=(- 1))
        raise NotImplementedError

    def extract_features(self, *args, **kwargs):
        """Similar to *forward* but only return features."""
        return self(*args, **kwargs)

    def max_positions(self):
        """Maximum length supported by the model (None means unconstrained)."""
        return None

    def load_state_dict(self, state_dict, strict=True, args=None):
        """Copies parameters and buffers from *state_dict* into this module and
        its descendants.

        Overrides the method in :class:`nn.Module`. Compared with that method
        this additionally "upgrades" *state_dicts* from old checkpoints.
        """
        # Upgrade in place first, then filter the dict (prune_state_dict is
        # defined elsewhere in the file; presumably drops entries based on
        # *args*) before delegating to nn.Module.load_state_dict.
        self.upgrade_state_dict(state_dict)
        new_state_dict = prune_state_dict(state_dict, args)
        return super().load_state_dict(new_state_dict, strict)

    def upgrade_state_dict(self, state_dict):
        """Upgrade old state dicts to work with newer code."""
        self.upgrade_state_dict_named(state_dict, '')

    def upgrade_state_dict_named(self, state_dict, name):
        """Upgrade old state dicts to work with newer code.

        Args:
            state_dict (dict): state dictionary to upgrade, in place
            name (str): the state dict key corresponding to the current module
        """
        assert (state_dict is not None)

        def do_upgrade(m, prefix):
            # Depth-first walk over named children, building dotted key
            # prefixes ("encoder.layers.0...") as we descend.
            if (len(prefix) > 0):
                prefix += '.'
            for (n, c) in m.named_children():
                name = (prefix + n)
                # Prefer the *named* hook when a child defines it; otherwise
                # fall back to the unnamed variant.
                if hasattr(c, 'upgrade_state_dict_named'):
                    c.upgrade_state_dict_named(state_dict, name)
                elif hasattr(c, 'upgrade_state_dict'):
                    c.upgrade_state_dict(state_dict)
                do_upgrade(c, name)
        do_upgrade(self, name)

    def make_generation_fast_(self, **kwargs):
        """Optimize model for faster generation."""
        if self._is_generation_fast:
            return
        self._is_generation_fast = True

        # Strip weight normalization wrappers; modules without weight norm
        # raise ValueError, which is deliberately swallowed here.
        def apply_remove_weight_norm(module):
            try:
                nn.utils.remove_weight_norm(module)
            except ValueError:
                return
        self.apply(apply_remove_weight_norm)

        # Let each submodule apply its own generation-time optimizations,
        # visiting every module at most once.
        seen = set()
        def apply_make_generation_fast_(module):
            if ((module != self) and hasattr(module, 'make_generation_fast_') and (module not in seen)):
                seen.add(module)
                module.make_generation_fast_(**kwargs)
        self.apply(apply_make_generation_fast_)

        def train(mode=True):
            if mode:
                raise RuntimeError('cannot train after make_generation_fast')

        # The model is now inference-only: put it in eval mode, then replace
        # the instance's train() so switching back to training raises.
        self.eval()
        self.train = train

    def prepare_for_onnx_export_(self, **kwargs):
        """Make model exportable via ONNX trace."""
        # Same visit-once traversal pattern as make_generation_fast_.
        seen = set()
        def apply_prepare_for_onnx_export_(module):
            if ((module != self) and hasattr(module, 'prepare_for_onnx_export_') and (module not in seen)):
                seen.add(module)
                module.prepare_for_onnx_export_(**kwargs)
        self.apply(apply_prepare_for_onnx_export_)

    @classmethod
    def from_pretrained(cls, model_name_or_path, checkpoint_file='model.pt', data_name_or_path='.', **kwargs):
        """
        Load a :class:`~fairseq.models.FairseqModel` from a pre-trained model
        file. Downloads and caches the pre-trained model file if needed.

        The base implementation returns a
        :class:`~fairseq.hub_utils.GeneratorHubInterface`, which can be used to
        generate translations or sample from language models. The underlying
        :class:`~fairseq.models.FairseqModel` can be accessed via the
        *generator.models* attribute.

        Other models may override this to implement custom hub interfaces.

        Args:
            model_name_or_path (str): either the name of a pre-trained model to
                load or a path/URL to a pre-trained model state dict
            checkpoint_file (str, optional): colon-separated list of checkpoint
                files in the model archive to ensemble (default: 'model.pt')
            data_name_or_path (str, optional): point args.data to the archive
                at the given path/URL. Can start with '.' or './' to reuse the
                model archive path.
        """
        # Imported lazily to avoid a hard dependency at module import time.
        from fairseq import hub_utils
        x = hub_utils.from_pretrained(model_name_or_path, checkpoint_file, data_name_or_path, archive_map=cls.hub_models(), **kwargs)
        print(x['args'])
        return hub_utils.GeneratorHubInterface(x['args'], x['task'], x['models'])

    @classmethod
    def hub_models(cls):
        # Mapping of shorthand names to checkpoint URLs; subclasses override
        # this to publish pre-trained models.
        return {}
|
class FairseqEncoderDecoderModel(BaseFairseqModel):
    """Base class for models made of a :class:`FairseqEncoder` followed by a
    :class:`FairseqDecoder`.

    Args:
        encoder (FairseqEncoder): the encoder
        decoder (FairseqDecoder): the decoder
    """

    def __init__(self, encoder, decoder):
        super().__init__()
        self.encoder = encoder
        self.decoder = decoder
        assert isinstance(self.encoder, FairseqEncoder)
        assert isinstance(self.decoder, FairseqDecoder)

    def forward(self, src_tokens, src_lengths, prev_output_tokens, **kwargs):
        """Run the full encoder-decoder forward pass (teacher forcing).

        The source batch is encoded first, then the encoder output and the
        previous target tokens are fed to the decoder::

            encoder_out = self.encoder(src_tokens, src_lengths)
            return self.decoder(prev_output_tokens, encoder_out)

        Args:
            src_tokens (LongTensor): source tokens of shape `(batch, src_len)`
            src_lengths (LongTensor): source lengths of shape `(batch)`
            prev_output_tokens (LongTensor): previous decoder outputs of shape
                `(batch, tgt_len)`, for teacher forcing

        Returns:
            tuple:
                - the decoder's output of shape `(batch, tgt_len, vocab)`
                - a dictionary with any model-specific outputs
        """
        enc = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs)
        return self.decoder(prev_output_tokens, encoder_out=enc, **kwargs)

    def forward_decoder(self, prev_output_tokens, **kwargs):
        # Decoder-only entry point (used when the encoder output is cached).
        return self.decoder(prev_output_tokens, **kwargs)

    def extract_features(self, src_tokens, src_lengths, prev_output_tokens, **kwargs):
        """Like *forward* but return the decoder's pre-projection features.

        Returns:
            tuple:
                - features of shape `(batch, tgt_len, embed_dim)`
                - a dictionary with any model-specific outputs
        """
        enc = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs)
        return self.decoder.extract_features(prev_output_tokens, encoder_out=enc, **kwargs)

    def output_layer(self, features, **kwargs):
        """Project features to the default output size (typically vocabulary size)."""
        return self.decoder.output_layer(features, **kwargs)

    def max_positions(self):
        """Maximum length supported, as an (encoder, decoder) pair."""
        return (self.encoder.max_positions(), self.decoder.max_positions())

    def max_decoder_positions(self):
        """Maximum length supported by the decoder."""
        return self.decoder.max_positions()
|
class FairseqModel(FairseqEncoderDecoderModel):
    """Deprecated alias of :class:`FairseqEncoderDecoderModel`.

    Kept only for backward compatibility; emits a deprecation warning on
    construction.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        utils.deprecation_warning('FairseqModel is deprecated, please use FairseqEncoderDecoderModel or BaseFairseqModel instead', stacklevel=4)
|
class FairseqMultiModel(BaseFairseqModel):
    """Base class for combining multiple encoder-decoder models."""

    def __init__(self, encoders, decoders):
        super().__init__()
        assert encoders.keys() == decoders.keys()
        self.keys = list(encoders.keys())
        for lang in self.keys:
            assert isinstance(encoders[lang], FairseqEncoder)
            assert isinstance(decoders[lang], FairseqDecoder)
        # One FairseqModel (encoder-decoder pair) per language key.
        self.models = nn.ModuleDict({
            lang: FairseqModel(encoders[lang], decoders[lang])
            for lang in self.keys
        })

    @staticmethod
    def build_shared_embeddings(dicts: Dict[(str, Dictionary)], langs: List[str], embed_dim: int, build_embedding: callable, pretrained_embed_path: Optional[str]=None):
        """
        Helper function to build shared embeddings for a set of languages after
        checking that all dicts corresponding to those languages are equivalent.

        Args:
            dicts: Dict of lang_id to its corresponding Dictionary
            langs: languages that we want to share embeddings for
            embed_dim: embedding dimension
            build_embedding: callable function to actually build the embedding
            pretrained_embed_path: Optional path to load pretrained embeddings
        """
        shared_dict = dicts[langs[0]]
        # Sharing is only valid when every language uses the same dictionary.
        for lang in langs:
            if dicts[lang] != shared_dict:
                raise ValueError('--share-*-embeddings requires a joined dictionary: --share-encoder-embeddings requires a joined source dictionary, --share-decoder-embeddings requires a joined target dictionary, and --share-all-embeddings requires a joint source + target dictionary.')
        return build_embedding(shared_dict, embed_dim, pretrained_embed_path)

    def forward(self, src_tokens, src_lengths, prev_output_tokens, **kwargs):
        # Run every per-language model on the same batch; return a dict of
        # decoder outputs keyed by language.
        decoder_outs = {}
        for lang in self.keys:
            model = self.models[lang]
            encoder_out = model.encoder(src_tokens, src_lengths, **kwargs)
            decoder_outs[lang] = model.decoder(prev_output_tokens, encoder_out, **kwargs)
        return decoder_outs

    def max_positions(self):
        """Maximum length supported, per language key, as (encoder, decoder) pairs."""
        return {
            lang: (self.models[lang].encoder.max_positions(), self.models[lang].decoder.max_positions())
            for lang in self.keys
        }

    def max_decoder_positions(self):
        """Maximum length supported by the decoder (minimum over all models)."""
        return min(model.decoder.max_positions() for model in self.models.values())

    @property
    def encoder(self):
        # Convenience accessor: the encoder of the first language key.
        return self.models[self.keys[0]].encoder

    @property
    def decoder(self):
        # Convenience accessor: the decoder of the first language key.
        return self.models[self.keys[0]].decoder
|
class FairseqLanguageModel(BaseFairseqModel):
    """Base class for decoder-only (language) models.

    Args:
        decoder (FairseqDecoder): the decoder
    """

    def __init__(self, decoder):
        super().__init__()
        self.decoder = decoder
        assert isinstance(self.decoder, FairseqDecoder)

    def forward(self, src_tokens, **kwargs):
        """Feed a batch of tokens through the decoder to predict the next tokens.

        Args:
            src_tokens (LongTensor): tokens on which to condition the decoder,
                of shape `(batch, tgt_len)`
            src_lengths (LongTensor): source sentence lengths of shape `(batch)`

        Returns:
            tuple:
                - the decoder's output of shape `(batch, seq_len, vocab)`
                - a dictionary with any model-specific outputs
        """
        return self.decoder(src_tokens, **kwargs)

    def forward_decoder(self, prev_output_tokens, **kwargs):
        # Decoder entry point used during generation; identical to forward.
        return self.decoder(prev_output_tokens, **kwargs)

    def extract_features(self, src_tokens, **kwargs):
        """Like *forward* but return the decoder's pre-projection features.

        Returns:
            tuple:
                - features of shape `(batch, seq_len, embed_dim)`
                - a dictionary with any model-specific outputs
        """
        return self.decoder.extract_features(src_tokens, **kwargs)

    def output_layer(self, features, **kwargs):
        """Project features to the default output size (typically vocabulary size)."""
        return self.decoder.output_layer(features, **kwargs)

    def max_positions(self):
        """Maximum length supported by the model."""
        return self.decoder.max_positions()

    def max_decoder_positions(self):
        """Maximum length supported by the decoder."""
        return self.decoder.max_positions()

    @property
    def supported_targets(self):
        # This base LM predicts the next ("future") token only.
        return {'future'}
|
class FairseqEncoderModel(BaseFairseqModel):
    """Base class for encoder-only models.

    Args:
        encoder (FairseqEncoder): the encoder
    """

    def __init__(self, encoder):
        super().__init__()
        self.encoder = encoder
        assert isinstance(self.encoder, FairseqEncoder)

    def forward(self, src_tokens, src_lengths, **kwargs):
        """Feed a batch of tokens through the encoder to generate features.

        Args:
            src_tokens (LongTensor): input tokens of shape `(batch, src_len)`
            src_lengths (LongTensor): source sentence lengths of shape `(batch)`

        Returns:
            the encoder's output, typically of shape `(batch, src_len, features)`
        """
        return self.encoder(src_tokens, src_lengths, **kwargs)

    def get_normalized_probs(self, net_output, log_probs, sample=None):
        """Get normalized probabilities (or log probs) from a net's output."""
        encoder_out = net_output['encoder_out']
        if not torch.is_tensor(encoder_out):
            raise NotImplementedError
        # Compute the softmax in float32 for numerical stability.
        logits = encoder_out.float()
        return F.log_softmax(logits, dim=-1) if log_probs else F.softmax(logits, dim=-1)

    def max_positions(self):
        """Maximum length supported by the model."""
        return self.encoder.max_positions()
|
@register_model('fconv_lm')
class FConvLanguageModel(FairseqLanguageModel):
    """Convolutional (fconv) language model: a decoder-only model."""

    def __init__(self, decoder):
        super().__init__(decoder)

    @staticmethod
    def add_args(parser):
        """Add model-specific arguments to the parser."""
        parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability')
        parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension')
        parser.add_argument('--decoder-layers', type=str, metavar='EXPR', help='decoder layers [(dim, kernel_size), ...]')
        parser.add_argument('--decoder-out-embed-dim', type=int, metavar='N', help='decoder output embedding dimension')
        parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR', help='comma separated list of adaptive softmax cutoff points. Must be used with adaptive_loss criterion')
        parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D', help='sets adaptive softmax dropout for the tail projections')
        parser.add_argument('--decoder-attention', type=str, metavar='EXPR', help='decoder attention [True, ...]')

    @classmethod
    def build_model(cls, args, task):
        """Build a new model instance."""
        # Fill in defaults for any architecture options not set on the CLI.
        base_lm_architecture(args)
        # LM training uses tokens_per_sample as the sequence length; fall back
        # to max_target_positions for older configs.
        if (hasattr(args, 'max_target_positions') and (not hasattr(args, 'tokens_per_sample'))):
            args.tokens_per_sample = args.max_target_positions
        # NOTE(review): decoder_layers / decoder_attention are eval()'d Python
        # expressions from (trusted) CLI/config strings — do not feed untrusted
        # input here. Also: out_embed_dim is tied to decoder_embed_dim, so the
        # --decoder-out-embed-dim flag is ignored by this model, and the
        # share_embed / positional_embeddings / adaptive_softmax_* kwargs are
        # not accepted by the FConvDecoder defined later in this file — confirm
        # this call targets the intended FConvDecoder variant.
        decoder = FConvDecoder(dictionary=task.target_dictionary, embed_dim=args.decoder_embed_dim, convolutions=eval(args.decoder_layers), out_embed_dim=args.decoder_embed_dim, attention=eval(args.decoder_attention), dropout=args.dropout, max_positions=args.tokens_per_sample, share_embed=False, positional_embeddings=False, adaptive_softmax_cutoff=(options.eval_str_list(args.adaptive_softmax_cutoff, type=int) if (args.criterion == 'adaptive_loss') else None), adaptive_softmax_dropout=args.adaptive_softmax_dropout)
        return FConvLanguageModel(decoder)
|
@register_model_architecture('fconv_lm', 'fconv_lm')
def base_lm_architecture(args):
    """Fill in default hyperparameters for the base fconv LM architecture.

    Each attribute is only set when the user did not already provide it.
    """
    defaults = [
        ('dropout', 0.1),
        ('decoder_embed_dim', 128),
        ('decoder_layers', '[(1268, 4)] * 13'),
        ('decoder_attention', 'False'),
        ('adaptive_softmax_cutoff', None),
        ('adaptive_softmax_dropout', 0),
    ]
    for attr, default in defaults:
        setattr(args, attr, getattr(args, attr, default))
|
@register_model_architecture('fconv_lm', 'fconv_lm_dauphin_wikitext103')
def fconv_lm_dauphin_wikitext103(args):
    """Dauphin et al. GCNN architecture tuned for WikiText-103."""
    # Layer spec is a Python expression string eval()'d by build_model.
    layers = ' + '.join([
        '[(850, 6)] * 3',
        '[(850, 1)] * 1',
        '[(850, 5)] * 4',
        '[(850, 1)] * 1',
        '[(850, 4)] * 3',
        '[(1024, 4)] * 1',
        '[(2048, 4)] * 1',
    ])
    args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 280)
    args.decoder_layers = getattr(args, 'decoder_layers', layers)
    args.decoder_attention = getattr(args, 'decoder_attention', 'False')
    args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', '10000,20000,200000')
    # Fill in any remaining defaults.
    base_lm_architecture(args)
|
@register_model_architecture('fconv_lm', 'fconv_lm_dauphin_gbw')
def fconv_lm_dauphin_gbw(args):
    """Dauphin et al. GCNN architecture tuned for the Google Billion Words corpus."""
    # Layer spec is a Python expression string eval()'d by build_model; the
    # three-tuples are (dim, kernel_size, residual) blocks.
    layers = ' + '.join([
        '[(512, 5)]',
        '[(128, 1, 0), (128, 5, 0), (512, 1, 3)] * 3',
        '[(512, 1, 0), (512, 5, 0), (1024, 1, 3)] * 3',
        '[(1024, 1, 0), (1024, 5, 0), (2048, 1, 3)] * 6',
        '[(1024, 1, 0), (1024, 5, 0), (4096, 1, 3)]',
    ])
    args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 128)
    args.decoder_layers = getattr(args, 'decoder_layers', layers)
    args.decoder_attention = getattr(args, 'decoder_attention', 'False')
    args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', '10000,50000,200000')
    # Fill in any remaining defaults.
    base_lm_architecture(args)
|
@register_model('fconv_self_att')
class FConvModelSelfAtt(FairseqEncoderDecoderModel):
    """Convolutional encoder-decoder model with optional self-attention and an
    optional frozen pre-trained encoder/decoder pair fused into the decoder.
    """

    @classmethod
    def hub_models(cls):
        # Pre-trained checkpoints published for this architecture.
        return {'conv.stories': 'https://dl.fbaipublicfiles.com/fairseq/models/stories_checkpoint.tar.bz2', 'data.stories': 'https://dl.fbaipublicfiles.com/fairseq/data/stories_test.tar.bz2'}

    def __init__(self, encoder, decoder, pretrained_encoder=None):
        super().__init__(encoder, decoder)
        # The encoder uses this to scale gradients coming back from the
        # decoder's attention layers (see FConvEncoder.forward).
        self.encoder.num_attention_layers = sum(((layer is not None) for layer in decoder.attention))
        self.pretrained_encoder = pretrained_encoder
        if (self.pretrained_encoder is None):
            encoders = {'encoder': encoder}
        else:
            encoders = {'encoder': encoder, 'pretrained': self.pretrained_encoder}
        # NOTE: replaces the self.encoder assigned by super().__init__ with a
        # CompositeEncoder wrapping the main (and optional pretrained) encoder.
        self.encoder = CompositeEncoder(encoders)

    @staticmethod
    def add_args(parser):
        """Add model-specific arguments to the parser."""
        parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability')
        parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension')
        parser.add_argument('--encoder-layers', type=str, metavar='EXPR', help='encoder layers [(dim, kernel_size), ...]')
        parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension')
        parser.add_argument('--decoder-layers', type=str, metavar='EXPR', help='decoder layers [(dim, kernel_size), ...]')
        parser.add_argument('--decoder-out-embed-dim', type=int, metavar='N', help='decoder output embedding dimension')
        parser.add_argument('--decoder-attention', type=str, metavar='EXPR', help='decoder attention [True, ...]')
        parser.add_argument('--self-attention', type=str, metavar='EXPR', help='decoder self-attention layers, ex: [True] + [False]*5')
        parser.add_argument('--multihead-attention-nheads', type=int, help='Number of heads to use in attention')
        parser.add_argument('--multihead-self-attention-nheads', type=int, help='Number of heads to use in self-attention')
        parser.add_argument('--encoder-attention', type=str, metavar='EXPR', help='encoder attention [True, ...]')
        parser.add_argument('--encoder-attention-nheads', type=int, help='Number of heads to use in encoder attention')
        parser.add_argument('--project-input', type=str, metavar='EXPR', help='Use projections in self-attention [True, ...]')
        parser.add_argument('--gated-attention', type=str, metavar='EXPR', help='Use GLU layers in self-attention projections [True, ...]')
        parser.add_argument('--downsample', type=str, metavar='EXPR', help='Use downsampling in self-attention [True, ...]')
        parser.add_argument('--pretrained-checkpoint', metavar='DIR', help='path to load checkpoint from pretrained model')
        parser.add_argument('--pretrained', type=str, metavar='EXPR', help='use pretrained model when training [True, ...]')

    @classmethod
    def build_model(cls, args, task):
        """Build a new model instance."""
        (trained_encoder, trained_decoder) = (None, None)
        # NOTE(review): args.pretrained and the layer/attention specs below are
        # eval()'d strings from CLI/config — trusted input only.
        pretrained = eval(args.pretrained)
        if pretrained:
            print('| loading pretrained model')
            trained_model = checkpoint_utils.load_model_ensemble(filenames=[args.pretrained_checkpoint], task=task)[0][0]
            # NOTE(review): relies on module registration order — children()[0]
            # being the encoder and children()[1] the decoder; confirm this
            # holds for the checkpointed model class.
            trained_decoder = list(trained_model.children())[1]
            trained_encoder = list(trained_model.children())[0]
            # Freeze the pre-trained model; only the new model is trained.
            for param in trained_decoder.parameters():
                param.requires_grad = False
            for param in trained_encoder.parameters():
                param.requires_grad = False
        encoder = FConvEncoder(task.source_dictionary, embed_dim=args.encoder_embed_dim, convolutions=eval(args.encoder_layers), dropout=args.dropout, max_positions=args.max_source_positions, attention=eval(args.encoder_attention), attention_nheads=args.encoder_attention_nheads)
        decoder = FConvDecoder(task.target_dictionary, embed_dim=args.decoder_embed_dim, convolutions=eval(args.decoder_layers), out_embed_dim=args.decoder_out_embed_dim, attention=eval(args.decoder_attention), dropout=args.dropout, max_positions=args.max_target_positions, selfattention=eval(args.self_attention), attention_nheads=args.multihead_attention_nheads, selfattention_nheads=args.multihead_self_attention_nheads, project_input=eval(args.project_input), gated_attention=eval(args.gated_attention), downsample=eval(args.downsample), pretrained=pretrained, trained_decoder=trained_decoder)
        model = FConvModelSelfAtt(encoder, decoder, trained_encoder)
        return model

    @property
    def pretrained(self):
        # True when a frozen pre-trained encoder is attached.
        return (self.pretrained_encoder is not None)
|
class FConvEncoder(FairseqEncoder):
    """Convolutional encoder.

    Args:
        dictionary: source vocabulary
        embed_dim (int): token/position embedding dimension
        max_positions (int): maximum source length supported
        convolutions: iterable of (out_channels, kernel_size) per layer
        dropout (float): dropout probability
        attention: bool or per-layer list of bools — whether to apply
            self-attention after each convolutional layer
        attention_nheads (int): heads for the per-layer self-attention
    """
    def __init__(self, dictionary, embed_dim=512, max_positions=1024, convolutions=(((512, 3),) * 20), dropout=0.1, attention=False, attention_nheads=1):
        super().__init__(dictionary)
        self.dropout = dropout
        # Set externally (see FConvModelSelfAtt.__init__) to the number of
        # decoder attention layers; used for gradient scaling in forward().
        self.num_attention_layers = None
        num_embeddings = len(dictionary)
        self.padding_idx = dictionary.pad()
        self.embed_tokens = Embedding(num_embeddings, embed_dim, self.padding_idx)
        self.embed_positions = PositionalEmbedding(max_positions, embed_dim, self.padding_idx)
        def expand_bool_array(val):
            # Allow a single bool to mean "same setting for every layer".
            if isinstance(val, bool):
                return ([val] * len(convolutions))
            return val
        attention = expand_bool_array(attention)
        in_channels = convolutions[0][0]
        self.fc1 = Linear(embed_dim, in_channels, dropout=dropout)
        self.projections = nn.ModuleList()
        self.convolutions = nn.ModuleList()
        self.attention = nn.ModuleList()
        # NOTE(review): attproj is created but never populated or used by this
        # encoder — looks vestigial; confirm before removing.
        self.attproj = nn.ModuleList()
        for (i, (out_channels, kernel_size)) in enumerate(convolutions):
            # Project the residual only when the channel count changes.
            self.projections.append((Linear(in_channels, out_channels) if (in_channels != out_channels) else None))
            # 2x channels for the GLU applied after the convolution.
            self.convolutions.append(ConvTBC(in_channels, (out_channels * 2), kernel_size, dropout=dropout))
            self.attention.append((SelfAttention(out_channels, embed_dim, attention_nheads) if attention[i] else None))
            in_channels = out_channels
        self.fc2 = Linear(in_channels, embed_dim)
    def forward(self, src_tokens, src_lengths):
        # Token + learned positional embeddings: (batch, src_len, embed_dim).
        x = (self.embed_tokens(src_tokens) + self.embed_positions(src_tokens))
        x = F.dropout(x, p=self.dropout, training=self.training)
        # Keep the input embeddings (T x B x C) for the second output below.
        input_embedding = x.transpose(0, 1)
        x = self.fc1(x)
        # Padding mask in T x B layout; dropped entirely when no pad exists.
        encoder_padding_mask = src_tokens.eq(self.padding_idx).t()
        if (not encoder_padding_mask.any()):
            encoder_padding_mask = None
        # Switch to T x B x C for ConvTBC.
        x = x.transpose(0, 1)
        for (proj, conv, attention) in zip(self.projections, self.convolutions, self.attention):
            residual = (x if (proj is None) else proj(x))
            if (encoder_padding_mask is not None):
                # Zero padded timesteps before convolving.
                x = x.masked_fill(encoder_padding_mask.unsqueeze((- 1)), 0)
            x = F.dropout(x, p=self.dropout, training=self.training)
            # (Near-)symmetric padding keeps output length equal to input length.
            padding_l = ((conv.kernel_size[0] - 1) // 2)
            padding_r = (conv.kernel_size[0] // 2)
            x = F.pad(x, (0, 0, 0, 0, padding_l, padding_r))
            x = conv(x)
            # GLU halves the channel count back to out_channels.
            x = F.glu(x, dim=2)
            if (attention is not None):
                x = attention(x)
            # Residual connection, scaled to keep the variance stable.
            x = ((x + residual) * math.sqrt(0.5))
        # Back to B x T x C.
        x = x.transpose(1, 0)
        x = self.fc2(x)
        if (encoder_padding_mask is not None):
            encoder_padding_mask = encoder_padding_mask.t()
            x = x.masked_fill(encoder_padding_mask.unsqueeze((- 1)), 0)
        # Scale gradients from the decoder's attention layers so the encoder
        # is not dominated when many layers attend over its output.
        x = GradMultiply.apply(x, (1.0 / (2.0 * self.num_attention_layers)))
        # Second output adds the input embeddings back to the conv output,
        # scaled like the residual connections.
        y = ((x + input_embedding.transpose(0, 1)) * math.sqrt(0.5))
        return {'encoder_out': (x, y), 'encoder_padding_mask': encoder_padding_mask}
    def reorder_encoder_out(self, encoder_out, new_order):
        # Reorder the batch dimension of all outputs (used by beam search).
        encoder_out['encoder_out'] = tuple((eo.index_select(0, new_order) for eo in encoder_out['encoder_out']))
        if (encoder_out['encoder_padding_mask'] is not None):
            encoder_out['encoder_padding_mask'] = encoder_out['encoder_padding_mask'].index_select(0, new_order)
        if ('pretrained' in encoder_out):
            encoder_out['pretrained']['encoder_out'] = tuple((eo.index_select(0, new_order) for eo in encoder_out['pretrained']['encoder_out']))
        return encoder_out
    def max_positions(self):
        """Maximum input length supported by the encoder."""
        return self.embed_positions.max_positions()
|
class FConvDecoder(FairseqDecoder):
    """Convolutional decoder with optional encoder-side attention,
    self-attention, and a gated fusion path with a frozen pre-trained decoder.
    """
    def __init__(self, dictionary, embed_dim=512, out_embed_dim=256, max_positions=1024, convolutions=(((512, 3),) * 8), attention=True, dropout=0.1, selfattention=False, attention_nheads=1, selfattention_nheads=1, project_input=False, gated_attention=False, downsample=False, pretrained=False, trained_decoder=None):
        super().__init__(dictionary)
        # Checkpoint-format version marker, saved with the state dict.
        self.register_buffer('version', torch.Tensor([2]))
        self.pretrained = pretrained
        self.pretrained_decoder = trained_decoder
        self.dropout = dropout
        # When True (and not training), forward() accumulates attention scores.
        self.need_attn = True
        in_channels = convolutions[0][0]
        def expand_bool_array(val):
            # Allow a single bool to mean "same setting for every layer".
            if isinstance(val, bool):
                return ([val] * len(convolutions))
            return val
        attention = expand_bool_array(attention)
        selfattention = expand_bool_array(selfattention)
        if ((not isinstance(attention, list)) or (len(attention) != len(convolutions))):
            raise ValueError('Attention is expected to be a list of booleans of length equal to the number of layers.')
        num_embeddings = len(dictionary)
        padding_idx = dictionary.pad()
        self.embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx)
        self.embed_positions = PositionalEmbedding(max_positions, embed_dim, padding_idx)
        self.fc1 = Linear(embed_dim, in_channels, dropout=dropout)
        self.projections = nn.ModuleList()
        self.convolutions = nn.ModuleList()
        self.attention = nn.ModuleList()
        self.selfattention = nn.ModuleList()
        self.attproj = nn.ModuleList()
        for (i, (out_channels, kernel_size)) in enumerate(convolutions):
            # Project the residual only when the channel count changes.
            self.projections.append((Linear(in_channels, out_channels) if (in_channels != out_channels) else None))
            # Left ("causal") padding of kernel_size-1; 2x channels for GLU.
            self.convolutions.append(LinearizedConv1d(in_channels, (out_channels * 2), kernel_size, padding=(kernel_size - 1), dropout=dropout))
            self.attention.append((DownsampledMultiHeadAttention(out_channels, embed_dim, attention_nheads, project_input=project_input, gated=False, downsample=False) if attention[i] else None))
            # Projects conv features into embed_dim to form attention queries.
            self.attproj.append((Linear(out_channels, embed_dim, dropout=dropout) if attention[i] else None))
            self.selfattention.append((SelfAttention(out_channels, embed_dim, selfattention_nheads, project_input=project_input, gated=gated_attention, downsample=downsample) if selfattention[i] else None))
            in_channels = out_channels
        self.fc2 = Linear(in_channels, out_embed_dim)
        self.fc3 = Linear(out_embed_dim, num_embeddings, dropout=dropout)
        if self.pretrained:
            # Gated fusion of this decoder's features with the frozen
            # pre-trained decoder's features (both out_embed_dim wide).
            self.gate1 = nn.Sequential(Linear((out_embed_dim * 2), out_embed_dim), nn.Sigmoid())
            self.gate2 = nn.Sequential(Linear((out_embed_dim * 2), out_embed_dim), nn.Sigmoid())
            self.joining = nn.Sequential(Linear((out_embed_dim * 2), (out_embed_dim * 2)), LayerNorm((out_embed_dim * 2)), nn.GLU(), Linear(out_embed_dim, (out_embed_dim * 2)), LayerNorm((out_embed_dim * 2)), nn.GLU(), Linear(out_embed_dim, out_embed_dim), LayerNorm(out_embed_dim))
            # The frozen decoder's fc2 output is captured via a forward hook;
            # forward() reads it out of this dict after calling that decoder.
            self.pretrained_outputs = {}
            def save_output():
                def hook(a, b, output):
                    self.pretrained_outputs['out'] = output
                return hook
            self.pretrained_decoder.fc2.register_forward_hook(save_output())
    def forward(self, prev_output_tokens, encoder_out):
        # encoder_out is the CompositeEncoder dict: {'encoder': ...} plus
        # {'pretrained': ...} when fusion with a frozen decoder is enabled.
        trained_encoder_out = (encoder_out['pretrained'] if self.pretrained else None)
        encoder_out = encoder_out['encoder']['encoder_out']
        # encoder_a / encoder_b are the attention keys / values (see the
        # (query, key, value) call convention used by SelfAttention below).
        (encoder_a, encoder_b) = self._split_encoder_out(encoder_out)
        positions = self.embed_positions(prev_output_tokens)
        x = (self.embed_tokens(prev_output_tokens) + positions)
        x = F.dropout(x, p=self.dropout, training=self.training)
        # Target embeddings (T x B x C), added to the attention queries below.
        target_embedding = x.transpose(0, 1)
        x = self.fc1(x)
        # T x B x C layout for the linearized convolutions.
        x = x.transpose(0, 1)
        avg_attn_scores = None
        for (proj, conv, attention, selfattention, attproj) in zip(self.projections, self.convolutions, self.attention, self.selfattention, self.attproj):
            residual = (x if (proj is None) else proj(x))
            x = F.dropout(x, p=self.dropout, training=self.training)
            x = conv(x)
            # GLU halves the channel count back to out_channels.
            x = F.glu(x, dim=2)
            if (attention is not None):
                r = x
                # Query = projected conv features + target embeddings.
                (x, attn_scores) = attention((attproj(x) + target_embedding), encoder_a, encoder_b)
                x = (x + r)
                if ((not self.training) and self.need_attn):
                    # NOTE(review): despite the name, this accumulates a SUM of
                    # per-layer attention scores — no division by layer count.
                    if (avg_attn_scores is None):
                        avg_attn_scores = attn_scores
                    else:
                        avg_attn_scores.add_(attn_scores)
            if (selfattention is not None):
                x = selfattention(x)
            # Residual connection, scaled to keep the variance stable.
            x = ((x + residual) * math.sqrt(0.5))
        # Back to B x T x C.
        x = x.transpose(0, 1)
        x = self.fc2(x)
        x = F.dropout(x, p=self.dropout, training=self.training)
        if (not self.pretrained):
            # No fusion: project straight to vocabulary logits.
            x = self.fc3(x)
        if self.pretrained:
            # Run the frozen decoder; its fc2 output is captured by the hook
            # registered in __init__ into self.pretrained_outputs['out'].
            (trained_x, _) = self.pretrained_decoder.forward(prev_output_tokens, trained_encoder_out)
            y = torch.cat([x, self.pretrained_outputs['out']], dim=(- 1))
            gate1 = self.gate1(y)
            gate2 = self.gate2(y)
            gated_x1 = (gate1 * x)
            gated_x2 = (gate2 * self.pretrained_outputs['out'])
            fusion = torch.cat([gated_x1, gated_x2], dim=(- 1))
            fusion = self.joining(fusion)
            fusion_output = self.fc3(fusion)
            return (fusion_output, avg_attn_scores)
        else:
            return (x, avg_attn_scores)
    def max_positions(self):
        """Maximum output length supported by the decoder."""
        return self.embed_positions.max_positions()
    def make_generation_fast_(self, need_attn=False, **kwargs):
        # Skip accumulating attention scores during generation unless asked.
        self.need_attn = need_attn
    def _split_encoder_out(self, encoder_out):
        """Split and transpose encoder outputs (to B x T x C, contiguous)."""
        (encoder_a, encoder_b) = encoder_out
        encoder_a = encoder_a.transpose(0, 1).contiguous()
        encoder_b = encoder_b.transpose(0, 1).contiguous()
        result = (encoder_a, encoder_b)
        return result
|
class SelfAttention(nn.Module):
    """Self-attention block with a residual connection and layer norm.

    Projects the input into query/key/value of size *embed_dim*, applies
    (future-masked) multi-head attention with a scalar bias, then normalizes
    the attended output added to the residual.
    """

    def __init__(self, out_channels, embed_dim, num_heads, project_input=False, gated=False, downsample=False):
        super().__init__()
        self.attention = DownsampledMultiHeadAttention(out_channels, embed_dim, num_heads, dropout=0, bias=True, project_input=project_input, gated=gated, downsample=downsample)
        self.in_proj_q = Linear(out_channels, embed_dim)
        self.in_proj_k = Linear(out_channels, embed_dim)
        self.in_proj_v = Linear(out_channels, embed_dim)
        self.ln = LayerNorm(out_channels)

    def forward(self, x):
        residual = x
        # Project inputs to q/k/v and attend; future timesteps are masked so
        # the block stays causal. Attention weights are discarded.
        attn_out, _ = self.attention(
            self.in_proj_q(x),
            self.in_proj_k(x),
            self.in_proj_v(x),
            mask_future_timesteps=True,
            use_scalar_bias=True,
        )
        return self.ln(attn_out + residual)
|
def Embedding(num_embeddings, embedding_dim, padding_idx):
    """Token embedding with weights initialized from N(0, 0.1).

    Note that the normal initialization overwrites the whole weight matrix,
    including the padding row that nn.Embedding zeroes in its constructor.
    """
    layer = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
    nn.init.normal_(layer.weight, 0, 0.1)
    return layer
|
def PositionalEmbedding(num_embeddings, embedding_dim, padding_idx):
    """Learned positional embedding with weights initialized from N(0, 0.1)."""
    layer = LearnedPositionalEmbedding(num_embeddings, embedding_dim, padding_idx)
    # Same init scheme as the token embeddings above.
    layer.weight.data.normal_(0, 0.1)
    return layer
|
def Linear(in_features, out_features, dropout=0.0):
    """Linear layer (input: N x T x C) with variance-scaled initialization.

    Weights are drawn from N(0, sqrt((1 - dropout) / in_features)) and the
    bias is zeroed.

    NOTE(review): the original docstring claimed "Weight-normalized", but no
    nn.utils.weight_norm wrapper is applied here (unlike the fconv.py variant
    of this helper) — the docstring was corrected; behavior is unchanged.
    """
    m = nn.Linear(in_features, out_features)
    m.weight.data.normal_(mean=0, std=math.sqrt(((1 - dropout) / in_features)))
    m.bias.data.zero_()
    return m
|
def LinearizedConv1d(in_channels, out_channels, kernel_size, dropout=0.0, **kwargs):
    """Conv1d layer optimized for incremental decoding.

    Weights are drawn from N(0, sqrt(4 * (1 - dropout) / (kernel_size *
    in_channels))) and the bias is zeroed. No weight normalization is applied.
    """
    conv = LinearizedConvolution(in_channels, out_channels, kernel_size, **kwargs)
    std = math.sqrt((4 * (1.0 - dropout)) / (conv.kernel_size[0] * in_channels))
    conv.weight.data.normal_(mean=0, std=std)
    conv.bias.data.zero_()
    return conv
|
def ConvTBC(in_channels, out_channels, kernel_size, dropout=0, **kwargs):
    """Conv1d layer in time-batch-channel (TBC) layout.

    Weights are drawn from N(0, sqrt(4 * (1 - dropout) / (kernel_size *
    in_channels))) and the bias is zeroed. No weight normalization is applied.
    """
    # Local import: this factory deliberately shadows the module class name.
    from fairseq.modules import ConvTBC
    conv = ConvTBC(in_channels, out_channels, kernel_size, **kwargs)
    std = math.sqrt((4 * (1.0 - dropout)) / (conv.kernel_size[0] * in_channels))
    conv.weight.data.normal_(mean=0, std=std)
    conv.bias.data.zero_()
    return conv
|
@register_model_architecture('fconv_self_att', 'fconv_self_att')
def base_architecture(args):
    """Fill in default hyperparameters for the base fconv_self_att architecture.

    Each attribute is only set when the user did not already provide it.
    String-valued entries are Python expressions eval()'d by build_model.
    """
    defaults = [
        ('dropout', 0.1),
        ('encoder_embed_dim', 512),
        ('encoder_layers', '[(512, 3)] * 3'),
        ('decoder_embed_dim', 512),
        ('decoder_layers', '[(512, 3)] * 8'),
        ('decoder_out_embed_dim', 256),
        ('decoder_attention', 'True'),
        ('self_attention', 'False'),
        ('encoder_attention', 'False'),
        ('multihead_attention_nheads', 1),
        ('multihead_self_attention_nheads', 1),
        ('encoder_attention_nheads', 1),
        ('project_input', 'False'),
        ('gated_attention', 'False'),
        ('downsample', 'False'),
        ('pretrained_checkpoint', ''),
        ('pretrained', 'False'),
    ]
    for attr, default in defaults:
        setattr(args, attr, getattr(args, attr, default))
|
@register_model_architecture('fconv_self_att', 'fconv_self_att_wp')
def fconv_self_att_wp(args):
    """Writing-prompts variant: smaller embeddings plus gated, projected,
    downsampled self-attention; remaining defaults come from
    ``base_architecture``.
    """
    variant_defaults = {
        'encoder_embed_dim': 256,
        'encoder_layers': '[(128, 3)] * 2 + [(512,3)] * 1',
        'decoder_embed_dim': 256,
        'decoder_layers': '[(512, 4)] * 4 + [(768, 4)] * 2 + [(1024, 4)] * 1',
        'decoder_out_embed_dim': 256,
        'self_attention': 'True',
        'multihead_self_attention_nheads': 4,
        'project_input': 'True',
        'gated_attention': 'True',
        'downsample': 'True',
    }
    for name, value in variant_defaults.items():
        setattr(args, name, getattr(args, name, value))
    base_architecture(args)
|
@register_model('lightconv_lm')
class LightConvLanguageModel(FairseqLanguageModel):
    """Decoder-only language model built on LightConv/DynamicConv layers."""

    def __init__(self, decoder):
        super().__init__(decoder)

    @staticmethod
    def add_args(parser):
        """Add model-specific arguments to the parser."""
        parser.add_argument('--dropout', default=0.1, type=float, metavar='D', help='dropout probability')
        parser.add_argument('--attention-dropout', default=0.0, type=float, metavar='D', help='dropout probability for attention weights')
        parser.add_argument('--relu-dropout', default=0.0, type=float, metavar='D', help='dropout probability after ReLU in FFN')
        parser.add_argument('--input-dropout', type=float, metavar='D', help='dropout probability of the inputs')
        parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension')
        parser.add_argument('--decoder-output-dim', type=int, metavar='N', help='decoder output dimension')
        parser.add_argument('--decoder-input-dim', type=int, metavar='N', help='decoder input dimension')
        parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N', help='decoder embedding dimension for FFN')
        parser.add_argument('--decoder-layers', type=int, metavar='N', help='num decoder layers')
        parser.add_argument('--decoder-attention-heads', type=int, metavar='N', help='num decoder attention heads or LightConv/DynamicConv heads')
        parser.add_argument('--decoder-normalize-before', default=False, action='store_true', help='apply layernorm before each decoder block')
        parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR', help='comma separated list of adaptive softmax cutoff points. Must be used with adaptive_loss criterion')
        parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D', help='sets adaptive softmax dropout for the tail projections')
        # Fixed copy-paste help text (previously said "adaptive input factor").
        parser.add_argument('--adaptive-softmax-factor', type=float, metavar='N', help='adaptive softmax factor')
        parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true', help='if set, disables positional embeddings (outside self attention)')
        parser.add_argument('--share-decoder-input-output-embed', default=False, action='store_true', help='share decoder input and output embeddings')
        parser.add_argument('--character-embeddings', default=False, action='store_true', help='if set, uses character embedding convolutions to produce token embeddings')
        parser.add_argument('--character-filters', type=str, metavar='LIST', default='[(1, 64), (2, 128), (3, 192), (4, 256), (5, 256), (6, 256), (7, 256)]', help='size of character embeddings')
        parser.add_argument('--character-embedding-dim', type=int, metavar='N', default=4, help='size of character embeddings')
        parser.add_argument('--char-embedder-highway-layers', type=int, metavar='N', default=2, help='number of highway layers for character token embeddder')
        parser.add_argument('--adaptive-input', default=False, action='store_true', help='if set, uses adaptive input')
        parser.add_argument('--adaptive-input-factor', type=float, metavar='N', help='adaptive input factor')
        parser.add_argument('--adaptive-input-cutoff', metavar='EXPR', help='comma separated list of adaptive input cutoff points.')
        parser.add_argument('--tie-adaptive-weights', action='store_true', help='if set, ties the weights of adaptive softmax and adaptive input')
        parser.add_argument('--tie-adaptive-proj', action='store_true', help='if set, ties the projection weights of adaptive softmax and adaptive input')
        parser.add_argument('--decoder-learned-pos', action='store_true', help='use learned positional embeddings in the decoder')
        # LightConv and DynamicConv arguments (was a no-op bare string statement).
        parser.add_argument('--decoder-kernel-size-list', type=(lambda x: options.eval_str_list(x, int)), help='list of kernel size (default: "[3,7,15,31,31,31]")')
        parser.add_argument('--decoder-glu', type=options.eval_bool, help='glu after in proj')
        parser.add_argument('--decoder-conv-type', default='dynamic', type=str, choices=['dynamic', 'lightweight'], help='type of convolution')
        parser.add_argument('--weight-softmax', default=True, type=options.eval_bool)
        parser.add_argument('--weight-dropout', type=float, metavar='D', help='dropout probability for conv weights')

    @classmethod
    def build_model(cls, args, task):
        """Build a new model instance.

        Picks the token embedder (character CNN, adaptive input, or plain
        embedding), validates weight-tying constraints, and wraps a
        LightConvDecoder (decoder-only: no encoder attention).
        """
        base_lm_architecture(args)
        # LM tasks expose tokens_per_sample instead of explicit position limits.
        if not hasattr(args, 'max_source_positions'):
            args.max_source_positions = args.tokens_per_sample
        if not hasattr(args, 'max_target_positions'):
            args.max_target_positions = args.tokens_per_sample
        if args.character_embeddings:
            # NOTE(review): eval() of --character-filters executes arbitrary
            # code from the command line; acceptable for trusted CLI use only.
            embed_tokens = CharacterTokenEmbedder(task.dictionary, eval(args.character_filters), args.character_embedding_dim, args.decoder_embed_dim, args.char_embedder_highway_layers)
        elif args.adaptive_input:
            embed_tokens = AdaptiveInput(len(task.dictionary), task.dictionary.pad(), args.decoder_input_dim, args.adaptive_input_factor, args.decoder_embed_dim, options.eval_str_list(args.adaptive_input_cutoff, type=int))
        else:
            embed_tokens = Embedding(len(task.dictionary), args.decoder_input_dim, task.dictionary.pad())
        if args.tie_adaptive_weights:
            # Tying adaptive softmax to adaptive input requires matching
            # factors, cutoffs, and dimensions on both sides.
            assert args.adaptive_input
            assert args.adaptive_input_factor == args.adaptive_softmax_factor
            assert args.adaptive_softmax_cutoff == args.adaptive_input_cutoff, '{} != {}'.format(args.adaptive_softmax_cutoff, args.adaptive_input_cutoff)
            assert args.decoder_input_dim == args.decoder_output_dim
        decoder = LightConvDecoder(args, task.output_dictionary, embed_tokens, no_encoder_attn=True, final_norm=False)
        # Use cls so subclasses get instances of their own type.
        return cls(decoder)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.