code stringlengths 17 6.64M |
|---|
class MultiCorpusSampledDataset(FairseqDataset):
    """
    Stores multiple instances of FairseqDataset together and in every iteration
    creates a batch by first sampling a dataset according to a specified
    probability distribution and then getting instances from that dataset.

    Args:
        datasets: an OrderedDict of FairseqDataset instances.
        sampling_func: A function for sampling over list of dataset keys.
            The default strategy is to sample uniformly.
    """

    def __init__(self, datasets: Dict[(str, FairseqDataset)], sampling_func: Callable[([List], int)]=None):
        super().__init__()
        assert isinstance(datasets, OrderedDict)
        self.datasets = datasets
        if (sampling_func is None):
            # Default: sample dataset keys uniformly at random.
            sampling_func = uniform_sampler
        self.sampling_func = sampling_func
        # Length of this dataset is the sum of all underlying dataset lengths.
        self.total_num_instances = 0
        for (_, dataset) in datasets.items():
            assert isinstance(dataset, FairseqDataset)
            self.total_num_instances += len(dataset)
        # Per-key orderings; populated lazily by ordered_indices().
        self._ordered_indices = None

    def __len__(self):
        """
        Length of this dataset is the sum of individual datasets
        """
        return self.total_num_instances

    def ordered_indices(self):
        """
        Ordered indices for batching. Here we call the underlying
        dataset's ordered_indices() so that we get the same random ordering
        as we would have from using the underlying dataset directly.
        """
        if (self._ordered_indices is None):
            self._ordered_indices = OrderedDict([(key, dataset.ordered_indices()) for (key, dataset) in self.datasets.items()])
        return np.arange(len(self))

    def _map_index_to_dataset(self, key: int, index: int):
        """
        Different underlying datasets have different lengths. In order to ensure
        we are not accessing an index outside the range of the current dataset
        size, we wrap around. This function should be called after we have
        created an ordering for this and all underlying datasets.
        """
        assert (self._ordered_indices is not None), 'Must call MultiCorpusSampledDataset.ordered_indices() first'
        # Wrap around so indices beyond this dataset's own length stay valid.
        mapped_index = (index % len(self.datasets[key]))
        return self._ordered_indices[key][mapped_index]

    def __getitem__(self, index: int):
        """
        Get the item associated with index from each underlying dataset.
        Since index is in the range of [0, TotalNumInstances], we need to
        map the index to the dataset before retrieving the item.
        """
        return OrderedDict([(key, dataset[self._map_index_to_dataset(key, index)]) for (key, dataset) in self.datasets.items()])

    def collater(self, samples: List[Dict]):
        """
        Generate a mini-batch for this dataset.
        To convert this into a regular mini-batch we use the following
        logic:
            1. Select a dataset using the specified probability distribution.
            2. Call the collater function of the selected dataset.
        """
        if (len(samples) == 0):
            return None
        selected_key = self.sampling_func(list(self.datasets.keys()))
        selected_samples = [sample[selected_key] for sample in samples]
        return self.datasets[selected_key].collater(selected_samples)

    def num_tokens(self, index: int):
        """
        Return an example's length (number of tokens), used for batching. Here
        we return the max across all examples at index across all underlying
        datasets.
        """
        return max((dataset.num_tokens(self._map_index_to_dataset(key, index)) for (key, dataset) in self.datasets.items()))

    def size(self, index: int):
        """
        Return an example's size as a float or tuple. Here we return the max
        across all underlying datasets. This value is used when filtering a
        dataset with max-positions.
        """
        return max((dataset.size(self._map_index_to_dataset(key, index)) for (key, dataset) in self.datasets.items()))

    @property
    def supports_prefetch(self):
        # Prefetching only works if every underlying dataset supports it.
        return all((getattr(dataset, 'supports_prefetch', False) for dataset in self.datasets.values()))

    def prefetch(self, indices):
        for (key, dataset) in self.datasets.items():
            dataset.prefetch([self._map_index_to_dataset(key, index) for index in indices])
|
def _flatten(dico, prefix=None):
'Flatten a nested dictionary.'
new_dico = OrderedDict()
if isinstance(dico, dict):
prefix = ((prefix + '.') if (prefix is not None) else '')
for (k, v) in dico.items():
if (v is None):
continue
new_dico.update(_flatten(v, (prefix + k)))
elif isinstance(dico, list):
for (i, v) in enumerate(dico):
new_dico.update(_flatten(v, (((prefix + '.[') + str(i)) + ']')))
else:
new_dico = OrderedDict({prefix: dico})
return new_dico
|
def _unflatten(dico):
'Unflatten a flattened dictionary into a nested dictionary.'
new_dico = OrderedDict()
for (full_k, v) in dico.items():
full_k = full_k.split('.')
node = new_dico
for k in full_k[:(- 1)]:
if (k.startswith('[') and k.endswith(']')):
k = int(k[1:(- 1)])
if (k not in node):
node[k] = OrderedDict()
node = node[k]
node[full_k[(- 1)]] = v
return new_dico
|
class NestedDictionaryDataset(FairseqDataset):
    """Combines a nested dict/list of datasets into a single dataset.

    The definition is flattened with ``_flatten`` so each leaf dataset is
    addressed by a dotted key; items and collated batches are re-nested
    with ``_unflatten``.

    Args:
        defn: a (possibly nested) dict/list whose leaves are datasets.
        sizes: optional size array, or list of size arrays, used by
            ``num_tokens``/``size``.
    """

    def __init__(self, defn, sizes=None):
        super().__init__()
        self.defn = _flatten(defn)
        self.sizes = [sizes] if not isinstance(sizes, (list, tuple)) else sizes
        first = None
        for v in self.defn.values():
            if not isinstance(v, (FairseqDataset, torch.utils.data.Dataset)):
                raise ValueError('Expected Dataset but found: {}'.format(v.__class__))
            first = first or v
            if len(v) > 0:
                # All non-empty leaf datasets must be index-aligned.
                assert len(v) == len(first), 'dataset lengths must match'
        self._len = len(first)

    def __getitem__(self, index):
        return OrderedDict((k, ds[index]) for k, ds in self.defn.items())

    def __len__(self):
        return self._len

    def collater(self, samples):
        """Merge a list of samples to form a mini-batch.

        Args:
            samples (List[dict]): samples to collate

        Returns:
            dict: a mini-batch suitable for forwarding with a Model
        """
        if len(samples) == 0:
            return {}
        sample = OrderedDict()
        for k, ds in self.defn.items():
            try:
                sample[k] = ds.collater([s[k] for s in samples])
            except NotImplementedError:
                # Leaf doesn't define a collater; use torch's default.
                sample[k] = default_collate([s[k] for s in samples])
        return _unflatten(sample)

    def num_tokens(self, index):
        """Return the number of tokens in a sample. This value is used to
        enforce ``--max-tokens`` during batching."""
        return max(s[index] for s in self.sizes)

    def size(self, index):
        """Return an example's size as a float or tuple. This value is used when
        filtering a dataset with ``--max-positions``."""
        if len(self.sizes) == 1:
            return self.sizes[0][index]
        else:
            # BUG FIX: this previously returned a generator expression, which
            # max-positions filtering cannot compare; return a concrete tuple
            # as promised by the docstring.
            return tuple(s[index] for s in self.sizes)

    @property
    def supports_prefetch(self):
        """Whether this dataset supports prefetching."""
        return any(ds.supports_prefetch for ds in self.defn.values())

    def prefetch(self, indices):
        """Prefetch the data required for this epoch."""
        for ds in self.defn.values():
            if getattr(ds, 'supports_prefetch', False):
                ds.prefetch(indices)

    def set_epoch(self, epoch):
        # Propagate epoch changes to every leaf dataset.
        super().set_epoch(epoch)
        for ds in self.defn.values():
            ds.set_epoch(epoch)
|
class NumSamplesDataset(FairseqDataset):
    """Dataset whose every item is 1; collating sums the items, yielding the
    number of samples in the batch."""

    def __getitem__(self, index):
        # Each sample contributes a count of one.
        return 1

    def __len__(self):
        # Reported length is zero; this dataset only contributes counts.
        return 0

    def collater(self, samples):
        return sum(samples)
|
class NumelDataset(BaseWrapperDataset):
    """Reports the number of elements of each item in the wrapped dataset.

    Args:
        dataset: dataset to wrap.
        reduce (bool): if True, collating sums the counts into a scalar;
            otherwise a tensor of per-sample counts is returned.
    """

    def __init__(self, dataset, reduce=False):
        super().__init__(dataset)
        self.reduce = reduce

    def __getitem__(self, index):
        item = self.dataset[index]
        # Tensors get torch.numel; anything else falls back to np.size.
        return torch.numel(item) if torch.is_tensor(item) else np.size(item)

    def __len__(self):
        return len(self.dataset)

    def collater(self, samples):
        return sum(samples) if self.reduce else torch.tensor(samples)
|
class OffsetTokensDataset(BaseWrapperDataset):
    """Adds a constant offset to every token of each item."""

    def __init__(self, dataset, offset):
        super().__init__(dataset)
        self.offset = offset

    def __getitem__(self, idx):
        item = self.dataset[idx]
        return item + self.offset
|
class PadDataset(BaseWrapperDataset):
    """Collates samples into a padded 2D batch via data_utils.collate_tokens.

    Args:
        dataset: dataset of 1D token tensors.
        pad_idx (int): padding symbol index.
        left_pad (bool): pad on the left instead of the right.
    """

    def __init__(self, dataset, pad_idx, left_pad):
        super().__init__(dataset)
        self.pad_idx = pad_idx
        self.left_pad = left_pad

    def collater(self, samples):
        return data_utils.collate_tokens(
            samples, self.pad_idx, left_pad=self.left_pad
        )
|
class LeftPadDataset(PadDataset):
    """PadDataset preconfigured to pad on the left."""

    def __init__(self, dataset, pad_idx):
        super().__init__(dataset, pad_idx, left_pad=True)
|
class RightPadDataset(PadDataset):
    """PadDataset preconfigured to pad on the right."""

    def __init__(self, dataset, pad_idx):
        super().__init__(dataset, pad_idx, left_pad=False)
|
class PlasmaArray(object):
    """
    Wrapper around numpy arrays that automatically moves the data to shared
    memory upon serialization. This is particularly helpful when passing numpy
    arrays through multiprocessing, so that data is not unnecessarily
    duplicated or pickled.
    """

    def __init__(self, array):
        super().__init__()
        self.array = array
        # For arrays under 128 MiB the shared-memory round trip isn't worth
        # it; fall back to ordinary pickling.
        self.disable = (array.nbytes < 134217728)
        self.object_id = None
        self.path = None
        # Process-local handles; never pickled (reset in __getstate__).
        self._client = None
        self._server = None
        self._server_tmp = None
        self._plasma = None

    @property
    def plasma(self):
        # Lazily import pyarrow.plasma; stays None when disabled or the
        # package is not installed.
        if ((self._plasma is None) and (not self.disable)):
            try:
                import pyarrow.plasma as plasma
                self._plasma = plasma
            except ImportError:
                self._plasma = None
        return self._plasma

    def start_server(self):
        """Launch a plasma_store subprocess listening on a temp socket path."""
        if ((self.plasma is None) or (self._server is not None)):
            return
        assert (self.object_id is None)
        assert (self.path is None)
        self._server_tmp = tempfile.NamedTemporaryFile()
        self.path = self._server_tmp.name
        # Give the store ~5% headroom over the array's byte size.
        self._server = subprocess.Popen(['plasma_store', '-m', str(int((1.05 * self.array.nbytes))), '-s', self.path])

    @property
    def client(self):
        # Connect on first use; start_server() must have set self.path.
        if (self._client is None):
            assert (self.path is not None)
            self._client = self.plasma.connect(self.path)
        return self._client

    def __getstate__(self):
        """On pickling, put the array into the plasma store and strip all
        process-local handles from the pickled state."""
        if (self.plasma is None):
            return self.__dict__
        if (self.object_id is None):
            self.start_server()
            self.object_id = self.client.put(self.array)
        state = self.__dict__.copy()
        # The array itself lives in plasma now; only the object_id travels.
        del state['array']
        state['_client'] = None
        state['_server'] = None
        state['_server_tmp'] = None
        state['_plasma'] = None
        return state

    def __setstate__(self, state):
        """On unpickling, fetch the array back from the plasma store."""
        self.__dict__.update(state)
        if (self.plasma is None):
            return
        self.array = self.client.get(self.object_id)

    def __del__(self):
        # Tear down the plasma_store subprocess and its socket tempfile.
        if (self._server is not None):
            self._server.kill()
            self._server = None
            self._server_tmp.close()
            self._server_tmp = None
|
class PrependDataset(BaseWrapperDataset):
    """Overwrites the first token of each item with a value chosen per index.

    Args:
        dataset: dataset to wrap; items may be tensors or tuples whose first
            element is the source tensor.
        prepend_getter: callable ``(dataset, idx) -> int`` giving the token
            to write at position 0.
        ensure_first_token_is: if given, assert the existing first token
            equals this value before overwriting.
    """

    def __init__(self, dataset, prepend_getter, ensure_first_token_is=None):
        super().__init__(dataset)
        self.prepend_getter = prepend_getter
        self.ensure_first_token = ensure_first_token_is

    def __getitem__(self, idx):
        item = self.dataset[idx]
        is_tuple = isinstance(item, tuple)
        src = item[0] if is_tuple else item
        assert self.ensure_first_token is None or src[0] == self.ensure_first_token
        new_first = self.prepend_getter(self.dataset, idx)
        assert isinstance(new_first, int)
        # Overwrite in place rather than growing the tensor.
        src[0] = new_first
        if is_tuple:
            return (src,) + item[1:]
        return src
|
class PrependTokenDataset(BaseWrapperDataset):
    """Prepends a single token (if given) to every item, adjusting sizes."""

    def __init__(self, dataset, token=None):
        super().__init__(dataset)
        self.token = token
        if token is None:
            self._sizes = dataset.sizes
        else:
            # Every example grows by exactly one position.
            self._sizes = np.array(dataset.sizes) + 1

    def __getitem__(self, idx):
        item = self.dataset[idx]
        if self.token is None:
            return item
        # item.new(...) keeps dtype/device consistent with the item.
        return torch.cat([item.new([self.token]), item])

    @property
    def sizes(self):
        return self._sizes

    def num_tokens(self, index):
        count = self.dataset.num_tokens(index)
        return count + 1 if self.token is not None else count

    def size(self, index):
        length = self.dataset.size(index)
        return length + 1 if self.token is not None else length
|
class RawLabelDataset(FairseqDataset):
    """Wraps a plain Python sequence of labels as a FairseqDataset."""

    def __init__(self, labels):
        super().__init__()
        self.labels = labels

    def __getitem__(self, index):
        return self.labels[index]

    def __len__(self):
        return len(self.labels)

    def collater(self, samples):
        # Labels collate directly into a 1D tensor.
        return torch.tensor(samples)
|
class ReplaceDataset(BaseWrapperDataset):
    """Replaces tokens found in the dataset by a specified replacement token

    Args:
        dataset (~torch.utils.data.Dataset): dataset to replace tokens in
        replace_map(Dictionary[int,int]): map of token to replace -> replacement token
        offsets (List[int]): do not replace tokens before (from left if pos, right if neg) this offset. should be
            as many as the number of objects returned by the underlying dataset __getitem__ method.
    """

    def __init__(self, dataset, replace_map, offsets):
        super().__init__(dataset)
        assert len(replace_map) > 0
        self.replace_map = replace_map
        self.offsets = offsets

    def __getitem__(self, index):
        item = self.dataset[index]
        is_tuple = isinstance(item, tuple)
        srcs = item if is_tuple else [item]
        for offset, src in zip(self.offsets, srcs):
            # Only the region past the offset is eligible for replacement;
            # the slice is a view, so masked_fill_ mutates src in place.
            region = src[offset:] if offset >= 0 else src[:offset]
            for old_tok, new_tok in self.replace_map.items():
                region.masked_fill_(region == old_tok, new_tok)
        return srcs if is_tuple else srcs[0]
|
class ResamplingDataset(BaseWrapperDataset):
    """Randomly samples from a given dataset at each epoch.

    Sampling is done with or without replacement, depending on the "replace"
    parameter.

    Optionally, the epoch size can be rescaled. This is potentially desirable
    to increase per-epoch coverage of the base dataset (since sampling with
    replacement means that many items in the dataset will be left out). In the
    case of sampling without replacement, size_ratio should be strictly less
    than 1.

    Args:
        dataset (~torch.utils.data.Dataset): dataset on which to sample.
        weights (List[float]): list of probability weights
            (default: None, which corresponds to uniform sampling).
        replace (bool): sampling mode; True for "with replacement", or False
            for "without replacement" (default: True)
        size_ratio (float): the ratio to subsample to; must be positive
            (default: 1.0).
        batch_by_size (bool): whether or not to batch by sequence length
            (default: True).
        seed (int): RNG seed to use (default: 0).
        epoch (int): starting epoch number (default: 0).
    """

    def __init__(self, dataset, weights=None, replace=True, size_ratio=1.0, batch_by_size=True, seed=0, epoch=0):
        super().__init__(dataset)
        if (weights is None):
            self.weights = None
        else:
            assert (len(weights) == len(dataset))
            weights_arr = np.array(weights, dtype=np.float64)
            # Normalize into a probability distribution.
            weights_arr /= weights_arr.sum()
            # PlasmaArray avoids duplicating the weights when this dataset
            # is pickled across dataloader worker processes.
            self.weights = plasma_utils.PlasmaArray(weights_arr)
        self.replace = replace
        assert (size_ratio > 0.0)
        if (not self.replace):
            # Without replacement we cannot draw more than the dataset size.
            assert (size_ratio < 1.0)
        self.size_ratio = float(size_ratio)
        self.actual_size = np.ceil((len(dataset) * self.size_ratio)).astype(int)
        self.batch_by_size = batch_by_size
        self.seed = seed
        self._cur_epoch = None
        self._cur_indices = None
        # Draw the initial set of indices for the starting epoch.
        self.set_epoch(epoch)

    def __getitem__(self, index):
        # Indirect through the per-epoch resampled index array.
        return self.dataset[self._cur_indices.array[index]]

    def __len__(self):
        return self.actual_size

    @property
    def sizes(self):
        # Sizes are re-indexed by the current epoch's sample of indices.
        if isinstance(self.dataset.sizes, list):
            return [s[self._cur_indices.array] for s in self.dataset.sizes]
        return self.dataset.sizes[self._cur_indices.array]

    def num_tokens(self, index):
        return self.dataset.num_tokens(self._cur_indices.array[index])

    def size(self, index):
        return self.dataset.size(self._cur_indices.array[index])

    def ordered_indices(self):
        if self.batch_by_size:
            # lexsort uses the last key as primary, so sizes dominate.
            order = [np.arange(len(self)), self.sizes]
            return np.lexsort(order)
        else:
            return np.arange(len(self))

    def prefetch(self, indices):
        self.dataset.prefetch(self._cur_indices.array[indices])

    def set_epoch(self, epoch):
        """Re-draw the sampled indices deterministically for *epoch*."""
        super().set_epoch(epoch)
        if (epoch == self._cur_epoch):
            return
        self._cur_epoch = epoch
        # Seed sequence mixes a constant, the user seed, and the epoch so
        # every epoch yields a different but reproducible sample.
        rng = np.random.RandomState([42, (self.seed % (2 ** 32)), self._cur_epoch])
        self._cur_indices = plasma_utils.PlasmaArray(rng.choice(len(self.dataset), self.actual_size, replace=self.replace, p=(None if (self.weights is None) else self.weights.array)))
|
class RollDataset(BaseWrapperDataset):
    """Applies torch.roll with a fixed shift to every item."""

    def __init__(self, dataset, shifts):
        super().__init__(dataset)
        self.shifts = shifts

    def __getitem__(self, index):
        return torch.roll(self.dataset[index], self.shifts)
|
class RoundRobinZipDatasets(FairseqDataset):
    """Zip multiple :class:`~fairseq.data.FairseqDataset` instances together.

    Shorter datasets are repeated in a round-robin fashion to match the length
    of the longest one.

    Args:
        datasets (Dict[~fairseq.data.FairseqDataset]): a dictionary of
            :class:`~fairseq.data.FairseqDataset` instances.
        eval_key (str, optional): a key used at evaluation time that causes
            this instance to pass-through batches from *datasets[eval_key]*.
    """

    def __init__(self, datasets, eval_key=None):
        super().__init__()
        assert isinstance(datasets, OrderedDict)
        self.datasets = datasets
        self.eval_key = eval_key
        # The longest dataset determines this dataset's overall length.
        self.longest_dataset = None
        self.longest_dataset_key = None
        for (key, dataset) in datasets.items():
            assert isinstance(dataset, FairseqDataset)
            if ((self.longest_dataset is None) or (len(dataset) > len(self.longest_dataset))):
                self.longest_dataset = dataset
                self.longest_dataset_key = key
        # Per-key orderings; populated lazily by ordered_indices().
        self._ordered_indices = None

    def _map_index(self, key, index):
        # Wrap index around shorter datasets (round-robin repetition).
        assert (self._ordered_indices is not None), 'Must call RoundRobinZipDatasets.ordered_indices() first'
        return self._ordered_indices[key][(index % len(self.datasets[key]))]

    def __getitem__(self, index):
        if (self.eval_key is None):
            return OrderedDict([(key, dataset[self._map_index(key, index)]) for (key, dataset) in self.datasets.items()])
        else:
            # At evaluation time only the eval_key dataset is used.
            return self.datasets[self.eval_key][self._map_index(self.eval_key, index)]

    def __len__(self):
        return len(self.longest_dataset)

    def collater(self, samples):
        """Merge a list of samples to form a mini-batch."""
        if (len(samples) == 0):
            return None
        if (self.eval_key is None):
            return OrderedDict([(key, dataset.collater([sample[key] for sample in samples])) for (key, dataset) in self.datasets.items()])
        else:
            # Samples are already from the eval_key dataset; pass through.
            return self.datasets[self.eval_key].collater(samples)

    def num_tokens(self, index):
        """Return an example's length (number of tokens), used for batching."""
        # Use the max over all zipped datasets so batching stays safe.
        return max((dataset.num_tokens(self._map_index(key, index)) for (key, dataset) in self.datasets.items()))

    def size(self, index):
        """Return an example's size as a float or tuple. This value is used when
        filtering a dataset with ``--max-positions``."""
        return {key: dataset.size(self._map_index(key, index)) for (key, dataset) in self.datasets.items()}

    def ordered_indices(self):
        """Ordered indices for batching."""
        if (self._ordered_indices is None):
            # Cache each dataset's own ordering; _map_index indexes into it.
            self._ordered_indices = OrderedDict([(key, dataset.ordered_indices()) for (key, dataset) in self.datasets.items()])
        return np.arange(len(self))

    @property
    def supports_prefetch(self):
        # Prefetching only works if every zipped dataset supports it.
        return all((getattr(dataset, 'supports_prefetch', False) for dataset in self.datasets.values()))

    def prefetch(self, indices):
        for (key, dataset) in self.datasets.items():
            dataset.prefetch([self._map_index(key, index) for index in indices])
|
class ShardedDataset(BaseWrapperDataset):
    """Loads a dataset which has been sharded into multiple files.

    Each epoch, one ``shard<i>`` directory is chosen (seeded by
    ``seed ^ epoch``) for the train split; other splits load directly.

    Args:
        dictionary: dictionary used by load_indexed_dataset.
        dataset_impl (str): indexed dataset implementation name.
        path (str): root directory possibly containing ``shard<i>`` dirs.
        split (str): data split name; shard selection applies to 'train' only.
        epoch (int): current epoch, mixed into the shard-selection seed.
        name (str, optional): dataset name; defaults to basename of *path*.
        combine (bool): forwarded to load_indexed_dataset.
        seed (int): base RNG seed for shard selection.
    """

    def __init__(self, dictionary, dataset_impl: str, path: str, split: str, epoch: int, name: str=None, combine: bool=False, seed: int=0):
        self._name = (name if (name is not None) else os.path.basename(path))
        # Count consecutive shard0, shard1, ... directories under path.
        num_shards = 0
        for i in itertools.count():
            if (not os.path.exists(os.path.join(path, ('shard' + str(i))))):
                break
            num_shards += 1
        if ((num_shards > 0) and (split == 'train')):
            # Deterministically pick one shard for this (seed, epoch) pair.
            random.seed((seed ^ epoch))
            shard = random.randint(0, (num_shards - 1))
            split_path = os.path.join(path, ('shard' + str(shard)), split)
        else:
            split_path = os.path.join(path, split)
        if os.path.isdir(split_path):
            # Layout where the split directory contains a file of the same name.
            split_path = os.path.join(split_path, split)
        dataset = data_utils.load_indexed_dataset(split_path, dictionary, dataset_impl, combine=combine)
        if (dataset is None):
            raise FileNotFoundError('Dataset not found: {} ({})'.format(split, split_path))
        super().__init__(dataset)

    @property
    def name(self):
        return self._name
|
class SortDataset(BaseWrapperDataset):
    """Orders batching indices with a lexicographic sort over the given keys.

    Args:
        dataset: dataset to wrap.
        sort_order: one array (or a list/tuple of arrays) of per-example
            keys, each the same length as the dataset.
    """

    def __init__(self, dataset, sort_order):
        super().__init__(dataset)
        if isinstance(sort_order, (list, tuple)):
            self.sort_order = sort_order
        else:
            self.sort_order = [sort_order]
        assert all(len(so) == len(dataset) for so in self.sort_order)

    def ordered_indices(self):
        # np.lexsort treats the LAST key as primary; earlier keys break ties.
        return np.lexsort(self.sort_order)
|
class StripTokenDataset(BaseWrapperDataset):
    """Drops every occurrence of a given token id from each item."""

    def __init__(self, dataset, id_to_strip):
        super().__init__(dataset)
        self.id_to_strip = id_to_strip

    def __getitem__(self, index):
        item = self.dataset[index]
        keep_mask = item.ne(self.id_to_strip)
        return item[keep_mask]
|
class SubsampleDataset(BaseWrapperDataset):
    """Subsamples a given dataset by a specified ratio. Subsampling is done on the number of examples

    Args:
        dataset (~torch.utils.data.Dataset): dataset to subsample
        size_ratio(float): the ratio to subsample to. must be between 0 and 1 (exclusive)
        shuffle (bool): shuffle the subsampled indices in ordered_indices()
            (default: False, matching the previous sorted-only behavior)
    """

    def __init__(self, dataset, size_ratio, shuffle=False):
        super().__init__(dataset)
        assert size_ratio < 1
        self.actual_size = np.ceil(len(dataset) * size_ratio).astype(int)
        # Draw the subsample once, without replacement, at construction time.
        self.indices = np.random.choice(list(range(len(self.dataset))), self.actual_size, replace=False)
        # BUG FIX: ordered_indices() reads self.shuffle, but it was never
        # assigned, raising AttributeError at batching time. Expose it as a
        # backward-compatible keyword argument defaulting to False.
        self.shuffle = shuffle
        logger.info('subsampled dataset from {} to {} (ratio={})'.format(len(self.dataset), self.actual_size, size_ratio))

    def __getitem__(self, index):
        # Indirect through the fixed subsample of indices.
        return self.dataset[self.indices[index]]

    def __len__(self):
        return self.actual_size

    def collater(self, samples):
        return self.dataset.collater(samples)

    @property
    def sizes(self):
        return self.dataset.sizes[self.indices]

    @property
    def name(self):
        return self.dataset.name

    def num_tokens(self, index):
        return self.dataset.num_tokens(self.indices[index])

    def size(self, index):
        return self.dataset.size(self.indices[index])

    def ordered_indices(self):
        """Return an ordered list of indices. Batches will be constructed based
        on this order."""
        if self.shuffle:
            order = [np.random.permutation(len(self))]
        else:
            order = [np.arange(len(self))]
        # lexsort's last key is primary, so sizes dominate the ordering.
        order.append(self.sizes)
        return np.lexsort(order)

    def prefetch(self, indices):
        self.dataset.prefetch(self.indices[indices])
|
class TransformEosDataset(FairseqDataset):
    """A :class:`~fairseq.data.FairseqDataset` wrapper that appends/prepends/strips EOS.

    Note that the transformation is applied in :func:`collater`.

    Args:
        dataset (~fairseq.data.FairseqDataset): dataset to wrap
        eos (int): index of the end-of-sentence symbol
        append_eos_to_src (bool, optional): append EOS to the end of src
        remove_eos_from_src (bool, optional): remove EOS from the end of src
        append_eos_to_tgt (bool, optional): append EOS to the end of tgt
        remove_eos_from_tgt (bool, optional): remove EOS from the end of tgt
    """

    def __init__(self, dataset, eos, append_eos_to_src=False, remove_eos_from_src=False, append_eos_to_tgt=False, remove_eos_from_tgt=False, has_target=True):
        if (not isinstance(dataset, FairseqDataset)):
            raise ValueError('dataset must be an instance of FairseqDataset')
        if (append_eos_to_src and remove_eos_from_src):
            raise ValueError('cannot combine append_eos_to_src and remove_eos_from_src')
        if (append_eos_to_tgt and remove_eos_from_tgt):
            raise ValueError('cannot combine append_eos_to_tgt and remove_eos_from_tgt')
        self.dataset = dataset
        self.eos = torch.LongTensor([eos])
        self.append_eos_to_src = append_eos_to_src
        self.remove_eos_from_src = remove_eos_from_src
        self.append_eos_to_tgt = append_eos_to_tgt
        self.remove_eos_from_tgt = remove_eos_from_tgt
        self.has_target = has_target
        # Net length change for src/tgt caused by the transformations; used
        # by size() so max-positions filtering sees post-transform lengths.
        self._src_delta = 0
        self._src_delta += (1 if append_eos_to_src else 0)
        self._src_delta -= (1 if remove_eos_from_src else 0)
        self._tgt_delta = 0
        self._tgt_delta += (1 if append_eos_to_tgt else 0)
        self._tgt_delta -= (1 if remove_eos_from_tgt else 0)
        # Sanity checks run only once, on the first collated batch.
        self._checked_src = False
        self._checked_tgt = False

    def _check_src(self, src, expect_eos):
        # One-time check that src's trailing EOS matches expectations.
        if (not self._checked_src):
            assert ((src[(- 1)] == self.eos[0]) == expect_eos)
            self._checked_src = True

    def _check_tgt(self, tgt, expect_eos):
        # One-time check that tgt's trailing EOS matches expectations.
        if (self.has_target and (not self._checked_tgt)):
            assert ((tgt[(- 1)] == self.eos[0]) == expect_eos)
            self._checked_tgt = True

    def __getitem__(self, index):
        # Items are returned untouched; EOS handling happens in collater().
        return self.dataset[index]

    def __len__(self):
        return len(self.dataset)

    def collater(self, samples):
        """Apply the configured EOS transforms to each sample, then delegate
        to the wrapped dataset's collater."""
        def transform(item):
            if self.append_eos_to_src:
                # Keep the cached EOS tensor on the same device as the data.
                self.eos = self.eos.to(device=item['source'].device)
                self._check_src(item['source'], expect_eos=False)
                item['source'] = torch.cat([item['source'], self.eos])
            if self.remove_eos_from_src:
                self.eos = self.eos.to(device=item['source'].device)
                self._check_src(item['source'], expect_eos=True)
                item['source'] = item['source'][:(- 1)]
            if self.append_eos_to_tgt:
                self.eos = self.eos.to(device=item['target'].device)
                self._check_tgt(item['target'], expect_eos=False)
                item['target'] = torch.cat([item['target'], self.eos])
            if self.remove_eos_from_tgt:
                self.eos = self.eos.to(device=item['target'].device)
                self._check_tgt(item['target'], expect_eos=True)
                item['target'] = item['target'][:(- 1)]
            return item
        samples = list(map(transform, samples))
        return self.dataset.collater(samples)

    def num_tokens(self, index):
        return self.dataset.num_tokens(index)

    def size(self, index):
        if self.has_target:
            # Report post-transform lengths for max-positions filtering.
            (src_len, tgt_len) = self.dataset.size(index)
            return ((src_len + self._src_delta), (tgt_len + self._tgt_delta))
        else:
            return self.dataset.size(index)

    def ordered_indices(self):
        # EOS transforms don't reorder examples; delegate directly.
        return self.dataset.ordered_indices()

    @property
    def supports_prefetch(self):
        return getattr(self.dataset, 'supports_prefetch', False)

    def prefetch(self, indices):
        return self.dataset.prefetch(indices)
|
class TruncateDataset(BaseWrapperDataset):
    """Truncates every item to at most ``truncation_length`` positions."""

    def __init__(self, dataset, truncation_length):
        super().__init__(dataset)
        assert truncation_length is not None
        self.truncation_length = truncation_length
        self.dataset = dataset

    def __getitem__(self, index):
        item = self.dataset[index]
        if item.size(0) > self.truncation_length:
            return item[: self.truncation_length]
        return item

    @property
    def sizes(self):
        # Reported sizes are capped at the truncation length.
        return np.minimum(self.dataset.sizes, self.truncation_length)

    def __len__(self):
        return len(self.dataset)
|
class GELU(torch.nn.Module):
    """Applies the Gaussian Error Linear Unit activation element-wise."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        return torch.nn.functional.gelu(x)
|
class Swish(torch.nn.Module):
    """Applies the Swish (SiLU) activation: ``x * sigmoid(x)``."""

    def __init__(self):
        super().__init__()
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        return x * self.sigmoid(x)
|
def get_activation_layer(name):
    """Return a freshly-constructed activation module for *name*.

    Unknown names report the supported list via print_error_message and
    return None.
    """
    # Dispatch table of constructors; each call builds a new module.
    factories = {
        'relu': lambda: nn.ReLU(inplace=False),
        'leaky': lambda: nn.LeakyReLU(negative_slope=0.1, inplace=False),
        'selu': lambda: nn.SELU(inplace=True),
        'elu': lambda: nn.ELU(inplace=True),
        'celu': lambda: nn.CELU(inplace=True),
        'prelu': lambda: nn.PReLU(),
        'sigmoid': lambda: nn.Sigmoid(),
        'tanh': lambda: nn.Tanh(),
        'gelu': lambda: GELU(),
        'swish': lambda: Swish(),
    }
    if name in factories:
        return factories[name]()
    print_error_message('Supported activation functions: {}'.format(activation_list))
    return None
|
class DExTraEmb(nn.Module):
    """
    This class implements embeddings similar to DeFINE embeddings introduced in
    https://arxiv.org/abs/1911.12385
    """

    def __init__(self, args, map_layer, use_bias: bool=True):
        """
        :param args: Argument list
        :param map_layer: Mapping layer (Adaptive or standard)
        :param use_bias: Use bias or not
        """
        super(DExTraEmb, self).__init__()
        self.map_layer = map_layer
        self.input_features = args.delight_emb_map_dim
        self.embedding_dim = args.delight_emb_out_dim
        self.dextra_layer = DExTraUnit(in_features=self.input_features, in_proj_features=(self.embedding_dim // 2), out_features=self.embedding_dim, width_multiplier=args.delight_emb_width_mult, dextra_depth=args.delight_emb_depth, dextra_dropout=args.delight_dropout, max_glt_groups=args.delight_emb_max_groups, act_type=args.act_type, norm_type=args.norm_type, use_bias=use_bias, is_iclr_version=args.define_iclr, glt_shuffle=args.glt_shuffle)
        if args.adaptive_input:
            # No extra scaling on top of adaptive input embeddings.
            self.embed_scale = 1.0
        else:
            # Standard sqrt(dim) embedding scaling unless disabled.
            self.embed_scale = (1.0 if args.no_scale_embedding else math.sqrt(self.input_features))
        # Same dropout mask is shared across all time steps (batch-first).
        self.drop_layer = RecurrentDropout(p=args.delight_emb_dropout, batch_first=True)

    def forward(self, x):
        """
        B --> Batch size
        T --> Time steps
        E --> Embedding dimension
        :param x: Input of shape [B x T]
        :return: Output of shape [B x T x E]
        """
        assert (x.dim() == 2), 'Input should be [B x T]'
        # Token ids -> (scaled) base embeddings.
        x = (self.map_layer(x) * self.embed_scale)
        x = self.drop_layer(x)
        # Expand/contract through the DExTra unit to the output dimension.
        x = self.dextra_layer(x)
        return x

    def __repr__(self):
        s = '{name}(in_features={input_features}, output_features={embedding_dim})'
        s += '\n \t {}'.format(self.map_layer)
        s += '\n \t {}'.format(self.dextra_layer)
        return s.format(name=self.__class__.__name__, **self.__dict__)

    def compute_macs_params(self):
        """Return a dict splitting MACs/params into embedding vs non-embedding
        contributions for this module."""
        emb_params = 0
        emb_macs = 0
        non_emb_macs = 0
        non_emb_params = 0
        # Imported here to avoid a circular import at module load time.
        from fairseq.modules.adaptive_input import AdaptiveInput
        if isinstance(self.map_layer, nn.Embedding):
            # Plain embedding lookup: parameters only, no multiply-adds.
            emb_params += self.map_layer.weight.numel()
            emb_macs += 0
        elif isinstance(self.map_layer, AdaptiveInput):
            mac_params_adaptive = self.map_layer.compute_macs_params()
            emb_macs += mac_params_adaptive['embedding_macs']
            emb_params += mac_params_adaptive['embedding_params']
            # Projection layers count as non-embedding compute.
            non_emb_macs += mac_params_adaptive['proj_macs']
            non_emb_params += mac_params_adaptive['proj_params']
        macs_params_define = self.dextra_layer.compute_macs_params()
        non_emb_macs += macs_params_define['macs']
        non_emb_params += macs_params_define['params']
        return {'name': self.__class__.__name__, 'emb_params': emb_params, 'emb_macs': emb_macs, 'non_emb_macs': non_emb_macs, 'non_emb_params': non_emb_params}
|
class RecurrentDropout(nn.Module):
    """
    Applies the same dropout mask across all time steps
    """

    def __init__(self, p, batch_first=False):
        """
        :param p: Dropout probability
        :param batch_first: Batch first or not
        """
        super().__init__()
        self.p = p
        self.keep_p = 1.0 - p
        self.batch_first = batch_first

    def forward(self, x):
        """
        :param x: Input of dimension [B x T x C] (batch first) or [T x B x C]
        :return: output of dimension [B x T x C] (batch first) or [T x B x C]
        """
        # Identity in eval mode and for degenerate probabilities.
        if (not self.training) or self.p <= 0 or self.p >= 1:
            return x
        assert (x.dim() == 3), 'Input should be [B x T x C] or [T x B x C]'
        # Sample one mask per (batch, channel) and broadcast over time.
        if self.batch_first:
            mask = x.new_empty(x.size(0), 1, x.size(2), requires_grad=False)
        else:
            mask = x.new_empty(1, x.size(1), x.size(2), requires_grad=False)
        mask = mask.bernoulli_(self.keep_p).div_(self.keep_p).expand_as(x)
        return mask * x

    def __repr__(self):
        s = '{name}(p={p})'
        return s.format(name=self.__class__.__name__, **self.__dict__)
|
def bound_function(low, high, value):
    """Clamp *value* to at most *high*, then to at least *low*."""
    capped = min(high, value)
    return max(low, capped)
|
class GroupLinear(nn.Module):
'\n This class implements the Grouped Linear Transform\n This is based on the Pyramidal recurrent unit paper:\n https://arxiv.org/abs/1808.09029\n '
def __init__(self, in_features: int, out_features: int, n_groups: int=4, use_bias: bool=False, use_shuffle: bool=False, norm_type: Optional[str]=None, dropout: float=0.0, act_type: Optional[str]=None):
'\n\n :param in_features: number of input features\n :param out_features: number of output features\n :param n_groups: number of groups in GLT\n :param use_bias: use bias or not\n :param use_shuffle: shuffle features between different groups\n :param norm_type: Normalization type (e.g. LayerNorm)\n :param dropout: Dropout value (default is 0.0)\n :param act_type: Activation type (e.g., Gelu or ReLU)\n '
super(GroupLinear, self).__init__()
if ((in_features % n_groups) != 0):
err_msg = 'Input dimensions ({}) must be divisible by n_groups ({})'.format(in_features, n_groups)
print_error_message(err_msg)
if ((out_features % n_groups) != 0):
err_msg = 'Output dimensions ({}) must be divisible by n_groups ({})'.format(out_features, n_groups)
print_error_message(err_msg)
in_groups = (in_features // n_groups)
out_groups = (out_features // n_groups)
self.weights = nn.Parameter(torch.Tensor(n_groups, in_groups, out_groups))
if use_bias:
self.bias = nn.Parameter(torch.Tensor(n_groups, 1, out_groups))
else:
self.bias = None
if (norm_type is not None):
self.normalization_fn = get_norm_layer(name=norm_type, out_features=out_groups)
self.norm_type = norm_type
else:
self.normalization_fn = None
self.norm_type = None
self.use_dropout = False
self.drop_p = dropout
if (dropout > 0):
self.drop_layer = nn.Dropout(p=dropout)
self.use_dropout = True
if (act_type is not None):
self.act_fn = get_activation_layer(name=act_type)
self.act_type = act_type
else:
self.act_fn = None
self.act_type = None
self.n_groups = n_groups
self.use_bias = use_bias
self.shuffle = use_shuffle
self.feature_shuffle = (True if use_shuffle else False)
self.in_features = in_features
self.out_features = out_features
self.reset_parameters()
def reset_parameters(self):
nn.init.xavier_uniform_(self.weights.data)
if self.use_bias:
nn.init.constant_(self.bias.data, 0)
def process_input_bmm(self, x):
    """Grouped linear transform via a single batched matmul.

    N --> input dimension, M --> output dimension, g --> groups
    :param x: Input of dimension B x N
    :return: Output of dimension B x g x M/g (callers flatten it back to
        B x M — see forward; confirm downstream expectations)
    """
    bsz = x.size(0)
    # [B x N] -> [B x g x N/g] -> [g x B x N/g]
    x = x.contiguous().view(bsz, self.n_groups, (- 1))
    x = x.transpose(0, 1)
    # Per-group matmul: [g x B x N/g] @ [g x N/g x M/g] -> [g x B x M/g]
    x = torch.bmm(x, self.weights)
    if self.use_bias:
        # bias is [g x 1 x M/g]; broadcasts over the batch dim.
        x = torch.add(x, self.bias)
    if self.feature_shuffle:
        # Interleave features across groups: [g x B x M/g] -> [B x M/g x g],
        # then the view regroups so each group mixes features from all groups.
        x = x.permute(1, 2, 0)
        x = x.contiguous().view(bsz, self.n_groups, (- 1))
    else:
        x = x.transpose(0, 1)
    # Norm/activation are applied on the per-group feature dim (M/g).
    if (self.normalization_fn is not None):
        x = self.normalization_fn(x)
    if (self.act_fn is not None):
        x = self.act_fn(x)
    return x
def forward(self, x):
    """Apply the grouped linear transform.

    :param x: Input of shape [B x N] or [T x B x N] (also works with [B x T x N])
    :return: transformed tensor; a 3-D input keeps its leading two dims
    """
    ndim = x.dim()
    if ndim == 2:
        out = self.process_input_bmm(x)
    elif ndim == 3:
        T, B, _ = x.size()
        flat = x.contiguous().view(T * B, -1)
        out = self.process_input_bmm(flat).contiguous().view(T, B, -1)
    else:
        raise NotImplementedError
    if self.use_dropout:
        out = self.drop_layer(out)
    return out
def __repr__(self):
    """Readable summary showing only the options that are active."""
    fields = ['{name}(in_features={in_features}, out_features={out_features}, num_groups={n_groups}']
    if self.use_bias:
        fields.append(', bias={use_bias}')
    if self.shuffle:
        fields.append(', shuffle={shuffle}')
    if self.norm_type is not None:
        fields.append(', norm_type={norm_type}')
    if self.act_type is not None:
        fields.append(', act_type={act_type}')
    if self.drop_p > 0.0:
        fields.append(', drop_p={drop_p}')
    fields.append(')')
    # Placeholders are filled from instance attributes by name.
    return ''.join(fields).format(name=self.__class__.__name__, **self.__dict__)
def compute_macs_params(self):
    """Report MACs and parameter counts for the group linear transform (GLT).

    For input dim N, output dim M and G groups, GLT uses G matrices of shape
    [N/G x M/G]: NM/G parameters and NM/G multiply-accumulate operations
    (FLOPs = 2 * MACs), so MACs == parameters. Normalization parameters
    (if any) are counted in ``params`` but not in ``macs``.
    """
    weight_count = self.weights.numel()
    bias_count = self.bias.numel() if self.use_bias else 0
    macs = weight_count + bias_count
    n_params = macs
    if self.normalization_fn is not None:
        n_params += sum(p.numel() for p in self.normalization_fn.parameters())
    return {'name': self.__class__.__name__, 'macs': macs, 'params': n_params}
|
class Linear(nn.Module):
    """
    This class implements the fully connected layer:
    affine transform, then optional normalization, activation and dropout.
    """
    def __init__(self, in_features, out_features, use_bias=True, num_gates=1, norm_type=None, dropout=0.0, act_type=None):
        """
        :param in_features: number of input features
        :param out_features: number of output features
        :param use_bias: use bias or not
        :param num_gates: number of gates (useful if you want to use it within gating structures, like LSTMs)
        :param norm_type: Normalization type (e.g. LayerNorm)
        :param dropout: Dropout value (default is 0.0)
        :param act_type: Activation type (e.g., Gelu or ReLU)
        """
        super(Linear, self).__init__()
        # All gates share one matrix of shape [out_features * num_gates, in_features].
        self.weights = torch.nn.Parameter(torch.Tensor((out_features * num_gates), in_features))
        if use_bias:
            self.bias = torch.nn.Parameter(torch.Tensor((out_features * num_gates)))
        else:
            self.bias = None
        if (norm_type is not None):
            self.normalization_fn = get_norm_layer(name=norm_type, out_features=(out_features * num_gates))
            self.norm_type = norm_type
        else:
            self.normalization_fn = None
            self.norm_type = None
        self.use_dropout = False
        self.drop_p = dropout
        if (dropout > 0):
            self.drop_layer = nn.Dropout(p=dropout)
            self.use_dropout = True
        if (act_type is not None):
            self.act_fn = get_activation_layer(name=act_type)
            self.act_type = act_type
        else:
            self.act_fn = None
            self.act_type = None
        # NOTE: these attribute names are referenced by name in __repr__ via
        # **self.__dict__ — keep them in sync with the repr format strings.
        self.in_features = in_features
        self.out_features = out_features
        self.use_bias = use_bias
        self.gates = num_gates
        self.reset_parameters()
    def reset_parameters(self):
        """Xavier-uniform weights; zero bias (when present)."""
        nn.init.xavier_uniform_(self.weights.data)
        if self.use_bias:
            nn.init.constant_(self.bias.data, 0)
    def forward(self, x):
        """
        :param x: Input
        :return: Output (affine -> norm -> activation -> dropout)
        """
        x = F.linear(x, weight=self.weights, bias=self.bias)
        if (self.normalization_fn is not None):
            x = self.normalization_fn(x)
        if (self.act_fn is not None):
            x = self.act_fn(x)
        if self.use_dropout:
            x = self.drop_layer(x)
        return x
    def __repr__(self):
        # Only the options that deviate from a plain affine layer are shown.
        s = '{name}(in_features={in_features}, out_features={out_features}'
        if self.use_bias:
            s += ', bias={use_bias}'
        if (self.gates > 1):
            s += ', gates={gates}'
        if (self.norm_type is not None):
            s += ', norm_type={norm_type}'
        if (self.act_type is not None):
            s += ', act_type={act_type}'
        if (self.drop_p > 0.0):
            s += ', drop_p={drop_p}'
        s += ')'
        return s.format(name=self.__class__.__name__, **self.__dict__)
    def compute_macs_params(self):
        """
        For input dim N and output dim M, the linear transform uses a matrix
        of shape [N x M]: NM parameters and NM multiply-additions (MACs),
        i.e. FLOPs = 2 * MACs and MACs == parameters. Normalization
        parameters (if any) are added to the parameter count only.
        """
        n_mul_wt = self.weights.numel()
        n_add_bias = (self.bias.numel() if self.use_bias else 0)
        macs = (n_mul_wt + n_add_bias)
        n_params = (n_mul_wt + n_add_bias)
        if (self.normalization_fn is not None):
            n_params += sum([p.numel() for p in self.normalization_fn.parameters()])
        return {'name': self.__class__.__name__, 'macs': macs, 'params': n_params}
|
def get_weight_layer(name: str, in_features: int, out_features: int, groups: int=4, use_bias: bool=True, gates: int=1, shuffle: bool=False, norm_type: Optional[str]=None, dropout: float=0.0, act_type: Optional[str]=None):
    """Factory for weight layers: 'linear' -> Linear, 'glt' -> GroupLinear.

    A 'glt' request with a single group degenerates to a plain Linear.
    Raises NotImplementedError for any other name.
    """
    if name == 'glt' and groups == 1:
        name = 'linear'
    if name == 'linear':
        return Linear(in_features=in_features, out_features=out_features, use_bias=use_bias, num_gates=gates, norm_type=norm_type, dropout=dropout, act_type=act_type)
    if name == 'glt':
        return GroupLinear(in_features=in_features, out_features=out_features, n_groups=groups, use_bias=use_bias, use_shuffle=shuffle, norm_type=norm_type, dropout=dropout, act_type=act_type)
    raise NotImplementedError
|
def get_embedding_layer(num_embeddings, embedding_dim, padding_idx=None):
    """Create an nn.Embedding with N(0, embedding_dim^-0.5) init.

    :param num_embeddings: vocabulary size
    :param embedding_dim: embedding dimension
    :param padding_idx: optional index whose row is zeroed (never updated by
        nn.Embedding's padding handling)
    :return: initialized nn.Embedding
    """
    emb = nn.Embedding(num_embeddings, embedding_dim, padding_idx)
    nn.init.normal_(emb.weight, mean=0, std=(embedding_dim ** (- 0.5)))
    # Fix: the original indexed emb.weight[padding_idx] unconditionally,
    # raising TypeError when padding_idx is None (the default).
    if padding_idx is not None:
        nn.init.constant_(emb.weight[padding_idx], 0)
    return emb
|
class BatchNorm(nn.Module):
    """BatchNorm1d wrapper that also accepts [B x T x C] input by flattening
    batch and time dimensions before normalizing, then restoring the shape."""
    def __init__(self, num_features, eps=1e-05, affine=True):
        super(BatchNorm, self).__init__()
        self.layer = nn.BatchNorm1d(num_features=num_features, eps=eps, affine=affine)
    def forward(self, x):
        if x.dim() != 3:
            return self.layer(x)
        bsz, seq_len, feature_size = x.size()
        flat = self.layer(x.view(-1, feature_size))
        return flat.contiguous().view(bsz, seq_len, -1)
|
def get_norm_layer(name, out_features, num_groups=1, eps=1e-05, affine=True):
    """Factory for normalization layers.

    :param name: 'bn' (BatchNorm), 'ln' (LayerNorm) or 'gn' (GroupNorm)
    :param out_features: number of features/channels to normalize
    :param num_groups: groups for GroupNorm; 'gn' with one group degrades to 'bn'
    :param eps: numerical-stability epsilon
    :param affine: learn elementwise affine parameters
    :return: the normalization module, or None after reporting an error
    """
    if name == 'gn' and num_groups == 1:
        name = 'bn'
    if name == 'bn':
        return BatchNorm(num_features=out_features, eps=eps, affine=affine)
    elif name == 'ln':
        try:
            # Prefer apex's fused kernel when it is installed.
            from apex.normalization import FusedLayerNorm
            return FusedLayerNorm(out_features, eps, affine)
        except ImportError:
            # Fix: the original bare `except:` also swallowed unrelated errors
            # (including KeyboardInterrupt and genuine bugs inside the try).
            return nn.LayerNorm(out_features, eps=eps, elementwise_affine=affine)
    elif name == 'gn':
        return nn.GroupNorm(num_groups=num_groups, num_channels=out_features, eps=eps, affine=affine)
    else:
        # NOTE(review): `norm_layer_list` is not defined anywhere in this
        # chunk — confirm it exists at module level, else this path raises
        # NameError before the error message is printed.
        print_error_message('Supported normalization functions: {}'.format(norm_layer_list))
        return None
|
def get_curr_time_stamp():
    """Return the current local time as a 'YYYY-MM-DD HH:MM:SS' string."""
    fmt = '%Y-%m-%d %H:%M:%S'
    return time.strftime(fmt)
|
def print_error_message(message):
    """Print *message* as a colored ERROR log line, then abort the process."""
    ts = get_curr_time_stamp()
    tag = text_colors['error'] + text_colors['bold'] + 'ERROR ' + text_colors['end_color']
    print('{} - {} - {}'.format(ts, tag, message))
    print('{} - {} - {}'.format(ts, tag, 'Exiting!!!'))
    exit(-1)
|
def print_log_message(message):
    """Print *message* as a colored LOGS line with a timestamp."""
    ts = get_curr_time_stamp()
    tag = text_colors['logs'] + text_colors['bold'] + 'LOGS ' + text_colors['end_color']
    print('{} - {} - {}'.format(ts, tag, message))
|
def print_warning_message(message):
    """Print *message* as a colored WARNING line with a timestamp."""
    ts = get_curr_time_stamp()
    tag = text_colors['warning'] + text_colors['bold'] + 'WARNING' + text_colors['end_color']
    print('{} - {} - {}'.format(ts, tag, message))
|
def print_info_message(message):
    """Print *message* as a colored INFO line with a timestamp."""
    ts = get_curr_time_stamp()
    tag = text_colors['info'] + text_colors['bold'] + 'INFO ' + text_colors['end_color']
    print('{} - {} - {}'.format(ts, tag, message))
|
def print_dash_line():
    """Print a 100-character '=' separator in the error color."""
    rule = '=' * 100
    print(text_colors['error'] + text_colors['bold'] + rule + text_colors['end_color'])
|
def print_header(header):
    """Print *header* between two dash lines, prefixed by 50 '=' characters."""
    print_dash_line()
    banner = ('=' * 50) + str(header)
    print(text_colors['info'] + text_colors['bold'] + banner + text_colors['end_color'])
    print_dash_line()
|
def print_header_minor(header):
    """Print *header* prefixed by 25 '=' characters in the warning color."""
    banner = ('=' * 25) + str(header)
    print(text_colors['warning'] + text_colors['bold'] + banner + text_colors['end_color'])
|
def is_master(args):
    """True iff this process is distributed rank 0."""
    return args.distributed_rank == 0
|
def infer_init_method(args):
    """Infer and fill in distributed-init settings on *args*, in place.

    Tries, in order: an explicit --distributed-init-method (no-op), the
    torch.distributed ``env://`` variables, then SLURM environment variables.
    """
    if (args.distributed_init_method is not None):
        return
    # Case 1: all four env:// variables are present (e.g. launched by torch.distributed.launch).
    if all(((key in os.environ) for key in ['MASTER_ADDR', 'MASTER_PORT', 'WORLD_SIZE', 'RANK'])):
        args.distributed_init_method = 'env://'
        args.distributed_world_size = int(os.environ['WORLD_SIZE'])
        args.distributed_rank = int(os.environ['RANK'])
    # Case 2: running under SLURM with an explicit rendezvous port.
    elif (args.distributed_port > 0):
        node_list = os.environ.get('SLURM_STEP_NODELIST')
        if (node_list is None):
            node_list = os.environ.get('SLURM_JOB_NODELIST')
        if (node_list is not None):
            try:
                # First hostname in the step/job node list becomes the rendezvous host.
                hostnames = subprocess.check_output(['scontrol', 'show', 'hostnames', node_list])
                args.distributed_init_method = 'tcp://{host}:{port}'.format(host=hostnames.split()[0].decode('utf-8'), port=args.distributed_port)
                nnodes = int(os.environ.get('SLURM_NNODES'))
                ntasks_per_node = os.environ.get('SLURM_NTASKS_PER_NODE')
                if (ntasks_per_node is not None):
                    ntasks_per_node = int(ntasks_per_node)
                else:
                    # Derive tasks-per-node from the totals when not set explicitly.
                    ntasks = int(os.environ.get('SLURM_NTASKS'))
                    nnodes = int(os.environ.get('SLURM_NNODES'))
                    assert ((ntasks % nnodes) == 0)
                    ntasks_per_node = int((ntasks / nnodes))
                if (ntasks_per_node == 1):
                    # One task per node: this process spawns one worker per GPU,
                    # so its base rank is node_id * gpus_per_node.
                    assert ((args.distributed_world_size % nnodes) == 0)
                    gpus_per_node = (args.distributed_world_size // nnodes)
                    node_id = int(os.environ.get('SLURM_NODEID'))
                    args.distributed_rank = (node_id * gpus_per_node)
                else:
                    # One task per GPU: SLURM already spawned every worker.
                    assert (ntasks_per_node == (args.distributed_world_size // nnodes))
                    args.distributed_no_spawn = True
                    args.distributed_rank = int(os.environ.get('SLURM_PROCID'))
                    args.device_id = int(os.environ.get('SLURM_LOCALID'))
            except subprocess.CalledProcessError as e:
                raise e
            except FileNotFoundError:
                # scontrol not available: not actually inside a SLURM allocation.
                pass
|
def distributed_init(args):
    """Initialize (or reuse) the torch.distributed process group.

    Side effects: joins the process group, performs a warm-up all_reduce,
    silences logging on non-master ranks, and updates args.distributed_rank.
    :return: this process's distributed rank
    """
    if (args.distributed_world_size == 1):
        raise ValueError('Cannot initialize distributed with distributed_world_size=1')
    if torch.distributed.is_initialized():
        warnings.warn('Distributed is already initialized, cannot initialize twice!')
    else:
        logger.info('distributed init (rank {}): {}'.format(args.distributed_rank, args.distributed_init_method))
        dist.init_process_group(backend=args.distributed_backend, init_method=args.distributed_init_method, world_size=args.distributed_world_size, rank=args.distributed_rank)
        logger.info('initialized host {} as rank {}'.format(socket.gethostname(), args.distributed_rank))
        # Warm-up collective: verifies connectivity (and NCCL/CUDA setup) early.
        if torch.cuda.is_available():
            dist.all_reduce(torch.zeros(1).cuda())
        else:
            dist.all_reduce(torch.zeros(1))
        # Only the master rank keeps INFO-level logging.
        if is_master(args):
            logging.getLogger().setLevel(logging.INFO)
        else:
            logging.getLogger().setLevel(logging.WARNING)
    args.distributed_rank = torch.distributed.get_rank()
    return args.distributed_rank
|
def get_rank():
    # Thin wrapper over torch.distributed; requires an initialized process group.
    return dist.get_rank()
|
def get_world_size():
    # Thin wrapper over torch.distributed; requires an initialized process group.
    return dist.get_world_size()
|
def get_default_group():
    # The WORLD group spans every process in the job.
    return dist.group.WORLD
|
def all_reduce(tensor, group=None):
    """All-reduce *tensor* in place across *group* (default: the WORLD group)."""
    target_group = get_default_group() if group is None else group
    return dist.all_reduce(tensor, group=target_group)
|
def all_gather_list(data, group=None, max_size=16384):
    """Gathers arbitrary data from all nodes into a list.

    Similar to :func:`~torch.distributed.all_gather` but for arbitrary Python
    data. Note that *data* must be picklable.

    Implementation: each rank zeroes a shared CUDA byte buffer, writes its
    pickled payload (4-byte big-endian length header + bytes) into its own
    max_size slice, then a SUM all_reduce concatenates the slices (zeros
    elsewhere leave other ranks' slices intact).

    Args:
        data (Any): data from the local worker to be gathered on other workers
        group (optional): group of the collective
        max_size (int, optional): maximum size of the data to be gathered
            across workers
    """
    rank = get_rank()
    world_size = get_world_size()
    buffer_size = (max_size * world_size)
    # Function-attribute buffers are lazily (re)allocated and reused across calls.
    if ((not hasattr(all_gather_list, '_buffer')) or (all_gather_list._buffer.numel() < buffer_size)):
        all_gather_list._buffer = torch.cuda.ByteTensor(buffer_size)
        all_gather_list._cpu_buffer = torch.ByteTensor(max_size).pin_memory()
    buffer = all_gather_list._buffer
    buffer.zero_()
    cpu_buffer = all_gather_list._cpu_buffer
    enc = pickle.dumps(data)
    enc_size = len(enc)
    header_size = 4
    size = (header_size + enc_size)
    if (size > max_size):
        raise ValueError('encoded data size ({}) exceeds max_size ({})'.format(size, max_size))
    # '>I' = unsigned 32-bit big-endian payload length.
    header = struct.pack('>I', enc_size)
    cpu_buffer[:size] = torch.ByteTensor(list((header + enc)))
    start = (rank * max_size)
    buffer[start:(start + size)].copy_(cpu_buffer[:size])
    all_reduce(buffer, group=group)
    buffer = buffer.cpu()
    try:
        result = []
        for i in range(world_size):
            out_buffer = buffer[(i * max_size):((i + 1) * max_size)]
            (enc_size,) = struct.unpack('>I', bytes(out_buffer[:header_size].tolist()))
            # A zero length means that rank contributed nothing.
            if (enc_size > 0):
                result.append(pickle.loads(bytes(out_buffer[header_size:(header_size + enc_size)].tolist())))
        return result
    except pickle.UnpicklingError:
        raise Exception('Unable to unpickle data from other workers. all_gather_list requires all workers to enter the function together, so this error usually indicates that the workers have fallen out of sync somehow. Workers can fall out of sync if one of them runs out of memory, or if there are other conditions in your training script that can cause one worker to finish an epoch while other workers are still iterating over their portions of the data. Try rerunning with --ddp-backend=no_c10d and see if that helps.')
|
class PathManager():
    """
    Wrapper for insulating OSS I/O (using Python builtin operations) from
    fvcore's PathManager abstraction (for transparently handling various
    internal backends).

    Every method dispatches to FVCorePathManager when that import succeeded,
    otherwise falls back to the equivalent stdlib operation.
    """
    @staticmethod
    def open(path: str, mode: str='r', buffering: int=(- 1), encoding: Optional[str]=None, errors: Optional[str]=None, newline: Optional[str]=None):
        if FVCorePathManager:
            return FVCorePathManager.open(path=path, mode=mode, buffering=buffering, encoding=encoding, errors=errors, newline=newline)
        return open(path, mode=mode, buffering=buffering, encoding=encoding, errors=errors, newline=newline)
    @staticmethod
    def copy(src_path: str, dst_path: str, overwrite: bool=False) -> bool:
        if FVCorePathManager:
            return FVCorePathManager.copy(src_path=src_path, dst_path=dst_path, overwrite=overwrite)
        # NOTE(review): the fallback ignores `overwrite` — shutil.copyfile
        # always overwrites an existing destination.
        return shutil.copyfile(src_path, dst_path)
    @staticmethod
    def get_local_path(path: str) -> str:
        if FVCorePathManager:
            return FVCorePathManager.get_local_path(path)
        return path
    @staticmethod
    def exists(path: str) -> bool:
        if FVCorePathManager:
            return FVCorePathManager.exists(path)
        return os.path.exists(path)
    @staticmethod
    def isfile(path: str) -> bool:
        if FVCorePathManager:
            return FVCorePathManager.isfile(path)
        return os.path.isfile(path)
    @staticmethod
    def ls(path: str) -> List[str]:
        if FVCorePathManager:
            return FVCorePathManager.ls(path)
        return os.listdir(path)
    @staticmethod
    def mkdirs(path: str) -> None:
        if FVCorePathManager:
            return FVCorePathManager.mkdirs(path)
        os.makedirs(path, exist_ok=True)
    @staticmethod
    def rm(path: str) -> None:
        if FVCorePathManager:
            return FVCorePathManager.rm(path)
        os.remove(path)
    @staticmethod
    def register_handler(handler) -> None:
        # No-op without fvcore: there is no backend registry to extend.
        if FVCorePathManager:
            return FVCorePathManager.register_handler(handler=handler)
|
def load_archive_file(archive_file):
    """Resolve *archive_file* (path or URL) and extract it in place if needed.

    :return: path to the extracted archive directory, or None when the
        name/URL cannot be resolved.
    """
    try:
        resolved_archive_file = cached_path(archive_file, cache_dir=None)
    except EnvironmentError:
        logger.info("Archive name '{}' was not found in archive name list. We assumed '{}' was a path or URL but couldn't find any file associated to this path or URL.".format(archive_file, archive_file))
        return None
    if (resolved_archive_file == archive_file):
        logger.info('loading archive file {}'.format(archive_file))
    else:
        logger.info('loading archive file {} from cache at {}'.format(archive_file, resolved_archive_file))
    tempdir = None
    # A directory means the archive was already extracted on a previous call.
    if (not os.path.isdir(resolved_archive_file)):
        tempdir = tempfile.mkdtemp()
        logger.info('extracting archive file {} to temp dir {}'.format(resolved_archive_file, tempdir))
        # Extension (e.g. 'gz') selects the tarfile compression mode.
        ext = os.path.splitext(archive_file)[1][1:]
        with tarfile.open(resolved_archive_file, ('r:' + ext)) as archive:
            top_dir = os.path.commonprefix(archive.getnames())
            # NOTE(review): extractall on an untrusted archive is vulnerable to
            # path traversal ('../' members) — consider validating member names.
            archive.extractall(tempdir)
        # Replace the downloaded tarball with its extracted top-level directory.
        os.remove(resolved_archive_file)
        shutil.move(os.path.join(tempdir, top_dir), resolved_archive_file)
        shutil.rmtree(tempdir)
    return resolved_archive_file
|
def url_to_filename(url, etag=None):
    """
    Convert `url` into a hashed filename in a repeatable way.
    If `etag` is specified, append its hash to the URL's, delimited
    by a period.
    """
    pieces = [sha256(url.encode('utf-8')).hexdigest()]
    if etag:
        pieces.append(sha256(etag.encode('utf-8')).hexdigest())
    return '.'.join(pieces)
|
def filename_to_url(filename, cache_dir=None):
    """
    Return the url and etag (which may be ``None``) stored for `filename`.
    Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist.
    """
    if cache_dir is None:
        cache_dir = PYTORCH_FAIRSEQ_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    cache_path = os.path.join(cache_dir, filename)
    meta_path = cache_path + '.json'
    # The cached file and its .json sidecar must both be present.
    for required in (cache_path, meta_path):
        if not os.path.exists(required):
            raise EnvironmentError('file {} not found'.format(required))
    with open(meta_path, encoding='utf-8') as meta_file:
        metadata = json.load(meta_file)
    return (metadata['url'], metadata['etag'])
|
def cached_path(url_or_filename, cache_dir=None):
    """
    Given something that might be a URL (or might be a local path),
    determine which. If it's a URL, download the file and cache it, and
    return the path to the cached file. If it's already a local path,
    make sure the file exists and then return the path.
    """
    if cache_dir is None:
        cache_dir = PYTORCH_FAIRSEQ_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    scheme = urlparse(url_or_filename).scheme
    if scheme in ('http', 'https', 's3'):
        # Remote: download (or reuse) a cached copy.
        return get_from_cache(url_or_filename, cache_dir)
    if os.path.exists(url_or_filename):
        return url_or_filename
    if scheme == '':
        # Looks like a plain path but nothing is there.
        raise EnvironmentError('file {} not found'.format(url_or_filename))
    raise ValueError('unable to parse {} as a URL or as a local path'.format(url_or_filename))
|
def split_s3_path(url):
    """Split a full s3 path into ``(bucket_name, s3_path)``."""
    parsed = urlparse(url)
    if not (parsed.netloc and parsed.path):
        raise ValueError('bad s3 path {}'.format(url))
    key = parsed.path
    # Drop the single leading slash urlparse keeps on the key.
    if key.startswith('/'):
        key = key[1:]
    return (parsed.netloc, key)
|
def s3_request(func):
    """
    Wrapper function for s3 requests in order to create more helpful error
    messages: a boto3 404 ClientError becomes a file-not-found EnvironmentError.
    """
    @wraps(func)
    def wrapper(url, *args, **kwargs):
        from botocore.exceptions import ClientError
        try:
            return func(url, *args, **kwargs)
        except ClientError as exc:
            if int(exc.response['Error']['Code']) != 404:
                raise
            raise EnvironmentError('file {} not found'.format(url))
    return wrapper
|
@s3_request
def s3_etag(url):
    """Check ETag on S3 object."""
    import boto3
    bucket_name, s3_path = split_s3_path(url)
    return boto3.resource('s3').Object(bucket_name, s3_path).e_tag
|
@s3_request
def s3_get(url, temp_file):
    """Pull a file directly from S3 into *temp_file*."""
    import boto3
    bucket_name, s3_path = split_s3_path(url)
    boto3.resource('s3').Bucket(bucket_name).download_fileobj(s3_path, temp_file)
|
def http_get(url, temp_file):
    """Stream *url* over HTTP into *temp_file*, showing a byte progress bar."""
    import requests
    from tqdm import tqdm
    response = requests.get(url, stream=True)
    length = response.headers.get('Content-Length')
    progress = tqdm(unit='B', total=int(length) if length is not None else None)
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # skip keep-alive chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
|
def get_from_cache(url, cache_dir=None):
    """
    Given a URL, look for the corresponding dataset in the local cache.
    If it's not there, download it. Then return the path to the cached file.

    The cache filename is a hash of the URL plus (when available) a hash of
    the server-provided ETag, so new remote versions get new cache entries.
    """
    if (cache_dir is None):
        cache_dir = PYTORCH_FAIRSEQ_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    if (not os.path.exists(cache_dir)):
        os.makedirs(cache_dir)
    # Probe the remote for an ETag; failures just disable versioned caching.
    if url.startswith('s3://'):
        etag = s3_etag(url)
    else:
        try:
            import requests
            response = requests.head(url, allow_redirects=True)
            if (response.status_code != 200):
                etag = None
            else:
                etag = response.headers.get('ETag')
        except EnvironmentError:
            etag = None
    filename = url_to_filename(url, etag)
    cache_path = os.path.join(cache_dir, filename)
    # Offline fallback: with no ETag, reuse any previously cached version of
    # this URL (any '<hash>.<etag-hash>' file, excluding the .json sidecars).
    if ((not os.path.exists(cache_path)) and (etag is None)):
        matching_files = fnmatch.filter(os.listdir(cache_dir), (filename + '.*'))
        matching_files = list(filter((lambda s: (not s.endswith('.json'))), matching_files))
        if matching_files:
            cache_path = os.path.join(cache_dir, matching_files[(- 1)])
    if (not os.path.exists(cache_path)):
        # Download to a temp file first so a failed download never leaves a
        # truncated entry at cache_path.
        with tempfile.NamedTemporaryFile() as temp_file:
            logger.info('%s not found in cache, downloading to %s', url, temp_file.name)
            if url.startswith('s3://'):
                s3_get(url, temp_file)
            else:
                http_get(url, temp_file)
            temp_file.flush()
            temp_file.seek(0)
            logger.info('copying %s to cache at %s', temp_file.name, cache_path)
            with open(cache_path, 'wb') as cache_file:
                shutil.copyfileobj(temp_file, cache_file)
            logger.info('creating metadata file for %s', cache_path)
            meta = {'url': url, 'etag': etag}
            meta_path = (cache_path + '.json')
            with open(meta_path, 'w') as meta_file:
                output_string = json.dumps(meta)
                meta_file.write(output_string)
            logger.info('removing temp file %s', temp_file.name)
    return cache_path
|
def read_set_from_file(filename):
    """
    Extract a de-duped collection (set) of text from a file.
    Expected file format is one item per line.
    """
    with open(filename, 'r', encoding='utf-8') as handle:
        return {line.rstrip() for line in handle}
|
def get_file_extension(path, dot=True, lower=True):
    """Return the extension of *path*; optionally strip the dot / keep case."""
    ext = os.path.splitext(path)[1]
    if not dot:
        ext = ext[1:]
    return ext.lower() if lower else ext
|
def from_pretrained(model_name_or_path, checkpoint_file='model.pt', data_name_or_path='.', archive_map=None, **kwargs):
    """Load a pre-trained model ensemble plus its task and args.

    :param model_name_or_path: model name (resolved via archive_map), path or URL
    :param checkpoint_file: checkpoint filename(s), os.pathsep-separated for ensembles
    :param data_name_or_path: data directory, relative to the model path when it
        starts with '.'
    :param archive_map: optional name -> path/URL (or dict of overrides) mapping
    :return: dict with keys 'args', 'task' and 'models'
    """
    from fairseq import checkpoint_utils, file_utils
    if (archive_map is not None):
        if (model_name_or_path in archive_map):
            model_name_or_path = archive_map[model_name_or_path]
        if ((data_name_or_path is not None) and (data_name_or_path in archive_map)):
            data_name_or_path = archive_map[data_name_or_path]
        # An archive_map entry may be a dict carrying extra defaults
        # (e.g. tokenizer, bpe) in addition to the checkpoint path; explicit
        # kwargs take precedence over those defaults.
        if isinstance(model_name_or_path, dict):
            for (k, v) in model_name_or_path.items():
                if (k == 'checkpoint_file'):
                    checkpoint_file = v
                elif ((k != 'path') and (k not in kwargs)):
                    kwargs[k] = v
            model_name_or_path = model_name_or_path['path']
    model_path = file_utils.load_archive_file(model_name_or_path)
    # Convenience: '.' (or any relative dot-path) means data lives inside the model dir.
    if data_name_or_path.startswith('.'):
        kwargs['data'] = os.path.abspath(os.path.join(model_path, data_name_or_path))
    else:
        kwargs['data'] = file_utils.load_archive_file(data_name_or_path)
    # Auto-detect common vocab/BPE artifacts shipped next to the checkpoint.
    for (file, arg) in {'code': 'bpe_codes', 'bpecodes': 'bpe_codes', 'sentencepiece.bpe.model': 'sentencepiece_vocab'}.items():
        path = os.path.join(model_path, file)
        if os.path.exists(path):
            kwargs[arg] = path
    if ('user_dir' in kwargs):
        utils.import_user_module(argparse.Namespace(user_dir=kwargs['user_dir']))
    (models, args, task) = checkpoint_utils.load_model_ensemble_and_task([os.path.join(model_path, cpt) for cpt in checkpoint_file.split(os.pathsep)], arg_overrides=kwargs)
    return {'args': args, 'task': task, 'models': models}
|
class GeneratorHubInterface(nn.Module):
    """
    PyTorch Hub interface for generating sequences from a pre-trained
    translation or language model.

    Pipeline: tokenize -> BPE-encode -> binarize -> generate -> string ->
    BPE-decode -> detokenize.
    """
    def __init__(self, args, task, models):
        super().__init__()
        self.args = args
        self.task = task
        self.models = nn.ModuleList(models)
        self.src_dict = task.source_dictionary
        self.tgt_dict = task.target_dictionary
        # Optimize every ensemble member for inference.
        for model in self.models:
            model.make_generation_fast_(beamable_mm_beam_size=(None if getattr(args, 'no_beamable_mm', False) else getattr(args, 'beam', 5)), need_attn=getattr(args, 'print_alignment', False))
        self.align_dict = utils.load_align_dict(getattr(args, 'replace_unk', None))
        self.tokenizer = encoders.build_tokenizer(args)
        self.bpe = encoders.build_bpe(args)
        self.max_positions = utils.resolve_max_positions(self.task.max_positions(), *[model.max_positions() for model in models])
        # Dummy buffer used only to track which device the module lives on.
        self.register_buffer('_float_tensor', torch.tensor([0], dtype=torch.float))
    @property
    def device(self):
        return self._float_tensor.device
    def translate(self, sentences: List[str], beam: int=5, verbose: bool=False, **kwargs) -> List[str]:
        """Translate sentences; alias of sample() with a wider default beam."""
        return self.sample(sentences, beam, verbose, **kwargs)
    def sample(self, sentences: List[str], beam: int=1, verbose: bool=False, **kwargs) -> List[str]:
        """Generate and decode the best hypothesis for each input sentence."""
        # A bare string is treated as a batch of one; unwrap on return.
        if isinstance(sentences, str):
            return self.sample([sentences], beam=beam, verbose=verbose, **kwargs)[0]
        tokenized_sentences = [self.encode(sentence) for sentence in sentences]
        batched_hypos = self.generate(tokenized_sentences, beam, verbose, **kwargs)
        return [self.decode(hypos[0]['tokens']) for hypos in batched_hypos]
    def score(self, sentences: List[str], **kwargs):
        """Score sentences against themselves (force-decode the reference)."""
        if isinstance(sentences, str):
            return self.score([sentences], **kwargs)[0]
        tokenized_sentences = [self.encode(sentence) for sentence in sentences]
        return [hypos[0] for hypos in self.generate(tokenized_sentences, score_reference=True, **kwargs)]
    def generate(self, tokenized_sentences: List[torch.LongTensor], beam: int=5, verbose: bool=False, skip_invalid_size_inputs=False, **kwargs) -> List[List[Dict[(str, torch.Tensor)]]]:
        """Run beam search over pre-binarized inputs; returns hypotheses per input."""
        # A single 1-D tensor is treated as a batch of one; unwrap on return.
        if (torch.is_tensor(tokenized_sentences) and (tokenized_sentences.dim() == 1)):
            return self.generate(tokenized_sentences.unsqueeze(0), beam=beam, verbose=verbose, **kwargs)[0]
        # Shallow-copy args so per-call overrides don't leak into self.args.
        gen_args = copy.copy(self.args)
        gen_args.beam = beam
        for (k, v) in kwargs.items():
            setattr(gen_args, k, v)
        generator = self.task.build_generator(gen_args)
        results = []
        for batch in self._build_batches(tokenized_sentences, skip_invalid_size_inputs):
            batch = utils.apply_to_sample((lambda t: t.to(self.device)), batch)
            translations = self.task.inference_step(generator, self.models, batch)
            for (id, hypos) in zip(batch['id'].tolist(), translations):
                results.append((id, hypos))
        # Batching may reorder inputs; sort outputs back to input order.
        outputs = [hypos for (_, hypos) in sorted(results, key=(lambda x: x[0]))]
        if verbose:
            def getarg(name, default):
                return getattr(gen_args, name, getattr(self.args, name, default))
            # Mirror fairseq-generate's S/H/P/A logging format.
            for (source_tokens, target_hypotheses) in zip(tokenized_sentences, outputs):
                src_str_with_unk = self.string(source_tokens)
                logger.info('S\t{}'.format(src_str_with_unk))
                for hypo in target_hypotheses:
                    hypo_str = self.decode(hypo['tokens'])
                    logger.info('H\t{}\t{}'.format(hypo['score'], hypo_str))
                    logger.info('P\t{}'.format(' '.join(map((lambda x: '{:.4f}'.format(x)), hypo['positional_scores'].tolist()))))
                    if ((hypo['alignment'] is not None) and getarg('print_alignment', False)):
                        logger.info('A\t{}'.format(' '.join(map((lambda x: str(utils.item(x))), hypo['alignment'].int().cpu()))))
        return outputs
    def encode(self, sentence: str) -> torch.LongTensor:
        """String -> token ids (tokenize, BPE, binarize)."""
        sentence = self.tokenize(sentence)
        sentence = self.apply_bpe(sentence)
        return self.binarize(sentence)
    def decode(self, tokens: torch.LongTensor) -> str:
        """Token ids -> string (stringify, remove BPE, detokenize)."""
        sentence = self.string(tokens)
        sentence = self.remove_bpe(sentence)
        return self.detokenize(sentence)
    def tokenize(self, sentence: str) -> str:
        if (self.tokenizer is not None):
            sentence = self.tokenizer.encode(sentence)
        return sentence
    def detokenize(self, sentence: str) -> str:
        if (self.tokenizer is not None):
            sentence = self.tokenizer.decode(sentence)
        return sentence
    def apply_bpe(self, sentence: str) -> str:
        if (self.bpe is not None):
            sentence = self.bpe.encode(sentence)
        return sentence
    def remove_bpe(self, sentence: str) -> str:
        if (self.bpe is not None):
            sentence = self.bpe.decode(sentence)
        return sentence
    def binarize(self, sentence: str) -> torch.LongTensor:
        return self.src_dict.encode_line(sentence, add_if_not_exist=False).long()
    def string(self, tokens: torch.LongTensor) -> str:
        return self.tgt_dict.string(tokens)
    def _build_batches(self, tokens: List[List[int]], skip_invalid_size_inputs: bool) -> Iterator[Dict[(str, Any)]]:
        # Length-bucketed batching, capped by the ensemble's max positions.
        lengths = torch.LongTensor([t.numel() for t in tokens])
        batch_iterator = self.task.get_batch_iterator(dataset=self.task.build_dataset_for_inference(tokens, lengths), max_tokens=self.args.max_tokens, max_sentences=self.args.max_sentences, max_positions=self.max_positions, ignore_invalid_inputs=skip_invalid_size_inputs).next_epoch_itr(shuffle=False)
        return batch_iterator
|
class BPEHubInterface(object):
    """PyTorch Hub interface for Byte-Pair Encoding (BPE)."""
    def __init__(self, bpe, **kwargs):
        super().__init__()
        self.bpe = encoders.build_bpe(argparse.Namespace(bpe=bpe, **kwargs))
        assert self.bpe is not None
    def encode(self, sentence: str) -> str:
        return self.bpe.encode(sentence)
    def decode(self, sentence: str) -> str:
        return self.bpe.decode(sentence)
|
class TokenizerHubInterface(object):
    """PyTorch Hub interface for tokenization."""
    def __init__(self, tokenizer, **kwargs):
        super().__init__()
        self.tokenizer = encoders.build_tokenizer(argparse.Namespace(tokenizer=tokenizer, **kwargs))
        assert self.tokenizer is not None
    def encode(self, sentence: str) -> str:
        return self.tokenizer.encode(sentence)
    def decode(self, sentence: str) -> str:
        return self.tokenizer.decode(sentence)
|
class FairseqIncrementalState(object):
    """Mixin giving a module its own namespaced slot inside a shared
    incremental-state dictionary (used during incremental decoding)."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.init_incremental_state()
    def init_incremental_state(self):
        # A fresh UUID per instance keeps keys from colliding across modules
        # that share one incremental-state dict.
        self._incremental_state_id = str(uuid.uuid4())
    def _get_full_incremental_state_key(self, key: str) -> str:
        return '{}.{}'.format(self._incremental_state_id, key)
    def get_incremental_state(self, incremental_state: Optional[Dict[(str, Dict[(str, Optional[Tensor])])]], key: str) -> Optional[Dict[(str, Optional[Tensor])]]:
        """Helper for getting incremental state for an nn.Module."""
        if incremental_state is None:
            return None
        return incremental_state.get(self._get_full_incremental_state_key(key))
    def set_incremental_state(self, incremental_state: Optional[Dict[(str, Dict[(str, Optional[Tensor])])]], key: str, value: Dict[(str, Optional[Tensor])]) -> Optional[Dict[(str, Dict[(str, Optional[Tensor])])]]:
        """Helper for setting incremental state for an nn.Module."""
        if incremental_state is not None:
            incremental_state[self._get_full_incremental_state_key(key)] = value
        return incremental_state
|
def with_incremental_state(cls):
    # Class decorator: prepend FairseqIncrementalState to cls's bases
    # (removing any existing occurrence first, so it is never duplicated),
    # giving instances the namespaced get/set incremental-state helpers.
    cls.__bases__ = ((FairseqIncrementalState,) + tuple((b for b in cls.__bases__ if (b != FairseqIncrementalState))))
    return cls
|
class LegacyDistributedDataParallel(nn.Module):
    """Implements distributed data parallelism at the module level.

    A simplified version of :class:`torch.nn.parallel.DistributedDataParallel`.
    This version uses a c10d process group for communication and does not
    broadcast buffers.

    Args:
        module (~torch.nn.Module): module to be parallelized
        world_size (int): number of parallel workers
        process_group (optional): the c10d process group to be used for
            distributed data all-reduction. If None, the default process group
            will be used.
        buffer_size (int, optional): number of elements to buffer before
            performing all-reduce (default: 256M).
    """

    def __init__(self, module, world_size, process_group=None, buffer_size=(2 ** 28)):
        super().__init__()
        self.module = module
        self.world_size = world_size
        self.process_group = process_group
        # Never allocate a flattening buffer larger than the model itself.
        self.buffer_size = min(buffer_size, sum(p.numel() for p in module.parameters()))
        # Allocated lazily on the first reduction (see reduction_fn below).
        self.buffer = None
        # Set True by the backward hook; cleared once gradients are reduced.
        self.need_reduction = False
        # While True, gradient all-reduce is skipped (see no_sync()).
        self.accumulate_grads = False
        self._register_grad_hook()

    def __getstate__(self):
        attrs = copy.copy(self.__dict__)
        return attrs

    def __setstate__(self, state):
        super().__setstate__(state)
        # Hooks do not survive pickling; re-register them after unpickling.
        self._register_grad_hook()

    @contextmanager
    def no_sync(self):
        """A context manager to disable gradient synchronization."""
        old_accumulate_grads = self.accumulate_grads
        self.accumulate_grads = True
        try:
            yield
        finally:
            # FIX: restore the flag even if the wrapped block raises. The
            # previous version skipped this line on an exception, leaving
            # gradient synchronization permanently disabled.
            self.accumulate_grads = old_accumulate_grads

    def forward(self, *inputs, **kwargs):
        return self.module(*inputs, **kwargs)

    def _register_grad_hook(self):
        """
        This function registers the callback all-reduction function for the
        NCCL backend. All gradients will be all reduced in one single step.
        The NCCL reduction will directly be enqueued into the default CUDA
        stream. Therefore, no synchronization is needed.
        """

        def all_reduce(params):
            # Flatten the gradients of *params* into one buffer, all-reduce
            # across workers, then copy the averaged values back.
            buffer = self.buffer
            nonzero_buffer = False
            if len(params) > 1:
                offset = 0
                for p in params:
                    sz = p.numel()
                    if p.grad is not None:
                        buffer[offset:offset + sz].copy_(p.grad.data.view(-1))
                        nonzero_buffer = True
                    else:
                        buffer[offset:offset + sz].zero_()
                    offset += sz
            else:
                # Single (possibly oversized) param: reduce its gradient
                # in place when possible to avoid an extra copy.
                p = params[0]
                if p.grad is not None:
                    buffer = p.grad.data
                    nonzero_buffer = True
                elif p.numel() <= self.buffer.numel():
                    buffer = buffer[:p.numel()]
                    buffer.zero_()
                else:
                    buffer = torch.zeros_like(p)
            if nonzero_buffer:
                # Divide before the sum-reduce so the result is the mean.
                buffer.div_(self.world_size)
            distributed_utils.all_reduce(buffer, self.process_group)
            # Copy the reduced gradients back into the parameters.
            offset = 0
            for p in params:
                sz = p.numel()
                if p.grad is not None:
                    p.grad.data.copy_(buffer[offset:offset + sz].view_as(p))
                else:
                    p.grad = buffer[offset:offset + sz].view_as(p).clone()
                offset += sz

        def reduction_fn():
            # Runs at most once per backward pass, and not at all while
            # gradients are being accumulated locally.
            if not self.need_reduction or self.accumulate_grads:
                return
            self.need_reduction = False
            if self.buffer is None:
                self.buffer = next(self.module.parameters()).new(self.buffer_size)
            # Greedily pack parameters into buffer-sized groups and reduce
            # each group with a single all-reduce call.
            offset = 0
            buffered_params = []
            for param in self.module.parameters():
                if not param.requires_grad:
                    continue
                if param.grad is None:
                    param.grad = torch.zeros_like(param)
                if param.grad.requires_grad:
                    raise RuntimeError("DistributedDataParallel only works with gradients that don't require grad")
                sz = param.numel()
                if sz > self.buffer.numel():
                    # Parameter is larger than the buffer: reduce it alone.
                    all_reduce([param])
                else:
                    if offset + sz > self.buffer.numel():
                        # Current group would overflow the buffer; flush it.
                        all_reduce(buffered_params)
                        offset = 0
                        buffered_params.clear()
                    buffered_params.append(param)
                    offset += sz
            if len(buffered_params) > 0:
                all_reduce(buffered_params)

        for p in self.module.parameters():

            def allreduce_hook(*unused):
                # Defer the actual reduction to the end of the backward pass.
                self.need_reduction = True
                Variable._execution_engine.queue_callback(reduction_fn)

            if p.requires_grad:
                p.register_hook(allreduce_hook)
|
class Meter(object):
    """Base class for Meters."""

    def __init__(self):
        pass

    def state_dict(self):
        """Return a serializable snapshot of this meter (empty by default)."""
        return {}

    def load_state_dict(self, state_dict):
        """Restore meter state from *state_dict* (no-op by default)."""
        pass

    def reset(self):
        """Reset the meter to its initial state; subclasses must implement."""
        raise NotImplementedError

    @property
    def smoothed_value(self) -> float:
        'Smoothed value used for logging.'
        raise NotImplementedError
|
def safe_round(number, ndigits):
    """Round *number* to *ndigits* digits, passing unroundable values through.

    Values without a ``__round__`` implementation (e.g. strings or None) are
    returned unchanged instead of raising.
    """
    return round(number, ndigits) if hasattr(number, '__round__') else number
|
class AverageMeter(Meter):
    'Computes and stores the average and current value'

    def __init__(self, round: Optional[int] = None):
        # Digits to round smoothed values to, or None for no rounding.
        self.round = round
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = None   # most recent update
        self.sum = 0      # weighted running sum
        self.count = 0    # total weight seen so far

    def update(self, val, n=1):
        """Record *val* with weight *n*; None values are ignored."""
        if val is None:
            return
        self.val = val
        if n > 0:
            self.sum = self.sum + val * n
            self.count = self.count + n

    def state_dict(self):
        return {
            'val': self.val,
            'sum': self.sum,
            'count': self.count,
            'round': self.round,
        }

    def load_state_dict(self, state_dict):
        self.val = state_dict['val']
        self.sum = state_dict['sum']
        self.count = state_dict['count']
        self.round = state_dict.get('round', None)

    @property
    def avg(self):
        # Fall back to the latest value when no weighted updates were seen.
        if self.count > 0:
            return self.sum / self.count
        return self.val

    @property
    def smoothed_value(self) -> float:
        smoothed = self.avg
        if self.round is not None and smoothed is not None:
            smoothed = safe_round(smoothed, self.round)
        return smoothed
|
class TimeMeter(Meter):
    'Computes the average occurrence of some event per second'

    def __init__(self, init: int = 0, n: int = 0, round: Optional[int] = None):
        self.round = round
        self.reset(init, n)

    def reset(self, init=0, n=0):
        # *init* carries elapsed seconds forward from a previous run.
        self.init = init
        self.start = time.perf_counter()
        self.n = n

    def update(self, val=1):
        self.n = self.n + val

    def state_dict(self):
        # Persist total elapsed time so timing survives checkpointing.
        return {'init': self.elapsed_time, 'n': self.n, 'round': self.round}

    def load_state_dict(self, state_dict):
        if 'start' in state_dict:
            # Legacy state format: it carried no usable count, so drop 'n'.
            self.reset(init=state_dict['init'])
        else:
            self.reset(init=state_dict['init'], n=state_dict['n'])
        self.round = state_dict.get('round', None)

    @property
    def avg(self):
        return self.n / self.elapsed_time

    @property
    def elapsed_time(self):
        return self.init + (time.perf_counter() - self.start)

    @property
    def smoothed_value(self) -> float:
        rate = self.avg
        if self.round is not None and rate is not None:
            rate = safe_round(rate, self.round)
        return rate
|
class StopwatchMeter(Meter):
    'Computes the sum/avg duration of some event in seconds'

    def __init__(self, round: Optional[int] = None):
        self.round = round
        self.sum = 0            # total duration across all start/stop pairs
        self.n = 0              # total event count
        self.start_time = None  # None means the stopwatch is not running

    def start(self):
        self.start_time = time.perf_counter()

    def stop(self, n=1):
        # Calling stop() without a prior start() is silently ignored.
        if self.start_time is None:
            return
        delta = time.perf_counter() - self.start_time
        self.sum = self.sum + delta
        self.n = self.n + n

    def reset(self):
        self.sum = 0
        self.n = 0
        # Restart timing immediately so reset() also behaves as start().
        self.start()

    def state_dict(self):
        return {'sum': self.sum, 'n': self.n, 'round': self.round}

    def load_state_dict(self, state_dict):
        self.sum = state_dict['sum']
        self.n = state_dict['n']
        self.start_time = None
        self.round = state_dict.get('round', None)

    @property
    def avg(self):
        if self.n > 0:
            return self.sum / self.n
        return self.sum

    @property
    def elapsed_time(self):
        if self.start_time is None:
            return 0.0
        return time.perf_counter() - self.start_time

    @property
    def smoothed_value(self) -> float:
        # Report the running elapsed time until the first stop() completes.
        value = self.avg if self.sum > 0 else self.elapsed_time
        if self.round is not None and value is not None:
            value = safe_round(value, self.round)
        return value
|
class MetersDict(OrderedDict):
    """A sorted dictionary of :class:`Meters`.

    Meters are sorted according to a priority that is given when the
    meter is first added to the dictionary.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.priorities = []

    def __setitem__(self, key, value):
        assert key not in self, "MetersDict doesn't support reassignment"
        priority, meter = value
        # Ties on priority are broken by insertion order.
        bisect.insort(self.priorities, (priority, len(self.priorities), key))
        super().__setitem__(key, meter)
        # Rebuild the OrderedDict ordering to match the priority ordering.
        for (_, _, existing_key) in self.priorities:
            self.move_to_end(existing_key)

    def add_meter(self, key, meter, priority):
        self.__setitem__(key, (priority, meter))

    def state_dict(self):
        return [
            (pri, key, self[key].__class__.__name__, self[key].state_dict())
            for (pri, _, key) in self.priorities
            # Derived meters are recreated by their owner; don't persist them.
            if not isinstance(self[key], MetersDict._DerivedMeter)
        ]

    def load_state_dict(self, state_dict):
        self.clear()
        self.priorities.clear()
        for (pri, key, meter_cls, meter_state) in state_dict:
            # Resolve the meter class by name from this module's namespace.
            meter = globals()[meter_cls]()
            meter.load_state_dict(meter_state)
            self.add_meter(key, meter, pri)

    def get_smoothed_value(self, key: str) -> float:
        'Get a single smoothed value.'
        meter = self[key]
        if isinstance(meter, MetersDict._DerivedMeter):
            return meter.fn(self)
        return meter.smoothed_value

    def get_smoothed_values(self) -> Dict[(str, float)]:
        'Get all smoothed values.'
        # Keys starting with '_' are internal and excluded from logging.
        return OrderedDict(
            (key, self.get_smoothed_value(key))
            for key in self.keys()
            if not key.startswith('_')
        )

    def reset(self):
        'Reset Meter instances.'
        for meter in self.values():
            if not isinstance(meter, MetersDict._DerivedMeter):
                meter.reset()

    class _DerivedMeter(Meter):
        'A Meter whose values are derived from other Meters.'

        def __init__(self, fn):
            self.fn = fn

        def reset(self):
            pass
|
@contextlib.contextmanager
def aggregate(name: Optional[str] = None, new_root: bool = False):
    """Context manager to aggregate metrics under a given name.

    Aggregations can be nested. If *new_root* is ``False``, then logged
    metrics will be recorded along the entire stack of nested
    aggregators, including a global "default" aggregator. If *new_root*
    is ``True``, then this aggregator will be the root of a new
    aggregation stack, thus bypassing any parent aggregators.

    Note that aggregation contexts are uniquely identified by their
    *name* (e.g., train, valid). Creating a context with an existing
    name will reuse the corresponding :class:`MetersDict` instance.
    If no name is given, then a temporary aggregator will be created.

    Usage::

        with metrics.aggregate("train"):
            for step, batch in enumerate(epoch):
                with metrics.aggregate("train_inner") as agg:
                    metrics.log_scalar("loss", get_loss(batch))
                    if step % log_interval == 0:
                        print(agg.get_smoothed_value("loss"))
                        agg.reset()
        print(metrics.get_smoothed_values("train")["loss"])

    Args:
        name (str): name of the aggregation. Defaults to a
            random/temporary name if not given explicitly.
        new_root (bool): make this aggregation the root of a new
            aggregation stack.
    """
    if name is None:
        # Temporary aggregator with a random, collision-free name.
        name = str(uuid.uuid4())
        assert name not in _aggregators
        agg = MetersDict()
    else:
        assert name != 'default'
        agg = _aggregators.setdefault(name, MetersDict())
    if new_root:
        # Park the current aggregation stack so it can be restored on exit.
        backup_aggregators = _active_aggregators.copy()
        _active_aggregators.clear()
        backup_aggregators_cnt = _active_aggregators_cnt.copy()
        _active_aggregators_cnt.clear()
    _active_aggregators[name] = agg
    _active_aggregators_cnt[name] += 1
    try:
        yield agg
    finally:
        # FIX: run the teardown even when the caller's block raises. The
        # previous version skipped it on exceptions, leaving stale entries
        # in _active_aggregators and (with new_root) losing the backup.
        _active_aggregators_cnt[name] -= 1
        if _active_aggregators_cnt[name] == 0 and name in _active_aggregators:
            del _active_aggregators[name]
        if new_root:
            _active_aggregators.clear()
            _active_aggregators.update(backup_aggregators)
            _active_aggregators_cnt.clear()
            _active_aggregators_cnt.update(backup_aggregators_cnt)
|
def get_active_aggregators() -> List[MetersDict]:
    """Return a snapshot list of the currently active aggregators."""
    active = _active_aggregators.values()
    return list(active)
|
def log_scalar(key: str, value: float, weight: float = 1, priority: int = 10, round: Optional[int] = None):
    """Log a scalar value.

    Args:
        key (str): name of the field to log
        value (float): value to log
        weight (float): weight that this value contributes to the average.
            A weight of 0 will always log the latest value.
        priority (int): smaller values are logged earlier in the output
        round (Optional[int]): number of digits to round to when displaying
    """
    for agg in get_active_aggregators():
        meter = agg.get(key)
        if meter is None:
            # First sighting of this key in this aggregator: create the meter.
            meter = AverageMeter(round=round)
            agg.add_meter(key, meter, priority)
        meter.update(value, weight)
|
def log_derived(key: str, fn: Callable[([MetersDict], float)], priority: int = 20):
    """Log a scalar value derived from other meters.

    Args:
        key (str): name of the field to log
        fn (Callable[[MetersDict], float]): function that takes a single
            argument *meters* and returns the derived value
        priority (int): smaller values are logged earlier in the output
    """
    for agg in get_active_aggregators():
        if key in agg:
            continue
        agg.add_meter(key, MetersDict._DerivedMeter(fn), priority)
|
def log_speed(key: str, value: float, priority: int = 30, round: Optional[int] = None):
    """Log the rate of some quantity per second.

    Args:
        key (str): name of the field to log
        value (float): value to log
        priority (int): smaller values are logged earlier in the output
        round (Optional[int]): number of digits to round to when displaying
    """
    for agg in get_active_aggregators():
        if key in agg:
            agg[key].update(value)
        else:
            # The first observation only establishes the meter and its clock.
            agg.add_meter(key, TimeMeter(round=round), priority)
            agg[key].reset()
|
def log_start_time(key: str, priority: int = 40, round: Optional[int] = None):
    """Log the duration of some event in seconds.

    The duration will be computed once :func:`log_stop_time` is called.

    Args:
        key (str): name of the field to log
        priority (int): smaller values are logged earlier in the output
        round (Optional[int]): number of digits to round to when displaying
    """
    for agg in get_active_aggregators():
        meter = agg.get(key)
        if meter is None:
            meter = StopwatchMeter(round=round)
            agg.add_meter(key, meter, priority)
        meter.start()
|
def log_stop_time(key: str, weight: float = 0.0):
    """Log the duration of some event in seconds.

    The duration will be computed since :func:`log_start_time` was called.
    Set weight > 0 to report the average time instead of the sum.

    Args:
        key (str): name of the field to log
        weight (float): weight that this time contributes to the average
    """
    for agg in get_active_aggregators():
        meter = agg[key]
        meter.stop(weight)
|
def log_custom(new_meter_fn: Callable[([], Meter)], key: str, *args, priority: int = 50, **kwargs):
    """Log using a custom Meter.

    Any extra *args* or *kwargs* will be passed through to the Meter's
    *update* method.

    Args:
        new_meter_fn (Callable[[], Meter]): function that returns a new
            Meter instance
        key (str): name of the field to log
        priority (int): smaller values are logged earlier in the output
    """
    for agg in get_active_aggregators():
        meter = agg.get(key)
        if meter is None:
            meter = new_meter_fn()
            agg.add_meter(key, meter, priority)
        meter.update(*args, **kwargs)
|
def reset_meter(name: str, key: str) -> None:
    'Reset Meter instance aggregated under a given *name* and *key*.'
    meter = get_meter(name, key)
    if meter is None:
        # Nothing logged under this name/key yet; nothing to reset.
        return
    meter.reset()
|
def reset_meters(name: str) -> None:
    'Reset Meter instances aggregated under a given *name*.'
    meters = get_meters(name)
    if meters is None:
        # No aggregator registered under this name; nothing to reset.
        return
    meters.reset()
|
def get_meter(name: str, key: str) -> Meter:
    """Get a single Meter instance aggregated under *name* and *key*.

    Returns:
        Meter or None if no metrics have been logged under *name* and *key*.
    """
    agg = _aggregators.get(name)
    if agg is None:
        return None
    return agg.get(key, None)
|
def get_meters(name: str) -> MetersDict:
    """Get Meter instances aggregated under a given *name*.

    Returns:
        MetersDict or None if no metrics have been logged under *name*.
    """
    meters = _aggregators.get(name, None)
    return meters
|
def get_smoothed_value(name: str, key: str) -> float:
    """Get a single smoothed value.

    Raises:
        KeyError: if no metrics have been logged under *name* and *key*.
    """
    agg = _aggregators[name]
    return agg.get_smoothed_value(key)
|
def get_smoothed_values(name: str) -> Dict[(str, float)]:
    """Get smoothed values aggregated under a given *name*.

    Raises:
        KeyError: if no metrics have been logged under *name*.
    """
    agg = _aggregators[name]
    return agg.get_smoothed_values()
|
def state_dict():
    """Serialize every named aggregator into an ordered mapping."""
    snapshot = OrderedDict()
    for name, agg in _aggregators.items():
        snapshot[name] = agg.state_dict()
    return snapshot
|
def load_state_dict(state_dict):
    """Restore aggregators from a snapshot produced by :func:`state_dict`."""
    for name, agg_state in state_dict.items():
        fresh = MetersDict()
        _aggregators[name] = fresh
        fresh.load_state_dict(agg_state)
|
def build_model(args, task):
    """Instantiate the model class registered under ``args.arch``."""
    model_cls = ARCH_MODEL_REGISTRY[args.arch]
    return model_cls.build_model(args, task)
|
def register_model(name):
    """
    New model types can be added to fairseq with the :func:`register_model`
    function decorator.

    For example::

        @register_model('lstm')
        class LSTM(FairseqEncoderDecoderModel):
            (...)

    .. note:: All models must implement the :class:`BaseFairseqModel` interface.
        Typically you will extend :class:`FairseqEncoderDecoderModel` for
        sequence-to-sequence tasks or :class:`FairseqLanguageModel` for
        language modeling tasks.

    Args:
        name (str): the name of the model
    """

    def _register_model_cls(cls):
        # Reject duplicate names and classes outside the model hierarchy.
        if name in MODEL_REGISTRY:
            raise ValueError('Cannot register duplicate model ({})'.format(name))
        if not issubclass(cls, BaseFairseqModel):
            raise ValueError('Model ({}: {}) must extend BaseFairseqModel'.format(name, cls.__name__))
        MODEL_REGISTRY[name] = cls
        return cls

    return _register_model_cls
|
def register_model_architecture(model_name, arch_name):
    """
    New model architectures can be added to fairseq with the
    :func:`register_model_architecture` function decorator. After registration,
    model architectures can be selected with the ``--arch`` command-line
    argument.

    For example::

        @register_model_architecture('lstm', 'lstm_luong_wmt_en_de')
        def lstm_luong_wmt_en_de(args):
            args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1000)
            (...)

    The decorated function should take a single argument *args*, which is a
    :class:`argparse.Namespace` of arguments parsed from the command-line. The
    decorated function should modify these arguments in-place to match the
    desired architecture.

    Args:
        model_name (str): the name of the Model (Model must already be
            registered)
        arch_name (str): the name of the model architecture (``--arch``)
    """

    def _register_model_arch_fn(fn):
        # Validate inputs before touching any registry.
        if model_name not in MODEL_REGISTRY:
            raise ValueError('Cannot register model architecture for unknown model type ({})'.format(model_name))
        if arch_name in ARCH_MODEL_REGISTRY:
            raise ValueError('Cannot register duplicate model architecture ({})'.format(arch_name))
        if not callable(fn):
            raise ValueError('Model architecture must be callable ({})'.format(arch_name))
        # Map arch name -> model class, model name -> arch names, and
        # arch name -> the defaults-applying function.
        ARCH_MODEL_REGISTRY[arch_name] = MODEL_REGISTRY[model_name]
        ARCH_MODEL_INV_REGISTRY.setdefault(model_name, []).append(arch_name)
        ARCH_CONFIG_REGISTRY[arch_name] = fn
        return fn

    return _register_model_arch_fn
|
class CompositeEncoder(FairseqEncoder):
    """
    A wrapper around a dictionary of :class:`FairseqEncoder` objects.

    We run forward on each encoder and return a dictionary of outputs. The first
    encoder's dictionary is used for initialization.

    Args:
        encoders (dict): a dictionary of :class:`FairseqEncoder` objects.
    """

    def __init__(self, encoders):
        first_encoder = next(iter(encoders.values()))
        super().__init__(first_encoder.dictionary)
        self.encoders = encoders
        # Register every sub-encoder so nn.Module tracks its parameters.
        for key, encoder in self.encoders.items():
            self.add_module(key, encoder)

    def forward(self, src_tokens, src_lengths):
        """
        Args:
            src_tokens (LongTensor): tokens in the source language of shape
                `(batch, src_len)`
            src_lengths (LongTensor): lengths of each source sentence of shape
                `(batch)`

        Returns:
            dict:
                the outputs from each Encoder
        """
        return {
            key: encoder(src_tokens, src_lengths)
            for key, encoder in self.encoders.items()
        }

    def reorder_encoder_out(self, encoder_out, new_order):
        'Reorder encoder output according to new_order.'
        for key, encoder in self.encoders.items():
            encoder_out[key] = encoder.reorder_encoder_out(encoder_out[key], new_order)
        return encoder_out

    def max_positions(self):
        # The composite is limited by its most restrictive sub-encoder.
        return min(encoder.max_positions() for encoder in self.encoders.values())

    def upgrade_state_dict(self, state_dict):
        for encoder in self.encoders.values():
            encoder.upgrade_state_dict(state_dict)
        return state_dict
|
@register_model('delight_transformer_lm')
class DeLighTTransformerLanguageModel(FairseqLanguageModel):
    """DeLighT transformer language model (``--arch delight_transformer_lm``).

    Wraps a :class:`DeLighTTransformerDecoder` built with
    ``no_encoder_attn=True`` (decoder-only).
    """

    @classmethod
    def hub_models(cls):
        # No pretrained checkpoints are exposed via torch.hub.
        return None

    def __init__(self, decoder):
        super().__init__(decoder)

    @staticmethod
    def add_args(parser):
        'Add model-specific arguments to the parser.'
        # Adaptive input / adaptive softmax options.
        parser.add_argument('--adaptive-input', action='store_true', help='Use Adaptive input or standard embedding for mapping function in DeFINE')
        parser.add_argument('--adaptive-input-factor', type=float, metavar='N', help='adaptive input factor')
        parser.add_argument('--adaptive-input-cutoff', metavar='EXPR', help='comma separated list of adaptive input cutoff points.')
        (parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR', help='comma separated list of adaptive softmax cutoff points. Must be used with adaptive_loss criterion'),)
        parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D', help='sets adaptive softmax dropout for the tail projections')
        parser.add_argument('--adaptive-softmax-factor', type=float, metavar='N', help='adaptive input factor')
        parser.add_argument('--tie-adaptive-weights', action='store_true', help='if set, ties the weights of adaptive softmax and adaptive input')
        parser.add_argument('--tie-adaptive-proj', action='store_true', help='if set, ties the projection weights of adaptive softmax and adaptive input')
        # DeLighT embedding-layer options.
        parser.add_argument('--delight-emb-map-dim', type=int, help='Mapping dimension in DeLight embedding layer')
        parser.add_argument('--delight-emb-out-dim', type=int, help='Output dimension of DeLight embedding layer')
        parser.add_argument('--delight-emb-max-groups', type=int, help='Max. number of groups in DeLight embedding layers')
        parser.add_argument('--delight-emb-dropout', type=float, help='Dropout in DeLight embedding layers')
        parser.add_argument('--delight-emb-depth', type=int, help='Depth of DeLight unit in embedding layer')
        # DeLighT decoder options.
        parser.add_argument('--delight-dec-scaling', type=str, choices=['block', 'uniform'], help='Block-wise scaling or uniform')
        parser.add_argument('--delight-dec-layers', type=int, help='Number of DeLight decoder layers')
        parser.add_argument('--delight-dec-min-depth', type=int, help='Min. number of decoder layers')
        parser.add_argument('--delight-dec-max-depth', type=int, help='Max. number of decoder layers')
        parser.add_argument('--delight-dec-width-mult', type=int, help='Decoder width multiplier')
        parser.add_argument('--delight-dec-ffn-red', type=int, help='Reduce FFN dims in DeLight decoder layer by this factor')
        parser.add_argument('--delight-dec-max-groups', type=int, help='Max. groups in DeLight unit in the decoder')
        # GLT / DeFINE transformation options.
        parser.add_argument('--no-glt-shuffle', action='store_true', help='Disable shuffling in GLT transformation.')
        parser.add_argument('--define-iclr', action='store_true', help='DeFINE unit as in ICLR paper')
        parser.add_argument('--norm-type', type=str, help='Normalization layer')
        parser.add_argument('--act-type', type=str, help='Activation function')
        # Dropout options.
        parser.add_argument('--delight-dropout', type=float, help='Dropout value for DeLight layers')
        parser.add_argument('--ffn-dropout', type=float, help='Dropout after Light-weight FFN')
        # Statistics-printing options (see comptue_stats below).
        parser.add_argument('--print-stats', action='store_true', help='Print MACs')
        parser.add_argument('--tgt-len-ps', type=int, help='Target length for printing stats')
        # Generic transformer-style options.
        parser.add_argument('--activation-fn', choices=utils.get_available_activation_fns(), help='activation function to use')
        parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability')
        parser.add_argument('--pe-dropout', type=float, metavar='D', help='dropout probability for positional encodings')
        parser.add_argument('--attention-dropout', type=float, metavar='D', help='dropout probability for attention weights')
        parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D', help='dropout probability after activation in FFN.')
        parser.add_argument('--decoder-normalize-before', action='store_true', help='apply layernorm before each decoder block')
        parser.add_argument('--no-decoder-final-norm', action='store_true', help="don't add an extra layernorm after the last decoder block")
        parser.add_argument('--no-token-positional-embeddings', action='store_true', help='if set, disables positional embeddings (outside self attention)')
        parser.add_argument('--share-decoder-input-output-embed', action='store_true', help='share decoder input and output embeddings')
        parser.add_argument('--decoder-learned-pos', action='store_true', help='use learned positional embeddings in the decoder')
        parser.add_argument('--decoder-layerdrop', type=float, metavar='D', default=0, help='LayerDrop probability for decoder')
        parser.add_argument('--decoder-layers-to-keep', default=None, help='which layers to *keep* when pruning as a comma-separated list')
        parser.add_argument('--layernorm-embedding', action='store_true', help='add layernorm to embedding')
        parser.add_argument('--no-scale-embedding', action='store_true', help='if True, dont scale embeddings')

    @classmethod
    def build_model(cls, args, task):
        'Build a new model instance.'
        # Fill in any architecture defaults the user did not override.
        base_lm_architecture(args)
        if args.decoder_layers_to_keep:
            # Pruning: the kept-layer list determines the decoder depth.
            args.decoder_layers = len(args.decoder_layers_to_keep.split(','))
        if (getattr(args, 'max_target_positions', None) is None):
            args.max_target_positions = getattr(args, 'tokens_per_sample', DEFAULT_MAX_TARGET_POSITIONS)
        # Choose the token-mapping layer: adaptive input or plain embedding.
        if args.adaptive_input:
            map_layer = AdaptiveInput(len(task.source_dictionary), task.source_dictionary.pad(), args.delight_emb_map_dim, args.adaptive_input_factor, args.delight_emb_map_dim, options.eval_str_list(args.adaptive_input_cutoff, type=int), no_scale_emb=args.no_scale_embedding)
        else:
            map_layer = get_embedding_layer(num_embeddings=len(task.source_dictionary), embedding_dim=args.delight_emb_map_dim, padding_idx=task.source_dictionary.pad())
        embed_tokens = DExTraEmb(args, map_layer=map_layer)
        if args.tie_adaptive_weights:
            # Weight tying requires matching adaptive input/softmax settings.
            assert args.adaptive_input
            assert (args.adaptive_input_factor == args.adaptive_softmax_factor)
            assert (args.adaptive_softmax_cutoff == args.adaptive_input_cutoff), '{} != {}'.format(args.adaptive_softmax_cutoff, args.adaptive_input_cutoff)
        # Decoder-only model: no encoder attention.
        decoder = DeLighTTransformerDecoder(args, task.target_dictionary, embed_tokens, no_encoder_attn=True)
        if (args.print_stats and is_master(args)):
            cls.comptue_stats(args, decoder)
        return DeLighTTransformerLanguageModel(decoder)

    @classmethod
    def comptue_stats(cls, args, decoder):
        # NOTE(review): the name is misspelled ('comptue') but kept because
        # build_model and possibly external callers reference it by this name.
        # Prints per-layer parameter counts and per-timestep MAC estimates to
        # stdout and writes two CSV files under args.save_dir.
        target_length = args.tgt_len_ps
        print((('=' * 15) * target_length))
        print('{:<90} {:<20}'.format('', cls.__name__))
        print((('=' * 15) * target_length))
        overall_macs = 0.0
        overall_params = 0.0
        round_places = 2
        # Maps layer name -> list of [time_step, params (M), macs (M)] rows.
        dec_string = {}
        import csv
        with open('{}/decoder_stats_{}.csv'.format(args.save_dir, target_length), mode='w') as csv_file:
            csv_writer = csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
            for time_step in range(1, (target_length + 1)):
                for (dec_idx, (k, v)) in enumerate(decoder.compute_macs_params(src_len=1, tgt_len=time_step).items()):
                    macs = (v['macs'] + v['emb_macs'])
                    params = (v['params'] + v['emb_params'])
                    overall_macs += macs
                    # Parameters are shared across timesteps, so count once.
                    if (time_step == 1):
                        overall_params += params
                    # Convert raw counts to millions for display.
                    macs = round((float(macs) / 1000000.0), round_places)
                    params = round((float(params) / 1000000.0), round_places)
                    if (k not in dec_string):
                        dec_string[k] = [[time_step, params, macs]]
                    else:
                        dec_string[k].append([time_step, params, macs])
                    if (dec_idx == 0):
                        # Emit the CSV header before the first layer row.
                        key_list = list(v.keys())
                        csv_writer.writerow(((['Time'] + ['Layer']) + key_list))
                    value_list = list(v.values())
                    value_list = (([time_step] + [k]) + value_list)
                    csv_writer.writerow(value_list)
        # Build the console table header: a Params column plus one column per
        # decoding timestep, followed by an overall-MACs column.
        format_str_dec1 = '{:<20} | \t '.format('Layer')
        dotted_line = ('-' * 20)
        for t in range((target_length + 1)):
            if (t == 0):
                format_str_dec1 += '{:<10} | \t '.format('Params')
            else:
                format_str_dec1 += '{:<10} '.format('t_{}'.format(t))
            dotted_line += ('-' * 10)
        dotted_line += ('-' * 10)
        format_str_dec1 += '| \t {:<10} '.format('Overall MAC')
        dotted_line += ('-' * 10)
        print(dotted_line)
        print(format_str_dec1)
        print(dotted_line)
        # One table row per layer: params once, then MACs for each timestep.
        for (layer_name, v) in dec_string.items():
            time_step_str = '{:<20} | \t '.format(layer_name)
            macs = 0
            for (idx, (t, p, m)) in enumerate(v):
                if (idx == 0):
                    time_step_str += '{:<10} | \t '.format(p)
                    time_step_str += '{:<10} '.format(m)
                else:
                    time_step_str += '{:<10} '.format(m)
                macs += m
            time_step_str += '| \t {:<10} '.format(round(macs, 3))
            print(time_step_str)
        overall_macs = round((float(overall_macs) / 1000000.0), round_places)
        overall_params = round((float(overall_params) / 1000000.0), round_places)
        print((('-' * 15) * target_length))
        print('Total MACs for {} decoder timesteps: {} M'.format(target_length, overall_macs))
        print('Total parameters: {} M'.format(overall_params))
        print((('=' * 15) * target_length))
        # Also persist the aggregate numbers for later analysis.
        with open('{}/overall_stats_{}.csv'.format(args.save_dir, target_length), mode='w') as csv_file:
            csv_writer = csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
            csv_writer.writerow(['Time steps', target_length])
            csv_writer.writerow(['Total MACs (in million)', overall_macs])
            csv_writer.writerow(['Total parameters (in million)', overall_params])
|
@register_model_architecture('delight_transformer_lm', 'delight_transformer_lm')
def base_lm_architecture(args):
    """Fill in default hyperparameters for the DeLighT transformer LM.

    Each default is applied only when the attribute is absent from *args*,
    so command-line overrides always win. Mutates *args* in place.
    """

    def fallback(attr, default):
        # Keep an existing attribute value, otherwise install the default.
        setattr(args, attr, getattr(args, attr, default))

    fallback('adaptive_input', False)
    fallback('adaptive_input_factor', ADAPTIVE_SCALE_FACTOR)
    fallback('adaptive_input_cutoff', None)
    fallback('delight_emb_map_dim', 64)
    fallback('delight_emb_out_dim', 128)
    assert (args.delight_emb_out_dim % MIN_ELEMENTS_PER_GROUP) == 0, 'remainder({}, {}) should be equal to 0'.format(args.delight_emb_out_dim, MIN_ELEMENTS_PER_GROUP)
    # Largest power of two that still leaves MIN_ELEMENTS_PER_GROUP per group.
    max_groups = 2 ** math.ceil(math.log(args.delight_emb_out_dim // MIN_ELEMENTS_PER_GROUP, 2))
    fallback('delight_emb_dropout', DEFAULT_DROPOUT)
    fallback('delight_emb_depth', DEFAULT_MIN_DEXTRA_LAYERS)
    fallback('delight_emb_width_mult', DEFAULT_WIDTH_MULTIPLIER)
    fallback('delight_emb_max_groups', max_groups)
    fallback('delight_dec_scaling', 'block')
    fallback('delight_dec_layers', DEFAULT_MAX_DEXTRA_LAYERS)
    fallback('delight_dec_min_depth', DEFAULT_MIN_DEXTRA_LAYERS)
    fallback('delight_dec_max_depth', DEFAULT_MAX_DEXTRA_LAYERS)
    fallback('delight_dec_width_mult', DEFAULT_WIDTH_MULTIPLIER)
    fallback('delight_dec_max_groups', max_groups)
    fallback('delight_dec_ffn_red', DEFAULT_FFN_RED_FACTOR)
    fallback('no_glt_shuffle', False)
    # Derived flag: shuffling is on unless explicitly disabled.
    args.glt_shuffle = not args.no_glt_shuffle
    fallback('define_iclr', False)
    fallback('delight_dropout', DEFAULT_DROPOUT)
    fallback('norm_type', 'ln')
    fallback('act_type', 'swish')
    fallback('dropout', DEFAULT_DROPOUT)
    fallback('attention_dropout', DEFAULT_DROPOUT)
    fallback('activation_dropout', 0.0)
    fallback('adaptive_softmax_dropout', DEFAULT_DROPOUT)
    fallback('pe_dropout', DEFAULT_DROPOUT)
    fallback('ffn_dropout', DEFAULT_DROPOUT)
    if hasattr(args, 'no_tie_adaptive_proj'):
        # Backwards compatibility with older configurations.
        args.no_decoder_final_norm = True
        if args.no_tie_adaptive_proj is False:
            args.tie_adaptive_proj = True
    if hasattr(args, 'decoder_final_norm'):
        args.no_decoder_final_norm = not args.decoder_final_norm
    fallback('adaptive_softmax_cutoff', None)
    fallback('adaptive_softmax_factor', ADAPTIVE_SCALE_FACTOR)
    fallback('tie_adaptive_weights', False)
    fallback('tie_adaptive_proj', False)
    fallback('print_stats', False)
    fallback('tgt_len_ps', 20)
    fallback('decoder_learned_pos', False)
    fallback('activation_fn', 'swish')
    fallback('add_bos_token', False)
    fallback('no_token_positional_embeddings', False)
    fallback('share_decoder_input_output_embed', False)
    # This architecture always uses pre-norm decoder blocks.
    args.decoder_normalize_before = True
    fallback('no_decoder_final_norm', False)
    fallback('no_scale_embedding', False)
    fallback('layernorm_embedding', False)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.