@register_model_architecture('delight_transformer_lm', 'delight_transformer_lm_wiki103')
def delight_transformer_lm_wiki103(args):
args.delight_emb_map_dim = getattr(args, 'delight_emb_map_dim', 128)
args.delight_emb_out_dim = getattr(args, 'delight_emb_out_dim', 512)
args.delight_dec_min_depth = getattr(args, 'delight_dec_min_depth', 4)
args.delight_dec_max_depth = getattr(args, 'delight_dec_max_depth', 8)
args.delight_emb_depth = args.delight_dec_min_depth
args.delight_dec_layers = args.delight_dec_max_depth
args.delight_dec_width_mult = getattr(args, 'delight_dec_width_mult', 2)
args.delight_emb_width_mult = args.delight_dec_width_mult
args.adaptive_input = getattr(args, 'adaptive_input', True)
args.tie_adaptive_weights = getattr(args, 'tie_adaptive_weights', True)
args.adaptive_input_cutoff = getattr(args, 'adaptive_input_cutoff', '20000,60000')
args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', '20000,60000')
args.tie_adaptive_proj = getattr(args, 'tie_adaptive_proj', True)
args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0.1)
scale_dropout = 0.1
delta_model_dimension = (1024.0 / args.delight_emb_out_dim)
scale_dropout_d_m = round((scale_dropout / delta_model_dimension), 2)
scale_dropout_d_m = bound_function(0, 0.3, scale_dropout_d_m)
scale_attn_drop = 0.1
scale_attn_drop_d_m = round((scale_attn_drop / delta_model_dimension), 2)
scale_attn_drop_d_m = bound_function(0, 0.1, scale_attn_drop_d_m)
scale_delight_drop = 0.1
scale_delight_drop_d_m = round((scale_delight_drop / delta_model_dimension), 2)
scale_delight_drop_d_m = bound_function(0, 0.1, scale_delight_drop_d_m)
args.dropout = getattr(args, 'dropout', scale_dropout_d_m)
args.delight_emb_dropout = getattr(args, 'delight_emb_dropout', 0.1)
args.attention_dropout = getattr(args, 'attention_dropout', scale_attn_drop_d_m)
args.delight_dropout = getattr(args, 'delight_dropout', scale_delight_drop_d_m)
args.pe_dropout = getattr(args, 'pe_dropout', 0.1)
args.activation_dropout = getattr(args, 'activation_dropout', 0.0)
args.ffn_dropout = getattr(args, 'ffn_dropout', scale_dropout_d_m)
args.no_decoder_final_norm = getattr(args, 'no_decoder_final_norm', True)
base_lm_architecture(args)
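# Illustrative sketch (not part of DeLighT): the dropout scaling above divides a base
# rate by delta_model_dimension = 1024 / delight_emb_out_dim and clamps the result.
# bound_function() is defined elsewhere; a simple clamp(lo, hi, value) is assumed here.
def _example_scaled_dropout(emb_out_dim=512, base_rate=0.1):
    def clamp(lo, hi, value):              # assumed behaviour of bound_function
        return max(lo, min(hi, value))
    delta = 1024.0 / emb_out_dim           # 2.0 for the 512-dim wiki103 config
    return clamp(0.0, 0.3, round(base_rate / delta, 2))   # -> 0.05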
|
def DistributedFairseqModel(args, model):
"""
Wrap a *model* to support distributed data parallel training.

This is similar to the built-in DistributedDataParallel, but allows
additional configuration of the DistributedDataParallel class to
use, and also provides easier access to the wrapped model by
forwarding requests for missing attributes to the wrapped model.

Args:
    args (argparse.Namespace): fairseq args
    model (BaseFairseqModel): model to wrap
"""
assert isinstance(model, nn.Module)
if (args.ddp_backend == 'c10d'):
ddp_class = nn.parallel.DistributedDataParallel
init_kwargs = dict(module=model, device_ids=[args.device_id], output_device=args.device_id, broadcast_buffers=args.broadcast_buffers, bucket_cap_mb=args.bucket_cap_mb)
if ('check_reduction' in inspect.getargspec(ddp_class)[0]):
init_kwargs['check_reduction'] = True
if ('find_unused_parameters' in inspect.getargspec(ddp_class)[0]):
init_kwargs['find_unused_parameters'] = args.find_unused_parameters
elif (args.ddp_backend == 'no_c10d'):
ddp_class = LegacyDistributedDataParallel
init_kwargs = dict(module=model, world_size=args.distributed_world_size, buffer_size=(2 ** 28))
else:
raise ValueError(('Unknown --ddp-backend: ' + args.ddp_backend))
class _DistributedFairseqModel(ddp_class):
"""Extend DistributedDataParallel to check for missing
attributes in the wrapped module."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def __getattr__(self, name):
wrapped_module = super().__getattr__('module')
if hasattr(wrapped_module, name):
return getattr(wrapped_module, name)
return super().__getattr__(name)
return _DistributedFairseqModel(**init_kwargs)
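# Usage sketch (illustrative): wrapping a model for distributed training. Assumes
# torch.distributed is already initialised and that `args` carries the fields referenced
# above (ddp_backend, device_id, broadcast_buffers, bucket_cap_mb, find_unused_parameters,
# distributed_world_size).
def _example_wrap_for_ddp(args, model):
    wrapped = DistributedFairseqModel(args, model)
    # Attribute lookups that miss on the DDP wrapper fall through to the wrapped module,
    # so model-specific helpers such as max_positions() remain reachable:
    return wrapped.max_positions()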
|
class FairseqDecoder(nn.Module):
'Base class for decoders.'
def __init__(self, dictionary):
super().__init__()
self.dictionary = dictionary
self.onnx_trace = False
def forward(self, prev_output_tokens, encoder_out=None, **kwargs):
"\n Args:\n prev_output_tokens (LongTensor): shifted output tokens of shape\n `(batch, tgt_len)`, for teacher forcing\n encoder_out (dict, optional): output from the encoder, used for\n encoder-side attention\n\n Returns:\n tuple:\n - the decoder's output of shape `(batch, tgt_len, vocab)`\n - a dictionary with any model-specific outputs\n "
(x, extra) = self.extract_features(prev_output_tokens, encoder_out=encoder_out, **kwargs)
x = self.output_layer(x)
return (x, extra)
def extract_features(self, prev_output_tokens, encoder_out=None, **kwargs):
"\n Returns:\n tuple:\n - the decoder's features of shape `(batch, tgt_len, embed_dim)`\n - a dictionary with any model-specific outputs\n "
raise NotImplementedError
def output_layer(self, features, **kwargs):
"""
Project features to the default output size, e.g., vocabulary size.

Args:
    features (Tensor): features returned by *extract_features*.
"""
raise NotImplementedError
def get_normalized_probs(self, net_output, log_probs, sample):
"Get normalized probabilities (or log probs) from a net's output."
if (hasattr(self, 'adaptive_softmax') and (self.adaptive_softmax is not None)):
if (sample is not None):
assert ('target' in sample)
target = sample['target']
else:
target = None
out = self.adaptive_softmax.get_log_prob(net_output[0], target=target)
return (out.exp_() if (not log_probs) else out)
logits = net_output[0]
if log_probs:
return utils.log_softmax(logits, dim=(- 1), onnx_trace=self.onnx_trace)
else:
return utils.softmax(logits, dim=(- 1), onnx_trace=self.onnx_trace)
def max_positions(self):
'Maximum input length supported by the decoder.'
return 1000000.0
def upgrade_state_dict(self, state_dict):
'Upgrade a (possibly old) state dict for new versions of fairseq.'
return state_dict
def prepare_for_onnx_export_(self):
self.onnx_trace = True
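# Minimal sketch (illustrative, not a real fairseq model): a toy decoder satisfying the
# FairseqDecoder contract by implementing extract_features() and output_layer(); the
# inherited forward() then chains the two.
class _ToyDecoder(FairseqDecoder):
    def __init__(self, dictionary, embed_dim=16):
        super().__init__(dictionary)
        self.embed = nn.Embedding(len(dictionary), embed_dim)
        self.proj = nn.Linear(embed_dim, len(dictionary))

    def extract_features(self, prev_output_tokens, encoder_out=None, **kwargs):
        # (batch, tgt_len) -> (batch, tgt_len, embed_dim), plus an extras dict
        return self.embed(prev_output_tokens), {}

    def output_layer(self, features, **kwargs):
        # (batch, tgt_len, embed_dim) -> (batch, tgt_len, vocab)
        return self.proj(features)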
|
class FairseqEncoder(nn.Module):
'Base class for encoders.'
def __init__(self, dictionary):
super().__init__()
self.dictionary = dictionary
def forward(self, src_tokens, src_lengths=None, **kwargs):
"""
Args:
    src_tokens (LongTensor): tokens in the source language of shape
        `(batch, src_len)`
    src_lengths (LongTensor): lengths of each source sentence of shape
        `(batch)`
"""
raise NotImplementedError
def reorder_encoder_out(self, encoder_out, new_order):
"""
Reorder encoder output according to `new_order`.

Args:
    encoder_out: output from the ``forward()`` method
    new_order (LongTensor): desired order

Returns:
    `encoder_out` rearranged according to `new_order`
"""
raise NotImplementedError
def max_positions(self):
'Maximum input length supported by the encoder.'
return 1000000.0
def upgrade_state_dict(self, state_dict):
'Upgrade a (possibly old) state dict for new versions of fairseq.'
return state_dict
|
@with_incremental_state
class FairseqIncrementalDecoder(FairseqDecoder):
"""Base class for incremental decoders.

Incremental decoding is a special mode at inference time where the Model
only receives a single timestep of input corresponding to the previous
output token (for teacher forcing) and must produce the next output
*incrementally*. Thus the model must cache any long-term state that is
needed about the sequence, e.g., hidden states, convolutional states, etc.

Compared to the standard :class:`FairseqDecoder` interface, the incremental
decoder interface allows :func:`forward` functions to take an extra keyword
argument (*incremental_state*) that can be used to cache state across
time-steps.

The :class:`FairseqIncrementalDecoder` interface also defines the
:func:`reorder_incremental_state` method, which is used during beam search
to select and reorder the incremental state based on the selection of beams.

To learn more about how incremental decoding works, refer to `this blog
<http://www.telesens.co/2019/04/21/understanding-incremental-decoding-in-fairseq/>`_.
"""
def __init__(self, dictionary):
super().__init__(dictionary)
def forward(self, prev_output_tokens, encoder_out=None, incremental_state=None, **kwargs):
"\n Args:\n prev_output_tokens (LongTensor): shifted output tokens of shape\n `(batch, tgt_len)`, for teacher forcing\n encoder_out (dict, optional): output from the encoder, used for\n encoder-side attention\n incremental_state (dict, optional): dictionary used for storing\n state during :ref:`Incremental decoding`\n\n Returns:\n tuple:\n - the decoder's output of shape `(batch, tgt_len, vocab)`\n - a dictionary with any model-specific outputs\n "
raise NotImplementedError
def extract_features(self, prev_output_tokens, encoder_out=None, incremental_state=None, **kwargs):
"\n Returns:\n tuple:\n - the decoder's features of shape `(batch, tgt_len, embed_dim)`\n - a dictionary with any model-specific outputs\n "
raise NotImplementedError
def reorder_incremental_state(self, incremental_state, new_order):
"""Reorder incremental state.

This should be called when the order of the input has changed from the
previous time step. A typical use case is beam search, where the input
order changes between time steps based on the selection of beams.
"""
seen = set()
for module in self.modules():
if ((module != self) and hasattr(module, 'reorder_incremental_state') and (module not in seen)):
seen.add(module)
result = module.reorder_incremental_state(incremental_state, new_order)
if (result is not None):
incremental_state = result
def set_beam_size(self, beam_size):
'Sets the beam size in the decoder and all children.'
if (getattr(self, '_beam_size', (- 1)) != beam_size):
seen = set()
def apply_set_beam_size(module):
if ((module != self) and hasattr(module, 'set_beam_size') and (module not in seen)):
seen.add(module)
module.set_beam_size(beam_size)
self.apply(apply_set_beam_size)
self._beam_size = beam_size
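# Sketch (illustrative): reorder_incremental_state() is invoked by beam search whenever
# the set of live hypotheses changes between time steps; new_order holds the indices of
# the surviving beams. The loop over self.modules() above propagates the reordering to
# any child module that caches state (e.g. attention key/value buffers).
def _example_reorder_beams(decoder, incremental_state):
    new_order = torch.tensor([0, 0, 2, 3])   # beam 1 dropped, beam 0 duplicated
    decoder.reorder_incremental_state(incremental_state, new_order)
    return incremental_state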
|
class BaseFairseqModel(nn.Module):
'Base class for fairseq models.'
def __init__(self):
super().__init__()
self._is_generation_fast = False
@staticmethod
def add_args(parser):
'Add model-specific arguments to the parser.'
pass
@classmethod
def build_model(cls, args, task):
'Build a new model instance.'
raise NotImplementedError('Model must implement the build_model method')
def get_targets(self, sample, net_output):
"Get targets from either the sample or the net's output."
return sample['target']
def get_normalized_probs(self, net_output, log_probs, sample=None):
"Get normalized probabilities (or log probs) from a net's output."
if hasattr(self, 'decoder'):
return self.decoder.get_normalized_probs(net_output, log_probs, sample)
elif torch.is_tensor(net_output):
logits = net_output.float()
if log_probs:
return F.log_softmax(logits, dim=(- 1))
else:
return F.softmax(logits, dim=(- 1))
raise NotImplementedError
def extract_features(self, *args, **kwargs):
'Similar to *forward* but only return features.'
return self(*args, **kwargs)
def max_positions(self):
'Maximum length supported by the model.'
return None
def load_state_dict(self, state_dict, strict=True, args=None):
"""Copies parameters and buffers from *state_dict* into this module and
its descendants.

Overrides the method in :class:`nn.Module`. Compared with that method
this additionally "upgrades" *state_dicts* from old checkpoints.
"""
self.upgrade_state_dict(state_dict)
new_state_dict = prune_state_dict(state_dict, args)
return super().load_state_dict(new_state_dict, strict)
def upgrade_state_dict(self, state_dict):
'Upgrade old state dicts to work with newer code.'
self.upgrade_state_dict_named(state_dict, '')
def upgrade_state_dict_named(self, state_dict, name):
"""Upgrade old state dicts to work with newer code.

Args:
    state_dict (dict): state dictionary to upgrade, in place
    name (str): the state dict key corresponding to the current module
"""
assert (state_dict is not None)
def do_upgrade(m, prefix):
if (len(prefix) > 0):
prefix += '.'
for (n, c) in m.named_children():
name = (prefix + n)
if hasattr(c, 'upgrade_state_dict_named'):
c.upgrade_state_dict_named(state_dict, name)
elif hasattr(c, 'upgrade_state_dict'):
c.upgrade_state_dict(state_dict)
do_upgrade(c, name)
do_upgrade(self, name)
def make_generation_fast_(self, **kwargs):
'Optimize model for faster generation.'
if self._is_generation_fast:
return
self._is_generation_fast = True
def apply_remove_weight_norm(module):
try:
nn.utils.remove_weight_norm(module)
except ValueError:
return
self.apply(apply_remove_weight_norm)
seen = set()
def apply_make_generation_fast_(module):
if ((module != self) and hasattr(module, 'make_generation_fast_') and (module not in seen)):
seen.add(module)
module.make_generation_fast_(**kwargs)
self.apply(apply_make_generation_fast_)
def train(mode=True):
if mode:
raise RuntimeError('cannot train after make_generation_fast')
self.eval()
self.train = train
def prepare_for_onnx_export_(self, **kwargs):
'Make model exportable via ONNX trace.'
seen = set()
def apply_prepare_for_onnx_export_(module):
if ((module != self) and hasattr(module, 'prepare_for_onnx_export_') and (module not in seen)):
seen.add(module)
module.prepare_for_onnx_export_(**kwargs)
self.apply(apply_prepare_for_onnx_export_)
@classmethod
def from_pretrained(cls, model_name_or_path, checkpoint_file='model.pt', data_name_or_path='.', **kwargs):
"\n Load a :class:`~fairseq.models.FairseqModel` from a pre-trained model\n file. Downloads and caches the pre-trained model file if needed.\n\n The base implementation returns a\n :class:`~fairseq.hub_utils.GeneratorHubInterface`, which can be used to\n generate translations or sample from language models. The underlying\n :class:`~fairseq.models.FairseqModel` can be accessed via the\n *generator.models* attribute.\n\n Other models may override this to implement custom hub interfaces.\n\n Args:\n model_name_or_path (str): either the name of a pre-trained model to\n load or a path/URL to a pre-trained model state dict\n checkpoint_file (str, optional): colon-separated list of checkpoint\n files in the model archive to ensemble (default: 'model.pt')\n data_name_or_path (str, optional): point args.data to the archive\n at the given path/URL. Can start with '.' or './' to reuse the\n model archive path.\n "
from fairseq import hub_utils
x = hub_utils.from_pretrained(model_name_or_path, checkpoint_file, data_name_or_path, archive_map=cls.hub_models(), **kwargs)
logger.info(x['args'])
return hub_utils.GeneratorHubInterface(x['args'], x['task'], x['models'])
@classmethod
def hub_models(cls):
return {}
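# Usage sketch (illustrative): loading a released checkpoint through the hub interface.
# 'some_pretrained_model' is a hypothetical archive name; concrete subclasses map real
# names to download URLs via hub_models(). Per the docstring above, the underlying
# models are reachable through the returned generator's .models attribute.
def _example_from_pretrained():
    lm = BaseFairseqModel.from_pretrained(
        'some_pretrained_model',       # hypothetical name or local path
        checkpoint_file='model.pt',
        data_name_or_path='.',
    )
    return lm.models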
|
class FairseqEncoderDecoderModel(BaseFairseqModel):
"""Base class for encoder-decoder models.

Args:
    encoder (FairseqEncoder): the encoder
    decoder (FairseqDecoder): the decoder
"""
def __init__(self, encoder, decoder):
super().__init__()
self.encoder = encoder
self.decoder = decoder
assert isinstance(self.encoder, FairseqEncoder)
assert isinstance(self.decoder, FairseqDecoder)
def forward(self, src_tokens, src_lengths, prev_output_tokens, **kwargs):
"\n Run the forward pass for an encoder-decoder model.\n\n First feed a batch of source tokens through the encoder. Then, feed the\n encoder output and previous decoder outputs (i.e., teacher forcing) to\n the decoder to produce the next outputs::\n\n encoder_out = self.encoder(src_tokens, src_lengths)\n return self.decoder(prev_output_tokens, encoder_out)\n\n Args:\n src_tokens (LongTensor): tokens in the source language of shape\n `(batch, src_len)`\n src_lengths (LongTensor): source sentence lengths of shape `(batch)`\n prev_output_tokens (LongTensor): previous decoder outputs of shape\n `(batch, tgt_len)`, for teacher forcing\n\n Returns:\n tuple:\n - the decoder's output of shape `(batch, tgt_len, vocab)`\n - a dictionary with any model-specific outputs\n "
encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs)
decoder_out = self.decoder(prev_output_tokens, encoder_out=encoder_out, **kwargs)
return decoder_out
def forward_decoder(self, prev_output_tokens, **kwargs):
return self.decoder(prev_output_tokens, **kwargs)
def extract_features(self, src_tokens, src_lengths, prev_output_tokens, **kwargs):
"\n Similar to *forward* but only return features.\n\n Returns:\n tuple:\n - the decoder's features of shape `(batch, tgt_len, embed_dim)`\n - a dictionary with any model-specific outputs\n "
encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs)
features = self.decoder.extract_features(prev_output_tokens, encoder_out=encoder_out, **kwargs)
return features
def output_layer(self, features, **kwargs):
'Project features to the default output size (typically vocabulary size).'
return self.decoder.output_layer(features, **kwargs)
def max_positions(self):
'Maximum length supported by the model.'
return (self.encoder.max_positions(), self.decoder.max_positions())
def max_decoder_positions(self):
'Maximum length supported by the decoder.'
return self.decoder.max_positions()
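# Usage sketch (illustrative): one teacher-forced forward pass through an
# encoder-decoder model. The tensors are dummies and the token indices hypothetical;
# `model` is any constructed FairseqEncoderDecoderModel.
def _example_encoder_decoder_forward(model, pad_idx=1):
    src_tokens = torch.tensor([[4, 5, 6, pad_idx]])    # (batch=1, src_len=4)
    src_lengths = torch.tensor([3])
    prev_output_tokens = torch.tensor([[2, 7, 8]])     # shifted targets, (1, tgt_len=3)
    logits, extra = model(src_tokens, src_lengths, prev_output_tokens)
    return logits.shape                                # expected: (1, 3, vocab)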
|
class FairseqModel(FairseqEncoderDecoderModel):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
utils.deprecation_warning('FairseqModel is deprecated, please use FairseqEncoderDecoderModel or BaseFairseqModel instead', stacklevel=4)
|
class FairseqMultiModel(BaseFairseqModel):
'Base class for combining multiple encoder-decoder models.'
def __init__(self, encoders, decoders):
super().__init__()
assert (encoders.keys() == decoders.keys())
self.keys = list(encoders.keys())
for key in self.keys:
assert isinstance(encoders[key], FairseqEncoder)
assert isinstance(decoders[key], FairseqDecoder)
self.models = nn.ModuleDict({key: FairseqEncoderDecoderModel(encoders[key], decoders[key]) for key in self.keys})
@staticmethod
def build_shared_embeddings(dicts: Dict[str, Dictionary], langs: List[str], embed_dim: int, build_embedding: callable, pretrained_embed_path: Optional[str] = None):
"""
Helper function to build shared embeddings for a set of languages after
checking that all dicts corresponding to those languages are equivalent.

Args:
    dicts: Dict of lang_id to its corresponding Dictionary
    langs: languages that we want to share embeddings for
    embed_dim: embedding dimension
    build_embedding: callable function to actually build the embedding
    pretrained_embed_path: Optional path to load pretrained embeddings
"""
shared_dict = dicts[langs[0]]
if any(((dicts[lang] != shared_dict) for lang in langs)):
raise ValueError('--share-*-embeddings requires a joined dictionary: --share-encoder-embeddings requires a joined source dictionary, --share-decoder-embeddings requires a joined target dictionary, and --share-all-embeddings requires a joint source + target dictionary.')
return build_embedding(shared_dict, embed_dim, pretrained_embed_path)
def forward(self, src_tokens, src_lengths, prev_output_tokens, **kwargs):
decoder_outs = {}
for key in self.keys:
encoder_out = self.models[key].encoder(src_tokens, src_lengths, **kwargs)
decoder_outs[key] = self.models[key].decoder(prev_output_tokens, encoder_out, **kwargs)
return decoder_outs
def max_positions(self):
'Maximum length supported by the model.'
return {key: (self.models[key].encoder.max_positions(), self.models[key].decoder.max_positions()) for key in self.keys}
def max_decoder_positions(self):
'Maximum length supported by the decoder.'
return min((model.decoder.max_positions() for model in self.models.values()))
@property
def encoder(self):
return self.models[self.keys[0]].encoder
@property
def decoder(self):
return self.models[self.keys[0]].decoder
def forward_decoder(self, prev_output_tokens, **kwargs):
return self.decoder(prev_output_tokens, **kwargs)
def load_state_dict(self, state_dict, strict=True, args=None):
"""Copies parameters and buffers from *state_dict* into this module and
its descendants.

Overrides the method in :class:`nn.Module`. Compared with that method
this additionally "upgrades" *state_dicts* from old checkpoints.
"""
self.upgrade_state_dict(state_dict)
new_state_dict = prune_state_dict(state_dict, args)
return super().load_state_dict(new_state_dict, strict)
|
class FairseqLanguageModel(BaseFairseqModel):
"""Base class for decoder-only models.

Args:
    decoder (FairseqDecoder): the decoder
"""
def __init__(self, decoder):
super().__init__()
self.decoder = decoder
assert isinstance(self.decoder, FairseqDecoder)
def forward(self, src_tokens, **kwargs):
"\n Run the forward pass for a decoder-only model.\n\n Feeds a batch of tokens through the decoder to predict the next tokens.\n\n Args:\n src_tokens (LongTensor): tokens on which to condition the decoder,\n of shape `(batch, tgt_len)`\n src_lengths (LongTensor): source sentence lengths of shape `(batch)`\n\n Returns:\n tuple:\n - the decoder's output of shape `(batch, seq_len, vocab)`\n - a dictionary with any model-specific outputs\n "
return self.decoder(src_tokens, **kwargs)
def forward_decoder(self, prev_output_tokens, **kwargs):
return self.decoder(prev_output_tokens, **kwargs)
def extract_features(self, src_tokens, **kwargs):
"\n Similar to *forward* but only return features.\n\n Returns:\n tuple:\n - the decoder's features of shape `(batch, seq_len, embed_dim)`\n - a dictionary with any model-specific outputs\n "
return self.decoder.extract_features(src_tokens, **kwargs)
def output_layer(self, features, **kwargs):
'Project features to the default output size (typically vocabulary size).'
return self.decoder.output_layer(features, **kwargs)
def max_positions(self):
'Maximum length supported by the model.'
return self.decoder.max_positions()
def max_decoder_positions(self):
'Maximum length supported by the decoder.'
return self.decoder.max_positions()
@property
def supported_targets(self):
return {'future'}
|
class FairseqEncoderModel(BaseFairseqModel):
"""Base class for encoder-only models.

Args:
    encoder (FairseqEncoder): the encoder
"""
def __init__(self, encoder):
super().__init__()
self.encoder = encoder
assert isinstance(self.encoder, FairseqEncoder)
def forward(self, src_tokens, src_lengths, **kwargs):
"\n Run the forward pass for a encoder-only model.\n\n Feeds a batch of tokens through the encoder to generate features.\n\n Args:\n src_tokens (LongTensor): input tokens of shape `(batch, src_len)`\n src_lengths (LongTensor): source sentence lengths of shape `(batch)`\n\n Returns:\n the encoder's output, typically of shape `(batch, src_len, features)`\n "
return self.encoder(src_tokens, src_lengths, **kwargs)
def get_normalized_probs(self, net_output, log_probs, sample=None):
"Get normalized probabilities (or log probs) from a net's output."
encoder_out = net_output['encoder_out']
if torch.is_tensor(encoder_out):
logits = encoder_out.float()
if log_probs:
return F.log_softmax(logits, dim=(- 1))
else:
return F.softmax(logits, dim=(- 1))
raise NotImplementedError
def max_positions(self):
'Maximum length supported by the model.'
return self.encoder.max_positions()
|
@register_model('fconv_lm')
class FConvLanguageModel(FairseqLanguageModel):
def __init__(self, decoder):
super().__init__(decoder)
@staticmethod
def add_args(parser):
'Add model-specific arguments to the parser.'
parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability')
parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension')
parser.add_argument('--decoder-layers', type=str, metavar='EXPR', help='decoder layers [(dim, kernel_size), ...]')
parser.add_argument('--decoder-out-embed-dim', type=int, metavar='N', help='decoder output embedding dimension')
parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR', help='comma separated list of adaptive softmax cutoff points. Must be used with adaptive_loss criterion')
parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D', help='sets adaptive softmax dropout for the tail projections')
parser.add_argument('--decoder-attention', type=str, metavar='EXPR', help='decoder attention [True, ...]')
@classmethod
def build_model(cls, args, task):
'Build a new model instance.'
base_lm_architecture(args)
if (hasattr(args, 'max_target_positions') and (not hasattr(args, 'tokens_per_sample'))):
args.tokens_per_sample = args.max_target_positions
decoder = FConvDecoder(dictionary=task.target_dictionary, embed_dim=args.decoder_embed_dim, convolutions=eval(args.decoder_layers), out_embed_dim=args.decoder_embed_dim, attention=eval(args.decoder_attention), dropout=args.dropout, max_positions=args.tokens_per_sample, share_embed=False, positional_embeddings=False, adaptive_softmax_cutoff=(options.eval_str_list(args.adaptive_softmax_cutoff, type=int) if (args.criterion == 'adaptive_loss') else None), adaptive_softmax_dropout=args.adaptive_softmax_dropout)
return FConvLanguageModel(decoder)
|
@register_model_architecture('fconv_lm', 'fconv_lm')
def base_lm_architecture(args):
args.dropout = getattr(args, 'dropout', 0.1)
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 128)
args.decoder_layers = getattr(args, 'decoder_layers', '[(1268, 4)] * 13')
args.decoder_attention = getattr(args, 'decoder_attention', 'False')
args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None)
args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0)
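# Sketch (illustrative): the layer spec is stored as a Python expression string and
# eval'd in build_model() above, yielding (out_channels, kernel_size) tuples.
def _example_parse_decoder_layers(spec='[(1268, 4)] * 13'):
    convolutions = eval(spec)     # -> [(1268, 4), (1268, 4), ...], 13 entries
    assert len(convolutions) == 13 and convolutions[0] == (1268, 4)
    return convolutions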
|
@register_model_architecture('fconv_lm', 'fconv_lm_dauphin_wikitext103')
def fconv_lm_dauphin_wikitext103(args):
layers = '[(850, 6)] * 3'
layers += ' + [(850, 1)] * 1'
layers += ' + [(850, 5)] * 4'
layers += ' + [(850, 1)] * 1'
layers += ' + [(850, 4)] * 3'
layers += ' + [(1024, 4)] * 1'
layers += ' + [(2048, 4)] * 1'
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 280)
args.decoder_layers = getattr(args, 'decoder_layers', layers)
args.decoder_attention = getattr(args, 'decoder_attention', 'False')
args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', '10000,20000,200000')
base_lm_architecture(args)
|
@register_model_architecture('fconv_lm', 'fconv_lm_dauphin_gbw')
def fconv_lm_dauphin_gbw(args):
layers = '[(512, 5)]'
layers += ' + [(128, 1, 0), (128, 5, 0), (512, 1, 3)] * 3'
layers += ' + [(512, 1, 0), (512, 5, 0), (1024, 1, 3)] * 3'
layers += ' + [(1024, 1, 0), (1024, 5, 0), (2048, 1, 3)] * 6'
layers += ' + [(1024, 1, 0), (1024, 5, 0), (4096, 1, 3)]'
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 128)
args.decoder_layers = getattr(args, 'decoder_layers', layers)
args.decoder_attention = getattr(args, 'decoder_attention', 'False')
args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', '10000,50000,200000')
base_lm_architecture(args)
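# Sketch (illustrative): the GBW spec mixes 2- and 3-tuples; in fairseq's fconv
# implementation (not shown in this excerpt) the optional third element configures the
# layer's residual connection, which is stated here as an assumption rather than
# something this file defines. eval() expands the string into the flat layer list.
def _example_parse_gbw_layers():
    spec = '[(512, 5)]' + ' + [(128, 1, 0), (128, 5, 0), (512, 1, 3)] * 3'
    layers = eval(spec)    # 1 + 3 * 3 = 10 layer tuples
    assert len(layers) == 10 and layers[1] == (128, 1, 0)
    return layers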
|
@register_model('fconv_self_att')
class FConvModelSelfAtt(FairseqEncoderDecoderModel):
@classmethod
def hub_models(cls):
return {
    'conv.stories.pretrained': {
        'path': 'https://dl.fbaipublicfiles.com/fairseq/models/stories_checkpoint.tar.gz',
        'checkpoint_file': 'pretrained_checkpoint.pt',
        'tokenizer': 'nltk',
    },
    'conv.stories': {
        'path': 'https://dl.fbaipublicfiles.com/fairseq/models/stories_checkpoint.tar.gz',
        'checkpoint_file': 'fusion_checkpoint.pt',
        'tokenizer': 'nltk',
        'pretrained': 'True',
        'pretrained_checkpoint': './pretrained_checkpoint.pt',
    },
    'data.stories': 'https://dl.fbaipublicfiles.com/fairseq/data/stories_test.tar.bz2',
}
def __init__(self, encoder, decoder, pretrained_encoder=None):
super().__init__(encoder, decoder)
self.encoder.num_attention_layers = sum(((layer is not None) for layer in decoder.attention))
self.pretrained_encoder = pretrained_encoder
if (self.pretrained_encoder is None):
encoders = {'encoder': encoder}
else:
encoders = {'encoder': encoder, 'pretrained': self.pretrained_encoder}
self.encoder = CompositeEncoder(encoders)
@staticmethod
def add_args(parser):
'Add model-specific arguments to the parser.'
parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability')
parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension')
parser.add_argument('--encoder-layers', type=str, metavar='EXPR', help='encoder layers [(dim, kernel_size), ...]')
parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension')
parser.add_argument('--decoder-layers', type=str, metavar='EXPR', help='decoder layers [(dim, kernel_size), ...]')
parser.add_argument('--decoder-out-embed-dim', type=int, metavar='N', help='decoder output embedding dimension')
parser.add_argument('--decoder-attention', type=str, metavar='EXPR', help='decoder attention [True, ...]')
parser.add_argument('--self-attention', type=str, metavar='EXPR', help='decoder self-attention layers, ex: [True] + [False]*5')
parser.add_argument('--multihead-attention-nheads', type=int, help='Number of heads to use in attention')
parser.add_argument('--multihead-self-attention-nheads', type=int, help='Number of heads to use in self-attention')
parser.add_argument('--encoder-attention', type=str, metavar='EXPR', help='encoder attention [True, ...]')
parser.add_argument('--encoder-attention-nheads', type=int, help='Number of heads to use in encoder attention')
parser.add_argument('--project-input', type=str, metavar='EXPR', help='Use projections in self-attention [True, ...]')
parser.add_argument('--gated-attention', type=str, metavar='EXPR', help='Use GLU layers in self-attention projections [True, ...]')
parser.add_argument('--downsample', type=str, metavar='EXPR', help='Use downsampling in self-attention [True, ...]')
parser.add_argument('--pretrained-checkpoint', metavar='DIR', help='path to load checkpoint from pretrained model')
parser.add_argument('--pretrained', type=str, metavar='EXPR', help='use pretrained model when training [True, ...]')
@classmethod
def build_model(cls, args, task):
'Build a new model instance.'
(trained_encoder, trained_decoder) = (None, None)
pretrained = eval(args.pretrained)
if pretrained:
logger.info('loading pretrained model')
if (not os.path.exists(args.pretrained_checkpoint)):
new_pretrained_checkpoint = os.path.join(args.data, args.pretrained_checkpoint)
if os.path.exists(new_pretrained_checkpoint):
args.pretrained_checkpoint = new_pretrained_checkpoint
trained_model = checkpoint_utils.load_model_ensemble(filenames=[args.pretrained_checkpoint], task=task)[0][0]
trained_decoder = list(trained_model.children())[1]
trained_encoder = list(trained_model.children())[0]
for param in trained_decoder.parameters():
param.requires_grad = False
for param in trained_encoder.parameters():
param.requires_grad = False
encoder = FConvEncoder(task.source_dictionary, embed_dim=args.encoder_embed_dim, convolutions=eval(args.encoder_layers), dropout=args.dropout, max_positions=args.max_source_positions, attention=eval(args.encoder_attention), attention_nheads=args.encoder_attention_nheads)
decoder = FConvDecoder(task.target_dictionary, embed_dim=args.decoder_embed_dim, convolutions=eval(args.decoder_layers), out_embed_dim=args.decoder_out_embed_dim, attention=eval(args.decoder_attention), dropout=args.dropout, max_positions=args.max_target_positions, selfattention=eval(args.self_attention), attention_nheads=args.multihead_attention_nheads, selfattention_nheads=args.multihead_self_attention_nheads, project_input=eval(args.project_input), gated_attention=eval(args.gated_attention), downsample=eval(args.downsample), pretrained=pretrained, trained_decoder=trained_decoder)
model = FConvModelSelfAtt(encoder, decoder, trained_encoder)
return model
@property
def pretrained(self):
return (self.pretrained_encoder is not None)
|
class FConvEncoder(FairseqEncoder):
'Convolutional encoder'
def __init__(self, dictionary, embed_dim=512, max_positions=1024, convolutions=(((512, 3),) * 20), dropout=0.1, attention=False, attention_nheads=1):
super().__init__(dictionary)
self.dropout = dropout
self.num_attention_layers = None
num_embeddings = len(dictionary)
self.padding_idx = dictionary.pad()
self.embed_tokens = Embedding(num_embeddings, embed_dim, self.padding_idx)
self.embed_positions = PositionalEmbedding(max_positions, embed_dim, self.padding_idx)
def expand_bool_array(val):
if isinstance(val, bool):
return ([val] * len(convolutions))
return val
attention = expand_bool_array(attention)
in_channels = convolutions[0][0]
self.fc1 = Linear(embed_dim, in_channels, dropout=dropout)
self.projections = nn.ModuleList()
self.convolutions = nn.ModuleList()
self.attention = nn.ModuleList()
self.attproj = nn.ModuleList()
for (i, (out_channels, kernel_size)) in enumerate(convolutions):
self.projections.append((Linear(in_channels, out_channels) if (in_channels != out_channels) else None))
self.convolutions.append(ConvTBC(in_channels, (out_channels * 2), kernel_size, dropout=dropout))
self.attention.append((SelfAttention(out_channels, embed_dim, attention_nheads) if attention[i] else None))
in_channels = out_channels
self.fc2 = Linear(in_channels, embed_dim)
def forward(self, src_tokens, src_lengths):
x = (self.embed_tokens(src_tokens) + self.embed_positions(src_tokens))
x = F.dropout(x, p=self.dropout, training=self.training)
input_embedding = x.transpose(0, 1)
x = self.fc1(x)
encoder_padding_mask = src_tokens.eq(self.padding_idx).t()
if (not encoder_padding_mask.any()):
encoder_padding_mask = None
x = x.transpose(0, 1)
for (proj, conv, attention) in zip(self.projections, self.convolutions, self.attention):
residual = (x if (proj is None) else proj(x))
if (encoder_padding_mask is not None):
x = x.masked_fill(encoder_padding_mask.unsqueeze((- 1)), 0)
x = F.dropout(x, p=self.dropout, training=self.training)
padding_l = ((conv.kernel_size[0] - 1) // 2)
padding_r = (conv.kernel_size[0] // 2)
x = F.pad(x, (0, 0, 0, 0, padding_l, padding_r))
x = conv(x)
x = F.glu(x, dim=2)
if (attention is not None):
x = attention(x)
x = ((x + residual) * math.sqrt(0.5))
x = x.transpose(1, 0)
x = self.fc2(x)
if (encoder_padding_mask is not None):
encoder_padding_mask = encoder_padding_mask.t()
x = x.masked_fill(encoder_padding_mask.unsqueeze((- 1)), 0)
x = GradMultiply.apply(x, (1.0 / (2.0 * self.num_attention_layers)))
y = ((x + input_embedding.transpose(0, 1)) * math.sqrt(0.5))
return {'encoder_out': (x, y), 'encoder_padding_mask': encoder_padding_mask}
def reorder_encoder_out(self, encoder_out, new_order):
encoder_out['encoder_out'] = tuple((eo.index_select(0, new_order) for eo in encoder_out['encoder_out']))
if (encoder_out['encoder_padding_mask'] is not None):
encoder_out['encoder_padding_mask'] = encoder_out['encoder_padding_mask'].index_select(0, new_order)
if ('pretrained' in encoder_out):
encoder_out['pretrained']['encoder_out'] = tuple((eo.index_select(0, new_order) for eo in encoder_out['pretrained']['encoder_out']))
return encoder_out
def max_positions(self):
'Maximum input length supported by the encoder.'
return self.embed_positions.max_positions
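# Sketch (illustrative): the asymmetric padding used in FConvEncoder.forward() keeps the
# time dimension unchanged for both odd and even kernel sizes.
def _example_conv_padding(kernel_size=3):
    padding_l = (kernel_size - 1) // 2    # 1 for k=3, 1 for k=4
    padding_r = kernel_size // 2          # 1 for k=3, 2 for k=4
    # padding_l + padding_r == kernel_size - 1, so the conv output length equals the input length
    return padding_l, padding_r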
|
@with_incremental_state
class FConvDecoder(FairseqDecoder):
'Convolutional decoder'
def __init__(self, dictionary, embed_dim=512, out_embed_dim=256, max_positions=1024, convolutions=(((512, 3),) * 8), attention=True, dropout=0.1, selfattention=False, attention_nheads=1, selfattention_nheads=1, project_input=False, gated_attention=False, downsample=False, pretrained=False, trained_decoder=None):
super().__init__(dictionary)
self.register_buffer('version', torch.Tensor([2]))
self.pretrained = pretrained
self.pretrained_decoder = trained_decoder
self.dropout = dropout
self.need_attn = True
in_channels = convolutions[0][0]
def expand_bool_array(val):
if isinstance(val, bool):
return ([val] * len(convolutions))
return val
attention = expand_bool_array(attention)
selfattention = expand_bool_array(selfattention)
if ((not isinstance(attention, list)) or (len(attention) != len(convolutions))):
raise ValueError('Attention is expected to be a list of booleans of length equal to the number of layers.')
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
self.embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx)
self.embed_positions = PositionalEmbedding(max_positions, embed_dim, padding_idx)
self.fc1 = Linear(embed_dim, in_channels, dropout=dropout)
self.projections = nn.ModuleList()
self.convolutions = nn.ModuleList()
self.attention = nn.ModuleList()
self.selfattention = nn.ModuleList()
self.attproj = nn.ModuleList()
for (i, (out_channels, kernel_size)) in enumerate(convolutions):
self.projections.append((Linear(in_channels, out_channels) if (in_channels != out_channels) else None))
self.convolutions.append(LinearizedConv1d(in_channels, (out_channels * 2), kernel_size, padding=(kernel_size - 1), dropout=dropout))
self.attention.append((DownsampledMultiHeadAttention(out_channels, embed_dim, attention_nheads, project_input=project_input, gated=False, downsample=False) if attention[i] else None))
self.attproj.append((Linear(out_channels, embed_dim, dropout=dropout) if attention[i] else None))
self.selfattention.append((SelfAttention(out_channels, embed_dim, selfattention_nheads, project_input=project_input, gated=gated_attention, downsample=downsample) if selfattention[i] else None))
in_channels = out_channels
self.fc2 = Linear(in_channels, out_embed_dim)
self.fc3 = Linear(out_embed_dim, num_embeddings, dropout=dropout)
if self.pretrained:
self.gate1 = nn.Sequential(Linear((out_embed_dim * 2), out_embed_dim), nn.Sigmoid())
self.gate2 = nn.Sequential(Linear((out_embed_dim * 2), out_embed_dim), nn.Sigmoid())
self.joining = nn.Sequential(Linear((out_embed_dim * 2), (out_embed_dim * 2)), LayerNorm((out_embed_dim * 2)), nn.GLU(), Linear(out_embed_dim, (out_embed_dim * 2)), LayerNorm((out_embed_dim * 2)), nn.GLU(), Linear(out_embed_dim, out_embed_dim), LayerNorm(out_embed_dim))
self.pretrained_outputs = {}
def save_output():
def hook(a, b, output):
self.pretrained_outputs['out'] = output
return hook
self.pretrained_decoder.fc2.register_forward_hook(save_output())
def forward(self, prev_output_tokens, encoder_out):
trained_encoder_out = (encoder_out['pretrained'] if self.pretrained else None)
encoder_out = encoder_out['encoder']['encoder_out']
(encoder_a, encoder_b) = self._split_encoder_out(encoder_out)
positions = self.embed_positions(prev_output_tokens)
x = (self.embed_tokens(prev_output_tokens) + positions)
x = F.dropout(x, p=self.dropout, training=self.training)
target_embedding = x.transpose(0, 1)
x = self.fc1(x)
x = x.transpose(0, 1)
avg_attn_scores = None
for (proj, conv, attention, selfattention, attproj) in zip(self.projections, self.convolutions, self.attention, self.selfattention, self.attproj):
residual = (x if (proj is None) else proj(x))
x = F.dropout(x, p=self.dropout, training=self.training)
x = conv(x)
x = F.glu(x, dim=2)
if (attention is not None):
r = x
(x, attn_scores) = attention((attproj(x) + target_embedding), encoder_a, encoder_b)
x = (x + r)
if ((not self.training) and self.need_attn):
if (avg_attn_scores is None):
avg_attn_scores = attn_scores
else:
avg_attn_scores.add_(attn_scores)
if (selfattention is not None):
x = selfattention(x)
x = ((x + residual) * math.sqrt(0.5))
x = x.transpose(0, 1)
x = self.fc2(x)
x = F.dropout(x, p=self.dropout, training=self.training)
if (not self.pretrained):
x = self.fc3(x)
if self.pretrained:
(trained_x, _) = self.pretrained_decoder.forward(prev_output_tokens, trained_encoder_out)
y = torch.cat([x, self.pretrained_outputs['out']], dim=(- 1))
gate1 = self.gate1(y)
gate2 = self.gate2(y)
gated_x1 = (gate1 * x)
gated_x2 = (gate2 * self.pretrained_outputs['out'])
fusion = torch.cat([gated_x1, gated_x2], dim=(- 1))
fusion = self.joining(fusion)
fusion_output = self.fc3(fusion)
return (fusion_output, avg_attn_scores)
else:
return (x, avg_attn_scores)
def max_positions(self):
'Maximum output length supported by the decoder.'
return self.embed_positions.max_positions
def make_generation_fast_(self, need_attn=False, **kwargs):
self.need_attn = need_attn
def _split_encoder_out(self, encoder_out):
'Split and transpose encoder outputs.'
(encoder_a, encoder_b) = encoder_out
encoder_a = encoder_a.transpose(0, 1).contiguous()
encoder_b = encoder_b.transpose(0, 1).contiguous()
result = (encoder_a, encoder_b)
return result
|
class SelfAttention(nn.Module):
def __init__(self, out_channels, embed_dim, num_heads, project_input=False, gated=False, downsample=False):
super().__init__()
self.attention = DownsampledMultiHeadAttention(out_channels, embed_dim, num_heads, dropout=0, bias=True, project_input=project_input, gated=gated, downsample=downsample)
self.in_proj_q = Linear(out_channels, embed_dim)
self.in_proj_k = Linear(out_channels, embed_dim)
self.in_proj_v = Linear(out_channels, embed_dim)
self.ln = LayerNorm(out_channels)
def forward(self, x):
residual = x
query = self.in_proj_q(x)
key = self.in_proj_k(x)
value = self.in_proj_v(x)
(x, _) = self.attention(query, key, value, mask_future_timesteps=True, use_scalar_bias=True)
return self.ln((x + residual))
|
def Embedding(num_embeddings, embedding_dim, padding_idx):
m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
m.weight.data.normal_(0, 0.1)
return m
|
def PositionalEmbedding(num_embeddings, embedding_dim, padding_idx):
m = LearnedPositionalEmbedding(num_embeddings, embedding_dim, padding_idx)
m.weight.data.normal_(0, 0.1)
return m
|
def Linear(in_features, out_features, dropout=0.0):
'Weight-normalized Linear layer (input: N x T x C)'
m = nn.Linear(in_features, out_features)
m.weight.data.normal_(mean=0, std=math.sqrt(((1 - dropout) / in_features)))
m.bias.data.zero_()
return m
|
def LinearizedConv1d(in_channels, out_channels, kernel_size, dropout=0.0, **kwargs):
'Weight-normalized Conv1d layer optimized for decoding'
m = LinearizedConvolution(in_channels, out_channels, kernel_size, **kwargs)
std = math.sqrt(((4 * (1.0 - dropout)) / (m.kernel_size[0] * in_channels)))
m.weight.data.normal_(mean=0, std=std)
m.bias.data.zero_()
return m
|
def ConvTBC(in_channels, out_channels, kernel_size, dropout=0, **kwargs):
'Weight-normalized Conv1d layer'
from fairseq.modules import ConvTBC
m = ConvTBC(in_channels, out_channels, kernel_size, **kwargs)
std = math.sqrt(((4 * (1.0 - dropout)) / (m.kernel_size[0] * in_channels)))
m.weight.data.normal_(mean=0, std=std)
m.bias.data.zero_()
return m
|
@register_model_architecture('fconv_self_att', 'fconv_self_att')
def base_architecture(args):
args.dropout = getattr(args, 'dropout', 0.1)
args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512)
args.encoder_layers = getattr(args, 'encoder_layers', '[(512, 3)] * 3')
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512)
args.decoder_layers = getattr(args, 'decoder_layers', '[(512, 3)] * 8')
args.decoder_out_embed_dim = getattr(args, 'decoder_out_embed_dim', 256)
args.decoder_attention = getattr(args, 'decoder_attention', 'True')
args.self_attention = getattr(args, 'self_attention', 'False')
args.encoder_attention = getattr(args, 'encoder_attention', 'False')
args.multihead_attention_nheads = getattr(args, 'multihead_attention_nheads', 1)
args.multihead_self_attention_nheads = getattr(args, 'multihead_self_attention_nheads', 1)
args.encoder_attention_nheads = getattr(args, 'encoder_attention_nheads', 1)
args.project_input = getattr(args, 'project_input', 'False')
args.gated_attention = getattr(args, 'gated_attention', 'False')
args.downsample = getattr(args, 'downsample', 'False')
args.pretrained_checkpoint = getattr(args, 'pretrained_checkpoint', '')
args.pretrained = getattr(args, 'pretrained', 'False')
|
@register_model_architecture('fconv_self_att', 'fconv_self_att_wp')
def fconv_self_att_wp(args):
args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 256)
args.encoder_layers = getattr(args, 'encoder_layers', '[(128, 3)] * 2 + [(512,3)] * 1')
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 256)
args.decoder_layers = getattr(args, 'decoder_layers', '[(512, 4)] * 4 + [(768, 4)] * 2 + [(1024, 4)] * 1')
args.decoder_out_embed_dim = getattr(args, 'decoder_out_embed_dim', 256)
args.self_attention = getattr(args, 'self_attention', 'True')
args.multihead_self_attention_nheads = getattr(args, 'multihead_self_attention_nheads', 4)
args.project_input = getattr(args, 'project_input', 'True')
args.gated_attention = getattr(args, 'gated_attention', 'True')
args.downsample = getattr(args, 'downsample', 'True')
base_architecture(args)
|
@register_model('lightconv_lm')
class LightConvLanguageModel(FairseqLanguageModel):
def __init__(self, decoder):
super().__init__(decoder)
@staticmethod
def add_args(parser):
'Add model-specific arguments to the parser.'
parser.add_argument('--dropout', default=0.1, type=float, metavar='D', help='dropout probability')
parser.add_argument('--attention-dropout', default=0.0, type=float, metavar='D', help='dropout probability for attention weights')
parser.add_argument('--relu-dropout', default=0.0, type=float, metavar='D', help='dropout probability after ReLU in FFN')
parser.add_argument('--input-dropout', type=float, metavar='D', help='dropout probability of the inputs')
parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension')
parser.add_argument('--decoder-output-dim', type=int, metavar='N', help='decoder output dimension')
parser.add_argument('--decoder-input-dim', type=int, metavar='N', help='decoder input dimension')
parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N', help='decoder embedding dimension for FFN')
parser.add_argument('--decoder-layers', type=int, metavar='N', help='num decoder layers')
parser.add_argument('--decoder-attention-heads', type=int, metavar='N', help='num decoder attention heads or LightConv/DynamicConv heads')
parser.add_argument('--decoder-normalize-before', default=False, action='store_true', help='apply layernorm before each decoder block')
parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR', help='comma separated list of adaptive softmax cutoff points. Must be used with adaptive_loss criterion')
parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D', help='sets adaptive softmax dropout for the tail projections')
parser.add_argument('--adaptive-softmax-factor', type=float, metavar='N', help='adaptive softmax factor')
parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true', help='if set, disables positional embeddings (outside self attention)')
parser.add_argument('--share-decoder-input-output-embed', default=False, action='store_true', help='share decoder input and output embeddings')
parser.add_argument('--character-embeddings', default=False, action='store_true', help='if set, uses character embedding convolutions to produce token embeddings')
parser.add_argument('--character-filters', type=str, metavar='LIST', default='[(1, 64), (2, 128), (3, 192), (4, 256), (5, 256), (6, 256), (7, 256)]', help='size of character embeddings')
parser.add_argument('--character-embedding-dim', type=int, metavar='N', default=4, help='size of character embeddings')
parser.add_argument('--char-embedder-highway-layers', type=int, metavar='N', default=2, help='number of highway layers for character token embedder')
parser.add_argument('--adaptive-input', default=False, action='store_true', help='if set, uses adaptive input')
parser.add_argument('--adaptive-input-factor', type=float, metavar='N', help='adaptive input factor')
parser.add_argument('--adaptive-input-cutoff', metavar='EXPR', help='comma separated list of adaptive input cutoff points.')
parser.add_argument('--tie-adaptive-weights', action='store_true', help='if set, ties the weights of adaptive softmax and adaptive input')
parser.add_argument('--tie-adaptive-proj', action='store_true', help='if set, ties the projection weights of adaptive softmax and adaptive input')
parser.add_argument('--decoder-learned-pos', action='store_true', help='use learned positional embeddings in the decoder')
# LightConv and DynamicConv arguments
parser.add_argument('--decoder-kernel-size-list', type=(lambda x: options.eval_str_list(x, int)), help='list of kernel size (default: "[3,7,15,31,31,31]")')
parser.add_argument('--decoder-glu', type=options.eval_bool, help='glu after in proj')
parser.add_argument('--decoder-conv-type', default='dynamic', type=str, choices=['dynamic', 'lightweight'], help='type of convolution')
parser.add_argument('--weight-softmax', default=True, type=options.eval_bool)
parser.add_argument('--weight-dropout', type=float, metavar='D', help='dropout probability for conv weights')
@classmethod
def build_model(cls, args, task):
'Build a new model instance.'
base_lm_architecture(args)
if (getattr(args, 'max_source_positions', None) is None):
args.max_source_positions = args.tokens_per_sample
if (getattr(args, 'max_target_positions', None) is None):
args.max_target_positions = args.tokens_per_sample
if args.character_embeddings:
embed_tokens = CharacterTokenEmbedder(task.dictionary, eval(args.character_filters), args.character_embedding_dim, args.decoder_embed_dim, args.char_embedder_highway_layers)
elif args.adaptive_input:
embed_tokens = AdaptiveInput(len(task.dictionary), task.dictionary.pad(), args.decoder_input_dim, args.adaptive_input_factor, args.decoder_embed_dim, options.eval_str_list(args.adaptive_input_cutoff, type=int))
else:
embed_tokens = Embedding(len(task.dictionary), args.decoder_input_dim, task.dictionary.pad())
if args.tie_adaptive_weights:
assert args.adaptive_input
assert (args.adaptive_input_factor == args.adaptive_softmax_factor)
assert (args.adaptive_softmax_cutoff == args.adaptive_input_cutoff), '{} != {}'.format(args.adaptive_softmax_cutoff, args.adaptive_input_cutoff)
assert (args.decoder_input_dim == args.decoder_output_dim)
decoder = LightConvDecoder(args, task.output_dictionary, embed_tokens, no_encoder_attn=True, final_norm=False)
return LightConvLanguageModel(decoder)
|
@register_model_architecture('lightconv_lm', 'lightconv_lm')
def base_lm_architecture(args):
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512)
args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 2048)
args.decoder_layers = getattr(args, 'decoder_layers', 6)
args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 8)
args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None)
args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0)
args.adaptive_softmax_factor = getattr(args, 'adaptive_softmax_factor', 4)
args.decoder_learned_pos = getattr(args, 'decoder_learned_pos', False)
args.character_embeddings = getattr(args, 'character_embeddings', False)
args.decoder_output_dim = getattr(args, 'decoder_output_dim', args.decoder_embed_dim)
args.decoder_input_dim = getattr(args, 'decoder_input_dim', args.decoder_embed_dim)
args.decoder_conv_dim = getattr(args, 'decoder_conv_dim', args.decoder_embed_dim)
args.decoder_normalize_before = True
args.adaptive_input = getattr(args, 'adaptive_input', False)
args.adaptive_input_factor = getattr(args, 'adaptive_input_factor', 4)
args.adaptive_input_cutoff = getattr(args, 'adaptive_input_cutoff', None)
args.tie_adaptive_weights = getattr(args, 'tie_adaptive_weights', False)
args.tie_adaptive_proj = getattr(args, 'tie_adaptive_proj', False)
args.decoder_kernel_size_list = getattr(args, 'decoder_kernel_size_list', [3, 7, 15, 31, 31, 31])
if (len(args.decoder_kernel_size_list) == 1):
args.decoder_kernel_size_list = (args.decoder_kernel_size_list * args.decoder_layers)
assert (len(args.decoder_kernel_size_list) == args.decoder_layers), "decoder_kernel_size_list doesn't match decoder_layers"
args.decoder_glu = getattr(args, 'decoder_glu', True)
args.input_dropout = getattr(args, 'input_dropout', 0.1)
args.weight_dropout = getattr(args, 'weight_dropout', args.attention_dropout)
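# Sketch (illustrative): mirrors the broadcast just above, where a single kernel size is
# repeated once per decoder layer.
def _example_broadcast_kernel_sizes(kernel_list=(15,), num_layers=6):
    kernel_list = list(kernel_list)
    if len(kernel_list) == 1:
        kernel_list = kernel_list * num_layers   # -> [15, 15, 15, 15, 15, 15]
    assert len(kernel_list) == num_layers
    return kernel_list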
|
@register_model_architecture('lightconv_lm', 'lightconv_lm_gbw')
def lightconv_lm_gbw(args):
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512)
args.dropout = getattr(args, 'dropout', 0.1)
args.attention_dropout = getattr(args, 'attention_dropout', 0.1)
args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 4096)
args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 16)
base_lm_architecture(args)
|
@register_model('lstm_lm')
class LSTMLanguageModel(FairseqLanguageModel):
def __init__(self, decoder):
super().__init__(decoder)
@staticmethod
def add_args(parser):
'Add model-specific arguments to the parser.'
parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability')
parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension')
parser.add_argument('--decoder-embed-path', type=str, metavar='STR', help='path to pre-trained decoder embedding')
parser.add_argument('--decoder-hidden-size', type=int, metavar='N', help='decoder hidden size')
parser.add_argument('--decoder-layers', type=int, metavar='N', help='number of decoder layers')
parser.add_argument('--decoder-out-embed-dim', type=int, metavar='N', help='decoder output embedding dimension')
parser.add_argument('--decoder-attention', type=str, metavar='BOOL', help='decoder attention')
parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR', help='comma separated list of adaptive softmax cutoff points. Must be used with adaptive_loss criterion')
parser.add_argument('--decoder-dropout-in', type=float, metavar='D', help='dropout probability for decoder input embedding')
parser.add_argument('--decoder-dropout-out', type=float, metavar='D', help='dropout probability for decoder output')
parser.add_argument('--share-decoder-input-output-embed', default=False, action='store_true', help='share decoder input and output embeddings')
@classmethod
def build_model(cls, args, task):
'Build a new model instance.'
base_architecture(args)
if (getattr(args, 'max_target_positions', None) is not None):
max_target_positions = args.max_target_positions
else:
max_target_positions = getattr(args, 'tokens_per_sample', DEFAULT_MAX_TARGET_POSITIONS)
def load_pretrained_embedding_from_file(embed_path, dictionary, embed_dim):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx)
embed_dict = utils.parse_embedding(embed_path)
utils.print_embed_overlap(embed_dict, dictionary)
return utils.load_embedding(embed_dict, dictionary, embed_tokens)
pretrained_decoder_embed = None
if args.decoder_embed_path:
pretrained_decoder_embed = load_pretrained_embedding_from_file(args.decoder_embed_path, task.target_dictionary, args.decoder_embed_dim)
if args.share_decoder_input_output_embed:
if (task.source_dictionary != task.target_dictionary):
raise ValueError('--share-decoder-input-output-embed requires a joined dictionary')
if (args.decoder_embed_dim != args.decoder_out_embed_dim):
raise ValueError('--share-decoder-input-output-embed requires --decoder-embed-dim to match --decoder-out-embed-dim')
decoder = LSTMDecoder(dictionary=task.dictionary, embed_dim=args.decoder_embed_dim, hidden_size=args.decoder_hidden_size, out_embed_dim=args.decoder_out_embed_dim, num_layers=args.decoder_layers, dropout_in=args.decoder_dropout_in, dropout_out=args.decoder_dropout_out, attention=options.eval_bool(args.decoder_attention), encoder_output_units=0, pretrained_embed=pretrained_decoder_embed, share_input_output_embed=args.share_decoder_input_output_embed, adaptive_softmax_cutoff=(options.eval_str_list(args.adaptive_softmax_cutoff, type=int) if (args.criterion == 'adaptive_loss') else None), max_target_positions=max_target_positions)
return cls(decoder)
|
@register_model_architecture('lstm_lm', 'lstm_lm')
def base_architecture(args):
args.dropout = getattr(args, 'dropout', 0.1)
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512)
args.decoder_embed_path = getattr(args, 'decoder_embed_path', None)
args.decoder_hidden_size = getattr(args, 'decoder_hidden_size', args.decoder_embed_dim)
args.decoder_layers = getattr(args, 'decoder_layers', 1)
args.decoder_out_embed_dim = getattr(args, 'decoder_out_embed_dim', 512)
args.decoder_attention = getattr(args, 'decoder_attention', '0')
args.decoder_dropout_in = getattr(args, 'decoder_dropout_in', args.dropout)
args.decoder_dropout_out = getattr(args, 'decoder_dropout_out', args.dropout)
args.share_decoder_input_output_embed = getattr(args, 'share_decoder_input_output_embed', False)
args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', '10000,50000,200000')
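# Sketch (illustrative): the default '--decoder-attention 0' string is evaluated with
# options.eval_bool in build_model() above; for simple literals that amounts to
# bool(eval(flag)) (an assumption about eval_bool's behaviour), so the LM decoder runs
# without attention by default.
def _example_lstm_lm_attention_flag(flag='0'):
    return bool(eval(flag))   # -> False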
|
@register_model('masked_lm')
class MaskedLMModel(BaseFairseqModel):
"""
Class for training a Masked Language Model. It also supports an
additional sentence level prediction if the sent-loss argument is set.
"""
def __init__(self, args, encoder):
super().__init__()
self.args = args
self.encoder = encoder
if getattr(args, 'apply_bert_init', False):
self.apply(init_bert_params)
@staticmethod
def add_args(parser):
'Add model-specific arguments to the parser.'
parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability')
parser.add_argument('--attention-dropout', type=float, metavar='D', help='dropout probability for attention weights')
parser.add_argument('--act-dropout', type=float, metavar='D', help='dropout probability after activation in FFN')
parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N', help='encoder embedding dimension for FFN')
parser.add_argument('--encoder-layers', type=int, metavar='N', help='num encoder layers')
parser.add_argument('--encoder-attention-heads', type=int, metavar='N', help='num encoder attention heads')
parser.add_argument('--bias-kv', action='store_true', help='if set, add learnable biases to the key and value sequences')
parser.add_argument('--zero-attn', action='store_true', help='if set, append a zero key/value pair to the attention inputs')
parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension')
parser.add_argument('--share-encoder-input-output-embed', action='store_true', help='share encoder input and output embeddings')
parser.add_argument('--encoder-learned-pos', action='store_true', help='use learned positional embeddings in the encoder')
parser.add_argument('--no-token-positional-embeddings', action='store_true', help='if set, disables positional embeddings (outside self attention)')
parser.add_argument('--num-segment', type=int, metavar='N', help='num segment in the input')
parser.add_argument('--sentence-class-num', type=int, metavar='N', help='number of classes for sentence task')
parser.add_argument('--sent-loss', action='store_true', help='if set, calculate sentence level predictions')
parser.add_argument('--apply-bert-init', action='store_true', help='use custom param initialization for BERT')
parser.add_argument('--activation-fn', choices=utils.get_available_activation_fns(), help='activation function to use')
parser.add_argument('--pooler-activation-fn', choices=utils.get_available_activation_fns(), help='Which activation function to use for pooler layer.')
parser.add_argument('--encoder-normalize-before', action='store_true', help='apply layernorm before each encoder block')
def forward(self, src_tokens, segment_labels=None, **kwargs):
return self.encoder(src_tokens, segment_labels=segment_labels, **kwargs)
def max_positions(self):
return self.encoder.max_positions
@classmethod
def build_model(cls, args, task):
'Build a new model instance.'
base_architecture(args)
if (not hasattr(args, 'max_positions')):
args.max_positions = args.tokens_per_sample
logger.info(args)
encoder = MaskedLMEncoder(args, task.dictionary)
return cls(args, encoder)
|
class MaskedLMEncoder(FairseqEncoder):
'\n Encoder for Masked Language Modelling.\n '
def __init__(self, args, dictionary):
super().__init__(dictionary)
self.padding_idx = dictionary.pad()
self.vocab_size = dictionary.__len__()
self.max_positions = args.max_positions
self.sentence_encoder = TransformerSentenceEncoder(padding_idx=self.padding_idx, vocab_size=self.vocab_size, num_encoder_layers=args.encoder_layers, embedding_dim=args.encoder_embed_dim, ffn_embedding_dim=args.encoder_ffn_embed_dim, num_attention_heads=args.encoder_attention_heads, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.act_dropout, max_seq_len=self.max_positions, num_segments=args.num_segment, use_position_embeddings=(not args.no_token_positional_embeddings), encoder_normalize_before=args.encoder_normalize_before, apply_bert_init=args.apply_bert_init, activation_fn=args.activation_fn, learned_pos_embedding=args.encoder_learned_pos, add_bias_kv=args.bias_kv, add_zero_attn=args.zero_attn)
self.share_input_output_embed = args.share_encoder_input_output_embed
self.embed_out = None
self.sentence_projection_layer = None
self.sentence_out_dim = args.sentence_class_num
self.lm_output_learned_bias = None
self.load_softmax = (not getattr(args, 'remove_head', False))
self.masked_lm_pooler = nn.Linear(args.encoder_embed_dim, args.encoder_embed_dim)
self.pooler_activation = utils.get_activation_fn(args.pooler_activation_fn)
self.lm_head_transform_weight = nn.Linear(args.encoder_embed_dim, args.encoder_embed_dim)
self.activation_fn = utils.get_activation_fn(args.activation_fn)
self.layer_norm = LayerNorm(args.encoder_embed_dim)
self.lm_output_learned_bias = None
if self.load_softmax:
self.lm_output_learned_bias = nn.Parameter(torch.zeros(self.vocab_size))
if (not self.share_input_output_embed):
self.embed_out = nn.Linear(args.encoder_embed_dim, self.vocab_size, bias=False)
if args.sent_loss:
self.sentence_projection_layer = nn.Linear(args.encoder_embed_dim, self.sentence_out_dim, bias=False)
def forward(self, src_tokens, segment_labels=None, **unused):
"\n Forward pass for Masked LM encoder. This first computes the token\n embedding using the token embedding matrix, position embeddings (if\n specified) and segment embeddings (if specified).\n\n Here we assume that the sentence representation corresponds to the\n output of the classification_token (see bert_task or cross_lingual_lm\n task for more details).\n Args:\n - src_tokens: B x T matrix representing sentences\n - segment_labels: B x T matrix representing segment label for tokens\n Returns:\n - a tuple of the following:\n - logits for predictions in format B x T x C to be used in\n softmax afterwards\n - a dictionary of additional data, where 'pooled_output' contains\n the representation for classification_token and 'inner_states'\n is a list of internal model states used to compute the\n predictions (similar in ELMO). 'sentence_logits'\n is the prediction logit for NSP task and is only computed if\n this is specified in the input arguments.\n "
(inner_states, sentence_rep) = self.sentence_encoder(src_tokens, segment_labels=segment_labels)
x = inner_states[(- 1)].transpose(0, 1)
x = self.layer_norm(self.activation_fn(self.lm_head_transform_weight(x)))
pooled_output = self.pooler_activation(self.masked_lm_pooler(sentence_rep))
if (self.share_input_output_embed and hasattr(self.sentence_encoder.embed_tokens, 'weight')):
x = F.linear(x, self.sentence_encoder.embed_tokens.weight)
elif (self.embed_out is not None):
x = self.embed_out(x)
if (self.lm_output_learned_bias is not None):
x = (x + self.lm_output_learned_bias)
sentence_logits = None
if self.sentence_projection_layer:
sentence_logits = self.sentence_projection_layer(pooled_output)
return (x, {'inner_states': inner_states, 'pooled_output': pooled_output, 'sentence_logits': sentence_logits})
def max_positions(self):
'Maximum output length supported by the encoder.'
return self.max_positions
def upgrade_state_dict_named(self, state_dict, name):
if isinstance(self.sentence_encoder.embed_positions, SinusoidalPositionalEmbedding):
state_dict[(name + '.sentence_encoder.embed_positions._float_tensor')] = torch.FloatTensor(1)
if (not self.load_softmax):
for k in list(state_dict.keys()):
if (('embed_out.weight' in k) or ('sentence_projection_layer.weight' in k) or ('lm_output_learned_bias' in k)):
del state_dict[k]
return state_dict
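The head applied in forward above is: linear transform, activation, layer norm, then a projection onto the vocabulary (tied to the input embedding matrix when --share-encoder-input-output-embed is set) plus a learned bias. A minimal standalone sketch of that head with toy dimensions (not taken from any config; gelu assumed for the activation):
import torch
import torch.nn as nn
import torch.nn.functional as F

embed_dim, vocab = 16, 100
embed_tokens = nn.Embedding(vocab, embed_dim)       # stands in for sentence_encoder.embed_tokens
transform = nn.Linear(embed_dim, embed_dim)         # lm_head_transform_weight
layer_norm = nn.LayerNorm(embed_dim)
bias = nn.Parameter(torch.zeros(vocab))             # lm_output_learned_bias

x = torch.randn(2, 5, embed_dim)                    # B x T x C hidden states from the encoder
x = layer_norm(F.gelu(transform(x)))
logits = F.linear(x, embed_tokens.weight) + bias    # tied output projection: B x T x vocab
print(logits.shape)                                 # torch.Size([2, 5, 100])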
|
@register_model_architecture('masked_lm', 'masked_lm')
def base_architecture(args):
args.dropout = getattr(args, 'dropout', 0.1)
args.attention_dropout = getattr(args, 'attention_dropout', 0.1)
args.act_dropout = getattr(args, 'act_dropout', 0.0)
args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4096)
args.encoder_layers = getattr(args, 'encoder_layers', 6)
args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 8)
args.bias_kv = getattr(args, 'bias_kv', False)
args.zero_attn = getattr(args, 'zero_attn', False)
args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024)
args.share_encoder_input_output_embed = getattr(args, 'share_encoder_input_output_embed', False)
args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', False)
args.no_token_positional_embeddings = getattr(args, 'no_token_positional_embeddings', False)
args.num_segment = getattr(args, 'num_segment', 2)
args.sentence_class_num = getattr(args, 'sentence_class_num', 2)
args.sent_loss = getattr(args, 'sent_loss', False)
args.apply_bert_init = getattr(args, 'apply_bert_init', False)
args.activation_fn = getattr(args, 'activation_fn', 'relu')
args.pooler_activation_fn = getattr(args, 'pooler_activation_fn', 'tanh')
args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False)
|
@register_model_architecture('masked_lm', 'bert_base')
def bert_base_architecture(args):
args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 768)
args.share_encoder_input_output_embed = getattr(args, 'share_encoder_input_output_embed', True)
args.no_token_positional_embeddings = getattr(args, 'no_token_positional_embeddings', False)
args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', True)
args.num_segment = getattr(args, 'num_segment', 2)
args.encoder_layers = getattr(args, 'encoder_layers', 12)
args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 12)
args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 3072)
args.bias_kv = getattr(args, 'bias_kv', False)
args.zero_attn = getattr(args, 'zero_attn', False)
args.sentence_class_num = getattr(args, 'sentence_class_num', 2)
args.sent_loss = getattr(args, 'sent_loss', True)
args.apply_bert_init = getattr(args, 'apply_bert_init', True)
args.activation_fn = getattr(args, 'activation_fn', 'gelu')
args.pooler_activation_fn = getattr(args, 'pooler_activation_fn', 'tanh')
args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', True)
base_architecture(args)
|
@register_model_architecture('masked_lm', 'bert_large')
def bert_large_architecture(args):
args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024)
args.encoder_layers = getattr(args, 'encoder_layers', 24)
args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 16)
args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4096)
bert_base_architecture(args)
|
@register_model_architecture('masked_lm', 'xlm_base')
def xlm_architecture(args):
args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024)
args.share_encoder_input_output_embed = getattr(args, 'share_encoder_input_output_embed', True)
args.no_token_positional_embeddings = getattr(args, 'no_token_positional_embeddings', False)
args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', True)
args.num_segment = getattr(args, 'num_segment', 1)
args.encoder_layers = getattr(args, 'encoder_layers', 6)
args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 8)
args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4096)
args.bias_kv = getattr(args, 'bias_kv', False)
args.zero_attn = getattr(args, 'zero_attn', False)
args.sent_loss = getattr(args, 'sent_loss', False)
args.activation_fn = getattr(args, 'activation_fn', 'gelu')
args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False)
args.pooler_activation_fn = getattr(args, 'pooler_activation_fn', 'tanh')
args.apply_bert_init = getattr(args, 'apply_bert_init', True)
base_architecture(args)
|
@register_model('multilingual_transformer')
class MultilingualTransformerModel(FairseqMultiModel):
'Train Transformer models for multiple language pairs simultaneously.\n\n Requires `--task multilingual_translation`.\n\n We inherit all arguments from TransformerModel and assume that all language\n pairs use a single Transformer architecture. In addition, we provide several\n options that are specific to the multilingual setting.\n\n Args:\n --share-encoder-embeddings: share encoder embeddings across all source languages\n --share-decoder-embeddings: share decoder embeddings across all target languages\n --share-encoders: share all encoder params (incl. embeddings) across all source languages\n --share-decoders: share all decoder params (incl. embeddings) across all target languages\n '
def __init__(self, encoders, decoders):
super().__init__(encoders, decoders)
@staticmethod
def add_args(parser):
'Add model-specific arguments to the parser.'
TransformerModel.add_args(parser)
parser.add_argument('--share-encoder-embeddings', action='store_true', help='share encoder embeddings across languages')
parser.add_argument('--share-decoder-embeddings', action='store_true', help='share decoder embeddings across languages')
parser.add_argument('--share-encoders', action='store_true', help='share encoders across languages')
parser.add_argument('--share-decoders', action='store_true', help='share decoders across languages')
@classmethod
def build_model(cls, args, task):
'Build a new model instance.'
from fairseq.tasks.multilingual_translation import MultilingualTranslationTask
assert isinstance(task, MultilingualTranslationTask)
base_multilingual_architecture(args)
if (not hasattr(args, 'max_source_positions')):
args.max_source_positions = 1024
if (not hasattr(args, 'max_target_positions')):
args.max_target_positions = 1024
src_langs = [lang_pair.split('-')[0] for lang_pair in task.model_lang_pairs]
tgt_langs = [lang_pair.split('-')[1] for lang_pair in task.model_lang_pairs]
if args.share_encoders:
args.share_encoder_embeddings = True
if args.share_decoders:
args.share_decoder_embeddings = True
def build_embedding(dictionary, embed_dim, path=None):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
emb = Embedding(num_embeddings, embed_dim, padding_idx)
if path:
embed_dict = utils.parse_embedding(path)
utils.load_embedding(embed_dict, dictionary, emb)
return emb
(shared_encoder_embed_tokens, shared_decoder_embed_tokens) = (None, None)
if args.share_all_embeddings:
if (args.encoder_embed_dim != args.decoder_embed_dim):
raise ValueError('--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim')
if (args.decoder_embed_path and (args.decoder_embed_path != args.encoder_embed_path)):
raise ValueError('--share-all-embeddings not compatible with --decoder-embed-path')
shared_encoder_embed_tokens = FairseqMultiModel.build_shared_embeddings(dicts=task.dicts, langs=task.langs, embed_dim=args.encoder_embed_dim, build_embedding=build_embedding, pretrained_embed_path=args.encoder_embed_path)
shared_decoder_embed_tokens = shared_encoder_embed_tokens
args.share_decoder_input_output_embed = True
else:
if args.share_encoder_embeddings:
shared_encoder_embed_tokens = FairseqMultiModel.build_shared_embeddings(dicts=task.dicts, langs=src_langs, embed_dim=args.encoder_embed_dim, build_embedding=build_embedding, pretrained_embed_path=args.encoder_embed_path)
if args.share_decoder_embeddings:
shared_decoder_embed_tokens = FairseqMultiModel.build_shared_embeddings(dicts=task.dicts, langs=tgt_langs, embed_dim=args.decoder_embed_dim, build_embedding=build_embedding, pretrained_embed_path=args.decoder_embed_path)
(lang_encoders, lang_decoders) = ({}, {})
def get_encoder(lang):
if (lang not in lang_encoders):
if (shared_encoder_embed_tokens is not None):
encoder_embed_tokens = shared_encoder_embed_tokens
else:
encoder_embed_tokens = build_embedding(task.dicts[lang], args.encoder_embed_dim, args.encoder_embed_path)
lang_encoders[lang] = TransformerEncoder(args, task.dicts[lang], encoder_embed_tokens)
return lang_encoders[lang]
def get_decoder(lang):
if (lang not in lang_decoders):
if (shared_decoder_embed_tokens is not None):
decoder_embed_tokens = shared_decoder_embed_tokens
else:
decoder_embed_tokens = build_embedding(task.dicts[lang], args.decoder_embed_dim, args.decoder_embed_path)
lang_decoders[lang] = TransformerDecoder(args, task.dicts[lang], decoder_embed_tokens)
return lang_decoders[lang]
(shared_encoder, shared_decoder) = (None, None)
if args.share_encoders:
shared_encoder = get_encoder(src_langs[0])
if args.share_decoders:
shared_decoder = get_decoder(tgt_langs[0])
(encoders, decoders) = (OrderedDict(), OrderedDict())
for (lang_pair, src, tgt) in zip(task.model_lang_pairs, src_langs, tgt_langs):
encoders[lang_pair] = (shared_encoder if (shared_encoder is not None) else get_encoder(src))
decoders[lang_pair] = (shared_decoder if (shared_decoder is not None) else get_decoder(tgt))
return MultilingualTransformerModel(encoders, decoders)
def load_state_dict(self, state_dict, strict=True, args=None):
state_dict_subset = state_dict.copy()
for (k, _) in state_dict.items():
assert k.startswith('models.')
lang_pair = k.split('.')[1]
if (lang_pair not in self.models):
del state_dict_subset[k]
super().load_state_dict(state_dict_subset, strict=strict, args=args)
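load_state_dict above keeps only parameters whose 'models.<lang_pair>' prefix matches a pair this instance was built with, so checkpoints trained on a superset of language pairs can still be loaded. A self-contained sketch of that filtering on made-up keys:
state_dict = {
    'models.de-en.encoder.embed_tokens.weight': 0,
    'models.de-en.decoder.embed_tokens.weight': 0,
    'models.fr-en.encoder.embed_tokens.weight': 0,
}
my_pairs = {'de-en'}                                              # pairs present in self.models
subset = {k: v for k, v in state_dict.items() if k.split('.')[1] in my_pairs}
print(sorted(subset))                                             # only the de-en parameters survive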
|
@register_model_architecture('multilingual_transformer', 'multilingual_transformer')
def base_multilingual_architecture(args):
base_architecture(args)
args.share_encoder_embeddings = getattr(args, 'share_encoder_embeddings', False)
args.share_decoder_embeddings = getattr(args, 'share_decoder_embeddings', False)
args.share_encoders = getattr(args, 'share_encoders', False)
args.share_decoders = getattr(args, 'share_decoders', False)
|
@register_model_architecture('multilingual_transformer', 'multilingual_transformer_iwslt_de_en')
def multilingual_transformer_iwslt_de_en(args):
args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512)
args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 1024)
args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 4)
args.encoder_layers = getattr(args, 'encoder_layers', 6)
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512)
args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 1024)
args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 4)
args.decoder_layers = getattr(args, 'decoder_layers', 6)
base_multilingual_architecture(args)
|
def _skeptical_unmasking(output_scores, output_masks, p):
sorted_index = output_scores.sort((- 1))[1]
boundary_len = ((output_masks.sum(1, keepdim=True).type_as(output_scores) - 2) * p).long()
skeptical_mask = (new_arange(output_masks) < boundary_len)
return skeptical_mask.scatter(1, sorted_index, skeptical_mask)
|
@register_model('cmlm_transformer')
class CMLMNATransformerModel(NATransformerModel):
@staticmethod
def add_args(parser):
NATransformerModel.add_args(parser)
def forward(self, src_tokens, src_lengths, prev_output_tokens, tgt_tokens, **kwargs):
assert (not self.decoder.src_embedding_copy), 'do not support embedding copy.'
encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs)
length_out = self.decoder.forward_length(normalize=False, encoder_out=encoder_out)
length_tgt = self.decoder.forward_length_prediction(length_out, encoder_out, tgt_tokens)
word_ins_out = self.decoder(normalize=False, prev_output_tokens=prev_output_tokens, encoder_out=encoder_out)
word_ins_mask = prev_output_tokens.eq(self.unk)
return {'word_ins': {'out': word_ins_out, 'tgt': tgt_tokens, 'mask': word_ins_mask, 'ls': self.args.label_smoothing, 'nll_loss': True}, 'length': {'out': length_out, 'tgt': length_tgt, 'factor': self.decoder.length_loss_factor}}
def forward_decoder(self, decoder_out, encoder_out, decoding_format=None, **kwargs):
step = decoder_out.step
max_step = decoder_out.max_step
output_tokens = decoder_out.output_tokens
output_scores = decoder_out.output_scores
history = decoder_out.history
output_masks = output_tokens.eq(self.unk)
(_scores, _tokens) = self.decoder(normalize=True, prev_output_tokens=output_tokens, encoder_out=encoder_out).max((- 1))
output_tokens.masked_scatter_(output_masks, _tokens[output_masks])
output_scores.masked_scatter_(output_masks, _scores[output_masks])
if (history is not None):
history.append(output_tokens.clone())
if ((step + 1) < max_step):
skeptical_mask = _skeptical_unmasking(output_scores, output_tokens.ne(self.pad), (1 - ((step + 1) / max_step)))
output_tokens.masked_fill_(skeptical_mask, self.unk)
output_scores.masked_fill_(skeptical_mask, 0.0)
if (history is not None):
history.append(output_tokens.clone())
return decoder_out._replace(output_tokens=output_tokens, output_scores=output_scores, attn=None, history=history)
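forward_decoder above implements mask-predict refinement: at each iteration every <unk> position is re-predicted, then _skeptical_unmasking re-masks the lowest-scoring 1 - (step + 1) / max_step fraction of tokens for the next pass. A small worked sketch of that schedule with toy numbers (not from any run):
num_tokens, max_step = 12, 10                       # non-pad tokens in a sentence, refinement iterations
for step in range(max_step):
    p = 1 - (step + 1) / max_step
    boundary_len = int((num_tokens - 2) * p)        # tokens re-masked, as in _skeptical_unmasking
    print(step, round(p, 2), boundary_len)
# step 0 re-masks 9 of 12 tokens; the count shrinks each iteration, and no
# re-masking happens after the final step because (step + 1) < max_step fails.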
|
@register_model_architecture('cmlm_transformer', 'cmlm_transformer')
def cmlm_base_architecture(args):
args.encoder_embed_path = getattr(args, 'encoder_embed_path', None)
args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512)
args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 2048)
args.encoder_layers = getattr(args, 'encoder_layers', 6)
args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 8)
args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False)
args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', False)
args.decoder_embed_path = getattr(args, 'decoder_embed_path', None)
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', args.encoder_ffn_embed_dim)
args.decoder_layers = getattr(args, 'decoder_layers', 6)
args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 8)
args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', False)
args.decoder_learned_pos = getattr(args, 'decoder_learned_pos', False)
args.attention_dropout = getattr(args, 'attention_dropout', 0.0)
args.activation_dropout = getattr(args, 'activation_dropout', 0.0)
args.activation_fn = getattr(args, 'activation_fn', 'relu')
args.dropout = getattr(args, 'dropout', 0.1)
args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None)
args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0)
args.share_decoder_input_output_embed = getattr(args, 'share_decoder_input_output_embed', False)
args.share_all_embeddings = getattr(args, 'share_all_embeddings', True)
args.no_token_positional_embeddings = getattr(args, 'no_token_positional_embeddings', False)
args.adaptive_input = getattr(args, 'adaptive_input', False)
args.apply_bert_init = getattr(args, 'apply_bert_init', False)
args.decoder_output_dim = getattr(args, 'decoder_output_dim', args.decoder_embed_dim)
args.decoder_input_dim = getattr(args, 'decoder_input_dim', args.decoder_embed_dim)
args.sg_length_pred = getattr(args, 'sg_length_pred', False)
args.pred_length_offset = getattr(args, 'pred_length_offset', False)
args.length_loss_factor = getattr(args, 'length_loss_factor', 0.1)
args.ngram_predictor = getattr(args, 'ngram_predictor', 1)
args.src_embedding_copy = getattr(args, 'src_embedding_copy', False)
|
@register_model_architecture('cmlm_transformer', 'cmlm_transformer_wmt_en_de')
def cmlm_wmt_en_de(args):
cmlm_base_architecture(args)
|
@register_model('nacrf_transformer')
class NACRFTransformerModel(NATransformerModel):
def __init__(self, args, encoder, decoder):
super().__init__(args, encoder, decoder)
self.crf_layer = DynamicCRF(num_embedding=len(self.tgt_dict), low_rank=args.crf_lowrank_approx, beam_size=args.crf_beam_approx)
@property
def allow_ensemble(self):
return False
@staticmethod
def add_args(parser):
NATransformerModel.add_args(parser)
parser.add_argument('--crf-lowrank-approx', type=int, help='the dimension of low-rank approximation of transition')
parser.add_argument('--crf-beam-approx', type=int, help='the beam size for approximating the normalizing factor')
parser.add_argument('--word-ins-loss-factor', type=float, help='weight on the NAT word-insertion loss used for co-training with the CRF loss')
def forward(self, src_tokens, src_lengths, prev_output_tokens, tgt_tokens, **kwargs):
encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs)
length_out = self.decoder.forward_length(normalize=False, encoder_out=encoder_out)
length_tgt = self.decoder.forward_length_prediction(length_out, encoder_out, tgt_tokens)
word_ins_out = self.decoder(normalize=False, prev_output_tokens=prev_output_tokens, encoder_out=encoder_out)
(word_ins_tgt, word_ins_mask) = (tgt_tokens, tgt_tokens.ne(self.pad))
crf_nll = (- self.crf_layer(word_ins_out, word_ins_tgt, word_ins_mask))
crf_nll = (crf_nll / word_ins_mask.type_as(crf_nll).sum((- 1))).mean()
return {'word_ins': {'out': word_ins_out, 'tgt': word_ins_tgt, 'mask': word_ins_mask, 'ls': self.args.label_smoothing, 'nll_loss': True, 'factor': self.args.word_ins_loss_factor}, 'word_crf': {'loss': crf_nll}, 'length': {'out': length_out, 'tgt': length_tgt, 'factor': self.decoder.length_loss_factor}}
def forward_decoder(self, decoder_out, encoder_out, decoding_format=None, **kwargs):
output_tokens = decoder_out.output_tokens
output_scores = decoder_out.output_scores
history = decoder_out.history
output_masks = output_tokens.ne(self.pad)
word_ins_out = self.decoder(normalize=False, prev_output_tokens=output_tokens, encoder_out=encoder_out)
(_scores, _tokens) = self.crf_layer.forward_decoder(word_ins_out, output_masks)
output_tokens.masked_scatter_(output_masks, _tokens[output_masks])
output_scores.masked_scatter_(output_masks, _scores[output_masks])
if (history is not None):
history.append(output_tokens.clone())
return decoder_out._replace(output_tokens=output_tokens, output_scores=output_scores, attn=None, history=history)
|
@register_model_architecture('nacrf_transformer', 'nacrf_transformer')
def nacrf_base_architecture(args):
args.crf_lowrank_approx = getattr(args, 'crf_lowrank_approx', 32)
args.crf_beam_approx = getattr(args, 'crf_beam_approx', 64)
args.word_ins_loss_factor = getattr(args, 'word_ins_loss_factor', 0.5)
args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', True)
args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', True)
base_architecture(args)
|
def align_bpe_to_words(roberta, bpe_tokens: torch.LongTensor, other_tokens: List[str]):
'\n Helper to align GPT-2 BPE to other tokenization formats (e.g., spaCy).\n\n Args:\n roberta (RobertaHubInterface): RoBERTa instance\n bpe_tokens (torch.LongTensor): GPT-2 BPE tokens of shape `(T_bpe)`\n other_tokens (List[str]): other tokens of shape `(T_words)`\n\n Returns:\n List[List[int]]: for each token in *other_tokens*, the indices of the corresponding *bpe_tokens*.\n '
assert (bpe_tokens.dim() == 1)
assert (bpe_tokens[0] == 0)
def clean(text):
return text.strip()
bpe_tokens = [roberta.task.source_dictionary.string([x]) for x in bpe_tokens]
bpe_tokens = [clean((roberta.bpe.decode(x) if (x not in {'<s>', ''}) else x)) for x in bpe_tokens]
other_tokens = [clean(str(o)) for o in other_tokens]
bpe_tokens = bpe_tokens[1:]
assert (''.join(bpe_tokens) == ''.join(other_tokens))
alignment = []
bpe_toks = filter((lambda item: (item[1] != '')), enumerate(bpe_tokens, start=1))
(j, bpe_tok) = next(bpe_toks)
for other_tok in other_tokens:
bpe_indices = []
while True:
if other_tok.startswith(bpe_tok):
bpe_indices.append(j)
other_tok = other_tok[len(bpe_tok):]
try:
(j, bpe_tok) = next(bpe_toks)
except StopIteration:
(j, bpe_tok) = (None, None)
elif bpe_tok.startswith(other_tok):
bpe_indices.append(j)
bpe_tok = bpe_tok[len(other_tok):]
other_tok = ''
else:
raise Exception('Cannot align "{}" and "{}"'.format(other_tok, bpe_tok))
if (other_tok == ''):
break
assert (len(bpe_indices) > 0)
alignment.append(bpe_indices)
assert (len(alignment) == len(other_tokens))
return alignment
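A hedged usage sketch, assuming the public roberta.base checkpoint is fetched through torch.hub and that align_bpe_to_words is importable from this module:
import torch

roberta = torch.hub.load('pytorch/fairseq', 'roberta.base')
roberta.eval()

bpe_tokens = roberta.encode('Hello world!')         # GPT-2 BPE ids, starts with <s>
other_tokens = ['Hello', 'world', '!']              # e.g. whitespace or spaCy tokens
alignment = align_bpe_to_words(roberta, bpe_tokens, other_tokens)
print(alignment)                                    # one list of BPE positions per word in other_tokens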
|
def align_features_to_words(roberta, features, alignment):
'\n Align given features to words.\n\n Args:\n roberta (RobertaHubInterface): RoBERTa instance\n features (torch.Tensor): features to align of shape `(T_bpe x C)`\n alignment: alignment between BPE tokens and words returned by\n func:`align_bpe_to_words`.\n '
assert (features.dim() == 2)
bpe_counts = Counter((j for bpe_indices in alignment for j in bpe_indices))
assert (bpe_counts[0] == 0)
denom = features.new([bpe_counts.get(j, 1) for j in range(len(features))])
weighted_features = (features / denom.unsqueeze((- 1)))
output = [weighted_features[0]]
largest_j = (- 1)
for bpe_indices in alignment:
output.append(weighted_features[bpe_indices].sum(dim=0))
largest_j = max(largest_j, *bpe_indices)
for j in range((largest_j + 1), len(features)):
output.append(weighted_features[j])
output = torch.stack(output)
assert torch.all((torch.abs((output.sum(dim=0) - features.sum(dim=0))) < 0.0001))
return output
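Continuing the sketch above, per-BPE features from extract_features can be pooled into per-word features; each BPE piece is down-weighted by how many words it is assigned to, so the overall feature mass is preserved (the final assert):
features = roberta.extract_features(bpe_tokens)                     # 1 x T_bpe x C
aligned = align_features_to_words(roberta, features.squeeze(0), alignment)
print(aligned.shape)   # <s> row, one row per word in other_tokens, then any trailing rows (e.g. </s>)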
|
def spacy_nlp():
if (getattr(spacy_nlp, '_nlp', None) is None):
try:
from spacy.lang.en import English
spacy_nlp._nlp = English()
except ImportError:
raise ImportError('Please install spacy with: pip install spacy')
return spacy_nlp._nlp
|
def spacy_tokenizer():
if (getattr(spacy_tokenizer, '_tokenizer', None) is None):
try:
nlp = spacy_nlp()
spacy_tokenizer._tokenizer = nlp.Defaults.create_tokenizer(nlp)
except ImportError:
raise ImportError('Please install spacy with: pip install spacy')
return spacy_tokenizer._tokenizer
|
@register_model('camembert')
class CamembertModel(RobertaModel):
@classmethod
def hub_models(cls):
return {'camembert.v0': 'http://dl.fbaipublicfiles.com/fairseq/models/camembert.v0.tar.gz'}
@classmethod
def from_pretrained(cls, model_name_or_path, checkpoint_file='model.pt', data_name_or_path='.', bpe='sentencepiece', **kwargs):
from fairseq import hub_utils
x = hub_utils.from_pretrained(model_name_or_path, checkpoint_file, data_name_or_path, archive_map=cls.hub_models(), bpe=bpe, load_checkpoint_heads=True, **kwargs)
return RobertaHubInterface(x['args'], x['task'], x['models'][0])
|
@register_model('xlmr')
class XLMRModel(RobertaModel):
@classmethod
def hub_models(cls):
return {'xlmr.base': 'http://dl.fbaipublicfiles.com/fairseq/models/xlmr.base.tar.gz', 'xlmr.large': 'http://dl.fbaipublicfiles.com/fairseq/models/xlmr.large.tar.gz'}
@classmethod
def from_pretrained(cls, model_name_or_path, checkpoint_file='model.pt', data_name_or_path='.', bpe='sentencepiece', **kwargs):
from fairseq import hub_utils
x = hub_utils.from_pretrained(model_name_or_path, checkpoint_file, data_name_or_path, archive_map=cls.hub_models(), bpe=bpe, load_checkpoint_heads=True, **kwargs)
return RobertaHubInterface(x['args'], x['task'], x['models'][0])
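A hedged usage sketch for the hub entry points above (downloads the public xlmr.base archive on first use and assumes the RobertaHubInterface helpers encode/extract_features):
xlmr = XLMRModel.from_pretrained('xlmr.base', checkpoint_file='model.pt')
xlmr.eval()                                      # disable dropout for inference
tokens = xlmr.encode('Bonjour le monde !')       # sentencepiece BPE ids
features = xlmr.extract_features(tokens)         # 1 x T x C sentence representation
print(features.shape)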
|
@register_model('transformer_from_pretrained_xlm')
class TransformerFromPretrainedXLMModel(TransformerModel):
@staticmethod
def add_args(parser):
'Add model-specific arguments to the parser.'
TransformerModel.add_args(parser)
parser.add_argument('--pretrained-xlm-checkpoint', type=str, metavar='STR', help='XLM model to use for initializing transformer encoder and/or decoder')
parser.add_argument('--init-encoder-only', action='store_true', help="if set, don't load the XLM weights and embeddings into decoder")
parser.add_argument('--init-decoder-only', action='store_true', help="if set, don't load the XLM weights and embeddings into encoder")
@classmethod
def build_model(self, args, task, cls_dictionary=MaskedLMDictionary):
assert hasattr(args, 'pretrained_xlm_checkpoint'), 'You must specify a path for --pretrained-xlm-checkpoint to use --arch transformer_from_pretrained_xlm'
assert (isinstance(task.source_dictionary, cls_dictionary) and isinstance(task.target_dictionary, cls_dictionary)), 'You should use a MaskedLMDictionary when using --arch transformer_from_pretrained_xlm because the pretrained XLM model was trained using data binarized with MaskedLMDictionary. For translation, you may want to use --task translation_from_pretrained_xlm'
assert (not (getattr(args, 'init_encoder_only', False) and getattr(args, 'init_decoder_only', False))), 'Only one of --init-encoder-only and --init-decoder-only can be set.'
return super().build_model(args, task)
@classmethod
def build_encoder(cls, args, src_dict, embed_tokens):
return TransformerEncoderFromPretrainedXLM(args, src_dict, embed_tokens)
@classmethod
def build_decoder(cls, args, tgt_dict, embed_tokens):
return TransformerDecoderFromPretrainedXLM(args, tgt_dict, embed_tokens)
|
def upgrade_state_dict_with_xlm_weights(state_dict: Dict[(str, Any)], pretrained_xlm_checkpoint: str) -> Dict[(str, Any)]:
'\n Load XLM weights into a Transformer encoder or decoder model.\n\n Args:\n state_dict: state dict for either TransformerEncoder or\n TransformerDecoder\n pretrained_xlm_checkpoint: checkpoint to load XLM weights from\n\n Raises:\n AssertionError: If architecture (num layers, attention heads, etc.)\n does not match between the current Transformer encoder or\n decoder and the pretrained_xlm_checkpoint\n '
if (not os.path.exists(pretrained_xlm_checkpoint)):
raise IOError('Model file not found: {}'.format(pretrained_xlm_checkpoint))
state = checkpoint_utils.load_checkpoint_to_cpu(pretrained_xlm_checkpoint)
xlm_state_dict = state['model']
for key in xlm_state_dict.keys():
for search_key in ['embed_tokens', 'embed_positions', 'layers']:
if (search_key in key):
subkey = key[key.find(search_key):]
assert (subkey in state_dict), '{} Transformer encoder / decoder state_dict does not contain {}. Cannot load {} from pretrained XLM checkpoint {} into Transformer.'.format(str(state_dict.keys()), subkey, key, pretrained_xlm_checkpoint)
state_dict[subkey] = xlm_state_dict[key]
return state_dict
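The loop above matches XLM parameter names by substring and keeps everything from the first occurrence of the matched component onward, so an XLM key maps onto the corresponding encoder/decoder sub-module key. A self-contained sketch of that slicing on a made-up key:
key = 'decoder.sentence_encoder.layers.0.self_attn.in_proj_weight'   # illustrative, not from a real checkpoint
for search_key in ['embed_tokens', 'embed_positions', 'layers']:
    if search_key in key:
        subkey = key[key.find(search_key):]
        print(subkey)        # layers.0.self_attn.in_proj_weight, looked up in the Transformer state_dict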
|
class TransformerEncoderFromPretrainedXLM(TransformerEncoder):
def __init__(self, args, dictionary, embed_tokens):
super().__init__(args, dictionary, embed_tokens)
if getattr(args, 'init_decoder_only', False):
return
assert hasattr(args, 'pretrained_xlm_checkpoint'), '--pretrained-xlm-checkpoint must be specified to load Transformer encoder from pretrained XLM'
xlm_loaded_state_dict = upgrade_state_dict_with_xlm_weights(state_dict=self.state_dict(), pretrained_xlm_checkpoint=args.pretrained_xlm_checkpoint)
self.load_state_dict(xlm_loaded_state_dict, strict=True)
|
class TransformerDecoderFromPretrainedXLM(TransformerDecoder):
def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False):
super().__init__(args, dictionary, embed_tokens, no_encoder_attn)
if getattr(args, 'init_encoder_only', False):
return
assert hasattr(args, 'pretrained_xlm_checkpoint'), '--pretrained-xlm-checkpoint must be specified to load Transformer decoder from pretrained XLM'
xlm_loaded_state_dict = upgrade_state_dict_with_xlm_weights(state_dict=self.state_dict(), pretrained_xlm_checkpoint=args.pretrained_xlm_checkpoint)
self.load_state_dict(xlm_loaded_state_dict, strict=True)
|
@register_model_architecture('transformer_from_pretrained_xlm', 'transformer_from_pretrained_xlm')
def base_architecture(args):
transformer_base_architecture(args)
|
@register_model('transformer_lm')
class TransformerLanguageModel(FairseqLanguageModel):
@classmethod
def hub_models(cls):
def moses_fastbpe(path):
return {'path': path, 'tokenizer': 'moses', 'bpe': 'fastbpe'}
return {'transformer_lm.gbw.adaptive_huge': 'https://dl.fbaipublicfiles.com/fairseq/models/lm/adaptive_lm_gbw_huge.tar.bz2', 'transformer_lm.wiki103.adaptive': 'https://dl.fbaipublicfiles.com/fairseq/models/lm/adaptive_lm_wiki103.tar.bz2', 'transformer_lm.wmt19.en': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt19.en.tar.bz2'), 'transformer_lm.wmt19.de': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt19.de.tar.bz2'), 'transformer_lm.wmt19.ru': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt19.ru.tar.bz2')}
def __init__(self, decoder):
super().__init__(decoder)
@staticmethod
def add_args(parser):
'Add model-specific arguments to the parser.'
parser.add_argument('--activation-fn', choices=utils.get_available_activation_fns(), help='activation function to use')
parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability')
parser.add_argument('--attention-dropout', type=float, metavar='D', help='dropout probability for attention weights')
parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D', help='dropout probability after activation in FFN.')
parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension')
parser.add_argument('--decoder-output-dim', type=int, metavar='N', help='decoder output dimension')
parser.add_argument('--decoder-input-dim', type=int, metavar='N', help='decoder input dimension')
parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N', help='decoder embedding dimension for FFN')
parser.add_argument('--decoder-layers', type=int, metavar='N', help='num decoder layers')
parser.add_argument('--decoder-attention-heads', type=int, metavar='N', help='num decoder attention heads')
parser.add_argument('--decoder-normalize-before', action='store_true', help='apply layernorm before each decoder block')
parser.add_argument('--no-decoder-final-norm', action='store_true', help="don't add an extra layernorm after the last decoder block")
parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR', help='comma separated list of adaptive softmax cutoff points. Must be used with adaptive_loss criterion')
parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D', help='sets adaptive softmax dropout for the tail projections')
parser.add_argument('--adaptive-softmax-factor', type=float, metavar='N', help='adaptive softmax factor')
parser.add_argument('--no-token-positional-embeddings', action='store_true', help='if set, disables positional embeddings (outside self attention)')
parser.add_argument('--share-decoder-input-output-embed', action='store_true', help='share decoder input and output embeddings')
parser.add_argument('--character-embeddings', action='store_true', help='if set, uses character embedding convolutions to produce token embeddings')
parser.add_argument('--character-filters', type=str, metavar='LIST', default='[(1, 64), (2, 128), (3, 192), (4, 256), (5, 256), (6, 256), (7, 256)]', help='list of (kernel width, num filters) tuples for the character embedding convolutions')
parser.add_argument('--character-embedding-dim', default=4, type=int, metavar='N', help='size of character embeddings')
parser.add_argument('--char-embedder-highway-layers', default=2, type=int, metavar='N', help='number of highway layers for character token embedder')
parser.add_argument('--adaptive-input', action='store_true', help='if set, uses adaptive input')
parser.add_argument('--adaptive-input-factor', type=float, metavar='N', help='adaptive input factor')
parser.add_argument('--adaptive-input-cutoff', metavar='EXPR', help='comma separated list of adaptive input cutoff points.')
parser.add_argument('--tie-adaptive-weights', action='store_true', help='if set, ties the weights of adaptive softmax and adaptive input')
parser.add_argument('--tie-adaptive-proj', action='store_true', help='if set, ties the projection weights of adaptive softmax and adaptive input')
parser.add_argument('--decoder-learned-pos', action='store_true', help='use learned positional embeddings in the decoder')
parser.add_argument('--decoder-layerdrop', type=float, metavar='D', default=0, help='LayerDrop probability for decoder')
parser.add_argument('--decoder-layers-to-keep', default=None, help='which layers to *keep* when pruning as a comma-separated list')
parser.add_argument('--layernorm-embedding', action='store_true', help='add layernorm to embedding')
parser.add_argument('--no-scale-embedding', action='store_true', help="if set, don't scale embeddings")
parser.add_argument('--print-stats', action='store_true', help='Print MACs')
parser.add_argument('--tgt-len-ps', type=int, help='Target length for printing stats')
@classmethod
def build_model(cls, args, task):
'Build a new model instance.'
base_lm_architecture(args)
if args.decoder_layers_to_keep:
args.decoder_layers = len(args.decoder_layers_to_keep.split(','))
if (getattr(args, 'max_target_positions', None) is None):
args.max_target_positions = getattr(args, 'tokens_per_sample', DEFAULT_MAX_TARGET_POSITIONS)
if args.character_embeddings:
embed_tokens = CharacterTokenEmbedder(task.source_dictionary, eval(args.character_filters), args.character_embedding_dim, args.decoder_embed_dim, args.char_embedder_highway_layers)
elif args.adaptive_input:
embed_tokens = AdaptiveInput(len(task.source_dictionary), task.source_dictionary.pad(), args.decoder_input_dim, args.adaptive_input_factor, args.decoder_embed_dim, options.eval_str_list(args.adaptive_input_cutoff, type=int))
else:
embed_tokens = Embedding(len(task.source_dictionary), args.decoder_input_dim, task.source_dictionary.pad())
if args.tie_adaptive_weights:
assert args.adaptive_input
assert (args.adaptive_input_factor == args.adaptive_softmax_factor)
assert (args.adaptive_softmax_cutoff == args.adaptive_input_cutoff), '{} != {}'.format(args.adaptive_softmax_cutoff, args.adaptive_input_cutoff)
assert (args.decoder_input_dim == args.decoder_output_dim)
decoder = TransformerDecoder(args, task.target_dictionary, embed_tokens, no_encoder_attn=True)
if (args.print_stats and is_master(args)):
cls.compute_stats(args, decoder)
return TransformerLanguageModel(decoder)
@classmethod
def compute_stats(cls, args, decoder):
target_length = args.tgt_len_ps
print((('=' * 15) * target_length))
print('{:<90} {:<20}'.format('', cls.__name__))
print((('=' * 15) * target_length))
overall_macs = 0.0
overall_params = 0.0
round_places = 2
dec_string = {}
import csv
with open('{}/decoder_stats_{}.csv'.format(args.save_dir, target_length), mode='w') as csv_file:
csv_writer = csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
for time_step in range(1, (target_length + 1)):
for (dec_idx, (k, v)) in enumerate(decoder.compute_macs_params(src_len=1, tgt_len=time_step).items()):
if (args.share_all_embeddings and (k == 'Dec_LUT')):
macs = (v['macs'] + v['emb_macs'])
params = v['params']
else:
macs = (v['macs'] + v['emb_macs'])
params = (v['params'] + v['emb_params'])
overall_macs += macs
if (time_step == 1):
overall_params += params
macs = round((float(macs) / 1000000.0), round_places)
params = round((float(params) / 1000000.0), round_places)
if (k not in dec_string):
dec_string[k] = [[time_step, params, macs]]
else:
dec_string[k].append([time_step, params, macs])
if (dec_idx == 0):
key_list = list(v.keys())
csv_writer.writerow(((['Time'] + ['Layer']) + key_list))
value_list = list(v.values())
value_list = (([time_step] + [k]) + value_list)
csv_writer.writerow(value_list)
format_str_dec1 = '{:<20} | \t '.format('Layer')
dotted_line = ('-' * 20)
for t in range((target_length + 1)):
if (t == 0):
format_str_dec1 += '{:<10} | \t '.format('Params')
else:
format_str_dec1 += '{:<10} '.format('t_{}'.format(t))
dotted_line += ('-' * 10)
dotted_line += ('-' * 10)
format_str_dec1 += '| \t {:<10} '.format('Overall MAC')
dotted_line += ('-' * 10)
print(dotted_line)
print(format_str_dec1)
print(dotted_line)
for (layer_name, v) in dec_string.items():
time_step_str = '{:<20} | \t '.format(layer_name)
macs = 0
for (idx, (t, p, m)) in enumerate(v):
if (idx == 0):
time_step_str += '{:<10} | \t '.format(p)
time_step_str += '{:<10} '.format(m)
else:
time_step_str += '{:<10} '.format(m)
macs += m
time_step_str += '| \t {:<10} '.format(round(macs, 3))
print(time_step_str)
overall_macs = round((float(overall_macs) / 1000000.0), round_places)
overall_params = round((float(overall_params) / 1000000.0), round_places)
print((('-' * 15) * target_length))
print('Total MACs for {} decoder timesteps: {} M'.format(target_length, overall_macs))
print('Total parameters: {} M'.format(overall_params))
print((('=' * 15) * target_length))
with open('{}/overall_stats_{}.csv'.format(args.save_dir, target_length), mode='w') as csv_file:
csv_writer = csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
csv_writer.writerow(['Time steps', target_length])
csv_writer.writerow(['Total MACs (in million)', overall_macs])
csv_writer.writerow(['Total parameters (in million)', overall_params])
|
@register_model_architecture('transformer_lm', 'transformer_lm')
def base_lm_architecture(args):
if hasattr(args, 'no_tie_adaptive_proj'):
args.no_decoder_final_norm = True
if (args.no_tie_adaptive_proj is False):
args.tie_adaptive_proj = True
if hasattr(args, 'decoder_final_norm'):
args.no_decoder_final_norm = (not args.decoder_final_norm)
args.dropout = getattr(args, 'dropout', 0.1)
args.attention_dropout = getattr(args, 'attention_dropout', 0.0)
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512)
args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 2048)
args.decoder_layers = getattr(args, 'decoder_layers', 6)
args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 8)
args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None)
args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0)
args.adaptive_softmax_factor = getattr(args, 'adaptive_softmax_factor', 4)
args.decoder_learned_pos = getattr(args, 'decoder_learned_pos', False)
args.activation_fn = getattr(args, 'activation_fn', 'relu')
args.add_bos_token = getattr(args, 'add_bos_token', False)
args.no_token_positional_embeddings = getattr(args, 'no_token_positional_embeddings', False)
args.share_decoder_input_output_embed = getattr(args, 'share_decoder_input_output_embed', False)
args.character_embeddings = getattr(args, 'character_embeddings', False)
args.decoder_output_dim = getattr(args, 'decoder_output_dim', args.decoder_embed_dim)
args.decoder_input_dim = getattr(args, 'decoder_input_dim', args.decoder_embed_dim)
args.decoder_normalize_before = True
args.no_decoder_final_norm = getattr(args, 'no_decoder_final_norm', False)
args.adaptive_input = getattr(args, 'adaptive_input', False)
args.adaptive_input_factor = getattr(args, 'adaptive_input_factor', 4)
args.adaptive_input_cutoff = getattr(args, 'adaptive_input_cutoff', None)
args.tie_adaptive_weights = getattr(args, 'tie_adaptive_weights', False)
args.tie_adaptive_proj = getattr(args, 'tie_adaptive_proj', False)
args.no_scale_embedding = getattr(args, 'no_scale_embedding', False)
args.layernorm_embedding = getattr(args, 'layernorm_embedding', False)
args.print_stats = getattr(args, 'print_stats', False)
args.tgt_len_ps = getattr(args, 'tgt_len_ps', 20)
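Every registered architecture here relies on the same idiom: getattr(args, name, default) only fills in values the user did not pass, so command-line overrides always win and derived architectures (e.g. transformer_lm_gpt below) just pre-set a few fields before calling the base. A small self-contained sketch:
from argparse import Namespace

args = Namespace(decoder_embed_dim=256)                            # pretend the user passed --decoder-embed-dim 256
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512)   # user value kept: 256
args.decoder_layers = getattr(args, 'decoder_layers', 6)           # unset, so the default 6 applies
print(args.decoder_embed_dim, args.decoder_layers)                 # 256 6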
|
@register_model_architecture('transformer_lm', 'transformer_lm_big')
def transformer_lm_big(args):
args.decoder_layers = getattr(args, 'decoder_layers', 12)
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 1024)
args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 4096)
args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 16)
base_lm_architecture(args)
|
@register_model_architecture('transformer_lm', 'transformer_lm_wiki103')
@register_model_architecture('transformer_lm', 'transformer_lm_baevski_wiki103')
def transformer_lm_baevski_wiki103(args):
args.decoder_layers = getattr(args, 'decoder_layers', 16)
args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 8)
args.dropout = getattr(args, 'dropout', 0.3)
args.adaptive_input = getattr(args, 'adaptive_input', True)
args.tie_adaptive_weights = getattr(args, 'tie_adaptive_weights', True)
args.adaptive_input_cutoff = getattr(args, 'adaptive_input_cutoff', '20000,60000')
args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', '20000,60000')
args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0.2)
args.attention_dropout = getattr(args, 'attention_dropout', 0.1)
args.activation_dropout = getattr(args, 'activation_dropout', 0.1)
args.no_decoder_final_norm = getattr(args, 'no_decoder_final_norm', True)
args.tie_adaptive_proj = getattr(args, 'tie_adaptive_proj', True)
transformer_lm_big(args)
|
@register_model_architecture('transformer_lm', 'transformer_lm_gbw')
@register_model_architecture('transformer_lm', 'transformer_lm_baevski_gbw')
def transformer_lm_baevski_gbw(args):
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512)
args.dropout = getattr(args, 'dropout', 0.1)
args.attention_dropout = getattr(args, 'attention_dropout', 0.1)
args.no_decoder_final_norm = getattr(args, 'no_decoder_final_norm', True)
transformer_lm_big(args)
|
@register_model_architecture('transformer_lm', 'transformer_lm_gpt')
def transformer_lm_gpt(args):
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 768)
args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 3072)
args.decoder_layers = getattr(args, 'decoder_layers', 12)
args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 12)
args.dropout = getattr(args, 'dropout', 0.1)
args.attention_dropout = getattr(args, 'attention_dropout', 0.1)
args.activation_fn = getattr(args, 'activation_fn', 'gelu')
base_lm_architecture(args)
|
@register_model_architecture('transformer_lm', 'transformer_lm_gpt2_small')
def transformer_lm_gpt2_small(args):
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 1024)
args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 4096)
args.decoder_layers = getattr(args, 'decoder_layers', 24)
args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 16)
args.dropout = getattr(args, 'dropout', 0.1)
args.attention_dropout = getattr(args, 'attention_dropout', 0.1)
args.activation_fn = getattr(args, 'activation_fn', 'gelu')
base_lm_architecture(args)
|
@register_model_architecture('transformer_lm', 'transformer_lm_gpt2_medium')
def transformer_lm_gpt2_medium(args):
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 1280)
args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 5120)
args.decoder_layers = getattr(args, 'decoder_layers', 36)
args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 20)
args.dropout = getattr(args, 'dropout', 0.1)
args.attention_dropout = getattr(args, 'attention_dropout', 0.1)
args.activation_fn = getattr(args, 'activation_fn', 'gelu')
base_lm_architecture(args)
|
@register_model_architecture('transformer_lm', 'transformer_lm_gpt2_big')
def transformer_lm_gpt2_big(args):
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 1600)
args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 6400)
args.decoder_layers = getattr(args, 'decoder_layers', 48)
args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 25)
args.dropout = getattr(args, 'dropout', 0.1)
args.attention_dropout = getattr(args, 'attention_dropout', 0.1)
args.activation_fn = getattr(args, 'activation_fn', 'gelu')
base_lm_architecture(args)
|
class AdaptiveInput(nn.Module):
def __init__(self, vocab_size: int, padding_idx: int, initial_dim: int, factor: float, output_dim: int, cutoff: List[int], no_scale_emb: bool=False):
super().__init__()
if (vocab_size > cutoff[(- 1)]):
cutoff = (cutoff + [vocab_size])
else:
assert (vocab_size == cutoff[(- 1)]), 'cannot specify cutoff larger than vocab size'
self.cutoff = cutoff
self.embedding_dim = output_dim
self.padding_idx = padding_idx
self.embeddings = nn.ModuleList()
self.projections = nn.ModuleList()
self.embed_scales = []
self.padding_idxes = []
for i in range(len(self.cutoff)):
prev = (self.cutoff[(i - 1)] if (i > 0) else 0)
size = (self.cutoff[i] - prev)
dim = int((initial_dim // (factor ** i)))
emb = nn.Embedding(size, dim, self.padding_idx)
nn.init.normal_(emb.weight, mean=0, std=(emb.weight.shape[1] ** (- 0.5)))
nn.init.constant_(emb.weight[padding_idx], 0)  # zero only the padding row; self.padding_idx is cleared after the first band, so use the argument here
proj = nn.Linear(dim, output_dim, bias=False)
nn.init.xavier_uniform_(proj.weight)
self.embeddings.append(emb)
self.projections.append(proj)
self.padding_idx = None
self.embed_scales.append((1.0 if no_scale_emb else math.sqrt(dim)))
self.padding_idx = padding_idx
self.register_buffer('_float_tensor', torch.FloatTensor(1))
def weights_for_band(self, band: int):
return (self.embeddings[band].weight, self.projections[band].weight)
def forward(self, input: torch.Tensor):
result = self._float_tensor.new((input.shape + (self.embedding_dim,)))
for i in range(len(self.cutoff)):
mask = input.lt(self.cutoff[i])
if (i > 0):
mask.mul_(input.ge(self.cutoff[(i - 1)]))
chunk_input = (input[mask] - self.cutoff[(i - 1)])
else:
chunk_input = input[mask]
if mask.any():
scaled_emb = (self.embeddings[i](chunk_input) * self.embed_scales[i])
scaled_emb = self.projections[i](scaled_emb)
result[mask] = scaled_emb
return result
def __repr__(self):
s = '{name}(cutoff={cutoff}, output_features={embedding_dim})'
for (e, p) in zip(self.embeddings, self.projections):
s += '\n \t \t {} --> {}'.format(e, p)
s += '\n'
return s.format(name=self.__class__.__name__, **self.__dict__)
def compute_macs_params(self):
embedding_macs = 0
embedding_params = 0
for m in self.embeddings:
embedding_params += sum([p.numel() for p in m.parameters()])
embedding_macs += 0
proj_macs = 0
proj_params = 0
for m in self.projections:
n_params_lin = sum([p.numel() for p in m.parameters()])
proj_macs += n_params_lin
proj_params += n_params_lin
return {'name': self.__class__.__name__, 'proj_macs': proj_macs, 'proj_params': proj_params, 'embedding_params': embedding_params, 'embedding_macs': embedding_macs}
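A hedged sketch of how the bands above are sized, using toy numbers rather than any released configuration: ids below cutoff[0] get the widest embedding, each later band shrinks by the factor, and every band is projected back to output_dim (assumes the AdaptiveInput class above is in scope):
import torch

emb = AdaptiveInput(vocab_size=50000, padding_idx=1, initial_dim=512,
                    factor=4, output_dim=512, cutoff=[10000, 30000])
# band 0: ids [0, 10000)     -> embedding dim 512
# band 1: ids [10000, 30000) -> embedding dim 128
# band 2: ids [30000, 50000) -> embedding dim 32
tokens = torch.tensor([[5, 12000, 45000, 1]])
print(emb(tokens).shape)                         # torch.Size([1, 4, 512])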
|
class ConvTBC(torch.nn.Module):
'1D convolution over an input of shape (time x batch x channel)\n\n The implementation uses gemm to perform the convolution. This implementation\n is faster than cuDNN for small kernel sizes.\n '
def __init__(self, in_channels, out_channels, kernel_size, padding=0):
super(ConvTBC, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = _single(kernel_size)
self.padding = _single(padding)
self.weight = torch.nn.Parameter(torch.Tensor(self.kernel_size[0], in_channels, out_channels))
self.bias = torch.nn.Parameter(torch.Tensor(out_channels))
def forward(self, input):
return torch.conv_tbc(input.contiguous(), self.weight, self.bias, self.padding[0])
def __repr__(self):
s = '{name}({in_channels}, {out_channels}, kernel_size={kernel_size}, padding={padding}'
if (self.bias is None):
s += ', bias=False'
s += ')'
return s.format(name=self.__class__.__name__, **self.__dict__)
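A hedged usage sketch for ConvTBC: unlike nn.Conv1d it consumes (time, batch, channels) inputs, and its weights are allocated uninitialized above, so initialize them before use:
import torch

conv = ConvTBC(in_channels=8, out_channels=16, kernel_size=3, padding=1)
torch.nn.init.xavier_uniform_(conv.weight)
torch.nn.init.zeros_(conv.bias)
x = torch.randn(20, 4, 8)                        # T x B x C
y = conv(x)
print(y.shape)                                   # torch.Size([20, 4, 16]) with padding=1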
|
class DeLighTTransformerEncoderLayer(nn.Module):
'DeLight Encoder layer\n '
def __init__(self, args, embed_dim, width_multiplier=DEFAULT_WIDTH_MULTIPLIER, dextra_depth=DEFAULT_MIN_DEXTRA_LAYERS, dextra_proj=2):
super().__init__()
self.embed_dim = embed_dim
assert ((embed_dim % dextra_proj) == 0)
self.proj_dim = (embed_dim // dextra_proj)
self.dextra_layer = DExTraUnit(in_features=self.embed_dim, in_proj_features=self.proj_dim, out_features=self.proj_dim, width_multiplier=width_multiplier, dextra_depth=dextra_depth, dextra_dropout=args.delight_dropout, max_glt_groups=args.delight_enc_max_groups, act_type=args.act_type, use_bias=True, norm_type=args.norm_type, glt_shuffle=args.glt_shuffle, is_iclr_version=args.define_iclr)
self.self_attn = SingleHeadAttention(q_in_dim=self.proj_dim, kv_in_dim=self.proj_dim, proj_dim=self.proj_dim, out_dim=self.embed_dim, dropout=args.attention_dropout, bias=True, self_attention=True, encoder_decoder_attention=False)
self.self_attn_layer_norm = get_norm_layer(name=args.norm_type, out_features=self.embed_dim)
self.dropout = args.dropout
self.norm_fn = args.norm_type
self.act_type = args.act_type
self.activation_fn = get_activation_layer(name=args.act_type)
self.activation_dropout = getattr(args, 'activation_dropout', 0)
if (self.activation_dropout == 0):
self.activation_dropout = getattr(args, 'relu_dropout', 0)
self.normalize_before = args.encoder_normalize_before
self.ffn_dropout = args.ffn_dropout
ffn_red_factor = args.delight_enc_ffn_red
assert ((self.embed_dim % ffn_red_factor) == 0), '{}/{} should be a perfect divisor'.format(self.embed_dim, ffn_red_factor)
light_ffn_dim = (self.embed_dim // ffn_red_factor)
self.fc1 = get_weight_layer(name='linear', in_features=self.embed_dim, out_features=light_ffn_dim, use_bias=True)
self.fc2 = get_weight_layer(name='linear', in_features=light_ffn_dim, out_features=self.embed_dim, use_bias=True)
self.final_layer_norm = get_norm_layer(name=args.norm_type, out_features=self.embed_dim)
def __repr__(self):
s = '{name}(in_features={embed_dim}, out_features={embed_dim}, dropout={dropout}, activation_dropout={activation_dropout}, ffn_dropout={ffn_dropout}, activation_fn={act_type}, norm_fn={norm_fn})'
s += '\n \t Dextra Layer: \n \t \t {}'.format(self.dextra_layer)
s += '\n \t Self Attention: \n \t \t {}'.format(self.self_attn)
s += '\n \t Light-weight FFN: \n \t |---- {} \n \t |---- {}'.format(self.fc1, self.fc2)
return s.format(name=self.__class__.__name__, **self.__dict__)
def upgrade_state_dict_named(self, state_dict, name):
'\n Rename layer norm states from `...layer_norms.0.weight` to\n `...self_attn_layer_norm.weight` and `...layer_norms.1.weight` to\n `...final_layer_norm.weight`\n '
layer_norm_map = {'0': 'self_attn_layer_norm', '1': 'final_layer_norm'}
for (old, new) in layer_norm_map.items():
for m in ('weight', 'bias'):
k = '{}.layer_norms.{}.{}'.format(name, old, m)
if (k in state_dict):
state_dict['{}.{}.{}'.format(name, new, m)] = state_dict[k]
del state_dict[k]
def forward(self, x, encoder_padding_mask, attn_mask: Optional[Tensor]=None):
'\n Args:\n x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`\n encoder_padding_mask (ByteTensor): binary ByteTensor of shape\n `(batch, src_len)` where padding elements are indicated by ``1``.\n attn_mask (ByteTensor): binary tensor of shape (T_tgt, T_src), where\n T_tgt is the length of query, while T_src is the length of key,\n though here both query and key is x here,\n attn_mask[t_tgt, t_src] = 1 means when calculating embedding\n for t_tgt, t_src is excluded (or masked out), =0 means it is\n included in attention\n\n Returns:\n encoded output of shape `(seq_len, batch, embed_dim)`\n '
residual = x
if self.normalize_before:
x = self.self_attn_layer_norm(x)
if (attn_mask is not None):
attn_mask = attn_mask.masked_fill(attn_mask.to(torch.bool), (- 100000000.0))
x = self.dextra_layer(x)
(x, _) = self.self_attn(query=x, key_value=None, key_padding_mask=encoder_padding_mask, attn_mask=attn_mask)
x = F.dropout(x, p=self.dropout, training=self.training)
x = (residual + x)
if (not self.normalize_before):
x = self.self_attn_layer_norm(x)
residual = x
if self.normalize_before:
x = self.final_layer_norm(x)
x = self.activation_fn(self.fc1(x))
x = F.dropout(x, p=float(self.activation_dropout), training=self.training)
x = self.fc2(x)
x = F.dropout(x, p=self.ffn_dropout, training=self.training)
x = (residual + x)
if (not self.normalize_before):
x = self.final_layer_norm(x)
return x
def compute_macs_params(self, S=1):
macs = 0
n_params = 0
macs_attn = 0
n_params += sum([p.numel() for p in self.self_attn_layer_norm.parameters()])
dextra_layer = self.dextra_layer.compute_macs_params()
n_params += dextra_layer['params']
macs += (dextra_layer['macs'] * S)
self_attn_layer = self.self_attn.compute_macs_params(T=S, S=S)
macs += self_attn_layer['macs']
n_params += self_attn_layer['params']
macs_attn += self_attn_layer['macs_attn']
fc1_layer = self.fc1.compute_macs_params()
macs += (fc1_layer['macs'] * S)
n_params += fc1_layer['params']
fc2_layer = self.fc2.compute_macs_params()
macs += (fc2_layer['macs'] * S)
n_params += fc2_layer['params']
n_params += sum([p.numel() for p in self.final_layer_norm.parameters()])
return {'name': self.__class__.__name__, 'macs': macs, 'params': n_params, 'macs_attn': macs_attn}
|
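A minimal, standalone sketch of the pre-norm / post-norm residual pattern that the encoder layer's forward pass above follows via the `normalize_before` flag. The helper, layer norm, and sublayer here are generic torch stand-ins chosen only to illustrate the control flow, not the DeLighT modules themselves.
import torch
import torch.nn as nn
import torch.nn.functional as F

def residual_sublayer(x, sublayer, norm, normalize_before, p_drop=0.1, training=False):
    # pre-norm:  norm -> sublayer -> dropout -> residual add
    # post-norm: sublayer -> dropout -> residual add -> norm
    residual = x
    if normalize_before:
        x = norm(x)
    x = sublayer(x)
    x = F.dropout(x, p=p_drop, training=training)
    x = residual + x
    if not normalize_before:
        x = norm(x)
    return x

x = torch.randn(10, 2, 64)  # (seq_len, batch, embed_dim)
norm = nn.LayerNorm(64)
ffn = nn.Sequential(nn.Linear(64, 16), nn.ReLU(), nn.Linear(16, 64))  # reduced "light-weight FFN" shape
print(residual_sublayer(x, ffn, norm, normalize_before=True).shape)   # torch.Size([10, 2, 64])
|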
class DeLighTTransformerDecoderLayer(nn.Module):
'Delight Decoder layer\n '
def __init__(self, args, embed_dim, width_multiplier=DEFAULT_WIDTH_MULTIPLIER, dextra_depth=DEFAULT_MIN_DEXTRA_LAYERS, no_encoder_attn=False, dextra_proj=2, *unused_args, **unused_kwargs):
super().__init__()
self.embed_dim = embed_dim
assert ((embed_dim % dextra_proj) == 0)
self.proj_dim = (embed_dim // dextra_proj)
self.norm_fn = args.norm_type
self.act_type = args.act_type
self.dextra_layer_sa = DExTraUnit(in_features=self.embed_dim, in_proj_features=self.proj_dim, out_features=self.proj_dim, width_multiplier=width_multiplier, dextra_depth=dextra_depth, dextra_dropout=args.delight_dropout, max_glt_groups=args.delight_dec_max_groups, act_type=args.act_type, use_bias=True, norm_type=args.norm_type, glt_shuffle=args.glt_shuffle, is_iclr_version=args.define_iclr)
self.self_attn = SingleHeadAttention(q_in_dim=self.proj_dim, kv_in_dim=self.proj_dim, proj_dim=self.proj_dim, out_dim=self.embed_dim, dropout=args.attention_dropout, bias=True, self_attention=True, encoder_decoder_attention=False)
self.dropout = args.dropout
self.activation_fn = get_activation_layer(name=args.act_type)
self.activation_dropout = getattr(args, 'activation_dropout', 0)
if (self.activation_dropout == 0):
self.activation_dropout = getattr(args, 'relu_dropout', 0)
self.normalize_before = args.decoder_normalize_before
self.self_attn_layer_norm = get_norm_layer(name=args.norm_type, out_features=self.embed_dim)
if no_encoder_attn:
self.encoder_attn = None
self.encoder_attn_layer_norm = None
else:
q_embed_dim = self.embed_dim
self.encoder_attn = SingleHeadAttention(q_in_dim=q_embed_dim, kv_in_dim=self.embed_dim, proj_dim=self.proj_dim, out_dim=self.embed_dim, dropout=args.attention_dropout, bias=True, encoder_decoder_attention=True, self_attention=False)
self.encoder_attn_layer_norm = get_norm_layer(name=args.norm_type, out_features=self.embed_dim)
self.ffn_dropout = args.ffn_dropout
ffn_red_factor = args.delight_dec_ffn_red
assert ((self.embed_dim % ffn_red_factor) == 0), '{} should be divisible by {}'.format(self.embed_dim, ffn_red_factor)
light_ffn_dim = (self.embed_dim // ffn_red_factor)
self.fc1 = get_weight_layer(name='linear', in_features=self.embed_dim, out_features=light_ffn_dim, use_bias=True)
self.fc2 = get_weight_layer(name='linear', in_features=light_ffn_dim, out_features=self.embed_dim, use_bias=True)
self.final_layer_norm = get_norm_layer(name=args.norm_type, out_features=self.embed_dim)
self.need_attn = True
self.onnx_trace = False
def __repr__(self):
s = '{name}(in_features={embed_dim}, out_features={embed_dim}, dropout={dropout}, activation_dropout={activation_dropout}, ffn_dropout={ffn_dropout}, activation_fn={act_type}, norm_fn={norm_fn})'
s += '\n \t Dextra Layer (Query): \n \t \t {}'.format(self.dextra_layer_sa)
s += '\n \t Self Attention (Decoder): \n \t \t {}'.format(self.self_attn)
if (self.encoder_attn is not None):
s += '\n \t Encoder-Decoder Attention: \n \t \t {}'.format(self.encoder_attn)
s += '\n \t Light-weight FFN: \n \t |---- {} \n \t |---- {}'.format(self.fc1, self.fc2)
return s.format(name=self.__class__.__name__, **self.__dict__)
def prepare_for_onnx_export_(self):
self.onnx_trace = True
def forward(self, x, encoder_out: Optional[torch.Tensor]=None, encoder_padding_mask: Optional[torch.Tensor]=None, incremental_state: Optional[Dict[(str, Dict[(str, Optional[Tensor])])]]=None, prev_self_attn_state: Optional[List[torch.Tensor]]=None, prev_attn_state: Optional[List[torch.Tensor]]=None, self_attn_mask: Optional[torch.Tensor]=None, self_attn_padding_mask: Optional[torch.Tensor]=None, need_attn: bool=False, need_head_weights: bool=False):
'\n Args:\n x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`\n encoder_padding_mask (ByteTensor, optional): binary\n ByteTensor of shape `(batch, src_len)` where padding\n elements are indicated by ``1``.\n need_attn (bool, optional): return attention weights\n need_head_weights (bool, optional): return attention weights\n for each head (default: return average over heads).\n\n Returns:\n encoded output of shape `(seq_len, batch, embed_dim)`\n '
if need_head_weights:
need_attn = True
residual = x
if self.normalize_before:
x = self.self_attn_layer_norm(x)
x = self.dextra_layer_sa(x)
if (prev_self_attn_state is not None):
(prev_key, prev_value) = prev_self_attn_state[:2]
saved_state: Dict[(str, Optional[Tensor])] = {'prev_key': prev_key, 'prev_value': prev_value}
if (len(prev_self_attn_state) >= 3):
saved_state['prev_key_padding_mask'] = prev_self_attn_state[2]
assert (incremental_state is not None)
self.self_attn._set_input_buffer(incremental_state, saved_state)
(x, attn) = self.self_attn(query=x, key_value=None, key_padding_mask=self_attn_padding_mask, incremental_state=incremental_state, need_weights=False, attn_mask=self_attn_mask)
x = F.dropout(x, p=self.dropout, training=self.training)
x = (residual + x)
if (not self.normalize_before):
x = self.self_attn_layer_norm(x)
if (self.encoder_attn is not None):
residual = x
if self.normalize_before:
x = self.encoder_attn_layer_norm(x)
if (prev_attn_state is not None):
(prev_key, prev_value) = prev_attn_state[:2]
saved_state: Dict[(str, Optional[Tensor])] = {'prev_key': prev_key, 'prev_value': prev_value}
if (len(prev_attn_state) >= 3):
saved_state['prev_key_padding_mask'] = prev_attn_state[2]
assert (incremental_state is not None)
self.encoder_attn._set_input_buffer(incremental_state, saved_state)
(x, attn) = self.encoder_attn(query=x, key_value=encoder_out, key_padding_mask=encoder_padding_mask, incremental_state=incremental_state, static_kv=True, need_weights=(need_attn or ((not self.training) and self.need_attn)), need_head_weights=need_head_weights)
x = F.dropout(x, p=self.dropout, training=self.training)
x = (residual + x)
if (not self.normalize_before):
x = self.encoder_attn_layer_norm(x)
residual = x
if self.normalize_before:
x = self.final_layer_norm(x)
x = self.activation_fn(self.fc1(x))
x = F.dropout(x, p=float(self.activation_dropout), training=self.training)
x = self.fc2(x)
x = F.dropout(x, p=self.ffn_dropout, training=self.training)
x = (residual + x)
if (not self.normalize_before):
x = self.final_layer_norm(x)
if (self.onnx_trace and (incremental_state is not None)):
saved_state = self.self_attn._get_input_buffer(incremental_state)
assert (saved_state is not None)
if (self_attn_padding_mask is not None):
self_attn_state = [saved_state['prev_key'], saved_state['prev_value'], saved_state['prev_key_padding_mask']]
else:
self_attn_state = [saved_state['prev_key'], saved_state['prev_value']]
return (x, attn, self_attn_state)
return (x, attn, None)
def make_generation_fast_(self, need_attn: bool=False, **kwargs):
self.need_attn = need_attn
def compute_macs_params(self, T=1, S=1):
macs = 0
n_params = 0
macs_attn = 0
n_params += sum([p.numel() for p in self.self_attn_layer_norm.parameters()])
self_attn_layer = self.self_attn.compute_macs_params(T=T, S=T)
dextra_layer = self.dextra_layer_sa.compute_macs_params()
macs += (self_attn_layer['macs'] + (dextra_layer['macs'] * T))
n_params += (self_attn_layer['params'] + dextra_layer['params'])
macs_attn += self_attn_layer['macs_attn']
if (self.encoder_attn is not None):
n_params += sum([p.numel() for p in self.encoder_attn_layer_norm.parameters()])
enc_attn = self.encoder_attn.compute_macs_params(T=T, S=S)
macs += enc_attn['macs']
n_params += enc_attn['params']
macs_attn += enc_attn['macs_attn']
fc1_layer = self.fc1.compute_macs_params()
macs += (fc1_layer['macs'] * T)
n_params += fc1_layer['params']
fc2_layer = self.fc2.compute_macs_params()
macs += (fc2_layer['macs'] * T)
n_params += fc2_layer['params']
n_params += sum([p.numel() for p in self.final_layer_norm.parameters()])
return {'name': self.__class__.__name__, 'macs': macs, 'params': n_params, 'macs_attn': macs_attn}
|
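For orientation, the compute_macs_params methods above count multiply-accumulates by taking each linear layer's parameter count and multiplying it by the number of positions the layer is applied to (T for decoder-side projections, S for encoder-side ones). A tiny self-contained check of that convention on a plain nn.Linear; the helper is illustrative only and not part of the repository.
import torch.nn as nn

def linear_macs_params(layer: nn.Linear, positions: int):
    # params = weight (+ bias) elements; MACs ~= params applied once per position
    params = sum(p.numel() for p in layer.parameters())
    return {'macs': params * positions, 'params': params}

fc = nn.Linear(512, 128, bias=True)          # e.g. embed_dim -> light_ffn_dim
print(linear_macs_params(fc, positions=20))  # {'macs': 1313280, 'params': 65664}
|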
def gen_forward():
kernels = [3, 5, 7, 15, 31, 63, 127, 255]
blocks = [32, 64, 128, 256]
head = '\n/**\n * Copyright (c) Facebook, Inc. and its affiliates.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#include "dynamicconv_cuda.cuh"\n\nstd::vector<at::Tensor> dynamicconv_cuda_forward(at::Tensor input, at::Tensor weight, int padding_l) {\n\n at::DeviceGuard g(input.device());\n const auto minibatch = input.size(0);\n const auto numFeatures = input.size(1);\n const auto sequenceLength = input.size(2);\n\n const auto numHeads = weight.size(1);\n const auto filterSize = weight.size(2);\n\n const auto numFiltersInBlock = numFeatures / numHeads;\n const dim3 blocks(minibatch, numFeatures);\n\n auto output = at::zeros_like(input);\n auto stream = at::cuda::getCurrentCUDAStream();\n'
switch = '\n switch(filterSize) {\n'
case_k = '\n case {k}:\n'
main_block = '\n if (padding_l == {pad}) {{\n AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "dynamicconv_forward", ([&] {{\n dynamicconv_forward_kernel<{k}, {b_size}, {pad}, scalar_t>\n <<<blocks, {b_size}, 0, stream>>>(\n input.data<scalar_t>(),\n weight.data<scalar_t>(),\n minibatch,\n sequenceLength,\n numFeatures,\n numFiltersInBlock,\n numHeads,\n output.data<scalar_t>());\n }}));\n }} else\n'
bad_padding = '\n {\n std::cout << "WARNING: Unsupported padding size - skipping forward pass" << std::endl;\n }\n break;\n\n'
end = '\n default:\n std::cout << "WARNING: Unsupported filter length passed - skipping forward pass" << std::endl;\n }\n\n return {output};\n}\n'
with open('dynamicconv_cuda_forward.cu', 'w') as forward:
forward.write(head)
forward.write(switch)
for k in kernels:
b_size = 32
for b in blocks:
if (b > k):
b_size = b
break
forward.write(case_k.format(k=k))
for pad in [(k // 2), (k - 1)]:
forward.write(main_block.format(k=k, b_size=b_size, pad=pad))
forward.write(bad_padding)
forward.write(end)
|
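The dynamicconv generators are plain file writers: running them emits the CUDA dispatch sources, one templated kernel instantiation per (filter size, padding) pair, using the file names hard-coded above. A minimal usage sketch, assuming gen_forward and gen_backward (defined just below) live in the same module:
# Regenerate the CUDA dispatch sources in the current working directory.
# Overwrites dynamicconv_cuda_forward.cu and dynamicconv_cuda_backward.cu.
if __name__ == '__main__':
    gen_forward()
    gen_backward()
|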
def gen_backward():
kernels = [3, 5, 7, 15, 31, 63, 127, 255]
thresh = [512, 512, 512, 512, 512, 380, 256, 256]
min_block = [64, 64, 64, 64, 64, 64, 128, 256]
seqs = [(32 * x) for x in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]]
head = '\n/**\n * Copyright (c) Facebook, Inc. and its affiliates.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#include "dynamicconv_cuda.cuh"\n\nstd::vector<at::Tensor> dynamicconv_cuda_backward(at::Tensor gradOutput, int padding_l, at::Tensor input, at::Tensor weight) {\n\n at::DeviceGuard g(input.device());\n const auto minibatch = input.size(0);\n const auto numFeatures = input.size(1);\n const auto sequenceLength = input.size(2);\n\n const auto numHeads = weight.size(1);\n const auto filterSize = weight.size(2);\n\n const auto numFiltersInBlock = numFeatures / numHeads;\n auto numChunks = 1;\n\n auto gradInput = at::zeros_like(input);\n auto gradWeight = at::zeros_like(weight);\n auto stream = at::cuda::getCurrentCUDAStream();\n\n dim3 blocks(minibatch, numHeads, numChunks);\n'
sequence_if = '\n if (sequenceLength < {seq}) {{\n switch(filterSize) {{\n'
case_k = '\n case {k}:\n'
chunks_reset = '\n numChunks = int(ceilf(sequenceLength/float({b_size})));\n blocks = dim3(minibatch, numHeads, numChunks);\n'
main_block = '\n if (padding_l == {p}) {{\n AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {{\n dynamicconv_backward_kernel<{k}, {b_size}, {p}, scalar_t>\n <<<blocks, {b_size}, 0, stream>>>(\n gradOutput.data<scalar_t>(),\n input.data<scalar_t>(),\n weight.data<scalar_t>(),\n minibatch,\n sequenceLength,\n numFeatures,\n numFiltersInBlock,\n numHeads,\n gradWeight.data<scalar_t>(),\n gradInput.data<scalar_t>());\n }}));\n }} else\n'
bad_padding = '\n {\n std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;\n }\n break;\n\n'
bad_filter = '\n default:\n std::cout << "WARNING: Unsupported filter length passed - skipping backward pass" << std::endl;\n }\n'
con_else = '\n } else\n'
final_else = '\n {\n switch(filterSize) {\n'
last_return = '\n }\n return {gradInput, gradWeight};\n}\n'
with open('dynamicconv_cuda_backward.cu', 'w') as backward:
backward.write(head)
for seq in seqs:
backward.write(sequence_if.format(seq=seq))
for (k, t, m) in zip(kernels, thresh, min_block):
backward.write(case_k.format(k=k))
if (seq <= t):
b_size = seq
else:
b_size = m
backward.write(chunks_reset.format(b_size=b_size))
for p in [(k // 2), (k - 1)]:
backward.write(main_block.format(k=k, b_size=b_size, p=p))
backward.write(bad_padding)
backward.write(bad_filter)
backward.write(con_else)
backward.write(final_else)
for (k, m) in zip(kernels, min_block):
backward.write(case_k.format(k=k))
backward.write(chunks_reset.format(b_size=m))
for p in [(k // 2), (k - 1)]:
backward.write(main_block.format(k=k, b_size=m, p=p))
backward.write(bad_padding)
backward.write(bad_filter)
backward.write(last_return)
|
def gelu_accurate(x):
if (not hasattr(gelu_accurate, '_a')):
gelu_accurate._a = math.sqrt((2 / math.pi))
return ((0.5 * x) * (1 + torch.tanh((gelu_accurate._a * (x + (0.044715 * torch.pow(x, 3)))))))
|
def gelu(x: torch.Tensor) -> torch.Tensor:
if hasattr(torch.nn.functional, 'gelu'):
return torch.nn.functional.gelu(x.float()).type_as(x)
else:
return ((x * 0.5) * (1.0 + torch.erf((x / math.sqrt(2.0)))))
|
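A quick numerical sanity check: gelu_accurate is the tanh approximation, while gelu above prefers the exact erf-based torch implementation when it is available; the two agree closely. Minimal sketch assuming both functions are importable from this module.
import torch

x = torch.linspace(-3, 3, steps=7)
approx = gelu_accurate(x)   # tanh approximation
exact = gelu(x)             # erf-based (torch.nn.functional.gelu when present)
print(torch.max(torch.abs(approx - exact)).item())  # small difference, roughly 1e-3 or below
|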
class GradMultiply(torch.autograd.Function):
@staticmethod
def forward(ctx, x, scale):
ctx.scale = scale
res = x.new(x)
return res
@staticmethod
def backward(ctx, grad):
return ((grad * ctx.scale), None)
|
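GradMultiply passes activations through unchanged and scales only the gradient in the backward pass. A minimal check of that behaviour, assuming the class above is importable:
import torch

x = torch.ones(3, requires_grad=True)
y = GradMultiply.apply(x, 0.5)   # forward is the identity
y.sum().backward()
print(y.detach())                # tensor([1., 1., 1.])
print(x.grad)                    # tensor([0.5000, 0.5000, 0.5000]) -- gradient scaled by 0.5
|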
class Highway(torch.nn.Module):
'\n A `Highway layer <https://arxiv.org/abs/1505.00387>`_.\n Adopted from the AllenNLP implementation.\n '
def __init__(self, input_dim: int, num_layers: int=1):
super(Highway, self).__init__()
self.input_dim = input_dim
self.layers = nn.ModuleList([nn.Linear(input_dim, (input_dim * 2)) for _ in range(num_layers)])
self.activation = nn.ReLU()
self.reset_parameters()
def reset_parameters(self):
for layer in self.layers:
nn.init.constant_(layer.bias[self.input_dim:], 1)
nn.init.constant_(layer.bias[:self.input_dim], 0)
nn.init.xavier_normal_(layer.weight)
def forward(self, x: torch.Tensor):
for layer in self.layers:
projection = layer(x)
(proj_x, gate) = projection.chunk(2, dim=(- 1))
proj_x = self.activation(proj_x)
gate = torch.sigmoid(gate)
x = ((gate * x) + ((gate.new_tensor([1]) - gate) * proj_x))
return x
|
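Usage sketch for the Highway block above; each layer gates between the input and its non-linear projection, so the output shape matches the input shape.
import torch

highway = Highway(input_dim=16, num_layers=2)
x = torch.randn(4, 16)
out = highway(x)
print(out.shape)  # torch.Size([4, 16]) -- same shape as the input
|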
def LayerNorm(normalized_shape, eps=1e-05, elementwise_affine=True, export=False):
if ((not export) and torch.cuda.is_available()):
try:
from apex.normalization import FusedLayerNorm
return FusedLayerNorm(normalized_shape, eps, elementwise_affine)
except ImportError:
pass
return torch.nn.LayerNorm(normalized_shape, eps, elementwise_affine)
|
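The LayerNorm factory above silently prefers apex's FusedLayerNorm on CUDA machines and falls back to torch.nn.LayerNorm otherwise, or whenever export=True. A small sketch:
import torch

ln = LayerNorm(512, eps=1e-5, export=True)   # export=True forces the plain torch implementation
x = torch.randn(10, 2, 512)
print(type(ln).__name__, ln(x).shape)        # LayerNorm torch.Size([10, 2, 512])
|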
class LearnedPositionalEmbedding(nn.Embedding):
'\n This module learns positional embeddings up to a fixed maximum size.\n Padding ids are ignored by either offsetting based on padding_idx\n or by setting padding_idx to None and ensuring that the appropriate\n position ids are passed to the forward function.\n '
def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int):
super().__init__(num_embeddings, embedding_dim, padding_idx)
self.onnx_trace = False
if (self.padding_idx is not None):
self.max_positions = ((self.num_embeddings - self.padding_idx) - 1)
else:
self.max_positions = self.num_embeddings
def forward(self, input, incremental_state=None, positions=None):
'Input is expected to be of size [bsz x seqlen].'
assert ((positions is None) or (self.padding_idx is None)), 'If positions is pre-computed then padding_idx should not be set.'
if (positions is None):
if (incremental_state is not None):
positions = input.data.new(1, 1).fill_(int((self.padding_idx + input.size(1))))
else:
positions = utils.make_positions(input, self.padding_idx, onnx_trace=self.onnx_trace)
return super().forward(positions)
|
def gen_forward():
kernels = [3, 5, 7, 15, 31, 63, 127, 255]
seqs = [(32 * x) for x in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]]
head = '\n/**\n * Copyright (c) Facebook, Inc. and its affiliates.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#include "lightconv_cuda.cuh"\n\nstd::vector<at::Tensor> lightconv_cuda_forward(at::Tensor input, at::Tensor filters, int padding_l) {\n\n at::DeviceGuard g(input.device());\n const auto minibatch = input.size(0);\n const auto numFeatures = input.size(1);\n const auto sequenceLength = input.size(2);\n\n const auto numHeads = filters.size(0);\n const auto filterSize = filters.size(1);\n\n const auto numFiltersInBlock = numFeatures / numHeads;\n\n const dim3 blocks(minibatch, numFeatures);\n\n auto output = at::zeros_like(input);\n auto stream = at::cuda::getCurrentCUDAStream();\n'
sequence_if = '\n if (sequenceLength <= {seq}) {{\n switch(filterSize) {{\n'
case_k = '\n case {k}:\n'
main_block = '\n if (padding_l == {pad}) {{\n AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "lightconv_forward", ([&] {{\n lightconv_forward_kernel<{k}, {b_size}, {pad}, scalar_t>\n <<<blocks, {b_size}, 0, stream>>>(\n input.data<scalar_t>(),\n filters.data<scalar_t>(),\n minibatch,\n sequenceLength,\n numFeatures,\n numFiltersInBlock,\n output.data<scalar_t>());\n }}));\n }} else\n'
bad_padding = '\n {\n std::cout << "WARNING: Unsupported padding size - skipping forward pass" << std::endl;\n }\n break;\n'
bad_filter = '\n default:\n std::cout << "WARNING: Unsupported filter length passed - skipping forward pass" << std::endl;\n }\n'
con_else = '\n } else\n'
final_else = '\n {\n switch(filterSize) {\n'
final_return = '\n }\n\n return {output};\n}\n'
with open('lightconv_cuda_forward.cu', 'w') as forward:
forward.write(head)
for seq in seqs:
forward.write(sequence_if.format(seq=seq))
for k in kernels:
forward.write(case_k.format(k=k))
for pad in [(k // 2), (k - 1)]:
forward.write(main_block.format(k=k, b_size=seq, pad=pad))
forward.write(bad_padding)
forward.write(bad_filter)
forward.write(con_else)
forward.write(final_else)
for k in kernels:
forward.write(case_k.format(k=k))
for pad in [(k // 2), (k - 1)]:
forward.write(main_block.format(k=k, b_size=seq, pad=pad))
forward.write(bad_padding)
forward.write(bad_filter)
forward.write(final_return)
|
def gen_backward():
head = '\n/**\n * Copyright (c) Facebook, Inc. and its affiliates.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#include "lightconv_cuda.cuh"\n\nstd::vector<at::Tensor> lightconv_cuda_backward(\n at::Tensor gradOutput,\n int padding_l,\n at::Tensor input,\n at::Tensor filters) {\n\n // gradWrtInput\n const int minibatch = input.size(0);\n const int numFeatures = input.size(1);\n const int sequenceLength = input.size(2);\n\n const int numHeads = filters.size(0);\n const int filterSize = filters.size(1);\n\n const dim3 gradBlocks(minibatch, numFeatures);\n const dim3 weightGradFirstpassShortBlocks(minibatch, numHeads);\n const dim3 weightGradSecondpassBlocks(numHeads, filterSize);\n\n const int numFiltersInBlock = numFeatures / numHeads;\n\n auto gradInput = at::zeros_like(input);\n auto gradFilters = at::zeros_like(filters);\n\n at::DeviceGuard g(input.device());\n auto stream = at::cuda::getCurrentCUDAStream();\n\n switch(filterSize) {\n'
sequence_if = '\n if (sequenceLength <= {seq}) {{\n'
case_k = '\n case {k}:\n'
main_block = '\n if (padding_l == {p}) {{\n AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "lightconv_backward", ([&] {{\n lightconv_grad_wrt_input_kernel<{k}, {b_size}, {p}, scalar_t>\n <<<gradBlocks, {b_size}, 0, stream>>>(\n gradOutput.data<scalar_t>(),\n filters.data<scalar_t>(),\n minibatch,\n sequenceLength,\n numFeatures,\n numFiltersInBlock,\n gradInput.data<scalar_t>());\n\n'
weight_grad_short = '\n at::Tensor tempSumGradFilters = at::zeros({{minibatch, numHeads, filterSize}}, input.options().dtype(at::kFloat));\n lightconv_grad_wrt_weights_firstpass_short_kernel<{k}, {b_size}, {p}, scalar_t>\n <<<weightGradFirstpassShortBlocks, {b_size}, 0, stream>>>(\n input.data<scalar_t>(),\n gradOutput.data<scalar_t>(),\n minibatch,\n sequenceLength,\n numFeatures,\n numFiltersInBlock,\n numHeads,\n tempSumGradFilters.data<float>()\n );\n\n lightconv_grad_wrt_weights_secondpass_short_kernel<{k}, {b_size}, scalar_t>\n <<<weightGradSecondpassBlocks, {b_size}, 0, stream>>>(\n tempSumGradFilters.data<float>(),\n minibatch,\n numFiltersInBlock,\n gradFilters.data<scalar_t>()\n );\n }}));\n }} else\n'
weight_grad = '\n at::Tensor tempSumGradFilters = at::zeros({{minibatch, numFeatures, filterSize}}, input.options().dtype(at::kFloat));\n lightconv_grad_wrt_weights_firstpass_kernel<{k}, {b_size}, {p}, scalar_t>\n <<<gradBlocks, {b_size}, 0, stream>>>(\n input.data<scalar_t>(),\n gradOutput.data<scalar_t>(),\n minibatch,\n sequenceLength,\n numFeatures,\n numFiltersInBlock,\n tempSumGradFilters.data<float>()\n );\n\n lightconv_grad_wrt_weights_secondpass_kernel<{k}, {b_size}, scalar_t>\n <<<weightGradSecondpassBlocks, {b_size}, 0, stream>>>(\n tempSumGradFilters.data<float>(),\n minibatch,\n numFiltersInBlock,\n gradFilters.data<scalar_t>()\n );\n }}));\n }} else\n'
bad_padding = '\n {\n std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;\n }\n'
breakout = '\n break;\n'
bad_filter = '\n default:\n std::cout << "WARNING: Unsupported filter length passed - skipping backward pass" << std::endl;\n'
con_else = '\n } else\n'
final_else = '\n {\n switch(filterSize) {\n'
last_return = '\n }\n return {gradInput, gradFilters};\n}\n'
kernels = [3, 5, 7, 15, 31, 63, 127, 255]
seqs = [(32 * x) for x in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]]
thresh = [32, 32, 64, 128, 256, (- 1), (- 1), (- 1)]
max_mem = [(- 1), (- 1), (- 1), (- 1), (- 1), 192, 96, 64]
with open('lightconv_cuda_backward.cu', 'w') as backward:
backward.write(head)
for (k, t, mem) in zip(kernels, thresh, max_mem):
backward.write(case_k.format(k=k))
for seq in seqs:
if (((t == (- 1)) or (seq <= t)) and ((mem == (- 1)) or (seq < mem))):
backward.write(sequence_if.format(seq=seq))
for p in [(k // 2), (k - 1)]:
backward.write(main_block.format(k=k, b_size=seq, p=p))
backward.write(weight_grad_short.format(k=k, b_size=seq, p=p))
backward.write(bad_padding)
else:
for p in [(k // 2), (k - 1)]:
backward.write(main_block.format(k=k, b_size=32, p=p))
backward.write(weight_grad.format(k=k, b_size=32, p=p))
backward.write(bad_padding)
backward.write(breakout)
break
backward.write(con_else)
backward.write(bad_filter)
backward.write(last_return)
|
class LogSumExpMoE(torch.autograd.Function):
'Standard LogSumExp forward pass, but use *posterior* for the backward.\n\n See `"Mixture Models for Diverse Machine Translation: Tricks of the Trade"\n (Shen et al., 2019) <https://arxiv.org/abs/1902.07816>`_.\n '
@staticmethod
def forward(ctx, logp, posterior, dim=(- 1)):
ctx.save_for_backward(posterior)
ctx.dim = dim
return torch.logsumexp(logp, dim=dim)
@staticmethod
def backward(ctx, grad_output):
(posterior,) = ctx.saved_tensors
grad_logp = (grad_output.unsqueeze(ctx.dim) * posterior)
return (grad_logp, None, None)
|
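LogSumExpMoE computes a standard log-sum-exp in the forward pass but routes the backward pass through the supplied posterior (Shen et al., 2019). A small sketch with a batch of 2 examples and 3 experts, assuming the class above is importable:
import torch

logp = torch.randn(2, 3, requires_grad=True)          # per-expert log-probabilities
posterior = torch.softmax(torch.randn(2, 3), dim=-1)  # responsibilities, one per expert
out = LogSumExpMoE.apply(logp, posterior, -1)          # same value as torch.logsumexp(logp, -1)
out.sum().backward()
print(torch.allclose(out, torch.logsumexp(logp, dim=-1)))  # True
print(torch.allclose(logp.grad, posterior))                # True: the gradient is the posterior
|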
class MeanPoolGatingNetwork(torch.nn.Module):
"A simple mean-pooling gating network for selecting experts.\n\n This module applies mean pooling over an encoder's output and returns\n reponsibilities for each expert. The encoder format is expected to match\n :class:`fairseq.models.transformer.TransformerEncoder`.\n "
def __init__(self, embed_dim, num_experts, dropout=None):
super().__init__()
self.embed_dim = embed_dim
self.num_experts = num_experts
self.fc1 = torch.nn.Linear(embed_dim, embed_dim)
self.dropout = (torch.nn.Dropout(dropout) if (dropout is not None) else None)
self.fc2 = torch.nn.Linear(embed_dim, num_experts)
def forward(self, encoder_out):
if (not (hasattr(encoder_out, 'encoder_out') and hasattr(encoder_out, 'encoder_padding_mask') and (encoder_out.encoder_out.size(2) == self.embed_dim))):
raise ValueError('Unexpected format for encoder_out')
encoder_padding_mask = encoder_out.encoder_padding_mask
encoder_out = encoder_out.encoder_out.transpose(0, 1)
if (encoder_padding_mask is not None):
encoder_out = encoder_out.clone()
encoder_out[encoder_padding_mask] = 0
ntokens = torch.sum((~ encoder_padding_mask), dim=1, keepdim=True)
x = (torch.sum(encoder_out, dim=1) / ntokens.type_as(encoder_out))
else:
x = torch.mean(encoder_out, dim=1)
x = torch.tanh(self.fc1(x))
if (self.dropout is not None):
x = self.dropout(x)
x = self.fc2(x)
return F.log_softmax(x, dim=(- 1), dtype=torch.float32).type_as(x)
|
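MeanPoolGatingNetwork expects the encoder output container used by the transformer encoder (fields encoder_out of shape (src_len, batch, embed_dim) and encoder_padding_mask). A hedged sketch that mimics that container with a local namedtuple rather than importing the real EncoderOut type:
import torch
from collections import namedtuple

FakeEncoderOut = namedtuple('FakeEncoderOut', ['encoder_out', 'encoder_padding_mask'])

gating = MeanPoolGatingNetwork(embed_dim=8, num_experts=3, dropout=0.1)
enc = FakeEncoderOut(encoder_out=torch.randn(5, 2, 8),  # (src_len, batch, embed_dim)
                     encoder_padding_mask=None)
log_prior = gating(enc)
print(log_prior.shape)              # torch.Size([2, 3]) -- log-responsibilities per expert
print(log_prior.exp().sum(dim=-1))  # ~1.0 for each example
|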
@with_incremental_state
class MultiheadAttention(nn.Module):
'Multi-headed attention.\n\n See "Attention Is All You Need" for more details.\n '
def __init__(self, embed_dim, num_heads, kdim=None, vdim=None, dropout=0.0, bias=True, add_bias_kv=False, add_zero_attn=False, self_attention=False, encoder_decoder_attention=False):
super().__init__()
self.embed_dim = embed_dim
self.kdim = (kdim if (kdim is not None) else embed_dim)
self.vdim = (vdim if (vdim is not None) else embed_dim)
self.qkv_same_dim = ((self.kdim == embed_dim) and (self.vdim == embed_dim))
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = (embed_dim // num_heads)
assert ((self.head_dim * num_heads) == self.embed_dim), 'embed_dim must be divisible by num_heads'
self.scaling = (self.head_dim ** (- 0.5))
self.self_attention = self_attention
self.encoder_decoder_attention = encoder_decoder_attention
assert ((not self.self_attention) or self.qkv_same_dim), 'Self-attention requires query, key and value to be of the same size'
self.k_proj = nn.Linear(self.kdim, embed_dim, bias=bias)
self.v_proj = nn.Linear(self.vdim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
if add_bias_kv:
self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self.reset_parameters()
self.onnx_trace = False
self.enable_torch_version = False
if hasattr(F, 'multi_head_attention_forward'):
self.enable_torch_version = True
else:
self.enable_torch_version = False
def prepare_for_onnx_export_(self):
self.onnx_trace = True
def reset_parameters(self):
if self.qkv_same_dim:
nn.init.xavier_uniform_(self.k_proj.weight, gain=(1 / math.sqrt(2)))
nn.init.xavier_uniform_(self.v_proj.weight, gain=(1 / math.sqrt(2)))
nn.init.xavier_uniform_(self.q_proj.weight, gain=(1 / math.sqrt(2)))
else:
nn.init.xavier_uniform_(self.k_proj.weight)
nn.init.xavier_uniform_(self.v_proj.weight)
nn.init.xavier_uniform_(self.q_proj.weight)
nn.init.xavier_uniform_(self.out_proj.weight)
if (self.out_proj.bias is not None):
nn.init.constant_(self.out_proj.bias, 0.0)
if (self.bias_k is not None):
nn.init.xavier_normal_(self.bias_k)
if (self.bias_v is not None):
nn.init.xavier_normal_(self.bias_v)
def forward(self, query, key: Optional[Tensor], value: Optional[Tensor], key_padding_mask: Optional[Tensor]=None, incremental_state: Optional[Dict[(str, Dict[(str, Optional[Tensor])])]]=None, need_weights: bool=True, static_kv: bool=False, attn_mask: Optional[Tensor]=None, before_softmax: bool=False, need_head_weights: bool=False) -> Tuple[(Tensor, Optional[Tensor])]:
'Input shape: Time x Batch x Channel\n\n Args:\n key_padding_mask (ByteTensor, optional): mask to exclude\n keys that are pads, of shape `(batch, src_len)`, where\n padding elements are indicated by 1s.\n need_weights (bool, optional): return the attention weights,\n averaged over heads (default: False).\n attn_mask (ByteTensor, optional): typically used to\n implement causal attention, where the mask prevents the\n attention from looking forward in time (default: None).\n before_softmax (bool, optional): return the raw attention\n weights and values before the attention softmax.\n need_head_weights (bool, optional): return the attention\n weights for each head. Implies *need_weights*. Default:\n return the average attention weights over all heads.\n '
if need_head_weights:
need_weights = True
(tgt_len, bsz, embed_dim) = query.size()
assert (embed_dim == self.embed_dim)
assert (list(query.size()) == [tgt_len, bsz, embed_dim])
if (self.enable_torch_version and (not self.onnx_trace) and (incremental_state is None) and (not static_kv)):
assert ((key is not None) and (value is not None))
return F.multi_head_attention_forward(query, key, value, self.embed_dim, self.num_heads, torch.empty([0]), torch.cat((self.q_proj.bias, self.k_proj.bias, self.v_proj.bias)), self.bias_k, self.bias_v, self.add_zero_attn, self.dropout, self.out_proj.weight, self.out_proj.bias, self.training, key_padding_mask, need_weights, attn_mask, use_separate_proj_weight=True, q_proj_weight=self.q_proj.weight, k_proj_weight=self.k_proj.weight, v_proj_weight=self.v_proj.weight)
if (incremental_state is not None):
saved_state = self._get_input_buffer(incremental_state)
if ((saved_state is not None) and ('prev_key' in saved_state)):
if static_kv:
assert (self.encoder_decoder_attention and (not self.self_attention))
key = value = None
else:
saved_state = None
if self.self_attention:
q = self.q_proj(query)
k = self.k_proj(query)
v = self.v_proj(query)
elif self.encoder_decoder_attention:
q = self.q_proj(query)
if (key is None):
assert (value is None)
k = v = None
else:
k = self.k_proj(key)
v = self.v_proj(key)
else:
assert ((key is not None) and (value is not None))
q = self.q_proj(query)
k = self.k_proj(key)
v = self.v_proj(value)
q *= self.scaling
if (self.bias_k is not None):
assert (self.bias_v is not None)
k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])
v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])
if (attn_mask is not None):
attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1)
if (key_padding_mask is not None):
key_padding_mask = torch.cat([key_padding_mask, key_padding_mask.new_zeros(key_padding_mask.size(0), 1)], dim=1)
q = q.contiguous().view(tgt_len, (bsz * self.num_heads), self.head_dim).transpose(0, 1)
if (k is not None):
k = k.contiguous().view((- 1), (bsz * self.num_heads), self.head_dim).transpose(0, 1)
if (v is not None):
v = v.contiguous().view((- 1), (bsz * self.num_heads), self.head_dim).transpose(0, 1)
if (saved_state is not None):
if ('prev_key' in saved_state):
_prev_key = saved_state['prev_key']
assert (_prev_key is not None)
prev_key = _prev_key.view((bsz * self.num_heads), (- 1), self.head_dim)
if static_kv:
k = prev_key
else:
assert (k is not None)
k = torch.cat([prev_key, k], dim=1)
if ('prev_value' in saved_state):
_prev_value = saved_state['prev_value']
assert (_prev_value is not None)
prev_value = _prev_value.view((bsz * self.num_heads), (- 1), self.head_dim)
if static_kv:
v = prev_value
else:
assert (v is not None)
v = torch.cat([prev_value, v], dim=1)
prev_key_padding_mask: Optional[Tensor] = None
if ('prev_key_padding_mask' in saved_state):
prev_key_padding_mask = saved_state['prev_key_padding_mask']
assert ((k is not None) and (v is not None))
key_padding_mask = MultiheadAttention._append_prev_key_padding_mask(key_padding_mask=key_padding_mask, prev_key_padding_mask=prev_key_padding_mask, batch_size=bsz, src_len=k.size(1), static_kv=static_kv)
saved_state['prev_key'] = k.view(bsz, self.num_heads, (- 1), self.head_dim)
saved_state['prev_value'] = v.view(bsz, self.num_heads, (- 1), self.head_dim)
saved_state['prev_key_padding_mask'] = key_padding_mask
assert (incremental_state is not None)
incremental_state = self._set_input_buffer(incremental_state, saved_state)
assert (k is not None)
src_len = k.size(1)
if ((key_padding_mask is not None) and (key_padding_mask.dim() == 0)):
key_padding_mask = None
if (key_padding_mask is not None):
assert (key_padding_mask.size(0) == bsz)
assert (key_padding_mask.size(1) == src_len)
if self.add_zero_attn:
assert (v is not None)
src_len += 1
k = torch.cat([k, k.new_zeros(((k.size(0), 1) + k.size()[2:]))], dim=1)
v = torch.cat([v, v.new_zeros(((v.size(0), 1) + v.size()[2:]))], dim=1)
if (attn_mask is not None):
attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1)
if (key_padding_mask is not None):
key_padding_mask = torch.cat([key_padding_mask, torch.zeros(key_padding_mask.size(0), 1).type_as(key_padding_mask)], dim=1)
attn_weights = torch.bmm(q, k.transpose(1, 2))
attn_weights = MultiheadAttention.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz)
assert (list(attn_weights.size()) == [(bsz * self.num_heads), tgt_len, src_len])
if (attn_mask is not None):
attn_mask = attn_mask.unsqueeze(0)
if self.onnx_trace:
attn_mask = attn_mask.repeat(attn_weights.size(0), 1, 1)
attn_weights += attn_mask
if (key_padding_mask is not None):
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights.masked_fill(key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool), float('-inf'))
attn_weights = attn_weights.view((bsz * self.num_heads), tgt_len, src_len)
if before_softmax:
return (attn_weights, v)
attn_weights_float = utils.softmax(attn_weights, dim=(- 1), onnx_trace=self.onnx_trace)
attn_weights = attn_weights_float.type_as(attn_weights)
attn_probs = F.dropout(attn_weights_float.type_as(attn_weights), p=self.dropout, training=self.training)
assert (v is not None)
attn = torch.bmm(attn_probs, v)
assert (list(attn.size()) == [(bsz * self.num_heads), tgt_len, self.head_dim])
if (self.onnx_trace and (attn.size(1) == 1)):
attn = attn.contiguous().view(tgt_len, bsz, embed_dim)
else:
attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
attn = self.out_proj(attn)
attn_weights: Optional[Tensor] = None
if need_weights:
attn_weights = attn_weights_float.view(bsz, self.num_heads, tgt_len, src_len).transpose(1, 0)
if (not need_head_weights):
attn_weights = attn_weights.mean(dim=0)
return (attn, attn_weights)
@staticmethod
def _append_prev_key_padding_mask(key_padding_mask: Optional[Tensor], prev_key_padding_mask: Optional[Tensor], batch_size: int, src_len: int, static_kv: bool) -> Optional[Tensor]:
if ((prev_key_padding_mask is not None) and static_kv):
new_key_padding_mask = prev_key_padding_mask
elif ((prev_key_padding_mask is not None) and (key_padding_mask is not None)):
new_key_padding_mask = torch.cat([prev_key_padding_mask.float(), key_padding_mask.float()], dim=1)
elif (prev_key_padding_mask is not None):
filler = torch.zeros(batch_size, (src_len - prev_key_padding_mask.size(1)))
if prev_key_padding_mask.is_cuda:
filler = filler.cuda()
new_key_padding_mask = torch.cat([prev_key_padding_mask.float(), filler.float()], dim=1)
elif (key_padding_mask is not None):
filler = torch.zeros(batch_size, (src_len - key_padding_mask.size(1)))
if key_padding_mask.is_cuda:
filler = filler.cuda()
new_key_padding_mask = torch.cat([filler.float(), key_padding_mask.float()], dim=1)
else:
new_key_padding_mask = prev_key_padding_mask
return new_key_padding_mask
def reorder_incremental_state(self, incremental_state: Dict[(str, Dict[(str, Optional[Tensor])])], new_order):
'Reorder buffered internal state (for incremental generation).'
input_buffer = self._get_input_buffer(incremental_state)
if (input_buffer is not None):
for k in input_buffer.keys():
input_buffer_k = input_buffer[k]
if (input_buffer_k is not None):
input_buffer[k] = input_buffer_k.index_select(0, new_order)
incremental_state = self._set_input_buffer(incremental_state, input_buffer)
return incremental_state
def _get_input_buffer(self, incremental_state: Optional[Dict[(str, Dict[(str, Optional[Tensor])])]]) -> Dict[(str, Optional[Tensor])]:
result = self.get_incremental_state(incremental_state, 'attn_state')
if (result is not None):
return result
else:
empty_result: Dict[(str, Optional[Tensor])] = {}
return empty_result
def _set_input_buffer(self, incremental_state: Dict[(str, Dict[(str, Optional[Tensor])])], buffer: Dict[(str, Optional[Tensor])]):
return self.set_incremental_state(incremental_state, 'attn_state', buffer)
def apply_sparse_mask(attn_weights, tgt_len: int, src_len: int, bsz: int):
return attn_weights
def upgrade_state_dict_named(self, state_dict, name):
prefix = ((name + '.') if (name != '') else '')
items_to_add = {}
keys_to_remove = []
for k in state_dict.keys():
if k.endswith((prefix + 'in_proj_weight')):
dim = int((state_dict[k].shape[0] / 3))
items_to_add[(prefix + 'q_proj.weight')] = state_dict[k][:dim]
items_to_add[(prefix + 'k_proj.weight')] = state_dict[k][dim:(2 * dim)]
items_to_add[(prefix + 'v_proj.weight')] = state_dict[k][(2 * dim):]
keys_to_remove.append(k)
k_bias = (prefix + 'in_proj_bias')
if (k_bias in state_dict.keys()):
dim = int((state_dict[k].shape[0] / 3))
items_to_add[(prefix + 'q_proj.bias')] = state_dict[k_bias][:dim]
items_to_add[(prefix + 'k_proj.bias')] = state_dict[k_bias][dim:(2 * dim)]
items_to_add[(prefix + 'v_proj.bias')] = state_dict[k_bias][(2 * dim):]
keys_to_remove.append((prefix + 'in_proj_bias'))
for k in keys_to_remove:
del state_dict[k]
for (key, value) in items_to_add.items():
state_dict[key] = value
def compute_macs_params(self, T=1, S=1):
macs = 0
n_params = 0
C = self.embed_dim
num_macs_kq = ((T * S) * C)
num_macs_v = ((T * C) * S)
macs += (num_macs_kq + num_macs_v)
if self.self_attention:
assert (T == S)
q_params = sum([p.numel() for p in self.q_proj.parameters()])
k_params = sum([p.numel() for p in self.k_proj.parameters()])
v_params = sum([p.numel() for p in self.v_proj.parameters()])
macs += (((q_params * T) + (k_params * T)) + (v_params * T))
n_params += ((q_params + v_params) + k_params)
elif self.encoder_decoder_attention:
q_params = sum([p.numel() for p in self.q_proj.parameters()])
k_params = sum([p.numel() for p in self.k_proj.parameters()])
v_params = sum([p.numel() for p in self.v_proj.parameters()])
macs += (((q_params * T) + (k_params * S)) + (v_params * S))
n_params += ((q_params + v_params) + k_params)
else:
raise NotImplementedError
out_params = sum([p.numel() for p in self.out_proj.parameters()])
macs += (out_params * T)
n_params += out_params
return {'name': self.__class__.__name__, 'macs': macs, 'params': n_params, 'macs_attn': (num_macs_kq + num_macs_v)}
|
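A minimal self-attention call on the MultiheadAttention module above; inputs follow fairseq's (time, batch, channel) layout. On recent torch builds this takes the F.multi_head_attention_forward fast path, and the returned weights are averaged over heads.
import torch

mha = MultiheadAttention(embed_dim=16, num_heads=4, dropout=0.0, self_attention=True)
x = torch.randn(7, 2, 16)                      # (tgt_len, batch, embed_dim)
attn, attn_weights = mha(query=x, key=x, value=x, need_weights=True)
print(attn.shape)                              # torch.Size([7, 2, 16])
print(attn_weights.shape)                      # torch.Size([2, 7, 7]) -- averaged over heads
|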
def PositionalEmbedding(num_embeddings: int, embedding_dim: int, padding_idx: int, learned: bool=False):
if learned:
if (padding_idx is not None):
num_embeddings = ((num_embeddings + padding_idx) + 1)
m = LearnedPositionalEmbedding(num_embeddings, embedding_dim, padding_idx)
nn.init.normal_(m.weight, mean=0, std=(embedding_dim ** (- 0.5)))
if (padding_idx is not None):
nn.init.constant_(m.weight[padding_idx], 0)
else:
m = SinusoidalPositionalEmbedding(embedding_dim, padding_idx, init_size=((num_embeddings + padding_idx) + 1))
return m
|
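Usage sketch for the PositionalEmbedding factory above with learned positions; the token ids are only used to locate non-padding positions (LearnedPositionalEmbedding calls utils.make_positions internally), so any LongTensor with the right padding_idx works. This assumes the factory and its dependencies are importable from the surrounding modules.
import torch

pad_idx = 1
pos_emb = PositionalEmbedding(num_embeddings=128, embedding_dim=32, padding_idx=pad_idx, learned=True)
tokens = torch.tensor([[5, 6, 7, pad_idx, pad_idx],
                       [8, 9, 10, 11, pad_idx]])   # (batch, seq_len) with right-padding
pos = pos_emb(tokens)
print(pos.shape)             # torch.Size([2, 5, 32])
print(pos_emb.max_positions) # 128
|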
class ScalarBias(torch.autograd.Function):
'\n Adds a vector of scalars, used in self-attention mechanism to allow\n the model to optionally attend to this vector instead of the past\n '
@staticmethod
def forward(ctx, input, dim, bias_init):
size = list(input.size())
size[dim] += 1
output = input.new(*size).fill_(bias_init)
output.narrow(dim, 1, (size[dim] - 1)).copy_(input)
ctx.dim = dim
return output
@staticmethod
def backward(ctx, grad):
return (grad.narrow(ctx.dim, 1, (grad.size(ctx.dim) - 1)), None, None)
|
def scalar_bias(input, dim, bias_init=0):
return ScalarBias.apply(input, dim, bias_init)
|
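scalar_bias prepends one constant-valued slot along the chosen dimension (so the model may attend to it instead of real positions); in the backward pass the gradient for that slot is simply dropped. A small shape check:
import torch

x = torch.randn(2, 4, 8)            # e.g. (batch, src_len, dim)
y = scalar_bias(x, dim=1, bias_init=0)
print(y.shape)                      # torch.Size([2, 5, 8]) -- one extra slot along dim 1
print(y[:, 0].abs().max().item())   # 0.0 -- the new leading slot holds the bias value
|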
@with_incremental_state
class SingleHeadAttention(nn.Module):
'Single head attention as defined in the DeLighT paper\n '
def __init__(self, q_in_dim, kv_in_dim, proj_dim, out_dim, dropout=0.0, bias=True, self_attention=False, encoder_decoder_attention=False):
"\n :param embed_dim: Input dimension\n :param out_dim: Output dimension\n :param dropout: attention dropout\n :param bias: use bias or not\n :param self_attention: Using for self attention or not\n :param encoder_decoder_attention: Using for encoder-decoder attention or not\n :param qkv_proj: Project QKV or not. This is useful for projecting encoder output to query's dimensionality\n "
super(SingleHeadAttention, self).__init__()
self.q_embed_dim = q_in_dim
self.kv_embed_dim = kv_in_dim
self.proj_dim = proj_dim
self.out_dim = out_dim
self.dropout = dropout
self.self_attention = self_attention
self.encoder_decoder_attention = encoder_decoder_attention
if self.self_attention:
assert (q_in_dim == kv_in_dim)
self.linear_kqv = get_weight_layer(name='linear', in_features=self.q_embed_dim, out_features=self.proj_dim, use_bias=True, gates=3)
elif self.encoder_decoder_attention:
self.linear_q = get_weight_layer(name='linear', in_features=self.q_embed_dim, out_features=self.proj_dim, use_bias=True, gates=1)
self.linear_kv = get_weight_layer(name='linear', in_features=self.kv_embed_dim, out_features=self.proj_dim, use_bias=True, gates=2)
self.scaling = (self.proj_dim ** (- 0.5))
self.out_proj = get_weight_layer(name='linear', in_features=self.proj_dim, out_features=self.out_dim, use_bias=True)
self.onnx_trace = False
def prepare_for_onnx_export_(self):
self.onnx_trace = True
def __repr__(self):
s = '{name}(q_in_features={q_embed_dim}, kv_in_features={kv_embed_dim}, out_features={out_dim}, attn_dropout={dropout}, self_attention={self_attention}, encoder_decoder_attention={encoder_decoder_attention})'
if self.self_attention:
s += '\n \t |---- KQV function: \t {}'.format(self.linear_kqv)
elif self.encoder_decoder_attention:
s += '\n \t |---- KV function: \t {}'.format(self.linear_kv)
s += '\n \t |---- Q function: \t {}'.format(self.linear_q)
s += '\n \t |---- Proj: {}'.format(self.out_proj)
return s.format(name=self.__class__.__name__, **self.__dict__)
def forward(self, query, key_value: Optional[Tensor], key_padding_mask: Optional[Tensor]=None, incremental_state: Optional[Dict[(str, Dict[(str, Optional[Tensor])])]]=None, need_weights: bool=True, static_kv: bool=False, attn_mask: Optional[Tensor]=None, before_softmax: bool=False, need_head_weights: bool=False) -> Tuple[(Tensor, Optional[Tensor])]:
'Input shape: Time x Batch x Channel\n\n Args:\n key_padding_mask (ByteTensor, optional): mask to exclude\n keys that are pads, of shape `(batch, src_len)`, where\n padding elements are indicated by 1s.\n need_weights (bool, optional): return the attention weights,\n averaged over heads (default: False).\n attn_mask (ByteTensor, optional): typically used to\n implement causal attention, where the mask prevents the\n attention from looking forward in time (default: None).\n before_softmax (bool, optional): return the raw attention\n weights and values before the attention softmax.\n need_head_weights (bool, optional): return the attention\n weights for each head. Implies *need_weights*. Default:\n return the average attention weights over all heads.\n '
if need_head_weights:
need_weights = True
(tgt_len, bsz, q_embed_dim) = query.size()
assert (q_embed_dim == self.q_embed_dim), 'Error in {}. {} != {}'.format(self.__class__.__name__, q_embed_dim, self.q_embed_dim)
assert (list(query.size()) == [tgt_len, bsz, q_embed_dim])
if (incremental_state is not None):
saved_state = self._get_input_buffer(incremental_state)
if ((saved_state is not None) and ('prev_key' in saved_state)):
if static_kv:
assert (self.encoder_decoder_attention and (not self.self_attention))
key_value = None
else:
saved_state = None
if self.self_attention:
(q, k, v) = torch.chunk(self.linear_kqv(query), chunks=3, dim=(- 1))
elif self.encoder_decoder_attention:
q = self.linear_q(query)
if (key_value is None):
k = v = None
else:
(k, v) = torch.chunk(self.linear_kv(key_value), chunks=2, dim=(- 1))
else:
raise NotImplementedError
q *= self.scaling
q = q.contiguous().transpose(0, 1)
if (k is not None):
k = k.contiguous().transpose(0, 1)
if (v is not None):
v = v.contiguous().transpose(0, 1)
if (saved_state is not None):
if ('prev_key' in saved_state):
prev_key = saved_state['prev_key']
assert (prev_key is not None)
if static_kv:
k = prev_key
else:
assert (k is not None)
k = torch.cat([prev_key, k], dim=1)
if ('prev_value' in saved_state):
prev_value = saved_state['prev_value']
assert (prev_value is not None)
if static_kv:
v = prev_value
else:
assert (v is not None)
v = torch.cat([prev_value, v], dim=1)
prev_key_padding_mask: Optional[Tensor] = None
if ('prev_key_padding_mask' in saved_state):
prev_key_padding_mask = saved_state['prev_key_padding_mask']
assert ((k is not None) and (v is not None))
key_padding_mask = SingleHeadAttention._append_prev_key_padding_mask(key_padding_mask=key_padding_mask, prev_key_padding_mask=prev_key_padding_mask, batch_size=bsz, src_len=k.size(1), static_kv=static_kv)
saved_state['prev_key'] = k
saved_state['prev_value'] = v
saved_state['prev_key_padding_mask'] = key_padding_mask
assert (incremental_state is not None)
incremental_state = self._set_input_buffer(incremental_state, saved_state)
assert (k is not None)
src_len = k.size(1)
if ((key_padding_mask is not None) and (key_padding_mask.dim() == 0)):
key_padding_mask = None
if (key_padding_mask is not None):
assert (key_padding_mask.size(0) == bsz)
assert (key_padding_mask.size(1) == src_len)
attn_weights = torch.bmm(q, k.transpose(1, 2))
attn_weights = SingleHeadAttention.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz)
assert (list(attn_weights.size()) == [bsz, tgt_len, src_len])
if (attn_mask is not None):
attn_mask = attn_mask.unsqueeze(0)
attn_weights += attn_mask
if (key_padding_mask is not None):
attn_weights = attn_weights.masked_fill(key_padding_mask.unsqueeze(1).to(torch.bool), float('-inf'))
if before_softmax:
return (attn_weights, v)
attn_weights_float = utils.softmax(attn_weights, dim=(- 1), onnx_trace=self.onnx_trace)
attn_weights = attn_weights_float.type_as(attn_weights)
attn_probs = F.dropout(attn_weights_float.type_as(attn_weights), p=self.dropout, training=self.training)
assert (v is not None)
attn = torch.bmm(attn_probs, v)
assert (list(attn.size()) == [bsz, tgt_len, self.proj_dim])
attn = attn.transpose(0, 1).contiguous()
attn = self.out_proj(attn)
if need_weights:
attn_weights = attn_weights.transpose(1, 0)
return (attn, attn_weights)
else:
attn_weights_tmp: Optional[Tensor] = None
return (attn, attn_weights_tmp)
@staticmethod
def _append_prev_key_padding_mask(key_padding_mask: Optional[Tensor], prev_key_padding_mask: Optional[Tensor], batch_size: int, src_len: int, static_kv: bool) -> Optional[Tensor]:
if ((prev_key_padding_mask is not None) and static_kv):
new_key_padding_mask = prev_key_padding_mask
elif ((prev_key_padding_mask is not None) and (key_padding_mask is not None)):
new_key_padding_mask = torch.cat([prev_key_padding_mask.float(), key_padding_mask.float()], dim=1)
elif (prev_key_padding_mask is not None):
filler = torch.zeros(batch_size, (src_len - prev_key_padding_mask.size(1)))
if prev_key_padding_mask.is_cuda:
filler = filler.cuda()
new_key_padding_mask = torch.cat([prev_key_padding_mask.float(), filler.float()], dim=1)
elif (key_padding_mask is not None):
filler = torch.zeros(batch_size, (src_len - key_padding_mask.size(1)))
if key_padding_mask.is_cuda:
filler = filler.cuda()
new_key_padding_mask = torch.cat([filler.float(), key_padding_mask.float()], dim=1)
else:
new_key_padding_mask = prev_key_padding_mask
return new_key_padding_mask
def reorder_incremental_state(self, incremental_state: Dict[(str, Dict[(str, Optional[Tensor])])], new_order):
'Reorder buffered internal state (for incremental generation).'
input_buffer = self._get_input_buffer(incremental_state)
if (input_buffer is not None):
for k in input_buffer.keys():
input_buffer_k = input_buffer[k]
if (input_buffer_k is not None):
input_buffer[k] = input_buffer_k.index_select(0, new_order)
incremental_state = self._set_input_buffer(incremental_state, input_buffer)
return incremental_state
def _get_input_buffer(self, incremental_state: Optional[Dict[(str, Dict[(str, Optional[Tensor])])]]) -> Dict[(str, Optional[Tensor])]:
result = self.get_incremental_state(incremental_state, 'attn_state')
if (result is not None):
return result
else:
empty_result: Dict[(str, Optional[Tensor])] = {}
return empty_result
def _set_input_buffer(self, incremental_state: Dict[(str, Dict[(str, Optional[Tensor])])], buffer: Dict[(str, Optional[Tensor])]):
return self.set_incremental_state(incremental_state, 'attn_state', buffer)
def apply_sparse_mask(attn_weights, tgt_len: int, src_len: int, bsz: int):
return attn_weights
def upgrade_state_dict_named(self, state_dict, name):
prefix = ((name + '.') if (name != '') else '')
items_to_add = {}
keys_to_remove = []
for k in state_dict.keys():
if k.endswith((prefix + 'in_proj_weight')):
dim = int((state_dict[k].shape[0] / 3))
items_to_add[(prefix + 'q_proj.weight')] = state_dict[k][:dim]
items_to_add[(prefix + 'k_proj.weight')] = state_dict[k][dim:(2 * dim)]
items_to_add[(prefix + 'v_proj.weight')] = state_dict[k][(2 * dim):]
keys_to_remove.append(k)
k_bias = (prefix + 'in_proj_bias')
if (k_bias in state_dict.keys()):
dim = int((state_dict[k].shape[0] / 3))
items_to_add[(prefix + 'q_proj.bias')] = state_dict[k_bias][:dim]
items_to_add[(prefix + 'k_proj.bias')] = state_dict[k_bias][dim:(2 * dim)]
items_to_add[(prefix + 'v_proj.bias')] = state_dict[k_bias][(2 * dim):]
keys_to_remove.append((prefix + 'in_proj_bias'))
for k in keys_to_remove:
del state_dict[k]
for (key, value) in items_to_add.items():
state_dict[key] = value
def compute_macs_params(self, T=1, S=1):
macs = 0
n_params = 0
C = self.proj_dim
num_macs_kq = ((T * S) * C)
num_macs_v = ((T * C) * S)
macs += (num_macs_kq + num_macs_v)
if self.self_attention:
assert (T == S)
q_params = sum([p.numel() for p in self.linear_kqv.parameters()])
macs += (q_params * T)
n_params += q_params
elif self.encoder_decoder_attention:
q_params = sum([p.numel() for p in self.linear_q.parameters()])
kv_params = sum([p.numel() for p in self.linear_kv.parameters()])
macs += ((q_params * T) + (kv_params * S))
n_params += (q_params + kv_params)
else:
raise NotImplementedError
out_params = sum([p.numel() for p in self.out_proj.parameters()])
macs += (out_params * T)
n_params += out_params
return {'name': self.__class__.__name__, 'macs': macs, 'params': n_params, 'macs_attn': (num_macs_kq + num_macs_v)}
|
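A hedged construction sketch for SingleHeadAttention above, in its self-attention configuration (q_in_dim == kv_in_dim). It relies on get_weight_layer from the surrounding modules, so it only runs inside this repository; the shapes below follow the assertions in the forward pass.
import torch

sha = SingleHeadAttention(q_in_dim=64, kv_in_dim=64, proj_dim=64, out_dim=128,
                          dropout=0.0, bias=True, self_attention=True,
                          encoder_decoder_attention=False)
x = torch.randn(9, 2, 64)                    # (tgt_len, batch, q_in_dim)
out, weights = sha(query=x, key_value=None, need_weights=True)
print(out.shape)                             # torch.Size([9, 2, 128])
print(weights.shape)                         # torch.Size([9, 2, 9]) -- (tgt_len, batch, src_len) for the single head
|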
class SparseMultiheadAttention(MultiheadAttention):
' Sparse Multi-Headed Attention.\n\n "Generating Long Sequences with Sparse Transformers". Implements\n fixed factorized self attention, where l=stride and c=expressivity.\n A(1) includes all words in the stride window and A(2) takes a summary of c\n words from the end of each stride window.\n If is_bidirectional=False, we do not include any words past the current word,\n as in the paper.\n '
def __init__(self, embed_dim, num_heads, kdim=None, vdim=None, dropout=0.0, bias=True, add_bias_kv=False, add_zero_attn=False, self_attention=False, encoder_decoder_attention=False, stride=32, expressivity=8, is_bidirectional=True):
super().__init__(embed_dim, num_heads, kdim, vdim, dropout, bias, add_bias_kv, add_zero_attn, self_attention, encoder_decoder_attention)
self.is_bidirectional = is_bidirectional
self.stride = stride
self.expressivity = expressivity
assert ((self.stride > 0) and (self.stride >= self.expressivity))
def compute_checkpoint(self, word_index):
if (((word_index % self.stride) == 0) and (word_index != 0)):
checkpoint_index = (word_index - self.expressivity)
else:
checkpoint_index = (((math.floor((word_index / self.stride)) * self.stride) + self.stride) - self.expressivity)
return checkpoint_index
def compute_subset_summaries(self, absolute_max):
checkpoint_index = self.compute_checkpoint(0)
subset_two = set()
while (checkpoint_index <= (absolute_max - 1)):
summary = set(range(checkpoint_index, min(((checkpoint_index + self.expressivity) + 1), absolute_max)))
subset_two = subset_two.union(summary)
checkpoint_index = self.compute_checkpoint((checkpoint_index + self.stride))
return subset_two
def compute_fixed_attention_subset(self, word_index, tgt_len):
if (not self.is_bidirectional):
absolute_max = (word_index + 1)
else:
absolute_max = tgt_len
rounded_index = (math.floor(((word_index + self.stride) / self.stride)) * self.stride)
if (((word_index % self.stride) == 0) and (word_index != 0)):
subset_one = set(range((word_index - self.stride), min(absolute_max, (word_index + 1))))
else:
subset_one = set(range(max(0, (rounded_index - self.stride)), min(absolute_max, (rounded_index + 1))))
subset_two = set()
if (not self.is_bidirectional):
subset_two = self.compute_subset_summaries(absolute_max)
return subset_one.union(subset_two)
def buffered_sparse_mask(self, tensor, tgt_len, src_len):
assert (tgt_len > self.stride)
sparse_mask = torch.empty((tgt_len, src_len)).float().fill_(float('-inf'))
subset_summaries = set()
if self.is_bidirectional:
subset_summaries = self.compute_subset_summaries(tgt_len)
for i in range(tgt_len):
fixed_attention_subset = self.compute_fixed_attention_subset(i, tgt_len)
fixed_attention_subset = fixed_attention_subset.union(subset_summaries)
included_word_indices = torch.LongTensor(list(fixed_attention_subset))
sparse_mask[i].index_fill_(0, included_word_indices, 0)
return sparse_mask.type_as(tensor)
def apply_sparse_mask(self, attn_weights, tgt_len, src_len, bsz):
sparse_mask = self.buffered_sparse_mask(attn_weights, tgt_len, src_len)
sparse_mask = sparse_mask.unsqueeze(0).expand((bsz * self.num_heads), tgt_len, src_len)
attn_weights += sparse_mask
|
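The fixed factorized pattern can be inspected directly through buffered_sparse_mask; a small sketch with a short stride so the structure is visible (0 marks attended positions, -inf marks masked ones). Parameters here are chosen only to keep the printout small.
import torch

sparse = SparseMultiheadAttention(embed_dim=16, num_heads=2, self_attention=True,
                                  stride=4, expressivity=2, is_bidirectional=False)
mask = sparse.buffered_sparse_mask(torch.empty(1), tgt_len=8, src_len=8)
print((mask == 0).int())   # 8x8 pattern: local stride windows plus summary positions
|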
class SparseTransformerSentenceEncoder(TransformerSentenceEncoder):
'\n Sparse implementation of the TransformerSentenceEncoder\n - see SparseMultiheadAttention\n '
def __init__(self, padding_idx: int, vocab_size: int, num_encoder_layers: int=6, embedding_dim: int=768, ffn_embedding_dim: int=3072, num_attention_heads: int=8, dropout: float=0.1, attention_dropout: float=0.1, activation_dropout: float=0.1, max_seq_len: int=256, num_segments: int=2, use_position_embeddings: bool=True, offset_positions_by_padding: bool=True, encoder_normalize_before: bool=False, apply_bert_init: bool=False, activation_fn: str='relu', learned_pos_embedding: bool=True, add_bias_kv: bool=False, add_zero_attn: bool=False, embed_scale: float=None, freeze_embeddings: bool=False, n_trans_layers_to_freeze: int=0, export: bool=False, is_bidirectional: bool=True, stride: int=32, expressivity: int=8) -> None:
super().__init__(padding_idx, vocab_size, num_encoder_layers, embedding_dim, ffn_embedding_dim, num_attention_heads, dropout, attention_dropout, activation_dropout, max_seq_len, num_segments, use_position_embeddings, offset_positions_by_padding, encoder_normalize_before, apply_bert_init, activation_fn, learned_pos_embedding, add_bias_kv, add_zero_attn, embed_scale, freeze_embeddings, n_trans_layers_to_freeze, export)
self.layers = nn.ModuleList([SparseTransformerSentenceEncoderLayer(embedding_dim=self.embedding_dim, ffn_embedding_dim=ffn_embedding_dim, num_attention_heads=num_attention_heads, dropout=self.dropout, attention_dropout=attention_dropout, activation_dropout=activation_dropout, activation_fn=activation_fn, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn, export=export, is_bidirectional=is_bidirectional, stride=stride, expressivity=expressivity) for _ in range(num_encoder_layers)])
def freeze_module_params(m):
if (m is not None):
for p in m.parameters():
p.requires_grad = False
for layer in range(n_trans_layers_to_freeze):
freeze_module_params(self.layers[layer])
|
class SparseTransformerSentenceEncoderLayer(TransformerSentenceEncoderLayer):
'\n Implements a Sparse Transformer Encoder Layer (see SparseMultiheadAttention)\n '
def __init__(self, embedding_dim: int=768, ffn_embedding_dim: int=3072, num_attention_heads: int=8, dropout: float=0.1, attention_dropout: float=0.1, activation_dropout: float=0.1, activation_fn: str='relu', add_bias_kv: bool=False, add_zero_attn: bool=False, export: bool=False, is_bidirectional: bool=True, stride: int=32, expressivity: int=8) -> None:
super().__init__(embedding_dim, ffn_embedding_dim, num_attention_heads, dropout, attention_dropout, activation_dropout, activation_fn, add_bias_kv, add_zero_attn, export)
self.self_attn = SparseMultiheadAttention(self.embedding_dim, num_attention_heads, dropout=attention_dropout, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn, self_attention=True, is_bidirectional=is_bidirectional, stride=stride, expressivity=expressivity)
|
class TransformerEncoderLayer(nn.Module):
'Encoder layer block.\n\n In the original paper each operation (multi-head attention or FFN) is\n postprocessed with: `dropout -> add residual -> layernorm`. In the\n tensor2tensor code they suggest that learning is more robust when\n preprocessing each layer with layernorm and postprocessing with:\n `dropout -> add residual`. We default to the approach in the paper, but the\n tensor2tensor approach can be enabled by setting\n *args.encoder_normalize_before* to ``True``.\n\n Args:\n args (argparse.Namespace): parsed command-line arguments\n '
def __init__(self, args):
super().__init__()
self.embed_dim = args.encoder_embed_dim
self.self_attn = MultiheadAttention(self.embed_dim, args.encoder_attention_heads, dropout=args.attention_dropout, self_attention=True)
self.self_attn_layer_norm = LayerNorm(self.embed_dim)
self.dropout = args.dropout
self.activation_fn = utils.get_activation_fn(activation=getattr(args, 'activation_fn', 'relu'))
self.activation_dropout = getattr(args, 'activation_dropout', 0)
if (self.activation_dropout == 0):
self.activation_dropout = getattr(args, 'relu_dropout', 0)
self.normalize_before = args.encoder_normalize_before
self.fc1 = Linear(self.embed_dim, args.encoder_ffn_embed_dim)
self.fc2 = Linear(args.encoder_ffn_embed_dim, self.embed_dim)
self.final_layer_norm = LayerNorm(self.embed_dim)
def upgrade_state_dict_named(self, state_dict, name):
'\n Rename layer norm states from `...layer_norms.0.weight` to\n `...self_attn_layer_norm.weight` and `...layer_norms.1.weight` to\n `...final_layer_norm.weight`\n '
layer_norm_map = {'0': 'self_attn_layer_norm', '1': 'final_layer_norm'}
for (old, new) in layer_norm_map.items():
for m in ('weight', 'bias'):
k = '{}.layer_norms.{}.{}'.format(name, old, m)
if (k in state_dict):
state_dict['{}.{}.{}'.format(name, new, m)] = state_dict[k]
del state_dict[k]
def forward(self, x, encoder_padding_mask, attn_mask: Optional[Tensor]=None):
        '\n Args:\n x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`\n encoder_padding_mask (ByteTensor): binary ByteTensor of shape\n `(batch, src_len)` where padding elements are indicated by ``1``.\n attn_mask (ByteTensor): binary tensor of shape (T_tgt, T_src), where\n T_tgt is the length of the query and T_src is the length of the key,\n though here both query and key are x;\n attn_mask[t_tgt, t_src] = 1 means that when calculating the embedding\n for t_tgt, t_src is excluded (or masked out); =0 means it is\n included in attention\n\n Returns:\n encoded output of shape `(seq_len, batch, embed_dim)`\n '
residual = x
if self.normalize_before:
x = self.self_attn_layer_norm(x)
if (attn_mask is not None):
attn_mask = attn_mask.masked_fill(attn_mask.to(torch.bool), (- 100000000.0))
(x, _) = self.self_attn(query=x, key=x, value=x, key_padding_mask=encoder_padding_mask)
x = F.dropout(x, p=self.dropout, training=self.training)
x = (residual + x)
if (not self.normalize_before):
x = self.self_attn_layer_norm(x)
residual = x
if self.normalize_before:
x = self.final_layer_norm(x)
x = self.activation_fn(self.fc1(x))
x = F.dropout(x, p=float(self.activation_dropout), training=self.training)
x = self.fc2(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = (residual + x)
if (not self.normalize_before):
x = self.final_layer_norm(x)
return x
def compute_macs_params(self, S=1):
macs = 0
n_params = 0
macs_attn = 0
n_params += sum([p.numel() for p in self.self_attn_layer_norm.parameters()])
n_params += sum([p.numel() for p in self.final_layer_norm.parameters()])
self_attn_layer = self.self_attn.compute_macs_params(T=S, S=S)
macs += self_attn_layer['macs']
n_params += self_attn_layer['params']
macs_attn += self_attn_layer['macs_attn']
fc1_params = sum([p.numel() for p in self.fc1.parameters()])
macs += (fc1_params * S)
n_params += fc1_params
fc2_params = sum([p.numel() for p in self.fc2.parameters()])
macs += (fc2_params * S)
n_params += fc2_params
return {'name': self.__class__.__name__, 'macs': macs, 'params': n_params, 'macs_attn': macs_attn}
|
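A minimal sketch (plain PyTorch toy module, not the fairseq layer) contrasting the two orderings described in the docstring: the paper-style post-norm path runs dropout -> add residual -> layernorm, while the tensor2tensor-style pre-norm path (normalize_before=True) applies layernorm first and then dropout -> add residual.

import torch
import torch.nn as nn
import torch.nn.functional as F

class ToyResidualBlock(nn.Module):
    """Illustrative residual wrapper around an arbitrary sublayer (attention or FFN)."""
    def __init__(self, dim, sublayer, normalize_before=False, dropout=0.1):
        super().__init__()
        self.sublayer = sublayer
        self.norm = nn.LayerNorm(dim)
        self.normalize_before = normalize_before
        self.dropout = dropout

    def forward(self, x):
        residual = x
        if self.normalize_before:        # tensor2tensor-style pre-norm
            x = self.norm(x)
        x = self.sublayer(x)
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = residual + x
        if not self.normalize_before:    # paper-style post-norm
            x = self.norm(x)
        return x

block = ToyResidualBlock(16, nn.Linear(16, 16), normalize_before=True)
print(block(torch.randn(8, 3, 16)).shape)  # torch.Size([8, 3, 16])
|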
class TransformerDecoderLayer(nn.Module):
'Decoder layer block.\n\n In the original paper each operation (multi-head attention, encoder\n attention or FFN) is postprocessed with: `dropout -> add residual ->\n layernorm`. In the tensor2tensor code they suggest that learning is more\n robust when preprocessing each layer with layernorm and postprocessing with:\n `dropout -> add residual`. We default to the approach in the paper, but the\n tensor2tensor approach can be enabled by setting\n *args.decoder_normalize_before* to ``True``.\n\n Args:\n args (argparse.Namespace): parsed command-line arguments\n no_encoder_attn (bool, optional): whether to attend to encoder outputs\n (default: False).\n '
def __init__(self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False):
super().__init__()
self.embed_dim = args.decoder_embed_dim
self.cross_self_attention = getattr(args, 'cross_self_attention', False)
self.self_attn = MultiheadAttention(embed_dim=self.embed_dim, num_heads=args.decoder_attention_heads, dropout=args.attention_dropout, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn, self_attention=(not self.cross_self_attention))
self.dropout = args.dropout
self.activation_fn = utils.get_activation_fn(activation=getattr(args, 'activation_fn', 'relu'))
self.activation_dropout = getattr(args, 'activation_dropout', 0)
if (self.activation_dropout == 0):
self.activation_dropout = getattr(args, 'relu_dropout', 0)
self.normalize_before = args.decoder_normalize_before
export = getattr(args, 'char_inputs', False)
self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export)
if no_encoder_attn:
self.encoder_attn = None
self.encoder_attn_layer_norm = None
else:
self.encoder_attn = MultiheadAttention(self.embed_dim, args.decoder_attention_heads, kdim=getattr(args, 'encoder_embed_dim', None), vdim=getattr(args, 'encoder_embed_dim', None), dropout=args.attention_dropout, encoder_decoder_attention=True)
self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=export)
self.fc1 = Linear(self.embed_dim, args.decoder_ffn_embed_dim)
self.fc2 = Linear(args.decoder_ffn_embed_dim, self.embed_dim)
self.final_layer_norm = LayerNorm(self.embed_dim, export=export)
self.need_attn = True
self.onnx_trace = False
def prepare_for_onnx_export_(self):
self.onnx_trace = True
def forward(self, x, encoder_out: Optional[torch.Tensor]=None, encoder_padding_mask: Optional[torch.Tensor]=None, incremental_state: Optional[Dict[(str, Dict[(str, Optional[Tensor])])]]=None, prev_self_attn_state: Optional[List[torch.Tensor]]=None, prev_attn_state: Optional[List[torch.Tensor]]=None, self_attn_mask: Optional[torch.Tensor]=None, self_attn_padding_mask: Optional[torch.Tensor]=None, need_attn: bool=False, need_head_weights: bool=False):
'\n Args:\n x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`\n encoder_padding_mask (ByteTensor, optional): binary\n ByteTensor of shape `(batch, src_len)` where padding\n elements are indicated by ``1``.\n need_attn (bool, optional): return attention weights\n need_head_weights (bool, optional): return attention weights\n for each head (default: return average over heads).\n\n Returns:\n encoded output of shape `(seq_len, batch, embed_dim)`\n '
if need_head_weights:
need_attn = True
residual = x
if self.normalize_before:
x = self.self_attn_layer_norm(x)
if (prev_self_attn_state is not None):
(prev_key, prev_value) = prev_self_attn_state[:2]
saved_state: Dict[(str, Optional[Tensor])] = {'prev_key': prev_key, 'prev_value': prev_value}
if (len(prev_self_attn_state) >= 3):
saved_state['prev_key_padding_mask'] = prev_self_attn_state[2]
assert (incremental_state is not None)
self.self_attn._set_input_buffer(incremental_state, saved_state)
_self_attn_input_buffer = self.self_attn._get_input_buffer(incremental_state)
if (self.cross_self_attention and (not ((incremental_state is not None) and (_self_attn_input_buffer is not None) and ('prev_key' in _self_attn_input_buffer)))):
if (self_attn_mask is not None):
assert (encoder_out is not None)
self_attn_mask = torch.cat((x.new_zeros(x.size(0), encoder_out.size(0)), self_attn_mask), dim=1)
if (self_attn_padding_mask is not None):
if (encoder_padding_mask is None):
assert (encoder_out is not None)
encoder_padding_mask = self_attn_padding_mask.new_zeros(encoder_out.size(1), encoder_out.size(0))
self_attn_padding_mask = torch.cat((encoder_padding_mask, self_attn_padding_mask), dim=1)
assert (encoder_out is not None)
y = torch.cat((encoder_out, x), dim=0)
else:
y = x
(x, attn) = self.self_attn(query=x, key=y, value=y, key_padding_mask=self_attn_padding_mask, incremental_state=incremental_state, need_weights=False, attn_mask=self_attn_mask)
x = F.dropout(x, p=self.dropout, training=self.training)
x = (residual + x)
if (not self.normalize_before):
x = self.self_attn_layer_norm(x)
if (self.encoder_attn is not None):
residual = x
if self.normalize_before:
x = self.encoder_attn_layer_norm(x)
if (prev_attn_state is not None):
(prev_key, prev_value) = prev_attn_state[:2]
saved_state: Dict[(str, Optional[Tensor])] = {'prev_key': prev_key, 'prev_value': prev_value}
if (len(prev_attn_state) >= 3):
saved_state['prev_key_padding_mask'] = prev_attn_state[2]
assert (incremental_state is not None)
self.encoder_attn._set_input_buffer(incremental_state, saved_state)
(x, attn) = self.encoder_attn(query=x, key=encoder_out, value=encoder_out, key_padding_mask=encoder_padding_mask, incremental_state=incremental_state, static_kv=True, need_weights=(need_attn or ((not self.training) and self.need_attn)), need_head_weights=need_head_weights)
x = F.dropout(x, p=self.dropout, training=self.training)
x = (residual + x)
if (not self.normalize_before):
x = self.encoder_attn_layer_norm(x)
residual = x
if self.normalize_before:
x = self.final_layer_norm(x)
x = self.activation_fn(self.fc1(x))
x = F.dropout(x, p=float(self.activation_dropout), training=self.training)
x = self.fc2(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = (residual + x)
if (not self.normalize_before):
x = self.final_layer_norm(x)
if (self.onnx_trace and (incremental_state is not None)):
saved_state = self.self_attn._get_input_buffer(incremental_state)
assert (saved_state is not None)
if (self_attn_padding_mask is not None):
self_attn_state = [saved_state['prev_key'], saved_state['prev_value'], saved_state['prev_key_padding_mask']]
else:
self_attn_state = [saved_state['prev_key'], saved_state['prev_value']]
return (x, attn, self_attn_state)
return (x, attn, None)
def make_generation_fast_(self, need_attn: bool=False, **kwargs):
self.need_attn = need_attn
def compute_macs_params(self, T=1, S=1):
macs = 0
n_params = 0
macs_attn = 0
n_params += sum([p.numel() for p in self.self_attn_layer_norm.parameters()])
n_params += sum([p.numel() for p in self.final_layer_norm.parameters()])
self_attn_layer = self.self_attn.compute_macs_params(T=T, S=T)
macs += self_attn_layer['macs']
n_params += self_attn_layer['params']
macs_attn += self_attn_layer['macs_attn']
if (self.encoder_attn is not None):
enc_attn = self.encoder_attn.compute_macs_params(T=T, S=S)
macs += enc_attn['macs']
n_params += enc_attn['params']
macs_attn += enc_attn['macs_attn']
fc1_params = sum([p.numel() for p in self.fc1.parameters()])
macs += (fc1_params * T)
n_params += fc1_params
fc2_params = sum([p.numel() for p in self.fc2.parameters()])
macs += (fc2_params * T)
n_params += fc2_params
return {'name': self.__class__.__name__, 'macs': macs, 'params': n_params, 'macs_attn': macs_attn}
|
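A worked example of the FFN part of the MAC accounting above (illustrative sizes, based on a reading of compute_macs_params rather than any official numbers): each fully connected layer contributes roughly params * sequence_length multiply-accumulates.

# rough MAC count for the FFN sub-block, mirroring the accounting in compute_macs_params
embed_dim, ffn_dim, T = 512, 2048, 100        # illustrative sizes

fc1_params = embed_dim * ffn_dim + ffn_dim    # fc1 weight + bias
fc2_params = ffn_dim * embed_dim + embed_dim  # fc2 weight + bias

ffn_macs = (fc1_params + fc2_params) * T      # each linear contributes params * sequence length
print(f"FFN params: {fc1_params + fc2_params:,}, FFN MACs at T={T}: {ffn_macs:,}")
|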
def Linear(in_features, out_features, bias=True):
m = nn.Linear(in_features, out_features, bias)
nn.init.xavier_uniform_(m.weight)
if bias:
nn.init.constant_(m.bias, 0.0)
return m
|
class TransformerSentenceEncoderLayer(nn.Module):
'\n Implements a Transformer Encoder Layer used in BERT/XLM style pre-trained\n models.\n '
def __init__(self, embedding_dim: int=768, ffn_embedding_dim: int=3072, num_attention_heads: int=8, dropout: float=0.1, attention_dropout: float=0.1, activation_dropout: float=0.1, activation_fn: str='relu', add_bias_kv: bool=False, add_zero_attn: bool=False, export: bool=False) -> None:
super().__init__()
self.embedding_dim = embedding_dim
self.dropout = dropout
self.activation_dropout = activation_dropout
self.activation_fn = utils.get_activation_fn(activation_fn)
self.self_attn = MultiheadAttention(self.embedding_dim, num_attention_heads, dropout=attention_dropout, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn, self_attention=True)
self.self_attn_layer_norm = LayerNorm(self.embedding_dim, export=export)
self.fc1 = nn.Linear(self.embedding_dim, ffn_embedding_dim)
self.fc2 = nn.Linear(ffn_embedding_dim, self.embedding_dim)
self.final_layer_norm = LayerNorm(self.embedding_dim, export=export)
def forward(self, x: torch.Tensor, self_attn_mask: torch.Tensor=None, self_attn_padding_mask: torch.Tensor=None):
        '\n LayerNorm is applied either before or after the self-attention/ffn\n modules similar to the original Transformer implementation.\n '
residual = x
(x, attn) = self.self_attn(query=x, key=x, value=x, key_padding_mask=self_attn_padding_mask, need_weights=False, attn_mask=self_attn_mask)
x = F.dropout(x, p=self.dropout, training=self.training)
x = (residual + x)
x = self.self_attn_layer_norm(x)
residual = x
x = self.activation_fn(self.fc1(x))
x = F.dropout(x, p=self.activation_dropout, training=self.training)
x = self.fc2(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = (residual + x)
x = self.final_layer_norm(x)
return (x, attn)
|
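A brief usage sketch, assuming the TransformerSentenceEncoderLayer defined above is in scope; inputs follow the (seq_len, batch, embed_dim) layout and the sizes are illustrative only.

import torch

# hypothetical sizes; the layer expects inputs of shape (seq_len, batch, embed_dim)
layer = TransformerSentenceEncoderLayer(embedding_dim=64, ffn_embedding_dim=256, num_attention_heads=4)
x = torch.randn(10, 2, 64)
padding_mask = torch.zeros(2, 10, dtype=torch.bool)  # True marks padding positions

out, attn = layer(x, self_attn_padding_mask=padding_mask)
print(out.shape)  # torch.Size([10, 2, 64])
|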
def unfold1d(x, kernel_size, padding_l, pad_value=0):
'unfold T x B x C to T x B x C x K'
if (kernel_size > 1):
(T, B, C) = x.size()
x = F.pad(x, (0, 0, 0, 0, padding_l, ((kernel_size - 1) - padding_l)), value=pad_value)
x = x.as_strided((T, B, C, kernel_size), ((B * C), C, 1, (B * C)))
else:
x = x.unsqueeze(3)
return x
|
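A brief usage sketch of unfold1d as defined above (illustrative sizes): position k along the new last dimension holds the input shifted by k - padding_l, so a pointwise product with a kernel over that axis implements a 1D convolution.

import torch

T, B, C, K = 5, 2, 3, 3
x = torch.arange(T * B * C, dtype=torch.float).view(T, B, C)

# pad so that each timestep sees itself plus one neighbour on each side
unfolded = unfold1d(x, kernel_size=K, padding_l=1)
print(unfolded.shape)                           # torch.Size([5, 2, 3, 3])
print(torch.equal(unfolded[1, :, :, 2], x[2]))  # True: k=2 looks one step ahead of t=1
|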
def _pair(v):
if isinstance(v, Iterable):
assert (len(v) == 2), 'len(v) != 2'
return v
return tuple(repeat(v, 2))
|
def infer_conv_output_dim(conv_op, input_dim, sample_inchannel):
sample_seq_len = 200
sample_bsz = 10
x = torch.randn(sample_bsz, sample_inchannel, sample_seq_len, input_dim)
x = conv_op(x)
x = x.transpose(1, 2)
(bsz, seq) = x.size()[:2]
per_channel_dim = x.size()[3]
return (x.contiguous().view(bsz, seq, (- 1)).size((- 1)), per_channel_dim)
|
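A quick usage sketch, assuming infer_conv_output_dim as defined above is in scope (speech-style sizes chosen for illustration): the helper runs a dummy batch through the conv op and reads the flattened per-timestep dimension and the per-channel feature dimension off the output.

import torch.nn as nn

# hypothetical conv over (batch, channels, time, features) inputs
conv = nn.Conv2d(1, 32, kernel_size=3, stride=2, padding=1)
flat_dim, per_channel_dim = infer_conv_output_dim(conv, input_dim=80, sample_inchannel=1)
print(flat_dim, per_channel_dim)  # 1280 (= 32 channels * 40 features), 40
|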
class VGGBlock(torch.nn.Module):
    '\n VGG-motivated CNN module https://arxiv.org/pdf/1409.1556.pdf\n\n Args:\n in_channels: (int) number of input channels (typically 1)\n out_channels: (int) number of output channels\n conv_kernel_size: convolution kernel size\n pooling_kernel_size: the size of the pooling window to take a max over\n num_conv_layers: (int) number of convolution layers\n input_dim: (int) input dimension\n conv_stride: the stride of the convolving kernel.\n Can be a single number or a tuple (sH, sW). Default: 1\n padding: implicit paddings on both sides of the input.\n Can be a single number or a tuple (padH, padW). Default: None\n layer_norm: (bool) whether layer norm is applied. Default: False\n\n Shape:\n Input: BxCxTxfeat, i.e. (batch_size, input_size, timesteps, features)\n Output: BxCxTxfeat, i.e. (batch_size, input_size, timesteps, features)\n '
def __init__(self, in_channels, out_channels, conv_kernel_size, pooling_kernel_size, num_conv_layers, input_dim, conv_stride=1, padding=None, layer_norm=False):
assert (input_dim is not None), 'Need input_dim for LayerNorm and infer_conv_output_dim'
super(VGGBlock, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.conv_kernel_size = _pair(conv_kernel_size)
self.pooling_kernel_size = _pair(pooling_kernel_size)
self.num_conv_layers = num_conv_layers
self.padding = (tuple(((e // 2) for e in self.conv_kernel_size)) if (padding is None) else _pair(padding))
self.conv_stride = _pair(conv_stride)
self.layers = nn.ModuleList()
for layer in range(num_conv_layers):
conv_op = nn.Conv2d((in_channels if (layer == 0) else out_channels), out_channels, self.conv_kernel_size, stride=self.conv_stride, padding=self.padding)
self.layers.append(conv_op)
if layer_norm:
(conv_output_dim, per_channel_dim) = infer_conv_output_dim(conv_op, input_dim, (in_channels if (layer == 0) else out_channels))
self.layers.append(nn.LayerNorm(per_channel_dim))
input_dim = per_channel_dim
self.layers.append(nn.ReLU())
if (self.pooling_kernel_size is not None):
pool_op = nn.MaxPool2d(kernel_size=self.pooling_kernel_size, ceil_mode=True)
self.layers.append(pool_op)
(self.total_output_dim, self.output_dim) = infer_conv_output_dim(pool_op, input_dim, out_channels)
def forward(self, x):
for (i, _) in enumerate(self.layers):
x = self.layers[i](x)
return x
|
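A usage sketch assuming the VGGBlock defined above is in scope (illustrative speech-like sizes: one input channel, 40-dimensional features):

import torch

block = VGGBlock(in_channels=1, out_channels=32, conv_kernel_size=3,
                 pooling_kernel_size=2, num_conv_layers=2, input_dim=40, layer_norm=True)
x = torch.randn(8, 1, 100, 40)    # (batch_size, input_size, timesteps, features)
y = block(x)
print(y.shape, block.output_dim)  # torch.Size([8, 32, 50, 20]) 20
|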
@register_optimizer('adadelta')
class Adadelta(FairseqOptimizer):
def __init__(self, args, params):
super().__init__(args)
self._optimizer = torch.optim.Adadelta(params, **self.optimizer_config)
@staticmethod
def add_args(parser):
'Add optimizer-specific arguments to the parser.'
parser.add_argument('--adadelta-rho', type=float, default=0.9, metavar='RHO', help='coefficient used for computing a running average of squared gradients')
parser.add_argument('--adadelta-eps', type=float, default=1e-06, metavar='EPS', help='term added to the denominator to improve numerical stability')
parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD', help='weight decay')
parser.add_argument('--anneal-eps', action='store_true', help='flag to anneal eps')
@property
def optimizer_config(self):
'\n Return a kwarg dictionary that will be used to override optimizer\n args stored in checkpoints. This allows us to load a checkpoint and\n resume training using a different set of optimizer args, e.g., with a\n different learning rate.\n '
return {'lr': self.args.lr[0], 'rho': self.args.adadelta_rho, 'eps': self.args.adadelta_eps, 'weight_decay': self.args.weight_decay}
@property
def supports_flat_params(self):
return True
|
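A minimal sketch of what the optimizer_config above resolves to (hypothetical hyperparameter values, calling torch.optim directly instead of the fairseq wrapper): the dictionary is rebuilt from the current args, so a resumed run can override whatever optimizer settings were stored in a checkpoint.

import torch

params = [torch.nn.Parameter(torch.randn(4, 4))]

# values that would normally come from --lr, --adadelta-rho, --adadelta-eps and --weight-decay
config = {'lr': 1.0, 'rho': 0.9, 'eps': 1e-06, 'weight_decay': 0.0}
optimizer = torch.optim.Adadelta(params, **config)
print(optimizer.defaults)
|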
@register_optimizer('adafactor')
class FairseqAdafactor(FairseqOptimizer):
def __init__(self, args, params):
super().__init__(args)
self._optimizer = Adafactor(params, **self.optimizer_config)
@staticmethod
def add_args(parser):
'Add optimizer-specific arguments to the parser.'
parser.add_argument('--adafactor-eps', default='(1e-30, 1e-3)', metavar='E', help='epsilons for Adafactor optimizer')
parser.add_argument('--clip-threshold', type=float, default=1.0, metavar='C', help='threshold for clipping update root mean square')
parser.add_argument('--decay-rate', type=float, default=(- 0.8), metavar='D', help='decay rate of the second moment estimator')
parser.add_argument('--beta1', type=float, default=None, metavar='B', help='beta for first moment estimator. Optional')
parser.add_argument('--scale-parameter', action='store_true', help='scale learning rate by root mean square of parameter.')
parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD', help='weight decay')
parser.add_argument('--warmup-init', action='store_true', help='use relative step for warm-up learning rate schedule')
        parser.add_argument('--relative-step', action='store_true', help='set learning rate to inverse square root of timestep. If false, an external learning rate is applied')
@property
def optimizer_config(self):
        '\n Return a kwarg dictionary that will be used to override optimizer\n args stored in checkpoints. This allows us to load a checkpoint and\n resume training using a different set of optimizer args, e.g., with a\n different learning rate.\n Note: convergence issues have been empirically observed with fp16 enabled;\n an appropriate configuration may require some search.\n '
return {'lr': self.args.lr[0], 'eps': eval(self.args.adafactor_eps), 'clip_threshold': self.args.clip_threshold, 'beta1': self.args.beta1, 'decay_rate': self.args.decay_rate, 'scale_parameter': self.args.scale_parameter, 'weight_decay': self.args.weight_decay, 'relative_step': self.args.relative_step, 'warmup_init': self.args.warmup_init}
|