@register_model_architecture('lightconv_lm', 'lightconv_lm')
def base_lm_architecture(args):
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512)
args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 2048)
args.decoder_layers = getattr(args, 'decoder_layers', 6)
args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 8)
args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None)
args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0)
args.adaptive_softmax_factor = getattr(args, 'adaptive_softmax_factor', 4)
args.decoder_learned_pos = getattr(args, 'decoder_learned_pos', False)
args.character_embeddings = getattr(args, 'character_embeddings', False)
args.decoder_output_dim = getattr(args, 'decoder_output_dim', args.decoder_embed_dim)
args.decoder_input_dim = getattr(args, 'decoder_input_dim', args.decoder_embed_dim)
args.decoder_normalize_before = True
args.adaptive_input = getattr(args, 'adaptive_input', False)
args.adaptive_input_factor = getattr(args, 'adaptive_input_factor', 4)
args.adaptive_input_cutoff = getattr(args, 'adaptive_input_cutoff', None)
args.tie_adaptive_weights = getattr(args, 'tie_adaptive_weights', False)
args.tie_adaptive_proj = getattr(args, 'tie_adaptive_proj', False)
args.decoder_kernel_size_list = getattr(args, 'decoder_kernel_size_list', [3, 7, 15, 31, 31, 31])
if (len(args.decoder_kernel_size_list) == 1):
args.decoder_kernel_size_list = (args.decoder_kernel_size_list * args.decoder_layers)
@register_model_architecture('lightconv_lm', 'lightconv_lm_gbw')
def lightconv_lm_gbw(args):
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512)
args.dropout = getattr(args, 'dropout', 0.1)
args.attention_dropout = getattr(args, 'attention_dropout', 0.1)
args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 4096)
args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 16)
base_lm_architecture(args)
@register_model('multilingual_transformer')
class MultilingualTransformerModel(FairseqMultiModel):
'Train Transformer models for multiple language pairs simultaneously.\n\n Requires `--task multilingual_translation`.\n\n We inherit all arguments from TransformerModel and assume that all language\n pairs use a single Transformer architecture. In addition, we provide several\n options that are specific to the multilingual setting.\n\n Args:\n --share-encoder-embeddings: share encoder embeddings across all source languages\n --share-decoder-embeddings: share decoder embeddings across all target languages\n --share-encoders: share all encoder params (incl. embeddings) across all source languages\n --share-decoders: share all decoder params (incl. embeddings) across all target languages\n '
def __init__(self, encoders, decoders):
super().__init__(encoders, decoders)
@staticmethod
def add_args(parser):
'Add model-specific arguments to the parser.'
TransformerModel.add_args(parser)
parser.add_argument('--share-encoder-embeddings', action='store_true', help='share encoder embeddings across languages')
parser.add_argument('--share-decoder-embeddings', action='store_true', help='share decoder embeddings across languages')
parser.add_argument('--share-encoders', action='store_true', help='share encoders across languages')
parser.add_argument('--share-decoders', action='store_true', help='share decoders across languages')
@classmethod
def build_model(cls, args, task):
'Build a new model instance.'
from fairseq.tasks.multilingual_translation import MultilingualTranslationTask
assert isinstance(task, MultilingualTranslationTask)
base_multilingual_architecture(args)
if (not hasattr(args, 'max_source_positions')):
args.max_source_positions = 1024
if (not hasattr(args, 'max_target_positions')):
args.max_target_positions = 1024
src_langs = [lang_pair.split('-')[0] for lang_pair in task.model_lang_pairs]
tgt_langs = [lang_pair.split('-')[1] for lang_pair in task.model_lang_pairs]
if args.share_encoders:
args.share_encoder_embeddings = True
if args.share_decoders:
args.share_decoder_embeddings = True
def build_embedding(dictionary, embed_dim, path=None):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
emb = Embedding(num_embeddings, embed_dim, padding_idx)
if path:
embed_dict = utils.parse_embedding(path)
utils.load_embedding(embed_dict, dictionary, emb)
return emb
(shared_encoder_embed_tokens, shared_decoder_embed_tokens) = (None, None)
if args.share_all_embeddings:
if (args.encoder_embed_dim != args.decoder_embed_dim):
raise ValueError('--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim')
if (args.decoder_embed_path and (args.decoder_embed_path != args.encoder_embed_path)):
raise ValueError('--share-all-embeddings not compatible with --decoder-embed-path')
shared_encoder_embed_tokens = FairseqMultiModel.build_shared_embeddings(dicts=task.dicts, langs=task.langs, embed_dim=args.encoder_embed_dim, build_embedding=build_embedding, pretrained_embed_path=args.encoder_embed_path)
shared_decoder_embed_tokens = shared_encoder_embed_tokens
args.share_decoder_input_output_embed = True
else:
if args.share_encoder_embeddings:
shared_encoder_embed_tokens = FairseqMultiModel.build_shared_embeddings(dicts=task.dicts, langs=src_langs, embed_dim=args.encoder_embed_dim, build_embedding=build_embedding, pretrained_embed_path=args.encoder_embed_path)
if args.share_decoder_embeddings:
shared_decoder_embed_tokens = FairseqMultiModel.build_shared_embeddings(dicts=task.dicts, langs=tgt_langs, embed_dim=args.decoder_embed_dim, build_embedding=build_embedding, pretrained_embed_path=args.decoder_embed_path)
(lang_encoders, lang_decoders) = ({}, {})
def get_encoder(lang):
if (lang not in lang_encoders):
if (shared_encoder_embed_tokens is not None):
encoder_embed_tokens = shared_encoder_embed_tokens
else:
encoder_embed_tokens = build_embedding(task.dicts[lang], args.encoder_embed_dim, args.encoder_embed_path)
lang_encoders[lang] = TransformerEncoder(args, task.dicts[lang], encoder_embed_tokens)
return lang_encoders[lang]
def get_decoder(lang):
if (lang not in lang_decoders):
if (shared_decoder_embed_tokens is not None):
decoder_embed_tokens = shared_decoder_embed_tokens
else:
decoder_embed_tokens = build_embedding(task.dicts[lang], args.decoder_embed_dim, args.decoder_embed_path)
lang_decoders[lang] = TransformerDecoder(args, task.dicts[lang], decoder_embed_tokens)
return lang_decoders[lang]
(shared_encoder, shared_decoder) = (None, None)
if args.share_encoders:
shared_encoder = get_encoder(src_langs[0])
if args.share_decoders:
shared_decoder = get_decoder(tgt_langs[0])
(encoders, decoders) = (OrderedDict(), OrderedDict())
for (lang_pair, src, tgt) in zip(task.model_lang_pairs, src_langs, tgt_langs):
encoders[lang_pair] = (shared_encoder if (shared_encoder is not None) else get_encoder(src))
decoders[lang_pair] = (shared_decoder if (shared_decoder is not None) else get_decoder(tgt))
return MultilingualTransformerModel(encoders, decoders)
def load_state_dict(self, state_dict, strict=True):
state_dict_subset = state_dict.copy()
for (k, _) in state_dict.items():
assert k.startswith('models.')
lang_pair = k.split('.')[1]
if (lang_pair not in self.models):
del state_dict_subset[k]
super().load_state_dict(state_dict_subset, strict=strict)
@register_model_architecture('multilingual_transformer', 'multilingual_transformer')
def base_multilingual_architecture(args):
base_architecture(args)
args.share_encoder_embeddings = getattr(args, 'share_encoder_embeddings', False)
args.share_decoder_embeddings = getattr(args, 'share_decoder_embeddings', False)
args.share_encoders = getattr(args, 'share_encoders', False)
args.share_decoders = getattr(args, 'share_decoders', False)
@register_model_architecture('multilingual_transformer', 'multilingual_transformer_iwslt_de_en')
def multilingual_transformer_iwslt_de_en(args):
args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512)
args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 1024)
args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 4)
args.encoder_layers = getattr(args, 'encoder_layers', 6)
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512)
args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 1024)
args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 4)
args.decoder_layers = getattr(args, 'decoder_layers', 6)
base_multilingual_architecture(args)
def align_bpe_to_words(roberta, bpe_tokens: torch.LongTensor, other_tokens: List[str]):
'\n Helper to align GPT-2 BPE to other tokenization formats (e.g., spaCy).\n\n Args:\n roberta (RobertaHubInterface): RoBERTa instance\n bpe_tokens (torch.LongTensor): GPT-2 BPE tokens of shape `(T_bpe)`\n other_tokens (List[str]): other tokens of shape `(T_words)`\n\n Returns:\n List[List[int]]: for each of *other_tokens*, the indices of the corresponding *bpe_tokens*.\n '
assert (bpe_tokens.dim() == 1)
def clean(text):
return text.strip()
bpe_tokens = [roberta.task.source_dictionary.string([x]) for x in bpe_tokens]
bpe_tokens = [clean((roberta.bpe.decode(x) if (x not in {'<s>', ''}) else x)) for x in bpe_tokens]
other_tokens = [clean(str(o)) for o in other_tokens]
assert (bpe_tokens[0] == '<s>')
bpe_tokens = bpe_tokens[1:]
assert (''.join(bpe_tokens) == ''.join(other_tokens))
alignment = []
bpe_toks = filter((lambda item: (item[1] != '')), enumerate(bpe_tokens, start=1))
(j, bpe_tok) = next(bpe_toks)
for other_tok in other_tokens:
bpe_indices = []
while True:
if other_tok.startswith(bpe_tok):
bpe_indices.append(j)
other_tok = other_tok[len(bpe_tok):]
try:
(j, bpe_tok) = next(bpe_toks)
except StopIteration:
(j, bpe_tok) = (None, None)
elif bpe_tok.startswith(other_tok):
bpe_indices.append(j)
bpe_tok = bpe_tok[len(other_tok):]
other_tok = ''
else:
raise Exception('Cannot align "{}" and "{}"'.format(other_tok, bpe_tok))
if (other_tok == ''):
break
assert (len(bpe_indices) > 0)
alignment.append(bpe_indices)
assert (len(alignment) == len(other_tokens))
return alignment
def align_features_to_words(roberta, features, alignment):
'\n Align given features to words.\n\n Args:\n roberta (RobertaHubInterface): RoBERTa instance\n features (torch.Tensor): features to align of shape `(T_bpe x C)`\n alignment: alignment between BPE tokens and words returned by\n func:`align_bpe_to_words`.\n '
assert (features.dim() == 2)
bpe_counts = Counter((j for bpe_indices in alignment for j in bpe_indices))
assert (bpe_counts[0] == 0)
denom = features.new([bpe_counts.get(j, 1) for j in range(len(features))])
weighted_features = (features / denom.unsqueeze((- 1)))
output = [weighted_features[0]]
largest_j = (- 1)
for bpe_indices in alignment:
output.append(weighted_features[bpe_indices].sum(dim=0))
largest_j = max(largest_j, *bpe_indices)
for j in range((largest_j + 1), len(features)):
output.append(weighted_features[j])
output = torch.stack(output)
assert torch.all((torch.abs((output.sum(dim=0) - features.sum(dim=0))) < 0.0001))
return output
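# --- Usage sketch (not part of the original source) ---
# Shows how align_bpe_to_words and align_features_to_words above are meant to be
# combined: extract per-BPE features from a RoBERTa hub model, then pool them onto a
# word-level tokenization. The 'roberta.base' hub name follows the fairseq README;
# downloading it and the exact shapes noted below are assumptions of this sketch.
def _example_align_roberta_features_to_words():
    import torch
    roberta = torch.hub.load('pytorch/fairseq', 'roberta.base')
    roberta.eval()
    sentence = 'Hello world !'
    words = sentence.split()                           # any word-level tokenization works
    bpe_tokens = roberta.encode(sentence)              # 1-D LongTensor of BPE ids, incl. <s>/</s>
    features = roberta.extract_features(bpe_tokens)[0]  # (T_bpe, C)
    alignment = align_bpe_to_words(roberta, bpe_tokens, words)
    word_features = align_features_to_words(roberta, features, alignment)
    # one row per word, plus the special-token rows kept at the start/end
    return word_features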
def spacy_nlp():
if (getattr(spacy_nlp, '_nlp', None) is None):
try:
from spacy.lang.en import English
spacy_nlp._nlp = English()
except ImportError:
raise ImportError('Please install spacy with: pip install spacy')
return spacy_nlp._nlp
def spacy_tokenizer():
if (getattr(spacy_tokenizer, '_tokenizer', None) is None):
try:
nlp = spacy_nlp()
spacy_tokenizer._tokenizer = nlp.Defaults.create_tokenizer(nlp)
except ImportError:
raise ImportError('Please install spacy with: pip install spacy')
return spacy_tokenizer._tokenizer
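# Usage sketch (not from the original source): tokenize a sentence with the cached
# spaCy tokenizer above. Note that nlp.Defaults.create_tokenizer() is a spaCy 2.x API;
# with spaCy 3.x one would use nlp.tokenizer instead.
def _example_spacy_tokenize(sentence='Hello, world!'):
    tokenizer = spacy_tokenizer()
    return [tok.text for tok in tokenizer(sentence)]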
@register_model('transformer_from_pretrained_xlm')
class TransformerFromPretrainedXLMModel(TransformerModel):
@staticmethod
def add_args(parser):
'Add model-specific arguments to the parser.'
TransformerModel.add_args(parser)
parser.add_argument('--pretrained-xlm-checkpoint', type=str, metavar='STR', help='XLM model to use for initializing transformer encoder and/or decoder')
parser.add_argument('--init-encoder-only', action='store_true', help="if set, don't load the XLM weights and embeddings into decoder")
parser.add_argument('--init-decoder-only', action='store_true', help="if set, don't load the XLM weights and embeddings into encoder")
@classmethod
def build_model(self, args, task, cls_dictionary=MaskedLMDictionary):
assert hasattr(args, 'pretrained_xlm_checkpoint'), 'You must specify a path for --pretrained-xlm-checkpoint to use --arch transformer_from_pretrained_xlm'
assert (isinstance(task.source_dictionary, cls_dictionary) and isinstance(task.target_dictionary, cls_dictionary)), 'You should use a MaskedLMDictionary when using --arch transformer_from_pretrained_xlm because the pretrained XLM model was trained using data binarized with MaskedLMDictionary. For translation, you may want to use --task translation_from_pretrained_xlm'
assert (not (getattr(args, 'init_encoder_only', False) and getattr(args, 'init_decoder_only', False))), 'Only one of --init-encoder-only and --init-decoder-only can be set.'
return super().build_model(args, task)
@classmethod
def build_encoder(cls, args, src_dict, embed_tokens):
return TransformerEncoderFromPretrainedXLM(args, src_dict, embed_tokens)
@classmethod
def build_decoder(cls, args, tgt_dict, embed_tokens):
return TransformerDecoderFromPretrainedXLM(args, tgt_dict, embed_tokens)
def upgrade_state_dict_with_xlm_weights(state_dict: Dict[(str, Any)], pretrained_xlm_checkpoint: str) -> Dict[(str, Any)]:
'\n Load XLM weights into a Transformer encoder or decoder model.\n\n Args:\n state_dict: state dict for either TransformerEncoder or\n TransformerDecoder\n pretrained_xlm_checkpoint: checkpoint to load XLM weights from\n\n Raises:\n AssertionError: If architecture (num layers, attention heads, etc.)\n does not match between the current Transformer encoder or\n decoder and the pretrained_xlm_checkpoint\n '
if (not os.path.exists(pretrained_xlm_checkpoint)):
raise IOError('Model file not found: {}'.format(pretrained_xlm_checkpoint))
state = checkpoint_utils.load_checkpoint_to_cpu(pretrained_xlm_checkpoint)
xlm_state_dict = state['model']
for key in xlm_state_dict.keys():
for search_key in ['embed_tokens', 'embed_positions', 'layers']:
if (search_key in key):
subkey = key[key.find(search_key):]
assert (subkey in state_dict), '{} Transformer encoder / decoder state_dict does not contain {}. Cannot load {} from pretrained XLM checkpoint {} into Transformer.'.format(str(state_dict.keys()), subkey, key, pretrained_xlm_checkpoint)
state_dict[subkey] = xlm_state_dict[key]
return state_dict
class TransformerEncoderFromPretrainedXLM(TransformerEncoder):
def __init__(self, args, dictionary, embed_tokens):
super().__init__(args, dictionary, embed_tokens)
if getattr(args, 'init_decoder_only', False):
return
assert hasattr(args, 'pretrained_xlm_checkpoint'), '--pretrained-xlm-checkpoint must be specified to load Transformer encoder from pretrained XLM'
xlm_loaded_state_dict = upgrade_state_dict_with_xlm_weights(state_dict=self.state_dict(), pretrained_xlm_checkpoint=args.pretrained_xlm_checkpoint)
self.load_state_dict(xlm_loaded_state_dict, strict=True)
class TransformerDecoderFromPretrainedXLM(TransformerDecoder):
def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False):
super().__init__(args, dictionary, embed_tokens, no_encoder_attn)
if getattr(args, 'init_encoder_only', False):
return
assert hasattr(args, 'pretrained_xlm_checkpoint'), '--pretrained-xlm-checkpoint must be specified to load Transformer decoder from pretrained XLM'
xlm_loaded_state_dict = upgrade_state_dict_with_xlm_weights(state_dict=self.state_dict(), pretrained_xlm_checkpoint=args.pretrained_xlm_checkpoint)
self.load_state_dict(xlm_loaded_state_dict, strict=True)
@register_model_architecture('transformer_from_pretrained_xlm', 'transformer_from_pretrained_xlm')
def base_architecture(args):
transformer_base_architecture(args)
@register_model('transformer_lm')
class TransformerLanguageModel(FairseqLanguageModel):
@classmethod
def hub_models(cls):
return {'transformer_lm.gbw.adaptive_huge': 'https://dl.fbaipublicfiles.com/fairseq/models/lm/adaptive_lm_gbw_huge.tar.bz2', 'transformer_lm.wiki103.adaptive': 'https://dl.fbaipublicfiles.com/fairseq/models/lm/adaptive_lm_wiki103.tar.bz2', 'transformer_lm.wmt19.en': 'https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt19.en.tar.bz2', 'transformer_lm.wmt19.de': 'https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt19.de.tar.bz2', 'transformer_lm.wmt19.ru': 'https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt19.ru.tar.bz2'}
def __init__(self, decoder):
super().__init__(decoder)
@staticmethod
def add_args(parser):
'Add model-specific arguments to the parser.'
parser.add_argument('--activation-fn', choices=utils.get_available_activation_fns(), help='activation function to use')
parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability')
parser.add_argument('--attention-dropout', type=float, metavar='D', help='dropout probability for attention weights')
parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D', help='dropout probability after activation in FFN.')
parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension')
parser.add_argument('--decoder-output-dim', type=int, metavar='N', help='decoder output dimension')
parser.add_argument('--decoder-input-dim', type=int, metavar='N', help='decoder input dimension')
parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N', help='decoder embedding dimension for FFN')
parser.add_argument('--decoder-layers', type=int, metavar='N', help='num decoder layers')
parser.add_argument('--decoder-attention-heads', type=int, metavar='N', help='num decoder attention heads')
parser.add_argument('--decoder-normalize-before', action='store_true', help='apply layernorm before each decoder block')
parser.add_argument('--no-decoder-final-norm', action='store_true', help="don't add an extra layernorm after the last decoder block")
parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR', help='comma separated list of adaptive softmax cutoff points. Must be used with adaptive_loss criterion')
parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D', help='sets adaptive softmax dropout for the tail projections')
parser.add_argument('--adaptive-softmax-factor', type=float, metavar='N', help='adaptive softmax factor')
parser.add_argument('--no-token-positional-embeddings', action='store_true', help='if set, disables positional embeddings (outside self attention)')
parser.add_argument('--share-decoder-input-output-embed', action='store_true', help='share decoder input and output embeddings')
parser.add_argument('--character-embeddings', action='store_true', help='if set, uses character embedding convolutions to produce token embeddings')
parser.add_argument('--character-filters', type=str, metavar='LIST', default='[(1, 64), (2, 128), (3, 192), (4, 256), (5, 256), (6, 256), (7, 256)]', help='list of (kernel size, num filters) pairs for the character CNN')
parser.add_argument('--character-embedding-dim', default=4, type=int, metavar='N', help='size of character embeddings')
parser.add_argument('--char-embedder-highway-layers', default=2, type=int, metavar='N', help='number of highway layers for character token embedder')
parser.add_argument('--adaptive-input', action='store_true', help='if set, uses adaptive input')
parser.add_argument('--adaptive-input-factor', type=float, metavar='N', help='adaptive input factor')
parser.add_argument('--adaptive-input-cutoff', metavar='EXPR', help='comma separated list of adaptive input cutoff points.')
parser.add_argument('--tie-adaptive-weights', action='store_true', help='if set, ties the weights of adaptive softmax and adaptive input')
parser.add_argument('--tie-adaptive-proj', action='store_true', help='if set, ties the projection weights of adaptive softmax and adaptive input')
parser.add_argument('--decoder-learned-pos', action='store_true', help='use learned positional embeddings in the decoder')
parser.add_argument('--decoder-layerdrop', type=float, metavar='D', default=0, help='LayerDrop probability for decoder')
parser.add_argument('--decoder-layers-to-keep', default=None, help='which layers to *keep* when pruning as a comma-separated list')
@classmethod
def build_model(cls, args, task):
'Build a new model instance.'
base_lm_architecture(args)
if args.decoder_layers_to_keep:
args.decoder_layers = len(args.decoder_layers_to_keep.split(','))
if (getattr(args, 'max_target_positions', None) is None):
args.max_target_positions = getattr(args, 'tokens_per_sample', DEFAULT_MAX_TARGET_POSITIONS)
if args.character_embeddings:
embed_tokens = CharacterTokenEmbedder(task.source_dictionary, eval(args.character_filters), args.character_embedding_dim, args.decoder_embed_dim, args.char_embedder_highway_layers)
elif args.adaptive_input:
embed_tokens = AdaptiveInput(len(task.source_dictionary), task.source_dictionary.pad(), args.decoder_input_dim, args.adaptive_input_factor, args.decoder_embed_dim, options.eval_str_list(args.adaptive_input_cutoff, type=int))
else:
embed_tokens = Embedding(len(task.source_dictionary), args.decoder_input_dim, task.source_dictionary.pad())
if args.tie_adaptive_weights:
assert args.adaptive_input
assert (args.adaptive_input_factor == args.adaptive_softmax_factor)
assert (args.adaptive_softmax_cutoff == args.adaptive_input_cutoff), '{} != {}'.format(args.adaptive_softmax_cutoff, args.adaptive_input_cutoff)
assert (args.decoder_input_dim == args.decoder_output_dim)
decoder = TransformerDecoder(args, task.target_dictionary, embed_tokens, no_encoder_attn=True)
return TransformerLanguageModel(decoder)
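# Usage sketch (not part of the original source): load one of the pretrained checkpoints
# listed in hub_models() via torch.hub and sample from it. The tokenizer/bpe arguments
# and the .sample() helper follow the fairseq hub interface as documented in the fairseq
# README; treat the exact keyword arguments as assumptions of this sketch.
def _example_load_pretrained_lm():
    import torch
    lm = torch.hub.load('pytorch/fairseq', 'transformer_lm.wmt19.en',
                        tokenizer='moses', bpe='fastbpe')
    lm.eval()
    return lm.sample('Barack Obama', beam=1, sampling=True, sampling_topk=10, temperature=0.8)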
@register_model_architecture('transformer_lm', 'transformer_lm')
def base_lm_architecture(args):
if hasattr(args, 'no_tie_adaptive_proj'):
args.no_decoder_final_norm = True
if (args.no_tie_adaptive_proj is False):
args.tie_adaptive_proj = True
if hasattr(args, 'decoder_final_norm'):
args.no_decoder_final_norm = (not args.decoder_final_norm)
args.dropout = getattr(args, 'dropout', 0.1)
args.attention_dropout = getattr(args, 'attention_dropout', 0.0)
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512)
args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 2048)
args.decoder_layers = getattr(args, 'decoder_layers', 6)
args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 8)
args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None)
args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0)
args.adaptive_softmax_factor = getattr(args, 'adaptive_softmax_factor', 4)
args.decoder_learned_pos = getattr(args, 'decoder_learned_pos', False)
args.activation_fn = getattr(args, 'activation_fn', 'relu')
args.add_bos_token = getattr(args, 'add_bos_token', False)
args.no_token_positional_embeddings = getattr(args, 'no_token_positional_embeddings', False)
args.share_decoder_input_output_embed = getattr(args, 'share_decoder_input_output_embed', False)
args.character_embeddings = getattr(args, 'character_embeddings', False)
args.decoder_output_dim = getattr(args, 'decoder_output_dim', args.decoder_embed_dim)
args.decoder_input_dim = getattr(args, 'decoder_input_dim', args.decoder_embed_dim)
args.decoder_normalize_before = True
args.no_decoder_final_norm = getattr(args, 'no_decoder_final_norm', False)
args.adaptive_input = getattr(args, 'adaptive_input', False)
args.adaptive_input_factor = getattr(args, 'adaptive_input_factor', 4)
args.adaptive_input_cutoff = getattr(args, 'adaptive_input_cutoff', None)
args.tie_adaptive_weights = getattr(args, 'tie_adaptive_weights', False)
args.tie_adaptive_proj = getattr(args, 'tie_adaptive_proj', False)
@register_model_architecture('transformer_lm', 'transformer_lm_big')
def transformer_lm_big(args):
args.decoder_layers = getattr(args, 'decoder_layers', 12)
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 1024)
args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 4096)
args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 16)
base_lm_architecture(args)
@register_model_architecture('transformer_lm', 'transformer_lm_wiki103')
@register_model_architecture('transformer_lm', 'transformer_lm_baevski_wiki103')
def transformer_lm_baevski_wiki103(args):
args.decoder_layers = getattr(args, 'decoder_layers', 16)
args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 8)
args.dropout = getattr(args, 'dropout', 0.3)
args.adaptive_input = getattr(args, 'adaptive_input', True)
args.tie_adaptive_weights = getattr(args, 'tie_adaptive_weights', True)
args.adaptive_input_cutoff = getattr(args, 'adaptive_input_cutoff', '20000,60000')
args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', '20000,60000')
args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0.2)
args.attention_dropout = getattr(args, 'attention_dropout', 0.1)
args.activation_dropout = getattr(args, 'activation_dropout', 0.1)
args.no_decoder_final_norm = getattr(args, 'no_decoder_final_norm', True)
args.tie_adaptive_proj = getattr(args, 'tie_adaptive_proj', True)
transformer_lm_big(args)
@register_model_architecture('transformer_lm', 'transformer_lm_gbw')
@register_model_architecture('transformer_lm', 'transformer_lm_baevski_gbw')
def transformer_lm_baevski_gbw(args):
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512)
args.dropout = getattr(args, 'dropout', 0.1)
args.attention_dropout = getattr(args, 'attention_dropout', 0.1)
args.no_decoder_final_norm = getattr(args, 'no_decoder_final_norm', True)
transformer_lm_big(args)
@register_model_architecture('transformer_lm', 'transformer_lm_gpt')
def transformer_lm_gpt(args):
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 768)
args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 3072)
args.decoder_layers = getattr(args, 'decoder_layers', 12)
args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 12)
args.dropout = getattr(args, 'dropout', 0.1)
args.attention_dropout = getattr(args, 'attention_dropout', 0.1)
args.activation_fn = getattr(args, 'activation_fn', 'gelu')
base_lm_architecture(args)
@register_model_architecture('transformer_lm', 'transformer_lm_gpt2_small')
def transformer_lm_gpt2_small(args):
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 1024)
args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 4096)
args.decoder_layers = getattr(args, 'decoder_layers', 24)
args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 16)
args.dropout = getattr(args, 'dropout', 0.1)
args.attention_dropout = getattr(args, 'attention_dropout', 0.1)
args.activation_fn = getattr(args, 'activation_fn', 'gelu')
base_lm_architecture(args)
@register_model_architecture('transformer_lm', 'transformer_lm_gpt2_medium')
def transformer_lm_gpt2_medium(args):
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 1280)
args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 5120)
args.decoder_layers = getattr(args, 'decoder_layers', 36)
args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 20)
args.dropout = getattr(args, 'dropout', 0.1)
args.attention_dropout = getattr(args, 'attention_dropout', 0.1)
args.activation_fn = getattr(args, 'activation_fn', 'gelu')
base_lm_architecture(args)
@register_model_architecture('transformer_lm', 'transformer_lm_gpt2_big')
def transformer_lm_gpt2_big(args):
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 1600)
args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 6400)
args.decoder_layers = getattr(args, 'decoder_layers', 48)
args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 25)
args.dropout = getattr(args, 'dropout', 0.1)
args.attention_dropout = getattr(args, 'attention_dropout', 0.1)
args.activation_fn = getattr(args, 'activation_fn', 'gelu')
base_lm_architecture(args)
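# Illustration (not part of the original source) of how @register_model_architecture
# presets compose: each preset sets only the hyperparameters it overrides and then
# delegates to base_lm_architecture(), whose getattr(...) defaults fill in everything
# left unset. Values already present on args (e.g. from the command line) always win.
def _example_resolve_architecture():
    import argparse
    args = argparse.Namespace()
    transformer_lm_gpt2_big(args)
    assert args.decoder_layers == 48        # set by the gpt2_big preset
    assert args.decoder_embed_dim == 1600   # set by the gpt2_big preset
    assert args.activation_fn == 'gelu'     # set by the preset before delegation
    assert args.adaptive_input is False     # filled in by base_lm_architecture
    return args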
class AdaptiveInput(nn.Module):
def __init__(self, vocab_size: int, padding_idx: int, initial_dim: int, factor: float, output_dim: int, cutoff: List[int]):
super().__init__()
if (vocab_size > cutoff[(- 1)]):
cutoff = (cutoff + [vocab_size])
else:
assert (vocab_size == cutoff[(- 1)]), 'cannot specify cutoff larger than vocab size'
self.cutoff = cutoff
self.embedding_dim = output_dim
self.padding_idx = padding_idx
self.embeddings = nn.ModuleList()
for i in range(len(self.cutoff)):
prev = (self.cutoff[(i - 1)] if (i > 0) else 0)
size = (self.cutoff[i] - prev)
dim = int((initial_dim // (factor ** i)))
seq = nn.Sequential(nn.Embedding(size, dim, padding_idx), nn.Linear(dim, output_dim, bias=False))
self.embeddings.append(seq)
def init_weights(m):
if isinstance(m, nn.Embedding):
nn.init.normal_(m.weight, mean=0, std=(m.weight.shape[1] ** (- 0.5)))
nn.init.constant_(m.weight[padding_idx], 0)
elif hasattr(m, 'weight'):
nn.init.xavier_uniform_(m.weight)
self.apply(init_weights)
self.register_buffer('_float_tensor', torch.FloatTensor(1))
def weights_for_band(self, band: int):
return (self.embeddings[band][0].weight, self.embeddings[band][1].weight)
def forward(self, input: torch.Tensor):
result = self._float_tensor.new((input.shape + (self.embedding_dim,)))
for i in range(len(self.cutoff)):
mask = input.lt(self.cutoff[i])
if (i > 0):
mask.mul_(input.ge(self.cutoff[(i - 1)]))
chunk_input = (input[mask] - self.cutoff[(i - 1)])
else:
chunk_input = input[mask]
if mask.any():
result[mask] = self.embeddings[i](chunk_input)
return result
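# Usage sketch (not from the original source): variable-capacity input embeddings.
# Ids below the first cutoff get the full dimension; rarer bands get progressively
# smaller embeddings that are projected back up to output_dim, so the output shape is
# the same regardless of the band.
def _example_adaptive_input():
    import torch
    emb = AdaptiveInput(vocab_size=1000, padding_idx=1, initial_dim=64,
                        factor=2.0, output_dim=64, cutoff=[200, 600])
    tokens = torch.randint(0, 1000, (2, 5))   # (batch, time) token ids
    out = emb(tokens)                          # (2, 5, 64)
    return out.shape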
class ConvTBC(torch.nn.Module):
'1D convolution over an input of shape (time x batch x channel)\n\n The implementation uses gemm to perform the convolution. This implementation\n is faster than cuDNN for small kernel sizes.\n '
def __init__(self, in_channels, out_channels, kernel_size, padding=0):
super(ConvTBC, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = _single(kernel_size)
self.padding = _single(padding)
self.weight = torch.nn.Parameter(torch.Tensor(self.kernel_size[0], in_channels, out_channels))
self.bias = torch.nn.Parameter(torch.Tensor(out_channels))
def forward(self, input):
return torch.conv_tbc(input.contiguous(), self.weight, self.bias, self.padding[0])
def __repr__(self):
s = '{name}({in_channels}, {out_channels}, kernel_size={kernel_size}, padding={padding}'
if (self.bias is None):
s += ', bias=False'
s += ')'
return s.format(name=self.__class__.__name__, **self.__dict__)
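# Usage sketch (not from the original source). ConvTBC expects (time, batch, channel)
# input; the weight/bias Parameters above are allocated uninitialized, so initialize
# them before use.
def _example_conv_tbc():
    import torch
    import torch.nn as nn
    conv = ConvTBC(in_channels=4, out_channels=8, kernel_size=3, padding=1)
    nn.init.xavier_uniform_(conv.weight)
    nn.init.zeros_(conv.bias)
    x = torch.randn(10, 2, 4)   # (T, B, C_in)
    y = conv(x)                 # (10, 2, 8) with kernel_size=3 and padding=1
    return y.shape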
class DropoutSelect(nn.Module):
'Dropout variant that draws multiplicative or additive noise from a configurable distribution.'
def __init__(self, dropout_type, dropout_gama=0.5, inplace=False):
super().__init__()
self.dropout_type = dropout_type
self.dropout_gama = dropout_gama
self.inplace = inplace
dropout_alpha = 1.0
self.dropout_alpha = dropout_alpha
if (dropout_type == 'bernoulli'):
self.dist = torch.distributions.bernoulli.Bernoulli(torch.tensor([dropout_gama]))
elif (dropout_type == 'gamma'):
self.dist = torch.distributions.gamma.Gamma(torch.tensor([dropout_alpha]), torch.tensor([dropout_gama]))
elif (dropout_type == 'gumbel'):
self.dist = torch.distributions.gumbel.Gumbel(torch.tensor([0.0]), torch.tensor([dropout_gama]))
elif (dropout_type == 'beta'):
self.dist = torch.distributions.beta.Beta(torch.tensor([dropout_alpha]), torch.tensor([dropout_gama]))
elif (dropout_type == 'laplace'):
self.dist = torch.distributions.laplace.Laplace(torch.tensor([0.0]), torch.tensor([dropout_gama]))
elif (dropout_type == 'chi'):
self.dist = torch.distributions.chi2.Chi2(torch.tensor([dropout_gama]))
elif (dropout_type == 'normal'):
self.dist = torch.distributions.normal.Normal(torch.tensor([0.0]), torch.tensor([dropout_gama]))
def extra_repr(self):
return 'dropout_type={dropout_type}, dropout_gama={dropout_gama}, inplace={inplace}'.format(**self.__dict__)
def forward(self, x, p, training=True):
if (training is False):
return x
if (self.dropout_type == 'none'):
return F.dropout(x, p=p, training=True, inplace=self.inplace)
elif (self.dropout_type == 'bernoulli'):
noise = self.dist.expand(x.shape).sample().to(x.device)
scale = (p / self.dropout_gama)
x = ((x * noise) * scale)
else:
noise = self.dist.expand(x.shape).sample().to(x.device)
if (self.dropout_type == 'gamma'):
scale = ((p - (self.dropout_alpha * self.dropout_gama)) * (self.dropout_alpha ** (- 0.5)))
elif (self.dropout_type == 'gumbel'):
scale = (((6 ** 0.5) * (p - (0.5772 * self.dropout_gama))) / np.pi)
elif (self.dropout_type == 'beta'):
scale = (((self.dropout_alpha + self.dropout_gama) * ((((self.dropout_alpha + self.dropout_gama) + 1) / self.dropout_alpha) ** 0.5)) * (p - (self.dropout_alpha / (self.dropout_alpha + self.dropout_gama))))
elif (self.dropout_type == 'chi'):
scale = ((p - self.dropout_gama) / (2 ** 0.5))
elif (self.dropout_type == 'normal'):
scale = p
x = (x + (noise * scale))
return x
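# Usage sketch (not from the original source): with dropout_type='none' the module
# falls back to standard F.dropout; the other modes inject noise drawn from the chosen
# distribution, scaled so that p plays a role analogous to the dropout rate.
def _example_dropout_select():
    import torch
    drop = DropoutSelect(dropout_type='normal', dropout_gama=1.0)
    x = torch.randn(4, 8)
    y = drop(x, p=0.1, training=True)   # x plus Gaussian noise scaled by p
    return y.shape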
def gen_forward():
kernels = [3, 5, 7, 15, 31, 63, 127, 255]
blocks = [32, 64, 128, 256]
head = '\n/**\n * Copyright (c) Facebook, Inc. and its affiliates.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#include "dynamicconv_cuda.cuh"\n\nstd::vector<at::Tensor> dynamicconv_cuda_forward(at::Tensor input, at::Tensor weight, int padding_l) {\n\n at::DeviceGuard g(input.device());\n const auto minibatch = input.size(0);\n const auto numFeatures = input.size(1);\n const auto sequenceLength = input.size(2);\n\n const auto numHeads = weight.size(1);\n const auto filterSize = weight.size(2);\n\n const auto numFiltersInBlock = numFeatures / numHeads;\n const dim3 blocks(minibatch, numFeatures);\n\n auto output = at::zeros_like(input);\n auto stream = at::cuda::getCurrentCUDAStream();\n'
switch = '\n switch(filterSize) {\n'
case_k = '\n case {k}:\n'
main_block = '\n if (padding_l == {pad}) {{\n AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "dynamicconv_forward", ([&] {{\n dynamicconv_forward_kernel<{k}, {b_size}, {pad}, scalar_t>\n <<<blocks, {b_size}, 0, stream>>>(\n input.data<scalar_t>(),\n weight.data<scalar_t>(),\n minibatch,\n sequenceLength,\n numFeatures,\n numFiltersInBlock,\n numHeads,\n output.data<scalar_t>());\n }}));\n }} else\n'
bad_padding = '\n {\n std::cout << "WARNING: Unsupported padding size - skipping forward pass" << std::endl;\n }\n break;\n\n'
end = '\n default:\n std::cout << "WARNING: Unsupported filter length passed - skipping forward pass" << std::endl;\n }\n\n return {output};\n}\n'
with open('dynamicconv_cuda_forward.cu', 'w') as forward:
forward.write(head)
forward.write(switch)
for k in kernels:
b_size = 32
for b in blocks:
if (b > k):
b_size = b
break
forward.write(case_k.format(k=k))
for pad in [(k // 2), (k - 1)]:
forward.write(main_block.format(k=k, b_size=b_size, pad=pad))
forward.write(bad_padding)
forward.write(end)
def gen_backward():
kernels = [3, 5, 7, 15, 31, 63, 127, 255]
thresh = [512, 512, 512, 512, 512, 380, 256, 256]
min_block = [64, 64, 64, 64, 64, 64, 128, 256]
seqs = [(32 * x) for x in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]]
head = '\n/**\n * Copyright (c) Facebook, Inc. and its affiliates.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#include "dynamicconv_cuda.cuh"\n\nstd::vector<at::Tensor> dynamicconv_cuda_backward(at::Tensor gradOutput, int padding_l, at::Tensor input, at::Tensor weight) {\n\n at::DeviceGuard g(input.device());\n const auto minibatch = input.size(0);\n const auto numFeatures = input.size(1);\n const auto sequenceLength = input.size(2);\n\n const auto numHeads = weight.size(1);\n const auto filterSize = weight.size(2);\n\n const auto numFiltersInBlock = numFeatures / numHeads;\n auto numChunks = 1;\n\n auto gradInput = at::zeros_like(input);\n auto gradWeight = at::zeros_like(weight);\n auto stream = at::cuda::getCurrentCUDAStream();\n\n dim3 blocks(minibatch, numHeads, numChunks);\n'
sequence_if = '\n if (sequenceLength < {seq}) {{\n switch(filterSize) {{\n'
case_k = '\n case {k}:\n'
chunks_reset = '\n numChunks = int(ceilf(sequenceLength/float({b_size})));\n blocks = dim3(minibatch, numHeads, numChunks);\n'
main_block = '\n if (padding_l == {p}) {{\n AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), "dynamicconv_backward", ([&] {{\n dynamicconv_backward_kernel<{k}, {b_size}, {p}, scalar_t>\n <<<blocks, {b_size}, 0, stream>>>(\n gradOutput.data<scalar_t>(),\n input.data<scalar_t>(),\n weight.data<scalar_t>(),\n minibatch,\n sequenceLength,\n numFeatures,\n numFiltersInBlock,\n numHeads,\n gradWeight.data<scalar_t>(),\n gradInput.data<scalar_t>());\n }}));\n }} else\n'
bad_padding = '\n {\n std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;\n }\n break;\n\n'
bad_filter = '\n default:\n std::cout << "WARNING: Unsupported filter length passed - skipping backward pass" << std::endl;\n }\n'
con_else = '\n } else\n'
final_else = '\n {\n switch(filterSize) {\n'
last_return = '\n }\n return {gradInput, gradWeight};\n}\n'
with open('dynamicconv_cuda_backward.cu', 'w') as backward:
backward.write(head)
for seq in seqs:
backward.write(sequence_if.format(seq=seq))
for (k, t, m) in zip(kernels, thresh, min_block):
backward.write(case_k.format(k=k))
if (seq <= t):
b_size = seq
else:
b_size = m
backward.write(chunks_reset.format(b_size=b_size))
for p in [(k // 2), (k - 1)]:
backward.write(main_block.format(k=k, b_size=b_size, p=p))
backward.write(bad_padding)
backward.write(bad_filter)
backward.write(con_else)
backward.write(final_else)
for (k, m) in zip(kernels, min_block):
backward.write(case_k.format(k=k))
backward.write(chunks_reset.format(b_size=m))
for p in [(k // 2), (k - 1)]:
backward.write(main_block.format(k=k, b_size=m, p=p))
backward.write(bad_padding)
backward.write(bad_filter)
backward.write(last_return)
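# Usage sketch (not from the original source): how these generators are typically
# invoked. Both functions write the generated CUDA sources into the current working
# directory, next to the dynamicconv kernels, before compilation.
def _example_regenerate_dynamicconv_sources():
    gen_forward()    # writes dynamicconv_cuda_forward.cu
    gen_backward()   # writes dynamicconv_cuda_backward.cu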
def gelu_accurate(x):
if (not hasattr(gelu_accurate, '_a')):
gelu_accurate._a = math.sqrt((2 / math.pi))
return ((0.5 * x) * (1 + torch.tanh((gelu_accurate._a * (x + (0.044715 * torch.pow(x, 3)))))))
def gelu(x: torch.Tensor) -> torch.Tensor:
if hasattr(torch.nn.functional, 'gelu'):
return torch.nn.functional.gelu(x.float()).type_as(x)
else:
return ((x * 0.5) * (1.0 + torch.erf((x / math.sqrt(2.0)))))
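# Quick check (not from the original source): the tanh-based gelu_accurate() and the
# erf-based gelu() above agree closely on typical inputs; the tolerance below is an
# assumption of this sketch, not a documented bound.
def _example_compare_gelu():
    import torch
    x = torch.linspace(-4.0, 4.0, steps=81)
    return torch.allclose(gelu(x), gelu_accurate(x), atol=1e-2)   # expected: True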
class GradMultiply(torch.autograd.Function):
@staticmethod
def forward(ctx, x, scale):
ctx.scale = scale
res = x.new(x)
return res
@staticmethod
def backward(ctx, grad):
return ((grad * ctx.scale), None)
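# Usage sketch (not from the original source): GradMultiply is an identity in the
# forward pass but scales the gradient flowing back into x, which fairseq uses e.g.
# to damp encoder gradients.
def _example_grad_multiply():
    import torch
    x = torch.ones(3, requires_grad=True)
    y = GradMultiply.apply(x, 0.5)
    y.sum().backward()
    return x.grad   # tensor([0.5, 0.5, 0.5])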
class Highway(torch.nn.Module):
'\n A `Highway layer <https://arxiv.org/abs/1505.00387>`_.\n Adapted from the AllenNLP implementation.\n '
def __init__(self, input_dim: int, num_layers: int=1):
super(Highway, self).__init__()
self.input_dim = input_dim
self.layers = nn.ModuleList([nn.Linear(input_dim, (input_dim * 2)) for _ in range(num_layers)])
self.activation = nn.ReLU()
self.reset_parameters()
def reset_parameters(self):
for layer in self.layers:
nn.init.constant_(layer.bias[self.input_dim:], 1)
nn.init.constant_(layer.bias[:self.input_dim], 0)
nn.init.xavier_normal_(layer.weight)
def forward(self, x: torch.Tensor):
for layer in self.layers:
projection = layer(x)
(proj_x, gate) = projection.chunk(2, dim=(- 1))
proj_x = self.activation(proj_x)
gate = torch.sigmoid(gate)
x = ((gate * x) + ((gate.new_tensor([1]) - gate) * proj_x))
return x
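# Usage sketch (not from the original source): with the gate bias initialized to 1,
# a freshly constructed Highway layer starts close to the identity mapping.
def _example_highway():
    import torch
    layer = Highway(input_dim=16, num_layers=2)
    x = torch.randn(4, 16)
    y = layer(x)   # same shape as x: (4, 16)
    return y.shape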
def LayerNorm(normalized_shape, eps=1e-05, elementwise_affine=True, export=False):
if ((not export) and torch.cuda.is_available()):
try:
from apex.normalization import FusedLayerNorm
return FusedLayerNorm(normalized_shape, eps, elementwise_affine)
except ImportError:
pass
return torch.nn.LayerNorm(normalized_shape, eps, elementwise_affine)
class LearnedPositionalEmbedding(nn.Embedding):
'\n This module learns positional embeddings up to a fixed maximum size.\n Padding ids are ignored by either offsetting based on padding_idx\n or by setting padding_idx to None and ensuring that the appropriate\n position ids are passed to the forward function.\n '
def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int):
super().__init__(num_embeddings, embedding_dim, padding_idx)
self.onnx_trace = False
def forward(self, input, incremental_state=None, positions=None):
'Input is expected to be of size [bsz x seqlen].'
assert ((positions is None) or (self.padding_idx is None)), 'If positions is pre-computed then padding_idx should not be set.'
if (positions is None):
if (incremental_state is not None):
positions = input.data.new(1, 1).fill_(int((self.padding_idx + input.size(1))))
else:
positions = utils.make_positions(input, self.padding_idx, onnx_trace=self.onnx_trace)
return super().forward(positions)
def max_positions(self):
'Maximum number of supported positions.'
if (self.padding_idx is not None):
return ((self.num_embeddings - self.padding_idx) - 1)
else:
return self.num_embeddings
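# Usage sketch (not from the original source), assuming fairseq's utils.make_positions
# is importable as in this module: positions are offset by padding_idx + 1, so the
# embedding table needs max_positions + padding_idx + 1 rows.
def _example_learned_positional_embedding():
    import torch
    padding_idx = 1
    max_positions = 128
    pos_emb = LearnedPositionalEmbedding(max_positions + padding_idx + 1, 16, padding_idx)
    tokens = torch.tensor([[5, 6, 7, padding_idx]])   # (bsz, seqlen) with a trailing pad
    out = pos_emb(tokens)                             # (1, 4, 16); the pad position maps to padding_idx
    return out.shape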
def gen_forward():
kernels = [3, 5, 7, 15, 31, 63, 127, 255]
seqs = [(32 * x) for x in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]]
head = '\n/**\n * Copyright (c) Facebook, Inc. and its affiliates.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#include "lightconv_cuda.cuh"\n\nstd::vector<at::Tensor> lightconv_cuda_forward(at::Tensor input, at::Tensor filters, int padding_l) {\n\n at::DeviceGuard g(input.device());\n const auto minibatch = input.size(0);\n const auto numFeatures = input.size(1);\n const auto sequenceLength = input.size(2);\n\n const auto numHeads = filters.size(0);\n const auto filterSize = filters.size(1);\n\n const auto numFiltersInBlock = numFeatures / numHeads;\n\n const dim3 blocks(minibatch, numFeatures);\n\n auto output = at::zeros_like(input);\n auto stream = at::cuda::getCurrentCUDAStream();\n'
sequence_if = '\n if (sequenceLength <= {seq}) {{\n switch(filterSize) {{\n'
case_k = '\n case {k}:\n'
main_block = '\n if (padding_l == {pad}) {{\n AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "lightconv_forward", ([&] {{\n lightconv_forward_kernel<{k}, {b_size}, {pad}, scalar_t>\n <<<blocks, {b_size}, 0, stream>>>(\n input.data<scalar_t>(),\n filters.data<scalar_t>(),\n minibatch,\n sequenceLength,\n numFeatures,\n numFiltersInBlock,\n output.data<scalar_t>());\n }}));\n }} else\n'
bad_padding = '\n {\n std::cout << "WARNING: Unsupported padding size - skipping forward pass" << std::endl;\n }\n break;\n'
bad_filter = '\n default:\n std::cout << "WARNING: Unsupported filter length passed - skipping forward pass" << std::endl;\n }\n'
con_else = '\n } else\n'
final_else = '\n {\n switch(filterSize) {\n'
final_return = '\n }\n\n return {output};\n}\n'
with open('lightconv_cuda_forward.cu', 'w') as forward:
forward.write(head)
for seq in seqs:
forward.write(sequence_if.format(seq=seq))
for k in kernels:
forward.write(case_k.format(k=k))
for pad in [(k // 2), (k - 1)]:
forward.write(main_block.format(k=k, b_size=seq, pad=pad))
forward.write(bad_padding)
forward.write(bad_filter)
forward.write(con_else)
forward.write(final_else)
for k in kernels:
forward.write(case_k.format(k=k))
for pad in [(k // 2), (k - 1)]:
forward.write(main_block.format(k=k, b_size=seq, pad=pad))
forward.write(bad_padding)
forward.write(bad_filter)
forward.write(final_return)
def gen_backward():
head = '\n/**\n * Copyright (c) Facebook, Inc. and its affiliates.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#include "lightconv_cuda.cuh"\n\nstd::vector<at::Tensor> lightconv_cuda_backward(\n at::Tensor gradOutput,\n int padding_l,\n at::Tensor input,\n at::Tensor filters) {\n\n // gradWrtInput\n const int minibatch = input.size(0);\n const int numFeatures = input.size(1);\n const int sequenceLength = input.size(2);\n\n const int numHeads = filters.size(0);\n const int filterSize = filters.size(1);\n\n const dim3 gradBlocks(minibatch, numFeatures);\n const dim3 weightGradFirstpassShortBlocks(minibatch, numHeads);\n const dim3 weightGradSecondpassBlocks(numHeads, filterSize);\n\n const int numFiltersInBlock = numFeatures / numHeads;\n\n auto gradInput = at::zeros_like(input);\n auto gradFilters = at::zeros_like(filters);\n\n at::DeviceGuard g(input.device());\n auto stream = at::cuda::getCurrentCUDAStream();\n\n switch(filterSize) {\n'
sequence_if = '\n if (sequenceLength <= {seq}) {{\n'
case_k = '\n case {k}:\n'
main_block = '\n if (padding_l == {p}) {{\n AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "lightconv_backward", ([&] {{\n lightconv_grad_wrt_input_kernel<{k}, {b_size}, {p}, scalar_t>\n <<<gradBlocks, {b_size}, 0, stream>>>(\n gradOutput.data<scalar_t>(),\n filters.data<scalar_t>(),\n minibatch,\n sequenceLength,\n numFeatures,\n numFiltersInBlock,\n gradInput.data<scalar_t>());\n\n'
weight_grad_short = '\n at::Tensor tempSumGradFilters = at::zeros({{minibatch, numHeads, filterSize}}, input.options().dtype(at::kFloat));\n lightconv_grad_wrt_weights_firstpass_short_kernel<{k}, {b_size}, {p}, scalar_t>\n <<<weightGradFirstpassShortBlocks, {b_size}, 0, stream>>>(\n input.data<scalar_t>(),\n gradOutput.data<scalar_t>(),\n minibatch,\n sequenceLength,\n numFeatures,\n numFiltersInBlock,\n numHeads,\n tempSumGradFilters.data<float>()\n );\n\n lightconv_grad_wrt_weights_secondpass_short_kernel<{k}, {b_size}, scalar_t>\n <<<weightGradSecondpassBlocks, {b_size}, 0, stream>>>(\n tempSumGradFilters.data<float>(),\n minibatch,\n numFiltersInBlock,\n gradFilters.data<scalar_t>()\n );\n }}));\n }} else\n'
weight_grad = '\n at::Tensor tempSumGradFilters = at::zeros({{minibatch, numFeatures, filterSize}}, input.options().dtype(at::kFloat));\n lightconv_grad_wrt_weights_firstpass_kernel<{k}, {b_size}, {p}, scalar_t>\n <<<gradBlocks, {b_size}, 0, stream>>>(\n input.data<scalar_t>(),\n gradOutput.data<scalar_t>(),\n minibatch,\n sequenceLength,\n numFeatures,\n numFiltersInBlock,\n tempSumGradFilters.data<float>()\n );\n\n lightconv_grad_wrt_weights_secondpass_kernel<{k}, {b_size}, scalar_t>\n <<<weightGradSecondpassBlocks, {b_size}, 0, stream>>>(\n tempSumGradFilters.data<float>(),\n minibatch,\n numFiltersInBlock,\n gradFilters.data<scalar_t>()\n );\n }}));\n }} else\n'
bad_padding = '\n {\n std::cout << "WARNING: Unsupported padding size - skipping backward pass" << std::endl;\n }\n'
breakout = '\n break;\n'
bad_filter = '\n default:\n std::cout << "WARNING: Unsupported filter length passed - skipping backward pass" << std::endl;\n'
con_else = '\n } else\n'
final_else = '\n {\n switch(filterSize) {\n'
last_return = '\n }\n return {gradInput, gradFilters};\n}\n'
kernels = [3, 5, 7, 15, 31, 63, 127, 255]
seqs = [(32 * x) for x in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]]
thresh = [32, 32, 64, 128, 256, (- 1), (- 1), (- 1)]
max_mem = [(- 1), (- 1), (- 1), (- 1), (- 1), 192, 96, 64]
with open('lightconv_cuda_backward.cu', 'w') as backward:
backward.write(head)
for (k, t, mem) in zip(kernels, thresh, max_mem):
backward.write(case_k.format(k=k))
for seq in seqs:
if (((t == (- 1)) or (seq <= t)) and ((mem == (- 1)) or (seq < mem))):
backward.write(sequence_if.format(seq=seq))
for p in [(k // 2), (k - 1)]:
backward.write(main_block.format(k=k, b_size=seq, p=p))
backward.write(weight_grad_short.format(k=k, b_size=seq, p=p))
backward.write(bad_padding)
else:
for p in [(k // 2), (k - 1)]:
backward.write(main_block.format(k=k, b_size=32, p=p))
backward.write(weight_grad.format(k=k, b_size=32, p=p))
backward.write(bad_padding)
backward.write(breakout)
break
backward.write(con_else)
backward.write(bad_filter)
backward.write(last_return)
class LogSumExpMoE(torch.autograd.Function):
'Standard LogSumExp forward pass, but use *posterior* for the backward.\n\n See `"Mixture Models for Diverse Machine Translation: Tricks of the Trade"\n (Shen et al., 2019) <https://arxiv.org/abs/1902.07816>`_.\n '
@staticmethod
def forward(ctx, logp, posterior, dim=(- 1)):
ctx.save_for_backward(posterior)
ctx.dim = dim
return torch.logsumexp(logp, dim=dim)
@staticmethod
def backward(ctx, grad_output):
(posterior,) = ctx.saved_tensors
grad_logp = (grad_output.unsqueeze(ctx.dim) * posterior)
return (grad_logp, None, None)
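# Usage sketch (not from the original source): the forward pass is a plain logsumexp
# over the expert dimension, but the backward pass routes gradient to each expert
# according to the supplied posterior instead of the softmax of logp.
def _example_logsumexp_moe():
    import torch
    logp = torch.randn(2, 4, requires_grad=True)           # per-expert log-probs
    posterior = torch.softmax(torch.randn(2, 4), dim=-1)    # externally computed responsibilities
    out = LogSumExpMoE.apply(logp, posterior, -1)           # shape (2,)
    out.sum().backward()
    assert torch.allclose(logp.grad, posterior)
    return out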
@register_model('masked_lm')
class MaskedLMModel(BaseFairseqModel):
'\n Class for training a Masked Language Model. It also supports an\n additional sentence level prediction if the sent-loss argument is set.\n '
def __init__(self, args, encoder):
super().__init__()
self.args = args
self.encoder = encoder
if getattr(args, 'apply_bert_init', False):
self.apply(init_bert_params)
@staticmethod
def add_args(parser):
'Add model-specific arguments to the parser.'
parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability')
parser.add_argument('--attention-dropout', type=float, metavar='D', help='dropout probability for attention weights')
parser.add_argument('--act-dropout', type=float, metavar='D', help='dropout probability after activation in FFN')
parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N', help='encoder embedding dimension for FFN')
parser.add_argument('--encoder-layers', type=int, metavar='N', help='num encoder layers')
parser.add_argument('--encoder-attention-heads', type=int, metavar='N', help='num encoder attention heads')
parser.add_argument('--bias-kv', action='store_true', help='if set, adding a learnable bias kv')
parser.add_argument('--zero-attn', action='store_true', help='if set, pads attn with zero')
parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension')
parser.add_argument('--share-encoder-input-output-embed', action='store_true', help='share encoder input and output embeddings')
parser.add_argument('--encoder-learned-pos', action='store_true', help='use learned positional embeddings in the encoder')
parser.add_argument('--no-token-positional-embeddings', action='store_true', help='if set, disables positional embeddings (outside self attention)')
parser.add_argument('--num-segment', type=int, metavar='N', help='num segment in the input')
parser.add_argument('--sentence-class-num', type=int, metavar='N', help='number of classes for sentence task')
parser.add_argument('--sent-loss', action='store_true', help='if set, calculate sentence level predictions')
parser.add_argument('--apply-bert-init', action='store_true', help='use custom param initialization for BERT')
parser.add_argument('--activation-fn', choices=utils.get_available_activation_fns(), help='activation function to use')
parser.add_argument('--pooler-activation-fn', choices=utils.get_available_activation_fns(), help='Which activation function to use for pooler layer.')
parser.add_argument('--encoder-normalize-before', action='store_true', help='apply layernorm before each encoder block')
def forward(self, src_tokens, segment_labels=None, **kwargs):
return self.encoder(src_tokens, segment_labels=segment_labels, **kwargs)
def max_positions(self):
return self.encoder.max_positions
@classmethod
def build_model(cls, args, task):
'Build a new model instance.'
base_architecture(args)
if (not hasattr(args, 'max_positions')):
args.max_positions = args.tokens_per_sample
print('Model args: ', args)
encoder = MaskedLMEncoder(args, task.dictionary)
return cls(args, encoder)
class MaskedLMEncoder(FairseqEncoder):
'\n Encoder for Masked Language Modelling.\n '
def __init__(self, args, dictionary):
super().__init__(dictionary)
self.padding_idx = dictionary.pad()
self.vocab_size = dictionary.__len__()
self.max_positions = args.max_positions
self.sentence_encoder = TransformerSentenceEncoder(padding_idx=self.padding_idx, vocab_size=self.vocab_size, num_encoder_layers=args.encoder_layers, embedding_dim=args.encoder_embed_dim, ffn_embedding_dim=args.encoder_ffn_embed_dim, num_attention_heads=args.encoder_attention_heads, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.act_dropout, max_seq_len=self.max_positions, num_segments=args.num_segment, use_position_embeddings=(not args.no_token_positional_embeddings), encoder_normalize_before=args.encoder_normalize_before, apply_bert_init=args.apply_bert_init, activation_fn=args.activation_fn, learned_pos_embedding=args.encoder_learned_pos, add_bias_kv=args.bias_kv, add_zero_attn=args.zero_attn)
self.share_input_output_embed = args.share_encoder_input_output_embed
self.embed_out = None
self.sentence_projection_layer = None
self.sentence_out_dim = args.sentence_class_num
self.lm_output_learned_bias = None
self.load_softmax = (not getattr(args, 'remove_head', False))
self.masked_lm_pooler = nn.Linear(args.encoder_embed_dim, args.encoder_embed_dim)
self.pooler_activation = utils.get_activation_fn(args.pooler_activation_fn)
self.lm_head_transform_weight = nn.Linear(args.encoder_embed_dim, args.encoder_embed_dim)
self.activation_fn = utils.get_activation_fn(args.activation_fn)
self.layer_norm = LayerNorm(args.encoder_embed_dim)
self.lm_output_learned_bias = None
if self.load_softmax:
self.lm_output_learned_bias = nn.Parameter(torch.zeros(self.vocab_size))
if (not self.share_input_output_embed):
self.embed_out = nn.Linear(args.encoder_embed_dim, self.vocab_size, bias=False)
if args.sent_loss:
self.sentence_projection_layer = nn.Linear(args.encoder_embed_dim, self.sentence_out_dim, bias=False)
def forward(self, src_tokens, segment_labels=None, **unused):
"\n Forward pass for Masked LM encoder. This first computes the token\n embedding using the token embedding matrix, position embeddings (if\n specified) and segment embeddings (if specified).\n\n Here we assume that the sentence representation corresponds to the\n output of the classification_token (see bert_task or cross_lingual_lm\n task for more details).\n Args:\n - src_tokens: B x T matrix representing sentences\n - segment_labels: B x T matrix representing segment label for tokens\n Returns:\n - a tuple of the following:\n - logits for predictions in format B x T x C to be used in\n softmax afterwards\n - a dictionary of additional data, where 'pooled_output' contains\n the representation for classification_token and 'inner_states'\n is a list of internal model states used to compute the\n predictions (similar in ELMO). 'sentence_logits'\n is the prediction logit for NSP task and is only computed if\n this is specified in the input arguments.\n "
(inner_states, sentence_rep) = self.sentence_encoder(src_tokens, segment_labels=segment_labels)
x = inner_states[(- 1)].transpose(0, 1)
x = self.layer_norm(self.activation_fn(self.lm_head_transform_weight(x)))
pooled_output = self.pooler_activation(self.masked_lm_pooler(sentence_rep))
if (self.share_input_output_embed and hasattr(self.sentence_encoder.embed_tokens, 'weight')):
x = F.linear(x, self.sentence_encoder.embed_tokens.weight)
elif (self.embed_out is not None):
x = self.embed_out(x)
if (self.lm_output_learned_bias is not None):
x = (x + self.lm_output_learned_bias)
sentence_logits = None
if self.sentence_projection_layer:
sentence_logits = self.sentence_projection_layer(pooled_output)
return (x, {'inner_states': inner_states, 'pooled_output': pooled_output, 'sentence_logits': sentence_logits})
def max_positions(self):
'Maximum output length supported by the encoder.'
return self.max_positions
def upgrade_state_dict_named(self, state_dict, name):
if isinstance(self.sentence_encoder.embed_positions, SinusoidalPositionalEmbedding):
state_dict[(name + '.sentence_encoder.embed_positions._float_tensor')] = torch.FloatTensor(1)
if (not self.load_softmax):
for k in list(state_dict.keys()):
if (('embed_out.weight' in k) or ('sentence_projection_layer.weight' in k) or ('lm_output_learned_bias' in k)):
del state_dict[k]
return state_dict
@register_model_architecture('masked_lm', 'masked_lm')
def base_architecture(args):
args.dropout = getattr(args, 'dropout', 0.1)
args.attention_dropout = getattr(args, 'attention_dropout', 0.1)
args.act_dropout = getattr(args, 'act_dropout', 0.0)
args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4096)
args.encoder_layers = getattr(args, 'encoder_layers', 6)
args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 8)
args.bias_kv = getattr(args, 'bias_kv', False)
args.zero_attn = getattr(args, 'zero_attn', False)
args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024)
args.share_encoder_input_output_embed = getattr(args, 'share_encoder_input_output_embed', False)
args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', False)
args.no_token_positional_embeddings = getattr(args, 'no_token_positional_embeddings', False)
args.num_segment = getattr(args, 'num_segment', 2)
args.sentence_class_num = getattr(args, 'sentence_class_num', 2)
args.sent_loss = getattr(args, 'sent_loss', False)
args.apply_bert_init = getattr(args, 'apply_bert_init', False)
args.activation_fn = getattr(args, 'activation_fn', 'relu')
args.pooler_activation_fn = getattr(args, 'pooler_activation_fn', 'tanh')
args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False)
|
@register_model_architecture('masked_lm', 'bert_base')
def bert_base_architecture(args):
args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 768)
args.share_encoder_input_output_embed = getattr(args, 'share_encoder_input_output_embed', True)
args.no_token_positional_embeddings = getattr(args, 'no_token_positional_embeddings', False)
args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', True)
args.num_segment = getattr(args, 'num_segment', 2)
args.encoder_layers = getattr(args, 'encoder_layers', 12)
args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 12)
args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 3072)
args.bias_kv = getattr(args, 'bias_kv', False)
args.zero_attn = getattr(args, 'zero_attn', False)
args.sentence_class_num = getattr(args, 'sentence_class_num', 2)
args.sent_loss = getattr(args, 'sent_loss', True)
args.apply_bert_init = getattr(args, 'apply_bert_init', True)
args.activation_fn = getattr(args, 'activation_fn', 'gelu')
args.pooler_activation_fn = getattr(args, 'pooler_activation_fn', 'tanh')
args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', True)
base_architecture(args)
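# Hypothetical usage sketch (not part of the original source): the architecture
# functions above only fill in defaults via getattr, so any flag the user already
# set on the namespace is preserved and everything else falls back to the preset.
import argparse
_args = argparse.Namespace()
_args.encoder_layers = 4  # pretend this was overridden on the command line
bert_base_architecture(_args)
assert _args.encoder_layers == 4  # user-provided value kept
assert _args.encoder_embed_dim == 768  # default filled in by the preset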
|
@register_model_architecture('masked_lm', 'bert_large')
def bert_large_architecture(args):
args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024)
args.encoder_layers = getattr(args, 'encoder_layers', 24)
args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 16)
args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4096)
bert_base_architecture(args)
|
@register_model_architecture('masked_lm', 'xlm_base')
def xlm_architecture(args):
args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024)
args.share_encoder_input_output_embed = getattr(args, 'share_encoder_input_output_embed', True)
args.no_token_positional_embeddings = getattr(args, 'no_token_positional_embeddings', False)
args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', True)
args.num_segment = getattr(args, 'num_segment', 1)
args.encoder_layers = getattr(args, 'encoder_layers', 6)
args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 8)
args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4096)
args.bias_kv = getattr(args, 'bias_kv', False)
args.zero_attn = getattr(args, 'zero_attn', False)
args.sent_loss = getattr(args, 'sent_loss', False)
args.activation_fn = getattr(args, 'activation_fn', 'gelu')
args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False)
args.pooler_activation_fn = getattr(args, 'pooler_activation_fn', 'tanh')
args.apply_bert_init = getattr(args, 'apply_bert_init', True)
base_architecture(args)
|
class MeanPoolGatingNetwork(torch.nn.Module):
"A simple mean-pooling gating network for selecting experts.\n\n This module applies mean pooling over an encoder's output and returns\n reponsibilities for each expert. The encoder format is expected to match\n :class:`fairseq.models.transformer.TransformerEncoder`.\n "
def __init__(self, embed_dim, num_experts, dropout=None):
super().__init__()
self.embed_dim = embed_dim
self.num_experts = num_experts
self.fc1 = torch.nn.Linear(embed_dim, embed_dim)
self.dropout = (torch.nn.Dropout(dropout) if (dropout is not None) else None)
self.fc2 = torch.nn.Linear(embed_dim, num_experts)
def forward(self, encoder_out):
if (not (isinstance(encoder_out, dict) and ('encoder_out' in encoder_out) and ('encoder_padding_mask' in encoder_out) and (encoder_out['encoder_out'].size(2) == self.embed_dim))):
raise ValueError('Unexpected format for encoder_out')
encoder_padding_mask = encoder_out['encoder_padding_mask']
encoder_out = encoder_out['encoder_out'].transpose(0, 1)
if (encoder_padding_mask is not None):
encoder_out = encoder_out.clone()
encoder_out[encoder_padding_mask] = 0
ntokens = torch.sum((~ encoder_padding_mask), dim=1, keepdim=True)
x = (torch.sum(encoder_out, dim=1) / ntokens.type_as(encoder_out))
else:
x = torch.mean(encoder_out, dim=1)
x = torch.tanh(self.fc1(x))
if (self.dropout is not None):
x = self.dropout(x)
x = self.fc2(x)
return F.log_softmax(x, dim=(- 1), dtype=torch.float32).type_as(x)
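# Hypothetical usage sketch (not part of the original source): _gate, _enc_out and
# the sizes below are made up; the dict mimics the TransformerEncoder output format.
_gate = MeanPoolGatingNetwork(embed_dim=8, num_experts=4, dropout=0.1)
_enc_out = {'encoder_out': torch.randn(5, 2, 8), 'encoder_padding_mask': torch.zeros(2, 5).bool()}
_log_resp = _gate(_enc_out)  # (batch, num_experts) log-responsibilities over experts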
|
class MultiheadAttention(nn.Module):
'Multi-headed attention.\n\n See "Attention Is All You Need" for more details.\n '
def __init__(self, embed_dim, num_heads, kdim=None, vdim=None, dropout=0.0, bias=True, add_bias_kv=False, add_zero_attn=False, self_attention=False, encoder_decoder_attention=False):
super().__init__()
self.embed_dim = embed_dim
self.kdim = (kdim if (kdim is not None) else embed_dim)
self.vdim = (vdim if (vdim is not None) else embed_dim)
self.qkv_same_dim = ((self.kdim == embed_dim) and (self.vdim == embed_dim))
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = (embed_dim // num_heads)
assert ((self.head_dim * num_heads) == self.embed_dim), 'embed_dim must be divisible by num_heads'
self.scaling = (self.head_dim ** (- 0.5))
self.self_attention = self_attention
self.encoder_decoder_attention = encoder_decoder_attention
assert ((not self.self_attention) or self.qkv_same_dim), 'Self-attention requires query, key and value to be of the same size'
self.k_proj = nn.Linear(self.kdim, embed_dim, bias=bias)
self.v_proj = nn.Linear(self.vdim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
if add_bias_kv:
self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self.reset_parameters()
self.onnx_trace = False
if hasattr(F, 'multi_head_attention_forward'):
self.enable_torch_version = True
else:
self.enable_torch_version = False
def prepare_for_onnx_export_(self):
self.onnx_trace = True
def reset_parameters(self):
if self.qkv_same_dim:
nn.init.xavier_uniform_(self.k_proj.weight, gain=(1 / math.sqrt(2)))
nn.init.xavier_uniform_(self.v_proj.weight, gain=(1 / math.sqrt(2)))
nn.init.xavier_uniform_(self.q_proj.weight, gain=(1 / math.sqrt(2)))
else:
nn.init.xavier_uniform_(self.k_proj.weight)
nn.init.xavier_uniform_(self.v_proj.weight)
nn.init.xavier_uniform_(self.q_proj.weight)
nn.init.xavier_uniform_(self.out_proj.weight)
nn.init.constant_(self.out_proj.bias, 0.0)
if (self.bias_k is not None):
nn.init.xavier_normal_(self.bias_k)
if (self.bias_v is not None):
nn.init.xavier_normal_(self.bias_v)
def forward(self, query, key, value, key_padding_mask=None, incremental_state=None, need_weights=True, static_kv=False, attn_mask=None, before_softmax=False, need_head_weights=False):
'Input shape: Time x Batch x Channel\n\n Args:\n key_padding_mask (ByteTensor, optional): mask to exclude\n keys that are pads, of shape `(batch, src_len)`, where\n padding elements are indicated by 1s.\n need_weights (bool, optional): return the attention weights,\n averaged over heads (default: True).\n attn_mask (ByteTensor, optional): typically used to\n implement causal attention, where the mask prevents the\n attention from looking forward in time (default: None).\n before_softmax (bool, optional): return the raw attention\n weights and values before the attention softmax.\n need_head_weights (bool, optional): return the attention\n weights for each head. Implies *need_weights*. Default:\n return the average attention weights over all heads.\n '
if need_head_weights:
need_weights = True
(tgt_len, bsz, embed_dim) = query.size()
assert (embed_dim == self.embed_dim)
assert (list(query.size()) == [tgt_len, bsz, embed_dim])
if (self.enable_torch_version and (not self.onnx_trace) and (incremental_state is None) and (not static_kv)):
return F.multi_head_attention_forward(query, key, value, self.embed_dim, self.num_heads, torch.empty([0]), torch.cat((self.q_proj.bias, self.k_proj.bias, self.v_proj.bias)), self.bias_k, self.bias_v, self.add_zero_attn, self.dropout, self.out_proj.weight, self.out_proj.bias, self.training, key_padding_mask, need_weights, attn_mask, use_separate_proj_weight=True, q_proj_weight=self.q_proj.weight, k_proj_weight=self.k_proj.weight, v_proj_weight=self.v_proj.weight)
if (incremental_state is not None):
saved_state = self._get_input_buffer(incremental_state)
if ('prev_key' in saved_state):
if static_kv:
assert (self.encoder_decoder_attention and (not self.self_attention))
key = value = None
else:
saved_state = None
if self.self_attention:
q = self.q_proj(query)
k = self.k_proj(query)
v = self.v_proj(query)
elif self.encoder_decoder_attention:
q = self.q_proj(query)
if (key is None):
assert (value is None)
k = v = None
else:
k = self.k_proj(key)
v = self.v_proj(key)
else:
q = self.q_proj(query)
k = self.k_proj(key)
v = self.v_proj(value)
q *= self.scaling
if (self.bias_k is not None):
assert (self.bias_v is not None)
k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])
v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])
if (attn_mask is not None):
attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1)
if (key_padding_mask is not None):
key_padding_mask = torch.cat([key_padding_mask, key_padding_mask.new_zeros(key_padding_mask.size(0), 1)], dim=1)
q = q.contiguous().view(tgt_len, (bsz * self.num_heads), self.head_dim).transpose(0, 1)
if (k is not None):
k = k.contiguous().view((- 1), (bsz * self.num_heads), self.head_dim).transpose(0, 1)
if (v is not None):
v = v.contiguous().view((- 1), (bsz * self.num_heads), self.head_dim).transpose(0, 1)
if (saved_state is not None):
if ('prev_key' in saved_state):
prev_key = saved_state['prev_key'].view((bsz * self.num_heads), (- 1), self.head_dim)
if static_kv:
k = prev_key
else:
k = torch.cat((prev_key, k), dim=1)
if ('prev_value' in saved_state):
prev_value = saved_state['prev_value'].view((bsz * self.num_heads), (- 1), self.head_dim)
if static_kv:
v = prev_value
else:
v = torch.cat((prev_value, v), dim=1)
key_padding_mask = self._append_prev_key_padding_mask(key_padding_mask=key_padding_mask, prev_key_padding_mask=saved_state.get('prev_key_padding_mask', None), batch_size=bsz, src_len=k.size(1), static_kv=static_kv)
saved_state['prev_key'] = k.view(bsz, self.num_heads, (- 1), self.head_dim)
saved_state['prev_value'] = v.view(bsz, self.num_heads, (- 1), self.head_dim)
saved_state['prev_key_padding_mask'] = key_padding_mask
self._set_input_buffer(incremental_state, saved_state)
src_len = k.size(1)
if ((key_padding_mask is not None) and (key_padding_mask.shape == torch.Size([]))):
key_padding_mask = None
if (key_padding_mask is not None):
assert (key_padding_mask.size(0) == bsz)
assert (key_padding_mask.size(1) == src_len)
if self.add_zero_attn:
src_len += 1
k = torch.cat([k, k.new_zeros(((k.size(0), 1) + k.size()[2:]))], dim=1)
v = torch.cat([v, v.new_zeros(((v.size(0), 1) + v.size()[2:]))], dim=1)
if (attn_mask is not None):
attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1)
if (key_padding_mask is not None):
key_padding_mask = torch.cat([key_padding_mask, torch.zeros(key_padding_mask.size(0), 1).type_as(key_padding_mask)], dim=1)
attn_weights = torch.bmm(q, k.transpose(1, 2))
attn_weights = self.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz)
assert (list(attn_weights.size()) == [(bsz * self.num_heads), tgt_len, src_len])
if (attn_mask is not None):
attn_mask = attn_mask.unsqueeze(0)
if self.onnx_trace:
attn_mask = attn_mask.repeat(attn_weights.size(0), 1, 1)
attn_weights += attn_mask
if (key_padding_mask is not None):
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights.masked_fill(key_padding_mask.unsqueeze(1).unsqueeze(2), float('-inf'))
attn_weights = attn_weights.view((bsz * self.num_heads), tgt_len, src_len)
if before_softmax:
return (attn_weights, v)
attn_weights_float = utils.softmax(attn_weights, dim=(- 1), onnx_trace=self.onnx_trace)
attn_weights = attn_weights_float.type_as(attn_weights)
attn_probs = F.dropout(attn_weights_float.type_as(attn_weights), p=self.dropout, training=self.training)
attn = torch.bmm(attn_probs, v)
assert (list(attn.size()) == [(bsz * self.num_heads), tgt_len, self.head_dim])
if (self.onnx_trace and (attn.size(1) == 1)):
attn = attn.contiguous().view(tgt_len, bsz, embed_dim)
else:
attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
attn = self.out_proj(attn)
if need_weights:
attn_weights = attn_weights_float.view(bsz, self.num_heads, tgt_len, src_len).transpose(1, 0)
if (not need_head_weights):
attn_weights = attn_weights.mean(dim=0)
else:
attn_weights = None
return (attn, attn_weights)
@staticmethod
def _append_prev_key_padding_mask(key_padding_mask, prev_key_padding_mask, batch_size, src_len, static_kv):
if ((prev_key_padding_mask is not None) and static_kv):
key_padding_mask = prev_key_padding_mask
elif ((prev_key_padding_mask is not None) and (key_padding_mask is not None)):
key_padding_mask = torch.cat((prev_key_padding_mask, key_padding_mask), dim=1)
elif (prev_key_padding_mask is not None):
filler = torch.zeros(batch_size, (src_len - prev_key_padding_mask.size(1))).bool()
if prev_key_padding_mask.is_cuda:
filler = filler.cuda()
key_padding_mask = torch.cat((prev_key_padding_mask, filler), dim=1)
elif (key_padding_mask is not None):
filler = torch.zeros(batch_size, (src_len - key_padding_mask.size(1))).bool()
if key_padding_mask.is_cuda:
filler = filler.cuda()
key_padding_mask = torch.cat((filler, key_padding_mask), dim=1)
return key_padding_mask
def reorder_incremental_state(self, incremental_state, new_order):
'Reorder buffered internal state (for incremental generation).'
input_buffer = self._get_input_buffer(incremental_state)
if (input_buffer is not None):
for k in input_buffer.keys():
if (input_buffer[k] is not None):
input_buffer[k] = input_buffer[k].index_select(0, new_order)
self._set_input_buffer(incremental_state, input_buffer)
def _get_input_buffer(self, incremental_state):
return (utils.get_incremental_state(self, incremental_state, 'attn_state') or {})
def _set_input_buffer(self, incremental_state, buffer):
utils.set_incremental_state(self, incremental_state, 'attn_state', buffer)
def apply_sparse_mask(self, attn_weights, tgt_len, src_len, bsz):
return attn_weights
def upgrade_state_dict_named(self, state_dict, name):
prefix = ((name + '.') if (name != '') else '')
items_to_add = {}
keys_to_remove = []
for k in state_dict.keys():
if k.endswith((prefix + 'in_proj_weight')):
dim = int((state_dict[k].shape[0] / 3))
items_to_add[(prefix + 'q_proj.weight')] = state_dict[k][:dim]
items_to_add[(prefix + 'k_proj.weight')] = state_dict[k][dim:(2 * dim)]
items_to_add[(prefix + 'v_proj.weight')] = state_dict[k][(2 * dim):]
keys_to_remove.append(k)
k_bias = (prefix + 'in_proj_bias')
if (k_bias in state_dict.keys()):
dim = int((state_dict[k].shape[0] / 3))
items_to_add[(prefix + 'q_proj.bias')] = state_dict[k_bias][:dim]
items_to_add[(prefix + 'k_proj.bias')] = state_dict[k_bias][dim:(2 * dim)]
items_to_add[(prefix + 'v_proj.bias')] = state_dict[k_bias][(2 * dim):]
keys_to_remove.append((prefix + 'in_proj_bias'))
for k in keys_to_remove:
del state_dict[k]
for (key, value) in items_to_add.items():
state_dict[key] = value
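# Hypothetical usage sketch (not part of the original source): plain self-attention
# over a (time, batch, channel) input; with need_weights=True the second return
# value is the attention averaged over heads, shaped (batch, tgt_len, src_len).
_mha = MultiheadAttention(embed_dim=16, num_heads=4, dropout=0.0, self_attention=True)
_x = torch.randn(7, 2, 16)  # T x B x C
_attn_out, _attn_weights = _mha(query=_x, key=_x, value=_x, need_weights=True)
assert _attn_out.shape == (7, 2, 16) and _attn_weights.shape == (2, 7, 7)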
|
def NormSelect(norm_type, embed_dim, head_num=None, warmup_updates=1000):
if (norm_type == 'layer'):
return LayerNorm(embed_dim)
elif (norm_type == 'batch'):
return MaskSyncBatchNorm(embed_dim)
elif (norm_type == 'power'):
return MaskPowerNorm(embed_dim, group_num=head_num, warmup_iters=warmup_updates)
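# Hypothetical usage sketch (not part of the original source): 'layer' returns the
# LayerNorm defined later in this file; 'batch' and 'power' assume MaskSyncBatchNorm
# and MaskPowerNorm are importable in this module.
_norm = NormSelect('layer', embed_dim=512)
_normed = _norm(torch.randn(10, 4, 512))  # same shape as the input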
|
def tile(a, repeats, dim):
"\n Substitute for numpy's repeat function. Taken from https://discuss.pytorch.org/t/how-to-tile-a-tensor/13853/2\n torch.repeat([1,2,3], 2) --> [1, 2, 3, 1, 2, 3]\n np.repeat([1,2,3], repeats=2, axis=0) --> [1, 1, 2, 2, 3, 3]\n\n :param a: tensor\n :param repeats: number of repeats\n :param dim: dimension where to repeat\n :return: tensor with repitions\n "
init_dim = a.size(dim)
repeat_idx = ([1] * a.dim())
repeat_idx[dim] = repeats
a = a.repeat(*repeat_idx)
if a.is_cuda:
order_index = torch.cuda.LongTensor(torch.cat([((init_dim * torch.arange(repeats, device=a.device)) + i) for i in range(init_dim)]))
else:
order_index = torch.LongTensor(torch.cat([((init_dim * torch.arange(repeats)) + i) for i in range(init_dim)]))
return torch.index_select(a, dim, order_index)
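# Hypothetical usage sketch (not part of the original source): reproduces the
# np.repeat behaviour along a dimension, e.g. [1, 2, 3] -> [1, 1, 2, 2, 3, 3].
_tiled = tile(torch.tensor([1, 2, 3]), repeats=2, dim=0)
assert _tiled.tolist() == [1, 1, 2, 2, 3, 3]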
|
class GroupNorm(nn.Module):
'Applies Group Normalization over a mini-batch of inputs as described in\n the paper `Group Normalization`_ .\n\n .. math::\n y = \\frac{x - \\mathrm{E}[x]}{ \\sqrt{\\mathrm{Var}[x] + \\epsilon}} * \\gamma + \\beta\n\n The input channels are separated into :attr:`num_groups` groups, each containing\n ``num_channels / num_groups`` channels. The mean and standard-deviation are calculated\n separately over each group. :math:`\\gamma` and :math:`\\beta` are learnable\n per-channel affine transform parameter vectors of size :attr:`num_channels` if\n :attr:`affine` is ``True``.\n\n This layer uses statistics computed from input data in both training and\n evaluation modes.\n\n Args:\n num_groups (int): number of groups to separate the channels into\n num_channels (int): number of channels expected in input\n eps: a value added to the denominator for numerical stability. Default: 1e-5\n affine: a boolean value that when set to ``True``, this module\n has learnable per-channel affine parameters initialized to ones (for weights)\n and zeros (for biases). Default: ``True``.\n\n Shape:\n - Input: :math:`(N, C, *)` where :math:`C=\\text{num\\_channels}`\n - Output: :math:`(N, C, *)` (same shape as input)\n\n Examples::\n\n >>> input = torch.randn(20, 6, 10, 10)\n >>> # Separate 6 channels into 3 groups\n >>> m = nn.GroupNorm(3, 6)\n >>> # Separate 6 channels into 6 groups (equivalent to InstanceNorm)\n >>> m = nn.GroupNorm(6, 6)\n >>> # Put all 6 channels into a single group (equivalent to LayerNorm)\n >>> m = nn.GroupNorm(1, 6)\n >>> # Activating the module\n >>> output = m(input)\n\n .. _`Group Normalization`: https://arxiv.org/abs/1803.08494\n '
__constants__ = ['num_groups', 'num_channels', 'eps', 'affine', 'weight', 'bias']
def __init__(self, num_groups, num_channels, eps=1e-05, affine=True):
super(GroupNorm, self).__init__()
self.num_groups = num_groups
self.num_channels = num_channels
self.group_feature = (num_channels // num_groups)
self.eps = eps
self.affine = affine
if self.affine:
self.weight = nn.Parameter(torch.Tensor(num_channels))
self.bias = nn.Parameter(torch.Tensor(num_channels))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
if self.affine:
init.ones_(self.weight)
init.zeros_(self.bias)
def forward(self, input, pad_mask=None, is_encoder=False):
shaped_input = (len(input.shape) == 2)
if shaped_input:
input = input.unsqueeze(0)
(T, B, C) = input.shape
input = input.contiguous().view((T * B), C)
input = F.group_norm(input, self.num_groups, self.weight, self.bias, self.eps)
input = input.contiguous().view(T, B, C)
if shaped_input:
input = input.squeeze(0)
return input
def extra_repr(self):
return '{num_groups}, {num_channels}, eps={eps}, affine={affine}'.format(**self.__dict__)
|
class LayerNorm(nn.Module):
'Applies Layer Normalization over a mini-batch of inputs as described in\n the paper `Layer Normalization`_ .\n\n .. math::\n y = \\frac{x - \\mathrm{E}[x]}{ \\sqrt{\\mathrm{Var}[x] + \\epsilon}} * \\gamma + \\beta\n\n The mean and standard-deviation are calculated separately over the last\n certain number dimensions which have to be of the shape specified by\n :attr:`normalized_shape`.\n :math:`\\gamma` and :math:`\\beta` are learnable affine transform parameters of\n :attr:`normalized_shape` if :attr:`elementwise_affine` is ``True``.\n\n .. note::\n Unlike Batch Normalization and Instance Normalization, which applies\n scalar scale and bias for each entire channel/plane with the\n :attr:`affine` option, Layer Normalization applies per-element scale and\n bias with :attr:`elementwise_affine`.\n\n This layer uses statistics computed from input data in both training and\n evaluation modes.\n\n Args:\n normalized_shape (int or list or torch.Size): input shape from an expected input\n of size\n\n .. math::\n [* \\times \\text{normalized\\_shape}[0] \\times \\text{normalized\\_shape}[1]\n \\times \\ldots \\times \\text{normalized\\_shape}[-1]]\n\n If a single integer is used, it is treated as a singleton list, and this module will\n normalize over the last dimension which is expected to be of that specific size.\n eps: a value added to the denominator for numerical stability. Default: 1e-5\n elementwise_affine: a boolean value that when set to ``True``, this module\n has learnable per-element affine parameters initialized to ones (for weights)\n and zeros (for biases). Default: ``True``.\n\n Shape:\n - Input: :math:`(N, *)`\n - Output: :math:`(N, *)` (same shape as input)\n\n Examples::\n\n >>> input = torch.randn(20, 5, 10, 10)\n >>> # With Learnable Parameters\n >>> m = nn.LayerNorm(input.size()[1:])\n >>> # Without Learnable Parameters\n >>> m = nn.LayerNorm(input.size()[1:], elementwise_affine=False)\n >>> # Normalize over last two dimensions\n >>> m = nn.LayerNorm([10, 10])\n >>> # Normalize over last dimension of size 10\n >>> m = nn.LayerNorm(10)\n >>> # Activating the module\n >>> output = m(input)\n\n .. _`Layer Normalization`: https://arxiv.org/abs/1607.06450\n '
__constants__ = ['normalized_shape', 'weight', 'bias', 'eps', 'elementwise_affine']
def __init__(self, normalized_shape, eps=1e-05, elementwise_affine=True):
super(LayerNorm, self).__init__()
if isinstance(normalized_shape, numbers.Integral):
normalized_shape = (normalized_shape,)
self.normalized_shape = tuple(normalized_shape)
self.eps = eps
self.elementwise_affine = elementwise_affine
if self.elementwise_affine:
self.weight = nn.Parameter(torch.Tensor(*normalized_shape))
self.bias = nn.Parameter(torch.Tensor(*normalized_shape))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
if self.elementwise_affine:
init.ones_(self.weight)
init.zeros_(self.bias)
def forward(self, input, pad_mask=None, is_encoder=False):
return F.layer_norm(input, self.normalized_shape, self.weight, self.bias, self.eps)
def extra_repr(self):
return '{normalized_shape}, eps={eps}, elementwise_affine={elementwise_affine}'.format(**self.__dict__)
|
def PositionalEmbedding(num_embeddings: int, embedding_dim: int, padding_idx: int, learned: bool=False):
if learned:
if (padding_idx is not None):
num_embeddings = ((num_embeddings + padding_idx) + 1)
m = LearnedPositionalEmbedding(num_embeddings, embedding_dim, padding_idx)
nn.init.normal_(m.weight, mean=0, std=(embedding_dim ** (- 0.5)))
if (padding_idx is not None):
nn.init.constant_(m.weight[padding_idx], 0)
else:
m = SinusoidalPositionalEmbedding(embedding_dim, padding_idx, init_size=((num_embeddings + padding_idx) + 1))
return m
|
class ScalarBias(torch.autograd.Function):
'\n Adds a vector of scalars, used in self-attention mechanism to allow\n the model to optionally attend to this vector instead of the past\n '
@staticmethod
def forward(ctx, input, dim, bias_init):
size = list(input.size())
size[dim] += 1
output = input.new(*size).fill_(bias_init)
output.narrow(dim, 1, (size[dim] - 1)).copy_(input)
ctx.dim = dim
return output
@staticmethod
def backward(ctx, grad):
return (grad.narrow(ctx.dim, 1, (grad.size(ctx.dim) - 1)), None, None)
|
def scalar_bias(input, dim, bias_init=0):
return ScalarBias.apply(input, dim, bias_init)
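# Hypothetical usage sketch (not part of the original source): one extra column of
# a constant bias is prepended along the chosen dimension, giving attention an
# always-available slot to attend to instead of the past.
_inp = torch.randn(2, 3)
_biased = scalar_bias(_inp, dim=1, bias_init=0)
assert _biased.shape == (2, 4) and bool((_biased[:, 0] == 0).all())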
|
class SparseMultiheadAttention(MultiheadAttention):
' Sparse Multi-Headed Attention.\n\n "Generating Long Sequences with Sparse Transformers". Implements\n fixed factorized self attention, where l=stride and c=expressivity.\n A(1) includes all words in the stride window and A(2) takes a summary of c\n words from the end of each stride window.\n If is_bidirectional=False, we do not include any words past the current word,\n as in the paper.\n '
def __init__(self, embed_dim, num_heads, kdim=None, vdim=None, dropout=0.0, bias=True, add_bias_kv=False, add_zero_attn=False, self_attention=False, encoder_decoder_attention=False, stride=32, expressivity=8, is_bidirectional=True):
super().__init__(embed_dim, num_heads, kdim, vdim, dropout, bias, add_bias_kv, add_zero_attn, self_attention, encoder_decoder_attention)
self.is_bidirectional = is_bidirectional
self.stride = stride
self.expressivity = expressivity
assert ((self.stride > 0) and (self.stride >= self.expressivity))
def compute_checkpoint(self, word_index):
if (((word_index % self.stride) == 0) and (word_index != 0)):
checkpoint_index = (word_index - self.expressivity)
else:
checkpoint_index = (((math.floor((word_index / self.stride)) * self.stride) + self.stride) - self.expressivity)
return checkpoint_index
def compute_subset_summaries(self, absolute_max):
checkpoint_index = self.compute_checkpoint(0)
subset_two = set()
while (checkpoint_index <= (absolute_max - 1)):
summary = set(range(checkpoint_index, min(((checkpoint_index + self.expressivity) + 1), absolute_max)))
subset_two = subset_two.union(summary)
checkpoint_index = self.compute_checkpoint((checkpoint_index + self.stride))
return subset_two
def compute_fixed_attention_subset(self, word_index, tgt_len):
if (not self.is_bidirectional):
absolute_max = (word_index + 1)
else:
absolute_max = tgt_len
rounded_index = (math.floor(((word_index + self.stride) / self.stride)) * self.stride)
if (((word_index % self.stride) == 0) and (word_index != 0)):
subset_one = set(range((word_index - self.stride), min(absolute_max, (word_index + 1))))
else:
subset_one = set(range(max(0, (rounded_index - self.stride)), min(absolute_max, (rounded_index + 1))))
subset_two = set()
if (not self.is_bidirectional):
subset_two = self.compute_subset_summaries(absolute_max)
return subset_one.union(subset_two)
def buffered_sparse_mask(self, tensor, tgt_len, src_len):
assert (tgt_len > self.stride)
sparse_mask = torch.empty((tgt_len, src_len)).float().fill_(float('-inf'))
subset_summaries = set()
if self.is_bidirectional:
subset_summaries = self.compute_subset_summaries(tgt_len)
for i in range(tgt_len):
fixed_attention_subset = self.compute_fixed_attention_subset(i, tgt_len)
fixed_attention_subset = fixed_attention_subset.union(subset_summaries)
included_word_indices = torch.LongTensor(list(fixed_attention_subset))
sparse_mask[i].index_fill_(0, included_word_indices, 0)
return sparse_mask.type_as(tensor)
def apply_sparse_mask(self, attn_weights, tgt_len, src_len, bsz):
sparse_mask = self.buffered_sparse_mask(attn_weights, tgt_len, src_len)
sparse_mask = sparse_mask.unsqueeze(0).expand((bsz * self.num_heads), tgt_len, src_len)
attn_weights += sparse_mask
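# Hypothetical usage sketch (not part of the original source): inspect the fixed
# factorized pattern; allowed positions are 0 and disallowed ones are -inf, and the
# mask is simply added to the raw attention scores in apply_sparse_mask.
_sparse_attn = SparseMultiheadAttention(embed_dim=16, num_heads=4, self_attention=True, stride=4, expressivity=2, is_bidirectional=True)
_sparse_mask = _sparse_attn.buffered_sparse_mask(torch.zeros(1), tgt_len=8, src_len=8)
assert _sparse_mask.shape == (8, 8)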
|
class SparseTransformerSentenceEncoder(TransformerSentenceEncoder):
'\n Sparse implementation of the TransformerSentenceEncoder\n - see SparseMultiheadAttention\n '
def __init__(self, padding_idx: int, vocab_size: int, num_encoder_layers: int=6, embedding_dim: int=768, ffn_embedding_dim: int=3072, num_attention_heads: int=8, dropout: float=0.1, attention_dropout: float=0.1, activation_dropout: float=0.1, max_seq_len: int=256, num_segments: int=2, use_position_embeddings: bool=True, offset_positions_by_padding: bool=True, encoder_normalize_before: bool=False, apply_bert_init: bool=False, activation_fn: str='relu', learned_pos_embedding: bool=True, add_bias_kv: bool=False, add_zero_attn: bool=False, embed_scale: float=None, freeze_embeddings: bool=False, n_trans_layers_to_freeze: int=0, export: bool=False, is_bidirectional: bool=True, stride: int=32, expressivity: int=8) -> None:
super().__init__(padding_idx, vocab_size, num_encoder_layers, embedding_dim, ffn_embedding_dim, num_attention_heads, dropout, attention_dropout, activation_dropout, max_seq_len, num_segments, use_position_embeddings, offset_positions_by_padding, encoder_normalize_before, apply_bert_init, activation_fn, learned_pos_embedding, add_bias_kv, add_zero_attn, embed_scale, freeze_embeddings, n_trans_layers_to_freeze, export)
self.layers = nn.ModuleList([SparseTransformerSentenceEncoderLayer(embedding_dim=self.embedding_dim, ffn_embedding_dim=ffn_embedding_dim, num_attention_heads=num_attention_heads, dropout=self.dropout, attention_dropout=attention_dropout, activation_dropout=activation_dropout, activation_fn=activation_fn, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn, export=export, is_bidirectional=is_bidirectional, stride=stride, expressivity=expressivity) for _ in range(num_encoder_layers)])
def freeze_module_params(m):
if (m is not None):
for p in m.parameters():
p.requires_grad = False
for layer in range(n_trans_layers_to_freeze):
freeze_module_params(self.layers[layer])
|
class SparseTransformerSentenceEncoderLayer(TransformerSentenceEncoderLayer):
'\n Implements a Sparse Transformer Encoder Layer (see SparseMultiheadAttention)\n '
def __init__(self, embedding_dim: float=768, ffn_embedding_dim: float=3072, num_attention_heads: float=8, dropout: float=0.1, attention_dropout: float=0.1, activation_dropout: float=0.1, activation_fn: str='relu', add_bias_kv: bool=False, add_zero_attn: bool=False, export: bool=False, is_bidirectional: bool=True, stride: int=32, expressivity: int=8) -> None:
super().__init__(embedding_dim, ffn_embedding_dim, num_attention_heads, dropout, attention_dropout, activation_dropout, activation_fn, add_bias_kv, add_zero_attn, export)
self.self_attn = SparseMultiheadAttention(self.embedding_dim, num_attention_heads, dropout=attention_dropout, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn, self_attention=True, is_bidirectional=is_bidirectional, stride=stride, expressivity=expressivity)
|
class TransformerEncoderLayer(nn.Module):
'Encoder layer block.\n\n In the original paper each operation (multi-head attention or FFN) is\n postprocessed with: `dropout -> add residual -> layernorm`. In the\n tensor2tensor code they suggest that learning is more robust when\n preprocessing each layer with layernorm and postprocessing with:\n `dropout -> add residual`. We default to the approach in the paper, but the\n tensor2tensor approach can be enabled by setting\n *args.encoder_normalize_before* to ``True``.\n\n Args:\n args (argparse.Namespace): parsed command-line arguments\n '
def __init__(self, args):
super().__init__()
self.embed_dim = args.encoder_embed_dim
self.self_attn = MultiheadAttention(self.embed_dim, args.encoder_attention_heads, dropout=args.attention_dropout, self_attention=True)
self.self_attn_layer_norm = NormSelect(args.encoder_norm_self, self.embed_dim, args.encoder_attention_heads, args.warmup_updates)
self.dropout = args.dropout
self.activation_fn = utils.get_activation_fn(activation=getattr(args, 'activation_fn', 'relu'))
self.activation_dropout = getattr(args, 'activation_dropout', 0)
if (self.activation_dropout == 0):
self.activation_dropout = getattr(args, 'relu_dropout', 0)
self.normalize_before = args.encoder_normalize_before
self.fc1 = Linear(self.embed_dim, args.encoder_ffn_embed_dim)
self.fc2 = Linear(args.encoder_ffn_embed_dim, self.embed_dim)
self.final_layer_norm = NormSelect(args.encoder_norm_ff, self.embed_dim, args.encoder_attention_heads, args.warmup_updates)
if args.encoder_spec_norm:
self.self_attn.q_proj = spectral_norm(self.self_attn.q_proj)
self.self_attn.v_proj = spectral_norm(self.self_attn.v_proj)
def upgrade_state_dict_named(self, state_dict, name):
'\n Rename layer norm states from `...layer_norms.0.weight` to\n `...self_attn_layer_norm.weight` and `...layer_norms.1.weight` to\n `...final_layer_norm.weight`\n '
layer_norm_map = {'0': 'self_attn_layer_norm', '1': 'final_layer_norm'}
for (old, new) in layer_norm_map.items():
for m in ('weight', 'bias'):
k = '{}.layer_norms.{}.{}'.format(name, old, m)
if (k in state_dict):
state_dict['{}.{}.{}'.format(name, new, m)] = state_dict[k]
del state_dict[k]
def forward(self, x, encoder_padding_mask, attn_mask=None):
'\n Args:\n x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`\n encoder_padding_mask (ByteTensor): binary ByteTensor of shape\n `(batch, src_len)` where padding elements are indicated by ``1``.\n attn_mask (ByteTensor): binary tensor of shape (T_tgt, T_src), where\n T_tgt is the length of query, while T_src is the length of key,\n though here both query and key are x,\n attn_mask[t_tgt, t_src] = 1 means that when calculating the embedding\n for t_tgt, t_src is excluded (or masked out); =0 means it is\n included in attention\n\n Returns:\n encoded output of shape `(seq_len, batch, embed_dim)`\n '
residual = x
x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True, pad_mask=encoder_padding_mask)
if (attn_mask is not None):
attn_mask = attn_mask.masked_fill(attn_mask.bool(), (- 100000000.0))
(x, _) = self.self_attn(query=x, key=x, value=x, key_padding_mask=encoder_padding_mask)
x = F.dropout(x, p=self.dropout, training=self.training)
x = (residual + x)
x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True, pad_mask=encoder_padding_mask)
residual = x
x = self.maybe_layer_norm(self.final_layer_norm, x, before=True, pad_mask=encoder_padding_mask)
x = self.activation_fn(self.fc1(x))
x = F.dropout(x, p=self.activation_dropout, training=self.training)
x = self.fc2(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = (residual + x)
x = self.maybe_layer_norm(self.final_layer_norm, x, after=True, pad_mask=encoder_padding_mask)
return x
def maybe_layer_norm(self, layer_norm, x, before=False, after=False, pad_mask=None):
assert (before ^ after)
if (after ^ self.normalize_before):
return layer_norm(x, pad_mask=pad_mask, is_encoder=True)
else:
return x
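# Hypothetical usage sketch (not part of the original source): the layer is driven
# by an argparse-style namespace; the field names below mirror the attributes read
# in __init__ and are assumptions about the surrounding training script. It also
# assumes the rest of this module (utils, Linear, NormSelect defined further down)
# has been fully imported before the layer is built.
from types import SimpleNamespace
_enc_args = SimpleNamespace(encoder_embed_dim=16, encoder_ffn_embed_dim=32, encoder_attention_heads=4, attention_dropout=0.0, dropout=0.1, encoder_normalize_before=False, encoder_norm_self='layer', encoder_norm_ff='layer', encoder_spec_norm=False, warmup_updates=1000)
_enc_layer = TransformerEncoderLayer(_enc_args)
_enc_y = _enc_layer(torch.randn(7, 2, 16), encoder_padding_mask=None)  # (seq_len, batch, embed_dim) in and out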
|
class TransformerDecoderLayer(nn.Module):
'Decoder layer block.\n\n In the original paper each operation (multi-head attention, encoder\n attention or FFN) is postprocessed with: `dropout -> add residual ->\n layernorm`. In the tensor2tensor code they suggest that learning is more\n robust when preprocessing each layer with layernorm and postprocessing with:\n `dropout -> add residual`. We default to the approach in the paper, but the\n tensor2tensor approach can be enabled by setting\n *args.decoder_normalize_before* to ``True``.\n\n Args:\n args (argparse.Namespace): parsed command-line arguments\n no_encoder_attn (bool, optional): whether to attend to encoder outputs\n (default: False).\n '
def __init__(self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False):
super().__init__()
self.embed_dim = args.decoder_embed_dim
self.cross_self_attention = getattr(args, 'cross_self_attention', False)
self.self_attn = MultiheadAttention(embed_dim=self.embed_dim, num_heads=args.decoder_attention_heads, dropout=args.attention_dropout, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn, self_attention=(not self.cross_self_attention))
self.dropout = args.dropout
self.activation_fn = utils.get_activation_fn(activation=getattr(args, 'activation_fn', 'relu'))
self.activation_dropout = getattr(args, 'activation_dropout', 0)
if (self.activation_dropout == 0):
self.activation_dropout = getattr(args, 'relu_dropout', 0)
self.normalize_before = args.decoder_normalize_before
export = getattr(args, 'char_inputs', False)
self.self_attn_layer_norm = NormSelect(args.decoder_norm_self, self.embed_dim, args.decoder_attention_heads, args.warmup_updates)
if no_encoder_attn:
self.encoder_attn = None
self.encoder_attn_layer_norm = None
else:
self.encoder_attn = MultiheadAttention(self.embed_dim, args.decoder_attention_heads, kdim=getattr(args, 'encoder_embed_dim', None), vdim=getattr(args, 'encoder_embed_dim', None), dropout=args.attention_dropout, encoder_decoder_attention=True)
self.encoder_attn_layer_norm = NormSelect(args.decoder_norm_self, self.embed_dim, args.decoder_attention_heads, args.warmup_updates)
self.fc1 = Linear(self.embed_dim, args.decoder_ffn_embed_dim)
self.fc2 = Linear(args.decoder_ffn_embed_dim, self.embed_dim)
self.final_layer_norm = NormSelect(args.decoder_norm_ff, self.embed_dim, args.decoder_attention_heads, args.warmup_updates)
self.need_attn = True
self.onnx_trace = False
def prepare_for_onnx_export_(self):
self.onnx_trace = True
def forward(self, x, encoder_out=None, encoder_padding_mask=None, incremental_state=None, prev_self_attn_state=None, prev_attn_state=None, self_attn_mask=None, self_attn_padding_mask=None, need_attn=False, need_head_weights=False):
'\n Args:\n x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`\n encoder_padding_mask (ByteTensor, optional): binary\n ByteTensor of shape `(batch, src_len)` where padding\n elements are indicated by ``1``.\n need_attn (bool, optional): return attention weights\n need_head_weights (bool, optional): return attention weights\n for each head (default: return average over heads).\n\n Returns:\n encoded output of shape `(seq_len, batch, embed_dim)`\n '
if need_head_weights:
need_attn = True
residual = x
x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True, pad_mask=self_attn_padding_mask)
if (prev_self_attn_state is not None):
if (incremental_state is None):
incremental_state = {}
(prev_key, prev_value) = prev_self_attn_state[:2]
saved_state = {'prev_key': prev_key, 'prev_value': prev_value}
if (len(prev_self_attn_state) >= 3):
saved_state['prev_key_padding_mask'] = prev_self_attn_state[2]
self.self_attn._set_input_buffer(incremental_state, saved_state)
if (self.cross_self_attention and (not ((incremental_state is not None) and ('prev_key' in self.self_attn._get_input_buffer(incremental_state))))):
if (self_attn_mask is not None):
self_attn_mask = torch.cat((x.new(x.size(0), encoder_out.size(0)).zero_(), self_attn_mask), dim=1)
if (self_attn_padding_mask is not None):
if (encoder_padding_mask is None):
encoder_padding_mask = self_attn_padding_mask.new(encoder_out.size(1), encoder_out.size(0)).zero_()
self_attn_padding_mask = torch.cat((encoder_padding_mask, self_attn_padding_mask), dim=1)
y = torch.cat((encoder_out, x), dim=0)
else:
y = x
(x, attn) = self.self_attn(query=x, key=y, value=y, key_padding_mask=self_attn_padding_mask, incremental_state=incremental_state, need_weights=False, attn_mask=self_attn_mask)
x = F.dropout(x, p=self.dropout, training=self.training)
x = (residual + x)
x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True, pad_mask=self_attn_padding_mask)
if (self.encoder_attn is not None):
residual = x
x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, before=True, pad_mask=self_attn_padding_mask)
if (prev_attn_state is not None):
if (incremental_state is None):
incremental_state = {}
(prev_key, prev_value) = prev_attn_state[:2]
saved_state = {'prev_key': prev_key, 'prev_value': prev_value}
if (len(prev_attn_state) >= 3):
saved_state['prev_key_padding_mask'] = prev_attn_state[2]
self.encoder_attn._set_input_buffer(incremental_state, saved_state)
(x, attn) = self.encoder_attn(query=x, key=encoder_out, value=encoder_out, key_padding_mask=encoder_padding_mask, incremental_state=incremental_state, static_kv=True, need_weights=(need_attn or ((not self.training) and self.need_attn)), need_head_weights=need_head_weights)
x = F.dropout(x, p=self.dropout, training=self.training)
x = (residual + x)
x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, after=True, pad_mask=encoder_padding_mask)
residual = x
x = self.maybe_layer_norm(self.final_layer_norm, x, before=True, pad_mask=self_attn_padding_mask)
x = self.activation_fn(self.fc1(x))
x = F.dropout(x, p=self.activation_dropout, training=self.training)
x = self.fc2(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = (residual + x)
x = self.maybe_layer_norm(self.final_layer_norm, x, after=True, pad_mask=self_attn_padding_mask)
if (self.onnx_trace and (incremental_state is not None)):
saved_state = self.self_attn._get_input_buffer(incremental_state)
if (self_attn_padding_mask is not None):
self_attn_state = (saved_state['prev_key'], saved_state['prev_value'], saved_state['prev_key_padding_mask'])
else:
self_attn_state = (saved_state['prev_key'], saved_state['prev_value'])
return (x, attn, self_attn_state)
return (x, attn)
def maybe_layer_norm(self, layer_norm, x, before=False, after=False, pad_mask=None):
assert (before ^ after)
if (after ^ self.normalize_before):
return layer_norm(x, pad_mask=pad_mask, is_encoder=False)
else:
return x
def make_generation_fast_(self, need_attn=False, **kwargs):
self.need_attn = need_attn
|
def Linear(in_features, out_features, bias=True):
m = nn.Linear(in_features, out_features, bias)
nn.init.xavier_uniform_(m.weight)
if bias:
nn.init.constant_(m.bias, 0.0)
return m
|
class TransformerSentenceEncoderLayer(nn.Module):
'\n Implements a Transformer Encoder Layer used in BERT/XLM style pre-trained\n models.\n '
def __init__(self, embedding_dim: float=768, ffn_embedding_dim: float=3072, num_attention_heads: float=8, dropout: float=0.1, attention_dropout: float=0.1, activation_dropout: float=0.1, activation_fn: str='relu', add_bias_kv: bool=False, add_zero_attn: bool=False, export: bool=False, encoder_norm_self: str='layer', encoder_norm_ff: str='layer', encoder_normalize_before: bool=False) -> None:
super().__init__()
self.embedding_dim = embedding_dim
self.dropout = dropout
self.activation_dropout = activation_dropout
self.activation_fn = utils.get_activation_fn(activation_fn)
self.self_attn = MultiheadAttention(self.embedding_dim, num_attention_heads, dropout=attention_dropout, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn, self_attention=True)
self.self_attn_layer_norm = NormSelect(encoder_norm_self, self.embedding_dim, num_attention_heads)
self.fc1 = nn.Linear(self.embedding_dim, ffn_embedding_dim)
self.fc2 = nn.Linear(ffn_embedding_dim, self.embedding_dim)
self.final_layer_norm = NormSelect(encoder_norm_ff, self.embedding_dim, num_attention_heads)
self.encoder_normalize_before = encoder_normalize_before
def forward(self, x: torch.Tensor, self_attn_mask: torch.Tensor=None, self_attn_padding_mask: torch.Tensor=None):
'\n LayerNorm is applied either before or after the self-attention/ffn\n modules similar to the original Transformer implementation.\n '
residual = x
x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True, pad_mask=self_attn_padding_mask)
(x, attn) = self.self_attn(query=x, key=x, value=x, key_padding_mask=self_attn_padding_mask, need_weights=False, attn_mask=self_attn_mask)
x = F.dropout(x, p=self.dropout, training=self.training)
x = (residual + x)
x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True, pad_mask=self_attn_padding_mask)
residual = x
x = self.maybe_layer_norm(self.final_layer_norm, x, before=True, pad_mask=self_attn_padding_mask)
x = self.activation_fn(self.fc1(x))
x = F.dropout(x, p=self.activation_dropout, training=self.training)
x = self.fc2(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = (residual + x)
x = self.maybe_layer_norm(self.final_layer_norm, x, after=True, pad_mask=self_attn_padding_mask)
return (x, attn)
def maybe_layer_norm(self, norm_fn, x, before=False, after=False, pad_mask=None, is_encoder=True):
assert (before ^ after)
if (after ^ self.encoder_normalize_before):
return norm_fn(x)
else:
return x
|
def unfold1d(x, kernel_size, padding_l, pad_value=0):
'unfold T x B x C to T x B x C x K'
if (kernel_size > 1):
(T, B, C) = x.size()
x = F.pad(x, (0, 0, 0, 0, padding_l, ((kernel_size - 1) - padding_l)), value=pad_value)
x = x.as_strided((T, B, C, kernel_size), ((B * C), C, 1, (B * C)))
else:
x = x.unsqueeze(3)
return x
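# Hypothetical usage sketch (not part of the original source): exposes a length-K
# window around every time step as an extra trailing dimension, using an
# as_strided view over the padded sequence.
_seq = torch.randn(10, 2, 4)  # T x B x C
_windows = unfold1d(_seq, kernel_size=3, padding_l=1)
assert _windows.shape == (10, 2, 4, 3)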
|
def _pair(v):
if isinstance(v, Iterable):
assert (len(v) == 2), 'len(v) != 2'
return v
return tuple(repeat(v, 2))
|
def infer_conv_output_dim(conv_op, input_dim, sample_inchannel):
sample_seq_len = 200
sample_bsz = 10
x = torch.randn(sample_bsz, sample_inchannel, sample_seq_len, input_dim)
x = conv_op(x)
x = x.transpose(1, 2)
(bsz, seq) = x.size()[:2]
per_channel_dim = x.size()[3]
return (x.contiguous().view(bsz, seq, (- 1)).size((- 1)), per_channel_dim)
|
class VGGBlock(torch.nn.Module):
'\n VGG-motivated CNN module https://arxiv.org/pdf/1409.1556.pdf\n\n Args:\n in_channels: (int) number of input channels (typically 1)\n out_channels: (int) number of output channels\n conv_kernel_size: convolution kernel size\n pooling_kernel_size: the size of the pooling window to take a max over\n num_conv_layers: (int) number of convolution layers\n input_dim: (int) input dimension\n conv_stride: the stride of the convolving kernel.\n Can be a single number or a tuple (sH, sW) Default: 1\n padding: implicit paddings on both sides of the input.\n Can be a single number or a tuple (padH, padW). Default: None\n layer_norm: (bool) if layer norm is going to be applied. Default: False\n\n Shape:\n Input: BxCxTxfeat, i.e. (batch_size, input_size, timesteps, features)\n Output: BxCxTxfeat, i.e. (batch_size, input_size, timesteps, features)\n '
def __init__(self, in_channels, out_channels, conv_kernel_size, pooling_kernel_size, num_conv_layers, input_dim, conv_stride=1, padding=None, layer_norm=False):
assert (input_dim is not None), 'Need input_dim for LayerNorm and infer_conv_output_dim'
super(VGGBlock, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.conv_kernel_size = _pair(conv_kernel_size)
self.pooling_kernel_size = _pair(pooling_kernel_size)
self.num_conv_layers = num_conv_layers
self.padding = (tuple(((e // 2) for e in self.conv_kernel_size)) if (padding is None) else _pair(padding))
self.conv_stride = _pair(conv_stride)
self.layers = nn.ModuleList()
for layer in range(num_conv_layers):
conv_op = nn.Conv2d((in_channels if (layer == 0) else out_channels), out_channels, self.conv_kernel_size, stride=self.conv_stride, padding=self.padding)
self.layers.append(conv_op)
if layer_norm:
(conv_output_dim, per_channel_dim) = infer_conv_output_dim(conv_op, input_dim, (in_channels if (layer == 0) else out_channels))
self.layers.append(nn.LayerNorm(per_channel_dim))
input_dim = per_channel_dim
self.layers.append(nn.ReLU())
if (self.pooling_kernel_size is not None):
pool_op = nn.MaxPool2d(kernel_size=self.pooling_kernel_size, ceil_mode=True)
self.layers.append(pool_op)
(self.total_output_dim, self.output_dim) = infer_conv_output_dim(pool_op, input_dim, out_channels)
def forward(self, x):
for (i, _) in enumerate(self.layers):
x = self.layers[i](x)
return x
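# Hypothetical usage sketch (not part of the original source): a two-conv VGG block
# over spectrogram-like input shaped (batch, channels, time, features); the 2x2
# max-pool halves the time and feature axes.
_vgg = VGGBlock(in_channels=1, out_channels=32, conv_kernel_size=3, pooling_kernel_size=2, num_conv_layers=2, input_dim=40, layer_norm=True)
_vgg_out = _vgg(torch.randn(4, 1, 50, 40))  # -> (4, 32, 25, 20)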
|
@register_optimizer('adadelta')
class Adadelta(FairseqOptimizer):
def __init__(self, args, params):
super().__init__(args)
self._optimizer = torch.optim.Adadelta(params, **self.optimizer_config)
@staticmethod
def add_args(parser):
'Add optimizer-specific arguments to the parser.'
parser.add_argument('--adadelta-rho', type=float, default=0.9, metavar='RHO', help='coefficient used for computing a running average of squared gradients')
parser.add_argument('--adadelta-eps', type=float, default=1e-06, metavar='EPS', help='term added to the denominator to improve numerical stability')
parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD', help='weight decay')
parser.add_argument('--anneal-eps', action='store_true', help='flag to anneal eps')
@property
def optimizer_config(self):
'\n Return a kwarg dictionary that will be used to override optimizer\n args stored in checkpoints. This allows us to load a checkpoint and\n resume training using a different set of optimizer args, e.g., with a\n different learning rate.\n '
return {'lr': self.args.lr[0], 'rho': self.args.adadelta_rho, 'eps': self.args.adadelta_eps, 'weight_decay': self.args.weight_decay}
|
@register_optimizer('adafactor')
class FairseqAdafactor(FairseqOptimizer):
def __init__(self, args, params):
super().__init__(args)
self._optimizer = Adafactor(params, **self.optimizer_config)
@staticmethod
def add_args(parser):
'Add optimizer-specific arguments to the parser.'
parser.add_argument('--adafactor-eps', default='(1e-30, 1e-3)', metavar='E', help='epsilons for Adafactor optimizer')
parser.add_argument('--clip-threshold', type=float, default=1.0, metavar='C', help='threshold for clipping update root mean square')
parser.add_argument('--decay-rate', type=float, default=(- 0.8), metavar='D', help='decay rate of the second moment estimator')
parser.add_argument('--beta1', type=float, default=None, metavar='B', help='beta for first moment estimator. Optional')
parser.add_argument('--scale-parameter', action='store_true', help='scale learning rate by root mean square of parameter.')
parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD', help='weight decay')
parser.add_argument('--warmup-init', action='store_true', help='use relative step for warm-up learning rate schedule')
parser.add_argument('--relative-step', action='store_true', help='set learning rate to inverse square root of timestep. If false, external learning rate applied')
@property
def optimizer_config(self):
'\n Return a kwarg dictionary that will be used to override optimizer\n args stored in checkpoints. This allows us to load a checkpoint and\n resume training using a different set of optimizer args, e.g., with a\n different learning rate.\n Note : Convergence issues empirically observed with fp16 on.\n Might require search for appropriate configuration.\n '
return {'lr': self.args.lr[0], 'eps': eval(self.args.adafactor_eps), 'clip_threshold': self.args.clip_threshold, 'beta1': self.args.beta1, 'decay_rate': self.args.decay_rate, 'scale_parameter': self.args.scale_parameter, 'weight_decay': self.args.weight_decay, 'relative_step': self.args.relative_step, 'warmup_init': self.args.warmup_init}
|
class Adafactor(torch.optim.Optimizer):
'Implements Adafactor algorithm.\n\n This implementation is based on:\n `Adafactor: Adaptive Learning Rates with Sublinear Memory Cost`\n (see https://arxiv.org/abs/1804.04235)\n\n Arguments:\n params (iterable): iterable of parameters to optimize or dicts defining\n parameter groups\n lr (float, optional): external learning rate (default: None)\n eps (tuple[float, float]): regularization constants for square gradient\n and parameter scale respectively (default: (1e-30, 1e-3))\n clip_threshold (float): threshold of root mean square of\n final gradient update (default: 1.0)\n decay_rate (float): coefficient used to compute running averages of square\n gradient (default: -0.8)\n beta1 (float): coefficient used for computing running averages of gradient\n (default: None)\n weight_decay (float, optional): weight decay (L2 penalty) (default: 0)\n scale_parameter (bool): if true, learning rate is scaled by root mean square of\n parameter (default: True)\n relative_step (bool): if true, time-dependent learning rate is computed\n instead of external learning rate (default: True)\n warmup_init (bool): time-dependent learning rate computation depends on\n whether warm-up initialization is being used (default: False)\n '
def __init__(self, params, lr=None, eps=(1e-30, 0.001), clip_threshold=1.0, decay_rate=(- 0.8), beta1=None, weight_decay=0.0, scale_parameter=True, relative_step=True, warmup_init=False):
defaults = dict(lr=lr, eps=eps, clip_threshold=clip_threshold, decay_rate=decay_rate, beta1=beta1, weight_decay=weight_decay, scale_parameter=scale_parameter, relative_step=relative_step, warmup_init=warmup_init)
super(Adafactor, self).__init__(params, defaults)
@property
def supports_memory_efficient_fp16(self):
return True
def _get_lr(self, param_group, param_state):
rel_step_sz = param_group['lr']
if param_group['relative_step']:
min_step = ((1e-06 * param_state['step']) if param_group['warmup_init'] else 0.01)
rel_step_sz = min(min_step, (1.0 / math.sqrt(param_state['step'])))
param_scale = 1.0
if param_group['scale_parameter']:
param_scale = max(param_group['eps'][1], param_state['RMS'])
return (param_scale * rel_step_sz)
def _get_options(self, param_group, param_shape):
factored = (len(param_shape) >= 2)
use_first_moment = (param_group['beta1'] is not None)
return (factored, use_first_moment)
def _rms(self, tensor):
return (tensor.norm(2) / (tensor.numel() ** 0.5))
def _approx_sq_grad(self, exp_avg_sq_row, exp_avg_sq_col, output):
r_factor = (exp_avg_sq_row / exp_avg_sq_row.mean(dim=(- 1)).unsqueeze((- 1))).rsqrt_().unsqueeze((- 1))
c_factor = exp_avg_sq_col.unsqueeze((- 2)).rsqrt()
torch.mul(r_factor, c_factor, out=output)
def step(self, closure=None):
'Performs a single optimization step.\n\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n '
loss = None
if (closure is not None):
loss = closure()
for group in self.param_groups:
for p in group['params']:
if (p.grad is None):
continue
grad = p.grad.data.float()
if grad.is_sparse:
raise RuntimeError('Adafactor does not support sparse gradients.')
state = self.state[p]
grad_shape = grad.shape
(factored, use_first_moment) = self._get_options(group, grad_shape)
if (len(state) == 0):
state['step'] = 0
if use_first_moment:
state['exp_avg'] = torch.zeros_like(grad)
if factored:
state['exp_avg_sq_row'] = torch.zeros(grad_shape[:(- 1)]).type_as(grad)
state['exp_avg_sq_col'] = torch.zeros((grad_shape[:(- 2)] + grad_shape[(- 1):])).type_as(grad)
else:
state['exp_avg_sq'] = torch.zeros_like(grad)
state['RMS'] = 0
else:
if use_first_moment:
state['exp_avg'] = state['exp_avg'].type_as(grad)
if factored:
state['exp_avg_sq_row'] = state['exp_avg_sq_row'].type_as(grad)
state['exp_avg_sq_col'] = state['exp_avg_sq_col'].type_as(grad)
else:
state['exp_avg_sq'] = state['exp_avg_sq'].type_as(grad)
p_data_fp32 = p.data.float()
state['step'] += 1
state['RMS'] = self._rms(p_data_fp32)
group['lr'] = self._get_lr(group, state)
beta2t = (1.0 - math.pow(state['step'], group['decay_rate']))
update = ((grad ** 2) + group['eps'][0])
if factored:
exp_avg_sq_row = state['exp_avg_sq_row']
exp_avg_sq_col = state['exp_avg_sq_col']
exp_avg_sq_row.mul_(beta2t).add_((1.0 - beta2t), update.mean(dim=(- 1)))
exp_avg_sq_col.mul_(beta2t).add_((1.0 - beta2t), update.mean(dim=(- 2)))
self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col, update)
update.mul_(grad)
else:
exp_avg_sq = state['exp_avg_sq']
exp_avg_sq.mul_(beta2t).add_((1.0 - beta2t), update)
torch.rsqrt(exp_avg_sq, out=update).mul_(grad)
update.div_(max(1.0, (self._rms(update) / group['clip_threshold'])))
update.mul_(group['lr'])
if use_first_moment:
exp_avg = state['exp_avg']
exp_avg.mul_(group['beta1']).add_((1 - group['beta1']), update)
update = exp_avg
if (group['weight_decay'] != 0):
p_data_fp32.add_(((- group['weight_decay']) * group['lr']), p_data_fp32)
p_data_fp32.add_((- update))
p.data.copy_(p_data_fp32)
return loss
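# Hypothetical usage sketch (not part of the original source): Adafactor used as a
# plain torch optimizer with its default relative-step schedule. The in-place
# updates above use the legacy add_(scalar, tensor) signature of the PyTorch
# version this code targets.
_fac_model = torch.nn.Linear(8, 2)
_fac_opt = Adafactor(_fac_model.parameters(), lr=None, relative_step=True, warmup_init=False)
_fac_model(torch.randn(4, 8)).sum().backward()
_fac_opt.step()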
|
@register_optimizer('adagrad')
class Adagrad(FairseqOptimizer):
def __init__(self, args, params):
super().__init__(args)
self._optimizer = torch.optim.Adagrad(params, **self.optimizer_config)
@staticmethod
def add_args(parser):
'Add optimizer-specific arguments to the parser.'
parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD', help='weight decay')
@property
def optimizer_config(self):
'\n Return a kwarg dictionary that will be used to override optimizer\n args stored in checkpoints. This allows us to load a checkpoint and\n resume training using a different set of optimizer args, e.g., with a\n different learning rate.\n '
return {'lr': self.args.lr[0], 'weight_decay': self.args.weight_decay}
|
@register_optimizer('adam')
class FairseqAdam(FairseqOptimizer):
'Adam optimizer for fairseq.\n\n Important note: this optimizer corresponds to the "AdamW" variant of\n Adam in its weight decay behavior. As such, it is most closely\n analogous to torch.optim.AdamW from PyTorch.\n '
def __init__(self, args, params):
super().__init__(args)
if torch.cuda.is_available():
try:
from apex.optimizers import FusedAdam as _FusedAdam
self._optimizer = FusedAdam(params, **self.optimizer_config)
except ImportError:
self._optimizer = Adam(params, **self.optimizer_config)
else:
self._optimizer = Adam(params, **self.optimizer_config)
@staticmethod
def add_args(parser):
'Add optimizer-specific arguments to the parser.'
parser.add_argument('--adam-betas', default='(0.9, 0.999)', metavar='B', help='betas for Adam optimizer')
parser.add_argument('--adam-eps', type=float, default=1e-08, metavar='D', help='epsilon for Adam optimizer')
parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD', help='weight decay')
@property
def optimizer_config(self):
'\n Return a kwarg dictionary that will be used to override optimizer\n args stored in checkpoints. This allows us to load a checkpoint and\n resume training using a different set of optimizer args, e.g., with a\n different learning rate.\n '
return {'lr': self.args.lr[0], 'betas': eval(self.args.adam_betas), 'eps': self.args.adam_eps, 'weight_decay': self.args.weight_decay}
def average_params(self):
'Reduce Params is only used during BMUF distributed training.'
state_dict = self.optimizer.state_dict()
total_gpus = float(dist.get_world_size())
for (_, value) in state_dict['state'].items():
value['exp_avg'] /= total_gpus
value['exp_avg_sq'] /= total_gpus
dist.all_reduce(value['exp_avg'], op=dist.ReduceOp.SUM)
dist.all_reduce(value['exp_avg_sq'], op=dist.ReduceOp.SUM)
|
class Adam(torch.optim.Optimizer):
'Implements Adam algorithm.\n\n This implementation is modified from torch.optim.Adam based on:\n `Fixed Weight Decay Regularization in Adam`\n (see https://arxiv.org/abs/1711.05101)\n\n It has been proposed in `Adam: A Method for Stochastic Optimization`_.\n\n Arguments:\n params (iterable): iterable of parameters to optimize or dicts defining\n parameter groups\n lr (float, optional): learning rate (default: 1e-3)\n betas (Tuple[float, float], optional): coefficients used for computing\n running averages of gradient and its square (default: (0.9, 0.999))\n eps (float, optional): term added to the denominator to improve\n numerical stability (default: 1e-8)\n weight_decay (float, optional): weight decay (L2 penalty) (default: 0)\n amsgrad (boolean, optional): whether to use the AMSGrad variant of this\n algorithm from the paper `On the Convergence of Adam and Beyond`_\n\n .. _Adam\\: A Method for Stochastic Optimization:\n https://arxiv.org/abs/1412.6980\n .. _On the Convergence of Adam and Beyond:\n https://openreview.net/forum?id=ryQu7f-RZ\n '
def __init__(self, params, lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=False):
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad)
super(Adam, self).__init__(params, defaults)
@property
def supports_memory_efficient_fp16(self):
return True
def step(self, closure=None):
'Performs a single optimization step.\n\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n '
loss = None
if (closure is not None):
loss = closure()
for group in self.param_groups:
for p in group['params']:
if (p.grad is None):
continue
grad = p.grad.data.float()
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
amsgrad = group['amsgrad']
p_data_fp32 = p.data.float()
state = self.state[p]
if (len(state) == 0):
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p_data_fp32)
state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
if amsgrad:
state['max_exp_avg_sq'] = torch.zeros_like(p_data_fp32)
else:
state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
if amsgrad:
state['max_exp_avg_sq'] = state['max_exp_avg_sq'].type_as(p_data_fp32)
(exp_avg, exp_avg_sq) = (state['exp_avg'], state['exp_avg_sq'])
if amsgrad:
max_exp_avg_sq = state['max_exp_avg_sq']
(beta1, beta2) = group['betas']
state['step'] += 1
exp_avg.mul_(beta1).add_((1 - beta1), grad)
exp_avg_sq.mul_(beta2).addcmul_((1 - beta2), grad, grad)
if amsgrad:
torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
denom = max_exp_avg_sq.sqrt().add_(group['eps'])
else:
denom = exp_avg_sq.sqrt().add_(group['eps'])
bias_correction1 = (1 - (beta1 ** state['step']))
bias_correction2 = (1 - (beta2 ** state['step']))
step_size = ((group['lr'] * math.sqrt(bias_correction2)) / bias_correction1)
if (group['weight_decay'] != 0):
p_data_fp32.add_(((- group['weight_decay']) * group['lr']), p_data_fp32)
p_data_fp32.addcdiv_((- step_size), exp_avg, denom)
p.data.copy_(p_data_fp32)
return loss
|
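For reference, a minimal sketch (plain PyTorch, using the current tensor API rather than the deprecated positional add_/addcmul_ forms above; names and values are illustrative) of one Adam step with the decoupled, AdamW-style weight decay implemented in Adam.step(): the decay is applied directly to the parameter, scaled by the learning rate, instead of being folded into the gradient.

import math
import torch

# One hand-rolled Adam step with decoupled (AdamW-style) weight decay, mirroring
# the arithmetic in Adam.step() above.
def adamw_step(p, grad, exp_avg, exp_avg_sq, step, lr=1e-3,
               betas=(0.9, 0.999), eps=1e-8, weight_decay=0.01):
    beta1, beta2 = betas
    exp_avg.mul_(beta1).add_(grad, alpha=(1 - beta1))               # first moment
    exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=(1 - beta2))  # second moment
    denom = exp_avg_sq.sqrt().add_(eps)
    bias_correction1 = 1 - beta1 ** step
    bias_correction2 = 1 - beta2 ** step
    step_size = lr * math.sqrt(bias_correction2) / bias_correction1
    if weight_decay != 0:
        p.mul_(1 - weight_decay * lr)    # decoupled weight decay, applied to p directly
    p.addcdiv_(exp_avg, denom, value=-step_size)
    return p

p = torch.ones(3)
print(adamw_step(p, torch.full((3,), 0.5), torch.zeros(3), torch.zeros(3), step=1))
|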
class FusedAdam(torch.optim.Optimizer):
"\n Implements Adam algorithm. Currently GPU-only. Requires Apex to be installed via\n ``python setup.py install --cuda_ext --cpp_ext``.\n\n It has been proposed in `Adam: A Method for Stochastic Optimization`_.\n\n Compared to the original version in Apex, the fairseq version casts grads\n and params to FP32 internally to support ``--memory-efficient-fp16``.\n\n Arguments:\n params (iterable): iterable of parameters to optimize or dicts defining\n parameter groups.\n lr (float, optional): learning rate. (default: 1e-3)\n betas (Tuple[float, float], optional): coefficients used for computing\n running averages of gradient and its square. (default: (0.9, 0.999))\n eps (float, optional): term added to the denominator to improve\n numerical stability. (default: 1e-8)\n weight_decay (float, optional): weight decay (L2 penalty) (default: 0)\n amsgrad (boolean, optional): whether to use the AMSGrad variant of this\n algorithm from the paper `On the Convergence of Adam and Beyond`_\n (default: False) NOT SUPPORTED in FusedAdam!\n eps_inside_sqrt (boolean, optional): in the 'update parameters' step,\n adds eps to the bias-corrected second moment estimate before\n evaluating square root instead of adding it to the square root of\n second moment estimate as in the original paper. (default: False)\n .. _Adam\\: A Method for Stochastic Optimization:\n https://arxiv.org/abs/1412.6980\n .. _On the Convergence of Adam and Beyond:\n https://openreview.net/forum?id=ryQu7f-RZ\n "
def __init__(self, params, lr=0.001, bias_correction=True, betas=(0.9, 0.999), eps=1e-08, eps_inside_sqrt=False, weight_decay=0.0, max_grad_norm=0.0, amsgrad=False):
global fused_adam_cuda
import importlib
fused_adam_cuda = importlib.import_module('fused_adam_cuda')
if amsgrad:
raise RuntimeError('FusedAdam does not support the AMSGrad variant.')
defaults = dict(lr=lr, bias_correction=bias_correction, betas=betas, eps=eps, weight_decay=weight_decay, max_grad_norm=max_grad_norm)
super(FusedAdam, self).__init__(params, defaults)
self.eps_mode = (0 if eps_inside_sqrt else 1)
@property
def supports_memory_efficient_fp16(self):
return True
def step(self, closure=None, grads=None, scale=1.0, grad_norms=None):
'Performs a single optimization step.\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n grads (list of tensors, optional): weight gradient to use for the\n optimizer update. If gradients have type torch.half, parameters\n are expected to be in type torch.float. (default: None)\n output params (list of tensors, optional): A reduced precision copy\n of the updated weights written out in addition to the regular\n updated weights. Have to be of same type as gradients. (default: None)\n scale (float, optional): factor to divide gradient tensor values\n by before applying to weights. (default: 1)\n '
loss = None
if (closure is not None):
loss = closure()
if (grads is None):
grads_group = ([None] * len(self.param_groups))
elif isinstance(grads, types.GeneratorType):
grads_group = [grads]
elif (type(grads[0]) != list):
grads_group = [grads]
else:
grads_group = grads
if (grad_norms is None):
grad_norms = ([None] * len(self.param_groups))
for (group, grads_this_group, grad_norm) in zip(self.param_groups, grads_group, grad_norms):
if (grads_this_group is None):
grads_this_group = ([None] * len(group['params']))
combined_scale = scale
if (group['max_grad_norm'] > 0):
clip = (((grad_norm / scale) + 1e-06) / group['max_grad_norm'])
if (clip > 1):
combined_scale = (clip * scale)
bias_correction = (1 if group['bias_correction'] else 0)
for (p, grad) in zip(group['params'], grads_this_group):
if ((p.grad is None) and (grad is None)):
continue
if (grad is None):
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('FusedAdam does not support sparse gradients, please consider SparseAdam instead')
p_data_fp32 = p.data.float()
state = self.state[p]
if (len(state) == 0):
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p_data_fp32)
state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
else:
state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
exp_avg = state['exp_avg']
exp_avg_sq = state['exp_avg_sq']
(beta1, beta2) = group['betas']
state['step'] += 1
out_p = p.data
fused_adam_cuda.adam(p_data_fp32, out_p, exp_avg, exp_avg_sq, grad, group['lr'], beta1, beta2, group['eps'], combined_scale, state['step'], self.eps_mode, bias_correction, group['weight_decay'])
return loss
|
@register_optimizer('adamax')
class FairseqAdamax(FairseqOptimizer):
def __init__(self, args, params):
super().__init__(args)
self._optimizer = Adamax(params, **self.optimizer_config)
@staticmethod
def add_args(parser):
'Add optimizer-specific arguments to the parser.'
parser.add_argument('--adamax-betas', default='(0.9, 0.999)', metavar='B', help='betas for Adamax optimizer')
parser.add_argument('--adamax-eps', type=float, default=1e-08, metavar='D', help='epsilon for Adamax optimizer')
parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD', help='weight decay')
parser.add_argument('--no-bias-correction', default=False, action='store_true', help='disable bias correction')
@property
def optimizer_config(self):
'\n Return a kwarg dictionary that will be used to override optimizer\n args stored in checkpoints. This allows us to load a checkpoint and\n resume training using a different set of optimizer args, e.g., with a\n different learning rate.\n '
return {'lr': self.args.lr[0], 'betas': eval(self.args.adamax_betas), 'eps': self.args.adamax_eps, 'weight_decay': self.args.weight_decay, 'bias_correction': (not self.args.no_bias_correction)}
|
class Adamax(torch.optim.Optimizer):
'Implements Adamax algorithm (a variant of Adam based on infinity norm).\n\n It has been proposed in `Adam: A Method for Stochastic Optimization`__.\n\n Compared to the version in PyTorch, this version implements a fix for weight decay.\n\n Arguments:\n params (iterable): iterable of parameters to optimize or dicts defining\n parameter groups\n lr (float, optional): learning rate (default: 2e-3)\n betas (Tuple[float, float], optional): coefficients used for computing\n running averages of gradient and its square\n eps (float, optional): term added to the denominator to improve\n numerical stability (default: 1e-8)\n weight_decay (float, optional): weight decay (L2 penalty) (default: 0)\n bias_correction (bool, optional): enable bias correction (default: True)\n\n __ https://arxiv.org/abs/1412.6980\n '
def __init__(self, params, lr=0.002, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, bias_correction=True):
if (not (0.0 <= lr)):
raise ValueError('Invalid learning rate: {}'.format(lr))
if (not (0.0 <= eps)):
raise ValueError('Invalid epsilon value: {}'.format(eps))
if (not (0.0 <= betas[0] < 1.0)):
raise ValueError('Invalid beta parameter at index 0: {}'.format(betas[0]))
if (not (0.0 <= betas[1] < 1.0)):
raise ValueError('Invalid beta parameter at index 1: {}'.format(betas[1]))
if (not (0.0 <= weight_decay)):
raise ValueError('Invalid weight_decay value: {}'.format(weight_decay))
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, bias_correction=bias_correction)
super(Adamax, self).__init__(params, defaults)
@property
def supports_memory_efficient_fp16(self):
return True
def step(self, closure=None):
'Performs a single optimization step.\n\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n '
loss = None
if (closure is not None):
loss = closure()
for group in self.param_groups:
for p in group['params']:
if (p.grad is None):
continue
grad = p.grad.data.float()
if grad.is_sparse:
raise RuntimeError('Adamax does not support sparse gradients')
p_data_fp32 = p.data.float()
state = self.state[p]
if (len(state) == 0):
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p_data_fp32)
state['exp_inf'] = torch.zeros_like(p_data_fp32)
else:
state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
state['exp_inf'] = state['exp_inf'].type_as(p_data_fp32)
(exp_avg, exp_inf) = (state['exp_avg'], state['exp_inf'])
(beta1, beta2) = group['betas']
eps = group['eps']
state['step'] += 1
exp_avg.mul_(beta1).add_((1 - beta1), grad)
torch.max(exp_inf.mul_(beta2), grad.abs_(), out=exp_inf)
step_size = group['lr']
if group['bias_correction']:
bias_correction = (1 - (beta1 ** state['step']))
step_size /= bias_correction
if (group['weight_decay'] != 0):
p_data_fp32.add_(((- group['weight_decay']) * group['lr']), p_data_fp32)
p_data_fp32.addcdiv_((- step_size), exp_avg, exp_inf.add(eps))
p.data.copy_(p_data_fp32)
return loss
|
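A small sketch (not fairseq's API; the function name and values are illustrative) of the infinity-norm second moment that distinguishes Adamax above from Adam: exp_inf keeps an exponentially decayed running max of |grad|, so only the first moment needs bias correction.

import torch

# Adamax-style update on a single tensor, mirroring the math in Adamax.step() above.
def adamax_step(p, grad, exp_avg, exp_inf, step, lr=2e-3, betas=(0.9, 0.999), eps=1e-8):
    beta1, beta2 = betas
    exp_avg.mul_(beta1).add_(grad, alpha=(1 - beta1))
    torch.maximum(exp_inf.mul_(beta2), grad.abs(), out=exp_inf)   # infinity-norm moment
    step_size = lr / (1 - beta1 ** step)      # bias correction on the first moment only
    p.addcdiv_(exp_avg, exp_inf.add(eps), value=-step_size)
    return p

p = torch.ones(2)
print(adamax_step(p, torch.tensor([0.3, -0.7]), torch.zeros(2), torch.zeros(2), step=1))
|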
class FairseqBMUF(FairseqOptimizer):
'\n Implements incremental block distributed data parallelism similar to\n https://ieeexplore.ieee.org/document/7472805\n\n Paper title: Scalable training of deep learning machines by incremental\n block training with intra-block parallel optimization and blockwise\n model-update filtering\n '
def __init__(self, args, optimizer):
super().__init__(args)
self._optimizer = optimizer
self._num_updates = 0
self.sync_iter = self.args.global_sync_iter
self.block_momentum = self.args.block_momentum
self.block_lr = self.args.block_lr
self._reset_local_data()
self.warmup_iteration = self.args.warmup_iterations
self.use_nbm = self.args.use_nbm
self.initial_state = self._optimizer.state_dict()
self.average_sync = self.args.average_sync
@staticmethod
def add_args(parser):
'Add optimizer-specific arguments to the parser.'
parser.add_argument('--block-lr', default=1, type=float, help='block learning rate for bmuf')
parser.add_argument('--block-momentum', default=0.875, type=float, help='block momentum for bmuf')
parser.add_argument('--global-sync-iter', default=50, type=int, help='Iteration for syncing global model')
parser.add_argument('--warmup-iterations', default=500, type=int, help='warmup iterations for model to broadcast')
parser.add_argument('--use-nbm', default=True, action='store_true', help='Specify whether you want to use classical BM / Nesterov BM')
parser.add_argument('--average-sync', default=True, action='store_true', help='Specify whether you want to average the local momentum after each sync')
@property
def optimizer(self):
return self._optimizer.optimizer
@property
def optimizer_config(self):
return self._optimizer.optimizer_config
def get_lr(self):
return self._optimizer.get_lr()
def set_lr(self, lr):
self._optimizer.set_lr(lr)
def state_dict(self):
return self._optimizer.state_dict()
def load_state_dict(self, state_dict, optimizer_overrides=None):
self._optimizer.load_state_dict(state_dict, optimizer_overrides)
def multiply_grads(self, c):
'Multiplies grads by a constant *c*.'
self._optimizer.multiply_grads(c)
def clip_grad_norm(self, max_norm):
'Clips gradient norm.'
return self._optimizer.clip_grad_norm(max_norm)
def average_params(self):
self._optimizer.average_params()
def _block_sync(self):
if (self.block_momentum != 0):
self._calc_grad()
self._avg_grad_from_all_gpus()
if (self.block_momentum != 0):
self._update_global_model()
if self.average_sync:
self.average_params()
def _is_warmup_end(self):
if (self.get_num_updates() == self.warmup_iteration):
return True
return False
def _is_bmuf_iter(self):
if ((self.get_num_updates() > self.warmup_iteration) and ((self.get_num_updates() % self.sync_iter) == 0)):
return True
return False
def _warmup_sync(self, root_rank=0):
for param in self.params:
dist.broadcast(param.data, src=root_rank)
if self.average_sync:
self._optimizer.average_params()
else:
self._optimizer.load_state_dict(self.initial_state)
self._reset_local_data()
def step(self, closure=None):
'Performs a single optimization step.'
self._optimizer.step(closure)
self.set_num_updates((self.get_num_updates() + 1))
if self._is_warmup_end():
self._warmup_sync()
elif self._is_bmuf_iter():
self._block_sync()
def zero_grad(self):
'Clears the gradients of all optimized parameters.'
self._optimizer.zero_grad()
def get_num_updates(self):
'Get the number of parameters updates.'
return self._num_updates
def set_num_updates(self, num_updates):
'Set the number of parameters updates.'
self._num_updates = num_updates
@torch.no_grad()
def _reset_local_data(self):
self.global_params = [torch.zeros_like(p.data) for p in self.params]
self.smoothed_grads = [p.data.new_zeros(p.data.size()) for p in self.params]
self.grads = [p.data.new_zeros(p.data.size()) for p in self.params]
for (param, global_param) in zip(self.params, self.global_params):
global_param.copy_(param.data)
@torch.no_grad()
def _calc_grad(self):
for (index, (param, global_param)) in enumerate(zip(self.params, self.global_params)):
self.grads[index] = (global_param - param.data)
def _avg_grad_from_all_gpus(self):
for (index, param) in enumerate(self.params):
sync_para = (param.data if (self.block_momentum == 0) else self.grads[index])
sync_para /= float(dist.get_world_size())
dist.all_reduce(sync_para, op=dist.ReduceOp.SUM)
@torch.no_grad()
def _update_global_model(self):
for (index, (param, global_param, smoothed_grad, grad)) in enumerate(zip(self.params, self.global_params, self.smoothed_grads, self.grads)):
smoothed_grad = ((self.block_momentum * smoothed_grad) + (self.block_lr * grad))
param.data.copy_((global_param - smoothed_grad))
if self.use_nbm:
param.data.copy_((param.data - (self.block_momentum * smoothed_grad)))
self.smoothed_grads[index] = smoothed_grad
global_param.copy_(param.data)
|
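Stripped of the torch.distributed plumbing, one BMUF sync point reduces to a few tensor operations. A single-process sketch with hypothetical values (in real training the block gradient would first be averaged across workers with dist.all_reduce):

import torch

# One BMUF sync step for a single parameter, mirroring _calc_grad() and
# _update_global_model() above.
def bmuf_sync(param, global_param, smoothed_grad, block_momentum=0.875,
              block_lr=1.0, use_nbm=True):
    block_grad = global_param - param                        # _calc_grad()
    smoothed_grad = block_momentum * smoothed_grad + block_lr * block_grad
    param.copy_(global_param - smoothed_grad)                # blockwise model-update filtering
    if use_nbm:                                              # Nesterov-style correction
        param.copy_(param - block_momentum * smoothed_grad)
    global_param.copy_(param)
    return smoothed_grad

p = torch.tensor([0.8])      # local model after --global-sync-iter local updates
g = torch.tensor([1.0])      # global model from the previous sync
s = torch.zeros(1)           # smoothed-gradient buffer
print(bmuf_sync(p, g, s), p, g)
|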
class FairseqOptimizer(object):
def __init__(self, args):
super().__init__()
self.args = args
@staticmethod
def add_args(parser):
'Add optimizer-specific arguments to the parser.'
pass
@property
def optimizer(self):
'Return a torch.optim.optimizer.Optimizer instance.'
if (not hasattr(self, '_optimizer')):
raise NotImplementedError
if (not isinstance(self._optimizer, torch.optim.Optimizer)):
raise ValueError('_optimizer must be an instance of torch.optim.Optimizer')
return self._optimizer
@property
def optimizer_config(self):
'\n Return a kwarg dictionary that will be used to override optimizer\n args stored in checkpoints. This allows us to load a checkpoint and\n resume training using a different set of optimizer args, e.g., with a\n different learning rate.\n '
raise NotImplementedError
@property
def params(self):
'Return an iterable of the parameters held by the optimizer.'
for param_group in self.optimizer.param_groups:
for p in param_group['params']:
(yield p)
def __getstate__(self):
return self._optimizer.__getstate__()
def get_lr(self):
'Return the current learning rate.'
return self.optimizer.param_groups[0]['lr']
def set_lr(self, lr):
'Set the learning rate.'
for param_group in self.optimizer.param_groups:
param_group['lr'] = lr
def state_dict(self):
"Return the optimizer's state dict."
return self.optimizer.state_dict()
def load_state_dict(self, state_dict, optimizer_overrides=None):
'Load an optimizer state dict.\n\n In general we should prefer the configuration of the existing optimizer\n instance (e.g., learning rate) over that found in the state_dict. This\n allows us to resume training from a checkpoint using a new set of\n optimizer args.\n '
self.optimizer.load_state_dict(state_dict)
if ((optimizer_overrides is not None) and (len(optimizer_overrides) > 0)):
for group in self.optimizer.param_groups:
group.update(optimizer_overrides)
def backward(self, loss):
'Computes the sum of gradients of the given tensor w.r.t. graph leaves.'
loss.backward()
def multiply_grads(self, c):
'Multiplies grads by a constant *c*.'
for p in self.params:
if (p.grad is not None):
p.grad.data.mul_(c)
def clip_grad_norm(self, max_norm):
'Clips gradient norm.'
if (max_norm > 0):
return torch.nn.utils.clip_grad_norm_(self.params, max_norm)
else:
return math.sqrt(sum(((p.grad.data.norm() ** 2) for p in self.params if (p.grad is not None))))
def step(self, closure=None):
'Performs a single optimization step.'
self.optimizer.step(closure)
def zero_grad(self):
'Clears the gradients of all optimized parameters.'
for p in self.params:
p.grad = None
self.optimizer.zero_grad()
@property
def supports_memory_efficient_fp16(self):
if hasattr(self.optimizer, 'supports_memory_efficient_fp16'):
return self.optimizer.supports_memory_efficient_fp16
return False
def average_params(self):
pass
|
class DynamicLossScaler(object):
def __init__(self, init_scale=(2.0 ** 15), scale_factor=2.0, scale_window=2000, tolerance=0.05, threshold=None):
self.loss_scale = init_scale
self.scale_factor = scale_factor
self.scale_window = scale_window
self.tolerance = tolerance
self.threshold = threshold
self._iter = 0
self._last_overflow_iter = (- 1)
self._last_rescale_iter = (- 1)
self._overflows_since_rescale = 0
def update_scale(self, overflow):
iter_since_rescale = (self._iter - self._last_rescale_iter)
if overflow:
self._last_overflow_iter = self._iter
self._overflows_since_rescale += 1
pct_overflow = (self._overflows_since_rescale / float(iter_since_rescale))
if (pct_overflow >= self.tolerance):
self._decrease_loss_scale()
self._last_rescale_iter = self._iter
self._overflows_since_rescale = 0
elif (((self._iter - self._last_overflow_iter) % self.scale_window) == 0):
self.loss_scale *= self.scale_factor
self._last_rescale_iter = self._iter
self._iter += 1
def _decrease_loss_scale(self):
self.loss_scale /= self.scale_factor
if (self.threshold is not None):
self.loss_scale = max(self.loss_scale, self.threshold)
@staticmethod
def has_overflow(grad_norm):
if ((grad_norm == float('inf')) or (grad_norm != grad_norm)):
return True
return False
|
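A quick usage sketch of the scaler above (it assumes the DynamicLossScaler class from this file is in scope): an overflow shrinks the loss scale once the overflow rate exceeds the tolerance, while scale_window consecutive clean updates double it again.

# Assumes the DynamicLossScaler class defined above is in scope.
scaler = DynamicLossScaler(init_scale=2.0 ** 7, scale_window=4)

# Three clean updates, one overflow, then four more clean updates.
for grad_norm in [1.0, 1.0, 1.0, float('inf'), 1.0, 1.0, 1.0, 1.0]:
    overflow = DynamicLossScaler.has_overflow(grad_norm)
    scaler.update_scale(overflow)
    print('overflow:', overflow, 'loss_scale:', scaler.loss_scale)
|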
class _FP16OptimizerMixin(object):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@classmethod
def build_fp32_params(cls, params):
total_param_size = sum((p.data.numel() for p in params))
fp32_params = params[0].new(0).float().new(total_param_size)
offset = 0
for p in params:
numel = p.data.numel()
fp32_params[offset:(offset + numel)].copy_(p.data.view((- 1)))
offset += numel
fp32_params = torch.nn.Parameter(fp32_params)
fp32_params.grad = fp32_params.data.new(total_param_size)
return fp32_params
def state_dict(self):
"Return the optimizer's state dict."
state_dict = self.fp32_optimizer.state_dict()
state_dict['loss_scale'] = self.scaler.loss_scale
return state_dict
def load_state_dict(self, state_dict, optimizer_overrides=None):
'Load an optimizer state dict.\n\n In general we should prefer the configuration of the existing optimizer\n instance (e.g., learning rate) over that found in the state_dict. This\n allows us to resume training from a checkpoint using a new set of\n optimizer args.\n '
if ('loss_scale' in state_dict):
self.scaler.loss_scale = state_dict['loss_scale']
self.fp32_optimizer.load_state_dict(state_dict, optimizer_overrides)
def backward(self, loss):
'Computes the sum of gradients of the given tensor w.r.t. graph leaves.\n\n Compared to :func:`fairseq.optim.FairseqOptimizer.backward`, this\n function additionally dynamically scales the loss to avoid gradient\n underflow.\n '
loss = (loss * self.scaler.loss_scale)
loss.backward()
self._needs_sync = True
def _sync_fp16_grads_to_fp32(self, multiply_grads=1.0):
if self._needs_sync:
offset = 0
for p in self.fp16_params:
if (not p.requires_grad):
continue
grad_data = (p.grad.data if (p.grad is not None) else p.data.new_zeros(p.data.shape))
numel = grad_data.numel()
self.fp32_params.grad.data[offset:(offset + numel)].copy_(grad_data.view((- 1)))
offset += numel
self.fp32_params.grad.data.mul_((multiply_grads / self.scaler.loss_scale))
self._needs_sync = False
def multiply_grads(self, c):
'Multiplies grads by a constant ``c``.'
if self._needs_sync:
self._sync_fp16_grads_to_fp32(c)
else:
self.fp32_params.grad.data.mul_(c)
def clip_grad_norm(self, max_norm):
'Clips gradient norm and updates dynamic loss scaler.'
self._sync_fp16_grads_to_fp32()
grad_norm = utils.clip_grad_norm_(self.fp32_params.grad.data, max_norm)
overflow = DynamicLossScaler.has_overflow(grad_norm)
self.scaler.update_scale(overflow)
if overflow:
if (self.scaler.loss_scale <= self.min_loss_scale):
raise FloatingPointError('Minimum loss scale reached ({}). Your loss is probably exploding. Try lowering the learning rate, using gradient clipping or increasing the batch size.'.format(self.min_loss_scale))
raise OverflowError(('setting loss scale to: ' + str(self.scaler.loss_scale)))
return grad_norm
def step(self, closure=None):
'Performs a single optimization step.'
self._sync_fp16_grads_to_fp32()
self.fp32_optimizer.step(closure)
offset = 0
for p in self.fp16_params:
if (not p.requires_grad):
continue
numel = p.data.numel()
p.data.copy_(self.fp32_params.data[offset:(offset + numel)].view_as(p.data))
offset += numel
def zero_grad(self):
'Clears the gradients of all optimized parameters.'
for p in self.fp16_params:
p.grad = None
self._needs_sync = False
|
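A minimal sketch (plain PyTorch, names are illustrative) of the flat FP32 master-parameter pattern used by build_fp32_params() and step() above: the FP16 model parameters are copied into one contiguous FP32 buffer that the real optimizer updates, and the results are copied back into the FP16 params afterwards.

import torch

# Two hypothetical fp16 model parameters.
fp16_params = [torch.randn(4).half(), torch.randn(3).half()]

# Flatten the fp16 params into one fp32 master tensor (cf. build_fp32_params()).
total = sum(p.numel() for p in fp16_params)
fp32_master = torch.nn.Parameter(torch.zeros(total, dtype=torch.float))
offset = 0
for p in fp16_params:
    n = p.numel()
    fp32_master.data[offset:offset + n].copy_(p.data.view(-1))
    offset += n
fp32_master.grad = fp32_master.data.new_zeros(total)

# ... an FP32 optimizer would step on fp32_master here ...

# Copy the updated fp32 values back into the fp16 model params (cf. step()).
offset = 0
for p in fp16_params:
    n = p.numel()
    p.data.copy_(fp32_master.data[offset:offset + n].view_as(p.data))
    offset += n
print([p.dtype for p in fp16_params], fp32_master.dtype)
|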
class FP16Optimizer(_FP16OptimizerMixin, optim.FairseqOptimizer):
'\n Wrap an *optimizer* to support FP16 (mixed precision) training.\n '
def __init__(self, args, params, fp32_optimizer, fp32_params):
super().__init__(args)
self.fp16_params = params
self.fp32_optimizer = fp32_optimizer
self.fp32_params = fp32_params
if (getattr(args, 'fp16_scale_window', None) is None):
if (len(args.update_freq) > 1):
raise ValueError('--fp16-scale-window must be given explicitly when using a custom --update-freq schedule')
scale_window = (((2 ** 14) / args.distributed_world_size) / args.update_freq[0])
else:
scale_window = args.fp16_scale_window
self.scaler = DynamicLossScaler(init_scale=args.fp16_init_scale, scale_window=scale_window, tolerance=args.fp16_scale_tolerance, threshold=args.threshold_loss_scale)
self.min_loss_scale = self.args.min_loss_scale
@classmethod
def build_optimizer(cls, args, params):
'\n Args:\n args (argparse.Namespace): fairseq args\n params (iterable): iterable of parameters to optimize\n '
fp32_params = cls.build_fp32_params(params)
fp32_optimizer = optim.build_optimizer(args, [fp32_params])
return cls(args, params, fp32_optimizer, fp32_params)
@property
def optimizer(self):
return self.fp32_optimizer.optimizer
@property
def optimizer_config(self):
return self.fp32_optimizer.optimizer_config
def get_lr(self):
return self.fp32_optimizer.get_lr()
def set_lr(self, lr):
self.fp32_optimizer.set_lr(lr)
|
class MemoryEfficientFP16Optimizer(optim.FairseqOptimizer):
'\n Wrap an *optimizer* to support FP16 (mixed precision) training.\n\n Compared to :class:`fairseq.optim.FP16Optimizer`, this version does not\n maintain an FP32 copy of the model. We instead expect the optimizer to\n convert the gradients to FP32 internally and sync the results back to the\n FP16 model params. This significantly reduces memory usage but slightly\n increases the time spent in the optimizer.\n\n Since this wrapper depends on specific functionality in the wrapped\n optimizer (i.e., on-the-fly conversion of grads to FP32), only certain\n optimizers can be wrapped. This is determined by the\n *supports_memory_efficient_fp16* property.\n '
def __init__(self, args, params, optimizer):
if (not optimizer.supports_memory_efficient_fp16):
raise ValueError('Unsupported optimizer: {}'.format(optimizer.__class__.__name__))
super().__init__(args)
self.wrapped_optimizer = optimizer
if (getattr(args, 'fp16_scale_window', None) is None):
if (len(args.update_freq) > 1):
raise ValueError('--fp16-scale-window must be given explicitly when using a custom --update-freq schedule')
scale_window = (((2 ** 14) / args.distributed_world_size) / args.update_freq[0])
else:
scale_window = args.fp16_scale_window
self.scaler = DynamicLossScaler(init_scale=args.fp16_init_scale, scale_window=scale_window, tolerance=args.fp16_scale_tolerance, threshold=args.threshold_loss_scale)
@classmethod
def build_optimizer(cls, args, params):
'\n Args:\n args (argparse.Namespace): fairseq args\n params (iterable): iterable of parameters to optimize\n '
fp16_optimizer = optim.build_optimizer(args, params)
return cls(args, params, fp16_optimizer)
@property
def optimizer(self):
return self.wrapped_optimizer.optimizer
@property
def optimizer_config(self):
return self.wrapped_optimizer.optimizer_config
def get_lr(self):
return self.wrapped_optimizer.get_lr()
def set_lr(self, lr):
self.wrapped_optimizer.set_lr(lr)
def state_dict(self):
"Return the optimizer's state dict."
state_dict = self.wrapped_optimizer.state_dict()
state_dict['loss_scale'] = self.scaler.loss_scale
return state_dict
def load_state_dict(self, state_dict, optimizer_overrides=None):
'Load an optimizer state dict.\n\n In general we should prefer the configuration of the existing optimizer\n instance (e.g., learning rate) over that found in the state_dict. This\n allows us to resume training from a checkpoint using a new set of\n optimizer args.\n '
if ('loss_scale' in state_dict):
self.scaler.loss_scale = state_dict['loss_scale']
self.wrapped_optimizer.load_state_dict(state_dict, optimizer_overrides)
groups = self.optimizer.param_groups
saved_groups = state_dict['param_groups']
id_map = {old_id: p for (old_id, p) in zip(chain(*(g['params'] for g in saved_groups)), chain(*(g['params'] for g in groups)))}
for (k, v) in state_dict['state'].items():
if (k in id_map):
param = id_map[k]
self.optimizer.state[param] = v
def backward(self, loss):
'Computes the sum of gradients of the given tensor w.r.t. graph leaves.\n\n Compared to :func:`fairseq.optim.FairseqOptimizer.backward`, this\n function additionally dynamically scales the loss to avoid gradient\n underflow.\n '
loss = (loss * self.scaler.loss_scale)
loss.backward()
self._grads_are_scaled = True
def _unscale_grads(self, multiply_grads=1.0):
if self._grads_are_scaled:
self._grads_are_scaled = False
self.wrapped_optimizer.multiply_grads((multiply_grads / self.scaler.loss_scale))
else:
assert (multiply_grads == 1.0)
def multiply_grads(self, c):
'Multiplies grads by a constant *c*.'
if self._grads_are_scaled:
self._unscale_grads(c)
else:
self.wrapped_optimizer.multiply_grads(c)
def clip_grad_norm(self, max_norm):
'Clips gradient norm and updates dynamic loss scaler.'
self._unscale_grads()
grad_norm = self.wrapped_optimizer.clip_grad_norm(max_norm)
overflow = DynamicLossScaler.has_overflow(grad_norm)
self.scaler.update_scale(overflow)
if overflow:
if (self.scaler.loss_scale <= self.args.min_loss_scale):
raise FloatingPointError('Minimum loss scale reached ({}). Your loss is probably exploding. Try lowering the learning rate, using gradient clipping or increasing the batch size.'.format(self.args.min_loss_scale))
raise OverflowError(('setting loss scale to: ' + str(self.scaler.loss_scale)))
return grad_norm
def step(self, closure=None):
'Performs a single optimization step.'
self._unscale_grads()
self.wrapped_optimizer.step(closure)
def zero_grad(self):
'Clears the gradients of all optimized parameters.'
self.wrapped_optimizer.zero_grad()
self._grads_are_scaled = False
|
@register_lr_scheduler('cosine')
class CosineSchedule(FairseqLRScheduler):
'Assign LR based on a cyclical schedule that follows the cosine function.\n\n See https://arxiv.org/pdf/1608.03983.pdf for details.\n\n We also support a warmup phase where we linearly increase the learning rate\n from some initial learning rate (``--warmup-init-lr``) until the configured\n max learning rate (``--max-lr``).\n\n During warmup::\n\n lrs = torch.linspace(args.warmup_init_lr, args.lr, args.warmup_updates)\n lr = lrs[update_num]\n\n After warmup::\n\n lr = lr_min + 0.5*(lr_max - lr_min)*(1 + cos(t_curr / t_i))\n\n where ``t_curr`` is current percentage of updates within the current period\n range and ``t_i`` is the current period range, which is scaled by ``t_mul``\n after every iteration.\n '
def __init__(self, args, optimizer):
super().__init__(args, optimizer)
if (len(args.lr) > 1):
raise ValueError('Cannot use a fixed learning rate schedule with cosine. Consider --lr-scheduler=fixed instead.')
warmup_end_lr = args.max_lr
if (args.warmup_init_lr < 0):
args.warmup_init_lr = args.lr[0]
self.min_lr = args.lr[0]
self.max_lr = args.max_lr
assert (self.max_lr > self.min_lr), 'max_lr must be more than lr'
self.t_mult = args.t_mult
self.period = args.lr_period_updates
if (self.period <= 0):
assert (args.max_update >= 0), 'Either --max-update or --lr-period-updates must be set'
self.period = (args.max_update - args.warmup_updates)
if (args.warmup_updates > 0):
self.lr_step = ((warmup_end_lr - args.warmup_init_lr) / args.warmup_updates)
else:
self.lr_step = 1
self.warmup_updates = args.warmup_updates
self.lr_shrink = args.lr_shrink
self.lr = args.warmup_init_lr
self.optimizer.set_lr(self.lr)
@staticmethod
def add_args(parser):
'Add arguments to the parser for this LR scheduler.'
parser.add_argument('--warmup-updates', default=0, type=int, metavar='N', help='warmup the learning rate linearly for the first N updates')
parser.add_argument('--warmup-init-lr', default=(- 1), type=float, metavar='LR', help='initial learning rate during warmup phase; default is args.lr')
parser.add_argument('--max-lr', type=float, metavar='LR', help='max learning rate, must be more than args.lr')
parser.add_argument('--t-mult', default=1, type=float, metavar='LR', help='factor to grow the length of each period')
parser.add_argument('--lr-period-updates', default=(- 1), type=float, metavar='LR', help='initial number of updates per period')
parser.add_argument('--lr-shrink', default=0.1, type=float, metavar='LS', help='shrink factor for annealing')
def step(self, epoch, val_loss=None):
'Update the learning rate at the end of the given epoch.'
super().step(epoch, val_loss)
return self.optimizer.get_lr()
def step_update(self, num_updates):
'Update the learning rate after each update.'
if (num_updates < self.args.warmup_updates):
self.lr = (self.args.warmup_init_lr + (num_updates * self.lr_step))
else:
curr_updates = (num_updates - self.args.warmup_updates)
if (self.t_mult != 1):
i = math.floor(math.log((1 - ((curr_updates / self.period) * (1 - self.t_mult))), self.t_mult))
t_i = ((self.t_mult ** i) * self.period)
t_curr = (curr_updates - (((1 - (self.t_mult ** i)) / (1 - self.t_mult)) * self.period))
else:
i = math.floor((curr_updates / self.period))
t_i = self.period
t_curr = (curr_updates - (self.period * i))
lr_shrink = (self.lr_shrink ** i)
min_lr = (self.min_lr * lr_shrink)
max_lr = (self.max_lr * lr_shrink)
self.lr = (min_lr + ((0.5 * (max_lr - min_lr)) * (1 + math.cos(((math.pi * t_curr) / t_i)))))
self.optimizer.set_lr(self.lr)
return self.lr
|
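The post-warmup formula in step_update() above, as a standalone function for the common t_mult == 1 case (fixed period), so the shape of the schedule can be inspected without building an optimizer; the values below are illustrative.

import math

# Cosine LR with warmup, restarts and per-cycle shrink, for t_mult == 1.
def cosine_lr(num_updates, min_lr, max_lr, period, lr_shrink=0.1,
              warmup_updates=0, warmup_init_lr=0.0):
    if num_updates < warmup_updates:
        return warmup_init_lr + num_updates * (max_lr - warmup_init_lr) / warmup_updates
    curr = num_updates - warmup_updates
    i = curr // period                       # index of the current restart cycle
    t_curr = curr - period * i               # position within the cycle
    shrink = lr_shrink ** i
    lo, hi = min_lr * shrink, max_lr * shrink
    return lo + 0.5 * (hi - lo) * (1 + math.cos(math.pi * t_curr / period))

for step in (0, 2500, 5000, 7500, 10000):
    print(step, round(cosine_lr(step, min_lr=1e-5, max_lr=1e-3, period=5000), 6))
|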
class FairseqLRScheduler(object):
def __init__(self, args, optimizer):
super().__init__()
if (not isinstance(optimizer, FairseqOptimizer)):
raise ValueError('optimizer must be an instance of FairseqOptimizer')
self.args = args
self.optimizer = optimizer
self.best = None
@staticmethod
def add_args(parser):
'Add arguments to the parser for this LR scheduler.'
pass
def state_dict(self):
'Return the LR scheduler state dict.'
return {'best': self.best}
def load_state_dict(self, state_dict):
'Load an LR scheduler state dict.'
self.best = state_dict['best']
def step(self, epoch, val_loss=None):
'Update the learning rate at the end of the given epoch.'
if (val_loss is not None):
if (self.best is None):
self.best = val_loss
else:
self.best = min(self.best, val_loss)
def step_update(self, num_updates):
'Update the learning rate after each update.'
return self.optimizer.get_lr()
|
@register_lr_scheduler('fixed')
class FixedSchedule(FairseqLRScheduler):
'Decay the LR on a fixed schedule.'
def __init__(self, args, optimizer):
super().__init__(args, optimizer)
args.warmup_updates = (getattr(args, 'warmup_updates', 0) or 0)
self.lr = args.lr[0]
if (args.warmup_updates > 0):
self.warmup_factor = (1.0 / args.warmup_updates)
else:
self.warmup_factor = 1
@staticmethod
def add_args(parser):
'Add arguments to the parser for this LR scheduler.'
parser.add_argument('--force-anneal', '--fa', type=int, metavar='N', help='force annealing at specified epoch')
parser.add_argument('--lr-shrink', default=0.1, type=float, metavar='LS', help='shrink factor for annealing, lr_new = (lr * lr_shrink)')
parser.add_argument('--warmup-updates', default=0, type=int, metavar='N', help='warmup the learning rate linearly for the first N updates')
def get_next_lr(self, epoch):
lrs = self.args.lr
if ((self.args.force_anneal is None) or (epoch < self.args.force_anneal)):
next_lr = lrs[min(epoch, (len(lrs) - 1))]
else:
next_lr = (lrs[(- 1)] * (self.args.lr_shrink ** ((epoch + 1) - self.args.force_anneal)))
return next_lr
def step(self, epoch, val_loss=None):
'Update the learning rate at the end of the given epoch.'
super().step(epoch, val_loss)
self.lr = self.get_next_lr(epoch)
self.optimizer.set_lr((self.warmup_factor * self.lr))
return self.optimizer.get_lr()
def step_update(self, num_updates):
'Update the learning rate after each update.'
if ((self.args.warmup_updates > 0) and (num_updates <= self.args.warmup_updates)):
self.warmup_factor = (num_updates / float(self.args.warmup_updates))
self.optimizer.set_lr((self.warmup_factor * self.lr))
return self.optimizer.get_lr()
|
@register_lr_scheduler('inverse_sqrt')
class InverseSquareRootSchedule(FairseqLRScheduler):
'Decay the LR based on the inverse square root of the update number.\n\n We also support a warmup phase where we linearly increase the learning rate\n from some initial learning rate (``--warmup-init-lr``) until the configured\n learning rate (``--lr``). Thereafter we decay proportional to the number of\n updates, with a decay factor set to align with the configured learning rate.\n\n During warmup::\n\n lrs = torch.linspace(args.warmup_init_lr, args.lr, args.warmup_updates)\n lr = lrs[update_num]\n\n After warmup::\n\n decay_factor = args.lr * sqrt(args.warmup_updates)\n lr = decay_factor / sqrt(update_num)\n '
def __init__(self, args, optimizer):
super().__init__(args, optimizer)
if (len(args.lr) > 1):
raise ValueError('Cannot use a fixed learning rate schedule with inverse_sqrt. Consider --lr-scheduler=fixed instead.')
warmup_end_lr = args.lr[0]
if (args.warmup_init_lr < 0):
args.warmup_init_lr = (0 if (args.warmup_updates > 0) else warmup_end_lr)
self.lr_step = ((warmup_end_lr - args.warmup_init_lr) / args.warmup_updates)
self.decay_factor = (warmup_end_lr * (args.warmup_updates ** 0.5))
self.lr = args.warmup_init_lr
self.optimizer.set_lr(self.lr)
@staticmethod
def add_args(parser):
'Add arguments to the parser for this LR scheduler.'
parser.add_argument('--warmup-updates', default=4000, type=int, metavar='N', help='warmup the learning rate linearly for the first N updates')
parser.add_argument('--warmup-init-lr', default=(- 1), type=float, metavar='LR', help='initial learning rate during warmup phase; default is args.lr')
def step(self, epoch, val_loss=None):
'Update the learning rate at the end of the given epoch.'
super().step(epoch, val_loss)
return self.optimizer.get_lr()
def step_update(self, num_updates):
'Update the learning rate after each update.'
if (num_updates < self.args.warmup_updates):
self.lr = (self.args.warmup_init_lr + (num_updates * self.lr_step))
else:
self.lr = (self.decay_factor * (num_updates ** (- 0.5)))
self.optimizer.set_lr(self.lr)
return self.lr
|
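The same schedule as a plain function (a sketch, not the class above; defaults are illustrative): the decay factor is chosen so that the warmup and decay phases meet exactly at warmup_updates.

# Inverse square root LR schedule, mirroring step_update() above.
def inverse_sqrt_lr(num_updates, lr=5e-4, warmup_updates=4000, warmup_init_lr=0.0):
    if num_updates < warmup_updates:
        lr_step = (lr - warmup_init_lr) / warmup_updates
        return warmup_init_lr + num_updates * lr_step
    decay_factor = lr * warmup_updates ** 0.5
    return decay_factor * num_updates ** -0.5

for step in (0, 2000, 4000, 16000, 64000):
    print(step, round(inverse_sqrt_lr(step), 7))
|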
@register_lr_scheduler('polynomial_decay')
class PolynomialDecaySchedule(FairseqLRScheduler):
'Decay the LR on a fixed schedule.'
def __init__(self, args, optimizer):
super().__init__(args, optimizer)
args.warmup_updates = (getattr(args, 'warmup_updates', 0) or 0)
self.lr = args.lr[0]
if (args.warmup_updates > 0):
self.warmup_factor = (1.0 / args.warmup_updates)
else:
self.warmup_factor = 1
self.end_learning_rate = args.end_learning_rate
self.total_num_update = args.total_num_update
self.power = args.power
self.optimizer.set_lr((self.warmup_factor * self.lr))
@staticmethod
def add_args(parser):
'Add arguments to the parser for this LR scheduler.'
parser.add_argument('--force-anneal', '--fa', type=int, metavar='N', help='force annealing at specified epoch')
parser.add_argument('--warmup-updates', default=0, type=int, metavar='N', help='warmup the learning rate linearly for the first N updates')
parser.add_argument('--end-learning-rate', default=0.0, type=float)
parser.add_argument('--power', default=1.0, type=float)
parser.add_argument('--total-num-update', default=1000000, type=int)
def get_next_lr(self, epoch):
lrs = self.args.lr
if ((self.args.force_anneal is None) or (epoch < self.args.force_anneal)):
next_lr = lrs[min(epoch, (len(lrs) - 1))]
else:
next_lr = self.optimizer.get_lr()
return next_lr
def step(self, epoch, val_loss=None):
'Update the learning rate at the end of the given epoch.'
super().step(epoch, val_loss)
self.lr = self.get_next_lr(epoch)
self.optimizer.set_lr((self.warmup_factor * self.lr))
return self.optimizer.get_lr()
def step_update(self, num_updates):
'Update the learning rate after each update.'
if ((self.args.warmup_updates > 0) and (num_updates <= self.args.warmup_updates)):
self.warmup_factor = (num_updates / float(self.args.warmup_updates))
lr = (self.warmup_factor * self.lr)
elif (num_updates >= self.total_num_update):
lr = self.end_learning_rate
else:
warmup = self.args.warmup_updates
lr_range = (self.lr - self.end_learning_rate)
pct_remaining = (1 - ((num_updates - warmup) / (self.total_num_update - warmup)))
lr = ((lr_range * (pct_remaining ** self.power)) + self.end_learning_rate)
self.optimizer.set_lr(lr)
return self.optimizer.get_lr()
|
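Likewise for the polynomial schedule in step_update() above; the defaults here are illustrative, not the parser's.

# Polynomial decay LR schedule, mirroring step_update() above.
def polynomial_decay_lr(num_updates, lr=1e-4, warmup_updates=1000,
                        total_num_update=100000, end_learning_rate=0.0, power=1.0):
    if warmup_updates > 0 and num_updates <= warmup_updates:
        return lr * num_updates / warmup_updates
    if num_updates >= total_num_update:
        return end_learning_rate
    pct_remaining = 1 - (num_updates - warmup_updates) / (total_num_update - warmup_updates)
    return (lr - end_learning_rate) * pct_remaining ** power + end_learning_rate

for step in (500, 1000, 50000, 100000):
    print(step, polynomial_decay_lr(step))
|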
@register_lr_scheduler('reduce_lr_on_plateau')
class ReduceLROnPlateau(FairseqLRScheduler):
'\n Decay the LR by a factor every time the validation loss plateaus.\n Also comes with optional warmup phase, where we linearly increase the learning rate\n from some initial learning rate (``--warmup-init-lr``) until the configured\n learning rate (``--lr``). Thereafter the lr is adjusted according to original reduce_on_plateau scheme\n\n During warmup::\n\n lrs = torch.linspace(args.warmup_init_lr, args.lr, args.warmup_updates)\n lr = lrs[update_num]\n '
def __init__(self, args, optimizer):
super().__init__(args, optimizer)
if (len(args.lr) > 1):
raise ValueError('Cannot use a fixed learning rate schedule with reduce_lr_on_plateau. Consider --lr-scheduler=fixed instead.')
self.lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(self.optimizer.optimizer, patience=0, factor=args.lr_shrink, threshold=args.lr_threshold)
warmup_end_lr = args.lr[0]
# if there is no warmup, set the initial lr to args.lr[0]
if (args.warmup_init_lr < 0):
args.warmup_init_lr = (0 if (args.warmup_updates > 0) else warmup_end_lr)
# linearly warm up for the first args.warmup_updates
if (args.warmup_updates > 0):
self.lr_step = ((warmup_end_lr - args.warmup_init_lr) / args.warmup_updates)
# this flag is either set here when there is no warmup, or set by step_update() once warmup finishes
self.warmup_end = (args.warmup_updates <= 0)
# initial learning rate; this self.lr is only used during init and/or the warmup period
self.lr = args.warmup_init_lr
self.optimizer.set_lr(self.lr)
@staticmethod
def add_args(parser):
'Add arguments to the parser for this LR scheduler.'
parser.add_argument('--lr-shrink', default=0.1, type=float, metavar='LS', help='shrink factor for annealing, lr_new = (lr * lr_shrink)')
parser.add_argument('--lr-threshold', default=0.0001, type=float, metavar='LT', help='Threshold for measuring the new optimum, to only focus on significant changes')
parser.add_argument('--warmup-updates', default=0, type=int, metavar='N', help='warmup the learning rate linearly for the first N updates')
parser.add_argument('--warmup-init-lr', default=(- 1), type=float, metavar='LR', help='initial learning rate during warmup phase; default is args.lr')
def state_dict(self):
'Return the LR scheduler state dict.'
return {'best': self.lr_scheduler.best, 'last_epoch': self.lr_scheduler.last_epoch}
def load_state_dict(self, state_dict):
'Load an LR scheduler state dict.'
self.lr_scheduler.best = state_dict['best']
if ('last_epoch' in state_dict):
self.lr_scheduler.last_epoch = state_dict['last_epoch']
def step(self, epoch, val_loss=None):
'Update the learning rate at the end of the given epoch if warmup has finished; otherwise do not update the lr on epoch boundaries.'
if ((val_loss is not None) and self.warmup_end):
self.lr_scheduler.step(val_loss, epoch)
else:
self.lr_scheduler.last_epoch = epoch
return self.optimizer.get_lr()
def step_update(self, num_updates):
'Update the learning rate after each update.'
# if there is a warmup phase, handle it first
if (self.args.warmup_updates > 0):
if (num_updates <= self.args.warmup_updates):
self.lr = (self.args.warmup_init_lr + (num_updates * self.lr_step))
self.optimizer.set_lr(self.lr)
elif (not self.warmup_end):
self.warmup_end = True
# otherwise do nothing; after warmup the lr is driven by the wrapped ReduceLROnPlateau in step()
return self.optimizer.get_lr()
|
@register_lr_scheduler('tri_stage')
class TriStageLRSchedule(FairseqLRScheduler):
'Tri-stage learning rate scheduler\n\n Implements the learning rate scheduler in https://arxiv.org/pdf/1904.08779.pdf\n\n Similar to the inverse_sqrt scheduler, but tri_stage LR scheduling employs\n three stages:\n\n - warmup stage, starting from `lr` * `init_lr_scale`, linearly\n increased to `lr` in `warmup_steps` iterations\n\n - hold stage, after `warmup_steps`, keep the LR at `lr` for `hold_steps`\n iterations\n\n - decay stage, after the hold stage, decay the LR exponentially to\n `lr` * `final_lr_scale` in `decay_steps`;\n after that the LR is kept at `final_lr_scale` * `lr`\n\n During warmup::\n\n init_lr = args.init_lr_scale * args.lr\n lrs = torch.linspace(init_lr, args.lr, args.warmup_steps)\n lr = lrs[update_num]\n\n During hold::\n\n lr = args.lr\n\n During decay::\n\n decay_factor = - math.log(args.final_lr_scale) / args.decay_steps\n lr = args.lr * exp(- (update_num - warmup_steps - hold_steps) * decay_factor)\n\n After that::\n\n lr = args.lr * args.final_lr_scale\n '
def __init__(self, args, optimizer):
super().__init__(args, optimizer)
if (len(args.lr) > 1):
raise ValueError('Cannot use a fixed learning rate schedule with tri-stage lr. Consider --lr-scheduler=fixed instead.')
self.peak_lr = args.lr[0]
self.init_lr = (args.init_lr_scale * args.lr[0])
self.final_lr = (args.final_lr_scale * args.lr[0])
self.warmup_steps = args.warmup_steps
self.hold_steps = args.hold_steps
self.decay_steps = args.decay_steps
self.warmup_rate = ((self.peak_lr - self.init_lr) / self.warmup_steps)
self.decay_factor = ((- math.log(args.final_lr_scale)) / args.decay_steps)
self.lr = self.init_lr
self.optimizer.set_lr(self.lr)
@staticmethod
def add_args(parser):
'Add arguments to the parser for this LR scheduler.'
parser.add_argument('--warmup-steps', default=4000, type=int, metavar='N', help='warmup the learning rate linearly for the first N updates')
parser.add_argument('--hold-steps', default=20000, type=int, metavar='N', help='steps in hold stage.')
parser.add_argument('--decay-steps', default=60000, type=int, metavar='N', help='steps in decay stages')
parser.add_argument('--init-lr-scale', default=0.01, type=float, help='\n initial learning rate scale during warmup phase; default is 0.01')
parser.add_argument('--final-lr-scale', default=0.01, type=float, help='final learning rate scale; default to 0.01')
def _decide_stage(self, update_step):
'\n return stage, and the corresponding steps within the current stage\n '
if (update_step < self.warmup_steps):
return (0, update_step)
offset = self.warmup_steps
if (update_step < (offset + self.hold_steps)):
return (1, (update_step - offset))
offset += self.hold_steps
if (update_step <= (offset + self.decay_steps)):
return (2, (update_step - offset))
offset += self.decay_steps
return (3, (update_step - offset))
def step(self, epoch, val_loss=None):
'Update the learning rate at the end of the given epoch.'
super().step(epoch, val_loss)
return self.optimizer.get_lr()
def step_update(self, num_updates):
'Update the learning rate after each update.'
(stage, steps_in_stage) = self._decide_stage(num_updates)
if (stage == 0):
self.lr = (self.init_lr + (self.warmup_rate * steps_in_stage))
elif (stage == 1):
self.lr = self.peak_lr
elif (stage == 2):
self.lr = (self.peak_lr * math.exp(((- self.decay_factor) * steps_in_stage)))
elif (stage == 3):
self.lr = self.final_lr
else:
raise ValueError('Undefined stage')
self.optimizer.set_lr(self.lr)
return self.lr
|
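A standalone sketch of the three stages computed by _decide_stage() and step_update() above (argument defaults mirror the parser defaults above):

import math

# Tri-stage LR: linear warmup, hold, exponential decay, then a constant floor.
def tri_stage_lr(num_updates, peak_lr=1e-3, init_lr_scale=0.01, final_lr_scale=0.01,
                 warmup_steps=4000, hold_steps=20000, decay_steps=60000):
    init_lr = init_lr_scale * peak_lr
    if num_updates < warmup_steps:                                 # stage 0: warmup
        return init_lr + (peak_lr - init_lr) * num_updates / warmup_steps
    if num_updates < warmup_steps + hold_steps:                    # stage 1: hold
        return peak_lr
    if num_updates <= warmup_steps + hold_steps + decay_steps:     # stage 2: decay
        decay_factor = -math.log(final_lr_scale) / decay_steps
        steps_in_decay = num_updates - warmup_steps - hold_steps
        return peak_lr * math.exp(-decay_factor * steps_in_decay)
    return final_lr_scale * peak_lr                                # stage 3: floor

for step in (0, 4000, 20000, 84000, 120000):
    print(step, round(tri_stage_lr(step), 8))
|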
@register_lr_scheduler('triangular')
class TriangularSchedule(FairseqLRScheduler):
'Assign LR based on a triangular cyclical schedule.\n\n See https://arxiv.org/pdf/1506.01186.pdf for details.\n '
def __init__(self, args, optimizer):
super().__init__(args, optimizer)
if (len(args.lr) > 1):
raise ValueError('Cannot use a fixed learning rate schedule with triangular. Consider --lr-scheduler=fixed instead.')
lr = args.lr[0]
assert (args.max_lr > lr), 'max_lr must be more than lr'
self.min_lr = lr
self.max_lr = args.max_lr
self.stepsize = (args.lr_period_updates // 2)
self.lr_shrink = args.lr_shrink
self.shrink_min = args.shrink_min
self.lr = self.min_lr
self.optimizer.set_lr(self.lr)
@staticmethod
def add_args(parser):
'Add arguments to the parser for this LR scheduler.'
parser.add_argument('--max-lr', required=True, type=float, metavar='LR', help='max learning rate, must be more than args.lr')
parser.add_argument('--lr-period-updates', default=5000, type=float, metavar='LR', help='initial number of updates per period (cycle length)')
parser.add_argument('--lr-shrink', default=0.1, type=float, metavar='LS', help='shrink factor for annealing')
parser.add_argument('--shrink-min', action='store_true', help='if set, also shrinks min lr')
def step(self, epoch, val_loss=None):
'Update the learning rate at the end of the given epoch.'
super().step(epoch, val_loss)
return self.optimizer.get_lr()
def step_update(self, num_updates):
'Update the learning rate after each update.'
cycle = math.floor((num_updates / (2 * self.stepsize)))
lr_shrink = (self.lr_shrink ** cycle)
max_lr = (self.max_lr * lr_shrink)
if self.shrink_min:
min_lr = (self.min_lr * lr_shrink)
else:
min_lr = self.min_lr
x = abs((((num_updates / self.stepsize) - (2 * (cycle + 1))) + 1))
self.lr = (min_lr + ((max_lr - min_lr) * max(0, (1 - x))))
self.optimizer.set_lr(self.lr)
return self.lr
|
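And the triangular cyclical schedule above as a plain function (illustrative defaults):

import math

# Triangular cyclical LR with per-cycle shrink, mirroring step_update() above.
def triangular_lr(num_updates, min_lr=1e-5, max_lr=1e-3, period=5000,
                  lr_shrink=0.1, shrink_min=False):
    stepsize = period // 2
    cycle = math.floor(num_updates / (2 * stepsize))
    shrink = lr_shrink ** cycle
    hi = max_lr * shrink
    lo = min_lr * shrink if shrink_min else min_lr
    x = abs(num_updates / stepsize - 2 * (cycle + 1) + 1)
    return lo + (hi - lo) * max(0, 1 - x)

for step in (0, 2500, 5000, 7500, 10000):
    print(step, round(triangular_lr(step), 6))
|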
@register_optimizer('nag')
class FairseqNAG(FairseqOptimizer):
def __init__(self, args, params):
super().__init__(args)
self._optimizer = NAG(params, **self.optimizer_config)
@staticmethod
def add_args(parser):
'Add optimizer-specific arguments to the parser.'
parser.add_argument('--momentum', default=0.99, type=float, metavar='M', help='momentum factor')
parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD', help='weight decay')
@property
def optimizer_config(self):
'\n Return a kwarg dictionary that will be used to override optimizer\n args stored in checkpoints. This allows us to load a checkpoint and\n resume training using a different set of optimizer args, e.g., with a\n different learning rate.\n '
return {'lr': self.args.lr[0], 'momentum': self.args.momentum, 'weight_decay': self.args.weight_decay}
|
class NAG(Optimizer):
def __init__(self, params, lr=required, momentum=0, weight_decay=0):
defaults = dict(lr=lr, lr_old=lr, momentum=momentum, weight_decay=weight_decay)
super(NAG, self).__init__(params, defaults)
@property
def supports_memory_efficient_fp16(self):
return True
def step(self, closure=None):
'Performs a single optimization step.\n\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n '
loss = None
if (closure is not None):
loss = closure()
for group in self.param_groups:
weight_decay = group['weight_decay']
momentum = group['momentum']
lr = group['lr']
lr_old = group.get('lr_old', lr)
lr_correct = (lr / lr_old)
for p in group['params']:
if (p.grad is None):
continue
p_data_fp32 = p.data.float()
d_p = p.grad.data.float()
param_state = self.state[p]
if ('momentum_buffer' not in param_state):
param_state['momentum_buffer'] = torch.zeros_like(d_p)
else:
param_state['momentum_buffer'] = param_state['momentum_buffer'].type_as(d_p)
buf = param_state['momentum_buffer']
if (weight_decay != 0):
p_data_fp32.mul_((1 - (lr * weight_decay)))
p_data_fp32.add_(((momentum * momentum) * lr_correct), buf)
p_data_fp32.add_(((- (1 + momentum)) * lr), d_p)
buf.mul_((momentum * lr_correct)).add_((- lr), d_p)
p.data.copy_(p_data_fp32)
group['lr_old'] = lr
return loss
|
@register_optimizer('sgd')
class SGD(FairseqOptimizer):
def __init__(self, args, params):
super().__init__(args)
self._optimizer = torch.optim.SGD(params, **self.optimizer_config)
@staticmethod
def add_args(parser):
'Add optimizer-specific arguments to the parser.'
parser.add_argument('--momentum', default=0.0, type=float, metavar='M', help='momentum factor')
parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD', help='weight decay')
@property
def optimizer_config(self):
'\n Return a kwarg dictionary that will be used to override optimizer\n args stored in checkpoints. This allows us to load a checkpoint and\n resume training using a different set of optimizer args, e.g., with a\n different learning rate.\n '
return {'lr': self.args.lr[0], 'momentum': self.args.momentum, 'weight_decay': self.args.weight_decay}
|
def get_preprocessing_parser(default_task='translation'):
parser = get_parser('Preprocessing', default_task)
add_preprocess_args(parser)
return parser
|
def get_training_parser(default_task='translation'):
parser = get_parser('Trainer', default_task)
add_dataset_args(parser, train=True)
add_distributed_training_args(parser)
add_model_args(parser)
add_optimization_args(parser)
add_checkpoint_args(parser)
add_norm_args(parser)
return parser
|
def get_generation_parser(interactive=False, default_task='translation'):
parser = get_parser('Generation', default_task)
add_dataset_args(parser, gen=True)
add_generation_args(parser)
if interactive:
add_interactive_args(parser)
return parser
|
def get_interactive_generation_parser(default_task='translation'):
return get_generation_parser(interactive=True, default_task=default_task)
|
def get_eval_lm_parser(default_task='language_modeling'):
parser = get_parser('Evaluate Language Model', default_task)
add_dataset_args(parser, gen=True)
add_eval_lm_args(parser)
return parser
|
def get_validation_parser(default_task=None):
parser = get_parser('Validation', default_task)
add_dataset_args(parser, train=True)
group = parser.add_argument_group('Evaluation')
add_common_eval_args(group)
return parser
|
def add_norm_args(parser):
group = parser.add_argument_group('Normalization')
group.add_argument('--encoder-norm-self', default='layer', choices=['layer', 'batch', 'power'], help='normalization scheme for encoder self-attention blocks')
group.add_argument('--encoder-norm-ff', default='layer', choices=['none', 'layer', 'group', 'batch', 'power'], help='normalization scheme for encoder feed-forward blocks')
group.add_argument('--encoder-spec-norm', default=False, action='store_true')
group.add_argument('--decoder-norm-self', default='layer', choices=['layer', 'group', 'batch', 'power'], help='normalization scheme for decoder self-attention blocks')
group.add_argument('--decoder-norm-ff', default='layer', choices=['none', 'layer', 'group', 'batch', 'power'], help='normalization scheme for decoder feed-forward blocks')
group.add_argument('--dropout-type', default='none', choices=['none', 'bernoulli', 'gamma', 'gumbel', 'beta', 'laplace', 'chi', 'normal'], help='dropout type for all the layers')
group.add_argument('--dropout-gama', default=0.5, type=float, metavar='N', help='gamma parameter for the noisy dropout types')
return group
|
def eval_str_list(x, type=float):
if (x is None):
return None
if isinstance(x, str):
x = eval(x)
try:
return list(map(type, x))
except TypeError:
return [type(x)]
|
def eval_bool(x, default=False):
if (x is None):
return default
try:
return bool(eval(x))
except TypeError:
return default
|
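These helpers turn command-line strings into typed Python values (fairseq typically uses eval_str_list for list-valued options such as --lr). A few usage examples, assuming the two functions above are in scope:

# Assumes eval_str_list and eval_bool from above are in scope.
print(eval_str_list('[0.25, 0.1, 0.05]', type=float))   # -> [0.25, 0.1, 0.05]
print(eval_str_list('0.25', type=float))                # -> [0.25] (a scalar is wrapped in a list)
print(eval_str_list(None))                              # -> None

print(eval_bool('True'))                                # -> True
print(eval_bool(None, default=False))                   # -> False
|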
def parse_args_and_arch(parser, input_args=None, parse_known=False, suppress_defaults=False):
if suppress_defaults:
args = parse_args_and_arch(parser, input_args=input_args, parse_known=parse_known, suppress_defaults=False)
suppressed_parser = argparse.ArgumentParser(add_help=False, parents=[parser])
suppressed_parser.set_defaults(**{k: None for (k, v) in vars(args).items()})
args = suppressed_parser.parse_args(input_args)
return argparse.Namespace(**{k: v for (k, v) in vars(args).items() if (v is not None)})
from fairseq.models import ARCH_MODEL_REGISTRY, ARCH_CONFIG_REGISTRY
(args, _) = parser.parse_known_args(input_args)
if hasattr(args, 'arch'):
model_specific_group = parser.add_argument_group('Model-specific configuration', argument_default=argparse.SUPPRESS)
ARCH_MODEL_REGISTRY[args.arch].add_args(model_specific_group)
from fairseq.registry import REGISTRIES
for (registry_name, REGISTRY) in REGISTRIES.items():
choice = getattr(args, registry_name, None)
if (choice is not None):
cls = REGISTRY['registry'][choice]
if hasattr(cls, 'add_args'):
cls.add_args(parser)
if hasattr(args, 'task'):
from fairseq.tasks import TASK_REGISTRY
TASK_REGISTRY[args.task].add_args(parser)
if getattr(args, 'use_bmuf', False):
from fairseq.optim.bmuf import FairseqBMUF
FairseqBMUF.add_args(parser)
if parse_known:
(args, extra) = parser.parse_known_args(input_args)
else:
args = parser.parse_args(input_args)
extra = None
if (hasattr(args, 'max_sentences_valid') and (args.max_sentences_valid is None)):
args.max_sentences_valid = args.max_sentences
if (hasattr(args, 'max_tokens_valid') and (args.max_tokens_valid is None)):
args.max_tokens_valid = args.max_tokens
if getattr(args, 'memory_efficient_fp16', False):
args.fp16 = True
if hasattr(args, 'arch'):
ARCH_CONFIG_REGISTRY[args.arch](args)
if parse_known:
return (args, extra)
else:
return args
|
def get_parser(desc, default_task='translation'):
usr_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
usr_parser.add_argument('--user-dir', default=None)
(usr_args, _) = usr_parser.parse_known_args()
utils.import_user_module(usr_args)
parser = argparse.ArgumentParser(allow_abbrev=False)
parser.add_argument('--no-progress-bar', action='store_true', help='disable progress bar')
parser.add_argument('--log-interval', type=int, default=1000, metavar='N', help='log progress every N batches (when progress bar is disabled)')
parser.add_argument('--log-format', default=None, help='log format to use', choices=['json', 'none', 'simple', 'tqdm'])
parser.add_argument('--tensorboard-logdir', metavar='DIR', default='', help='path to save logs for tensorboard, should match --logdir of running tensorboard (default: no tensorboard logging)')
parser.add_argument('--tbmf-wrapper', action='store_true', help='[FB only] ')
parser.add_argument('--seed', default=1, type=int, metavar='N', help='pseudo random number generator seed')
parser.add_argument('--cpu', action='store_true', help='use CPU instead of CUDA')
parser.add_argument('--fp16', action='store_true', help='use FP16')
parser.add_argument('--memory-efficient-fp16', action='store_true', help='use a memory-efficient version of FP16 training; implies --fp16')
parser.add_argument('--fp16-init-scale', default=(2 ** 7), type=int, help='default FP16 loss scale')
parser.add_argument('--fp16-scale-window', type=int, help='number of updates before increasing loss scale')
parser.add_argument('--fp16-scale-tolerance', default=0.0, type=float, help='pct of updates that can overflow before decreasing the loss scale')
parser.add_argument('--min-loss-scale', default=0.0001, type=float, metavar='D', help='minimum FP16 loss scale, after which training is stopped')
parser.add_argument('--threshold-loss-scale', type=float, help='threshold FP16 loss scale from below')
parser.add_argument('--user-dir', default=None, help='path to a python module containing custom extensions (tasks and/or architectures)')
parser.add_argument('--empty-cache-freq', default=0, type=int, help='how often to clear the PyTorch CUDA cache (0 to disable)')
from fairseq.registry import REGISTRIES
for (registry_name, REGISTRY) in REGISTRIES.items():
parser.add_argument(('--' + registry_name.replace('_', '-')), default=REGISTRY['default'], choices=REGISTRY['registry'].keys())
from fairseq.tasks import TASK_REGISTRY
parser.add_argument('--task', metavar='TASK', default=default_task, choices=TASK_REGISTRY.keys(), help='task')
return parser
|
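A hedged usage sketch of the parser helpers above. It assumes fairseq and its task/architecture registries are importable (get_parser() and parse_args_and_arch() pull them in at call time); the data directory and flag values are hypothetical.

# Assumes fairseq is installed and the functions above are in scope.
parser = get_training_parser(default_task='language_modeling')
args = parse_args_and_arch(parser, input_args=[
    'data-bin/example',                 # hypothetical preprocessed data directory
    '--arch', 'lightconv_lm',           # any registered architecture name
    '--optimizer', 'adam',
    '--lr', '0.0005',
    '--lr-scheduler', 'inverse_sqrt',
    '--warmup-updates', '4000',
])
print(args.arch, args.optimizer, args.lr)
|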